| repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
vbshah1992/microblog | flask/lib/python2.7/site-packages/pip-1.5.4-py2.7.egg/pip/_vendor/html5lib/filters/lint.py | 250 | 4062 | from __future__ import absolute_import, division, unicode_literals
from gettext import gettext
_ = gettext
from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class LintError(Exception):
pass
class Filter(_base.Filter):
def __iter__(self):
open_elements = []
contentModelFlag = "PCDATA"
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("StartTag not in PCDATA content model flag: %s") % name)
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %r") % name)
if not name:
raise LintError(_("Empty tag name"))
if type == "StartTag" and name in voidElements:
raise LintError(_("Void element reported as StartTag token: %s") % name)
elif type == "EmptyTag" and name not in voidElements:
raise LintError(_("Non-void element reported as EmptyTag token: %s") % token["name"])
if type == "StartTag":
open_elements.append(name)
for name, value in token["data"]:
if not isinstance(name, str):
raise LintError(_("Attribute name is not a string: %r") % name)
if not name:
raise LintError(_("Empty attribute name"))
if not isinstance(value, str):
raise LintError(_("Attribute value is not a string: %r") % value)
if name in cdataElements:
contentModelFlag = "CDATA"
elif name in rcdataElements:
contentModelFlag = "RCDATA"
elif name == "plaintext":
contentModelFlag = "PLAINTEXT"
elif type == "EndTag":
name = token["name"]
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %r") % name)
if not name:
raise LintError(_("Empty tag name"))
if name in voidElements:
raise LintError(_("Void element reported as EndTag token: %s") % name)
start_name = open_elements.pop()
if start_name != name:
raise LintError(_("EndTag (%s) does not match StartTag (%s)") % (name, start_name))
contentModelFlag = "PCDATA"
elif type == "Comment":
if contentModelFlag != "PCDATA":
raise LintError(_("Comment not in PCDATA content model flag"))
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
if not isinstance(data, str):
raise LintError(_("Attribute name is not a string: %r") % data)
if not data:
raise LintError(_("%s token with empty data") % type)
if type == "SpaceCharacters":
data = data.strip(spaceCharacters)
if data:
raise LintError(_("Non-space character(s) found in SpaceCharacters token: ") % data)
elif type == "Doctype":
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("Doctype not in PCDATA content model flag: %s") % name)
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %r") % name)
# XXX: what to do with token["data"] ?
elif type in ("ParseError", "SerializeError"):
pass
else:
raise LintError(_("Unknown token type: %s") % type)
yield token
| bsd-3-clause |
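The lint filter above simply passes tokens through while validating them. A minimal sketch of wiring it into an html5lib token stream follows, assuming the 0.9x-era API used in this file (`html5lib.getTreeWalker`, `html5lib.filters.lint.Filter`); names may differ in newer releases.

```python
import html5lib
from html5lib.filters import lint

doc = html5lib.parse("<p>hello <b>world</b></p>", treebuilder="dom")
walker = html5lib.getTreeWalker("dom")

# Iterating the wrapped walker raises LintError on malformed token streams
# and otherwise yields every token unchanged.
for token in lint.Filter(walker(doc)):
    pass
```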
FIWARE-TMForum/business-ecosystem-charging-backend | src/wstore/__init__.py | 1 | 2300 | # -*- coding: utf-8 -*-
# Copyright (c) 2016 - 2017 CoNWeT Lab., Universidad Politécnica de Madrid
# This file belongs to the business-charging-backend
# of the Business API Ecosystem.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import sys
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from wstore.models import Context
from wstore.store_commons.utils.url import is_valid_url
from wstore.ordering.inventory_client import InventoryClient
from wstore.rss_adaptor.rss_manager import ProviderManager
testing = sys.argv[1:2] == ['test']
if not testing:
# Validate that a correct site and local_site has been provided
if not is_valid_url(settings.SITE) or not is_valid_url(settings.LOCAL_SITE):
raise ImproperlyConfigured('SITE and LOCAL_SITE settings must be a valid URL')
# Create context object if it does not exist
if not len(Context.objects.all()):
Context.objects.create()
inventory = InventoryClient()
inventory.create_inventory_subscription()
# Create RSS default aggregator and provider
credentials = {
'user': settings.STORE_NAME,
'roles': [settings.ADMIN_ROLE],
'email': settings.WSTOREMAIL
}
prov_manager = ProviderManager(credentials)
try:
prov_manager.register_aggregator({
'aggregatorId': settings.WSTOREMAIL,
'aggregatorName': settings.STORE_NAME,
'defaultAggregator': True
})
except Exception as e: # A conflict response means the aggregator is already registered
if e.response.status_code != 409:
raise e
| agpl-3.0 |
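The startup block above treats an HTTP 409 from the RSS endpoint as "aggregator already registered" and swallows it, so repeated restarts stay idempotent. A hedged, generic sketch of that pattern (the URL and payload are placeholders, and `requests` stands in for the `ProviderManager` internals, which are not shown here):

```python
import requests

def register_aggregator(payload):
    # Placeholder endpoint; the real call goes through ProviderManager.
    resp = requests.post("https://rss.example.com/aggregators", json=payload)
    if resp.status_code == 409:
        # Conflict: the aggregator was registered on a previous start.
        return
    resp.raise_for_status()

register_aggregator({
    "aggregatorId": "store@example.com",
    "aggregatorName": "WStore",
    "defaultAggregator": True,
})
```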
aequitas/home-assistant | homeassistant/components/conversation/__init__.py | 7 | 5514 | """Support for functionality to have conversations with Home Assistant."""
import logging
import re
import voluptuous as vol
from homeassistant import core
from homeassistant.components import http
from homeassistant.components.cover import (
INTENT_CLOSE_COVER, INTENT_OPEN_COVER)
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.const import EVENT_COMPONENT_LOADED
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, intent
from homeassistant.loader import bind_hass
from homeassistant.setup import ATTR_COMPONENT
from .util import create_matcher
_LOGGER = logging.getLogger(__name__)
ATTR_TEXT = 'text'
DOMAIN = 'conversation'
REGEX_TURN_COMMAND = re.compile(r'turn (?P<name>(?: |\w)+) (?P<command>\w+)')
REGEX_TYPE = type(re.compile(''))
UTTERANCES = {
'cover': {
INTENT_OPEN_COVER: ['Open [the] [a] [an] {name}[s]'],
INTENT_CLOSE_COVER: ['Close [the] [a] [an] {name}[s]']
}
}
SERVICE_PROCESS = 'process'
SERVICE_PROCESS_SCHEMA = vol.Schema({
vol.Required(ATTR_TEXT): cv.string,
})
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({
vol.Optional('intents'): vol.Schema({
cv.string: vol.All(cv.ensure_list, [cv.string])
})
})}, extra=vol.ALLOW_EXTRA)
@core.callback
@bind_hass
def async_register(hass, intent_type, utterances):
"""Register utterances and any custom intents.
Registrations don't require conversations to be loaded. They will become
active once the conversation component is loaded.
"""
intents = hass.data.get(DOMAIN)
if intents is None:
intents = hass.data[DOMAIN] = {}
conf = intents.get(intent_type)
if conf is None:
conf = intents[intent_type] = []
for utterance in utterances:
if isinstance(utterance, REGEX_TYPE):
conf.append(utterance)
else:
conf.append(create_matcher(utterance))
async def async_setup(hass, config):
"""Register the process service."""
config = config.get(DOMAIN, {})
intents = hass.data.get(DOMAIN)
if intents is None:
intents = hass.data[DOMAIN] = {}
for intent_type, utterances in config.get('intents', {}).items():
conf = intents.get(intent_type)
if conf is None:
conf = intents[intent_type] = []
conf.extend(create_matcher(utterance) for utterance in utterances)
async def process(service):
"""Parse text into commands."""
text = service.data[ATTR_TEXT]
_LOGGER.debug('Processing: <%s>', text)
try:
await _process(hass, text)
except intent.IntentHandleError as err:
_LOGGER.error('Error processing %s: %s', text, err)
hass.services.async_register(
DOMAIN, SERVICE_PROCESS, process, schema=SERVICE_PROCESS_SCHEMA)
hass.http.register_view(ConversationProcessView)
# We strip trailing 's' from name because our state matcher will fail
# if a letter is not there. By removing 's' we can match singular and
# plural names.
async_register(hass, intent.INTENT_TURN_ON, [
'Turn [the] [a] {name}[s] on',
'Turn on [the] [a] [an] {name}[s]',
])
async_register(hass, intent.INTENT_TURN_OFF, [
'Turn [the] [a] [an] {name}[s] off',
'Turn off [the] [a] [an] {name}[s]',
])
async_register(hass, intent.INTENT_TOGGLE, [
'Toggle [the] [a] [an] {name}[s]',
'[the] [a] [an] {name}[s] toggle',
])
@callback
def register_utterances(component):
"""Register utterances for a component."""
if component not in UTTERANCES:
return
for intent_type, sentences in UTTERANCES[component].items():
async_register(hass, intent_type, sentences)
@callback
def component_loaded(event):
"""Handle a new component loaded."""
register_utterances(event.data[ATTR_COMPONENT])
hass.bus.async_listen(EVENT_COMPONENT_LOADED, component_loaded)
# Check already loaded components.
for component in hass.config.components:
register_utterances(component)
return True
async def _process(hass, text):
"""Process a line of text."""
intents = hass.data.get(DOMAIN, {})
for intent_type, matchers in intents.items():
for matcher in matchers:
match = matcher.match(text)
if not match:
continue
response = await hass.helpers.intent.async_handle(
DOMAIN, intent_type,
{key: {'value': value} for key, value
in match.groupdict().items()}, text)
return response
class ConversationProcessView(http.HomeAssistantView):
"""View to retrieve shopping list content."""
url = '/api/conversation/process'
name = "api:conversation:process"
@RequestDataValidator(vol.Schema({
vol.Required('text'): str,
}))
async def post(self, request, data):
"""Send a request for processing."""
hass = request.app['hass']
try:
intent_result = await _process(hass, data['text'])
except intent.IntentHandleError as err:
intent_result = intent.IntentResponse()
intent_result.async_set_speech(str(err))
if intent_result is None:
intent_result = intent.IntentResponse()
intent_result.async_set_speech("Sorry, I didn't understand that")
return self.json(intent_result)
| apache-2.0 |
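`ConversationProcessView` exposes the processing logic over HTTP at `/api/conversation/process`, expecting a JSON body with a `text` field. A hedged sketch of calling it from outside Home Assistant (host, port and the long-lived access token are placeholders):

```python
import requests

resp = requests.post(
    "http://localhost:8123/api/conversation/process",
    headers={"Authorization": "Bearer YOUR_LONG_LIVED_TOKEN"},
    json={"text": "turn on the kitchen light"},
    timeout=10,
)
# The body is the IntentResponse serialized to JSON (speech, card, ...).
print(resp.json())
```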
valentin-krasontovitsch/ansible | lib/ansible/plugins/doc_fragments/docker.py | 5 | 5695 | # -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Docker doc fragment
DOCUMENTATION = r'''
options:
docker_host:
description:
- The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
the module will automatically replace C(tcp) in the connection URL with C(https).
- If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
instead. If the environment variable is not set, the default value will be used.
type: str
default: unix://var/run/docker.sock
aliases: [ docker_url ]
tls_hostname:
description:
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
be used instead. If the environment variable is not set, the default value will be used.
type: str
default: localhost
api_version:
description:
- The version of the Docker API running on the Docker Host.
- Defaults to the latest version of the API supported by docker-py.
- If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
used instead. If the environment variable is not set, the default value will be used.
type: str
default: auto
aliases: [ docker_api_version ]
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
instead. If the environment variable is not set, the default value will be used.
type: int
default: 60
cacert_path:
description:
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: str
aliases: [ tls_ca_cert ]
cert_path:
description:
- Path to the client's TLS certificate file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: str
aliases: [ tls_client_cert ]
key_path:
description:
- Path to the client's TLS key file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: str
aliases: [ tls_client_key ]
ssl_version:
description:
- Provide a valid SSL version number. Default value determined by ssl.py module.
- If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
used instead.
type: str
tls:
description:
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
instead. If the environment variable is not set, the default value will be used.
type: bool
default: false
tls_verify:
description:
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
used instead. If the environment variable is not set, the default value will be used.
type: bool
default: false
debug:
description:
- Debug mode
type: bool
default: false
notes:
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
with the product that sets up the environment. It will set these variables for you. See
U(https://docker-py.readthedocs.io/en/stable/machine/) for more details.
- When connecting to Docker daemon with TLS, you might need to install additional Python packages.
For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(pip).
- Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
and use C($DOCKER_CONFIG/config.json) otherwise.
'''
| gpl-3.0 |
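The options documented above mirror what the Docker SDK for Python accepts when a module opens its connection. A rough, hedged mapping onto direct SDK calls (host and certificate paths are placeholders; real module code goes through Ansible's common Docker utilities rather than constructing the client like this):

```python
import docker

# TLS material corresponding to cacert_path / cert_path / key_path.
tls_config = docker.tls.TLSConfig(
    client_cert=("/certs/cert.pem", "/certs/key.pem"),
    ca_cert="/certs/ca.pem",
    verify=True,  # roughly tls_verify: yes
)

client = docker.DockerClient(
    base_url="tcp://192.0.2.23:2376",  # docker_host
    version="auto",                    # api_version
    timeout=60,                        # timeout
    tls=tls_config,
)
print(client.ping())
```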
rmboggs/django | django/conf/locale/ml/formats.py | 1007 | 1815 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| bsd-3-clause |
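As the comments in this locale file note, the `*_INPUT_FORMATS` entries are plain `strftime`/`strptime` patterns, so each one can be checked directly with the standard library:

```python
from datetime import datetime

# Two of the accepted input formats from the lists above.
print(datetime.strptime("2006-10-25 14:30:59", "%Y-%m-%d %H:%M:%S"))
print(datetime.strptime("10/25/2006 14:30", "%m/%d/%Y %H:%M"))
```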
mbauskar/sapphire-erpnext | erpnext/stock/get_item_details.py | 1 | 15380 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, throw
from frappe.utils import flt, cint, add_days, cstr
import json
from erpnext.accounts.doctype.pricing_rule.pricing_rule import get_pricing_rule_for_item
from erpnext.setup.utils import get_exchange_rate
from frappe.model.meta import get_field_precision
@frappe.whitelist()
def get_item_details(args):
"""
args = {
"item_code": "",
"warehouse": None,
"customer": "",
"conversion_rate": 1.0,
"selling_price_list": None,
"price_list_currency": None,
"plc_conversion_rate": 1.0,
"parenttype": "",
"parent": "",
"supplier": None,
"transaction_date": None,
"conversion_rate": 1.0,
"buying_price_list": None,
"is_subcontracted": "Yes" / "No",
"transaction_type": "selling",
"ignore_pricing_rule": 0/1
"project_name": "",
}
"""
args = process_args(args)
item_doc = frappe.get_doc("Item", args.item_code)
item = item_doc
validate_item_details(args, item)
out = get_basic_details(args, item)
get_party_item_code(args, item_doc, out)
if out.get("warehouse"):
out.update(get_available_qty(args.item_code, out.warehouse))
out.update(get_projected_qty(item.name, out.warehouse))
get_price_list_rate(args, item_doc, out)
if args.transaction_type == "selling" and cint(args.is_pos):
out.update(get_pos_profile_item_details(args.company, args))
# update args with out, if key or value not exists
for key, value in out.iteritems():
if args.get(key) is None:
args[key] = value
out.update(get_pricing_rule_for_item(args))
if args.get("parenttype") in ("Sales Invoice", "Delivery Note"):
if item_doc.has_serial_no == 1 and not args.serial_no:
out.serial_no = get_serial_nos_by_fifo(args, item_doc)
if args.transaction_date and item.lead_time_days:
out.schedule_date = out.lead_time_date = add_days(args.transaction_date,
item.lead_time_days)
if args.get("is_subcontracted") == "Yes":
out.bom = get_default_bom(args.item_code)
return out
def process_args(args):
if isinstance(args, basestring):
args = json.loads(args)
args = frappe._dict(args)
if not args.get("transaction_type"):
if args.get("parenttype")=="Material Request" or \
frappe.get_meta(args.get("parenttype")).get_field("supplier"):
args.transaction_type = "buying"
else:
args.transaction_type = "selling"
if not args.get("price_list"):
args.price_list = args.get("selling_price_list") or args.get("buying_price_list")
if args.barcode:
args.item_code = get_item_code(barcode=args.barcode)
elif not args.item_code and args.serial_no:
args.item_code = get_item_code(serial_no=args.serial_no)
return args
@frappe.whitelist()
def get_item_code(barcode=None, serial_no=None):
if barcode:
item_code = frappe.db.get_value("Item", {"barcode": barcode})
if not item_code:
frappe.throw(_("No Item with Barcode {0}").format(barcode))
elif serial_no:
item_code = frappe.db.get_value("Serial No", serial_no, "item_code")
if not item_code:
frappe.throw(_("No Item with Serial No {0}").format(serial_no))
return item_code
def validate_item_details(args, item):
if not args.company:
throw(_("Please specify Company"))
from erpnext.stock.doctype.item.item import validate_end_of_life
validate_end_of_life(item.name, item.end_of_life)
if args.transaction_type == "selling":
# validate if sales item or service item
if args.get("order_type") == "Maintenance":
if item.is_service_item != 1:
throw(_("Item {0} must be a Service Item.").format(item.name))
elif item.is_sales_item != 1:
throw(_("Item {0} must be a Sales Item").format(item.name))
if cint(item.has_variants):
throw(_("Item {0} is a template, please select one of its variants").format(item.name))
elif args.transaction_type == "buying" and args.parenttype != "Material Request":
# validate if purchase item or subcontracted item
if item.is_purchase_item != 1:
throw(_("Item {0} must be a Purchase Item").format(item.name))
if args.get("is_subcontracted") == "Yes" and item.is_sub_contracted_item != 1:
throw(_("Item {0} must be a Sub-contracted Item").format(item.name))
def get_basic_details(args, item):
if not item:
item = frappe.get_doc("Item", args.get("item_code"))
if item.variant_of:
item.update_template_tables()
from frappe.defaults import get_user_default_as_list
user_default_warehouse_list = get_user_default_as_list('warehouse')
user_default_warehouse = user_default_warehouse_list[0] \
if len(user_default_warehouse_list)==1 else ""
out = frappe._dict({
"item_code": item.name,
"item_name": item.item_name,
"description": cstr(item.description).strip(),
"image": cstr(item.image).strip(),
"warehouse": user_default_warehouse or args.warehouse or item.default_warehouse,
"income_account": get_default_income_account(args, item),
"expense_account": get_default_expense_account(args, item),
"cost_center": get_default_cost_center(args, item),
"batch_no": None,
"item_tax_rate": json.dumps(dict(([d.tax_type, d.tax_rate] for d in
item.get("taxes")))),
"uom": item.stock_uom,
"min_order_qty": flt(item.min_order_qty) if args.parenttype == "Material Request" else "",
"conversion_factor": 1.0,
"qty": args.qty or 1.0,
"stock_qty": 1.0,
"price_list_rate": 0.0,
"base_price_list_rate": 0.0,
"rate": 0.0,
"base_rate": 0.0,
"amount": 0.0,
"base_amount": 0.0,
"net_rate": 0.0,
"net_amount": 0.0,
"discount_percentage": 0.0
})
# if default specified in item is for another company, fetch from company
for d in [["Account", "income_account", "default_income_account"], ["Account", "expense_account", "default_expense_account"],
["Cost Center", "cost_center", "cost_center"], ["Warehouse", "warehouse", ""]]:
company = frappe.db.get_value(d[0], out.get(d[1]), "company")
if not out[d[1]] or (company and args.company != company):
out[d[1]] = frappe.db.get_value("Company", args.company, d[2]) if d[2] else None
for fieldname in ("item_name", "item_group", "barcode", "brand", "stock_uom"):
out[fieldname] = item.get(fieldname)
return out
def get_default_income_account(args, item):
return (item.income_account
or args.income_account
or frappe.db.get_value("Item Group", item.item_group, "default_income_account"))
def get_default_expense_account(args, item):
return (item.expense_account
or args.expense_account
or frappe.db.get_value("Item Group", item.item_group, "default_expense_account"))
def get_default_cost_center(args, item):
return (frappe.db.get_value("Project", args.get("project_name"), "cost_center")
or (item.selling_cost_center if args.get("transaction_type") == "selling" else item.buying_cost_center)
or frappe.db.get_value("Item Group", item.item_group, "default_cost_center")
or args.get("cost_center"))
def get_price_list_rate(args, item_doc, out):
meta = frappe.get_meta(args.parenttype)
if meta.get_field("currency"):
validate_price_list(args)
validate_conversion_rate(args, meta)
price_list_rate = get_price_list_rate_for(args, item_doc.name)
if not price_list_rate and item_doc.variant_of:
price_list_rate = get_price_list_rate_for(args, item_doc.variant_of)
if not price_list_rate:
if args.price_list and args.rate:
insert_item_price(args)
return {}
out.price_list_rate = flt(price_list_rate) * flt(args.plc_conversion_rate) \
/ flt(args.conversion_rate)
if not out.price_list_rate and args.transaction_type == "buying":
from erpnext.stock.doctype.item.item import get_last_purchase_details
out.update(get_last_purchase_details(item_doc.name,
args.parent, args.conversion_rate))
def insert_item_price(args):
"""Insert Item Price if Price List and Price List Rate are specified and currency is the same"""
if frappe.db.get_value("Price List", args.price_list, "currency") == args.currency \
and cint(frappe.db.get_single_value("Stock Settings", "auto_insert_price_list_rate_if_missing")):
if frappe.has_permission("Item Price", "write"):
price_list_rate = args.rate / args.conversion_factor \
if args.get("conversion_factor") else args.rate
item_price = frappe.get_doc({
"doctype": "Item Price",
"price_list": args.price_list,
"item_code": args.item_code,
"currency": args.currency,
"price_list_rate": price_list_rate
})
item_price.insert()
frappe.msgprint("Item Price added for {0} in Price List {1}".format(args.item_code,
args.price_list))
def get_price_list_rate_for(args, item_code):
return frappe.db.get_value("Item Price",
{"price_list": args.price_list, "item_code": item_code}, "price_list_rate")
def validate_price_list(args):
if args.get("price_list"):
if not frappe.db.get_value("Price List",
{"name": args.price_list, args.transaction_type: 1, "enabled": 1}):
throw(_("Price List {0} is disabled").format(args.price_list))
else:
throw(_("Price List not selected"))
def validate_conversion_rate(args, meta):
from erpnext.controllers.accounts_controller import validate_conversion_rate
if (not args.conversion_rate
and args.currency==frappe.db.get_value("Company", args.company, "default_currency")):
args.conversion_rate = 1.0
# validate currency conversion rate
validate_conversion_rate(args.currency, args.conversion_rate,
meta.get_label("conversion_rate"), args.company)
args.conversion_rate = flt(args.conversion_rate,
get_field_precision(meta.get_field("conversion_rate"),
frappe._dict({"fields": args})))
# validate price list currency conversion rate
if not args.get("price_list_currency"):
throw(_("Price List Currency not selected"))
else:
validate_conversion_rate(args.price_list_currency, args.plc_conversion_rate,
meta.get_label("plc_conversion_rate"), args.company)
args.plc_conversion_rate = flt(args.plc_conversion_rate,
get_field_precision(meta.get_field("plc_conversion_rate"),
frappe._dict({"fields": args})))
def get_party_item_code(args, item_doc, out):
if args.transaction_type == "selling":
customer_item_code = item_doc.get("customer_items", {"customer_name": args.customer})
out.customer_item_code = customer_item_code[0].ref_code if customer_item_code else None
else:
item_supplier = item_doc.get("supplier_items", {"supplier": args.supplier})
out.supplier_part_no = item_supplier[0].supplier_part_no if item_supplier else None
def get_pos_profile_item_details(company, args, pos_profile=None):
res = frappe._dict()
if not pos_profile:
pos_profile = get_pos_profile(company)
if pos_profile:
for fieldname in ("income_account", "cost_center", "warehouse", "expense_account"):
if not args.get(fieldname) and pos_profile.get(fieldname):
res[fieldname] = pos_profile.get(fieldname)
if res.get("warehouse"):
res.actual_qty = get_available_qty(args.item_code,
res.warehouse).get("actual_qty")
return res
@frappe.whitelist()
def get_pos_profile(company):
pos_profile = frappe.db.sql("""select * from `tabPOS Profile` where user = %s
and company = %s""", (frappe.session['user'], company), as_dict=1)
if not pos_profile:
pos_profile = frappe.db.sql("""select * from `tabPOS Profile`
where ifnull(user,'') = '' and company = %s""", company, as_dict=1)
return pos_profile and pos_profile[0] or None
def get_serial_nos_by_fifo(args, item_doc):
if frappe.db.get_single_value("Stock Settings", "automatically_set_serial_nos_based_on_fifo"):
return "\n".join(frappe.db.sql_list("""select name from `tabSerial No`
where item_code=%(item_code)s and warehouse=%(warehouse)s
order by timestamp(purchase_date, purchase_time) asc limit %(qty)s""", {
"item_code": args.item_code,
"warehouse": args.warehouse,
"qty": abs(cint(args.qty))
}))
def get_actual_batch_qty(batch_no,warehouse,item_code):
actual_batch_qty = 0
if batch_no:
actual_batch_qty = flt(frappe.db.sql("""select sum(actual_qty)
from `tabStock Ledger Entry`
where warehouse=%s and item_code=%s and batch_no=%s""",
(warehouse, item_code, batch_no))[0][0])
return actual_batch_qty
@frappe.whitelist()
def get_conversion_factor(item_code, uom):
variant_of = frappe.db.get_value("Item", item_code, "variant_of")
filters = {"parent": item_code, "uom": uom}
if variant_of:
filters["parent"] = ("in", (item_code, variant_of))
return {"conversion_factor": frappe.db.get_value("UOM Conversion Detail",
filters, "conversion_factor")}
@frappe.whitelist()
def get_projected_qty(item_code, warehouse):
return {"projected_qty": frappe.db.get_value("Bin",
{"item_code": item_code, "warehouse": warehouse}, "projected_qty")}
@frappe.whitelist()
def get_available_qty(item_code, warehouse):
return frappe.db.get_value("Bin", {"item_code": item_code, "warehouse": warehouse},
["projected_qty", "actual_qty"], as_dict=True) or {}
@frappe.whitelist()
def get_batch_qty(batch_no,warehouse,item_code):
actual_batch_qty = get_actual_batch_qty(batch_no,warehouse,item_code)
if batch_no:
return {'actual_batch_qty': actual_batch_qty}
@frappe.whitelist()
def apply_price_list(args):
"""
args = {
"item_list": [{"doctype": "", "name": "", "item_code": "", "brand": "", "item_group": ""}, ...],
"conversion_rate": 1.0,
"selling_price_list": None,
"price_list_currency": None,
"plc_conversion_rate": 1.0,
"parenttype": "",
"parent": "",
"supplier": None,
"transaction_date": None,
"conversion_rate": 1.0,
"buying_price_list": None,
"transaction_type": "selling",
"ignore_pricing_rule": 0/1
}
"""
args = process_args(args)
parent = get_price_list_currency_and_exchange_rate(args)
children = []
if "item_list" in args:
item_list = args.get("item_list")
del args["item_list"]
args.update(parent)
for item in item_list:
args_copy = frappe._dict(args.copy())
args_copy.update(item)
item_details = apply_price_list_on_item(args_copy)
children.append(item_details)
return {
"parent": parent,
"children": children
}
def apply_price_list_on_item(args):
item_details = frappe._dict()
item_doc = frappe.get_doc("Item", args.item_code)
get_price_list_rate(args, item_doc, item_details)
item_details.update(get_pricing_rule_for_item(args))
return item_details
def get_price_list_currency(price_list):
if price_list:
result = frappe.db.get_value("Price List", {"name": price_list,
"enabled": 1}, ["name", "currency"], as_dict=True)
if not result:
throw(_("Price List {0} is disabled").format(price_list))
return result.currency
def get_price_list_currency_and_exchange_rate(args):
price_list_currency = get_price_list_currency(args.price_list)
plc_conversion_rate = args.plc_conversion_rate
if (not plc_conversion_rate) or (price_list_currency and args.price_list_currency \
and price_list_currency != args.price_list_currency):
plc_conversion_rate = get_exchange_rate(price_list_currency, args.currency) or plc_conversion_rate
return {
"price_list_currency": price_list_currency,
"plc_conversion_rate": plc_conversion_rate
}
@frappe.whitelist()
def get_default_bom(item_code=None):
if item_code:
bom = frappe.db.get_value("BOM", {"docstatus": 1, "is_default": 1, "is_active": 1, "item": item_code})
if bom:
return bom
else:
frappe.throw(_("No default BOM exists for Item {0}").format(item_code))
| agpl-3.0 |
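`get_item_details` is whitelisted, so it can be exercised from a bench console or server script as well as over RPC. A hedged sketch of a server-side call follows; the item, company, customer and price list names are placeholders that must exist in the site being used.

```python
from erpnext.stock.get_item_details import get_item_details

details = get_item_details({
    "item_code": "ITEM-0001",
    "company": "My Company",
    "customer": "My Customer",
    "currency": "USD",
    "price_list": "Standard Selling",
    "price_list_currency": "USD",
    "conversion_rate": 1.0,
    "plc_conversion_rate": 1.0,
    "transaction_type": "selling",
    "parenttype": "Sales Invoice",
    "qty": 1,
})
print(details.warehouse, details.price_list_rate)
```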
costypetrisor/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
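The exercise splits the data with a manual 90/10 slice after shuffling. In current scikit-learn the same split is usually written with `train_test_split`, which handles the shuffling as well:

```python
from sklearn import datasets
from sklearn.model_selection import train_test_split

iris = datasets.load_iris()
X, y = iris.data[iris.target != 0, :2], iris.target[iris.target != 0]

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.1, random_state=0)
```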
Krossom/python-for-android | python-modules/twisted/twisted/words/test/test_irc.py | 49 | 58572 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.irc}.
"""
import time
from twisted.trial import unittest
from twisted.trial.unittest import TestCase
from twisted.words.protocols import irc
from twisted.words.protocols.irc import IRCClient
from twisted.internet import protocol
from twisted.test.proto_helpers import StringTransport, StringIOWithoutClosing
class ModeParsingTests(unittest.TestCase):
"""
Tests for L{twisted.words.protocols.irc.parseModes}.
"""
paramModes = ('klb', 'b')
def test_emptyModes(self):
"""
Parsing an empty mode string raises L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, '', [])
def test_emptyModeSequence(self):
"""
Parsing a mode string that contains an empty sequence (either a C{+} or
C{-} followed directly by another C{+} or C{-}, or not followed by
anything at all) raises L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, '++k', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '-+k', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '+', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '-', [])
def test_malformedModes(self):
"""
Parsing a mode string that does not start with C{+} or C{-} raises
L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, 'foo', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '%', [])
def test_nullModes(self):
"""
Parsing a mode string that contains no mode characters raises
L{irc.IRCBadModes}.
"""
self.assertRaises(irc.IRCBadModes, irc.parseModes, '+', [])
self.assertRaises(irc.IRCBadModes, irc.parseModes, '-', [])
def test_singleMode(self):
"""
Parsing a single mode setting with no parameters results in that mode,
with no parameters, in the "added" direction and no modes in the
"removed" direction.
"""
added, removed = irc.parseModes('+s', [])
self.assertEquals(added, [('s', None)])
self.assertEquals(removed, [])
added, removed = irc.parseModes('-s', [])
self.assertEquals(added, [])
self.assertEquals(removed, [('s', None)])
def test_singleDirection(self):
"""
Parsing a single-direction mode setting with multiple modes and no
parameters, results in all modes falling into the same direction group.
"""
added, removed = irc.parseModes('+stn', [])
self.assertEquals(added, [('s', None),
('t', None),
('n', None)])
self.assertEquals(removed, [])
added, removed = irc.parseModes('-nt', [])
self.assertEquals(added, [])
self.assertEquals(removed, [('n', None),
('t', None)])
def test_multiDirection(self):
"""
Parsing a multi-direction mode setting with no parameters.
"""
added, removed = irc.parseModes('+s-n+ti', [])
self.assertEquals(added, [('s', None),
('t', None),
('i', None)])
self.assertEquals(removed, [('n', None)])
def test_consecutiveDirection(self):
"""
Parsing a multi-direction mode setting containing two consecutive mode
sequences with the same direction results in the same result as if
there were only one mode sequence in the same direction.
"""
added, removed = irc.parseModes('+sn+ti', [])
self.assertEquals(added, [('s', None),
('n', None),
('t', None),
('i', None)])
self.assertEquals(removed, [])
def test_mismatchedParams(self):
"""
If the number of mode parameters does not match the number of modes
expecting parameters, L{irc.IRCBadModes} is raised.
"""
self.assertRaises(irc.IRCBadModes,
irc.parseModes,
'+k', [],
self.paramModes)
self.assertRaises(irc.IRCBadModes,
irc.parseModes,
'+kl', ['foo', '10', 'lulz_extra_param'],
self.paramModes)
def test_parameters(self):
"""
Modes which require parameters are parsed and paired with their relevant
parameter; modes which do not require parameters do not consume any of
the parameters.
"""
added, removed = irc.parseModes(
'+klbb',
['somekey', '42', 'nick!user@host', 'other!*@*'],
self.paramModes)
self.assertEquals(added, [('k', 'somekey'),
('l', '42'),
('b', 'nick!user@host'),
('b', 'other!*@*')])
self.assertEquals(removed, [])
added, removed = irc.parseModes(
'-klbb',
['nick!user@host', 'other!*@*'],
self.paramModes)
self.assertEquals(added, [])
self.assertEquals(removed, [('k', None),
('l', None),
('b', 'nick!user@host'),
('b', 'other!*@*')])
# Mix a no-argument mode in with argument modes.
added, removed = irc.parseModes(
'+knbb',
['somekey', 'nick!user@host', 'other!*@*'],
self.paramModes)
self.assertEquals(added, [('k', 'somekey'),
('n', None),
('b', 'nick!user@host'),
('b', 'other!*@*')])
self.assertEquals(removed, [])
stringSubjects = [
"Hello, this is a nice string with no complications.",
"xargs%(NUL)smight%(NUL)slike%(NUL)sthis" % {'NUL': irc.NUL },
"embedded%(CR)snewline%(CR)s%(NL)sFUN%(NL)s" % {'CR': irc.CR,
'NL': irc.NL},
"escape!%(X)s escape!%(M)s %(X)s%(X)sa %(M)s0" % {'X': irc.X_QUOTE,
'M': irc.M_QUOTE}
]
class QuotingTest(unittest.TestCase):
def test_lowquoteSanity(self):
"""Testing client-server level quote/dequote"""
for s in stringSubjects:
self.failUnlessEqual(s, irc.lowDequote(irc.lowQuote(s)))
def test_ctcpquoteSanity(self):
"""Testing CTCP message level quote/dequote"""
for s in stringSubjects:
self.failUnlessEqual(s, irc.ctcpDequote(irc.ctcpQuote(s)))
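# Editorial aside (not part of the original module): the quoting round-trip
# exercised above, applied to one concrete string with an embedded CR/LF,
# a NUL and a backslash; the irc module is imported at the top of this file.
_sample = "embedded\r\nnewline, NUL \x00 and backslash \\"
assert irc.lowDequote(irc.lowQuote(_sample)) == _sample
assert irc.ctcpDequote(irc.ctcpQuote(_sample)) == _sample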
class Dispatcher(irc._CommandDispatcherMixin):
"""
A dispatcher that exposes one known command and handles unknown commands.
"""
prefix = 'disp'
def disp_working(self, a, b):
"""
A known command that returns its input.
"""
return a, b
def disp_unknown(self, name, a, b):
"""
Handle unknown commands by returning their name and inputs.
"""
return name, a, b
class DispatcherTests(unittest.TestCase):
"""
Tests for L{irc._CommandDispatcherMixin}.
"""
def test_dispatch(self):
"""
Dispatching a command invokes the correct handler.
"""
disp = Dispatcher()
args = (1, 2)
res = disp.dispatch('working', *args)
self.assertEquals(res, args)
def test_dispatchUnknown(self):
"""
Dispatching an unknown command invokes the default handler.
"""
disp = Dispatcher()
name = 'missing'
args = (1, 2)
res = disp.dispatch(name, *args)
self.assertEquals(res, (name,) + args)
def test_dispatchMissingUnknown(self):
"""
Dispatching an unknown command, when no default handler is present,
results in an exception being raised.
"""
disp = Dispatcher()
disp.disp_unknown = None
self.assertRaises(irc.UnhandledCommand, disp.dispatch, 'bar')
class ServerSupportedFeatureTests(unittest.TestCase):
"""
Tests for L{ServerSupportedFeatures} and related functions.
"""
def test_intOrDefault(self):
"""
L{_intOrDefault} converts values to C{int} if possible, otherwise
returns a default value.
"""
self.assertEquals(irc._intOrDefault(None), None)
self.assertEquals(irc._intOrDefault([]), None)
self.assertEquals(irc._intOrDefault(''), None)
self.assertEquals(irc._intOrDefault('hello', 5), 5)
self.assertEquals(irc._intOrDefault('123'), 123)
self.assertEquals(irc._intOrDefault(123), 123)
def test_splitParam(self):
"""
L{ServerSupportedFeatures._splitParam} splits ISUPPORT parameters
into key and values. Parameters without a separator are split into a
key and a list containing only the empty string. Escaped parameters
are unescaped.
"""
params = [('FOO', ('FOO', [''])),
('FOO=', ('FOO', [''])),
('FOO=1', ('FOO', ['1'])),
('FOO=1,2,3', ('FOO', ['1', '2', '3'])),
('FOO=A\\x20B', ('FOO', ['A B'])),
('FOO=\\x5Cx', ('FOO', ['\\x'])),
('FOO=\\', ('FOO', ['\\'])),
('FOO=\\n', ('FOO', ['\\n']))]
_splitParam = irc.ServerSupportedFeatures._splitParam
for param, expected in params:
res = _splitParam(param)
self.assertEquals(res, expected)
self.assertRaises(ValueError, _splitParam, 'FOO=\\x')
self.assertRaises(ValueError, _splitParam, 'FOO=\\xNN')
self.assertRaises(ValueError, _splitParam, 'FOO=\\xN')
self.assertRaises(ValueError, _splitParam, 'FOO=\\x20\\x')
def test_splitParamArgs(self):
"""
L{ServerSupportedFeatures._splitParamArgs} splits ISUPPORT parameter
arguments into key and value. Arguments without a separator are
split into a key and an empty string.
"""
res = irc.ServerSupportedFeatures._splitParamArgs(['A:1', 'B:2', 'C:', 'D'])
self.assertEquals(res, [('A', '1'),
('B', '2'),
('C', ''),
('D', '')])
def test_splitParamArgsProcessor(self):
"""
L{ServerSupportedFeatures._splitParamArgs} uses the argument processor
passed to it to convert ISUPPORT argument values to some more suitable
form.
"""
res = irc.ServerSupportedFeatures._splitParamArgs(['A:1', 'B:2', 'C'],
irc._intOrDefault)
self.assertEquals(res, [('A', 1),
('B', 2),
('C', None)])
def test_parsePrefixParam(self):
"""
L{ServerSupportedFeatures._parsePrefixParam} parses the ISUPPORT PREFIX
parameter into a mapping from modes to prefix symbols, returns
C{None} if there is no parseable prefix parameter or raises
C{ValueError} if the prefix parameter is malformed.
"""
_parsePrefixParam = irc.ServerSupportedFeatures._parsePrefixParam
self.assertEquals(_parsePrefixParam(''), None)
self.assertRaises(ValueError, _parsePrefixParam, 'hello')
self.assertEquals(_parsePrefixParam('(ov)@+'),
{'o': ('@', 0),
'v': ('+', 1)})
def test_parseChanModesParam(self):
"""
L{ServerSupportedFeatures._parseChanModesParam} parses the ISUPPORT
CHANMODES parameter into a mapping from mode categories to mode
characters. Passing fewer than 4 parameters results in the empty string
for the relevant categories. Passing more than 4 parameters raises
C{ValueError}.
"""
_parseChanModesParam = irc.ServerSupportedFeatures._parseChanModesParam
self.assertEquals(
_parseChanModesParam([]),
{'addressModes': '',
'param': '',
'setParam': '',
'noParam': ''})
self.assertEquals(
_parseChanModesParam(['b', 'k', 'l', 'imnpst']),
{'addressModes': 'b',
'param': 'k',
'setParam': 'l',
'noParam': 'imnpst'})
self.assertEquals(
_parseChanModesParam(['b', 'k', 'l']),
{'addressModes': 'b',
'param': 'k',
'setParam': 'l',
'noParam': ''})
self.assertRaises(
ValueError,
_parseChanModesParam, ['a', 'b', 'c', 'd', 'e'])
def test_parse(self):
"""
L{ServerSupportedFeatures.parse} changes the internal state of the
instance to reflect the features indicated by the parsed ISUPPORT
parameters, including unknown parameters and unsetting previously set
parameters.
"""
supported = irc.ServerSupportedFeatures()
supported.parse(['MODES=4',
'CHANLIMIT=#:20,&:10',
'INVEX',
'EXCEPTS=Z',
'UNKNOWN=A,B,C'])
self.assertEquals(supported.getFeature('MODES'), 4)
self.assertEquals(supported.getFeature('CHANLIMIT'),
[('#', 20),
('&', 10)])
self.assertEquals(supported.getFeature('INVEX'), 'I')
self.assertEquals(supported.getFeature('EXCEPTS'), 'Z')
self.assertEquals(supported.getFeature('UNKNOWN'), ('A', 'B', 'C'))
self.assertTrue(supported.hasFeature('INVEX'))
supported.parse(['-INVEX'])
self.assertFalse(supported.hasFeature('INVEX'))
# Unsetting a previously unset parameter should not be a problem.
supported.parse(['-INVEX'])
def _parse(self, features):
"""
Parse all specified features according to the ISUPPORT specifications.
@type features: C{list} of C{(featureName, value)}
@param features: Feature names and values to parse
@rtype: L{irc.ServerSupportedFeatures}
"""
supported = irc.ServerSupportedFeatures()
features = ['%s=%s' % (name, value or '')
for name, value in features]
supported.parse(features)
return supported
def _parseFeature(self, name, value=None):
"""
Parse a feature, with the given name and value, according to the
ISUPPORT specifications and return the parsed value.
"""
supported = self._parse([(name, value)])
return supported.getFeature(name)
def _testIntOrDefaultFeature(self, name, default=None):
"""
Perform some common tests on a feature known to use L{_intOrDefault}.
"""
self.assertEquals(
self._parseFeature(name, None),
default)
self.assertEquals(
self._parseFeature(name, 'notanint'),
default)
self.assertEquals(
self._parseFeature(name, '42'),
42)
def _testFeatureDefault(self, name, features=None):
"""
Features known to have default values are reported as being present by
L{irc.ServerSupportedFeatures.hasFeature}, and their value defaults
correctly, when they don't appear in an ISUPPORT message.
"""
default = irc.ServerSupportedFeatures()._features[name]
if features is None:
features = [('DEFINITELY_NOT', 'a_feature')]
supported = self._parse(features)
self.assertTrue(supported.hasFeature(name))
self.assertEquals(supported.getFeature(name), default)
def test_support_CHANMODES(self):
"""
The CHANMODES ISUPPORT parameter is parsed into a C{dict} giving the
four mode categories, C{'addressModes'}, C{'param'}, C{'setParam'}, and
C{'noParam'}.
"""
self._testFeatureDefault('CHANMODES')
self._testFeatureDefault('CHANMODES', [('CHANMODES', 'b,,lk,')])
self._testFeatureDefault('CHANMODES', [('CHANMODES', 'b,,lk,ha,ha')])
self.assertEquals(
self._parseFeature('CHANMODES', ''),
{'addressModes': '',
'param': '',
'setParam': '',
'noParam': ''})
self.assertEquals(
self._parseFeature('CHANMODES', ',A'),
{'addressModes': '',
'param': 'A',
'setParam': '',
'noParam': ''})
self.assertEquals(
self._parseFeature('CHANMODES', 'A,Bc,Def,Ghij'),
{'addressModes': 'A',
'param': 'Bc',
'setParam': 'Def',
'noParam': 'Ghij'})
def test_support_IDCHAN(self):
"""
The IDCHAN support parameter is parsed into a sequence of two-tuples
giving channel prefix and ID length pairs.
"""
self.assertEquals(
self._parseFeature('IDCHAN', '!:5'),
[('!', '5')])
def test_support_MAXLIST(self):
"""
The MAXLIST support parameter is parsed into a sequence of two-tuples
giving modes and their limits.
"""
self.assertEquals(
self._parseFeature('MAXLIST', 'b:25,eI:50'),
[('b', 25), ('eI', 50)])
# A non-integer parameter argument results in None.
self.assertEquals(
self._parseFeature('MAXLIST', 'b:25,eI:50,a:3.1415'),
[('b', 25), ('eI', 50), ('a', None)])
self.assertEquals(
self._parseFeature('MAXLIST', 'b:25,eI:50,a:notanint'),
[('b', 25), ('eI', 50), ('a', None)])
def test_support_NETWORK(self):
"""
The NETWORK support parameter is parsed as the network name, as
specified by the server.
"""
self.assertEquals(
self._parseFeature('NETWORK', 'IRCNet'),
'IRCNet')
def test_support_SAFELIST(self):
"""
The SAFELIST support parameter is parsed into a boolean indicating
whether the safe "list" command is supported or not.
"""
self.assertEquals(
self._parseFeature('SAFELIST'),
True)
def test_support_STATUSMSG(self):
"""
The STATUSMSG support parameter is parsed into a string of channel
status that support the exclusive channel notice method.
"""
self.assertEquals(
self._parseFeature('STATUSMSG', '@+'),
'@+')
def test_support_TARGMAX(self):
"""
The TARGMAX support parameter is parsed into a dictionary, mapping
strings to integers, of the maximum number of targets for a particular
command.
"""
self.assertEquals(
self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3'),
{'PRIVMSG': 4,
'NOTICE': 3})
# A non-integer parameter argument results in None.
self.assertEquals(
self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3,KICK:3.1415'),
{'PRIVMSG': 4,
'NOTICE': 3,
'KICK': None})
self.assertEquals(
self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3,KICK:notanint'),
{'PRIVMSG': 4,
'NOTICE': 3,
'KICK': None})
def test_support_NICKLEN(self):
"""
The NICKLEN support parameter is parsed into an integer value
indicating the maximum length of a nickname the client may use,
otherwise, if the parameter is missing or invalid, the default value
(as specified by RFC 1459) is used.
"""
default = irc.ServerSupportedFeatures()._features['NICKLEN']
self._testIntOrDefaultFeature('NICKLEN', default)
def test_support_CHANNELLEN(self):
"""
The CHANNELLEN support parameter is parsed into an integer value
indicating the maximum channel name length, otherwise, if the
parameter is missing or invalid, the default value (as specified by
RFC 1459) is used.
"""
default = irc.ServerSupportedFeatures()._features['CHANNELLEN']
self._testIntOrDefaultFeature('CHANNELLEN', default)
def test_support_CHANTYPES(self):
"""
The CHANTYPES support parameter is parsed into a tuple of
valid channel prefix characters.
"""
self._testFeatureDefault('CHANTYPES')
self.assertEquals(
self._parseFeature('CHANTYPES', '#&%'),
('#', '&', '%'))
def test_support_KICKLEN(self):
"""
The KICKLEN support parameter is parsed into an integer value
indicating the maximum length of a kick message a client may use.
"""
self._testIntOrDefaultFeature('KICKLEN')
def test_support_PREFIX(self):
"""
The PREFIX support parameter is parsed into a dictionary mapping
modes to two-tuples of status symbol and priority.
"""
self._testFeatureDefault('PREFIX')
self._testFeatureDefault('PREFIX', [('PREFIX', 'hello')])
self.assertEquals(
self._parseFeature('PREFIX', None),
None)
self.assertEquals(
self._parseFeature('PREFIX', '(ohv)@%+'),
{'o': ('@', 0),
'h': ('%', 1),
'v': ('+', 2)})
self.assertEquals(
self._parseFeature('PREFIX', '(hov)@%+'),
{'o': ('%', 1),
'h': ('@', 0),
'v': ('+', 2)})
def test_support_TOPICLEN(self):
"""
The TOPICLEN support parameter is parsed into an integer value
indicating the maximum length of a topic a client may set.
"""
self._testIntOrDefaultFeature('TOPICLEN')
def test_support_MODES(self):
"""
The MODES support parameter is parsed into an integer value
indicating the maximum number of "variable" modes (defined as being
modes from C{addressModes}, C{param} or C{setParam} categories for
the C{CHANMODES} ISUPPORT parameter) which may by set on a channel
by a single MODE command from a client.
"""
self._testIntOrDefaultFeature('MODES')
def test_support_EXCEPTS(self):
"""
The EXCEPTS support parameter is parsed into the mode character
to be used for "ban exception" modes. If no parameter is specified
then the character C{e} is assumed.
"""
self.assertEquals(
self._parseFeature('EXCEPTS', 'Z'),
'Z')
self.assertEquals(
self._parseFeature('EXCEPTS'),
'e')
def test_support_INVEX(self):
"""
The INVEX support parameter is parsed into the mode character to be
used for "invite exception" modes. If no parameter is specified then
the character C{I} is assumed.
"""
self.assertEquals(
self._parseFeature('INVEX', 'Z'),
'Z')
self.assertEquals(
self._parseFeature('INVEX'),
'I')
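# Editorial aside (not part of the original module): a hedged sketch of the
# public API the tests above exercise. ServerSupportedFeatures parses raw
# ISUPPORT tokens and exposes the results through hasFeature/getFeature; the
# irc module is imported at the top of this file.
_example = irc.ServerSupportedFeatures()
_example.parse(['NICKLEN=16', 'PREFIX=(ov)@+', 'CHANTYPES=#&'])
assert _example.getFeature('NICKLEN') == 16
assert _example.getFeature('PREFIX') == {'o': ('@', 0), 'v': ('+', 1)}
assert _example.getFeature('CHANTYPES') == ('#', '&')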
class IRCClientWithoutLogin(irc.IRCClient):
performLogin = 0
class CTCPTest(unittest.TestCase):
def setUp(self):
self.file = StringIOWithoutClosing()
self.transport = protocol.FileWrapper(self.file)
self.client = IRCClientWithoutLogin()
self.client.makeConnection(self.transport)
def test_ERRMSG(self):
"""Testing CTCP query ERRMSG.
Not because this is an especially important case in the
field, but it does go through the entire dispatch/decode/encode
process.
"""
errQuery = (":[email protected] PRIVMSG #theChan :"
"%(X)cERRMSG t%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF})
errReply = ("NOTICE nick :%(X)cERRMSG t :"
"No error has occoured.%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF})
self.client.dataReceived(errQuery)
reply = self.file.getvalue()
self.failUnlessEqual(errReply, reply)
def test_noNumbersVERSION(self):
"""
If attributes for version information on L{IRCClient} are set to
C{None}, the parts of the CTCP VERSION response they correspond to
are omitted.
"""
self.client.versionName = "FrobozzIRC"
self.client.ctcpQuery_VERSION("[email protected]", "#theChan", None)
versionReply = ("NOTICE nick :%(X)cVERSION %(vname)s::"
"%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF,
'vname': self.client.versionName})
reply = self.file.getvalue()
self.assertEquals(versionReply, reply)
def test_fullVERSION(self):
"""
The response to a CTCP VERSION query includes the version number and
environment information, as specified by L{IRCClient.versionNum} and
L{IRCClient.versionEnv}.
"""
self.client.versionName = "FrobozzIRC"
self.client.versionNum = "1.2g"
self.client.versionEnv = "ZorkOS"
self.client.ctcpQuery_VERSION("[email protected]", "#theChan", None)
versionReply = ("NOTICE nick :%(X)cVERSION %(vname)s:%(vnum)s:%(venv)s"
"%(X)c%(EOL)s"
% {'X': irc.X_DELIM,
'EOL': irc.CR + irc.LF,
'vname': self.client.versionName,
'vnum': self.client.versionNum,
'venv': self.client.versionEnv})
reply = self.file.getvalue()
self.assertEquals(versionReply, reply)
def tearDown(self):
self.transport.loseConnection()
self.client.connectionLost()
del self.client
del self.transport
class NoticingClient(IRCClientWithoutLogin, object):
methods = {
'created': ('when',),
'yourHost': ('info',),
'myInfo': ('servername', 'version', 'umodes', 'cmodes'),
'luserClient': ('info',),
'bounce': ('info',),
'isupport': ('options',),
'luserChannels': ('channels',),
'luserOp': ('ops',),
'luserMe': ('info',),
'receivedMOTD': ('motd',),
'privmsg': ('user', 'channel', 'message'),
'joined': ('channel',),
'left': ('channel',),
'noticed': ('user', 'channel', 'message'),
'modeChanged': ('user', 'channel', 'set', 'modes', 'args'),
'pong': ('user', 'secs'),
'signedOn': (),
'kickedFrom': ('channel', 'kicker', 'message'),
'nickChanged': ('nick',),
'userJoined': ('user', 'channel'),
'userLeft': ('user', 'channel'),
'userKicked': ('user', 'channel', 'kicker', 'message'),
'action': ('user', 'channel', 'data'),
'topicUpdated': ('user', 'channel', 'newTopic'),
'userRenamed': ('oldname', 'newname')}
def __init__(self, *a, **kw):
# It is important that IRCClient.__init__ is not called since
# traditionally it did not exist, so it is important that nothing is
# initialised there that would break subclasses that do not (or
# cannot) invoke the base implementation. Any protocol
# initialisation should happen in connectionMade.
self.calls = []
def __getattribute__(self, name):
if name.startswith('__') and name.endswith('__'):
return super(NoticingClient, self).__getattribute__(name)
try:
args = super(NoticingClient, self).__getattribute__('methods')[name]
except KeyError:
return super(NoticingClient, self).__getattribute__(name)
else:
return self.makeMethod(name, args)
def makeMethod(self, fname, args):
def method(*a, **kw):
if len(a) > len(args):
raise TypeError("TypeError: %s() takes %d arguments "
"(%d given)" % (fname, len(args), len(a)))
for (name, value) in zip(args, a):
if name in kw:
raise TypeError("TypeError: %s() got multiple values "
"for keyword argument '%s'" % (fname, name))
else:
kw[name] = value
if len(kw) != len(args):
raise TypeError("TypeError: %s() takes %d arguments "
"(%d given)" % (fname, len(args), len(a)))
self.calls.append((fname, kw))
return method
def pop(dict, key, default):
try:
value = dict[key]
except KeyError:
return default
else:
del dict[key]
return value
class ClientImplementationTests(unittest.TestCase):
def setUp(self):
self.file = StringIOWithoutClosing()
self.transport = protocol.FileWrapper(self.file)
self.client = NoticingClient()
self.client.makeConnection(self.transport)
def tearDown(self):
self.transport.loseConnection()
self.client.connectionLost()
del self.client
del self.transport
def _serverTestImpl(self, code, msg, func, **kw):
host = pop(kw, 'host', 'server.host')
nick = pop(kw, 'nick', 'nickname')
args = pop(kw, 'args', '')
message = (":" +
host + " " +
code + " " +
nick + " " +
args + " :" +
msg + "\r\n")
self.client.dataReceived(message)
self.assertEquals(
self.client.calls,
[(func, kw)])
def testYourHost(self):
msg = "Your host is some.host[blah.blah/6667], running version server-version-3"
self._serverTestImpl("002", msg, "yourHost", info=msg)
def testCreated(self):
msg = "This server was cobbled together Fri Aug 13 18:00:25 UTC 2004"
self._serverTestImpl("003", msg, "created", when=msg)
def testMyInfo(self):
msg = "server.host server-version abcDEF bcdEHI"
self._serverTestImpl("004", msg, "myInfo",
servername="server.host",
version="server-version",
umodes="abcDEF",
cmodes="bcdEHI")
def testLuserClient(self):
msg = "There are 9227 victims and 9542 hiding on 24 servers"
self._serverTestImpl("251", msg, "luserClient",
info=msg)
def _sendISUPPORT(self):
args = ("MODES=4 CHANLIMIT=#:20 NICKLEN=16 USERLEN=10 HOSTLEN=63 "
"TOPICLEN=450 KICKLEN=450 CHANNELLEN=30 KEYLEN=23 CHANTYPES=# "
"PREFIX=(ov)@+ CASEMAPPING=ascii CAPAB IRCD=dancer")
msg = "are available on this server"
self._serverTestImpl("005", msg, "isupport", args=args,
options=['MODES=4',
'CHANLIMIT=#:20',
'NICKLEN=16',
'USERLEN=10',
'HOSTLEN=63',
'TOPICLEN=450',
'KICKLEN=450',
'CHANNELLEN=30',
'KEYLEN=23',
'CHANTYPES=#',
'PREFIX=(ov)@+',
'CASEMAPPING=ascii',
'CAPAB',
'IRCD=dancer'])
def test_ISUPPORT(self):
"""
The client parses ISUPPORT messages sent by the server and calls
L{IRCClient.isupport}.
"""
self._sendISUPPORT()
def testBounce(self):
msg = "Try server some.host, port 321"
self._serverTestImpl("010", msg, "bounce",
info=msg)
def testLuserChannels(self):
args = "7116"
msg = "channels formed"
self._serverTestImpl("254", msg, "luserChannels", args=args,
channels=int(args))
def testLuserOp(self):
args = "34"
msg = "flagged staff members"
self._serverTestImpl("252", msg, "luserOp", args=args,
ops=int(args))
def testLuserMe(self):
msg = "I have 1937 clients and 0 servers"
self._serverTestImpl("255", msg, "luserMe",
info=msg)
def test_receivedMOTD(self):
"""
Lines received in I{RPL_MOTDSTART} and I{RPL_MOTD} are delivered to
L{IRCClient.receivedMOTD} when I{RPL_ENDOFMOTD} is received.
"""
lines = [
":host.name 375 nickname :- host.name Message of the Day -",
":host.name 372 nickname :- Welcome to host.name",
":host.name 376 nickname :End of /MOTD command."]
for L in lines:
self.assertEquals(self.client.calls, [])
self.client.dataReceived(L + '\r\n')
self.assertEquals(
self.client.calls,
[("receivedMOTD", {"motd": ["host.name Message of the Day -", "Welcome to host.name"]})])
# After the motd is delivered, the tracking variable should be
# reset.
self.assertIdentical(self.client.motd, None)
def test_withoutMOTDSTART(self):
"""
If L{IRCClient} receives I{RPL_MOTD} and I{RPL_ENDOFMOTD} without
receiving I{RPL_MOTDSTART}, L{IRCClient.receivedMOTD} is still
called with a list of MOTD lines.
"""
lines = [
":host.name 372 nickname :- Welcome to host.name",
":host.name 376 nickname :End of /MOTD command."]
for L in lines:
self.client.dataReceived(L + '\r\n')
self.assertEquals(
self.client.calls,
[("receivedMOTD", {"motd": ["Welcome to host.name"]})])
def _clientTestImpl(self, sender, group, type, msg, func, **kw):
ident = pop(kw, 'ident', 'ident')
host = pop(kw, 'host', 'host')
wholeUser = sender + '!' + ident + '@' + host
message = (":" +
wholeUser + " " +
type + " " +
group + " :" +
msg + "\r\n")
self.client.dataReceived(message)
self.assertEquals(
self.client.calls,
[(func, kw)])
self.client.calls = []
def testPrivmsg(self):
msg = "Tooty toot toot."
self._clientTestImpl("sender", "#group", "PRIVMSG", msg, "privmsg",
ident="ident", host="host",
# Expected results below
user="sender!ident@host",
channel="#group",
message=msg)
self._clientTestImpl("sender", "recipient", "PRIVMSG", msg, "privmsg",
ident="ident", host="host",
# Expected results below
user="sender!ident@host",
channel="recipient",
message=msg)
def test_getChannelModeParams(self):
"""
L{IRCClient.getChannelModeParams} uses ISUPPORT information, either
given by the server or defaults, to determine which channel modes
require arguments when being added or removed.
"""
add, remove = map(sorted, self.client.getChannelModeParams())
self.assertEquals(add, ['b', 'h', 'k', 'l', 'o', 'v'])
self.assertEquals(remove, ['b', 'h', 'o', 'v'])
def removeFeature(name):
name = '-' + name
msg = "are available on this server"
self._serverTestImpl(
'005', msg, 'isupport', args=name, options=[name])
self.assertIdentical(
self.client.supported.getFeature(name), None)
self.client.calls = []
# Remove CHANMODES feature, causing getFeature('CHANMODES') to return
# None.
removeFeature('CHANMODES')
add, remove = map(sorted, self.client.getChannelModeParams())
self.assertEquals(add, ['h', 'o', 'v'])
self.assertEquals(remove, ['h', 'o', 'v'])
# Remove PREFIX feature, causing getFeature('PREFIX') to return None.
removeFeature('PREFIX')
add, remove = map(sorted, self.client.getChannelModeParams())
self.assertEquals(add, [])
self.assertEquals(remove, [])
# Restore ISUPPORT features.
self._sendISUPPORT()
self.assertNotIdentical(
self.client.supported.getFeature('PREFIX'), None)
def test_getUserModeParams(self):
"""
L{IRCClient.getUserModeParams} returns a list of user modes (modes that
the user sets on themself, outside of channel modes) that require
parameters when added and removed, respectively.
"""
add, remove = map(sorted, self.client.getUserModeParams())
self.assertEquals(add, [])
self.assertEquals(remove, [])
def _sendModeChange(self, msg, args='', target=None):
"""
Build a MODE string and send it to the client.
"""
if target is None:
target = '#chan'
message = ":[email protected] MODE %s %s %s\r\n" % (
target, msg, args)
self.client.dataReceived(message)
def _parseModeChange(self, results, target=None):
"""
        Parse the results, run some basic checks and return the data to verify.
"""
if target is None:
target = '#chan'
for n, result in enumerate(results):
method, data = result
self.assertEquals(method, 'modeChanged')
self.assertEquals(data['user'], '[email protected]')
self.assertEquals(data['channel'], target)
results[n] = tuple([data[key] for key in ('set', 'modes', 'args')])
return results
def _checkModeChange(self, expected, target=None):
"""
Compare the expected result with the one returned by the client.
"""
result = self._parseModeChange(self.client.calls, target)
self.assertEquals(result, expected)
self.client.calls = []
def test_modeMissingDirection(self):
"""
Mode strings that do not begin with a directional character, C{'+'} or
C{'-'}, have C{'+'} automatically prepended.
"""
self._sendModeChange('s')
self._checkModeChange([(True, 's', (None,))])
def test_noModeParameters(self):
"""
No parameters are passed to L{IRCClient.modeChanged} for modes that
don't take any parameters.
"""
self._sendModeChange('-s')
self._checkModeChange([(False, 's', (None,))])
self._sendModeChange('+n')
self._checkModeChange([(True, 'n', (None,))])
def test_oneModeParameter(self):
"""
Parameters are passed to L{IRCClient.modeChanged} for modes that take
parameters.
"""
self._sendModeChange('+o', 'a_user')
self._checkModeChange([(True, 'o', ('a_user',))])
self._sendModeChange('-o', 'a_user')
self._checkModeChange([(False, 'o', ('a_user',))])
def test_mixedModes(self):
"""
Mixing adding and removing modes that do and don't take parameters
invokes L{IRCClient.modeChanged} with mode characters and parameters
that match up.
"""
self._sendModeChange('+osv', 'a_user another_user')
self._checkModeChange([(True, 'osv', ('a_user', None, 'another_user'))])
self._sendModeChange('+v-os', 'a_user another_user')
self._checkModeChange([(True, 'v', ('a_user',)),
(False, 'os', ('another_user', None))])
def test_tooManyModeParameters(self):
"""
Passing an argument to modes that take no parameters results in
L{IRCClient.modeChanged} not being called and an error being logged.
"""
self._sendModeChange('+s', 'wrong')
self._checkModeChange([])
errors = self.flushLoggedErrors(irc.IRCBadModes)
self.assertEquals(len(errors), 1)
self.assertSubstring(
'Too many parameters', errors[0].getErrorMessage())
def test_tooFewModeParameters(self):
"""
Passing no arguments to modes that do take parameters results in
        L{IRCClient.modeChanged} not being called and an error being logged.
"""
self._sendModeChange('+o')
self._checkModeChange([])
errors = self.flushLoggedErrors(irc.IRCBadModes)
self.assertEquals(len(errors), 1)
self.assertSubstring(
'Not enough parameters', errors[0].getErrorMessage())
def test_userMode(self):
"""
A C{MODE} message whose target is our user (the nickname of our user,
to be precise), as opposed to a channel, will be parsed according to
the modes specified by L{IRCClient.getUserModeParams}.
"""
target = self.client.nickname
# Mode "o" on channels is supposed to take a parameter, but since this
# is not a channel this will not cause an exception.
self._sendModeChange('+o', target=target)
self._checkModeChange([(True, 'o', (None,))], target=target)
def getUserModeParams():
return ['Z', '']
# Introduce our own user mode that takes an argument.
self.patch(self.client, 'getUserModeParams', getUserModeParams)
self._sendModeChange('+Z', 'an_arg', target=target)
self._checkModeChange([(True, 'Z', ('an_arg',))], target=target)
class BasicServerFunctionalityTestCase(unittest.TestCase):
def setUp(self):
self.f = StringIOWithoutClosing()
self.t = protocol.FileWrapper(self.f)
self.p = irc.IRC()
self.p.makeConnection(self.t)
def check(self, s):
self.assertEquals(self.f.getvalue(), s)
def testPrivmsg(self):
self.p.privmsg("this-is-sender", "this-is-recip", "this is message")
self.check(":this-is-sender PRIVMSG this-is-recip :this is message\r\n")
def testNotice(self):
self.p.notice("this-is-sender", "this-is-recip", "this is notice")
self.check(":this-is-sender NOTICE this-is-recip :this is notice\r\n")
def testAction(self):
self.p.action("this-is-sender", "this-is-recip", "this is action")
self.check(":this-is-sender ACTION this-is-recip :this is action\r\n")
def testJoin(self):
self.p.join("this-person", "#this-channel")
self.check(":this-person JOIN #this-channel\r\n")
def testPart(self):
self.p.part("this-person", "#that-channel")
self.check(":this-person PART #that-channel\r\n")
def testWhois(self):
"""
Verify that a whois by the client receives the right protocol actions
from the server.
"""
timestamp = int(time.time()-100)
hostname = self.p.hostname
req = 'requesting-nick'
targ = 'target-nick'
self.p.whois(req, targ, 'target', 'host.com',
'Target User', 'irc.host.com', 'A fake server', False,
12, timestamp, ['#fakeusers', '#fakemisc'])
expected = '\r\n'.join([
':%(hostname)s 311 %(req)s %(targ)s target host.com * :Target User',
':%(hostname)s 312 %(req)s %(targ)s irc.host.com :A fake server',
':%(hostname)s 317 %(req)s %(targ)s 12 %(timestamp)s :seconds idle, signon time',
':%(hostname)s 319 %(req)s %(targ)s :#fakeusers #fakemisc',
':%(hostname)s 318 %(req)s %(targ)s :End of WHOIS list.',
'']) % dict(hostname=hostname, timestamp=timestamp, req=req, targ=targ)
self.check(expected)
class DummyClient(irc.IRCClient):
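    # Minimal client that records outgoing protocol lines instead of writing
    # them to a transport, so tests can assert on the exact IRC traffic.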
def __init__(self):
self.lines = []
def sendLine(self, m):
self.lines.append(m)
class ClientMsgTests(unittest.TestCase):
def setUp(self):
self.client = DummyClient()
def testSingleLine(self):
self.client.msg('foo', 'bar')
self.assertEquals(self.client.lines, ['PRIVMSG foo :bar'])
def testDodgyMaxLength(self):
self.assertRaises(ValueError, self.client.msg, 'foo', 'bar', 0)
self.assertRaises(ValueError, self.client.msg, 'foo', 'bar', 3)
def testMultipleLine(self):
maxLen = len('PRIVMSG foo :') + 3 + 2 # 2 for line endings
self.client.msg('foo', 'barbazbo', maxLen)
self.assertEquals(self.client.lines, ['PRIVMSG foo :bar',
'PRIVMSG foo :baz',
'PRIVMSG foo :bo'])
def testSufficientWidth(self):
msg = 'barbazbo'
maxLen = len('PRIVMSG foo :%s' % (msg,)) + 2
self.client.msg('foo', msg, maxLen)
self.assertEquals(self.client.lines, ['PRIVMSG foo :%s' % (msg,)])
self.client.lines = []
self.client.msg('foo', msg, maxLen-1)
self.assertEquals(2, len(self.client.lines))
self.client.lines = []
self.client.msg('foo', msg, maxLen+1)
self.assertEquals(1, len(self.client.lines))
def test_newlinesAtStart(self):
"""
An LF at the beginning of the message is ignored.
"""
self.client.lines = []
self.client.msg('foo', '\nbar')
self.assertEquals(self.client.lines, ['PRIVMSG foo :bar'])
def test_newlinesAtEnd(self):
"""
An LF at the end of the message is ignored.
"""
self.client.lines = []
self.client.msg('foo', 'bar\n')
self.assertEquals(self.client.lines, ['PRIVMSG foo :bar'])
def test_newlinesWithinMessage(self):
"""
An LF within a message causes a new line.
"""
self.client.lines = []
self.client.msg('foo', 'bar\n\nbaz')
self.assertEquals(self.client.lines, [
'PRIVMSG foo :bar',
'PRIVMSG foo :baz'
])
def test_consecutiveNewlines(self):
"""
Consecutive LFs do not cause a blank line.
"""
self.client.lines = []
self.client.msg('foo', 'bar\n\nbaz')
self.assertEquals(self.client.lines, [
'PRIVMSG foo :bar',
'PRIVMSG foo :baz',
])
def test_longLinesCauseNewLines(self):
"""
Lines that would break the 512-byte barrier cause two lines to be sent.
"""
# The maximum length of a line is 512 bytes, including the line prefix
# and the trailing CRLF.
maxLineLength = irc.MAX_COMMAND_LENGTH - 2 - len('PRIVMSG foo :')
self.client.msg('foo', 'o' * (maxLineLength+1))
self.assertEquals(self.client.lines, [
'PRIVMSG foo :' + maxLineLength * 'o',
'PRIVMSG foo :o',
])
def test_newlinesBeforeLineBreaking(self):
"""
IRCClient breaks on newlines before it breaks long lines.
"""
# Because MAX_COMMAND_LENGTH includes framing characters, this long
# line is slightly longer than half the permissible message size.
longline = 'o' * (irc.MAX_COMMAND_LENGTH // 2)
self.client.msg('foo', longline + '\n' + longline)
self.assertEquals(self.client.lines, [
'PRIVMSG foo :' + longline,
'PRIVMSG foo :' + longline,
])
def test_lineBreakOnWordBoundaries(self):
"""
IRCClient prefers to break long lines at word boundaries.
"""
# Because MAX_COMMAND_LENGTH includes framing characters, this long
# line is slightly longer than half the permissible message size.
longline = 'o' * (irc.MAX_COMMAND_LENGTH // 2)
self.client.msg('foo', longline + ' ' + longline)
self.assertEquals(self.client.lines, [
'PRIVMSG foo :' + longline,
'PRIVMSG foo :' + longline,
])
def testSplitSanity(self):
# Whiteboxing
self.assertRaises(ValueError, irc.split, 'foo', -1)
self.assertRaises(ValueError, irc.split, 'foo', 0)
self.assertEquals([], irc.split('', 1))
self.assertEquals([], irc.split(''))
def test_splitDelimiters(self):
"""
Test that split() skips any delimiter (space or newline) that it finds
at the very beginning of the string segment it is operating on.
Nothing should be added to the output list because of it.
"""
r = irc.split("xx yyz", 2)
self.assertEquals(['xx', 'yy', 'z'], r)
r = irc.split("xx\nyyz", 2)
self.assertEquals(['xx', 'yy', 'z'], r)
def test_splitValidatesLength(self):
"""
split() raises ValueError if given a length <= 0
"""
self.assertRaises(ValueError, irc.split, "foo", 0)
self.assertRaises(ValueError, irc.split, "foo", -1)
class ClientTests(TestCase):
"""
Tests for the protocol-level behavior of IRCClient methods intended to
be called by application code.
"""
def setUp(self):
"""
Create and connect a new L{IRCClient} to a new L{StringTransport}.
"""
self.transport = StringTransport()
self.protocol = IRCClient()
self.protocol.performLogin = False
self.protocol.makeConnection(self.transport)
# Sanity check - we don't want anything to have happened at this
# point, since we're not in a test yet.
self.assertEquals(self.transport.value(), "")
def getLastLine(self, transport):
"""
Return the last IRC message in the transport buffer.
"""
return transport.value().split('\r\n')[-2]
def test_away(self):
"""
        L{IRCClient.away} sends an AWAY command with the specified message.
"""
message = "Sorry, I'm not here."
self.protocol.away(message)
expected = [
'AWAY :%s' % (message,),
'',
]
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_back(self):
"""
L{IRCClient.back} sends an AWAY command with an empty message.
"""
self.protocol.back()
expected = [
'AWAY :',
'',
]
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_whois(self):
"""
L{IRCClient.whois} sends a WHOIS message.
"""
self.protocol.whois('alice')
self.assertEquals(
self.transport.value().split('\r\n'),
['WHOIS alice', ''])
def test_whoisWithServer(self):
"""
L{IRCClient.whois} sends a WHOIS message with a server name if a
value is passed for the C{server} parameter.
"""
self.protocol.whois('alice', 'example.org')
self.assertEquals(
self.transport.value().split('\r\n'),
['WHOIS example.org alice', ''])
def test_register(self):
"""
L{IRCClient.register} sends NICK and USER commands with the
username, name, hostname, server name, and real name specified.
"""
username = 'testuser'
hostname = 'testhost'
servername = 'testserver'
self.protocol.realname = 'testname'
self.protocol.password = None
self.protocol.register(username, hostname, servername)
expected = [
'NICK %s' % (username,),
'USER %s %s %s :%s' % (
username, hostname, servername, self.protocol.realname),
'']
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_registerWithPassword(self):
"""
If the C{password} attribute of L{IRCClient} is not C{None}, the
C{register} method also sends a PASS command with it as the
argument.
"""
username = 'testuser'
hostname = 'testhost'
servername = 'testserver'
self.protocol.realname = 'testname'
self.protocol.password = 'testpass'
self.protocol.register(username, hostname, servername)
expected = [
'PASS %s' % (self.protocol.password,),
'NICK %s' % (username,),
'USER %s %s %s :%s' % (
username, hostname, servername, self.protocol.realname),
'']
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_registerWithTakenNick(self):
"""
Verify that the client repeats the L{IRCClient.setNick} method with a
new value when presented with an C{ERR_NICKNAMEINUSE} while trying to
register.
"""
username = 'testuser'
hostname = 'testhost'
servername = 'testserver'
self.protocol.realname = 'testname'
self.protocol.password = 'testpass'
self.protocol.register(username, hostname, servername)
self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertNotEquals(lastLine, 'NICK %s' % (username,))
# Keep chaining underscores for each collision
self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEquals(lastLine, 'NICK %s' % (username + '__',))
def test_overrideAlterCollidedNick(self):
"""
L{IRCClient.alterCollidedNick} determines how a nickname is altered upon
collision while a user is trying to change to that nickname.
"""
nick = 'foo'
self.protocol.alterCollidedNick = lambda nick: nick + '***'
self.protocol.register(nick)
self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEquals(
lastLine, 'NICK %s' % (nick + '***',))
def test_nickChange(self):
"""
When a NICK command is sent after signon, C{IRCClient.nickname} is set
to the new nickname I{after} the server sends an acknowledgement.
"""
oldnick = 'foo'
newnick = 'bar'
self.protocol.register(oldnick)
self.protocol.irc_RPL_WELCOME('prefix', ['param'])
self.protocol.setNick(newnick)
self.assertEquals(self.protocol.nickname, oldnick)
self.protocol.irc_NICK('%s!quux@qux' % (oldnick,), [newnick])
self.assertEquals(self.protocol.nickname, newnick)
def test_erroneousNick(self):
"""
Trying to register an illegal nickname results in the default legal
nickname being set, and trying to change a nickname to an illegal
nickname results in the old nickname being kept.
"""
# Registration case: change illegal nickname to erroneousNickFallback
badnick = 'foo'
self.assertEquals(self.protocol._registered, False)
self.protocol.register(badnick)
self.protocol.irc_ERR_ERRONEUSNICKNAME('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEquals(
lastLine, 'NICK %s' % (self.protocol.erroneousNickFallback,))
self.protocol.irc_RPL_WELCOME('prefix', ['param'])
self.assertEquals(self.protocol._registered, True)
self.protocol.setNick(self.protocol.erroneousNickFallback)
self.assertEquals(
self.protocol.nickname, self.protocol.erroneousNickFallback)
# Illegal nick change attempt after registration. Fall back to the old
# nickname instead of erroneousNickFallback.
oldnick = self.protocol.nickname
self.protocol.setNick(badnick)
self.protocol.irc_ERR_ERRONEUSNICKNAME('prefix', ['param'])
lastLine = self.getLastLine(self.transport)
self.assertEquals(
lastLine, 'NICK %s' % (badnick,))
self.assertEquals(self.protocol.nickname, oldnick)
def test_describe(self):
"""
        L{IRCClient.describe} sends a CTCP ACTION message to the target
specified.
"""
target = 'foo'
channel = '#bar'
action = 'waves'
self.protocol.describe(target, action)
self.protocol.describe(channel, action)
expected = [
'PRIVMSG %s :\01ACTION %s\01' % (target, action),
'PRIVMSG %s :\01ACTION %s\01' % (channel, action),
'']
self.assertEquals(self.transport.value().split('\r\n'), expected)
def test_me(self):
"""
L{IRCClient.me} sends a CTCP ACTION message to the target channel
specified.
If the target does not begin with a standard channel prefix,
'#' is prepended.
"""
target = 'foo'
channel = '#bar'
action = 'waves'
self.protocol.me(target, action)
self.protocol.me(channel, action)
expected = [
'PRIVMSG %s :\01ACTION %s\01' % ('#' + target, action),
'PRIVMSG %s :\01ACTION %s\01' % (channel, action),
'']
self.assertEquals(self.transport.value().split('\r\n'), expected)
warnings = self.flushWarnings(
offendingFunctions=[self.test_me])
self.assertEquals(
warnings[0]['message'],
"me() is deprecated since Twisted 9.0. Use IRCClient.describe().")
self.assertEquals(warnings[0]['category'], DeprecationWarning)
self.assertEquals(len(warnings), 2)
def test_noticedDoesntPrivmsg(self):
"""
The default implementation of L{IRCClient.noticed} doesn't invoke
C{privmsg()}
"""
def privmsg(user, channel, message):
self.fail("privmsg() should not have been called")
self.protocol.privmsg = privmsg
self.protocol.irc_NOTICE('spam', "I don't want any spam!")
| apache-2.0 |
ofayans/freeipa | ipaclient/remote_plugins/2_49/automount.py | 8 | 34860 | #
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
# pylint: disable=unused-import
import six
from . import Command, Method, Object
from ipalib import api, parameters, output
from ipalib.parameters import DefaultFrom
from ipalib.plugable import Registry
from ipalib.text import _
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
__doc__ = _("""
Automount
Stores automount(8) configuration for autofs(8) in IPA.
The base of an automount configuration is the configuration file auto.master.
This is also the base location in IPA. Multiple auto.master configurations
can be stored in separate locations. A location is implementation-specific
with the default being a location named 'default'. For example, you can have
locations by geographic region, by floor, by type, etc.
Automount has three basic object types: locations, maps and keys.
A location defines a set of maps anchored in auto.master. This allows you
to store multiple automount configurations. A location in itself isn't
very interesting; it is just a point to start a new automount map.
A map is roughly equivalent to a discrete automount file and provides
storage for keys.
A key is a mount point associated with a map.
When a new location is created, two maps are automatically created for
it: auto.master and auto.direct. auto.master is the root map for all
automount maps for the location. auto.direct is the default map for
direct mounts and is mounted on /-.
An automount map may contain a submount key. This key defines a mount
location within the map that references another map. This can be done
either using automountmap-add-indirect --parentmap or manually
with automountkey-add and setting info to "-fstype=autofs :<mapname>".
EXAMPLES:
Locations:
Create a named location, "Baltimore":
ipa automountlocation-add baltimore
Display the new location:
ipa automountlocation-show baltimore
Find available locations:
ipa automountlocation-find
Remove a named automount location:
ipa automountlocation-del baltimore
Show what the automount maps would look like if they were in the filesystem:
ipa automountlocation-tofiles baltimore
Import an existing configuration into a location:
ipa automountlocation-import baltimore /etc/auto.master
The import will fail if any duplicate entries are found. For
continuous operation where errors are ignored, use the --continue
option.
Maps:
Create a new map, "auto.share":
ipa automountmap-add baltimore auto.share
Display the new map:
ipa automountmap-show baltimore auto.share
Find maps in the location baltimore:
ipa automountmap-find baltimore
Create an indirect map with auto.share as a submount:
ipa automountmap-add-indirect baltimore --parentmap=auto.share --mount=sub auto.man
This is equivalent to:
ipa automountmap-add-indirect baltimore --mount=/man auto.man
ipa automountkey-add baltimore auto.man --key=sub --info="-fstype=autofs ldap:auto.share"
Remove the auto.share map:
ipa automountmap-del baltimore auto.share
Keys:
Create a new key for the auto.share map in location baltimore. This ties
the map we previously created to auto.master:
ipa automountkey-add baltimore auto.master --key=/share --info=auto.share
Create a new key for our auto.share map, an NFS mount for man pages:
ipa automountkey-add baltimore auto.share --key=man --info="-ro,soft,rsize=8192,wsize=8192 ipa.example.com:/shared/man"
Find all keys for the auto.share map:
ipa automountkey-find baltimore auto.share
Find all direct automount keys:
ipa automountkey-find baltimore --key=/-
Remove the man key from the auto.share map:
ipa automountkey-del baltimore auto.share --key=man
""")
register = Registry()
@register()
class automountkey(Object):
takes_params = (
parameters.Str(
'automountkey',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
label=_(u'Mount information'),
),
parameters.Str(
'description',
required=False,
primary_key=True,
label=_(u'description'),
exclude=('webui', 'cli'),
),
)
@register()
class automountlocation(Object):
takes_params = (
parameters.Str(
'cn',
primary_key=True,
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
)
@register()
class automountmap(Object):
takes_params = (
parameters.Str(
'automountmapname',
primary_key=True,
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
parameters.Str(
'description',
required=False,
label=_(u'Description'),
),
)
@register()
class automountkey_add(Method):
__doc__ = _("Create a new automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
),
)
takes_options = (
parameters.Str(
'automountkey',
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
cli_name='info',
label=_(u'Mount information'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountkey_del(Method):
__doc__ = _("Delete an automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Str(
'automountkey',
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
required=False,
cli_name='info',
label=_(u'Mount information'),
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountkey_find(Method):
__doc__ = _("Search for an automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
),
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'automountkey',
required=False,
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
required=False,
cli_name='info',
label=_(u'Mount information'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class automountkey_mod(Method):
__doc__ = _("Modify an automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
),
)
takes_options = (
parameters.Str(
'automountkey',
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
required=False,
cli_name='info',
label=_(u'Mount information'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'delattr',
required=False,
multivalue=True,
doc=_(u'Delete an attribute/value pair. The option will be evaluated\nlast, after all sets and adds.'),
exclude=('webui',),
),
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Str(
'newautomountinformation',
required=False,
cli_name='newinfo',
label=_(u'New mount information'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Str(
'rename',
required=False,
label=_(u'Rename'),
doc=_(u'Rename the automount key object'),
exclude=('webui',),
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountkey_show(Method):
__doc__ = _("Display an automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Str(
'automountkey',
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
required=False,
cli_name='info',
label=_(u'Mount information'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountlocation_add(Method):
__doc__ = _("Create a new automount location.")
takes_args = (
parameters.Str(
'cn',
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
)
takes_options = (
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountlocation_del(Method):
__doc__ = _("Delete an automount location.")
takes_args = (
parameters.Str(
'cn',
multivalue=True,
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountlocation_find(Method):
__doc__ = _("Search for an automount location.")
takes_args = (
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'cn',
required=False,
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'pkey_only',
required=False,
label=_(u'Primary key only'),
doc=_(u'Results should contain primary key attribute only ("location")'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class automountlocation_show(Method):
__doc__ = _("Display an automount location.")
takes_args = (
parameters.Str(
'cn',
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountlocation_tofiles(Method):
__doc__ = _("Generate automount files for a specific location.")
takes_args = (
parameters.Str(
'cn',
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
)
has_output = (
output.Output(
'result',
),
)
@register()
class automountmap_add(Method):
__doc__ = _("Create a new automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapname',
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountmap_add_indirect(Method):
__doc__ = _("Create a new indirect mount point.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapname',
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'key',
cli_name='mount',
label=_(u'Mount point'),
),
parameters.Str(
'parentmap',
required=False,
label=_(u'Parent map'),
doc=_(u'Name of parent automount map (default: auto.master).'),
default=u'auto.master',
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountmap_del(Method):
__doc__ = _("Delete an automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapname',
multivalue=True,
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountmap_find(Method):
__doc__ = _("Search for an automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'automountmapname',
required=False,
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'pkey_only',
required=False,
label=_(u'Primary key only'),
doc=_(u'Results should contain primary key attribute only ("map")'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class automountmap_mod(Method):
__doc__ = _("Modify an automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapname',
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'delattr',
required=False,
multivalue=True,
doc=_(u'Delete an attribute/value pair. The option will be evaluated\nlast, after all sets and adds.'),
exclude=('webui',),
),
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountmap_show(Method):
__doc__ = _("Display an automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
),
parameters.Str(
'automountmapname',
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.Output(
'value',
unicode,
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
| gpl-3.0 |
gustavo-guimaraes/siga | backend/venv/lib/python2.7/site-packages/unidecode/x026.py | 165 | 4020 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'[?]', # 0x14
'[?]', # 0x15
'[?]', # 0x16
'[?]', # 0x17
'[?]', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'#', # 0x6f
'', # 0x70
'', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'[?]', # 0xa0
'[?]', # 0xa1
'[?]', # 0xa2
'[?]', # 0xa3
'[?]', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'[?]', # 0xa8
'[?]', # 0xa9
'[?]', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'[?]', # 0xae
'[?]', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| mit |
ininex/geofire-python | resource/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/cp949prober.py | 2801 | 1782 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
        # NOTE: CP949 is a superset of EUC-KR, so the distribution should not
        # be different.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
| mit |
RecursiveGreen/pymod | formats/MOD.py | 1 | 9729 | import struct
from pymod.constants import *
from pymod.module import *
from pymod.tables import *
from pymod.util import *
MOD_TYPES = (
('M.K.', 'Amiga-NewTracker', 4),
('M!K!', 'Amiga-ProTracker', 4),
('M&K!', 'Amiga-NoiseTracker', 4),
('N.T.', 'Amiga-NoiseTracker?', 4), # ???, mentioned in libModplug
('CD81', '8 Channel Falcon', 8),
('OCTA', 'Amiga Oktalyzer', 8), # SchismTracker/libModplug have
('OKTA', 'Amiga Oktalyzer', 8), # 'C' or 'K', but not both
('FLT4', '4 Channel Startrekker', 4),
('FLT8', '8 Channel Startrekker', 8),
('2CHN', '2 Channel MOD', 2),
('3CHN', '3 Channel MOD', 3), # Does this show up ever?
('4CHN', '4 Channel MOD', 4),
('5CHN', '5 Channel TakeTracker', 5),
('6CHN', '6 Channel MOD', 6),
('7CHN', '7 Channel TakeTracker', 7),
('8CHN', '8 Channel MOD', 8),
('9CHN', '9 Channel TakeTracker', 9),
('10CH', '10 Channel MOD', 10),
('11CH', '11 Channel TakeTracker', 11),
('12CH', '12 Channel MOD', 12),
('13CH', '13 Channel TakeTracker', 13),
('14CH', '14 Channel MOD', 14),
('15CH', '15 Channel TakeTracker', 15),
('16CH', '16 Channel MOD', 16),
('18CH', '18 Channel MOD', 18),
('20CH', '20 Channel MOD', 20),
('22CH', '22 Channel MOD', 22),
('24CH', '24 Channel MOD', 24),
('26CH', '26 Channel MOD', 26),
('28CH', '28 Channel MOD', 28),
('30CH', '30 Channel MOD', 30),
('32CH', '32 Channel MOD', 32),
('16CN', '16 Channel MOD', 16), # Not certain where these two
('32CN', '32 Channel MOD', 32), # come from. (libModplug)
('TDZ1', '1 Channel TakeTracker', 1),
('TDZ2', '2 Channel TakeTracker', 2),
('TDZ3', '3 Channel TakeTracker', 3),
('TDZ4', '4 Channel MOD', 4),
('TDZ5', '5 Channel MOD', 5),
('TDZ6', '6 Channel MOD', 6),
('TDZ7', '7 Channel MOD', 7),
('TDZ8', '8 Channel MOD', 8),
('TDZ9', '9 Channel MOD', 9)
)
class MODNote(Note):
"""The definition of a generic MOD note and it's effects"""
def __init__(self, pattdata=[]):
if pattdata:
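            # A MOD pattern cell is four bytes:
            #   byte 0: high nibble = instrument high nibble, low nibble = period bits 8-11
            #   byte 1: period bits 0-7 (a 12-bit Amiga period in total)
            #   byte 2: high nibble = instrument low nibble, low nibble = effect number
            #   byte 3: effect parameter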
note = self.mod_period_to_note(((pattdata[0] & 0xf) << 8) + pattdata[1])
instrument = (pattdata[0] & 0xf0) + (pattdata[2] >> 4)
voleffect = VOLFX_NONE
volparam = 0
effect = pattdata[2] & 0xf
param = pattdata[3]
super(MODNote, self).__init__(note, instrument, voleffect, volparam, effect, param)
else:
super(MODNote, self).__init__(0, 0, 0, 0, 0, 0)
def mod_period_to_note(self, period):
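        # Amiga periods halve with each octave; scan the note range and return
        # the first 1-based note whose reference period (the period_table entry
        # shifted down by the octave) does not exceed the stored period.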
if period:
for num in range(NOTE_LAST + 1):
if period >= (32 * period_table[num % 12] >> (num / 12 + 2)):
return num + 1
return NOTE_NONE
def __unicode__(self):
keys = ['C-', 'C#', 'D-', 'D#', 'E-', 'F-', 'F#', 'G-', 'G#', 'A-', 'A#', 'B-']
commands = '123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if self.note == 0: ret1 = '...'
elif self.note > 0 and self.note <=120:
split = divmod(self.note-1, 12)
ret1 = '%s%s' % (keys[split[1]], str(split[0]))
elif self.note == 254: ret1 = '^^^'
elif self.note == 255: ret1 = '==='
if self.instrument: ret2 = str(self.instrument).zfill(2)
else: ret2 = '..'
# No volume columns for MOD files
ret3 = '..'
if self.effect: letter = commands[self.effect-1]
else: letter = '.'
ret4 = '%s%s' % (letter, hex(self.param)[2:].zfill(2).upper())
return '%s %s %s %s' % (ret1, ret2, ret3, ret4)
def __repr__(self):
return self.__unicode__()
class MODPattern(Pattern):
"""The definition of the MOD pattern"""
def __init__(self, file=None, rows=64, channels=4):
super(MODPattern, self).__init__(rows, channels)
if file:
self.load(file)
else:
self.data = self.empty(self.rows, self.channels)
def empty(self, rows, channels):
pattern = []
for row in range(rows):
pattern.append([])
for channel in range(channels):
pattern[row].append(MODNote())
return pattern
def load(self, file):
self.data = self.empty(self.rows, self.channels)
for row in range(self.rows):
for channel in range(self.channels):
self.data[row][channel] = MODNote(list(struct.unpack(">4B", file.read(4))))
class MODSample(Sample):
"""Definition of an MOD sample"""
def __init__(self, file=None):
super(MODSample, self).__init__()
self.modsamploadflags = SF_8 | SF_LE | SF_M | SF_PCMS
if file: self.load(file, 0)
def load(self, file, loadtype=0):
if loadtype == 0:
# Loads the MOD sample headers
modsampname = struct.unpack(">22s", file.read(22))[0]
modsamplength = struct.unpack(">H", file.read(2))[0]
modsampfinetune = struct.unpack(">b", file.read(1))[0]
modsampvolume = struct.unpack(">B", file.read(1))[0]
modsamploopbegin = struct.unpack(">H", file.read(2))[0]
modsamplooplength = struct.unpack(">H", file.read(2))[0]
# Parse it into generic Sample
self.name = modsampname
self.filename = modsampname
self.volume = MIN(modsampvolume, 64) * 4
self.length = modsamplength * 2
self.c5speed = MOD_FINETUNE(modsampfinetune)
self.loopbegin = modsamploopbegin
if modsamplooplength > 2: self.flags = self.flags | CHN_LOOP
self.loopend = self.loopbegin + modsamplooplength
elif loadtype == 1:
# . . .otherwise, load sample data
super(MODSample, self).load(file, file.tell(), self.modsamploadflags)
class MOD(Module):
"""A class that holds a generic MOD file"""
def __init__(self, filename=None):
super(MOD, self).__init__()
if not filename:
self.id = '4CHN' # /b/, for teh lulz. . .(bad joke)
self.tracker = '4 Channel MOD'
self.restartpos = 0
self.channelnum = 4
self.samplenum = 31
else:
f = open(filename, 'rb') # NOTE: MOD files should be big-endian!
self.filename = filename
f.seek(1080) # Magic number is in middle of file.
magic = struct.unpack(">4s", f.read(4))[0]
self.id = ''
for TYPE in MOD_TYPES:
if magic == TYPE[0]:
self.id = magic
self.tracker = TYPE[1]
self.channelnum = TYPE[2]
self.samplenum = 31
break
if self.id == '':
self.id = '????'
self.tracker = '*OLD* 4 Channel MOD'
self.channelnum = 4
self.samplenum = 15
f.seek(0)
self.name = struct.unpack(">20s", f.read(20))[0] # Song title (padded with NULL)
self.samples = []
for num in range(self.samplenum):
self.samples.append(MODSample(f)) # Loading sample headers
self.ordernum = struct.unpack(">B", f.read(1))[0] # Number of orders in song
self.restartpos = struct.unpack(">B", f.read(1))[0] # Restart position
self.orders = list(struct.unpack(">128B", f.read(128)))
# Fixes for buggy Startrekker MOD's. . .
fixed = 0
if self.id == 'FLT8':
for order in self.orders:
if order & 1:
fixed = 1
self.id = 'FLT4'
self.tracker = '4 Channel Startrekker (buggy)'
self.channelnum = 4
if not fixed:
for num in range(128):
self.orders[num] = self.orders[num] >> 1
self.patternnum = max(self.orders) + 1
self.tempo = 125
self.speed = 6
curpos = f.tell()
# Testing for WOW files. . .
if self.id == 'M.K.':
f.seek(0, 2)
sampsize = 0
for num in range(self.samplenum):
sampsize = sampsize + self.samples[num].length
if f.tell() == 2048 * self.patternnum + sampsize + 3132:
self.channelnum = 8
self.tracker = 'Mods Grave WOW'
f.seek(curpos)
if self.id != '????':
f.seek(4, 1) # Skip the magic id. . .
self.patterns = []
if self.patternnum:
for num in range(self.patternnum):
self.patterns.append(MODPattern(f, channels=self.channelnum))
for num in range(self.samplenum):
self.samples[num].load(f, 1) # Loading sample data
f.close()
def detect(filename):
f = open(filename, 'rb')
f.seek(1080)
magic = struct.unpack(">4s", f.read(4))[0]
f.close()
for TYPE in MOD_TYPES:
if magic == TYPE[0]:
return 2
if filename.lower().endswith('.mod') or filename.lower().startswith('mod.'):
return 1
else:
return 0
detect = staticmethod(detect)
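# --- Hedged usage sketch (added example; not part of the original module) ---
# Loads a ProTracker module with the MOD class above and prints basic song
# information. The file name is hypothetical and must point to an existing,
# valid .mod file.
if __name__ == '__main__':
    modfile = 'example.mod'
    if MOD.detect(modfile):
        song = MOD(modfile)
        print song.tracker, '-', song.name.rstrip('\x00')
        print '%d channels, %d patterns, %d orders' % (
            song.channelnum, song.patternnum, song.ordernum)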
| gpl-3.0 |
heiko-r/paparazzi | sw/tools/airframe_editor/gui_dialogs.py | 29 | 1632 | #!/usr/bin/env python
from __future__ import print_function
import gtk
from os import path
if gtk.pygtk_version < (2, 3, 90):
print("Please upgrade your pygtk")
raise SystemExit
def filechooser(pathname):
dialog = gtk.FileChooserDialog("Open ...", None,
gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
dialog.set_current_folder(pathname)
filter = gtk.FileFilter()
filter.set_name("Airframe File")
filter.add_pattern("*.xml")
dialog.add_filter(filter)
response = dialog.run()
filename = ""
if response == gtk.RESPONSE_OK:
filename = dialog.get_filename()
elif response == gtk.RESPONSE_CANCEL:
print("No file selected")
dialog.destroy()
return filename
def error_loading_xml(s):
err_msg = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE,
"Error Loading XML: " + s)
err_msg.run()
err_msg.destroy()
def about(home):
about_d = gtk.AboutDialog()
about_d.set_program_name("Paparazzi Airframe Editor")
about_d.set_version("0.1")
about_d.set_copyright("(c) GPL v2")
about_d.set_comments("Airframe Editor")
about_d.set_website("http://paparazzi.github.io")
about_d.set_logo(gtk.gdk.pixbuf_new_from_file(path.join(home, "data/pictures/penguin_icon.png")))
about_d.run()
about_d.destroy()
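# --- Hedged usage sketch (added example; not part of the original module) ---
# Requires a working PyGTK 2 environment with a display; the airframe
# directory below is hypothetical.
if __name__ == '__main__':
    airframe = filechooser(path.expanduser("~/paparazzi/conf/airframes"))
    if airframe:
        print("Selected airframe: %s" % airframe)
    else:
        error_loading_xml("no airframe file was selected")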
| gpl-2.0 |
kuri65536/python-for-android | python-modules/twisted/twisted/python/hook.py | 90 | 5266 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I define support for hookable instance methods.
These are methods which you can register pre-call and post-call external
functions to augment their functionality. People familiar with more esoteric
languages may think of these as \"method combinations\".
This could be used to add optional preconditions, user-extensible callbacks
(a-la emacs) or a thread-safety mechanism.
The four exported calls are:
- L{addPre}
- L{addPost}
- L{removePre}
- L{removePost}
All have the signature (class, methodName, callable), and the callable they
take must always have the signature (instance, *args, **kw) unless the
particular signature of the method they hook is known.
Hooks should typically not throw exceptions; however, no effort will be made by
this module to prevent them from doing so. Pre-hooks will always be called,
but post-hooks will only be called if the pre-hooks do not raise any exceptions
(they will still be called if the main method raises an exception). The return
values and exception status of the main method will be propagated (assuming
none of the hooks raise an exception). Hooks will be executed in the order in
which they are added.
"""
# System Imports
import string
### Public Interface
class HookError(Exception):
"An error which will fire when an invariant is violated."
def addPre(klass, name, func):
"""hook.addPre(klass, name, func) -> None
Add a function to be called before the method klass.name is invoked.
"""
_addHook(klass, name, PRE, func)
def addPost(klass, name, func):
"""hook.addPost(klass, name, func) -> None
Add a function to be called after the method klass.name is invoked.
"""
_addHook(klass, name, POST, func)
def removePre(klass, name, func):
"""hook.removePre(klass, name, func) -> None
Remove a function (previously registered with addPre) so that it
is no longer executed before klass.name.
"""
_removeHook(klass, name, PRE, func)
def removePost(klass, name, func):
"""hook.removePre(klass, name, func) -> None
Remove a function (previously registered with addPost) so that it
is no longer executed after klass.name.
"""
_removeHook(klass, name, POST, func)
### "Helper" functions.
hooked_func = """
import %(module)s
def %(name)s(*args, **kw):
klazz = %(module)s.%(klass)s
for preMethod in klazz.%(preName)s:
preMethod(*args, **kw)
try:
return klazz.%(originalName)s(*args, **kw)
finally:
for postMethod in klazz.%(postName)s:
postMethod(*args, **kw)
"""
_PRE = '__hook_pre_%s_%s_%s__'
_POST = '__hook_post_%s_%s_%s__'
_ORIG = '__hook_orig_%s_%s_%s__'
def _XXX(k,n,s):
"string manipulation garbage"
x = s % (string.replace(k.__module__,'.','_'), k.__name__, n)
return x
def PRE(k,n):
"(private) munging to turn a method name into a pre-hook-method-name"
return _XXX(k,n,_PRE)
def POST(k,n):
"(private) munging to turn a method name into a post-hook-method-name"
return _XXX(k,n,_POST)
def ORIG(k,n):
"(private) munging to turn a method name into an `original' identifier"
return _XXX(k,n,_ORIG)
def _addHook(klass, name, phase, func):
"(private) adds a hook to a method on a class"
_enhook(klass, name)
if not hasattr(klass, phase(klass, name)):
setattr(klass, phase(klass, name), [])
phaselist = getattr(klass, phase(klass, name))
phaselist.append(func)
def _removeHook(klass, name, phase, func):
"(private) removes a hook from a method on a class"
phaselistname = phase(klass, name)
if not hasattr(klass, ORIG(klass,name)):
raise HookError("no hooks present!")
phaselist = getattr(klass, phase(klass, name))
try: phaselist.remove(func)
except ValueError:
raise HookError("hook %s not found in removal list for %s"%
(name,klass))
if not getattr(klass, PRE(klass,name)) and not getattr(klass, POST(klass, name)):
_dehook(klass, name)
def _enhook(klass, name):
"(private) causes a certain method name to be hooked on a class"
if hasattr(klass, ORIG(klass, name)):
return
def newfunc(*args, **kw):
for preMethod in getattr(klass, PRE(klass, name)):
preMethod(*args, **kw)
try:
return getattr(klass, ORIG(klass, name))(*args, **kw)
finally:
for postMethod in getattr(klass, POST(klass, name)):
postMethod(*args, **kw)
try:
newfunc.func_name = name
except TypeError:
# Older python's don't let you do this
pass
oldfunc = getattr(klass, name).im_func
setattr(klass, ORIG(klass, name), oldfunc)
setattr(klass, PRE(klass, name), [])
setattr(klass, POST(klass, name), [])
setattr(klass, name, newfunc)
def _dehook(klass, name):
"(private) causes a certain method name no longer to be hooked on a class"
if not hasattr(klass, ORIG(klass, name)):
raise HookError("Cannot unhook!")
setattr(klass, name, getattr(klass, ORIG(klass,name)))
delattr(klass, PRE(klass,name))
delattr(klass, POST(klass,name))
delattr(klass, ORIG(klass,name))
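# --- Hedged usage sketch (added example; not part of the original module) ---
# Demonstrates the addPre/addPost/removePre/removePost API described in the
# module docstring. The Account class and both hook functions are hypothetical.
if __name__ == '__main__':
    class Account:
        def __init__(self):
            self.balance = 0
        def deposit(self, amount):
            self.balance = self.balance + amount
    def checkAmount(account, amount):
        # pre-hook: runs before every deposit
        if amount < 0:
            raise ValueError("deposit must be non-negative")
    def logDeposit(account, amount):
        # post-hook: runs after every deposit
        print "deposited %s, new balance %s" % (amount, account.balance)
    addPre(Account, 'deposit', checkAmount)
    addPost(Account, 'deposit', logDeposit)
    acct = Account()
    acct.deposit(10)   # prints: deposited 10, new balance 10
    removePre(Account, 'deposit', checkAmount)
    removePost(Account, 'deposit', logDeposit)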
| apache-2.0 |
yahoo/npm | node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py | 778 | 65880 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
def testValidateMSVSSettings_tool_names(self):
"""Tests that only MSVS tool names are allowed."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {},
'VCLinkerTool': {},
'VCMIDLTool': {},
'foo': {},
'VCResourceCompilerTool': {},
'VCLibrarianTool': {},
'VCManifestTool': {},
'ClCompile': {}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized tool foo',
'Warning: unrecognized tool ClCompile'])
def testValidateMSVSSettings_settings(self):
"""Tests that for invalid MSVS settings."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '5',
'BrowseInformation': 'fdkslj',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '-1',
'CompileAs': '1',
'DebugInformationFormat': '2',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': '1',
'ExceptionHandling': '1',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '1',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '1',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'string1;string2',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '1',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '1',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '2',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'CLRImageType': '2',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '2',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': '2',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'ErrorReporting': '2',
'FixedBaseAddress': '2',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '2',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '2',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '2',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '2',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '2',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'true',
'Version': 'a string1'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'CPreprocessOptions': 'a string1',
'DefaultCharType': '1',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '1',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'notgood': 'bogus',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'VCResourceCompilerTool': {
'AdditionalOptions': 'a string1',
'AdditionalIncludeDirectories': 'folder1;folder2',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'notgood2': 'bogus',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a string1',
'ManifestResourceFile': 'a_file_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'truel',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}},
self.stderr)
self._ExpectedWarnings([
'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
'index value (5) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/BrowseInformation, '
"invalid literal for int() with base 10: 'fdkslj'",
'Warning: for VCCLCompilerTool/CallingConvention, '
'index value (-1) not in expected range [0, 3)',
'Warning: for VCCLCompilerTool/DebugInformationFormat, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
'Warning: for VCLinkerTool/TargetMachine, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCMIDLTool/notgood',
'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
'Warning: for VCManifestTool/UpdateFileHashes, '
"expected bool; got 'truel'"
''])
def testValidateMSBuildSettings_settings(self):
"""Tests that for invalid MSBuild settings."""
MSVSSettings.ValidateMSBuildSettings(
{'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'false',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'BuildingInIDE': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'CompileAsManaged': 'Pure',
'CreateHotpatchableImage': 'true',
'DebugInformationFormat': 'ProgramDatabase',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'SyncCThrow',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Precise',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'FunctionLevelLinking': 'false',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'false',
'MinimalRebuild': 'true',
'MultiProcessorCompilation': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Disabled',
'PrecompiledHeader': 'NotUsing',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'string1;string2',
'PreprocessOutputPath': 'a string1',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'false',
'ProcessorNumber': '33',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TreatSpecificWarningsAsErrors': 'string1;string2',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UseUnicodeForAssemblerListing': 'true',
'WarningLevel': 'TurnOffAllWarnings',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'Link': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'BuildingInIDE': 'true',
'CLRImageType': 'ForceIJWImage',
'CLRSupportLastError': 'Enabled',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'CreateHotPatchableImage': 'X86Image',
'DataExecutionPrevention': 'false',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': 'NotSet',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'FixedBaseAddress': 'false',
'ForceFileOutput': 'Enabled',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'a_file_list',
'ImageHasSafeExceptionHandlers': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'false',
'LinkDLL': 'true',
'LinkErrorReporting': 'SendErrorReport',
'LinkStatus': 'true',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'MSDOSStubFileName': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': 'false',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'PreventDllBinding': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SectionAlignment': '33',
'SetChecksum': 'true',
'ShowProgress': 'LinkVerboseREF',
'SpecifySectionAttributes': 'a string1',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Console',
'SupportNobindOfDelayLoadedDLL': 'true',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TrackerLogDirectory': 'a_folder',
'TreatLinkerWarningAsErrors': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'AsInvoker',
'UACUIAccess': 'true',
'Version': 'a string1'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'Culture': '0x236',
'IgnoreStandardIncludePath': 'true',
'NullTerminateStrings': 'true',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ApplicationConfigurationMode': 'true',
'ClientStubFile': 'a_file_name',
'CPreprocessOptions': 'a string1',
'DefaultCharType': 'Signed',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'EnableCustom',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateClientFiles': 'Stub',
'GenerateServerFiles': 'None',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'LocaleID': '33',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'ServerStubFile': 'a_file_name',
'StructMemberAlignment': 'NotSet',
'SuppressCompilerWarnings': 'true',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Itanium',
'TrackerLogDirectory': 'a_folder',
'TypeLibFormat': 'NewFormat',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'Lib': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'DisplayLibrary': 'a string1',
'ErrorReporting': 'PromptImmediately',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkTimeCodeGeneration': 'true',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'Name': 'a_file_name',
'OutputFile': 'a_file_name',
'RemoveObjects': 'file1;file2',
'SubSystem': 'Console',
'SuppressStartupBanner': 'true',
'TargetMachine': 'MachineX86i',
'TrackerLogDirectory': 'a_folder',
'TreatLibWarningAsErrors': 'true',
'UseUnicodeResponseFiles': 'true',
'Verbose': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'EnableDPIAwareness': 'fal',
'GenerateCatalogFiles': 'truel',
'GenerateCategoryTags': 'true',
'InputResourceManifests': 'a string1',
'ManifestFromManagedAssembly': 'a_file_name',
'notgood3': 'bogus',
'OutputManifestFile': 'a_file_name',
'OutputResourceManifests': 'a string1',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressDependencyElement': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'a_file_name'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized setting ClCompile/Enableprefast',
'Warning: unrecognized setting ClCompile/ZZXYZ',
'Warning: unrecognized setting Manifest/notgood3',
'Warning: for Manifest/GenerateCatalogFiles, '
"expected bool; got 'truel'",
'Warning: for Lib/TargetMachine, unrecognized enumerated value '
'MachineX86i',
"Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
# Custom
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
# Custom
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
def testConvertToMSBuildSettings_full_synthetic(self):
"""Tests conversion of all the MSBuild settings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '1',
'BrowseInformation': '2',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '0',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': '0',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '1',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '0',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '2',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '0',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '0',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': '1',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': '1',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '0',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'ErrorReporting': '0',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2;file3',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '1',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '0',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '0',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '3',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '1',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'false',
'UseUnicodeResponseFiles': 'true',
'Version': 'a_string'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': '0',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '2',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'EmbedManifest': 'true',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'ManifestResourceFile': 'my_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string /J',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'true',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': 'NotSet',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'AnySuitable',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'Create',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'WarningLevel': 'Level2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'Link': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': 'ForceIJWImage',
'CLRThreadAttribute': 'STAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': 'Driver',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'NoErrorReport',
'LinkTimeCodeGeneration': 'PGInstrument',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': '',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'true',
'ShowProgress': 'NotSet',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Windows',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineARM',
'TerminalServerAware': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'HighestAvailable',
'UACUIAccess': 'true',
'Version': 'a_string'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '0x03eb',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': 'Unsigned',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'All',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '4',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Win32',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'Lib': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'my_name'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'false'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_actual(self):
"""Tests the conversion of an actual project.
A VS2008 project with most of the options defined was created through the
VS2008 IDE. It was then converted to VS2010. The tool settings found in
the .vcproj and .vcxproj files were converted to the two dictionaries
msvs_settings and expected_msbuild_settings.
Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure that inherited values are
included. Since the Gyp projects we generate do not use inheritance,
we removed these macros. They were:
ClCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
AdditionalOptions: ' %(AdditionalOptions)'
AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
ForcedUsingFiles: ';%(ForcedUsingFiles)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
UndefinePreprocessorDefinitions:
';%(UndefinePreprocessorDefinitions)',
Link:
AdditionalDependencies: ';%(AdditionalDependencies)',
AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
AdditionalManifestDependencies:
';%(AdditionalManifestDependencies)',
AdditionalOptions: ' %(AdditionalOptions)',
AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
AssemblyLinkResource: ';%(AssemblyLinkResource)',
DelayLoadDLLs: ';%(DelayLoadDLLs)',
EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
ForceSymbolReferences: ';%(ForceSymbolReferences)',
IgnoreSpecificDefaultLibraries:
';%(IgnoreSpecificDefaultLibraries)',
ResourceCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
AdditionalOptions: ' %(AdditionalOptions)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
Manifest:
AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
AdditionalOptions: ' %(AdditionalOptions)',
InputResourceManifests: ';%(InputResourceManifests)',
"""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)\\a',
'AssemblerOutput': '1',
'BasicRuntimeChecks': '3',
'BrowseInformation': '1',
'BrowseInformationFile': '$(IntDir)\\e',
'BufferSecurityCheck': 'false',
'CallingConvention': '1',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '2',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '2',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'GeneratePreprocessedFile': '2',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': '$(IntDir)\\b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
'PrecompiledHeaderThrough': 'StdAfx.hd',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
'RuntimeLibrary': '3',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'false',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '0',
'UseUnicodeResponseFiles': 'false',
'WarnAsError': 'true',
'WarningLevel': '3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)\\c'},
'VCLinkerTool': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': '1',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': '3',
'CLRThreadAttribute': '1',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': '1',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'ErrorReporting': '2',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'false',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'flob;flok',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': '2',
'LinkIncremental': '0',
'LinkLibraryDependencies': 'false',
'LinkTimeCodeGeneration': '1',
'ManifestFile':
'$(IntDir)\\$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'OptimizeForWindows98': '2',
'OptimizeReferences': '2',
'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'false',
'ShowProgress': '1',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': '1',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '1',
'TerminalServerAware': '1',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'false',
'Version': '333'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '3084',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
'ShowProgress': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
'EmbedManifest': 'false',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'ManifestResourceFile':
'$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'false',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more /J',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)a',
'AssemblerOutput': 'AssemblyCode',
'BasicRuntimeChecks': 'EnableFastChecks',
'BrowseInformation': 'true',
'BrowseInformationFile': '$(IntDir)e',
'BufferSecurityCheck': 'false',
'CallingConvention': 'FastCall',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Queue',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Size',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': '$(IntDir)b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
'PrecompiledHeaderFile': 'StdAfx.hd',
'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'PreprocessSuppressLineNumbers': 'true',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
'RuntimeLibrary': 'MultiThreadedDebugDLL',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '4Bytes',
'SuppressStartupBanner': 'false',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'WarningLevel': 'Level3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)c'},
'Link': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': 'true',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': 'ForceSafeILImage',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': 'UpOnly',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'flob;flok',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'QueueForNextLogin',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'NoEntryPoint': 'true',
'OptimizeReferences': 'true',
'OutputFile': '$(OutDir)$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'false',
'ShowProgress': 'LinkVerbose',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': 'Console',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': 'RequireAdministrator',
'UACUIAccess': 'true',
'Version': '333'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '0x0c0c',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
'ShowProgress': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'false',
'UseLibraryDependencyInputs': 'true'},
'': {
'EmbedManifest': 'false',
'GenerateManifest': 'false',
'IgnoreImportLibrary': 'true',
'LinkIncremental': ''
},
'ManifestResourceCompile': {
'ResourceOutputFileName':
'$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
if __name__ == '__main__':
unittest.main()
| artistic-2.0 |
leggitta/mne-python | mne/decoding/mixin.py | 19 | 1063 | class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn"""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
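# --- Hedged usage sketch (not part of the original module) -------------------
# A minimal transformer built on TransformerMixin, assuming only numpy is
# available. The _DemoScaler class and the sample data are hypothetical and
# exist purely to show how fit_transform() delegates to fit() then transform().
if __name__ == "__main__":
    import numpy as np

    class _DemoScaler(TransformerMixin):
        def fit(self, X, y=None):
            # remember the per-column mean so transform() can center the data
            self.mean_ = np.asarray(X).mean(axis=0)
            return self

        def transform(self, X):
            # subtract the mean learned during fit()
            return np.asarray(X) - self.mean_

    X = np.array([[1.0, 2.0], [3.0, 4.0]])
    # fit_transform() comes for free from the mixin: fit(X).transform(X)
    print(_DemoScaler().fit_transform(X))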
| bsd-3-clause |
ooici/coi-services | ion/agents/instrument/test/test_agent_persistence.py | 1 | 29918 | #!/usr/bin/env python
"""
@package ion.agents.instrument.test.test_agent_persistence
@file ion/agents.instrument/test_agent_persistence.py
@author Edward Hunter
@brief Test cases for R2 instrument agent state and config persistence between running instances.
"""
__author__ = 'Edward Hunter'
# Import pyon first for monkey patching.
# Pyon log and config objects.
from pyon.public import log
from pyon.public import CFG
from pyon.public import get_obj_registry
# Standard imports.
import sys
import time
import socket
import re
import json
import unittest
import os
from copy import deepcopy
# 3rd party imports.
import gevent
from gevent.event import AsyncResult
from nose.plugins.attrib import attr
from mock import patch
# Pyon pubsub and event support.
from pyon.event.event import EventSubscriber, EventPublisher
from pyon.ion.stream import StandaloneStreamSubscriber
from ion.services.dm.utility.granule_utils import RecordDictionaryTool
# Pyon unittest support.
from pyon.util.int_test import IonIntegrationTestCase
# Pyon exceptions.
from pyon.core.exception import BadRequest, Conflict, Timeout, ResourceError
# Agent imports.
from pyon.util.context import LocalContextMixin
from pyon.agent.agent import ResourceAgentClient
from pyon.agent.agent import ResourceAgentState
from pyon.agent.agent import ResourceAgentEvent
# Driver imports.
from ion.agents.instrument.driver_int_test_support import DriverIntegrationTestSupport
# Objects and clients.
from interface.objects import AgentCommand
from interface.services.icontainer_agent import ContainerAgentClient
from interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient
from interface.services.dm.idataset_management_service import DatasetManagementServiceClient
# Alerts.
from interface.objects import StreamAlertType, AggregateStatusType
from interface.services.cei.iprocess_dispatcher_service import ProcessDispatcherServiceClient
from interface.objects import ProcessDefinition, ProcessStateEnum
from pyon.core.object import IonObjectSerializer, IonObjectDeserializer
from pyon.core.bootstrap import IonObject
"""
--with-pycc
--with-queueblame
bin/nosetests -s -v --nologcapture --with-queueblame --with-pycc ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence
bin/nosetests -s -v --nologcapture ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence
bin/nosetests --with-pycc -s -v --nologcapture ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence.test_agent_config_persistence
bin/nosetests -s -v --nologcapture ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence.test_agent_config_persistence
bin/nosetests -s -v --nologcapture ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence.test_agent_state_persistence
bin/nosetests -s -v --nologcapture ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence.test_agent_rparam_persistence
bin/nosetests -s -v --nologcapture ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence.test_test_cei_launch_mode
bin/nosetests -s -v --nologcapture --with-queueblame --with-pycc ion/agents/instrument/test/test_agent_persistence.py:TestAgentPersistence.test_test_cei_launch_mode
"""
###############################################################################
# Global constants.
###############################################################################
DEV_ADDR = CFG.device.sbe37.host
DEV_PORT = CFG.device.sbe37.port
DATA_PORT = CFG.device.sbe37.port_agent_data_port
CMD_PORT = CFG.device.sbe37.port_agent_cmd_port
PA_BINARY = CFG.device.sbe37.port_agent_binary
DELIM = CFG.device.sbe37.delim
WORK_DIR = CFG.device.sbe37.workdir
DRV_URI = CFG.device.sbe37.dvr_egg
from ion.agents.instrument.test.agent_test_constants import IA_RESOURCE_ID
from ion.agents.instrument.test.agent_test_constants import IA_NAME
from ion.agents.instrument.test.agent_test_constants import IA_MOD
from ion.agents.instrument.test.agent_test_constants import IA_CLS
from ion.agents.instrument.test.load_test_driver_egg import load_egg
DVR_CONFIG = load_egg()
# Load MI modules from the egg
from mi.instrument.seabird.sbe37smb.ooicore.driver import SBE37ProtocolEvent
from mi.instrument.seabird.sbe37smb.ooicore.driver import SBE37Parameter
class FakeProcess(LocalContextMixin):
"""
A fake process used because the test case is not an ion process.
"""
name = ''
id=''
process_type = ''
@attr('HARDWARE', group='mi')
@patch.dict(CFG, {'endpoint':{'receive':{'timeout': 360}}})
@unittest.skipIf((not os.getenv('PYCC_MODE', False)) and os.getenv('CEI_LAUNCH_TEST', False), 'Skip until tests support launch port agent configurations.')
class TestAgentPersistence(IonIntegrationTestCase):
"""
"""
############################################################################
# Setup, teardown.
############################################################################
def setUp(self):
"""
Set up driver integration support.
Start port agent, add port agent cleanup.
Start container.
Start deploy services.
Define agent config.
"""
self._ia_client = None
log.info('Creating driver integration test support:')
log.info('driver uri: %s', DRV_URI)
log.info('device address: %s', DEV_ADDR)
log.info('device port: %s', DEV_PORT)
log.info('log delimiter: %s', DELIM)
log.info('work dir: %s', WORK_DIR)
self._support = DriverIntegrationTestSupport(None,
None,
DEV_ADDR,
DEV_PORT,
DATA_PORT,
CMD_PORT,
PA_BINARY,
DELIM,
WORK_DIR)
# Start port agent, add stop to cleanup.
self._start_pagent()
self.addCleanup(self._support.stop_pagent)
# Start container.
        log.info('Starting capability container.')
self._start_container()
# Bring up services in a deploy file (no need to message)
        log.info('Starting deploy services.')
self.container.start_rel_from_url('res/deploy/r2deploy.yml')
log.info('building stream configuration')
# Setup stream config.
self._build_stream_config()
# Create agent config.
self._agent_config = {
'driver_config' : DVR_CONFIG,
'stream_config' : self._stream_config,
'agent' : {'resource_id': IA_RESOURCE_ID},
'test_mode' : True,
'forget_past' : False,
'enable_persistence' : True,
'aparam_pubrate_config' :
{
'raw' : 2,
'parsed' : 2
}
}
self._ia_client = None
self._ia_pid = '1234'
self.addCleanup(self._verify_agent_reset)
self.addCleanup(self.container.state_repository.put_state,
self._ia_pid, {})
###############################################################################
# Port agent helpers.
###############################################################################
def _start_pagent(self):
"""
Construct and start the port agent.
"""
port = self._support.start_pagent()
log.info('Port agent started at port %i',port)
# Configure driver to use port agent port number.
DVR_CONFIG['comms_config'] = {
'addr' : 'localhost',
'port' : port,
'cmd_port' : CMD_PORT
}
###############################################################################
# Data stream helpers.
###############################################################################
def _build_stream_config(self):
"""
"""
# Create a pubsub client to create streams.
pubsub_client = PubsubManagementServiceClient(node=self.container.node)
dataset_management = DatasetManagementServiceClient()
# Create streams and subscriptions for each stream named in driver.
self._stream_config = {}
stream_name = 'parsed'
param_dict_name = 'ctd_parsed_param_dict'
pd_id = dataset_management.read_parameter_dictionary_by_name(param_dict_name, id_only=True)
stream_def_id = pubsub_client.create_stream_definition(name=stream_name, parameter_dictionary_id=pd_id)
pd = pubsub_client.read_stream_definition(stream_def_id).parameter_dictionary
stream_id, stream_route = pubsub_client.create_stream(name=stream_name,
exchange_point='science_data',
stream_definition_id=stream_def_id)
stream_config = dict(routing_key=stream_route.routing_key,
exchange_point=stream_route.exchange_point,
stream_id=stream_id,
stream_definition_ref=stream_def_id,
parameter_dictionary=pd)
self._stream_config[stream_name] = stream_config
stream_name = 'raw'
param_dict_name = 'ctd_raw_param_dict'
pd_id = dataset_management.read_parameter_dictionary_by_name(param_dict_name, id_only=True)
stream_def_id = pubsub_client.create_stream_definition(name=stream_name, parameter_dictionary_id=pd_id)
pd = pubsub_client.read_stream_definition(stream_def_id).parameter_dictionary
stream_id, stream_route = pubsub_client.create_stream(name=stream_name,
exchange_point='science_data',
stream_definition_id=stream_def_id)
stream_config = dict(routing_key=stream_route.routing_key,
exchange_point=stream_route.exchange_point,
stream_id=stream_id,
stream_definition_ref=stream_def_id,
parameter_dictionary=pd)
self._stream_config[stream_name] = stream_config
###############################################################################
# Agent start stop helpers.
###############################################################################
def _start_agent(self, bootmode=None):
"""
"""
container_client = ContainerAgentClient(node=self.container.node,
name=self.container.name)
agent_config = deepcopy(self._agent_config)
agent_config['bootmode'] = bootmode
self._ia_pid = container_client.spawn_process(name=IA_NAME,
module=IA_MOD,
cls=IA_CLS,
config=agent_config,
process_id=self._ia_pid)
# Start a resource agent client to talk with the instrument agent.
self._ia_client = None
self._ia_client = ResourceAgentClient(IA_RESOURCE_ID, process=FakeProcess())
log.info('Got instrument agent client %s.', str(self._ia_client))
def _stop_agent(self):
"""
"""
if self._ia_pid:
container_client = ContainerAgentClient(node=self.container.node,
name=self.container.name)
container_client.terminate_process(self._ia_pid)
if self._ia_client:
self._ia_client = None
def _verify_agent_reset(self):
"""
Check agent state and reset if necessary.
        This is called if a test fails and reset hasn't occurred.
"""
if self._ia_client is None:
return
state = self._ia_client.get_agent_state()
if state != ResourceAgentState.UNINITIALIZED:
cmd = AgentCommand(command=ResourceAgentEvent.RESET)
retval = self._ia_client.execute_agent(cmd)
self._ia_client = None
###############################################################################
# Tests.
###############################################################################
def test_agent_config_persistence(self):
"""
test_agent_config_persistence
Test that agent parameter configuration is persisted between running
instances.
"""
# Start the agent.
self._start_agent()
# We start in uninitialized state.
# In this state there is no driver process.
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
# Ping the agent.
retval = self._ia_client.ping_agent()
log.info(retval)
# Confirm the default agent parameters.
#{'streams': {'raw': ['quality_flag', 'ingestion_timestamp', 'port_timestamp', 'raw', 'lat', 'driver_timestamp', 'preferred_timestamp', 'lon', 'internal_timestamp', 'time'], 'parsed': ['quality_flag', 'ingestion_timestamp', 'port_timestamp', 'pressure', 'lat', 'driver_timestamp', 'conductivity', 'preferred_timestamp', 'temp', 'density', 'salinity', 'lon', 'internal_timestamp', 'time']}}
retval = self._ia_client.get_agent(['streams'])['streams']
self.assertIn('raw', retval.keys())
self.assertIn('parsed', retval.keys())
#{'pubrate': {'raw': 0, 'parsed': 0}}
retval = self._ia_client.get_agent(['pubrate'])['pubrate']
self.assertIn('raw', retval.keys())
self.assertIn('parsed', retval.keys())
self.assertEqual(retval['raw'], 2)
self.assertEqual(retval['parsed'], 2)
#{'alerts': []}
retval = self._ia_client.get_agent(['alerts'])['alerts']
self.assertEqual(retval, [])
# Define a few new parameters and set them.
# Confirm they are set.
alert_def_1 = {
'name' : 'current_warning_interval',
'stream_name' : 'parsed',
'description' : 'Current is below normal range.',
'alert_type' : StreamAlertType.WARNING,
'aggregate_type' : AggregateStatusType.AGGREGATE_DATA,
'value_id' : 'temp',
'lower_bound' : None,
'lower_rel_op' : None,
'upper_bound' : 10.0,
'upper_rel_op' : '<',
'alert_class' : 'IntervalAlert'
}
alert_def_2 = {
'name' : 'temp_alarm_interval',
'stream_name' : 'parsed',
            'description' : 'Temperature is critical.',
'alert_type' : StreamAlertType.ALARM,
'aggregate_type' : AggregateStatusType.AGGREGATE_DATA,
'value_id' : 'temp',
'lower_bound' : None,
'lower_rel_op' : None,
'upper_bound' : 20.0,
'upper_rel_op' : '<',
'alert_class' : 'IntervalAlert'
}
alert_def3 = {
'name' : 'late_data_warning',
'stream_name' : 'parsed',
'description' : 'Expected data has not arrived.',
'alert_type' : StreamAlertType.WARNING,
'aggregate_type' : AggregateStatusType.AGGREGATE_COMMS,
'time_delta' : 180,
'alert_class' : 'LateDataAlert'
}
orig_alerts = [alert_def_1,alert_def_2, alert_def3]
pubrate = {
'parsed' : 10,
'raw' : 20
}
params = {
'alerts' : orig_alerts,
'pubrate' : pubrate
}
# Set the new agent params and confirm.
self._ia_client.set_agent(params)
params = [
'alerts',
'pubrate'
]
retval = self._ia_client.get_agent(params)
pubrate = retval['pubrate']
alerts = retval['alerts']
self.assertIn('raw', pubrate.keys())
self.assertIn('parsed', pubrate.keys())
self.assertEqual(pubrate['parsed'], 10)
self.assertEqual(pubrate['raw'], 20)
count = 0
for x in alerts:
x.pop('status')
x.pop('value')
for y in orig_alerts:
if x['name'] == y['name']:
count += 1
self.assertItemsEqual(x.keys(), y.keys())
self.assertEqual(count, 3)
# Now stop and restart the agent.
self._stop_agent()
gevent.sleep(15)
self._start_agent('restart')
# We start in uninitialized state.
# In this state there is no driver process.
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
# Ping the agent.
retval = self._ia_client.ping_agent()
log.info(retval)
# Confirm the persisted parameters.
params = [
'alerts',
'pubrate'
]
retval = self._ia_client.get_agent(params)
pubrate = retval['pubrate']
alerts = retval['alerts']
self.assertIn('raw', pubrate.keys())
self.assertIn('parsed', pubrate.keys())
self.assertEqual(pubrate['parsed'], 10)
self.assertEqual(pubrate['raw'], 20)
count = 0
for x in alerts:
x.pop('status')
x.pop('value')
for y in orig_alerts:
if x['name'] == y['name']:
count += 1
self.assertItemsEqual(x.keys(), y.keys())
self.assertEqual(count, 3)
def test_agent_state_persistence(self):
"""
test_agent_state_persistence
Verify that agents can be restored to their prior running state.
"""
self._start_agent()
# We start in uninitialized state.
# In this state there is no driver process.
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
# Ping the agent.
retval = self._ia_client.ping_agent()
log.info(retval)
alert_def3 = {
'name' : 'late_data_warning',
'stream_name' : 'parsed',
'description' : 'Expected data has not arrived.',
'alert_type' : StreamAlertType.WARNING,
'aggregate_type' : AggregateStatusType.AGGREGATE_COMMS,
'time_delta' : 180,
'alert_class' : 'LateDataAlert'
}
orig_pubrate = {
'parsed' : 10,
'raw' : 20
}
params = {
'alerts' : [alert_def3],
'pubrate' : orig_pubrate
}
# Set the new agent params and confirm.
self._ia_client.set_agent(params)
# Initialize the agent.
# The agent is spawned with a driver config, but you can pass one in
        # optionally with the initialize command. This validates the driver
# config, launches a driver process and connects to it via messaging.
# If successful, we switch to the inactive state.
cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.INACTIVE)
# Ping the driver proc.
retval = self._ia_client.ping_resource()
log.info(retval)
cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.IDLE)
cmd = AgentCommand(command=ResourceAgentEvent.RUN)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.COMMAND)
# Acquire sample returns a string, not a particle. The particle
# is created by the data handler though.
cmd = AgentCommand(command=SBE37ProtocolEvent.ACQUIRE_SAMPLE)
retval = self._ia_client.execute_resource(cmd)
# Now stop and restart the agent.
self._stop_agent()
gevent.sleep(15)
self._start_agent('restart')
timeout = gevent.Timeout(240)
timeout.start()
try:
while True:
state = self._ia_client.get_agent_state()
print '## in state: ' + state
if state == ResourceAgentState.COMMAND:
timeout.cancel()
break
else:
gevent.sleep(1)
except gevent.Timeout:
self.fail("Could not restore agent state to COMMAND.")
params = [
'alerts',
'pubrate'
]
retval = self._ia_client.get_agent(params)
alerts = retval['alerts']
pubrate = retval['pubrate']
self.assertEqual(len(alerts), 1)
self.assertEqual(alert_def3['name'], alerts[0]['name'])
self.assertEqual(pubrate['raw'], 20)
self.assertEqual(pubrate['parsed'], 10)
cmd = AgentCommand(command=ResourceAgentEvent.PAUSE)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.STOPPED)
# Now stop and restart the agent.
self._stop_agent()
gevent.sleep(15)
self._start_agent('restart')
timeout = gevent.Timeout(240)
timeout.start()
try:
while True:
state = self._ia_client.get_agent_state()
if state == ResourceAgentState.STOPPED:
timeout.cancel()
break
else:
gevent.sleep(1)
except gevent.Timeout:
self.fail("Could not restore agent state to STOPPED.")
retval = self._ia_client.get_agent(params)
alerts = retval['alerts']
pubrate = retval['pubrate']
self.assertEqual(len(alerts), 1)
self.assertEqual(alert_def3['name'], alerts[0]['name'])
self.assertEqual(pubrate['raw'], 20)
self.assertEqual(pubrate['parsed'], 10)
# Reset the agent. This causes the driver messaging to be stopped,
# the driver process to end and switches us back to uninitialized.
cmd = AgentCommand(command=ResourceAgentEvent.RESET)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
def test_agent_rparam_persistence(self):
"""
test_agent_rparam_persistence
Verify ability to restore device configuration.
### Original values:
{'TA0': -0.0002572242, 'OUTPUTSV': False, 'NAVG': 0}
### Values after set:
{'TA0': -0.0005144484, 'OUTPUTSV': True, 'NAVG': 1}
### Restore config:
{'PTCA1': 0.6603433, 'WBOTC': 1.2024e-05, 'PCALDATE': [12, 8, 2005],
'STORETIME': False, 'CPCOR': 9.57e-08, 'PTCA2': 0.00575649,
'OUTPUTSV': True, 'SAMPLENUM': 0, 'TCALDATE': [8, 11, 2005],
'OUTPUTSAL': False, 'TA2': -9.717158e-06, 'POFFSET': 0.0,
'INTERVAL': 19733, 'SYNCWAIT': 0, 'CJ': 3.339261e-05,
'CI': 0.0001334915, 'CH': 0.1417895, 'TA0': -0.0005144484,
        'TA1': 0.0003138936, 'NAVG': 1, 'TA3': 2.138735e-07,
        'RCALDATE': [8, 11, 2005], 'CG': -0.987093, 'CTCOR': 3.25e-06,
        'PTCB0': 24.6145, 'PTCB1': -0.0009, 'PTCB2': 0.0,
'CCALDATE': [8, 11, 2005], 'PA0': 5.916199, 'PA1': 0.4851819,
'PA2': 4.596432e-07, 'SYNCMODE': False, 'PTCA0': 276.2492,
'TXREALTIME': True, 'RTCA2': -3.022745e-08, 'RTCA1': 1.686132e-06,
'RTCA0': 0.9999862}
### Of which we have:
{'TA0': -0.0005144484, 'OUTPUTSV': True, 'NAVG': 1}
"""
self._start_agent()
# We start in uninitialized state.
# In this state there is no driver process.
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
# Ping the agent.
retval = self._ia_client.ping_agent()
log.info(retval)
# Initialize the agent.
# The agent is spawned with a driver config, but you can pass one in
        # optionally with the initialize command. This validates the driver
# config, launches a driver process and connects to it via messaging.
# If successful, we switch to the inactive state.
cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.INACTIVE)
# Ping the driver proc.
retval = self._ia_client.ping_resource()
log.info(retval)
cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.IDLE)
cmd = AgentCommand(command=ResourceAgentEvent.RUN)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.COMMAND)
params = [
SBE37Parameter.OUTPUTSV,
SBE37Parameter.NAVG,
SBE37Parameter.TA0
]
retval = self._ia_client.get_resource(params)
orig_params = retval
new_params = {
SBE37Parameter.OUTPUTSV : not orig_params[SBE37Parameter.OUTPUTSV],
SBE37Parameter.NAVG : orig_params[SBE37Parameter.NAVG] + 1,
SBE37Parameter.TA0 : orig_params[SBE37Parameter.TA0] * 2
}
#print '########### orig params'
#print str(orig_params)
self._ia_client.set_resource(new_params)
retval = self._ia_client.get_resource(params)
self.assertEqual(retval[SBE37Parameter.OUTPUTSV],
new_params[SBE37Parameter.OUTPUTSV])
self.assertEqual(retval[SBE37Parameter.NAVG],
new_params[SBE37Parameter.NAVG])
delta = max(retval[SBE37Parameter.TA0],
new_params[SBE37Parameter.TA0])*.01
self.assertAlmostEqual(retval[SBE37Parameter.TA0],
new_params[SBE37Parameter.TA0], delta=delta)
#print '########### new params'
#print str(retval)
# Now stop and restart the agent.
self._stop_agent()
self._support.stop_pagent()
gevent.sleep(10)
self._start_pagent()
gevent.sleep(10)
self._start_agent('restart')
timeout = gevent.Timeout(600)
timeout.start()
try:
while True:
state = self._ia_client.get_agent_state()
if state == ResourceAgentState.COMMAND:
timeout.cancel()
break
else:
gevent.sleep(3)
except gevent.Timeout:
self.fail("Could not restore agent state to COMMAND.")
# Verify the parameters have been restored as needed.
retval = self._ia_client.get_resource(params)
#print '########### restored params'
#print str(retval)
self.assertEqual(retval[SBE37Parameter.OUTPUTSV],
new_params[SBE37Parameter.OUTPUTSV])
self.assertEqual(retval[SBE37Parameter.NAVG],
new_params[SBE37Parameter.NAVG])
delta = max(retval[SBE37Parameter.TA0],
new_params[SBE37Parameter.TA0])*.01
self.assertAlmostEqual(retval[SBE37Parameter.TA0],
new_params[SBE37Parameter.TA0], delta=delta)
# Reset the agent. This causes the driver messaging to be stopped,
# the driver process to end and switches us back to uninitialized.
cmd = AgentCommand(command=ResourceAgentEvent.RESET)
retval = self._ia_client.execute_agent(cmd)
state = self._ia_client.get_agent_state()
self.assertEqual(state, ResourceAgentState.UNINITIALIZED)
@unittest.skip('Making CEI friendly.')
def test_cei_launch_mode(self):
pdc = ProcessDispatcherServiceClient(node=self.container.node)
p_def = ProcessDefinition(name='Agent007')
p_def.executable = {
'module' : 'ion.agents.instrument.instrument_agent',
'class' : 'InstrumentAgent'
}
p_def_id = pdc.create_process_definition(p_def)
pid = pdc.create_process(p_def_id)
def event_callback(event, *args, **kwargs):
print '######### proc %s in state %s' % (event.origin, ProcessStateEnum._str_map[event.state])
sub = EventSubscriber(event_type='ProcessLifecycleEvent',
callback=event_callback,
origin=pid,
origin_type='DispatchedProcess')
sub.start()
agent_config = deepcopy(self._agent_config)
agent_config['bootmode'] = 'restart'
pdc.schedule_process(p_def_id, process_id=pid,
configuration=agent_config)
gevent.sleep(5)
pdc.cancel_process(pid)
gevent.sleep(15)
sub.stop()
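# --- Hedged reference sketch (not part of the original tests) ----------------
# The agent command sequence the tests above exercise, written out as a plain
# list for readability. The comments give the state reached after each command:
# UNINITIALIZED -> INACTIVE -> IDLE -> COMMAND, with RESET returning to
# UNINITIALIZED. This is only a summary of the sequence used here, not an API.
AGENT_STARTUP_SEQUENCE_SKETCH = [
    ResourceAgentEvent.INITIALIZE,   # UNINITIALIZED -> INACTIVE (driver process started)
    ResourceAgentEvent.GO_ACTIVE,    # INACTIVE -> IDLE (connected to the device)
    ResourceAgentEvent.RUN,          # IDLE -> COMMAND (ready for resource commands)
]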
| bsd-2-clause |
akaszynski/vtkInterface | examples/02-plot/lighting.py | 1 | 1906 | """
Lighting Controls
~~~~~~~~~~~~~~~~~
Control aspects of the rendered mesh's lighting such as Ambient, Diffuse,
and Specular. These options only work if the ``lighting`` argument to
``add_mesh`` is ``True`` (it's true by default).
You can turn off all lighting by passing ``lighting=False`` to ``add_mesh``.
"""
# sphinx_gallery_thumbnail_number = 4
import pyvista as pv
from pyvista import examples
mesh = examples.download_st_helens().warp_by_scalar()
cpos = [(575848., 5128459., 22289.),
(562835.0, 5114981.5, 2294.5),
(-0.5, -0.5, 0.7)]
###############################################################################
# First, lets take a look at the mesh with default lighting conditions
mesh.plot(cpos=cpos, show_scalar_bar=False)
###############################################################################
# What about with no lighting
mesh.plot(lighting=False, cpos=cpos, show_scalar_bar=False)
###############################################################################
# Demonstration of the specular property
p = pv.Plotter(shape=(1,2), window_size=[1500, 500])
p.subplot(0,0)
p.add_mesh(mesh, show_scalar_bar=False)
p.add_text('No Specular')
p.subplot(0,1)
s = 1.0
p.add_mesh(mesh, specular=s, show_scalar_bar=False)
p.add_text('Specular of {}'.format(s))
p.link_views()
p.view_isometric()
p.show(cpos=cpos)
###############################################################################
# Just specular
mesh.plot(specular=0.5, cpos=cpos, show_scalar_bar=False)
###############################################################################
# Specular power
mesh.plot(specular=0.5, specular_power=15,
cpos=cpos, show_scalar_bar=False)
###############################################################################
# Demonstration of all three in use
mesh.plot(diffuse=0.5, specular=0.5, ambient=0.5,
cpos=cpos, show_scalar_bar=False)
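###############################################################################
# A hedged variation (not in the original gallery example): the same three
# properties passed through ``Plotter.add_mesh`` instead of ``mesh.plot``.
# The 0.5 values are illustrative only.
p = pv.Plotter()
p.add_mesh(mesh, diffuse=0.5, specular=0.5, ambient=0.5, show_scalar_bar=False)
p.show(cpos=cpos)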
| mit |
nion-software/nionswift | nion/swift/HistogramPanel.py | 1 | 37648 | # standard libraries
import functools
import gettext
import operator
import typing
# third party libraries
import numpy
# local libraries
from nion.data import Core
from nion.data import Image
from nion.swift import DisplayPanel
from nion.swift import Panel
from nion.swift.model import DisplayItem
from nion.swift.model import Graphics
from nion.ui import CanvasItem
from nion.ui import DrawingContext
from nion.ui import Widgets
from nion.utils import Binding
from nion.utils import Event
from nion.utils import Model
from nion.utils import Stream
_ = gettext.gettext
class AdornmentsCanvasItem(CanvasItem.AbstractCanvasItem):
"""A canvas item to draw the adornments on top of the histogram.
The adornments are the black and white lines shown during mouse
adjustment of the display limits.
Callers are expected to set the display_limits property and
then call update.
"""
def __init__(self):
super().__init__()
self.display_limits = (0,1)
def _repaint(self, drawing_context):
"""Repaint the canvas item. This will occur on a thread."""
# canvas size
canvas_width = self.canvas_size[1]
canvas_height = self.canvas_size[0]
left = self.display_limits[0]
right = self.display_limits[1]
# draw left display limit
if left > 0.0:
with drawing_context.saver():
drawing_context.begin_path()
drawing_context.move_to(left * canvas_width, 1)
drawing_context.line_to(left * canvas_width, canvas_height-1)
drawing_context.line_width = 2
drawing_context.stroke_style = "#000"
drawing_context.stroke()
# draw right display limit
if right < 1.0:
with drawing_context.saver():
drawing_context.begin_path()
drawing_context.move_to(right * canvas_width, 1)
drawing_context.line_to(right * canvas_width, canvas_height-1)
drawing_context.line_width = 2
drawing_context.stroke_style = "#FFF"
drawing_context.stroke()
# draw border
with drawing_context.saver():
drawing_context.begin_path()
drawing_context.move_to(0,canvas_height)
drawing_context.line_to(canvas_width,canvas_height)
drawing_context.line_width = 1
drawing_context.stroke_style = "#444"
drawing_context.stroke()
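# --- Hedged usage sketch (not part of the original module) -------------------
# Per the docstring above, a caller adjusts the adornments by assigning
# display_limits (fractions of the canvas width, 0..1) and then calling update().
# The 0.2/0.8 values are illustrative only.
def _example_adjust_adornments(adornments_canvas_item, left=0.2, right=0.8):
    adornments_canvas_item.display_limits = (left, right)
    adornments_canvas_item.update()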
class SimpleLineGraphCanvasItem(CanvasItem.AbstractCanvasItem):
"""A canvas item to draw a simple line graph.
The caller can specify a background color by setting the background_color
property in the format of a CSS color.
The caller must update the data by setting the data property. The data must
    be a numpy array with values in the range 0 to 1. The data will be re-binned to the
width of the canvas item and plotted.
"""
def __init__(self):
super().__init__()
self.__data = None
self.__background_color = None
self.__retained_rebin_1d = dict()
@property
def data(self):
"""Return the data."""
return self.__data
@data.setter
def data(self, data):
"""Set the data and mark the canvas item for updating.
        Data should be a numpy array with values in the range 0 to 1.
"""
self.__data = data
self.update()
@property
def background_color(self):
"""Return the background color."""
return self.__background_color
@background_color.setter
def background_color(self, background_color):
"""Set the background color. Use CSS color format."""
self.__background_color = background_color
self.update()
def _repaint(self, drawing_context):
"""Repaint the canvas item. This will occur on a thread."""
# canvas size
canvas_width = self.canvas_size[1]
canvas_height = self.canvas_size[0]
# draw background
if self.background_color:
with drawing_context.saver():
drawing_context.begin_path()
drawing_context.move_to(0,0)
drawing_context.line_to(canvas_width,0)
drawing_context.line_to(canvas_width,canvas_height)
drawing_context.line_to(0,canvas_height)
drawing_context.close_path()
drawing_context.fill_style = self.background_color
drawing_context.fill()
# draw the data, if any
if (self.data is not None and len(self.data) > 0):
# draw the histogram itself
with drawing_context.saver():
drawing_context.begin_path()
binned_data = Image.rebin_1d(self.data, int(canvas_width), self.__retained_rebin_1d) if int(canvas_width) != self.data.shape[0] else self.data
for i in range(canvas_width):
drawing_context.move_to(i, canvas_height)
drawing_context.line_to(i, canvas_height * (1 - binned_data[i]))
drawing_context.line_width = 1
drawing_context.stroke_style = "#444"
drawing_context.stroke()
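# --- Hedged sketch (not part of the original module) -------------------------
# SimpleLineGraphCanvasItem expects a 1-d numpy array normalized to the 0..1
# range. The helper below illustrates one way to produce such data; the bin
# count is an arbitrary illustrative choice, not a value required by the API.
def _example_normalized_histogram(values, bins=320):
    counts = numpy.histogram(values, bins=bins)[0].astype(float)
    peak = counts.max() if counts.size else 0.0
    # scale so the tallest bin is 1.0; an all-zero histogram is returned as-is
    return counts / peak if peak > 0 else counts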
class ColorMapCanvasItem(CanvasItem.AbstractCanvasItem):
def __init__(self):
super().__init__()
self.update_sizing(self.sizing.with_fixed_height(4))
self.__color_map_data = None
@property
def color_map_data(self) -> numpy.ndarray:
"""Return the data."""
return self.__color_map_data
@color_map_data.setter
def color_map_data(self, data: numpy.ndarray) -> None:
"""Set the data and mark the canvas item for updating.
Data should be an ndarray of shape (256, 3) with type uint8
"""
self.__color_map_data = data
self.update()
def _repaint(self, drawing_context: DrawingContext.DrawingContext):
"""Repaint the canvas item. This will occur on a thread."""
# canvas size
canvas_width = self.canvas_size.width
canvas_height = self.canvas_size.height
with drawing_context.saver():
if self.__color_map_data is not None:
rgba_image = numpy.empty((4,) + self.__color_map_data.shape[:-1], dtype=numpy.uint32)
Image.get_rgb_view(rgba_image)[:] = self.__color_map_data[numpy.newaxis, :, :] # scalar data assigned to each component of rgb view
Image.get_alpha_view(rgba_image)[:] = 255
drawing_context.draw_image(rgba_image, 0, 0, canvas_width, canvas_height)
class HistogramCanvasItem(CanvasItem.CanvasItemComposition):
"""A canvas item to draw and control a histogram."""
def __init__(self, cursor_changed_fn: typing.Callable[[float], None]):
super().__init__()
# tell the canvas item that we want mouse events.
self.wants_mouse_events = True
# create the component canvas items: adornments and the graph.
self.__adornments_canvas_item = AdornmentsCanvasItem()
self.__simple_line_graph_canvas_item = SimpleLineGraphCanvasItem()
self.__histogram_color_map_canvas_item = ColorMapCanvasItem()
# canvas items get added back to front
column = CanvasItem.CanvasItemComposition()
column.layout = CanvasItem.CanvasItemColumnLayout()
graph_and_adornments = CanvasItem.CanvasItemComposition()
graph_and_adornments.add_canvas_item(self.__simple_line_graph_canvas_item)
graph_and_adornments.add_canvas_item(self.__adornments_canvas_item)
column.add_canvas_item(graph_and_adornments)
column.add_canvas_item(self.__histogram_color_map_canvas_item)
self.add_canvas_item(column)
# used for mouse tracking.
self.__pressed = False
self.on_set_display_limits = None
self.__cursor_changed = cursor_changed_fn
def close(self):
self._set_histogram_data(None)
super().close()
@property
def background_color(self):
"""Return the background color."""
return self.__simple_line_graph_canvas_item.background_color
@background_color.setter
def background_color(self, background_color):
"""Set the background color, in the CSS color format."""
self.__simple_line_graph_canvas_item.background_color = background_color
def _set_histogram_data(self, histogram_data):
# if the user is currently dragging the display limits, we don't want to update
# from changing data at the same time. but we _do_ want to draw the updated data.
if not self.__pressed:
self.__adornments_canvas_item.display_limits = (0, 1)
self.histogram_data = histogram_data
# make sure the adornments get updated
self.__adornments_canvas_item.update()
@property
def histogram_data(self):
return self.__simple_line_graph_canvas_item.data
@histogram_data.setter
def histogram_data(self, histogram_data):
self.__simple_line_graph_canvas_item.data = histogram_data
@property
def color_map_data(self) -> numpy.ndarray:
return self.__histogram_color_map_canvas_item.color_map_data
@color_map_data.setter
def color_map_data(self, color_map_data: numpy.ndarray) -> None:
self.__histogram_color_map_canvas_item.color_map_data = color_map_data
def __set_display_limits(self, display_limits):
self.__adornments_canvas_item.display_limits = display_limits
self.__adornments_canvas_item.update()
def mouse_double_clicked(self, x, y, modifiers):
if super().mouse_double_clicked(x, y, modifiers):
return True
self.__set_display_limits((0, 1))
if callable(self.on_set_display_limits):
self.on_set_display_limits(None)
return True
def mouse_pressed(self, x, y, modifiers):
if super().mouse_pressed(x, y, modifiers):
return True
self.__pressed = True
self.start = float(x)/self.canvas_size[1]
self.__set_display_limits((self.start, self.start))
return True
def mouse_released(self, x, y, modifiers):
if super().mouse_released(x, y, modifiers):
return True
self.__pressed = False
display_limit_range = self.__adornments_canvas_item.display_limits[1] - self.__adornments_canvas_item.display_limits[0]
if 0 < display_limit_range < 1:
if callable(self.on_set_display_limits):
self.on_set_display_limits(self.__adornments_canvas_item.display_limits)
self.__set_display_limits((0, 1))
return True
def mouse_position_changed(self, x, y, modifiers):
if callable(self.__cursor_changed):
self.__cursor_changed(x / self.canvas_size[1])
if super().mouse_position_changed(x, y, modifiers):
return True
canvas_width = self.canvas_size[1]
if self.__pressed:
current = float(x)/canvas_width
self.__set_display_limits((min(self.start, current), max(self.start, current)))
return True
def mouse_exited(self) -> bool:
if callable(self.__cursor_changed):
self.__cursor_changed(None)
return True
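# --- Hedged wiring sketch (not part of the original module) ------------------
# How a caller might drive HistogramCanvasItem directly: assign normalized
# histogram data, optional color map data, and a display-limits callback. The
# callback body here is illustrative; HistogramWidget below supplies the real one.
def _example_wire_histogram_canvas_item(normalized_data, color_map_data=None):
    canvas_item = HistogramCanvasItem(cursor_changed_fn=None)
    canvas_item.background_color = "#EEE"
    canvas_item.histogram_data = normalized_data  # 1-d array in 0..1
    if color_map_data is not None:
        canvas_item.color_map_data = color_map_data  # (256, 3) uint8 array
    canvas_item.on_set_display_limits = lambda limits: print("display limits", limits)
    return canvas_item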
class HistogramWidgetData:
def __init__(self, data=None, display_range=None):
self.data = data
self.display_range = display_range
class HistogramWidget(Widgets.CompositeWidgetBase):
def __init__(self, document_controller, display_item_stream, histogram_widget_data_model, color_map_data_model, cursor_changed_fn):
super().__init__(document_controller.ui.create_column_widget(properties={"min-height": 84, "max-height": 84}))
ui = document_controller.ui
self.__display_item_stream = display_item_stream.add_ref()
self.__histogram_data_model = histogram_widget_data_model
self.__color_map_data_model = color_map_data_model
self.__display_range = None
def histogram_data_changed(key: str) -> None:
if key == "value":
histogram_widget_data = self.__histogram_data_model.value
self.__histogram_canvas_item._set_histogram_data(histogram_widget_data.data)
self.__display_range = histogram_widget_data.display_range
self.__histogram_data_property_changed_event_listener = self.__histogram_data_model.property_changed_event.listen(histogram_data_changed)
def set_display_limits(display_limits):
# display_limits in this context are in the range of 0,1
# we ask for the display_range from the display to get actual
# data values (never None), and create new display limits
# based on those data values combined with display_limits.
# then we set the display_limits on the display, which have
# the same units as the data values.
display_item = self.__display_item_stream.value
display_data_channel = display_item.display_data_channel if display_item else None
if display_data_channel:
new_display_limits = None
if display_limits is not None and self.__display_range is not None:
data_min, data_max = self.__display_range
lower_display_limit = data_min + display_limits[0] * (data_max - data_min)
upper_display_limit = data_min + display_limits[1] * (data_max - data_min)
new_display_limits = (lower_display_limit, upper_display_limit)
command = DisplayPanel.ChangeDisplayDataChannelCommand(document_controller.document_model, display_data_channel, display_limits=new_display_limits, title=_("Change Display Limits"))
command.perform()
document_controller.push_undo_command(command)
def cursor_changed(canvas_x):
if callable(cursor_changed_fn):
cursor_changed_fn(canvas_x, self.__display_range)
# create a canvas widget for this panel and put a histogram canvas item in it.
self.__histogram_canvas_item = HistogramCanvasItem(cursor_changed)
self.__histogram_canvas_item.on_set_display_limits = set_display_limits
histogram_widget = ui.create_canvas_widget()
histogram_widget.canvas_item.add_canvas_item(self.__histogram_canvas_item)
def handle_update_color_map_data(color_map_data):
self.__histogram_canvas_item.color_map_data = color_map_data
def color_map_data_changed(key: str) -> None:
if key == "value":
self.__histogram_canvas_item.color_map_data = self.__color_map_data_model.value
self.__color_map_data_stream_listener = self.__color_map_data_model.property_changed_event.listen(color_map_data_changed)
histogram_data_changed("value")
color_map_data_changed("value")
self.content_widget.add(histogram_widget)
def close(self):
self.__color_map_data_stream_listener.close()
self.__color_map_data_stream_listener = None
self.__display_item_stream.remove_ref()
self.__display_item_stream = None
self.__histogram_canvas_item = None
self.__histogram_data_property_changed_event_listener.close()
self.__histogram_data_property_changed_event_listener = None
super().close()
def _recompute(self):
pass
@property
def _histogram_canvas_item(self):
return self.__histogram_canvas_item
@property
def _histogram_data_func_value_model(self):
# for testing
return self.__histogram_data_model
class StatisticsWidget(Widgets.CompositeWidgetBase):
def __init__(self, ui, statistics_model):
super().__init__(ui.create_column_widget(properties={"min-height": 18 * 3, "max-height": 18 * 3}))
# create property models for the UI
self._stats1_property = Model.PropertyModel(str())
self._stats2_property = Model.PropertyModel(str())
self.__statistics_model = statistics_model
def statistics_changed(key: str) -> None:
if key == "value":
statistics_data = self.__statistics_model.value
statistic_strings = list()
for key in sorted(statistics_data.keys()):
value = statistics_data[key]
if value is not None:
statistic_str = "{0} {1}".format(key, value)
else:
statistic_str = "{0} {1}".format(key, _("N/A"))
statistic_strings.append(statistic_str)
self._stats1_property.value = "\n".join(statistic_strings[:(len(statistic_strings) + 1) // 2])
self._stats2_property.value = "\n".join(statistic_strings[(len(statistic_strings) + 1) // 2:])
self.__statistics_property_changed_event_listener = self.__statistics_model.property_changed_event.listen(statistics_changed)
statistics_changed("value")
stats_column1 = ui.create_column_widget(properties={"min-width": 140, "max-width": 140})
stats_column2 = ui.create_column_widget(properties={"min-width": 140, "max-width": 140})
stats_column1_label = ui.create_label_widget()
stats_column2_label = ui.create_label_widget()
stats_column1.add(stats_column1_label)
stats_column2.add(stats_column2_label)
stats_section = ui.create_row_widget()
stats_section.add_spacing(13)
stats_section.add(stats_column1)
stats_section.add_stretch()
stats_section.add(stats_column2)
stats_section.add_spacing(13)
stats_column1_label.bind_text(Binding.PropertyBinding(self._stats1_property, "value"))
stats_column2_label.bind_text(Binding.PropertyBinding(self._stats2_property, "value"))
self.content_widget.add(stats_section)
def close(self):
self.__statistics_property_changed_event_listener.close()
self.__statistics_property_changed_event_listener = None
super().close()
@property
def _statistics_func_value_model(self):
# for testing
return self.__statistics_model
def _recompute(self):
pass
# import asyncio
class HistogramPanel(Panel.Panel):
""" A panel to present a histogram of the selected data item. """
def __init__(self, document_controller, panel_id, properties, debounce=True, sample=True):
super().__init__(document_controller, panel_id, _("Histogram"))
def calculate_region_data(display_data_and_metadata, region):
if region is not None and display_data_and_metadata is not None:
if display_data_and_metadata.is_data_1d and isinstance(region, Graphics.IntervalGraphic):
interval = region.interval
if 0 <= interval[0] < 1 and 0 < interval[1] <= 1:
start, end = int(interval[0] * display_data_and_metadata.data_shape[0]), int(interval[1] * display_data_and_metadata.data_shape[0])
if end - start >= 1:
cropped_data_and_metadata = Core.function_crop_interval(display_data_and_metadata, interval)
if cropped_data_and_metadata:
return cropped_data_and_metadata
elif display_data_and_metadata.is_data_2d and isinstance(region, Graphics.RectangleTypeGraphic):
cropped_data_and_metadata = Core.function_crop(display_data_and_metadata, region.bounds)
if cropped_data_and_metadata:
return cropped_data_and_metadata
return display_data_and_metadata
def calculate_region_data_func(display_data_and_metadata, region):
return functools.partial(calculate_region_data, display_data_and_metadata, region)
def calculate_histogram_widget_data(display_data_and_metadata_func, display_range):
bins = 320
subsample = 0 # hard coded subsample size
subsample_fraction = None # fraction of total pixels
subsample_min = 1024 # minimum subsample size
display_data_and_metadata = display_data_and_metadata_func()
display_data = display_data_and_metadata.data if display_data_and_metadata else None
if display_data is not None:
total_pixels = numpy.product(display_data.shape, dtype=numpy.uint64)
if not subsample and subsample_fraction:
subsample = min(max(total_pixels * subsample_fraction, subsample_min), total_pixels)
if subsample:
factor = total_pixels / subsample
data_sample = numpy.random.choice(display_data.reshape(numpy.product(display_data.shape, dtype=numpy.uint64)), subsample)
else:
factor = 1.0
data_sample = numpy.copy(display_data)
if display_range is None or data_sample is None:
return HistogramWidgetData()
histogram_data = factor * numpy.histogram(data_sample, range=display_range, bins=bins)[0]
histogram_max = numpy.max(histogram_data) # assumes that histogram_data is int
if histogram_max > 0:
histogram_data = histogram_data / float(histogram_max)
return HistogramWidgetData(histogram_data, display_range)
return HistogramWidgetData()
def calculate_histogram_widget_data_func(display_data_and_metadata_model_func, display_range):
return functools.partial(calculate_histogram_widget_data, display_data_and_metadata_model_func, display_range)
display_item_stream = TargetDisplayItemStream(document_controller)
display_data_channel_stream = StreamPropertyStream(display_item_stream, "display_data_channel")
region_stream = TargetRegionStream(display_item_stream)
def compare_data(a, b):
return numpy.array_equal(a.data if a else None, b.data if b else None)
display_data_and_metadata_stream = DisplayDataChannelTransientsStream(display_data_channel_stream, "display_data_and_metadata", cmp=compare_data)
display_range_stream = DisplayDataChannelTransientsStream(display_data_channel_stream, "display_range")
region_data_and_metadata_func_stream = Stream.CombineLatestStream((display_data_and_metadata_stream, region_stream), calculate_region_data_func)
histogram_widget_data_func_stream = Stream.CombineLatestStream((region_data_and_metadata_func_stream, display_range_stream), calculate_histogram_widget_data_func)
color_map_data_stream = StreamPropertyStream(display_data_channel_stream, "color_map_data", cmp=numpy.array_equal)
if debounce:
histogram_widget_data_func_stream = Stream.DebounceStream(histogram_widget_data_func_stream, 0.05, document_controller.event_loop)
if sample:
histogram_widget_data_func_stream = Stream.SampleStream(histogram_widget_data_func_stream, 0.5, document_controller.event_loop)
def cursor_changed_fn(canvas_x: float, display_range) -> None:
if not canvas_x:
document_controller.cursor_changed(None)
if display_item_stream and display_item_stream.value and canvas_x:
if display_range is not None: # can be None with empty data
displayed_intensity_calibration = display_item_stream.value.displayed_intensity_calibration
adjusted_x = display_range[0] + canvas_x * (display_range[1] - display_range[0])
adjusted_x = displayed_intensity_calibration.convert_to_calibrated_value_str(adjusted_x)
document_controller.cursor_changed([_('Intensity: ') + str(adjusted_x)])
else:
document_controller.cursor_changed(None)
self.__histogram_widget_data_model = Model.FuncStreamValueModel(histogram_widget_data_func_stream, document_controller.event_loop, value=HistogramWidgetData(), cmp=numpy.array_equal)
self.__color_map_data_model = Model.StreamValueModel(color_map_data_stream, cmp=numpy.array_equal)
self._histogram_widget = HistogramWidget(document_controller, display_item_stream, self.__histogram_widget_data_model, self.__color_map_data_model, cursor_changed_fn)
def calculate_statistics(display_data_and_metadata_func, display_data_range, region, displayed_intensity_calibration):
display_data_and_metadata = display_data_and_metadata_func()
data = display_data_and_metadata.data if display_data_and_metadata else None
data_range = display_data_range
if data is not None and data.size > 0 and displayed_intensity_calibration:
mean = numpy.mean(data)
std = numpy.std(data)
rms = numpy.sqrt(numpy.mean(numpy.square(numpy.absolute(data))))
sum_data = mean * functools.reduce(operator.mul, Image.dimensional_shape_from_shape_and_dtype(data.shape, data.dtype))
if region is None:
data_min, data_max = data_range if data_range is not None else (None, None)
else:
data_min, data_max = numpy.amin(data), numpy.amax(data)
mean_str = displayed_intensity_calibration.convert_to_calibrated_value_str(mean)
std_str = displayed_intensity_calibration.convert_to_calibrated_value_str(std)
data_min_str = displayed_intensity_calibration.convert_to_calibrated_value_str(data_min)
data_max_str = displayed_intensity_calibration.convert_to_calibrated_value_str(data_max)
rms_str = displayed_intensity_calibration.convert_to_calibrated_value_str(rms)
sum_data_str = displayed_intensity_calibration.convert_to_calibrated_value_str(sum_data)
return { "mean": mean_str, "std": std_str, "min": data_min_str, "max": data_max_str, "rms": rms_str, "sum": sum_data_str }
return dict()
def calculate_statistics_func(display_data_and_metadata_model_func, display_data_range, region, displayed_intensity_calibration):
return functools.partial(calculate_statistics, display_data_and_metadata_model_func, display_data_range, region, displayed_intensity_calibration)
display_data_range_stream = DisplayDataChannelTransientsStream(display_data_channel_stream, "data_range")
displayed_intensity_calibration_stream = StreamPropertyStream(display_item_stream, 'displayed_intensity_calibration')
statistics_func_stream = Stream.CombineLatestStream((region_data_and_metadata_func_stream, display_data_range_stream, region_stream, displayed_intensity_calibration_stream), calculate_statistics_func)
if debounce:
statistics_func_stream = Stream.DebounceStream(statistics_func_stream, 0.05, document_controller.event_loop)
if sample:
statistics_func_stream = Stream.SampleStream(statistics_func_stream, 0.5, document_controller.event_loop)
self.__statistics_model = Model.FuncStreamValueModel(statistics_func_stream, document_controller.event_loop, value=dict(), cmp=numpy.array_equal)
self._statistics_widget = StatisticsWidget(self.ui, self.__statistics_model)
# create the main column with the histogram and the statistics section
column = self.ui.create_column_widget(properties={"height": 80 + 18 * 3 + 12})
column.add(self._histogram_widget)
column.add_spacing(6)
column.add(self._statistics_widget)
column.add_spacing(6)
column.add_stretch()
# this is necessary to make the panel happy
self.widget = column
def close(self):
self.__histogram_widget_data_model.close()
self.__histogram_widget_data_model = None
self.__color_map_data_model.close()
self.__color_map_data_model = None
self.__statistics_model.close()
self.__statistics_model = None
super().close()
class TargetDataItemStream(Stream.AbstractStream):
def __init__(self, document_controller):
super().__init__()
# outgoing messages
self.value_stream = Event.Event()
# cached values
self.__value = None
        # listen for selected display item changes so this stream can track the focused data item
        self.__focused_display_item_changed_event_listener = document_controller.focused_display_item_changed_event.listen(self.__focused_display_item_changed)
# manually send the first data item changed message to set things up.
self.__focused_display_item_changed(document_controller.selected_display_item)
def close(self):
# disconnect data item binding
self.__focused_display_item_changed(None)
self.__focused_display_item_changed_event_listener.close()
self.__focused_display_item_changed_event_listener = None
super().close()
@property
def value(self):
return self.__value
def __focused_display_item_changed(self, display_item: typing.Optional[DisplayItem.DisplayItem]) -> None:
data_item = display_item.data_item if display_item else None
if data_item != self.__value:
self.__value = data_item
self.value_stream.fire(data_item)
class TargetDisplayItemStream(Stream.AbstractStream):
def __init__(self, document_controller):
super().__init__()
# outgoing messages
self.value_stream = Event.Event()
# cached values
self.__value = None
# listen for selected data item changes
self.__focused_display_item_changed_event_listener = document_controller.focused_display_item_changed_event.listen(self.__focused_display_item_changed)
# manually send the first data item changed message to set things up.
self.__focused_display_item_changed(document_controller.selected_display_item)
def close(self):
# disconnect data item binding
self.__focused_display_item_changed(None)
self.__focused_display_item_changed_event_listener.close()
self.__focused_display_item_changed_event_listener = None
super().close()
@property
def value(self):
return self.__value
def __focused_display_item_changed(self, display_item: typing.Optional[DisplayItem.DisplayItem]) -> None:
if display_item != self.__value:
self.__value = display_item
self.value_stream.fire(display_item)
class TargetRegionStream(Stream.AbstractStream):
def __init__(self, display_item_stream):
super().__init__()
# outgoing messages
self.value_stream = Event.Event()
# references
self.__display_item_stream = display_item_stream.add_ref()
# initialize
self.__display_graphic_selection_changed_event_listener = None
self.__value = None
# listen for display changes
self.__display_stream_listener = display_item_stream.value_stream.listen(self.__display_item_changed)
self.__graphic_changed_event_listener = None
self.__graphic_about_to_be_removed_event_listener = None
self.__display_item_changed(display_item_stream.value)
def close(self):
self.__display_item_changed(None)
self.__display_stream_listener.close()
self.__display_stream_listener = None
self.__display_item_stream.remove_ref()
self.__display_item_stream = None
super().close()
@property
def value(self):
return self.__value
def __display_item_changed(self, display_item):
def display_graphic_selection_changed(graphic_selection):
current_index = graphic_selection.current_index
if current_index is not None:
new_value = display_item.graphics[current_index]
if new_value != self.__value:
self.__value = new_value
def graphic_changed():
self.value_stream.fire(self.__value)
def graphic_removed():
self.__value = None
self.value_stream.fire(None)
if self.__graphic_changed_event_listener:
self.__graphic_changed_event_listener.close()
self.__graphic_changed_event_listener = None
if self.__graphic_about_to_be_removed_event_listener:
self.__graphic_about_to_be_removed_event_listener.close()
self.__graphic_about_to_be_removed_event_listener = None
if self.__value:
self.__graphic_changed_event_listener = self.__value.graphic_changed_event.listen(graphic_changed)
self.__graphic_about_to_be_removed_event_listener = self.__value.about_to_be_removed_event.listen(graphic_removed)
graphic_changed()
elif self.__value is not None:
self.__value = None
if self.__graphic_changed_event_listener:
self.__graphic_changed_event_listener.close()
self.__graphic_changed_event_listener = None
if self.__graphic_about_to_be_removed_event_listener:
self.__graphic_about_to_be_removed_event_listener.close()
self.__graphic_about_to_be_removed_event_listener = None
self.value_stream.fire(None)
if self.__graphic_changed_event_listener:
self.__graphic_changed_event_listener.close()
self.__graphic_changed_event_listener = None
if self.__graphic_about_to_be_removed_event_listener:
self.__graphic_about_to_be_removed_event_listener.close()
self.__graphic_about_to_be_removed_event_listener = None
if self.__display_graphic_selection_changed_event_listener:
self.__display_graphic_selection_changed_event_listener.close()
self.__display_graphic_selection_changed_event_listener = None
if display_item:
self.__display_graphic_selection_changed_event_listener = display_item.graphic_selection_changed_event.listen(display_graphic_selection_changed)
display_graphic_selection_changed(display_item.graphic_selection)
elif self.__value is not None:
self.__value = None
self.value_stream.fire(None)
class StreamPropertyStream(Stream.ConcatStream):
def __init__(self, stream, property_name, cmp=None):
super().__init__(stream, lambda x: Stream.PropertyChangedEventStream(x, property_name, cmp))
class DisplayDataChannelTransientsStream(Stream.AbstractStream):
# TODO: add a display_data_changed to Display class and use it here
def __init__(self, display_data_channel_stream, property_name, cmp=None):
super().__init__()
# outgoing messages
self.value_stream = Event.Event()
# initialize
self.__property_name = property_name
self.__value = None
self.__display_values_changed_listener = None
self.__next_calculated_display_values_listener = None
self.__cmp = cmp if cmp else operator.eq
# listen for display changes
self.__display_data_channel_stream = display_data_channel_stream.add_ref()
self.__display_data_channel_stream_listener = display_data_channel_stream.value_stream.listen(self.__display_data_channel_changed)
self.__display_data_channel_changed(display_data_channel_stream.value)
def close(self):
self.__display_data_channel_changed(None)
self.__display_data_channel_stream_listener.close()
self.__display_data_channel_stream_listener = None
self.__display_data_channel_stream.remove_ref()
self.__display_data_channel_stream = None
super().close()
@property
def value(self):
return self.__value
def __display_data_channel_changed(self, display_data_channel):
def display_values_changed():
display_values = display_data_channel.get_calculated_display_values(True)
new_value = getattr(display_values, self.__property_name) if display_values else None
if not self.__cmp(new_value, self.__value):
self.__value = new_value
self.value_stream.fire(self.__value)
if self.__next_calculated_display_values_listener:
self.__next_calculated_display_values_listener.close()
self.__next_calculated_display_values_listener = None
if self.__display_values_changed_listener:
self.__display_values_changed_listener.close()
self.__display_values_changed_listener = None
if display_data_channel:
# there are two listeners - the first when new display properties have triggered new display values.
# the second whenever actual new display values arrive. this ensures the display gets updated after
# the user changes it. could use some rethinking.
self.__next_calculated_display_values_listener = display_data_channel.add_calculated_display_values_listener(display_values_changed)
self.__display_values_changed_listener = display_data_channel.display_values_changed_event.listen(display_values_changed)
display_values_changed()
else:
self.__value = None
self.value_stream.fire(None)
| gpl-3.0 |
leighpauls/k2cro4 | third_party/webdriver/pylib/test/selenium/webdriver/common/google_one_box.py | 51 | 1418 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium.common.exceptions import NoSuchElementException
from results_page import ResultsPage
from page_loader import require_loaded
class GoogleOneBox(object):
"""This class models a page that has a google search bar."""
def __init__(self, driver, url):
self._driver = driver
self._url = url
def is_loaded(self):
        try:
self._driver.find_element_by_name("q")
return True
except NoSuchElementException:
return False
def load(self):
self._driver.get(self._url)
@require_loaded
def search_for(self, search_term):
element = self._driver.find_element_by_name("q")
element.send_keys(search_term)
element.submit()
return ResultsPage(self._driver)
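# A rough usage sketch (the driver construction and URL are assumed, not part of
# this module): wrap an existing WebDriver session, load the page, then drive a
# query through the one box and continue on the returned ResultsPage.
#     page = GoogleOneBox(driver, "http://www.google.com")
#     page.load()
#     results_page = page.search_for("selenium webdriver")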
| bsd-3-clause |
tcwicklund/django | django/contrib/admin/sites.py | 120 | 21146 | from functools import update_wrapper
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.engine import Engine
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
system_check_errors = []
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
# Text to put at the end of each page's <title>.
site_title = ugettext_lazy('Django site admin')
# Text to put in each page's <h1>.
site_header = ugettext_lazy('Django administration')
# Text to put at the top of the admin index page.
index_title = ugettext_lazy('Site administration')
# URL for the "View site" link at the top of each admin page.
site_url = '/'
_empty_value_display = '-'
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
def __init__(self, name='admin'):
self._registry = {} # model_class class -> admin_class instance
self.name = name
self._actions = {'delete_selected': actions.delete_selected}
self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
"""
Registers the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, it will use ModelAdmin (the default
admin options). If keyword arguments are given -- e.g., list_display --
they'll be applied as options to the admin class.
If a model is already registered, this will raise AlreadyRegistered.
If a model is abstract, this will raise ImproperlyConfigured.
"""
if not admin_class:
admin_class = ModelAdmin
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured('The model %s is abstract, so it '
'cannot be registered with admin.' % model.__name__)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# Ignore the registration if the model has been
# swapped out.
if not model._meta.swapped:
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
# Instantiate the admin class to save in the registry
admin_obj = admin_class(model, self)
if admin_class is not ModelAdmin and settings.DEBUG:
system_check_errors.extend(admin_obj.check())
self._registry[model] = admin_obj
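    # A minimal usage sketch (the ``Book`` model and its options are assumed,
    # purely illustrative): both calls below go through the loop above, the
    # second one building a ``BookAdmin`` subclass on the fly from the options.
    #     admin.site.register(Book)
    #     admin.site.register(Book, list_display=('title', 'author'))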
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def is_registered(self, model):
"""
Check if a model class is registered with this `AdminSite`.
"""
return model in self._registry
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitly get a registered global action whether it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return six.iteritems(self._actions)
@property
def empty_value_display(self):
return self._empty_value_display
@empty_value_display.setter
def empty_value_display(self, empty_value_display):
self._empty_value_display = empty_value_display
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that admin and contenttypes apps are
installed, as well as the auth context processor.
"""
if not apps.is_installed('django.contrib.admin'):
raise ImproperlyConfigured(
"Put 'django.contrib.admin' in your INSTALLED_APPS "
"setting in order to use the admin application.")
if not apps.is_installed('django.contrib.contenttypes'):
raise ImproperlyConfigured(
"Put 'django.contrib.contenttypes' in your INSTALLED_APPS "
"setting in order to use the admin application.")
try:
default_template_engine = Engine.get_default()
except Exception:
# Skip this non-critical check:
# 1. if the user has a non-trivial TEMPLATES setting and Django
# can't find a default template engine
# 2. if anything goes wrong while loading template engines, in
# order to avoid raising an exception from a confusing location
# Catching ImproperlyConfigured suffices for 1. but 2. requires
# catching all exceptions.
pass
else:
if ('django.contrib.auth.context_processors.auth'
not in default_template_engine.context_processors):
raise ImproperlyConfigured(
"Enable 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATES setting in order to use the admin "
"application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls import url
urls = super(MyAdminSite, self).get_urls()
urls += [
url(r'^my_view/$', self.admin_view(some_view))
]
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request):
if request.path == reverse('admin:logout', current_app=self.name):
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
# Inner import to prevent django.contrib.admin (app) from
# importing django.contrib.auth.models.User (unrelated model).
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
request.get_full_path(),
reverse('admin:login', current_app=self.name)
)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inner, view)
def get_urls(self):
from django.conf.urls import url, include
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.contenttypes.views imports ContentType.
from django.contrib.contenttypes import views as contenttype_views
if settings.DEBUG:
self.check_dependencies()
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
wrapper.admin_site = self
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = [
url(r'^$', wrap(self.index), name='index'),
url(r'^login/$', self.login, name='login'),
url(r'^logout/$', wrap(self.logout), name='logout'),
url(r'^password_change/$', wrap(self.password_change, cacheable=True), name='password_change'),
url(r'^password_change/done/$', wrap(self.password_change_done, cacheable=True),
name='password_change_done'),
url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', wrap(contenttype_views.shortcut),
name='view_on_site'),
]
# Add in each model's views, and create a list of valid URLS for the
# app_index
valid_app_labels = []
for model, model_admin in self._registry.items():
urlpatterns += [
url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
]
if model._meta.app_label not in valid_app_labels:
valid_app_labels.append(model._meta.app_label)
# If there were ModelAdmins registered, we should have a list of app
# labels for which we need to allow access to the app_index view,
if valid_app_labels:
regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
urlpatterns += [
url(regex, wrap(self.app_index), name='app_list'),
]
return urlpatterns
@property
def urls(self):
return self.get_urls(), 'admin', self.name
def each_context(self, request):
"""
Returns a dictionary of variables to put in the template context for
*every* page in the admin site.
"""
return {
'site_title': self.site_title,
'site_header': self.site_header,
'site_url': self.site_url,
'has_permission': self.has_permission(request),
'available_apps': self.get_app_list(request),
}
def password_change(self, request, extra_context=None):
"""
Handles the "change password" task -- both form display and validation.
"""
from django.contrib.admin.forms import AdminPasswordChangeForm
from django.contrib.auth.views import password_change
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'password_change_form': AdminPasswordChangeForm,
'post_change_redirect': url,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
request.current_app = self.name
return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
"""
Displays the "success" page after a password change.
"""
from django.contrib.auth.views import password_change_done
defaults = {
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
request.current_app = self.name
return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
"""
Logs out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import logout
defaults = {
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
request.current_app = self.name
return logout(request, **defaults)
@never_cache
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
if request.method == 'GET' and self.has_permission(request):
# Already logged-in, redirect to admin index
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
from django.contrib.auth.views import login
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.admin.forms eventually imports User.
from django.contrib.admin.forms import AdminAuthenticationForm
context = dict(self.each_context(request),
title=_('Log in'),
app_path=request.get_full_path(),
)
if (REDIRECT_FIELD_NAME not in request.GET and
REDIRECT_FIELD_NAME not in request.POST):
context[REDIRECT_FIELD_NAME] = reverse('admin:index', current_app=self.name)
context.update(extra_context or {})
defaults = {
'extra_context': context,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
request.current_app = self.name
return login(request, **defaults)
def _build_app_dict(self, request, label=None):
"""
Builds the app dictionary. Takes an optional label parameters to filter
models of a specific app.
"""
app_dict = {}
if label:
models = {
m: m_a for m, m_a in self._registry.items()
if m._meta.app_label == label
}
else:
models = self._registry
for model, model_admin in models.items():
app_label = model._meta.app_label
has_module_perms = model_admin.has_module_permission(request)
if not has_module_perms:
if label:
raise PermissionDenied
continue
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True not in perms.values():
continue
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change'):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add'):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': apps.get_app_config(app_label).verbose_name,
'app_label': app_label,
'app_url': reverse(
'admin:app_list',
kwargs={'app_label': app_label},
current_app=self.name,
),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if label:
return app_dict.get(label)
return app_dict
def get_app_list(self, request):
"""
Returns a sorted list of all the installed apps that have been
registered in this site.
"""
app_dict = self._build_app_dict(request)
# Sort the apps alphabetically.
app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
return app_list
@never_cache
def index(self, request, extra_context=None):
"""
Displays the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
app_list = self.get_app_list(request)
context = dict(
self.each_context(request),
title=self.index_title,
app_list=app_list,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.index_template or
'admin/index.html', context)
def app_index(self, request, app_label, extra_context=None):
app_dict = self._build_app_dict(request, app_label)
if not app_dict:
raise Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
app_name = apps.get_app_config(app_label).verbose_name
context = dict(self.each_context(request),
title=_('%(app)s administration') % {'app': app_name},
app_list=[app_dict],
app_label=app_label,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.app_index_template or [
'admin/%s/app_index.html' % app_label,
'admin/app_index.html'
], context)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
site = AdminSite()
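# A brief sketch of wiring a custom site into a URLconf (the model and URL names
# are assumed, not defined here): instantiate your own AdminSite, register models
# on it, and point a URL pattern at its ``urls`` property.
#     my_site = AdminSite(name='myadmin')
#     my_site.register(SomeModel)
#     # in urls.py: url(r'^myadmin/', my_site.urls)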
| bsd-3-clause |
elit3ge/SickRage | sickbeard/providers/binsearch.py | 7 | 3930 | # Author: moparisthebest <[email protected]>
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import urllib
import re
import generic
from sickbeard import logger
from sickbeard import tvcache
class BinSearchProvider(generic.NZBProvider):
def __init__(self):
generic.NZBProvider.__init__(self, "BinSearch")
self.enabled = False
self.public = True
self.cache = BinSearchCache(self)
self.urls = {'base_url': 'https://www.binsearch.info/'}
self.url = self.urls['base_url']
def isEnabled(self):
return self.enabled
class BinSearchCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll Binsearch every 30 minutes max
self.minTime = 30
# compile and save our regular expressions
# this pulls the title from the URL in the description
self.descTitleStart = re.compile('^.*https?://www\.binsearch\.info/.b=')
self.descTitleEnd = re.compile('&.*$')
# these clean up the horrible mess of a title if the above fail
self.titleCleaners = [
re.compile('.?yEnc.?\(\d+/\d+\)$'),
re.compile(' \[\d+/\d+\] '),
]
def _get_title_and_url(self, item):
"""
Retrieves the title and URL data from the item XML node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns: A tuple containing two strings representing title and URL respectively
"""
title = item.get('description')
if title:
title = u'' + title
if self.descTitleStart.match(title):
title = self.descTitleStart.sub('', title)
title = self.descTitleEnd.sub('', title)
title = title.replace('+', '.')
else:
# just use the entire title, looks hard/impossible to parse
title = item.get('title')
if title:
for titleCleaner in self.titleCleaners:
title = titleCleaner.sub('', title)
url = item.get('link')
if url:
url = url.replace('&', '&')
return (title, url)
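    # A rough illustration with made-up feed data: a description containing
    # "https://www.binsearch.info/?b=Some.Show.S01E01+720p&max=250" is trimmed by
    # the two regexes above to "Some.Show.S01E01+720p" and then becomes
    # "Some.Show.S01E01.720p" via the '+' -> '.' replacement, while items without
    # such a link fall back to their raw title with " [1/5] " counters and
    # trailing "yEnc (1/20)" suffixes stripped by titleCleaners.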
def updateCache(self):
# check if we should update
if not self.shouldUpdate():
return
# clear cache
self._clearCache()
# set updated
self.setLastUpdate()
cl = []
for group in ['alt.binaries.boneless','alt.binaries.misc','alt.binaries.hdtv','alt.binaries.hdtv.x264','alt.binaries.tv','alt.binaries.tvseries','alt.binaries.teevee']:
url = self.provider.url + 'rss.php?'
urlArgs = {'max': 1000,'g': group}
url += urllib.urlencode(urlArgs)
logger.log(u"BinSearch cache update URL: " + url, logger.DEBUG)
for item in self.getRSSFeed(url)['entries'] or []:
ci = self._parseItem(item)
if ci is not None:
cl.append(ci)
if len(cl) > 0:
myDB = self._getDB()
myDB.mass_action(cl)
def _checkAuth(self, data):
return data if data['feed'] and data['feed']['title'] != 'Invalid Link' else None
provider = BinSearchProvider()
| gpl-3.0 |
kchodorow/tensorflow | tensorflow/python/client/session_benchmark.py | 32 | 4750 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for interacting with the `tf.Session`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange  # the benchmark loops below use xrange, which Python 3 lacks as a builtin
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SessionBenchmark(test.Benchmark):
"""Tests and benchmarks for interacting with the `tf.Session`."""
def _benchmarkFeed(self, name, target, size, iters):
"""Runs a microbenchmark to measure the cost of feeding a tensor.
Reports the median cost of feeding a tensor of `size` * `sizeof(float)`
bytes.
Args:
name: A human-readable name for logging the output.
target: The session target to use for the benchmark.
size: The number of floating-point numbers to be feed.
iters: The number of iterations to perform.
"""
feed_val = np.random.rand(size).astype(np.float32)
times = []
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=[size])
# Fetch the operation rather than the tensor, to avoid measuring the time
# to fetch back the value.
no_op = array_ops.identity(p).op
with session.Session(target) as sess:
sess.run(no_op, feed_dict={p: feed_val}) # Warm-up run.
for _ in xrange(iters):
start_time = time.time()
sess.run(no_op, feed_dict={p: feed_val})
end_time = time.time()
times.append(end_time - start_time)
print("%s %d %f" % (name, size, np.median(times)))
self.report_benchmark(iters=1, wall_time=np.median(times), name=name)
def _benchmarkFetch(self, name, target, size, iters):
"""Runs a microbenchmark to measure the cost of fetching a tensor.
Reports the median cost of fetching a tensor of `size` * `sizeof(float)`
bytes.
Args:
name: A human-readable name for logging the output.
target: The session target to use for the benchmark.
size: The number of floating-point numbers to be fetched.
iters: The number of iterations to perform.
"""
times = []
with ops.Graph().as_default():
# Define the tensor to be fetched as a variable, to avoid
# constant-folding.
v = variables.Variable(random_ops.random_normal([size]))
with session.Session(target) as sess:
sess.run(v.initializer)
sess.run(v) # Warm-up run.
for _ in xrange(iters):
start_time = time.time()
sess.run(v)
end_time = time.time()
times.append(end_time - start_time)
print("%s %d %f" % (name, size, np.median(times)))
self.report_benchmark(iters=1, wall_time=np.median(times), name=name)
def benchmarkGrpcSession(self):
server = server_lib.Server.create_local_server()
self._benchmarkFeed("benchmark_session_feed_grpc_4B", server.target, 1,
10000)
session.Session.reset(server.target)
self._benchmarkFeed("benchmark_session_feed_grpc_4MB", server.target, 1
<< 20, 100)
session.Session.reset(server.target)
self._benchmarkFetch("benchmark_session_fetch_grpc_4B", server.target, 1,
20000)
session.Session.reset(server.target)
self._benchmarkFetch("benchmark_session_fetch_grpc_4MB", server.target, 1
<< 20, 100)
session.Session.reset(server.target)
def benchmarkDirectSession(self):
self._benchmarkFeed("benchmark_session_feed_direct_4B", "", 1, 5000)
self._benchmarkFeed("benchmark_session_feed_direct_4MB", "", 1 << 20, 200)
self._benchmarkFetch("benchmark_session_fetch_direct_4B", "", 1, 5000)
self._benchmarkFetch("benchmark_session_fetch_direct_4MB", "", 1 << 20, 100)
if __name__ == "__main__":
test.main()
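# How this is typically driven (an assumption, not verified against this exact
# TensorFlow checkout): run the file with the benchmark filter flag, e.g.
#     python session_benchmark.py --benchmarks=SessionBenchmark
# which executes every benchmark* method above and prints the median wall times
# recorded via report_benchmark().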
| apache-2.0 |
tatsuy/ardupilot | Tools/LogAnalyzer/tests/TestDualGyroDrift.py | 73 | 5485 | from __future__ import print_function
from LogAnalyzer import Test,TestResult
import DataflashLog
# import scipy
# import pylab #### TEMP!!! only for dev
# from scipy import signal
class TestDualGyroDrift(Test):
'''test for gyro drift between dual IMU data'''
def __init__(self):
Test.__init__(self)
self.name = "Gyro Drift"
self.enable = False
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
# if "IMU" not in logdata.channels or "IMU2" not in logdata.channels:
# self.result.status = TestResult.StatusType.NA
# return
# imuX = logdata.channels["IMU"]["GyrX"].listData
# imu2X = logdata.channels["IMU2"]["GyrX"].listData
# # NOTE: weird thing about Holger's log is that the counts of IMU+IMU2 are different
# print("length 1: %.2f, length 2: %.2f" % (len(imuX),len(imu2X)))
# #assert(len(imuX) == len(imu2X))
# # divide the curve into segments and get the average of each segment
# # we will get the diff between those averages, rather than a per-sample diff as the IMU+IMU2 arrays are often not the same length
# diffThresholdWARN = 0.03
# diffThresholdFAIL = 0.05
# nSamples = 10
# imu1XAverages, imu1YAverages, imu1ZAverages, imu2XAverages, imu2YAverages, imu2ZAverages = ([],[],[],[],[],[])
# imuXDiffAverages, imuYDiffAverages, imuZDiffAverages = ([],[],[])
# maxDiffX, maxDiffY, maxDiffZ = (0,0,0)
# sliceLength1 = len(logdata.channels["IMU"]["GyrX"].dictData.values()) / nSamples
# sliceLength2 = len(logdata.channels["IMU2"]["GyrX"].dictData.values()) / nSamples
# for i in range(0,nSamples):
# imu1XAverages.append(numpy.mean(logdata.channels["IMU"]["GyrX"].dictData.values()[i*sliceLength1:i*sliceLength1+sliceLength1]))
# imu1YAverages.append(numpy.mean(logdata.channels["IMU"]["GyrY"].dictData.values()[i*sliceLength1:i*sliceLength1+sliceLength1]))
# imu1ZAverages.append(numpy.mean(logdata.channels["IMU"]["GyrZ"].dictData.values()[i*sliceLength1:i*sliceLength1+sliceLength1]))
# imu2XAverages.append(numpy.mean(logdata.channels["IMU2"]["GyrX"].dictData.values()[i*sliceLength2:i*sliceLength2+sliceLength2]))
# imu2YAverages.append(numpy.mean(logdata.channels["IMU2"]["GyrY"].dictData.values()[i*sliceLength2:i*sliceLength2+sliceLength2]))
# imu2ZAverages.append(numpy.mean(logdata.channels["IMU2"]["GyrZ"].dictData.values()[i*sliceLength2:i*sliceLength2+sliceLength2]))
# imuXDiffAverages.append(imu2XAverages[-1]-imu1XAverages[-1])
# imuYDiffAverages.append(imu2YAverages[-1]-imu1YAverages[-1])
# imuZDiffAverages.append(imu2ZAverages[-1]-imu1ZAverages[-1])
# if abs(imuXDiffAverages[-1]) > maxDiffX:
# maxDiffX = imuXDiffAverages[-1]
# if abs(imuYDiffAverages[-1]) > maxDiffY:
# maxDiffY = imuYDiffAverages[-1]
# if abs(imuZDiffAverages[-1]) > maxDiffZ:
# maxDiffZ = imuZDiffAverages[-1]
# if max(maxDiffX,maxDiffY,maxDiffZ) > diffThresholdFAIL:
# self.result.status = TestResult.StatusType.FAIL
# self.result.statusMessage = "IMU/IMU2 gyro averages differ by more than %s radians" % diffThresholdFAIL
# elif max(maxDiffX,maxDiffY,maxDiffZ) > diffThresholdWARN:
# self.result.status = TestResult.StatusType.WARN
# self.result.statusMessage = "IMU/IMU2 gyro averages differ by more than %s radians" % diffThresholdWARN
# # pylab.plot(zip(*imuX)[0], zip(*imuX)[1], 'g')
# # pylab.plot(zip(*imu2X)[0], zip(*imu2X)[1], 'r')
# #pylab.plot(range(0,(nSamples*sliceLength1),sliceLength1), imu1ZAverages, 'b')
# print("Gyro averages1X: " + repr(imu1XAverages))
# print("Gyro averages1Y: " + repr(imu1YAverages))
# print("Gyro averages1Z: " + repr(imu1ZAverages) + "\n")
# print("Gyro averages2X: " + repr(imu2XAverages))
# print("Gyro averages2Y: " + repr(imu2YAverages))
# print("Gyro averages2Z: " + repr(imu2ZAverages) + "\n")
# print("Gyro averages diff X: " + repr(imuXDiffAverages))
# print("Gyro averages diff Y: " + repr(imuYDiffAverages))
# print("Gyro averages diff Z: " + repr(imuZDiffAverages))
# # lowpass filter using numpy
# # cutoff = 100
# # fs = 10000.0
# # b,a = scipy.signal.filter_design.butter(5,cutoff/(fs/2))
# # imuXFiltered = scipy.signal.filtfilt(b,a,zip(*imuX)[1])
# # imu2XFiltered = scipy.signal.filtfilt(b,a,zip(*imu2X)[1])
# #pylab.plot(imuXFiltered, 'r')
# # TMP: DISPLAY BEFORE+AFTER plots
# pylab.show()
# # print("imuX average before lowpass filter: %.8f" % logdata.channels["IMU"]["GyrX"].avg())
# # print("imuX average after lowpass filter: %.8f" % numpy.mean(imuXFiltered))
# # print("imu2X average before lowpass filter: %.8f" % logdata.channels["IMU2"]["GyrX"].avg())
# # print("imu2X average after lowpass filter: %.8f" % numpy.mean(imu2XFiltered))
# avg1X = logdata.channels["IMU"]["GyrX"].avg()
# avg1Y = logdata.channels["IMU"]["GyrY"].avg()
# avg1Z = logdata.channels["IMU"]["GyrZ"].avg()
# avg2X = logdata.channels["IMU2"]["GyrX"].avg()
# avg2Y = logdata.channels["IMU2"]["GyrY"].avg()
# avg2Z = logdata.channels["IMU2"]["GyrZ"].avg()
# avgRatioX = (max(avg1X,avg2X) - min(avg1X,avg2X)) / #abs(max(avg1X,avg2X) / min(avg1X,avg2X))
# avgRatioY = abs(max(avg1Y,avg2Y) / min(avg1Y,avg2Y))
# avgRatioZ = abs(max(avg1Z,avg2Z) / min(avg1Z,avg2Z))
# self.result.statusMessage = "IMU gyro avg: %.4f,%.4f,%.4f\nIMU2 gyro avg: %.4f,%.4f,%.4f\nAvg ratio: %.4f,%.4f,%.4f" % (avg1X,avg1Y,avg1Z, avg2X,avg2Y,avg2Z, avgRatioX,avgRatioY,avgRatioZ)
| gpl-3.0 |
fragro/django-postman | setup.py | 6 | 1183 | from setuptools import setup, find_packages
setup(
name='django-postman',
version=__import__('postman').__version__,
description='User-to-User messaging system for Django, with gateway to AnonymousUser,' \
' moderation and thread management, user & exchange filters, inbox/sent/archives/trash folders,' \
' support for apps: auto-complete, notification, mailer.',
long_description=open('docs/index.rst').read().split('\n----\n', 1)[0],
author='Patrick Samson',
author_email='[email protected]',
url='http://bitbucket.org/psam/django-postman/overview',
license='BSD',
packages=find_packages(exclude=('docs',)),
include_package_data=True,
keywords='django messages messaging email moderation',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications :: Email',
],
install_requires=[
'Django',
],
)
| bsd-3-clause |
konstruktoid/ansible-upstream | lib/ansible/modules/monitoring/icinga2_feature.py | 89 | 4294 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Loic Blot <[email protected]>
# Copyright (c) 2018, Ansible Project
# Sponsored by Infopro Digital. http://www.infopro-digital.com/
# Sponsored by E.T.A.I. http://www.etai.fr/
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: icinga2_feature
short_description: Manage Icinga2 feature
description:
- This module can be used to enable or disable an Icinga2 feature.
version_added: "2.3"
author: "Loic Blot (@nerzhul)"
options:
name:
description:
- This is the feature name to enable or disable.
required: True
state:
description:
- If set to C(present) and feature is disabled, then feature is enabled.
- If set to C(present) and feature is already enabled, then nothing is changed.
- If set to C(absent) and feature is enabled, then feature is disabled.
- If set to C(absent) and feature is already disabled, then nothing is changed.
choices: [ "present", "absent" ]
default: present
'''
EXAMPLES = '''
- name: Enable ido-pgsql feature
icinga2_feature:
name: ido-pgsql
state: present
- name: Disable api feature
icinga2_feature:
name: api
state: absent
'''
RETURN = '''
#
'''
import re
from ansible.module_utils.basic import AnsibleModule
class Icinga2FeatureHelper:
def __init__(self, module):
self.module = module
self._icinga2 = module.get_bin_path('icinga2', True)
self.feature_name = self.module.params['name']
self.state = self.module.params['state']
def _exec(self, args):
cmd = [self._icinga2, 'feature']
rc, out, err = self.module.run_command(cmd + args, check_rc=True)
return rc, out
def manage(self):
rc, out = self._exec(["list"])
if rc != 0:
self.module.fail_json(msg="Unable to list icinga2 features. "
"Ensure icinga2 is installed and present in binary path.")
# If feature is already in good state, just exit
if (re.search("Disabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "absent") or \
(re.search("Enabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "present"):
self.module.exit_json(changed=False)
if self.module.check_mode:
self.module.exit_json(changed=True)
feature_enable_str = "enable" if self.state == "present" else "disable"
rc, out = self._exec([feature_enable_str, self.feature_name])
change_applied = False
if self.state == "present":
if rc != 0:
self.module.fail_json(msg="Failed to %s feature %s."
" icinga2 command returned %s" % (feature_enable_str,
self.feature_name,
out))
if re.search("already enabled", out) is None:
change_applied = True
else:
if rc == 0:
change_applied = True
# RC is not 0 for this already disabled feature, handle it as no change applied
elif re.search("Cannot disable feature '%s'. Target file .* does not exist" % self.feature_name, out):
change_applied = False
else:
self.module.fail_json(msg="Failed to disable feature. Command returns %s" % out)
self.module.exit_json(changed=change_applied)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', choices=["present", "absent"], default="present")
),
supports_check_mode=True
)
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
Icinga2FeatureHelper(module).manage()
if __name__ == '__main__':
main()
| gpl-3.0 |
Distrotech/intellij-community | python/lib/Lib/dummy_threading.py | 102 | 2900 | """Faux ``threading`` version using ``dummy_thread`` instead of ``thread``.
The module ``_dummy_threading`` is added to ``sys.modules`` in order
to not have ``threading`` considered imported. Had ``threading`` been
directly imported it would have made all subsequent imports succeed
regardless of whether ``thread`` was available which is not desired.
:Author: Brett Cannon
:Contact: [email protected]
XXX: Try to get rid of ``_dummy_threading``.
"""
from sys import modules as sys_modules
import dummy_thread
# Declaring now so as to not have to nest ``try``s to get proper clean-up.
holding_thread = False
holding_threading = False
holding__threading_local = False
try:
# Could have checked if ``thread`` was not in sys.modules and gone
# a different route, but decided to mirror technique used with
# ``threading`` below.
if 'thread' in sys_modules:
held_thread = sys_modules['thread']
holding_thread = True
# Must have some module named ``thread`` that implements its API
# in order to initially import ``threading``.
sys_modules['thread'] = sys_modules['dummy_thread']
if 'threading' in sys_modules:
# If ``threading`` is already imported, might as well prevent
# trying to import it more than needed by saving it if it is
# already imported before deleting it.
held_threading = sys_modules['threading']
holding_threading = True
del sys_modules['threading']
if '_threading_local' in sys_modules:
# If ``_threading_local`` is already imported, might as well prevent
# trying to import it more than needed by saving it if it is
# already imported before deleting it.
held__threading_local = sys_modules['_threading_local']
holding__threading_local = True
del sys_modules['_threading_local']
import threading
# Need a copy of the code kept somewhere...
sys_modules['_dummy_threading'] = sys_modules['threading']
del sys_modules['threading']
sys_modules['_dummy__threading_local'] = sys_modules['_threading_local']
del sys_modules['_threading_local']
from _dummy_threading import *
from _dummy_threading import __all__
finally:
# Put back ``threading`` if we overwrote earlier
if holding_threading:
sys_modules['threading'] = held_threading
del held_threading
del holding_threading
# Put back ``_threading_local`` if we overwrote earlier
if holding__threading_local:
sys_modules['_threading_local'] = held__threading_local
del held__threading_local
del holding__threading_local
# Put back ``thread`` if we overwrote, else del the entry we made
if holding_thread:
sys_modules['thread'] = held_thread
del held_thread
else:
del sys_modules['thread']
del holding_thread
del dummy_thread
del sys_modules
| apache-2.0 |
dssg/cincinnati2015-public | evaluation/webapp/evaluation.py | 1 | 5838 | from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
from webapp import config
def weighted_f1(scores):
f1_0 = scores["f1"][0] * scores["support"][0]
f1_1 = scores["f1"][1] * scores["support"][1]
return (f1_0 + f1_1) / (scores["support"][0] + scores["support"][1])
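# A tiny worked example with assumed scores: f1 = [0.8, 0.5] and support = [90, 10]
# gives (0.8*90 + 0.5*10) / (90 + 10) = 0.77, i.e. the per-class F1 values weighted
# by how many samples each class contributes.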
def plot_normalized_confusion_matrix(labels, predictions):
cutoff = 0.5
predictions_binary = np.copy(predictions)
predictions_binary[predictions_binary >= cutoff] = 1
predictions_binary[predictions_binary < cutoff] = 0
cm = metrics.confusion_matrix(labels, predictions_binary)
np.set_printoptions(precision=2)
fig = plt.figure()
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
target_names = ["No violation", "Violation"]
plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Normalized Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return fig
def plot_feature_importances(feature_names, feature_importances):
importances = list(zip(feature_names, list(feature_importances)))
importances = pd.DataFrame(importances, columns=["Feature", "Importance"])
importances = importances.set_index("Feature")
importances = importances.sort(columns="Importance", ascending=False)
importances = importances[0:20]
with plt.style.context(('ggplot')):
fig, ax = plt.subplots()
importances.plot(kind="barh", legend=False, ax=ax)
plt.tight_layout()
plt.title("Feature importances (Top 20)")
return fig
def plot_growth(results):
results = pd.DataFrame(results, columns=["date", "score"])
results = results.set_index("date")
results["score"] = results["score"].astype(float)
results = results.reindex(pd.date_range(datetime(2015, 7, 28), datetime(2015, 8, 27)))
results["random"] = pd.Series(3409/float(6124), index=results.index)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots(figsize=(8, 3))
results["score"].plot(legend=False, ax=ax, marker="x")
results["random"].plot(legend=False, ax=ax, style='--')
ax.set_ylabel(config.score_name)
plt.tight_layout()
ax.set_ylim(0.5, 1.0)
return fig
def precision_at_x_percent(test_labels, test_predictions, x_percent=0.01, return_cutoff=False):
cutoff_index = int(len(test_predictions) * x_percent)
cutoff_index = min(cutoff_index, len(test_predictions) -1)
sorted_by_probability = np.sort(test_predictions)[::-1]
cutoff_probability = sorted_by_probability[cutoff_index]
test_predictions_binary = np.copy(test_predictions)
test_predictions_binary[test_predictions_binary >= cutoff_probability] = 1
test_predictions_binary[test_predictions_binary < cutoff_probability] = 0
precision, _, _, _ = metrics.precision_recall_fscore_support(test_labels, test_predictions_binary)
precision = precision[1] # only interested in precision for label 1
if return_cutoff:
return precision, cutoff_probability
else:
return precision
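# A small worked example with made-up data: labels [1, 0, 1, 0], predicted
# probabilities [0.9, 0.8, 0.2, 0.1] and x_percent=0.25 put the cutoff at the
# second-highest score (0.8), so the top two cases get flagged and the returned
# precision is 0.5 because only one of them is a true violation.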
def plot_precision_recall_n(test_labels, test_predictions):
y_score = test_predictions
precision_curve, recall_curve, pr_thresholds = metrics.precision_recall_curve(test_labels, y_score)
precision_curve = precision_curve[:-1]
recall_curve = recall_curve[:-1]
pct_above_per_thresh = []
number_scored = len(y_score)
for value in pr_thresholds:
num_above_thresh = len(y_score[y_score>=value])
pct_above_thresh = num_above_thresh / float(number_scored)
pct_above_per_thresh.append(pct_above_thresh)
pct_above_per_thresh = np.array(pct_above_per_thresh)
with plt.style.context(('ggplot')):
plt.clf()
fig, ax1 = plt.subplots()
ax1.plot(pct_above_per_thresh, precision_curve, "#000099")
ax1.set_xlabel('percent of population')
ax1.set_ylabel('precision', color="#000099")
plt.ylim([0.0, 1.0])
ax2 = ax1.twinx()
ax2.plot(pct_above_per_thresh, recall_curve, "#CC0000")
ax2.set_ylabel('recall', color="#CC0000")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.title("Precision-recall for top x%")
return fig
def plot_precision_cutoff(test_labels, test_predictions):
percent_range = [0.001* i for i in range(1, 10)] + [0.01 * i for i in range(1, 101)]
precisions_and_cutoffs = [precision_at_x_percent(test_labels, test_predictions, x_percent=p, return_cutoff=True)
for p in percent_range]
precisions, cutoffs = zip(*precisions_and_cutoffs)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots()
ax.plot(percent_range, precisions, "#000099")
ax.set_xlabel('percent of population')
ax.set_ylabel('precision', color="#000099")
plt.ylim([0.0, 1.0])
ax2 = ax.twinx()
ax2.plot(percent_range, cutoffs, "#CC0000")
ax2.set_ylabel('cutoff at', color="#CC0000")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.title("Precision at x%")
return fig
def plot_ROC(test_labels, test_predictions):
fpr, tpr, thresholds = metrics.roc_curve(test_labels, test_predictions, pos_label=1)
with plt.style.context(('ggplot')):
fig, ax = plt.subplots()
        ax.plot(fpr, tpr)
#ax.plot([0, 1], [0, 1], 'k--')
#plt.xlim([0.0, 1.0])
#plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
return fig
| mit |
oihane/odoo | openerp/addons/base/tests/test_orm.py | 49 | 18125 | from collections import defaultdict
from openerp.tools import mute_logger
from openerp.tests import common
UID = common.ADMIN_USER_ID
DB = common.DB
class TestORM(common.TransactionCase):
""" test special behaviors of ORM CRUD functions
TODO: use real Exceptions types instead of Exception """
def setUp(self):
super(TestORM, self).setUp()
cr, uid = self.cr, self.uid
self.partner = self.registry('res.partner')
self.users = self.registry('res.users')
self.p1 = self.partner.name_create(cr, uid, 'W')[0]
self.p2 = self.partner.name_create(cr, uid, 'Y')[0]
self.ir_rule = self.registry('ir.rule')
# sample unprivileged user
employee_gid = self.ref('base.group_user')
self.uid2 = self.users.create(cr, uid, {'name': 'test user', 'login': 'test', 'groups_id': [4,employee_gid]})
@mute_logger('openerp.models')
def testAccessDeletedRecords(self):
""" Verify that accessing deleted records works as expected """
cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
self.partner.unlink(cr, uid, [p1])
# read() is expected to skip deleted records because our API is not
# transactional for a sequence of search()->read() performed from the
# client-side... a concurrent deletion could therefore cause spurious
# exceptions even when simply opening a list view!
        # /!\ Using unprivileged user to detect former side effects of ir.rules!
self.assertEqual([{'id': p2, 'name': 'Y'}], self.partner.read(cr, uid2, [p1,p2], ['name']), "read() should skip deleted records")
self.assertEqual([], self.partner.read(cr, uid2, [p1], ['name']), "read() should skip deleted records")
# Deleting an already deleted record should be simply ignored
self.assertTrue(self.partner.unlink(cr, uid, [p1]), "Re-deleting should be a no-op")
# Updating an already deleted record should raise, even as admin
with self.assertRaises(Exception):
self.partner.write(cr, uid, [p1], {'name': 'foo'})
@mute_logger('openerp.models')
def testAccessFilteredRecords(self):
""" Verify that accessing filtered records works as expected for non-admin user """
cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
partner_model = self.registry('ir.model').search(cr, uid, [('model','=','res.partner')])[0]
self.ir_rule.create(cr, uid, {'name': 'Y is invisible',
'domain_force': [('id', '!=', p1)],
'model_id': partner_model})
# search as unprivileged user
partners = self.partner.search(cr, uid2, [])
self.assertFalse(p1 in partners, "W should not be visible...")
self.assertTrue(p2 in partners, "... but Y should be visible")
# read as unprivileged user
with self.assertRaises(Exception):
self.partner.read(cr, uid2, [p1], ['name'])
# write as unprivileged user
with self.assertRaises(Exception):
self.partner.write(cr, uid2, [p1], {'name': 'foo'})
# unlink as unprivileged user
with self.assertRaises(Exception):
self.partner.unlink(cr, uid2, [p1])
# Prepare mixed case
self.partner.unlink(cr, uid, [p2])
# read mixed records: some deleted and some filtered
with self.assertRaises(Exception):
self.partner.read(cr, uid2, [p1,p2], ['name'])
# delete mixed records: some deleted and some filtered
with self.assertRaises(Exception):
self.partner.unlink(cr, uid2, [p1,p2])
def test_multi_read(self):
record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
records = self.partner.read(self.cr, UID, [record_id])
self.assertIsInstance(records, list)
def test_one_read(self):
record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
record = self.partner.read(self.cr, UID, record_id)
self.assertIsInstance(record, dict)
@mute_logger('openerp.models')
def test_search_read(self):
# simple search_read
self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
found = self.partner.search_read(self.cr, UID, [['name', '=', 'MyPartner1']], ['name'])
self.assertEqual(len(found), 1)
self.assertEqual(found[0]['name'], 'MyPartner1')
self.assertTrue('id' in found[0])
# search_read correct order
self.partner.create(self.cr, UID, {'name': 'MyPartner2'})
found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name")
self.assertEqual(len(found), 2)
self.assertEqual(found[0]['name'], 'MyPartner1')
self.assertEqual(found[1]['name'], 'MyPartner2')
found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name desc")
self.assertEqual(len(found), 2)
self.assertEqual(found[0]['name'], 'MyPartner2')
self.assertEqual(found[1]['name'], 'MyPartner1')
# search_read that finds nothing
found = self.partner.search_read(self.cr, UID, [['name', '=', 'Does not exists']], ['name'])
self.assertEqual(len(found), 0)
def test_exists(self):
partner = self.partner.browse(self.cr, UID, [])
# check that records obtained from search exist
recs = partner.search([])
self.assertTrue(recs)
self.assertEqual(recs.exists(), recs)
# check that there is no record with id 0
recs = partner.browse([0])
self.assertFalse(recs.exists())
def test_groupby_date(self):
partners = dict(
A='2012-11-19',
B='2012-12-17',
C='2012-12-31',
D='2013-01-07',
E='2013-01-14',
F='2013-01-28',
G='2013-02-11',
)
all_partners = []
partners_by_day = defaultdict(set)
partners_by_month = defaultdict(set)
partners_by_year = defaultdict(set)
for name, date in partners.items():
p = self.partner.create(self.cr, UID, dict(name=name, date=date))
all_partners.append(p)
partners_by_day[date].add(p)
partners_by_month[date.rsplit('-', 1)[0]].add(p)
partners_by_year[date.split('-', 1)[0]].add(p)
def read_group(interval, domain=None):
main_domain = [('id', 'in', all_partners)]
if domain:
domain = ['&'] + main_domain + domain
else:
domain = main_domain
rg = self.partner.read_group(self.cr, self.uid, domain, ['date'], 'date' + ':' + interval)
result = {}
for r in rg:
result[r['date:' + interval]] = set(self.partner.search(self.cr, self.uid, r['__domain']))
return result
self.assertEqual(len(read_group('day')), len(partners_by_day))
self.assertEqual(len(read_group('month')), len(partners_by_month))
self.assertEqual(len(read_group('year')), len(partners_by_year))
rg = self.partner.read_group(self.cr, self.uid, [('id', 'in', all_partners)],
['date'], ['date:month', 'date:day'], lazy=False)
self.assertEqual(len(rg), len(all_partners))
class TestInherits(common.TransactionCase):
""" test the behavior of the orm for models that use _inherits;
specifically: res.users, that inherits from res.partner
"""
def setUp(self):
super(TestInherits, self).setUp()
self.partner = self.registry('res.partner')
self.user = self.registry('res.users')
def test_default(self):
""" `default_get` cannot return a dictionary or a new id """
defaults = self.user.default_get(self.cr, UID, ['partner_id'])
if 'partner_id' in defaults:
self.assertIsInstance(defaults['partner_id'], (bool, int, long))
def test_create(self):
""" creating a user should automatically create a new partner """
partners_before = self.partner.search(self.cr, UID, [])
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo = self.user.browse(self.cr, UID, foo_id)
self.assertNotIn(foo.partner_id.id, partners_before)
def test_create_with_ancestor(self):
""" creating a user with a specific 'partner_id' should not create a new partner """
par_id = self.partner.create(self.cr, UID, {'name': 'Foo'})
partners_before = self.partner.search(self.cr, UID, [])
foo_id = self.user.create(self.cr, UID, {'partner_id': par_id, 'login': 'foo', 'password': 'foo'})
partners_after = self.partner.search(self.cr, UID, [])
self.assertEqual(set(partners_before), set(partners_after))
foo = self.user.browse(self.cr, UID, foo_id)
self.assertEqual(foo.name, 'Foo')
self.assertEqual(foo.partner_id.id, par_id)
@mute_logger('openerp.models')
def test_read(self):
""" inherited fields should be read without any indirection """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo_values, = self.user.read(self.cr, UID, [foo_id])
partner_id = foo_values['partner_id'][0]
partner_values, = self.partner.read(self.cr, UID, [partner_id])
self.assertEqual(foo_values['name'], partner_values['name'])
foo = self.user.browse(self.cr, UID, foo_id)
self.assertEqual(foo.name, foo.partner_id.name)
@mute_logger('openerp.models')
def test_copy(self):
""" copying a user should automatically copy its partner, too """
foo_id = self.user.create(self.cr, UID, {
'name': 'Foo',
'login': 'foo',
'password': 'foo',
'supplier': True,
})
foo_before, = self.user.read(self.cr, UID, [foo_id])
del foo_before['__last_update']
bar_id = self.user.copy(self.cr, UID, foo_id, {
'login': 'bar',
'password': 'bar',
})
foo_after, = self.user.read(self.cr, UID, [foo_id])
del foo_after['__last_update']
self.assertEqual(foo_before, foo_after)
foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
self.assertEqual(bar.name, 'Foo (copy)')
self.assertEqual(bar.login, 'bar')
self.assertEqual(foo.supplier, bar.supplier)
self.assertNotEqual(foo.id, bar.id)
self.assertNotEqual(foo.partner_id.id, bar.partner_id.id)
@mute_logger('openerp.models')
def test_copy_with_ancestor(self):
""" copying a user with 'parent_id' in defaults should not duplicate the partner """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo',
'login_date': '2016-01-01', 'signature': 'XXX'})
par_id = self.partner.create(self.cr, UID, {'name': 'Bar'})
foo_before, = self.user.read(self.cr, UID, [foo_id])
del foo_before['__last_update']
partners_before = self.partner.search(self.cr, UID, [])
bar_id = self.user.copy(self.cr, UID, foo_id, {'partner_id': par_id, 'login': 'bar'})
foo_after, = self.user.read(self.cr, UID, [foo_id])
del foo_after['__last_update']
partners_after = self.partner.search(self.cr, UID, [])
self.assertEqual(foo_before, foo_after)
self.assertEqual(set(partners_before), set(partners_after))
foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
self.assertNotEqual(foo.id, bar.id)
self.assertEqual(bar.partner_id.id, par_id)
self.assertEqual(bar.login, 'bar', "login is given from copy parameters")
self.assertFalse(bar.login_date, "login_date should not be copied from original record")
self.assertEqual(bar.name, 'Bar', "name is given from specific partner")
self.assertEqual(bar.signature, foo.signature, "signature should be copied")
CREATE = lambda values: (0, False, values)
UPDATE = lambda id, values: (1, id, values)
DELETE = lambda id: (2, id, False)
FORGET = lambda id: (3, id, False)
LINK_TO = lambda id: (4, id, False)
DELETE_ALL = lambda: (5, False, False)
REPLACE_WITH = lambda ids: (6, False, ids)
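# Each helper above builds one of the (operation_code, id, values) triplets the ORM
# accepts when writing one2many/many2many fields: 0 creates a new linked record,
# 1 updates an existing one, 2 deletes it, 3 unlinks without deleting, 4 links an
# existing id, 5 unlinks all, and 6 replaces the whole set with the given ids.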
def sorted_by_id(list_of_dicts):
"sort dictionaries by their 'id' field; useful for comparisons"
return sorted(list_of_dicts, key=lambda d: d.get('id'))
class TestO2MSerialization(common.TransactionCase):
""" test the orm method 'write' on one2many fields """
def setUp(self):
super(TestO2MSerialization, self).setUp()
self.partner = self.registry('res.partner')
def test_no_command(self):
" empty list of commands yields an empty list of records "
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [])
self.assertEqual(results, [])
def test_CREATE_commands(self):
" returns the VALUES dict as-is "
values = [{'foo': 'bar'}, {'foo': 'baz'}, {'foo': 'baq'}]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', map(CREATE, values))
self.assertEqual(results, values)
def test_LINK_TO_command(self):
" reads the records from the database, records are returned with their ids. "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = map(LINK_TO, ids)
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_bare_ids_command(self):
" same as the equivalent LINK_TO commands "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', ids, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_UPDATE_command(self):
" take the in-db records and merge the provided information in "
id_foo = self.partner.create(self.cr, UID, {'name': 'foo'})
id_bar = self.partner.create(self.cr, UID, {'name': 'bar'})
id_baz = self.partner.create(self.cr, UID, {'name': 'baz', 'city': 'tag'})
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [
LINK_TO(id_foo),
UPDATE(id_bar, {'name': 'qux', 'city': 'tagtag'}),
UPDATE(id_baz, {'name': 'quux'})
], ['name', 'city'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': id_foo, 'name': 'foo', 'city': False},
{'id': id_bar, 'name': 'qux', 'city': 'tagtag'},
{'id': id_baz, 'name': 'quux', 'city': 'tag'}
]))
def test_DELETE_command(self):
" deleted records are not returned at all. "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = [DELETE(ids[0]), DELETE(ids[1]), DELETE(ids[2])]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(results, [])
def test_mixed_commands(self):
ids = [
self.partner.create(self.cr, UID, {'name': name})
for name in ['NObar', 'baz', 'qux', 'NOquux', 'NOcorge', 'garply']
]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [
CREATE({'name': 'foo'}),
UPDATE(ids[0], {'name': 'bar'}),
LINK_TO(ids[1]),
DELETE(ids[2]),
UPDATE(ids[3], {'name': 'quux',}),
UPDATE(ids[4], {'name': 'corge'}),
CREATE({'name': 'grault'}),
LINK_TO(ids[5])
], ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'name': 'foo'},
{'id': ids[0], 'name': 'bar'},
{'id': ids[1], 'name': 'baz'},
{'id': ids[3], 'name': 'quux'},
{'id': ids[4], 'name': 'corge'},
{'name': 'grault'},
{'id': ids[5], 'name': 'garply'}
]))
def test_LINK_TO_pairs(self):
"LINK_TO commands can be written as pairs, instead of triplets"
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = map(lambda id: (4, id), ids)
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_singleton_commands(self):
"DELETE_ALL can appear as a singleton"
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [DELETE_ALL()], ['name'])
self.assertEqual(results, [])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
anirudhSK/chromium | tools/telemetry/telemetry/core/platform/android_platform_backend_unittest.py | 2 | 3190 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import unittest
from telemetry import test
from telemetry.core import bitmap
from telemetry.core import util
from telemetry.core.platform import android_platform_backend
from telemetry.unittest import system_stub
class MockAdbCommands(object):
def __init__(self, mock_content, system_properties):
self.mock_content = mock_content
self.system_properties = system_properties
if self.system_properties.get('ro.product.cpu.abi') == None:
self.system_properties['ro.product.cpu.abi'] = 'armeabi-v7a'
def CanAccessProtectedFileContents(self):
return True
# pylint: disable=W0613
def GetProtectedFileContents(self, file_name):
return self.mock_content
def PushIfNeeded(self, host_binary, device_path):
pass
def RunShellCommand(self, command):
return []
class AndroidPlatformBackendTest(unittest.TestCase):
def setUp(self):
self._stubs = system_stub.Override(android_platform_backend,
['perf_control', 'thermal_throttle'])
def tearDown(self):
self._stubs.Restore()
@test.Disabled('chromeos')
def testGetCpuStats(self):
proc_stat_content = [
'7702 (.android.chrome) S 167 167 0 0 -1 1077936448 '
'3247 0 0 0 4 1 0 0 20 0 9 0 5603962 337379328 5867 '
'4294967295 1074458624 1074463824 3197495984 3197494152 '
'1074767676 0 4612 0 38136 4294967295 0 0 17 0 0 0 0 0 0 '
'1074470376 1074470912 1102155776']
adb_valid_proc_content = MockAdbCommands(proc_stat_content, {})
backend = android_platform_backend.AndroidPlatformBackend(
adb_valid_proc_content, False)
cpu_stats = backend.GetCpuStats('7702')
self.assertEquals(cpu_stats, {'CpuProcessTime': 5.0})
@test.Disabled('chromeos')
def testGetCpuStatsInvalidPID(self):
# Mock an empty /proc/pid/stat.
adb_empty_proc_stat = MockAdbCommands([], {})
backend = android_platform_backend.AndroidPlatformBackend(
adb_empty_proc_stat, False)
cpu_stats = backend.GetCpuStats('7702')
self.assertEquals(cpu_stats, {})
@test.Disabled
def testFramesFromMp4(self):
    mock_adb = MockAdbCommands([], {})
backend = android_platform_backend.AndroidPlatformBackend(mock_adb, False)
try:
backend.InstallApplication('avconv')
finally:
if not backend.CanLaunchApplication('avconv'):
logging.warning('Test not supported on this platform')
return # pylint: disable=W0150
vid = os.path.join(util.GetUnittestDataDir(), 'vid.mp4')
expected_timestamps = [
0,
763,
783,
940,
1715,
1732,
1842,
1926,
]
# pylint: disable=W0212
for i, timestamp_bitmap in enumerate(backend._FramesFromMp4(vid)):
timestamp, bmp = timestamp_bitmap
self.assertEquals(timestamp, expected_timestamps[i])
expected_bitmap = bitmap.Bitmap.FromPngFile(os.path.join(
util.GetUnittestDataDir(), 'frame%d.png' % i))
self.assertTrue(expected_bitmap.IsEqual(bmp))
| bsd-3-clause |
hubert667/AIR | build/scripts-2.7/summarize-learning-experiment.py | 1 | 3971 | #!/usr/bin/python
# This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
# KH, 2012/06/25
"""
Summarizes the output of an online learning experiment.
"""
try:
from include import *
except:
pass
import argparse
import gzip
import yaml
from numpy import cumsum, mean, std, zeros
# parse arguments
parser = argparse.ArgumentParser(
prog="python summarize-learning-experiment.py",
description="Summarize the output of an online learning experiment.")
parser.add_argument("-g", "--discount_factor", type=float, default=0.995,
help="Discount factor to apply when evaluating online performance.")
parser.add_argument("-f", "--fold_dirs", nargs="+", required=True,
help="List all directories that contain runs of different folds for the "
"current data set. Results will be averaged over all folds and runs.")
parser.add_argument("-s", "--file_ext", default="txt.gz",
help="File extension of the files in which run results are stored.")
parser.add_argument("-o", "--output_base", #required=True,
help="Filebase for output files. Output will be stored in OUTPUT_BASE.txt"
" (numbers) and OUTPUT_BASE_(online|offline).pdf (plots).")
args = parser.parse_args()
is_initialized = False
agg_online_ndcg = None
agg_offline_ndcg = None
count_queries = 0
count_runs = 0
# for each fold and run
for fold in args.fold_dirs:
for filename in sorted(os.listdir(fold)):
if not filename.endswith(args.file_ext):
continue
filename = os.path.join(fold, filename)
if os.path.getsize(filename) == 0:
continue
if filename.endswith(".gz"):
fh = gzip.open(filename, "r")
else:
fh = open(filename, "r")
print >> sys.stderr, "Processing %s" % filename
count_runs += 1
# read data from output file
data = yaml.load(fh)
fh.close()
if not is_initialized:
count_queries = len(data["online_evaluation.NdcgEval"])
agg_online_ndcg = [ [] for i in range(count_queries) ]
agg_offline_ndcg = [ [] for i in range(count_queries) ]
is_initialized = True
# aggregate (and apply discounting)
# (i is the index of the query, i.e., i=3 means performance after the
# third query has been observed), the second index points to
# the run id
for i, value in enumerate(data["online_evaluation.NdcgEval"]):
prev = 0.0
if i > 0:
prev = agg_online_ndcg[i-1][-1]
# discount + cumsum
agg_online_ndcg[i].append(prev + args.discount_factor**i * value)
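        # Worked example of the discount + cumsum above (hypothetical values,
        # default discount_factor 0.995): per-query nDCG [0.5, 0.6] yields the
        # cumulative series [0.5, 0.5 + 0.995 * 0.6] = [0.5, 1.097].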
for i, value in enumerate(data["offline_test_evaluation.NdcgEval"]):
agg_offline_ndcg[i].append(value)
print >> sys.stderr, "Computing results for up to %d queries." % count_queries
print >> sys.stderr, "Averaging over %d folds and runs." % count_runs
# output gnuplot file:
# QUERY_COUNT OFFLINE_MEAN OFFLINE_STD ONLINE_MEAN ONLINE_STD
if args.output_base:
out_filename = "%s.txt" % args.output_base
out_fh = open(out_filename, "w")
else:
out_fh = sys.stdout
for i in range(count_queries):
print >> out_fh, "%d %.5f %.5f %.5f %.5f" % (i,
mean(agg_offline_ndcg[i]), std(agg_offline_ndcg[i]),
mean(agg_online_ndcg[i]), std(agg_online_ndcg[i]))
if args.output_base:
out_fh.close()
| gpl-3.0 |
devendermishrajio/nova_test_latest | doc/ext/nova_todo.py | 64 | 3385 | # -*- coding: utf-8 -*-
# This is a hack of the builtin todo extension, to make the todo_list
# more user friendly.
from sphinx.ext.todo import *
import re
def _(s):
return s
def process_todo_nodes(app, doctree, fromdocname):
if not app.config['todo_include_todos']:
for node in doctree.traverse(todo_node):
node.parent.remove(node)
# Replace all todolist nodes with a list of the collected todos.
# Augment each todo with a backlink to the original location.
env = app.builder.env
if not hasattr(env, 'todo_all_todos'):
env.todo_all_todos = []
# remove the item that was added in the constructor, since I'm tired of
# reading through docutils for the proper way to construct an empty list
lists = []
for i in range(5):
lists.append(nodes.bullet_list("", nodes.Text('', '')))
lists[i].remove(lists[i][0])
lists[i]['classes'].append('todo_list')
for node in doctree.traverse(todolist):
if not app.config['todo_include_todos']:
node.replace_self([])
continue
for todo_info in env.todo_all_todos:
para = nodes.paragraph()
# Create a reference
newnode = nodes.reference('', '')
filename = env.doc2path(todo_info['docname'], base=None)
link = (_('%(filename)s, line %(line_info)d') %
{'filename': filename, 'line_info': todo_info['lineno']})
innernode = nodes.emphasis(link, link)
newnode['refdocname'] = todo_info['docname']
try:
newnode['refuri'] = app.builder.get_relative_uri(
fromdocname, todo_info['docname'])
newnode['refuri'] += '#' + todo_info['target']['refid']
except NoUri:
# ignore if no URI can be determined, e.g. for LaTeX output
pass
newnode.append(innernode)
para += newnode
para['classes'].append('todo_link')
todo_entry = todo_info['todo']
env.resolve_references(todo_entry, todo_info['docname'],
app.builder)
item = nodes.list_item('', para)
todo_entry[1]['classes'].append('details')
comment = todo_entry[1]
m = re.match(r"^P(\d)", comment.astext())
priority = 5
if m:
priority = int(m.group(1))
if priority < 0:
priority = 1
if priority > 5:
priority = 5
item['classes'].append('todo_p' + str(priority))
todo_entry['classes'].append('todo_p' + str(priority))
item.append(comment)
lists[priority - 1].insert(0, item)
node.replace_self(lists)
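    # Illustrative reading of the priority handling above: a todo whose text
    # starts with "P1" ends up with the 'todo_p1' CSS class, while a todo with
    # no leading "P<digit>" prefix falls back to 'todo_p5'.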
def setup(app):
app.add_config_value('todo_include_todos', False, False)
app.add_node(todolist)
app.add_node(todo_node,
html=(visit_todo_node, depart_todo_node),
latex=(visit_todo_node, depart_todo_node),
text=(visit_todo_node, depart_todo_node))
app.add_directive('todo', Todo)
app.add_directive('todolist', TodoList)
app.connect('doctree-read', process_todos)
app.connect('doctree-resolved', process_todo_nodes)
app.connect('env-purge-doc', purge_todos)
| apache-2.0 |
zuoshifan/instimager | imager/telescope.py | 1 | 33995 | import abc
import numpy as np
import visibility
from util import util
from util import typeutil
from cora.util import hputil, units
from caput import config
def in_range(arr, min, max):
"""Check if array entries are within the given range.
Parameters
----------
arr : np.ndarray
Array to check.
min, max : scalar or np.ndarray
Minimum and maximum values to test against. Values can be in arrays
broadcastable against `arr`.
Returns
-------
val : boolean
True if all entries are within range.
"""
return (arr >= min).all() and (arr < max).all()
def out_of_range(arr, min, max):
return not in_range(arr, min, max)
def map_half_plane(arr):
arr = np.where((arr[:, 0] < 0.0)[:, np.newaxis], -arr, arr)
arr = np.where(np.logical_and(arr[:, 0] == 0.0, arr[:, 1] < 0.0)[:, np.newaxis], -arr, arr)
return arr
def _merge_keyarray(keys1, keys2, mask1=None, mask2=None):
tmask1 = mask1 if mask1 is not None else np.ones_like(keys1, dtype=np.bool)
tmask2 = mask2 if mask2 is not None else np.ones_like(keys2, dtype=np.bool)
# Merge two groups of feed arrays
cmask = np.logical_and(tmask1, tmask2)
ckeys = _remap_keyarray(keys1 + 1.0J * keys2, mask=cmask)
if mask1 is None and mask2 is None:
return ckeys
else:
return ckeys, cmask
def _remap_keyarray(keyarray, mask=None):
# Look through an array of keys and attach integer labels to each
# equivalent classes of keys (also take into account masking).
if mask is None:
mask = np.ones(keyarray.shape, np.bool)
ind = np.where(mask)
un, inv = np.unique(keyarray[ind], return_inverse=True)
fmap = -1*np.ones(keyarray.shape, dtype=np.int)
fmap[ind] = np.arange(un.size)[inv]
return fmap
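# Small illustration (hypothetical input, not used elsewhere): for
#   keyarray = np.array([[1.5, 2.5], [2.5, 1.5]])
# the two distinct keys get the labels 0 and 1, so the result is
#   array([[0, 1], [1, 0]])
# and any masked-out entries are left at -1.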
def _get_indices(keyarray, mask=None):
# Return a pair of indices for each group of equivalent feed pairs
if mask is None:
mask = np.ones(keyarray.shape, np.bool)
wm = np.where(mask.ravel())[0]
keysflat = keyarray.ravel()[wm]
un, ind = np.unique(keysflat, return_index=True)
# CHANGE: np (< 1.6) does not support multiple indices in np.unravel_index
#upairs = np.array(np.unravel_index(wm[ind], keyarray.shape)).T
upairs = np.array([np.unravel_index(i1, keyarray.shape) for i1 in wm[ind] ])
#return np.sort(upairs, axis=-1) # Sort to ensure we are in upper triangle
return upairs
def max_lm(baselines, wavelengths, uwidth, vwidth=0.0):
"""Get the maximum (l,m) that a baseline is sensitive to.
Parameters
----------
baselines : np.ndarray
An array of baselines.
wavelengths : np.ndarray
An array of wavelengths.
uwidth : np.ndarray
Width of the receiver in the u-direction.
vwidth : np.ndarray
Width of the receiver in the v-direction.
Returns
-------
lmax, mmax : array_like
"""
umax = (np.abs(baselines[:, 0]) + uwidth) / wavelengths
vmax = (np.abs(baselines[:, 1]) + vwidth) / wavelengths
mmax = np.ceil(2 * np.pi * umax).astype(np.int64)
lmax = np.ceil((mmax**2 + (2*np.pi*vmax)**2)**0.5).astype(np.int64)
return lmax, mmax
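# Illustrative sketch (hypothetical numbers, not part of the original module):
# a single 20 m east-west baseline observed at a 0.5 m wavelength with a 2 m
# receiver width gives umax = (20 + 2) / 0.5 = 44, hence
#   >>> lmax, mmax = max_lm(np.array([[20.0, 0.0]]), np.array([0.5]), 2.0)
#   >>> int(lmax[0]), int(mmax[0])
#   (277, 277)
# since mmax = ceil(2 * pi * 44) = 277 and vmax = 0.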
def latlon_to_sphpol(latlon):
zenith = np.array([np.pi / 2.0 - np.radians(latlon[0]),
np.remainder(np.radians(latlon[1]), 2*np.pi)])
return zenith
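# For reference (values rounded, hypothetical site): latitude 45 N, longitude 0
# maps to a zenith of approximately [pi / 4, 0.0] in the (theta, phi)
# convention used above, i.e.
#   latlon_to_sphpol([45.0, 0.0]) -> array([0.7854, 0.0])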
class TransitTelescope(config.Reader):
"""Base class for simulating any transit interferometer.
This is an abstract class, and several methods must be implemented before it
is usable. These are:
* `feedpositions` - a property which contains the positions of all the feeds
* `_get_unique` - calculates which baselines are identical
* `_transfer_single` - calculate the beam transfer for a single baseline+freq
* `_make_matrix_array` - makes an array of the right size to hold the
transfer functions
* `_copy_transfer_into_single` - copy a single transfer matrix into a
collection.
The last two are required for supporting polarised beam functions.
Properties
----------
freq_lower, freq_higher : scalar
The lower / upper bound of the lowest / highest frequency bands.
num_freq : scalar
The number of frequency bands (only use for setting up the frequency
binning). Generally using `nfreq` is preferred.
tsys_flat : scalar
The system temperature (in K). Override `tsys` for anything more
sophisticated.
positive_m_only: boolean
Whether to only deal with half the `m` range. In many cases we are
much less sensitive to negative-m (depending on the hemisphere, and
baseline alignment). This does not affect the beams calculated, only
how they're used in further calculation. Default: False
minlength, maxlength : scalar
Minimum and maximum baseline lengths to include (in metres).
"""
__metaclass__ = abc.ABCMeta # Enforce Abstract class
# zenith = config.Property(proptype=latlon_to_sphpol, default=[45.0, 0.0])
zenith = config.Property(proptype=latlon_to_sphpol, default=[45.0, 10.0])
freq_lower = config.Property(proptype=typeutil.nonnegative_float, default=400.0)
freq_upper = config.Property(proptype=typeutil.nonnegative_float, default=800.0)
num_freq = config.Property(proptype=typeutil.positive_int, default=50)
tsys_flat = config.Property(proptype=typeutil.nonnegative_float, default=50.0, key='tsys')
ndays = config.Property(proptype=typeutil.positive_int, default=733)
accuracy_boost = config.Property(proptype=typeutil.positive_float, default=1.0)
l_boost = config.Property(proptype=typeutil.positive_float, default=1.0)
minlength = config.Property(proptype=typeutil.nonnegative_float, default=0.0)
maxlength = config.Property(proptype=typeutil.nonnegative_float, default=1.0e7)
auto_correlations = config.Property(proptype=bool, default=False)
# def __init__(self, latitude=45, longitude=0):
# """Initialise a telescope object.
# Parameters
# ----------
# latitude, longitude : scalar
# Position on the Earths surface of the telescope (in degrees).
# """
# # NOTE: latlon_to_sphpol is automatically applied on assignment
# self.zenith = [latitude, longitude]
def __getstate__(self):
state = self.__dict__.copy()
#delkeys = ['_baselines', '_redundancy', '_frequencies'] + self._extdelkeys
for key in self.__dict__:
#if (key in delkeys) or (key[0] == "_"):
if (key[0] == "_"):
del state[key]
return state
#========= Properties related to baselines =========
_baselines = None
@property
def baselines(self):
"""The unique baselines in the telescope. Packed as [[u1, v1], [u2, v2], ...]."""
        if self._baselines is None:
self.calculate_feedpairs()
return self._baselines
_redundancy = None
@property
def redundancy(self):
"""The redundancy of each baseline (corresponds to entries in
cyl.baselines)."""
        if self._redundancy is None:
self.calculate_feedpairs()
return self._redundancy
@property
def nbase(self):
"""The number of unique baselines."""
return self.npairs
@property
def npairs(self):
"""The number of unique feed pairs."""
return self.uniquepairs.shape[0]
_uniquepairs = None
@property
def uniquepairs(self):
"""An (npairs, 2) array of the feed pairs corresponding to each baseline."""
        if self._uniquepairs is None:
self.calculate_feedpairs()
return self._uniquepairs
_feedmap = None
@property
def feedmap(self):
"""An (nfeed, nfeed) array giving the mapping between feedpairs and
the calculated baselines. Each entry is an index into the arrays of unique pairs."""
        if self._feedmap is None:
self.calculate_feedpairs()
return self._feedmap
_feedmask = None
@property
def feedmask(self):
"""An (nfeed, nfeed) array giving the entries that have been
calculated. This allows to mask out pairs we want to ignore."""
        if self._feedmask is None:
self.calculate_feedpairs()
return self._feedmask
_feedconj = None
@property
def feedconj(self):
"""An (nfeed, nfeed) array giving the feed pairs which must be complex
conjugated."""
        if self._feedconj is None:
self.calculate_feedpairs()
return self._feedconj
#===================================================
#======== Properties related to frequencies ========
_frequencies = None
@property
def frequencies(self):
"""The centre of each frequency band (in MHz)."""
        if self._frequencies is None:
self.calculate_frequencies()
return self._frequencies
def calculate_frequencies(self):
#self._frequencies = np.linspace(self.freq_lower, self.freq_upper, self.num_freq)
self._frequencies = self.freq_lower + (np.arange(self.num_freq) + 0.5) * ((self.freq_upper - self.freq_lower) / self.num_freq)
@property
def wavelengths(self):
"""The central wavelength of each frequency band (in metres)."""
return units.c / (1e6 * self.frequencies)
@property
def nfreq(self):
"""The number of frequency bins."""
return self.frequencies.shape[0]
#===================================================
#======== Properties related to the feeds ==========
@property
def nfeed(self):
"""The number of feeds."""
return self.feedpositions.shape[0]
#===================================================
#======= Properties related to polarisation ========
@property
def num_pol_sky(self):
"""The number of polarisation combinations on the sky that we are
considering. Should be either 1 (T=I only), 3 (T, Q, U) or 4 (T, Q, U and V).
"""
return self._npol_sky_
#===================================================
#===== Properties related to harmonic spread =======
@property
def lmax(self):
"""The maximum l the telescope is sensitive to."""
lmax, mmax = max_lm(self.baselines, self.wavelengths[-1], self.u_width, self.v_width)
return int(np.ceil(lmax.max() * self.l_boost))
@property
def mmax(self):
"""The maximum m the telescope is sensitive to."""
lmax, mmax = max_lm(self.baselines, self.wavelengths[-1], self.u_width, self.v_width)
return int(np.ceil(mmax.max() * self.l_boost))
#===================================================
#== Methods for calculating the unique baselines ===
def calculate_feedpairs(self):
"""Calculate all the unique feedpairs and their redundancies, and set
the internal state of the object.
"""
# Get unique pairs, and create mapping arrays
self._feedmap, self._feedmask = self._get_unique()
# Identify conjugate pairs
self._feedconj = np.tril(np.ones_like(self._feedmap), -1).astype(np.bool)
# Reorder and conjugate baselines such that the default feedpair
# points W->E (to ensure we use positive-m)
self._make_ew()
# Sort baselines into order
self._sort_pairs()
# Create mask of included pairs, that are not conjugated
tmask = np.logical_and(self._feedmask, np.logical_not(self._feedconj))
self._uniquepairs = _get_indices(self._feedmap, mask=tmask)
self._redundancy = np.bincount(self._feedmap[np.where(tmask)]) # Triangle mask to avoid double counting
self._baselines = self.feedpositions[self._uniquepairs[:, 0]] - self.feedpositions[self._uniquepairs[:, 1]]
def _make_ew(self):
# Reorder baselines pairs, such that the baseline vector always points E (or pure N)
tmask = np.logical_and(self._feedmask, np.logical_not(self._feedconj))
uniq = _get_indices(self._feedmap, mask=tmask)
for i in range(uniq.shape[0]):
sep = self.feedpositions[uniq[i, 0]] - self.feedpositions[uniq[i, 1]]
if sep[0] < 0.0 or (sep[0] == 0.0 and sep[1] < 0.0):
# Reorder feed pairs and conjugate mapping
# self._uniquepairs[i, 1], self._uniquepairs[i, 0] = self._uniquepairs[i, 0], self._uniquepairs[i, 1]
self._feedconj = np.where(self._feedmap == i, np.logical_not(self._feedconj), self._feedconj)
def _unique_baselines(self):
"""Map of equivalent baseline lengths, and mask of ones to exclude.
"""
# Construct array of indices
fshape = [self.nfeed, self.nfeed]
f_ind = np.indices(fshape)
# Construct array of baseline separations in complex representation
bl1 = (self.feedpositions[f_ind[0]] - self.feedpositions[f_ind[1]])
bl2 = np.around(bl1[..., 0] + 1.0J * bl1[..., 1], 7)
# Construct array of baseline lengths
blen = np.sum(bl1**2, axis=-1)**0.5
# Create mask of included baselines
mask = np.logical_and(blen >= self.minlength, blen <= self.maxlength)
# Remove the auto correlated baselines between all polarisations
if not self.auto_correlations:
mask = np.logical_and(blen > 0.0, mask)
return _remap_keyarray(bl2, mask), mask
def _unique_beams(self):
"""Map of unique beam pairs, and mask of ones to exclude.
"""
# Construct array of indices
fshape = [self.nfeed, self.nfeed]
bci, bcj = np.broadcast_arrays(self.beamclass[:, np.newaxis], self.beamclass[np.newaxis, :])
beam_map = _merge_keyarray(bci, bcj)
if self.auto_correlations:
beam_mask = np.ones(fshape, dtype=np.bool)
else:
beam_mask = np.logical_not(np.identity(self.nfeed, dtype=np.bool))
return beam_map, beam_mask
def _get_unique(self):
"""Calculate the unique baseline pairs.
All feeds are assumed to be identical. Baselines are identified if
they have the same length, and are selected such that they point East
(to ensure that the sensitivity ends up in positive-m modes).
It is also possible to select only baselines within a particular
length range by setting the `minlength` and `maxlength` properties.
        Returns
        -------
        feedmap : np.ndarray
            An (nfeed, nfeed) array mapping each pair of feeds to the index of
            its equivalence class of baselines/beams.
        feedmask : np.ndarray
            An (nfeed, nfeed) boolean array marking which feed pairs are
            included.
"""
# Fetch and merge map of unique feed pairs
base_map, base_mask = self._unique_baselines()
beam_map, beam_mask = self._unique_beams()
comb_map, comb_mask = _merge_keyarray(base_map, beam_map, mask1=base_mask, mask2=beam_mask)
# Take into account conjugation by identifying
comb_map = np.dstack((comb_map, comb_map.T)).min(axis=-1)
comb_map = _remap_keyarray(comb_map, comb_mask)
return comb_map, comb_mask
def _sort_pairs(self):
"""Re-order keys into a desired sort order.
By default the order is lexicographic in (baseline u, baselines v,
beamclass i, beamclass j).
"""
# Create mask of included pairs, that are not conjugated
tmask = np.logical_and(self._feedmask, np.logical_not(self._feedconj))
uniq = _get_indices(self._feedmap, mask=tmask)
fi, fj = uniq[:, 0], uniq[:, 1]
# Fetch keys by which to sort (lexicographically)
bx = self.feedpositions[fi, 0] - self.feedpositions[fj, 0]
by = self.feedpositions[fi, 1] - self.feedpositions[fj, 1]
ci = self.beamclass[fi]
cj = self.beamclass[fj]
## Sort by constructing a numpy array with the keys as fields, and use
## np.argsort to get the indices
# Create array of keys to sort
dt = np.dtype('f8,f8,i4,i4')
sort_arr = np.zeros(fi.size, dtype=dt)
sort_arr['f0'] = bx
sort_arr['f1'] = by
sort_arr['f2'] = cj
sort_arr['f3'] = ci
# Get map which sorts
sort_ind = np.argsort(sort_arr)
# Invert mapping
tmp_sort_ind = sort_ind.copy()
sort_ind[tmp_sort_ind] = np.arange(sort_ind.size)
# Remap feedmap entries
fm_copy = self._feedmap.copy()
wmask = np.where(self._feedmask)
fm_copy[wmask] = sort_ind[self._feedmap[wmask]]
self._feedmap = fm_copy
#===================================================
#==== Methods for calculating Transfer matrices ====
def transfer_matrices(self, bl_indices, f_indices, global_lmax = True):
"""Calculate the spherical harmonic transfer matrices for baseline and
frequency combinations.
Parameters
----------
bl_indices : array_like
Indices of baselines to calculate.
f_indices : array_like
Indices of frequencies to calculate. Must be broadcastable against
`bl_indices`.
global_lmax : boolean, optional
If set (default), the output size `lside` in (l,m) is big enough to
hold the maximum for the entire telescope. If not set it is only big
enough for the requested set.
Returns
-------
transfer : np.ndarray, dtype=np.complex128
An array containing the transfer functions. The shape is somewhat
complicated, the first indices correspond to the broadcast size of
`bl_indices` and `f_indices`, then there may be some polarisation
indices, then finally the (l,m) indices, range (lside, 2*lside-1).
"""
# Broadcast arrays against each other
bl_indices, f_indices = np.broadcast_arrays(bl_indices, f_indices)
## Check indices are all in range
if out_of_range(bl_indices, 0, self.npairs):
raise Exception("Baseline indices aren't valid")
if out_of_range(f_indices, 0, self.nfreq):
raise Exception("Frequency indices aren't valid")
# Fetch the set of lmax's for the baselines (in order to reduce time
# regenerating Healpix maps)
lmax, mmax = np.ceil(self.l_boost * np.array(max_lm(self.baselines[bl_indices], self.wavelengths[f_indices], self.u_width, self.v_width))).astype(np.int64)
#lmax, mmax = lmax * self.l_boost, mmax * self.l_boost
# Set the size of the (l,m) array to write into
lside = self.lmax if global_lmax else lmax.max()
# Generate the array for the Transfer functions
tshape = bl_indices.shape + (self.num_pol_sky, lside+1, 2*lside+1)
print "Size: %i elements. Memory %f GB." % (np.prod(tshape), 2*np.prod(tshape) * 8.0 / 2**30)
tarray = np.zeros(tshape, dtype=np.complex128)
# Sort the baselines by ascending lmax and iterate through in that
# order, calculating the transfer matrices
for iflat in np.argsort(lmax.flat):
ind = np.unravel_index(iflat, lmax.shape)
trans = self._transfer_single(bl_indices[ind], f_indices[ind], lmax[ind], lside)
## Iterate over pol combinations and copy into transfer array
for pi in range(self.num_pol_sky):
islice = (ind + (pi,) + (slice(None),slice(None)))
tarray[islice] = trans[pi]
return tarray
def transfer_for_frequency(self, freq):
"""Fetch all transfer matrices for a given frequency.
Parameters
----------
freq : integer
The frequency index.
Returns
-------
transfer : np.ndarray
The transfer matrices. Packed as in `TransitTelescope.transfer_matrices`.
"""
bi = np.arange(self.npairs)
fi = freq * np.ones_like(bi)
return self.transfer_matrices(bi, fi)
def transfer_for_baseline(self, baseline):
"""Fetch all transfer matrices for a given baseline.
Parameters
----------
baseline : integer
The baseline index.
Returns
-------
transfer : np.ndarray
The transfer matrices. Packed as in `TransitTelescope.transfer_matrices`.
"""
fi = np.arange(self.nfreq)
bi = baseline * np.ones_like(fi)
return self.transfer_matrices(bi, fi)
#===================================================
#======== Noise properties of the telescope ========
def tsys(self, f_indices = None):
"""The system temperature.
        Currently has a flat T_sys across the whole bandwidth. Override for
anything more complicated.
Parameters
----------
f_indices : array_like
Indices of frequencies to get T_sys at.
Returns
-------
tsys : array_like
System temperature at requested frequencies.
"""
        if f_indices is None:
freq = self.frequencies
else:
freq = self.frequencies[f_indices]
return np.ones_like(freq) * self.tsys_flat
def noisepower(self, bl_indices, f_indices, ndays = None):
"""Calculate the instrumental noise power spectrum.
Assume we are still within the regime where the power spectrum is white
in `m` modes.
Parameters
----------
bl_indices : array_like
Indices of baselines to calculate.
f_indices : array_like
Indices of frequencies to calculate. Must be broadcastable against
`bl_indices`.
ndays : integer
The number of sidereal days observed.
Returns
-------
noise_ps : np.ndarray
The noise power spectrum.
"""
ndays = self.ndays if not ndays else ndays # Set to value if not set.
# Broadcast arrays against each other
bl_indices, f_indices = np.broadcast_arrays(bl_indices, f_indices)
#bw = np.abs(self.frequencies[1] - self.frequencies[0]) * 1e6
bw = 1.0e6 * (self.freq_upper - self.freq_lower) / self.num_freq
delnu = units.t_sidereal * bw / (2*np.pi)
noisepower = self.tsys(f_indices)**2 / (2 * np.pi * delnu * ndays)
noisebase = noisepower / self.redundancy[bl_indices]
return noisebase
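    # Illustrative reading of the expression above (a sketch, not extra API):
    # with the default 400-800 MHz band split into 50 channels, each channel is
    # bw = 8 MHz wide, delnu = t_sidereal * bw / (2 * pi), and a baseline with
    # redundancy R gets a per-mode noise power of
    #   tsys**2 / (2 * pi * delnu * ndays * R).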
def noisepower_feedpairs(self, fi, fj, f_indices, m, ndays=None):
ndays = self.ndays if not ndays else ndays
bw = np.abs(self.frequencies[1] - self.frequencies[0]) * 1e6
delnu = units.t_sidereal * bw / (2*np.pi)
noisepower = self.tsys(f_indices)**2 / (2 * np.pi * delnu * ndays)
return np.ones_like(fi) * np.ones_like(fj) * np.ones_like(m) * noisepower / 2.0 # For unpolarised only at the moment.
#===================================================
_nside = None
def _init_trans(self, nside):
## Internal function for generating some common Healpix maps (position,
## horizon). These should need to be generated only when nside changes.
# Angular positions in healpix map of nside
self._nside = nside
self._angpos = hputil.ang_positions(nside)
# The horizon function
self._horizon = visibility.horizon(self._angpos, self.zenith)
#===================================================
#================ ABSTRACT METHODS =================
#===================================================
# Implement to specify feed positions in the telescope.
@abc.abstractproperty
def feedpositions(self):
"""An (nfeed,2) array of the feed positions relative to an arbitary point (in m)"""
return
# Implement to specify the beams of the telescope
@abc.abstractproperty
def beamclass(self):
"""An nfeed array of the class of each beam (identical labels are
considered to have identical beams)."""
return
# Implement to specify feed positions in the telescope.
@abc.abstractproperty
def u_width(self):
"""The approximate physical width (in the u-direction) of the dish/telescope etc, for
calculating the maximum (l,m)."""
return
# Implement to specify feed positions in the telescope.
@abc.abstractproperty
def v_width(self):
"""The approximate physical length (in the v-direction) of the dish/telescope etc, for
calculating the maximum (l,m)."""
return
# The work method which does the bulk of calculating all the transfer matrices.
@abc.abstractmethod
def _transfer_single(self, bl_index, f_index, lmax, lside):
"""Calculate transfer matrix for a single baseline+frequency.
**Abstract method** must be implemented.
Parameters
----------
bl_index : integer
The index of the baseline to calculate.
f_index : integer
The index of the frequency to calculate.
lmax : integer
The maximum *l* we are interested in. Determines accuracy of
spherical harmonic transforms.
lside : integer
The size of array to embed the transfer matrix within.
Returns
-------
transfer : np.ndarray
The transfer matrix, an array of shape (pol_indices, lside,
2*lside-1). Where the `pol_indices` are usually only present if
considering the polarised case.
"""
return
#===================================================
#============== END ABSTRACT METHODS ===============
#===================================================
class UnpolarisedTelescope(TransitTelescope):
"""A base for an unpolarised telescope.
Again, an abstract class, but the only things that require implementing are
the `feedpositions`, `_get_unique` and the `beam` function.
"""
__metaclass__ = abc.ABCMeta
_npol_sky_ = 1
@abc.abstractmethod
def beam(self, feed, freq):
"""Beam for a particular feed.
Parameters
----------
feed : integer
Index for the feed.
freq : integer
Index for the frequency.
Returns
-------
beam : np.ndarray
A Healpix map (of size self._nside) of the beam. Potentially
complex.
"""
return
#===== Implementations of abstract functions =======
def _beam_map_single(self, bl_index, f_index):
# Get beam maps for each feed.
feedi, feedj = self.uniquepairs[bl_index]
beami, beamj = self.beam(feedi, f_index), self.beam(feedj, f_index)
# Get baseline separation and fringe map.
uv = self.baselines[bl_index] / self.wavelengths[f_index]
fringe = visibility.fringe(self._angpos, self.zenith, uv)
# Beam solid angle (integrate over beam^2 - equal area pixels)
omega_A = (np.abs(beami) * np.abs(beamj) * self._horizon).sum() * (4*np.pi / beami.size)
# Calculate the complex visibility
cvis = self._horizon * fringe * beami * beamj / omega_A
return cvis
def _transfer_single(self, bl_index, f_index, lmax, lside):
if self._nside != hputil.nside_for_lmax(lmax, accuracy_boost=self.accuracy_boost):
self._init_trans(hputil.nside_for_lmax(lmax, accuracy_boost=self.accuracy_boost))
cvis = self._beam_map_single(bl_index, f_index)
# Perform the harmonic transform to get the transfer matrix (conj is correct - see paper)
btrans = hputil.sphtrans_complex(cvis.conj(), centered = False, lmax = lmax, lside=lside).conj()
return [ btrans ]
#===================================================
def noisepower(self, bl_indices, f_indices, ndays = None):
"""Calculate the instrumental noise power spectrum.
Assume we are still within the regime where the power spectrum is white
in `m` modes.
Parameters
----------
bl_indices : array_like
Indices of baselines to calculate.
f_indices : array_like
Indices of frequencies to calculate. Must be broadcastable against
`bl_indices`.
ndays : integer
The number of sidereal days observed.
Returns
-------
noise_ps : np.ndarray
The noise power spectrum.
"""
bnoise = TransitTelescope.noisepower(self, bl_indices, f_indices, ndays)
return bnoise[..., np.newaxis] * 0.5 # Correction for unpolarisedness
class PolarisedTelescope(TransitTelescope):
"""A base for a polarised telescope.
Again, an abstract class, but the only things that require implementing are
the `feedpositions`, `_get_unique` and the beam functions `beamx` and `beamy`.
Abstract Methods
----------------
beamx, beamy : methods
Routines giving the field pattern for the x and y feeds.
"""
__metaclass__ = abc.ABCMeta
_npol_sky_ = 4
def _beam_map_single(self, bl_index, f_index):
p_stokes = [ 0.5 * np.array([[1.0, 0.0], [0.0, 1.0]]),
0.5 * np.array([[1.0, 0.0], [0.0, -1.0]]),
0.5 * np.array([[0.0, 1.0], [1.0, 0.0]]),
0.5 * np.array([[0.0, -1.0J], [1.0J, 0.0]]) ]
# Get beam maps for each feed.
feedi, feedj = self.uniquepairs[bl_index]
beami, beamj = self.beam(feedi, f_index), self.beam(feedj, f_index)
# Get baseline separation and fringe map.
uv = self.baselines[bl_index] / self.wavelengths[f_index]
fringe = visibility.fringe(self._angpos, self.zenith, uv)
pow_stokes = [ np.sum(beami * np.dot(beamj, polproj), axis=1) * self._horizon for polproj in p_stokes]
pxarea = (4*np.pi / beami.shape[0])
om_i = np.sum(np.abs(beami)**2 * self._horizon[:, np.newaxis]) * pxarea
om_j = np.sum(np.abs(beamj)**2 * self._horizon[:, np.newaxis]) * pxarea
omega_A = (om_i * om_j)**0.5
cv_stokes = [ p * (2 * fringe / omega_A) for p in pow_stokes ]
return cv_stokes
#===== Implementations of abstract functions =======
def _transfer_single(self, bl_index, f_index, lmax, lside):
if self._nside != hputil.nside_for_lmax(lmax):
self._init_trans(hputil.nside_for_lmax(lmax))
bmap = self._beam_map_single(bl_index, f_index)
btrans = [ pb.conj() for pb in hputil.sphtrans_complex_pol([bm.conj() for bm in bmap], centered = False, lmax = int(lmax), lside=lside) ]
return btrans
#===================================================
class SimpleUnpolarisedTelescope(UnpolarisedTelescope):
"""A base for a unpolarised telescope.
Again, an abstract class, but the only things that require implementing are
the `feedpositions`, `_get_unique` and the beam functions `beamx` and `beamy`.
Abstract Methods
----------------
beam : method
Routines giving the field pattern for the feeds.
"""
__metaclass__ = abc.ABCMeta
@property
def beamclass(self):
"""Simple beam mode of single polarisation feeds."""
return np.zeros(self._single_feedpositions.shape[0], dtype=np.int)
@abc.abstractproperty
def _single_feedpositions(self):
"""An (nfeed,2) array of the feed positions relative to an arbitrary point (in m)"""
return
@property
def feedpositions(self):
return self._single_feedpositions
class SimplePolarisedTelescope(PolarisedTelescope):
"""A base for a polarised telescope.
Again, an abstract class, but the only things that require implementing are
the `feedpositions`, `_get_unique` and the beam functions `beamx` and `beamy`.
Abstract Methods
----------------
beamx, beamy : methods
Routines giving the field pattern for the x and y feeds.
"""
__metaclass__ = abc.ABCMeta
@property
def beamclass(self):
"""Simple beam mode of dual polarisation feeds."""
nsfeed = self._single_feedpositions.shape[0]
return np.concatenate((np.zeros(nsfeed), np.ones(nsfeed))).astype(np.int)
def beam(self, feed, freq):
if self.beamclass[feed] % 2 == 0:
return self.beamx(feed, freq)
else:
return self.beamy(feed, freq)
@abc.abstractproperty
def _single_feedpositions(self):
"""An (nfeed,2) array of the feed positions relative to an arbitrary point (in m)"""
return
@property
def feedpositions(self):
return np.concatenate((self._single_feedpositions, self._single_feedpositions))
@abc.abstractmethod
def beamx(self, feed, freq):
"""Beam for the X polarisation feed.
Parameters
----------
feed : integer
Index for the feed.
freq : integer
Index for the frequency.
Returns
-------
beam : np.ndarray
Healpix maps (of size [self._nside, 2]) of the field pattern in the
theta and phi directions.
"""
@abc.abstractmethod
def beamy(self, feed, freq):
"""Beam for the Y polarisation feed.
Parameters
----------
feed : integer
Index for the feed.
freq : integer
Index for the frequency.
Returns
-------
beam : np.ndarray
Healpix maps (of size [self._nside, 2]) of the field pattern in the
theta and phi directions.
"""
| gpl-2.0 |
you21979/phantomjs | src/breakpad/src/tools/gyp/test/sibling/gyptest-all.py | 151 | 1061 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('build/all.gyp', chdir='src')
test.build('build/all.gyp', test.ALL, chdir='src')
chdir = 'src/build'
# The top-level Makefile is in the directory where gyp was run.
# TODO(mmoss) Should the Makefile go in the directory of the passed in .gyp
# file? What about when passing in multiple .gyp files? Would sub-project
# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
if test.format == 'make':
chdir = 'src'
if test.format == 'xcode':
chdir = 'src/prog1'
test.run_built_executable('prog1',
chdir=chdir,
stdout="Hello from prog1.c\n")
if test.format == 'xcode':
chdir = 'src/prog2'
test.run_built_executable('prog2',
chdir=chdir,
stdout="Hello from prog2.c\n")
test.pass_test()
| bsd-3-clause |
ovresko/erpnext | erpnext/education/doctype/fee_schedule/fee_schedule.py | 7 | 5324 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
from frappe.utils import money_in_words
from frappe.utils import cint, flt, cstr
from frappe.utils.background_jobs import enqueue
from frappe import _
class FeeSchedule(Document):
def onload(self):
info = self.get_dashboard_info()
self.set_onload('dashboard_info', info)
def get_dashboard_info(self):
info = {
"total_paid": 0,
"total_unpaid": 0,
"currency": erpnext.get_company_currency(self.company)
}
fees_amount = frappe.db.sql("""select sum(grand_total), sum(outstanding_amount) from tabFees
where fee_schedule=%s and docstatus=1""", (self.name))
if fees_amount:
info["total_paid"] = flt(fees_amount[0][0]) - flt(fees_amount[0][1])
info["total_unpaid"] = flt(fees_amount[0][1])
return info
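		# For instance (illustrative numbers only): submitted Fees summing to a
		# grand_total of 1000 with an outstanding_amount of 400 are reported as
		# total_paid=600 and total_unpaid=400.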
def validate(self):
self.calculate_total_and_program()
def calculate_total_and_program(self):
no_of_students = 0
for d in self.student_groups:
# if not d.total_students:
d.total_students = get_total_students(d.student_group, self.academic_year,
self.academic_term, self.student_category)
no_of_students += cint(d.total_students)
# validate the program of fee structure and student groups
student_group_program = frappe.db.get_value("Student Group", d.student_group, "program")
if self.program and student_group_program and self.program != student_group_program:
frappe.msgprint(_("Program in the Fee Structure and Student Group {0} are different.")
.format(d.student_group))
self.grand_total = no_of_students*self.total_amount
self.grand_total_in_words = money_in_words(self.grand_total)
def create_fees(self):
self.db_set("fee_creation_status", "In Process")
frappe.publish_realtime("fee_schedule_progress",
{"progress": "0", "reload": 1}, user=frappe.session.user)
total_records = sum([int(d.total_students) for d in self.student_groups])
if total_records > 10:
frappe.msgprint(_('''Fee records will be created in the background.
In case of any error the error message will be updated in the Schedule.'''))
enqueue(generate_fee, queue='default', timeout=6000, event='generate_fee',
fee_schedule=self.name)
else:
generate_fee(self.name)
def generate_fee(fee_schedule):
doc = frappe.get_doc("Fee Schedule", fee_schedule)
error = False
total_records = sum([int(d.total_students) for d in doc.student_groups])
created_records = 0
if not total_records:
frappe.throw(_("Please setup Students under Student Groups"))
for d in doc.student_groups:
students = get_students(d.student_group, doc.academic_year, doc.academic_term, doc.student_category)
for student in students:
try:
fees_doc = get_mapped_doc("Fee Schedule", fee_schedule, {
"Fee Schedule": {
"doctype": "Fees",
"field_map": {
"name": "Fee Schedule"
}
}
})
fees_doc.student = student.student
fees_doc.student_name = student.student_name
fees_doc.program = student.program
fees_doc.student_batch = student.student_batch_name
fees_doc.send_payment_request = doc.send_email
fees_doc.save()
fees_doc.submit()
created_records += 1
frappe.publish_realtime("fee_schedule_progress", {"progress": str(int(created_records * 100/total_records))}, user=frappe.session.user)
except Exception as e:
error = True
err_msg = frappe.local.message_log and "\n\n".join(frappe.local.message_log) or cstr(e)
if error:
frappe.db.rollback()
frappe.db.set_value("Fee Schedule", fee_schedule, "fee_creation_status", "Failed")
frappe.db.set_value("Fee Schedule", fee_schedule, "error_log", err_msg)
else:
frappe.db.set_value("Fee Schedule", fee_schedule, "fee_creation_status", "Successful")
frappe.db.set_value("Fee Schedule", fee_schedule, "error_log", None)
frappe.publish_realtime("fee_schedule_progress",
{"progress": "100", "reload": 1}, user=frappe.session.user)
def get_students(student_group, academic_year, academic_term=None, student_category=None):
conditions = ""
if student_category:
conditions = " and pe.student_category='{}'".format(frappe.db.escape(student_category))
if academic_term:
conditions = " and pe.academic_term='{}'".format(frappe.db.escape(academic_term))
students = frappe.db.sql("""
select pe.student, pe.student_name, pe.program, pe.student_batch_name
from `tabStudent Group Student` sgs, `tabProgram Enrollment` pe
where
pe.student = sgs.student and pe.academic_year = %s
and sgs.parent = %s and sgs.active = 1
{conditions}
""".format(conditions=conditions), (academic_year, student_group), as_dict=1)
return students
@frappe.whitelist()
def get_total_students(student_group, academic_year, academic_term=None, student_category=None):
total_students = get_students(student_group, academic_year, academic_term, student_category)
return len(total_students)
@frappe.whitelist()
def get_fee_structure(source_name,target_doc=None):
fee_request = get_mapped_doc("Fee Structure", source_name,
{"Fee Structure": {
"doctype": "Fee Schedule"
}}, ignore_permissions=True)
return fee_request
| gpl-3.0 |
iiisthu/sparkSdn | python/pyspark/conf.py | 4 | 5745 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
>>> from pyspark.conf import SparkConf
>>> from pyspark.context import SparkContext
>>> conf = SparkConf()
>>> conf.setMaster("local").setAppName("My app")
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.master")
u'local'
>>> conf.get("spark.app.name")
u'My app'
>>> sc = SparkContext(conf=conf)
>>> sc.master
u'local'
>>> sc.appName
u'My app'
>>> sc.sparkHome == None
True
>>> conf = SparkConf()
>>> conf.setSparkHome("/path")
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.home")
u'/path'
>>> conf.setExecutorEnv("VAR1", "value1")
<pyspark.conf.SparkConf object at ...>
>>> conf.setExecutorEnv(pairs = [("VAR3", "value3"), ("VAR4", "value4")])
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.executorEnv.VAR1")
u'value1'
>>> print conf.toDebugString()
spark.executorEnv.VAR1=value1
spark.executorEnv.VAR3=value3
spark.executorEnv.VAR4=value4
spark.home=/path
>>> sorted(conf.getAll(), key=lambda p: p[0])
[(u'spark.executorEnv.VAR1', u'value1'), (u'spark.executorEnv.VAR3', u'value3'), (u'spark.executorEnv.VAR4', u'value4'), (u'spark.home', u'/path')]
"""
class SparkConf(object):
"""
Configuration for a Spark application. Used to set various Spark
parameters as key-value pairs.
Most of the time, you would create a SparkConf object with
C{SparkConf()}, which will load values from C{spark.*} Java system
properties as well. In this case, any parameters you set directly on
the C{SparkConf} object take priority over system properties.
For unit tests, you can also call C{SparkConf(false)} to skip
loading external settings and get the same configuration no matter
what the system properties are.
All setter methods in this class support chaining. For example,
you can write C{conf.setMaster("local").setAppName("My app")}.
Note that once a SparkConf object is passed to Spark, it is cloned
and can no longer be modified by the user.
"""
def __init__(self, loadDefaults=True, _jvm=None):
"""
Create a new Spark configuration.
@param loadDefaults: whether to load values from Java system
properties (True by default)
@param _jvm: internal parameter used to pass a handle to the
Java VM; does not need to be set by users
"""
from pyspark.context import SparkContext
SparkContext._ensure_initialized()
_jvm = _jvm or SparkContext._jvm
self._jconf = _jvm.SparkConf(loadDefaults)
def set(self, key, value):
"""Set a configuration property."""
self._jconf.set(key, unicode(value))
return self
def setMaster(self, value):
"""Set master URL to connect to."""
self._jconf.setMaster(value)
return self
def setAppName(self, value):
"""Set application name."""
self._jconf.setAppName(value)
return self
def setSparkHome(self, value):
"""Set path where Spark is installed on worker nodes."""
self._jconf.setSparkHome(value)
return self
def setExecutorEnv(self, key=None, value=None, pairs=None):
"""Set an environment variable to be passed to executors."""
if (key != None and pairs != None) or (key == None and pairs == None):
raise Exception("Either pass one key-value pair or a list of pairs")
elif key != None:
self._jconf.setExecutorEnv(key, value)
elif pairs != None:
for (k, v) in pairs:
self._jconf.setExecutorEnv(k, v)
return self
def setAll(self, pairs):
"""
Set multiple parameters, passed as a list of key-value pairs.
@param pairs: list of key-value pairs to set
"""
for (k, v) in pairs:
self._jconf.set(k, v)
return self
def get(self, key, defaultValue=None):
"""Get the configured value for some key, or return a default otherwise."""
if defaultValue == None: # Py4J doesn't call the right get() if we pass None
if not self._jconf.contains(key):
return None
return self._jconf.get(key)
else:
return self._jconf.get(key, defaultValue)
def getAll(self):
"""Get all values as a list of key-value pairs."""
pairs = []
for elem in self._jconf.getAll():
pairs.append((elem._1(), elem._2()))
return pairs
def contains(self, key):
"""Does this configuration contain a given key?"""
return self._jconf.contains(key)
def toDebugString(self):
"""
Returns a printable version of the configuration, as a list of
key=value pairs, one per line.
"""
return self._jconf.toDebugString()
def _test():
import doctest
(failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
mfjb/scikit-learn | sklearn/feature_selection/rfe.py | 137 | 17066 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
absolute weights are the smallest are pruned from the current set features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
The following example shows how to retrieve the 5 right informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
            # for the sparse case, ranks is a matrix
            ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
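# Illustrative sketch (not part of scikit-learn): how the ``step`` parameter
# documented in the RFE docstring above maps to the number of features removed
# per elimination round, mirroring the logic in RFE._fit. The values are
# arbitrary examples.
def _example_step_interpretation(n_features=10):
    for step_param in (1, 3, 0.25):
        if 0.0 < step_param < 1.0:
            step = int(max(1, step_param * n_features))
        else:
            step = int(step_param)
        print("step=%r removes %d feature(s) per round" % (step_param, step))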
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
    The following example shows how to retrieve the 5 informative features
    (not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
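        # After summing over the folds, scores[i] is the aggregated CV score of
        # the i-th candidate subset, ordered from the smallest subset
        # (1 feature) up to the full feature set.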
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
        # Fix a normalization error: the loop variable 'n' equals len(cv) - 1
        # here, so the summed scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
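# Minimal usage sketch (not part of scikit-learn), mirroring the RFECV class
# docstring above: fit on the Friedman #1 data and inspect the cross-validated
# score recorded for each candidate subset size.
def _example_rfecv_usage():
    from sklearn.datasets import make_friedman1
    from sklearn.svm import SVR
    X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
    selector = RFECV(SVR(kernel="linear"), step=1, cv=5).fit(X, y)
    print(selector.n_features_)   # number of features kept
    print(selector.grid_scores_)  # one score per subset size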
| bsd-3-clause |
citrusleaf/dd-agent | utils/flare.py | 5 | 20834 | # stdlib
import atexit
import cStringIO as StringIO
from functools import partial
import glob
try:
import grp
except ImportError:
# The module only exists on Unix platforms
grp = None
import logging
import os
try:
import pwd
except ImportError:
# Same as above (exists on Unix platforms only)
pwd = None
import re
import stat
import subprocess
import sys
import tarfile
import tempfile
from time import strftime
import traceback
# 3p
import requests
# DD imports
from checks.check_status import CollectorStatus, DogstatsdStatus, ForwarderStatus
from config import (
check_yaml,
get_confd_path,
get_config,
get_config_path,
get_logging_config,
get_url_endpoint,
)
from jmxfetch import JMXFetch
from util import get_hostname
from utils.jmx import jmx_command, JMXFiles
from utils.platform import Platform
# Globals
log = logging.getLogger(__name__)
def configcheck():
all_valid = True
for conf_path in glob.glob(os.path.join(get_confd_path(), "*.yaml")):
basename = os.path.basename(conf_path)
try:
check_yaml(conf_path)
except Exception, e:
all_valid = False
print "%s contains errors:\n %s" % (basename, e)
else:
print "%s is valid" % basename
if all_valid:
print "All yaml files passed. You can now run the Datadog agent."
return 0
else:
print("Fix the invalid yaml files above in order to start the Datadog agent. "
"A useful external tool for yaml parsing can be found at "
"http://yaml-online-parser.appspot.com/")
return 1
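# Minimal usage sketch (not part of the agent; the command-line plumbing is
# assumed) for the Flare class defined just below: a typical flare run checks
# the caller's rights, then collects the files and uploads the archive.
def _example_flare_run(case_id=None):
    Flare.check_user_rights()
    flare = Flare(cmdline=True, case_id=case_id)
    flare.collect()
    flare.upload()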
class Flare(object):
"""
Compress all important logs and configuration files for debug,
and then send them to Datadog (which transfers them to Support)
"""
DATADOG_SUPPORT_URL = '/support/flare'
PASSWORD_REGEX = re.compile('( *(\w|_)*pass(word)?:).+')
COMMENT_REGEX = re.compile('^ *#.*')
APIKEY_REGEX = re.compile('^api_key: *\w+(\w{5})$')
REPLACE_APIKEY = r'api_key: *************************\1'
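    # APIKEY_REGEX/REPLACE_APIKEY redact the api_key value, keeping only its
    # last 5 characters.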
COMPRESSED_FILE = 'datadog-agent-{0}.tar.bz2'
# We limit to 10MB arbitrarily
MAX_UPLOAD_SIZE = 10485000
TIMEOUT = 60
def __init__(self, cmdline=False, case_id=None):
self._case_id = case_id
self._cmdline = cmdline
self._init_tarfile()
self._init_permissions_file()
self._save_logs_path()
self._config = get_config()
self._api_key = self._config.get('api_key')
self._url = "{0}{1}".format(
get_url_endpoint(self._config.get('dd_url'), endpoint_type='flare'),
self.DATADOG_SUPPORT_URL
)
self._hostname = get_hostname(self._config)
self._prefix = "datadog-{0}".format(self._hostname)
# On Unix system, check that the user is root (to call supervisorctl & status)
# Otherwise emit a warning, and ask for confirmation
@staticmethod
def check_user_rights():
if Platform.is_linux() and not os.geteuid() == 0:
log.warning("You are not root, some information won't be collected")
choice = raw_input('Are you sure you want to continue [y/N]? ')
if choice.strip().lower() not in ['yes', 'y']:
print 'Aborting'
sys.exit(1)
else:
log.warn('Your user has to have at least read access'
' to the logs and conf files of the agent')
# Collect all conf and logs files and compress them
def collect(self):
if not self._api_key:
raise Exception('No api_key found')
log.info("Collecting logs and configuration files:")
self._add_logs_tar()
self._add_conf_tar()
log.info(" * datadog-agent configcheck output")
self._add_command_output_tar('configcheck.log', configcheck)
log.info(" * datadog-agent status output")
self._add_command_output_tar('status.log', self._supervisor_status)
log.info(" * datadog-agent info output")
self._add_command_output_tar('info.log', self._info_all)
self._add_jmxinfo_tar()
log.info(" * pip freeze")
self._add_command_output_tar('freeze.log', self._pip_freeze,
command_desc="pip freeze --no-cache-dir")
log.info(" * log permissions on collected files")
self._permissions_file.close()
self._add_file_tar(self._permissions_file.name, 'permissions.log',
log_permissions=False)
log.info("Saving all files to {0}".format(self._tar_path))
self._tar.close()
# Upload the tar file
def upload(self):
self._check_size()
if self._cmdline:
self._ask_for_confirmation()
email = self._ask_for_email()
log.info("Uploading {0} to Datadog Support".format(self._tar_path))
url = self._url
if self._case_id:
url = '{0}/{1}'.format(self._url, str(self._case_id))
url = "{0}?api_key={1}".format(url, self._api_key)
files = {'flare_file': open(self._tar_path, 'rb')}
data = {
'case_id': self._case_id,
'hostname': self._hostname,
'email': email
}
self._resp = requests.post(url, files=files, data=data,
timeout=self.TIMEOUT)
self._analyse_result()
# Start by creating the tar file which will contain everything
def _init_tarfile(self):
# Default temp path
self._tar_path = os.path.join(
tempfile.gettempdir(),
self.COMPRESSED_FILE.format(strftime("%Y-%m-%d-%H-%M-%S"))
)
if os.path.exists(self._tar_path):
os.remove(self._tar_path)
self._tar = tarfile.open(self._tar_path, 'w:bz2')
# Create a file to log permissions on collected files and write header line
def _init_permissions_file(self):
self._permissions_file = tempfile.NamedTemporaryFile(mode='w', prefix='dd', delete=False)
if Platform.is_unix():
self._permissions_file_format = "{0:50} | {1:5} | {2:10} | {3:10}\n"
header = self._permissions_file_format.format("File path", "mode", "owner", "group")
self._permissions_file.write(header)
self._permissions_file.write('-'*len(header) + "\n")
else:
self._permissions_file.write("Not implemented: file permissions are only logged on Unix platforms")
# Save logs file paths
def _save_logs_path(self):
prefix = ''
if Platform.is_windows():
prefix = 'windows_'
config = get_logging_config()
self._collector_log = config.get('{0}collector_log_file'.format(prefix))
self._forwarder_log = config.get('{0}forwarder_log_file'.format(prefix))
self._dogstatsd_log = config.get('{0}dogstatsd_log_file'.format(prefix))
self._jmxfetch_log = config.get('jmxfetch_log_file')
# Add logs to the tarfile
def _add_logs_tar(self):
self._add_log_file_tar(self._collector_log)
self._add_log_file_tar(self._forwarder_log)
self._add_log_file_tar(self._dogstatsd_log)
self._add_log_file_tar(self._jmxfetch_log)
self._add_log_file_tar(
"{0}/*supervisord.log".format(os.path.dirname(self._collector_log))
)
def _add_log_file_tar(self, file_path):
for f in glob.glob('{0}*'.format(file_path)):
if self._can_read(f):
self._add_file_tar(
f,
os.path.join('log', os.path.basename(f))
)
# Collect all conf
def _add_conf_tar(self):
conf_path = get_config_path()
if self._can_read(conf_path):
self._add_file_tar(
self._strip_comment(conf_path),
os.path.join('etc', 'datadog.conf'),
original_file_path=conf_path
)
if not Platform.is_windows():
supervisor_path = os.path.join(
os.path.dirname(get_config_path()),
'supervisor.conf'
)
if self._can_read(supervisor_path):
self._add_file_tar(
self._strip_comment(supervisor_path),
os.path.join('etc', 'supervisor.conf'),
original_file_path=supervisor_path
)
for file_path in glob.glob(os.path.join(get_confd_path(), '*.yaml')) +\
glob.glob(os.path.join(get_confd_path(), '*.yaml.default')):
if self._can_read(file_path, output=False):
self._add_clean_confd(file_path)
# Collect JMXFetch-specific info and save to jmxinfo directory if jmx config
# files are present and valid
def _add_jmxinfo_tar(self):
_, _, should_run_jmx = self._capture_output(self._should_run_jmx)
if should_run_jmx:
# status files (before listing beans because executing jmxfetch overwrites status files)
for file_name, file_path in [
(JMXFiles._STATUS_FILE, JMXFiles.get_status_file_path()),
(JMXFiles._PYTHON_STATUS_FILE, JMXFiles.get_python_status_file_path())
]:
if self._can_read(file_path, warn=False):
self._add_file_tar(
file_path,
os.path.join('jmxinfo', file_name)
)
# beans lists
for command in ['list_matching_attributes', 'list_everything']:
log.info(" * datadog-agent jmx {0} output".format(command))
self._add_command_output_tar(
os.path.join('jmxinfo', '{0}.log'.format(command)),
partial(self._jmx_command_call, command)
)
# java version
log.info(" * java -version output")
_, _, java_bin_path = self._capture_output(
lambda: JMXFetch.get_configuration(get_confd_path())[2] or 'java')
self._add_command_output_tar(
os.path.join('jmxinfo', 'java_version.log'),
lambda: self._java_version(java_bin_path),
command_desc="{0} -version".format(java_bin_path)
)
# Add a file to the tar and append the file's rights to the permissions log (on Unix)
# If original_file_path is passed, the file_path will be added to the tar but the original file's
# permissions are logged
def _add_file_tar(self, file_path, target_path, log_permissions=True, original_file_path=None):
target_full_path = os.path.join(self._prefix, target_path)
if log_permissions and Platform.is_unix():
stat_file_path = original_file_path or file_path
file_stat = os.stat(stat_file_path)
# The file mode is returned in binary format, convert it to a more readable octal string
mode = oct(stat.S_IMODE(file_stat.st_mode))
try:
uname = pwd.getpwuid(file_stat.st_uid).pw_name
except KeyError:
uname = str(file_stat.st_uid)
try:
gname = grp.getgrgid(file_stat.st_gid).gr_name
except KeyError:
gname = str(file_stat.st_gid)
self._permissions_file.write(self._permissions_file_format.format(stat_file_path, mode, uname, gname))
self._tar.add(file_path, target_full_path)
# Returns whether JMXFetch should run or not
def _should_run_jmx(self):
jmx_process = JMXFetch(get_confd_path(), self._config)
jmx_process.configure(clean_status_file=False)
return jmx_process.should_run()
# Check if the file is readable (and log it)
@classmethod
def _can_read(cls, f, output=True, warn=True):
if os.access(f, os.R_OK):
if output:
log.info(" * {0}".format(f))
return True
else:
if warn:
log.warn(" * not readable - {0}".format(f))
return False
# Return path to a temp file without comment
def _strip_comment(self, file_path):
_, temp_path = tempfile.mkstemp(prefix='dd')
atexit.register(os.remove, temp_path)
with open(temp_path, 'w') as temp_file:
with open(file_path, 'r') as orig_file:
for line in orig_file.readlines():
if not self.COMMENT_REGEX.match(line):
temp_file.write(re.sub(self.APIKEY_REGEX, self.REPLACE_APIKEY, line))
return temp_path
# Remove password before collecting the file
def _add_clean_confd(self, file_path):
basename = os.path.basename(file_path)
temp_path, password_found = self._strip_password(file_path)
log.info(" * {0}{1}".format(file_path, password_found))
self._add_file_tar(
temp_path,
os.path.join('etc', 'conf.d', basename),
original_file_path=file_path
)
# Return path to a temp file without password and comment
def _strip_password(self, file_path):
_, temp_path = tempfile.mkstemp(prefix='dd')
atexit.register(os.remove, temp_path)
with open(temp_path, 'w') as temp_file:
with open(file_path, 'r') as orig_file:
password_found = ''
for line in orig_file.readlines():
if self.PASSWORD_REGEX.match(line):
line = re.sub(self.PASSWORD_REGEX, r'\1 ********', line)
password_found = ' - this file contains a password which '\
'has been removed in the version collected'
if not self.COMMENT_REGEX.match(line):
temp_file.write(line)
return temp_path, password_found
# Add output of the command to the tarfile
def _add_command_output_tar(self, name, command, command_desc=None):
out, err, _ = self._capture_output(command, print_exc_to_stderr=False)
_, temp_path = tempfile.mkstemp(prefix='dd')
with open(temp_path, 'w') as temp_file:
if command_desc:
temp_file.write(">>>> CMD <<<<\n")
temp_file.write(command_desc)
temp_file.write("\n")
temp_file.write(">>>> STDOUT <<<<\n")
temp_file.write(out.getvalue())
out.close()
temp_file.write(">>>> STDERR <<<<\n")
temp_file.write(err.getvalue())
err.close()
self._add_file_tar(temp_path, name, log_permissions=False)
os.remove(temp_path)
# Capture the output of a command (from both std streams and loggers) and the
# value returned by the command
def _capture_output(self, command, print_exc_to_stderr=True):
backup_out, backup_err = sys.stdout, sys.stderr
out, err = StringIO.StringIO(), StringIO.StringIO()
backup_handlers = logging.root.handlers[:]
logging.root.handlers = [logging.StreamHandler(out)]
sys.stdout, sys.stderr = out, err
return_value = None
try:
return_value = command()
except Exception:
# Print the exception to either stderr or `err`
traceback.print_exc(file=backup_err if print_exc_to_stderr else err)
finally:
# Stop capturing in a `finally` block to reset std streams' and loggers'
# behaviors no matter what
sys.stdout, sys.stderr = backup_out, backup_err
logging.root.handlers = backup_handlers
return out, err, return_value
# Print supervisor status (and nothing on windows)
def _supervisor_status(self):
if Platform.is_windows():
print 'Windows - status not implemented'
else:
agent_exec = self._get_path_agent_exec()
print '{0} status'.format(agent_exec)
self._print_output_command([agent_exec, 'status'])
supervisor_exec = self._get_path_supervisor_exec()
print '{0} status'.format(supervisor_exec)
self._print_output_command([supervisor_exec,
'-c', self._get_path_supervisor_conf(),
'status'])
# Find the agent exec (package or source)
def _get_path_agent_exec(self):
if Platform.is_mac():
agent_exec = '/opt/datadog-agent/bin/datadog-agent'
else:
agent_exec = '/etc/init.d/datadog-agent'
if not os.path.isfile(agent_exec):
agent_exec = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../bin/agent'
)
return agent_exec
# Find the supervisor exec (package or source)
def _get_path_supervisor_exec(self):
supervisor_exec = '/opt/datadog-agent/bin/supervisorctl'
if not os.path.isfile(supervisor_exec):
supervisor_exec = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../venv/bin/supervisorctl'
)
return supervisor_exec
# Find the supervisor conf (package or source)
def _get_path_supervisor_conf(self):
if Platform.is_mac():
supervisor_conf = '/opt/datadog-agent/etc/supervisor.conf'
else:
supervisor_conf = '/etc/dd-agent/supervisor.conf'
if not os.path.isfile(supervisor_conf):
supervisor_conf = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../supervisord/supervisord.conf'
)
return supervisor_conf
# Print output of command
def _print_output_command(self, command):
try:
status = subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError, e:
            status = 'Not able to get output, exit code {0}, exit output:\n'\
'{1}'.format(str(e.returncode), e.output)
print status
# Print info of all agent components
def _info_all(self):
CollectorStatus.print_latest_status(verbose=True)
DogstatsdStatus.print_latest_status(verbose=True)
ForwarderStatus.print_latest_status(verbose=True)
# Call jmx_command with std streams redirection
def _jmx_command_call(self, command):
try:
jmx_command([command], self._config, redirect_std_streams=True)
except Exception, e:
print "Unable to call jmx command {0}: {1}".format(command, e)
# Print java version
def _java_version(self, java_bin_path):
try:
self._print_output_command([java_bin_path, '-version'])
except OSError:
print 'Unable to execute java bin with command: {0}'.format(java_bin_path)
# Run a pip freeze
def _pip_freeze(self):
try:
import pip
pip.main(['freeze', '--no-cache-dir'])
except ImportError:
print 'Unable to import pip'
# Check if the file is not too big before upload
def _check_size(self):
if os.path.getsize(self._tar_path) > self.MAX_UPLOAD_SIZE:
            log.info("{0} won't be uploaded, its size is too large.\n"
                     "You can send it directly to support by mail."
                     .format(self._tar_path))
sys.exit(1)
# Function to ask for confirmation before upload
def _ask_for_confirmation(self):
print '{0} is going to be uploaded to Datadog.'.format(self._tar_path)
choice = raw_input('Do you want to continue [Y/n]? ')
if choice.strip().lower() not in ['yes', 'y', '']:
print 'Aborting (you can still use {0})'.format(self._tar_path)
sys.exit(1)
# Ask for email if needed
def _ask_for_email(self):
        # We ask every time now, as it is also the 'id' used to check
        # that the case is the right one, if it exists
return raw_input('Please enter your email: ').lower()
# Print output (success/error) of the request
def _analyse_result(self):
# First catch our custom explicit 400
if self._resp.status_code == 400:
raise Exception('Your request is incorrect: {0}'.format(self._resp.json()['error']))
# Then raise potential 500 and 404
self._resp.raise_for_status()
try:
json_resp = self._resp.json()
# Failed parsing
except ValueError:
            raise Exception('An unknown error has occurred - '
'Please contact support by email')
# Finally, correct
log.info("Your logs were successfully uploaded. For future reference,"
" your internal case id is {0}".format(json_resp['case_id']))
| bsd-3-clause |
OdifYltsaeb/django-guardian | guardian/backends.py | 12 | 2073 | from django.db import models
from guardian.conf import settings
from guardian.exceptions import WrongAppError
from guardian.core import ObjectPermissionChecker
from guardian.models import User
class ObjectPermissionBackend(object):
supports_object_permissions = True
supports_anonymous_user = True
supports_inactive_user = True
def authenticate(self, username, password):
return None
def has_perm(self, user_obj, perm, obj=None):
"""
Returns ``True`` if given ``user_obj`` has ``perm`` for ``obj``. If no
``obj`` is given, ``False`` is returned.
.. note::
            Remember that if the user is not *active*, all checks will return
            ``False``.
        The main difference from Django's ``ModelBackend`` is that we can pass
        an ``obj`` instance here and ``perm`` doesn't have to contain the
        ``app_label``, as it can be retrieved from the given ``obj``.
        **Inactive user support**
        If the user is authenticated but inactive, all checks always return
        ``False``.
"""
# Backend checks only object permissions
if obj is None:
return False
# Backend checks only permissions for Django models
if not isinstance(obj, models.Model):
return False
# This is how we support anonymous users - simply try to retrieve User
# instance and perform checks for that predefined user
if not user_obj.is_authenticated():
user_obj = User.objects.get(pk=settings.ANONYMOUS_USER_ID)
# Do not check any further if user is not active
if not user_obj.is_active:
return False
if len(perm.split('.')) > 1:
app_label, perm = perm.split('.')
if app_label != obj._meta.app_label:
raise WrongAppError("Passed perm has app label of '%s' and "
"given obj has '%s'" % (app_label, obj._meta.app_label))
check = ObjectPermissionChecker(user_obj)
return check.has_perm(perm, obj)
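# Minimal usage sketch (not part of guardian): with this backend listed in
# ``AUTHENTICATION_BACKENDS``, object permission checks are routed through it.
# The ``user``/``task`` objects and the ``change_task`` permission are
# assumptions for illustration only.
def _example_object_permission_check(user, task):
    backend = ObjectPermissionBackend()
    return backend.has_perm(user, 'change_task', task)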
| bsd-2-clause |
inmomentsoftware/pledgeservice | lib/requests/packages/chardet/charsetgroupprober.py | 2929 | 3791 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mActiveNum = 0
self._mProbers = []
self._mBestGuessProber = None
def reset(self):
CharSetProber.reset(self)
self._mActiveNum = 0
for prober in self._mProbers:
if prober:
prober.reset()
prober.active = True
self._mActiveNum += 1
self._mBestGuessProber = None
def get_charset_name(self):
if not self._mBestGuessProber:
self.get_confidence()
if not self._mBestGuessProber:
return None
# self._mBestGuessProber = self._mProbers[0]
return self._mBestGuessProber.get_charset_name()
def feed(self, aBuf):
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
continue
st = prober.feed(aBuf)
if not st:
continue
if st == constants.eFoundIt:
self._mBestGuessProber = prober
return self.get_state()
elif st == constants.eNotMe:
prober.active = False
self._mActiveNum -= 1
if self._mActiveNum <= 0:
self._mState = constants.eNotMe
return self.get_state()
return self.get_state()
def get_confidence(self):
st = self.get_state()
if st == constants.eFoundIt:
return 0.99
elif st == constants.eNotMe:
return 0.01
bestConf = 0.0
self._mBestGuessProber = None
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
if constants._debug:
sys.stderr.write(prober.get_charset_name()
+ ' not active\n')
continue
cf = prober.get_confidence()
if constants._debug:
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(), cf))
if bestConf < cf:
bestConf = cf
self._mBestGuessProber = prober
if not self._mBestGuessProber:
return 0.0
return bestConf
# else:
# self._mBestGuessProber = self._mProbers[0]
# return self._mBestGuessProber.get_confidence()
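# Minimal driving sketch (not part of chardet): CharSetGroupProber is used
# through concrete subclasses that populate self._mProbers; SBCSGroupProber is
# assumed here as an example. Raw bytes are fed in, then the best guess and its
# confidence are queried.
def _example_group_probe(raw_bytes):
    from .sbcsgroupprober import SBCSGroupProber
    prober = SBCSGroupProber()
    prober.feed(raw_bytes)
    return prober.get_charset_name(), prober.get_confidence()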
| agpl-3.0 |
ChemiKhazi/Sprytile | rx/linq/observable/groupby.py | 3 | 1224 | from rx import Observable
from rx.internal import extensionmethod
@extensionmethod(Observable)
def group_by(self, key_selector, element_selector=None,
key_serializer=None):
"""Groups the elements of an observable sequence according to a
specified key selector function and comparer and selects the resulting
elements by using a specified function.
1 - observable.group_by(lambda x: x.id)
2 - observable.group_by(lambda x: x.id, lambda x: x.name)
3 - observable.group_by(
lambda x: x.id,
lambda x: x.name,
lambda x: str(x))
Keyword arguments:
key_selector -- A function to extract the key for each element.
element_selector -- [Optional] A function to map each source element to
an element in an observable group.
comparer -- {Function} [Optional] Used to determine whether the objects
are equal.
Returns a sequence of observable groups, each of which corresponds to a
unique key value, containing all elements that share that same key
value.
"""
def duration_selector(x):
return Observable.never()
return self.group_by_until(key_selector, element_selector, duration_selector, key_serializer)
| mit |
codingvirtual/fullstack-p4-conference | utils.py | 384 | 1576 | import json
import os
import time
import uuid
from google.appengine.api import urlfetch
from models import Profile
def getUserId(user, id_type="email"):
if id_type == "email":
return user.email()
if id_type == "oauth":
"""A workaround implementation for getting userid."""
auth = os.getenv('HTTP_AUTHORIZATION')
bearer, token = auth.split()
token_type = 'id_token'
if 'OAUTH_USER_ID' in os.environ:
token_type = 'access_token'
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
% (token_type, token))
user = {}
wait = 1
for i in range(3):
resp = urlfetch.fetch(url)
if resp.status_code == 200:
user = json.loads(resp.content)
break
elif resp.status_code == 400 and 'invalid_token' in resp.content:
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
% ('access_token', token))
else:
time.sleep(wait)
wait = wait + i
return user.get('user_id', '')
if id_type == "custom":
        # implement your own user_id creation and retrieval algorithm
# this is just a sample that queries datastore for an existing profile
# and generates an id if profile does not exist for an email
        profile = Profile.query(Profile.mainEmail == user.email()).get()
        if profile:
            return profile.key.id()
else:
return str(uuid.uuid1().get_hex())
| apache-2.0 |
zzxuanyuan/root | documentation/doxygen/converttonotebook.py | 4 | 35547 | #!/usr/bin/env python
# Author: Pau Miquel i Mir <[email protected]> <[email protected]>>
# Date: July, 2016
#
# DISCLAIMER: This script is a prototype and a work in progress. Indeed, it is possible that
# it may not work for certain tutorials, and that it, or the tutorial, might need to be
# tweaked slightly to ensure full functionality. Please do not hesitate to email the author
# with any questions or with examples that do not work.
#
# HELP IT DOESN'T WORK: Two possible solutions:
# 1. Check that all the types returned by the tutorial are in the gTypesList. If they aren't,
# simply add them.
# 2. If the tutorial takes a long time to execute (more than 90 seconds), add the name of the
# tutorial to the list of long tutorials listLongTutorials, in the function findTimeout.
#
# REQUIREMENTS: This script needs jupyter to be properly installed, as it uses the python
# package nbformat and calls the shell commands `jupyter nbconvert` and `jupyter trust`. The
# rest of the packages used should be included in a standard installation of python. The script
# is intended to be run on a UNIX based system.
#
#
# FUNCTIONING:
# -----------
# The converttonotebook script creates Jupyter notebooks from raw C++ or python files.
# Particularly, it is indicated to convert the ROOT tutorials found in the ROOT
# repository.
#
# The script should be called from bash with the following format:
# python /path/to/script/converttonotebook.py /path/to/<macro>.C /path/to/outdir
#
# Indeed the script takes two arguments, the path to the macro and the path to the directory
# where the notebooks will be created
#
# The script's general functioning is as follows. The macro to be converted is imported as a string.
# A series of modifications are made to this string, for instance delimiting where markdown and
# code cells begin and end. Then, this string is converted into ipynb format using a function
# in the nbformat package. Finally, the notebook is executed and output.
#
# For converting python tutorials it is fairly straightforward. It extracts the description and
# author information from the header and then removes it. It also converts any comment at the
# beginning of a line into a Markdown cell.
#
# For C++ files the process is slightly more complex. The script separates the functions from the
# main code. The main function is identified as it has the same name as the macro file. The other
# functions are considered helper functions. The main function is "extracted" and presented as main code.
# The helper functions are placed in their own code cell with the %%cpp -d magic to enable function
# definition. Finally, as with Python macros, relevant information is extracted from the header, and
# newline comments are converted into Markdown cells (unless they are in helper functions).
#
# The script creates an .ipynb version of the macro, with the full output included.
# The files are named:
# <macro>.<C or py>.nbconvert.ipynb
#
# It is called by filter.cxx, which in turn is called by doxygen when processing any file
# in the ROOT repository. filter.cxx only calls converttonotebook.py when the string \notebook
# is found in the header of the tutorial, but this script checks for its presence as well.
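#
# EXAMPLE INVOCATION (paths and macro name are illustrative only):
# python /path/to/script/converttonotebook.py $ROOTSYS/tutorials/hist/fillrandom.C /tmp/notebooks
# which, if the macro requests a notebook, produces /tmp/notebooks/fillrandom.C.nbconvert.ipynb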
import re
import os
import sys
import json
import time
import doctest
import textwrap
import subprocess
from nbformat import v3, v4
from datetime import datetime, date
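# Minimal sketch (illustrative only) of the conversion path described in the
# header: text annotated with "# <markdowncell>" / "# <codecell>" delimiters is
# parsed with nbformat's v3 reader and upgraded to a v4 notebook, much as
# mainfunction() does below.
def _example_delimited_text_to_notebook():
    demo = ("# <markdowncell>\n"
            "# This line becomes a Markdown cell\n"
            "# <codecell>\n"
            "print('this line becomes a code cell')\n")
    nbook = v4.upgrade(v3.reads_py(demo))
    return json.loads(v4.writes(nbook))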
# List of types that will be considered when looking for a C++ function. If a macro returns a
# type not included on the list, the regular expression will not match it, and thus the function
# will not be properly defined. Thus, any other type returned by function must be added to this list
# for the script to work correctly.
gTypesList = ["void", "int", "Int_t", "TF1", "string", "bool", "double", "float", "char",
"TCanvas", "TTree", "TString", "TSeqCollection", "Double_t", "TFile", "Long64_t", "Bool_t", "TH1",
"RooDataSet", "RooWorkspace" , "HypoTestInverterResult" , "TVectorD" , "TArrayF", "UInt_t"]
# -------------------------------------
# -------- Function definitions--------
# -------------------------------------
def unindenter(string, spaces = 3):
"""
Returns string with each line unindented by 3 spaces. If line isn't indented, it stays the same.
>>> unindenter(" foobar")
'foobar\\n'
>>> unindenter("foobar")
'foobar\\n'
>>> unindenter('''foobar
... foobar
... foobar''')
'foobar\\nfoobar\\nfoobar\\n'
"""
newstring = ''
lines = string.splitlines()
for line in lines:
if line.startswith(spaces*' '):
newstring += (line[spaces:] + "\n")
else:
newstring += (line + "\n")
return newstring
def readHeaderPython(text):
"""
Extract author and description from header, eliminate header from text. Also returns
notebook boolean, which is True if the string \notebook is present in the header
Also determine options (-js, -nodraw, -header) passed in \notebook command, and
return their booleans
>>> readHeaderPython('''## \\file
... ## \\ingroup tutorials
... ## \\\\notebook
... ## This is the description of the tutorial
... ##
... ## \\macro_image
... ## \\macro_code
... ##
... ## \\\\author John Brown
... def tutorialfuncion()''')
('def tutorialfuncion()\\n', 'This is the description of the tutorial\\n\\n\\n', 'John Brown', True, False, False, False)
>>> readHeaderPython('''## \\file
... ## \\ingroup tutorials
... ## \\\\notebook -js
... ## This is the description of the tutorial
... ##
... ## \\macro_image
... ## \\macro_code
... ##
... ## \\\\author John Brown
... def tutorialfuncion()''')
('def tutorialfuncion()\\n', 'This is the description of the tutorial\\n\\n\\n', 'John Brown', True, True, False, False)
>>> readHeaderPython('''## \\file
... ## \\ingroup tutorials
... ## \\\\notebook -nodraw
... ## This is the description of the tutorial
... ##
... ## \\macro_image
... ## \\macro_code
... ##
... ## \\\\author John Brown
... def tutorialfuncion()''')
('def tutorialfuncion()\\n', 'This is the description of the tutorial\\n\\n\\n', 'John Brown', True, False, True, False)
"""
lines = text.splitlines()
description = ''
author = ''
isNotebook = False
isJsroot = False
nodraw = False
needsHeaderFile = False
for i, line in enumerate(lines):
if line.startswith("## \\aut"):
author = line[11:]
elif line.startswith("## \\note"):
isNotebook = True
if "-js" in line:
isJsroot = True
if "-nodraw" in line:
nodraw = True
if "-header" in line:
needsHeaderFile = True
elif line.startswith("##"):
if not line.startswith("## \\") and isNotebook:
description += (line[3:] + '\n')
else:
break
newtext = ''
for line in lines[i:]:
newtext += (line + "\n")
return newtext, description, author, isNotebook, isJsroot, nodraw, needsHeaderFile
def pythonComments(text):
"""
Converts comments delimited by # or ## and on a new line into a markdown cell.
For python files only
>>> pythonComments('''## This is a
... ## multiline comment
... def function()''')
'# <markdowncell>\\n## This is a\\n## multiline comment\\n# <codecell>\\ndef function()\\n'
>>> pythonComments('''def function():
... variable = 5 # Comment not in cell
... # Comment also not in cell''')
'def function():\\n variable = 5 # Comment not in cell\\n # Comment also not in cell\\n'
"""
text = text.splitlines()
newtext = ''
inComment = False
for i, line in enumerate(text):
if line.startswith("#") and not inComment: # True if first line of comment
inComment = True
newtext += "# <markdowncell>\n"
newtext += (line + "\n")
elif inComment and not line.startswith("#"): # True if first line after comment
inComment = False
newtext += "# <codecell>\n"
newtext += (line+"\n")
else:
newtext += (line+"\n")
return newtext
def pythonMainFunction(text):
lines = text.splitlines()
functionContentRe = re.compile('def %s\\(.*\\):' % tutName , flags = re.DOTALL | re.MULTILINE)
newtext = ''
inMainFunction = False
hasMainFunction = False
for line in lines:
if hasMainFunction:
if line.startswith("""if __name__ == "__main__":""") or line.startswith("""if __name__ == '__main__':"""):
break
match = functionContentRe.search(line)
if inMainFunction and not line.startswith(" ") and line != "":
inMainFunction = False
if match:
inMainFunction = True
hasMainFunction = True
else:
if inMainFunction:
newtext += (line[4:] + '\n')
else:
newtext += (line + '\n')
return newtext
def readHeaderCpp(text):
"""
Extract author and description from header, eliminate header from text. Also returns
notebook boolean, which is True if the string \notebook is present in the header
Also determine options (-js, -nodraw, -header) passed in \notebook command, and
return their booleans
>>> readHeaderCpp('''/// \\file
... /// \\ingroup tutorials
... /// \\\\notebook
... /// This is the description of the tutorial
... ///
... /// \\macro_image
... /// \\macro_code
... ///
... /// \\\\author John Brown
... void tutorialfuncion(){}''')
('void tutorialfuncion(){}\\n', '# This is the description of the tutorial\\n# \\n# \\n', 'John Brown', True, False, False, False)
>>> readHeaderCpp('''/// \\file
... /// \\ingroup tutorials
... /// \\\\notebook -js
... /// This is the description of the tutorial
... ///
... /// \\macro_image
... /// \\macro_code
... ///
... /// \\\\author John Brown
... void tutorialfuncion(){}''')
('void tutorialfuncion(){}\\n', '# This is the description of the tutorial\\n# \\n# \\n', 'John Brown', True, True, False, False)
>>> readHeaderCpp('''/// \\file
... /// \\ingroup tutorials
... /// \\\\notebook -nodraw
... /// This is the description of the tutorial
... ///
... /// \\macro_image
... /// \\macro_code
... ///
... /// \\\\author John Brown
... void tutorialfuncion(){}''')
('void tutorialfuncion(){}\\n', '# This is the description of the tutorial\\n# \\n# \\n', 'John Brown', True, False, True, False)
"""
lines = text.splitlines()
description = ''
author = ''
isNotebook = False
isJsroot = False
nodraw = False
needsHeaderFile = False
for i, line in enumerate(lines):
if line.startswith("/// \\aut"):
author = line[12:]
if line.startswith("/// \\note"):
isNotebook = True
if "-js" in line:
isJsroot = True
if "-nodraw" in line:
nodraw = True
if "-header" in line:
needsHeaderFile = True
if line.startswith("///"):
if not line.startswith("/// \\") and isNotebook:
description += ('# ' + line[4:] + '\n')
else:
break
newtext = ''
for line in lines[i:]:
newtext += (line + "\n")
description = description.replace("\\f$", "$")
description = description.replace("\\f[", "$$")
description = description.replace("\\f]", "$$")
return newtext, description, author, isNotebook, isJsroot, nodraw, needsHeaderFile
def cppFunction(text):
"""
Extracts main function for the function enclosure by means of regular expression
>>> cppFunction('''void mainfunction(arguments = values){
... content of function
... which spans
... several lines
... }''')
'\\n content of function\\n which spans\\n several lines\\n'
>>> cppFunction('''void mainfunction(arguments = values)
... {
... content of function
... which spans
... several lines
... }''')
'\\n content of function\\n which spans\\n several lines\\n'
>>> cppFunction('''void mainfunction(arguments = values
... morearguments = morevalues)
... {
... content of function
... which spans
... several lines
... }''')
'\\n content of function\\n which spans\\n several lines\\n'
"""
functionContentRe = re.compile(r'(?<=\{).*(?=^\})', flags = re.DOTALL | re.MULTILINE)
match = functionContentRe.search(text)
if match:
return match.group()
else:
return text
def cppComments(text):
"""
Converts comments delimited by // and on a new line into a markdown cell. For C++ files only.
>>> cppComments('''// This is a
... // multiline comment
... void function(){}''')
'# <markdowncell>\\n# This is a\\n# multiline comment\\n# <codecell>\\nvoid function(){}\\n'
>>> cppComments('''void function(){
... int variable = 5 // Comment not in cell
... // Comment also not in cell
... }''')
'void function(){\\n int variable = 5 // Comment not in cell\\n // Comment also not in cell\\n}\\n'
"""
text = text.splitlines()
newtext = ''
inComment = False
for line in text:
if line.startswith("//") and not inComment: # True if first line of comment
inComment = True
newtext += "# <markdowncell>\n"
if line[2:].lstrip().startswith("#"): # Don't use .capitalize() if line starts with hash, ie it is a header
newtext += ("# " + line[2:]+"\n")
else:
newtext += ("# " + line[2:].lstrip().capitalize()+"\n")
elif inComment and not line.startswith("//"): # True if first line after comment
inComment = False
newtext += "# <codecell>\n"
newtext += (line+"\n")
elif inComment and line.startswith("//"): # True if in the middle of a comment block
newtext += ("# " + line[2:] + "\n")
else:
newtext += (line+"\n")
return newtext
def split(text):
"""
Splits the text string into main, helpers, and rest. main is the main function,
i.e. the function tha thas the same name as the macro file. Helpers is a list of
strings, each a helper function, i.e. any other function that is not the main function.
Finally, rest is a string containing any top-level code outside of any function.
Comments immediately prior to a helper cell are converted into markdown cell,
added to the helper, and removed from rest.
Intended for C++ files only.
>>> split('''void tutorial(){
... content of tutorial
... }''')
('void tutorial(){\\n content of tutorial\\n}', [], '')
>>> split('''void tutorial(){
... content of tutorial
... }
... void helper(arguments = values){
... helper function
... content spans lines
... }''')
('void tutorial(){\\n content of tutorial\\n}', ['\\n# <markdowncell>\\n A helper function is created: \\n# <codecell>\\n%%cpp -d\\nvoid helper(arguments = values){\\n helper function\\n content spans lines\\n}'], '')
>>> split('''#include <header.h>
... using namespace NAMESPACE
... void tutorial(){
... content of tutorial
... }
... void helper(arguments = values){
... helper function
... content spans lines
... }''')
('void tutorial(){\\n content of tutorial\\n}', ['\\n# <markdowncell>\\n A helper function is created: \\n# <codecell>\\n%%cpp -d\\nvoid helper(arguments = values){\\n helper function\\n content spans lines\\n}'], '#include <header.h>\\nusing namespace NAMESPACE')
>>> split('''void tutorial(){
... content of tutorial
... }
... // This is a multiline
... // description of the
... // helper function
... void helper(arguments = values){
... helper function
... content spans lines
... }''')
('void tutorial(){\\n content of tutorial\\n}', ['\\n# <markdowncell>\\n This is a multiline\\n description of the\\n helper function\\n \\n# <codecell>\\n%%cpp -d\\nvoid helper(arguments = values){\\n helper function\\n content spans lines\\n}'], '')
"""
functionReString="("
for cpptype in gTypesList:
functionReString += ("^%s|") % cpptype
functionReString = functionReString[:-1] + r")\s?\*?&?\s?[\w:]*?\s?\([^\)]*\)\s*\{.*?^\}"
functionRe = re.compile(functionReString, flags = re.DOTALL | re.MULTILINE)
#functionre = re.compile(r'(^void|^int|^Int_t|^TF1|^string|^bool|^double|^float|^char|^TCanvas|^TTree|^TString|^TSeqCollection|^Double_t|^TFile|^Long64_t|^Bool_t)\s?\*?\s?[\w:]*?\s?\([^\)]*\)\s*\{.*?^\}', flags = re.DOTALL | re.MULTILINE)
functionMatches = functionRe.finditer(text)
helpers = []
main = ""
for matchString in [match.group() for match in functionMatches]:
if tutName == findFunctionName(matchString): # if the name of the function is that of the macro
main = matchString
else:
helpers.append(matchString)
# Create rest by replacing the main and helper functions with blank strings
rest = text.replace(main, "")
for helper in helpers:
rest = rest.replace(helper, "")
newHelpers = []
lines = text.splitlines()
for helper in helpers: # For each helper function
for i, line in enumerate(lines): # Look through the lines until the
if line.startswith(helper[:helper.find("\n")]): # first line of the helper is found
j = 1
commentList = []
while lines[i-j].startswith("//"): # Add comment lines immediately prior to list
commentList.append(lines[i-j])
j += 1
if commentList: # Convert list to string
commentList.reverse()
helperDescription = ''
for comment in commentList:
if comment in ("//", "// "):
helperDescription += "\n\n" # Two newlines to create hard break in Markdown
else:
helperDescription += (comment[2:] + "\n")
rest = rest.replace(comment, "")
break
else: # If no comments are found create generic description
helperDescription = "A helper function is created:"
break
if findFunctionName(helper) != "main": # remove void main function
newHelpers.append("\n# <markdowncell>\n " + helperDescription + " \n# <codecell>\n%%cpp -d\n" + helper)
rest = rest.rstrip("\n /") # remove newlines and empty comments at the end of string
return main, newHelpers, rest
def findFunctionName(text):
"""
Takes a string representation of a C++ function as an input,
finds and returns the name of the function
>>> findFunctionName('void functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void functionName (arguments = values){}')
'functionName'
>>> findFunctionName('void *functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void* functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void * functionName(arguments = values){}')
'functionName'
>>> findFunctionName('void class::functionName(arguments = values){}')
'class::functionName'
"""
functionNameReString="(?<="
for cpptype in gTypesList:
functionNameReString += ("(?<=%s)|") % cpptype
functionNameReString = functionNameReString[:-1] + r")\s?\*?\s?[^\s]*?(?=\s?\()"
functionNameRe = re.compile(functionNameReString, flags = re.DOTALL | re.MULTILINE)
#functionnamere = re.compile(r'(?<=(?<=int)|(?<=void)|(?<=TF1)|(?<=Int_t)|(?<=string)|(?<=double)|(?<=Double_t)|(?<=float)|(?<=char)|(?<=TString)|(?<=bool)|(?<=TSeqCollection)|(?<=TCanvas)|(?<=TTree)|(?<=TFile)|(?<=Long64_t)|(?<=Bool_t))\s?\*?\s?[^\s]*?(?=\s?\()', flags = re.DOTALL | re.MULTILINE)
match = functionNameRe.search(text)
functionname = match.group().strip(" *\n")
return functionname
def processmain(text):
"""
    Checks whether the main function takes arguments. If it does, an extra
    cell that defines those arguments is created and returned, to be added
    before the main code cell.
>>> processmain('''void function(){
... content of function
... spanning several
... lines
... }''')
('void function(){\\n content of function\\n spanning several\\n lines\\n}', '')
>>> processmain('''void function(arguments = values){
... content of function
... spanning several
... lines
... }''')
('void function(arguments = values){\\n content of function\\n spanning several\\n lines\\n}', '# <markdowncell> \\n Arguments are defined. \\n# <codecell>\\narguments = values;\\n# <codecell>\\n')
>>> processmain('''void function(argument1 = value1, //comment 1
... argument2 = value2 /*comment 2*/ ,
... argument3 = value3,
... argument4 = value4)
... {
... content of function
... spanning several
... lines
... }''')
('void function(argument1 = value1, //comment 1\\n argument2 = value2 /*comment 2*/ ,\\n argument3 = value3, \\n argument4 = value4)\\n{\\n content of function\\n spanning several\\n lines\\n}', '# <markdowncell> \\n Arguments are defined. \\n# <codecell>\\nargument1 = value1;\\nargument2 = value2;\\nargument3 = value3;\\nargument4 = value4;\\n# <codecell>\\n')
>>> processmain('''TCanvas function(){
... content of function
... spanning several
... lines
... return c1
... }''')
('TCanvas function(){\\n content of function\\n spanning several \\n lines\\n return c1\\n}', '')
"""
argumentsCell = ''
if text:
argumentsre = re.compile(r'(?<=\().*?(?=\))', flags = re.DOTALL | re.MULTILINE)
arguments = argumentsre.search(text)
if len(arguments.group()) > 3:
argumentsCell = "# <markdowncell> \n Arguments are defined. \n# <codecell>\n"
individualArgumentre = re.compile(r'[^/\n,]*?=[^/\n,]*') #, flags = re.DOTALL) #| re.MULTILINE)
argumentList=individualArgumentre.findall(arguments.group())
for argument in argumentList:
argumentsCell += argument.strip("\n ") + ";\n"
argumentsCell += "# <codecell>\n"
return text, argumentsCell
# now define text transformers
def removePaletteEditor(code):
code = code.replace("img->StartPaletteEditor();", "")
code = code.replace("Open the color editor", "")
return code
def runEventExe(code):
if "copytree" in tutName:
return "# <codecell> \n.! $ROOTSYS/test/eventexe 1000 1 1 1 \n" + code
return code
def getLibMathMore(code):
if "quasirandom" == tutName:
return "# <codecell> \ngSystem->Load(\"libMathMore\"); \n# <codecell> \n" + code
return code
def roofitRemoveSpacesComments(code):
def changeString(matchObject):
matchString = matchObject.group()
matchString = matchString[0] + " " + matchString[1:]
matchString = matchString.replace(" " , "THISISASPACE")
matchString = matchString.replace(" " , "")
matchString = matchString.replace("THISISASPACE" , " ")
return matchString
newcode = re.sub("#\s\s?\w\s[\w-]\s\w.*", changeString , code)
return newcode
def declareNamespace(code):
if "using namespace RooFit;\nusing namespace RooStats;" in code:
code = code.replace("using namespace RooFit;\nusing namespace RooStats;", "# <codecell>\n%%cpp -d\n// This is a workaround to make sure the namespace is used inside functions\nusing namespace RooFit;\nusing namespace RooStats;\n# <codecell>\n")
else:
code = code.replace("using namespace RooFit;", "# <codecell>\n%%cpp -d\n// This is a workaround to make sure the namespace is used inside functions\nusing namespace RooFit;\n# <codecell>\n")
code = code.replace("using namespace RooStats;", "# <codecell>\n%%cpp -d\n// This is a workaround to make sure the namespace is used inside functions\nusing namespace RooStats;\n# <codecell>\n")
code = code.replace("using namespace ROOT::Math;", "# <codecell>\n%%cpp -d\n// This is a workaround to make sure the namespace is used inside functions\nusing namespace ROOT::Math;\n# <codecell>\n")
return code
def rs401dGetFiles(code):
if tutName == "rs401d_FeldmanCousins":
code = code.replace(
"""#if !defined(__CINT__) || defined(__MAKECINT__)\n#include "../tutorials/roostats/NuMuToNuE_Oscillation.h"\n#include "../tutorials/roostats/NuMuToNuE_Oscillation.cxx" // so that it can be executed directly\n#else\n#include "../tutorials/roostats/NuMuToNuE_Oscillation.cxx+" // so that it can be executed directly\n#endif""" , """TString tutDir = gROOT->GetTutorialDir();\nTString headerDir = TString::Format("#include \\\"%s/roostats/NuMuToNuE_Oscillation.h\\\"", tutDir.Data());\nTString impDir = TString::Format("#include \\\"%s/roostats/NuMuToNuE_Oscillation.cxx\\\"", tutDir.Data());\ngROOT->ProcessLine(headerDir);\ngROOT->ProcessLine(impDir);""")
return code
def declareIncludes(code):
if tutName != "fitcont":
code = re.sub(r"# <codecell>\s*#include", "# <codecell>\n%%cpp -d\n#include" , code)
return code
def tree4GetFiles(code):
if tutName == "tree4":
code = code.replace(
"""#include \"../test/Event.h\"""" , """# <codecell>\nTString dir = "$ROOTSYS/test/Event.h";\ngSystem->ExpandPathName(dir);\nTString includeCommand = TString::Format("#include \\\"%s\\\"" , dir.Data());\ngROOT->ProcessLine(includeCommand);""")
return code
def disableDrawProgressBar(code):
code = code.replace(":DrawProgressBar",":!DrawProgressBar")
return code
def fixes(code):
codeTransformers=[removePaletteEditor, runEventExe, getLibMathMore,
roofitRemoveSpacesComments, declareNamespace, rs401dGetFiles ,
declareIncludes, tree4GetFiles, disableDrawProgressBar]
for transformer in codeTransformers:
code = transformer(code)
return code
def changeMarkdown(code):
code = code.replace("~~~" , "```")
code = code.replace("{.cpp}", "cpp")
code = code.replace("{.bash}", "bash")
return code
def isCpp():
"""
Return True if extension is a C++ file
"""
return extension in ("C", "c", "cpp", "C++", "cxx")
def findTimeout():
listLongTutorials = ["OneSidedFrequentistUpperLimitWithBands", "StandardBayesianNumericalDemo",
"TwoSidedFrequentistUpperLimitWithBands" , "HybridStandardForm", "rs401d_FeldmanCousins",
"TMVAMultipleBackgroundExample", "TMVARegression", "TMVAClassification", "StandardHypoTestDemo"]
if tutName in listLongTutorials:
return 300
else:
return 90
# -------------------------------------
# ------------ Main Program------------
# -------------------------------------
def mainfunction(text):
"""
Main function. Calls all other functions, depending on whether the macro input is in python or c++.
It adds the header information. Also, it adds a cell that draws all canvases. The working text is
then converted to a version 3 jupyter notebook, subsequently updated to a version 4. Then, metadata
    associated with the language the macro is written in is attached to the notebook. Finally, the
notebook is executed and output as a Jupyter notebook.
"""
# Modify text from macros to suit a notebook
if isCpp():
main, helpers, rest = split(text)
main, argumentsCell = processmain(main)
main = cppComments(unindenter(cppFunction(main))) # Remove function, Unindent, and convert comments to Markdown cells
if argumentsCell:
main = argumentsCell + main
rest = cppComments(rest) # Convert top level code comments to Markdown cells
# Construct text by starting with top level code, then the helper functions, and finally the main function.
# Also add cells for headerfile, or keepfunction
if needsHeaderFile:
text = "# <markdowncell>\n# The header file must be copied to the current directory\n# <codecell>\n.!cp %s%s.h .\n# <codecell>\n" % (tutRelativePath, tutName)
text += rest
else:
text = "# <codecell>\n" + rest
for helper in helpers:
text += helper
text += ("\n# <codecell>\n" + main)
if extension == "py":
text = pythonMainFunction(text)
text = pythonComments(text) # Convert comments into Markdown cells
# Perform last minute fixes to the notebook, used for specific fixes needed by some tutorials
text = fixes(text)
# Change to standard Markdown
newDescription = changeMarkdown(description)
# Add the title and header of the notebook
text = "# <markdowncell> \n# # %s\n%s# \n# \n# **Author:** %s \n# <i><small>This notebook tutorial was automatically generated " \
"with <a href= \"https://github.com/root-project/root/blob/master/documentation/doxygen/converttonotebook.py\">ROOTBOOK-izer (Beta)</a> " \
"from the macro found in the ROOT repository on %s.</small></i>\n# <codecell>\n%s" % (tutTitle, newDescription, author, date, text)
# Add cell at the end of the notebook that draws all the canvasses. Add a Markdown cell before explaining it.
if isJsroot and not nodraw:
if isCpp():
text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\n%jsroot on\ngROOT->GetListOfCanvases()->Draw()"
if extension == "py":
text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\n%jsroot on\nfrom ROOT import gROOT \ngROOT.GetListOfCanvases().Draw()"
elif not nodraw:
if isCpp():
text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\ngROOT->GetListOfCanvases()->Draw()"
if extension == "py":
text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\nfrom ROOT import gROOT \ngROOT.GetListOfCanvases().Draw()"
# Create a notebook from the working text
nbook = v3.reads_py(text)
nbook = v4.upgrade(nbook) # Upgrade v3 to v4
# Load notebook string into json format, essentially creating a dictionary
json_data = json.loads(v4.writes(nbook))
# add the corresponding metadata
if extension == "py":
json_data[u'metadata'] = {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.10"
}
}
elif isCpp():
json_data[u'metadata'] = {
"kernelspec": {
"display_name": "ROOT C++",
"language": "c++",
"name": "root"
},
"language_info": {
"codemirror_mode": "text/x-c++src",
"file_extension": ".C",
"mimetype": " text/x-c++src",
"name": "c++"
}
}
# write the json file with the metadata
with open(outPathName, 'w') as fout:
json.dump(json_data, fout, indent=1, sort_keys=True)
print(time.time() - starttime)
timeout = findTimeout()
    # Call the command that executes the notebook and creates a new notebook with the output
r = subprocess.call(["jupyter", "nbconvert", "--ExecutePreprocessor.timeout=%d" % timeout, "--to=notebook", "--execute", outPathName])
if r != 0:
sys.stderr.write("NOTEBOOK_CONVERSION_WARNING: Nbconvert failed for notebook %s with return code %s\n" %(outname,r))
# If notebook conversion did not work, try again without the option --execute
subprocess.call(["jupyter", "nbconvert", "--ExecutePreprocessor.timeout=%d" % timeout, "--to=notebook", outPathName])
else:
if isJsroot:
subprocess.call(["jupyter", "trust", os.path.join(outdir, outnameconverted)])
# Only remove notebook without output if nbconvert succeeds
os.remove(outPathName)
if __name__ == "__main__":
if str(sys.argv[1]) == "-test":
tutName = "tutorial"
doctest.testmod(verbose=True)
else:
# -------------------------------------
# ----- Preliminary definitions--------
# -------------------------------------
# Extract and define the name of the file as well as its derived names
tutPathName = str(sys.argv[1])
tutPath = os.path.dirname(tutPathName)
if tutPath.split("/")[-2] == "tutorials":
tutRelativePath = "$ROOTSYS/tutorials/%s/" % tutPath.split("/")[-1]
tutFileName = os.path.basename(tutPathName)
tutName, extension = tutFileName.split(".")
tutTitle = re.sub( r"([A-Z\d])", r" \1", tutName).title()
outname = tutFileName + ".ipynb"
outnameconverted = tutFileName + ".nbconvert.ipynb"
# Extract output directory
try:
outdir = str(sys.argv[2])
except:
outdir = tutPath
outPathName = os.path.join(outdir, outname)
# Find and define the time and date this script is run
date = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
# -------------------------------------
# -------------------------------------
# -------------------------------------
# Set DYLD_LIBRARY_PATH. When run without root access or as a different user, especially from Mac systems,
# it is possible for security reasons that the environment does not include this definition, so it is manually defined.
os.environ["DYLD_LIBRARY_PATH"] = os.environ["ROOTSYS"] + "/lib"
# Open the file to be converted
with open(tutPathName) as fin:
text = fin.read()
# Extract information from header and remove header from text
if extension == "py":
text, description, author, isNotebook, isJsroot, nodraw, needsHeaderFile = readHeaderPython(text)
elif isCpp():
text, description, author, isNotebook, isJsroot, nodraw, needsHeaderFile = readHeaderCpp(text)
if isNotebook:
starttime = time.time()
mainfunction(text)
print(time.time() - starttime)
else:
pass
| lgpl-2.1 |
angelman/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py | 122 | 3414 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.layout_tests.reftests import extract_reference_link
class ExtractLinkMatchTest(unittest.TestCase):
def test_getExtractMatch(self):
html_1 = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR"
href="mailto:EMAIL OR http://CONTACT_PAGE"/>
<link rel="help" href="RELEVANT_SPEC_SECTION"/>
<link rel="match" href="green-box-ref.xht" />
<link rel="match" href="blue-box-ref.xht" />
<link rel="mismatch" href="red-box-notref.xht" />
<link rel="mismatch" href="red-box-notref.xht" />
<meta name="flags" content="TOKENS" />
<meta name="assert" content="TEST ASSERTION"/>
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
matches, mismatches = extract_reference_link.get_reference_link(html_1)
self.assertItemsEqual(matches,
["green-box-ref.xht", "blue-box-ref.xht"])
self.assertItemsEqual(mismatches,
["red-box-notref.xht", "red-box-notref.xht"])
html_2 = ""
empty_tuple_1 = extract_reference_link.get_reference_link(html_2)
self.assertEqual(empty_tuple_1, ([], []))
# Link does not have a "ref" attribute.
html_3 = """<link href="RELEVANT_SPEC_SECTION"/>"""
empty_tuple_2 = extract_reference_link.get_reference_link(html_3)
self.assertEqual(empty_tuple_2, ([], []))
# Link does not have a "href" attribute.
html_4 = """<link rel="match"/>"""
empty_tuple_3 = extract_reference_link.get_reference_link(html_4)
self.assertEqual(empty_tuple_3, ([], []))
# Link does not have a "/" at the end.
html_5 = """<link rel="help" href="RELEVANT_SPEC_SECTION">"""
empty_tuple_4 = extract_reference_link.get_reference_link(html_5)
self.assertEqual(empty_tuple_4, ([], []))
| bsd-3-clause |
untrustbank/litecoin | test/functional/feature_logging.py | 13 | 2462 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test debug logging."""
import os
from test_framework.test_framework import BitcoinTestFramework
class LoggingTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
# test default log file name
assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "debug.log"))
# test alternative log file name in datadir
self.restart_node(0, ["-debuglogfile=foo.log"])
assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "foo.log"))
# test alternative log file name outside datadir
tempname = os.path.join(self.options.tmpdir, "foo.log")
self.restart_node(0, ["-debuglogfile=%s" % tempname])
assert os.path.isfile(tempname)
# check that invalid log (relative) will cause error
invdir = os.path.join(self.nodes[0].datadir, "regtest", "foo")
invalidname = os.path.join("foo", "foo.log")
self.stop_node(0)
self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % (invalidname)],
"Error: Could not open debug log file")
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (relative) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) will cause error
self.stop_node(0)
invdir = os.path.join(self.options.tmpdir, "foo")
invalidname = os.path.join(invdir, "foo.log")
self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % invalidname],
"Error: Could not open debug log file")
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
if __name__ == '__main__':
LoggingTest().main()
| mit |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/load_balancer_backend_address_pools_operations.py | 1 | 8115 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class LoadBalancerBackendAddressPoolsOperations(object):
"""LoadBalancerBackendAddressPoolsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-11-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-11-01"
self.config = config
def list(
self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of BackendAddressPool
:rtype:
~azure.mgmt.network.v2017_11_01.models.BackendAddressPoolPaged[~azure.mgmt.network.v2017_11_01.models.BackendAddressPool]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.BackendAddressPoolPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.BackendAddressPoolPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'}
def get(
self, resource_group_name, load_balancer_name, backend_address_pool_name, custom_headers=None, raw=False, **operation_config):
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address
pool.
:type backend_address_pool_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: BackendAddressPool or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_11_01.models.BackendAddressPool or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BackendAddressPool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'}
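# Illustrative usage sketch (not part of the generated code): the client class,
# credential object and resource names below are assumptions for demonstration only.
#
#   from azure.mgmt.network import NetworkManagementClient
#   client = NetworkManagementClient(credentials, subscription_id)
#   pool = client.load_balancer_backend_address_pools.get(
#       'example_rg', 'example_lb', 'example_pool')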
| mit |
ingadhoc/odoo | addons/l10n_be_coda/wizard/__init__.py | 439 | 1098 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_coda_import
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
40223234/w16b_test | static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/testwith.py | 739 | 5806 | import unittest
from warnings import catch_warnings
from unittest.test.testmock.support import is_instance
from unittest.mock import MagicMock, Mock, patch, sentinel, mock_open, call
something = sentinel.Something
something_else = sentinel.SomethingElse
class WithTest(unittest.TestCase):
def test_with_statement(self):
with patch('%s.something' % __name__, sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
self.assertEqual(something, sentinel.Something)
def test_with_statement_exception(self):
try:
with patch('%s.something' % __name__, sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
raise Exception('pow')
except Exception:
pass
else:
self.fail("patch swallowed exception")
self.assertEqual(something, sentinel.Something)
def test_with_statement_as(self):
with patch('%s.something' % __name__) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertTrue(is_instance(mock_something, MagicMock),
"patching wrong type")
self.assertEqual(something, sentinel.Something)
def test_patch_object_with_statement(self):
class Foo(object):
something = 'foo'
original = Foo.something
with patch.object(Foo, 'something'):
self.assertNotEqual(Foo.something, original, "unpatched")
self.assertEqual(Foo.something, original)
def test_with_statement_nested(self):
with catch_warnings(record=True):
with patch('%s.something' % __name__) as mock_something, patch('%s.something_else' % __name__) as mock_something_else:
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_with_statement_specified(self):
with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(mock_something, sentinel.Patched, "wrong patch")
self.assertEqual(something, sentinel.Something)
def testContextManagerMocking(self):
mock = Mock()
mock.__enter__ = Mock()
mock.__exit__ = Mock()
mock.__exit__.return_value = False
with mock as m:
self.assertEqual(m, mock.__enter__.return_value)
mock.__enter__.assert_called_with()
mock.__exit__.assert_called_with(None, None, None)
def test_context_manager_with_magic_mock(self):
mock = MagicMock()
with self.assertRaises(TypeError):
with mock:
'foo' + 3
mock.__enter__.assert_called_with()
self.assertTrue(mock.__exit__.called)
def test_with_statement_same_attribute(self):
with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('%s.something' % __name__) as mock_again:
self.assertEqual(something, mock_again, "unpatched")
self.assertEqual(something, mock_something,
"restored with wrong instance")
self.assertEqual(something, sentinel.Something, "not restored")
def test_with_statement_imbricated(self):
with patch('%s.something' % __name__) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('%s.something_else' % __name__) as mock_something_else:
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_dict_context_manager(self):
foo = {}
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
self.assertEqual(foo, {})
with self.assertRaises(NameError):
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
raise NameError('Konrad')
self.assertEqual(foo, {})
class TestMockOpen(unittest.TestCase):
def test_mock_open(self):
mock = mock_open()
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_mock_open_context_manager(self):
mock = mock_open()
handle = mock.return_value
with patch('%s.open' % __name__, mock, create=True):
with open('foo') as f:
f.read()
expected_calls = [call('foo'), call().__enter__(), call().read(),
call().__exit__(None, None, None)]
self.assertEqual(mock.mock_calls, expected_calls)
self.assertIs(f, handle)
def test_explicit_mock(self):
mock = MagicMock()
mock_open(mock)
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_read_data(self):
mock = mock_open(read_data='foo')
with patch('%s.open' % __name__, mock, create=True):
h = open('bar')
result = h.read()
self.assertEqual(result, 'foo')
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
zeimusu/D1 | nummeth.py | 1 | 1770 | def sample_cubic(x):
return x**3 - 2*x - 1
def Dsample_cubic(x):
return 3*x**2 - 2
def sign(x):
if x<0:
return -1
else:
return 1
def bisection(f,a,b,fa,fb):
"""given a function, f, and an interval [a,b] in which f changes sign
return a new interval [x,y] in which f changes sign, and the values of
f at the end points """
midpoint = (a + b)/2
fm = f(midpoint)
if sign(fa) == sign(fm):
return midpoint,b,fm,fb
else:
return a,midpoint,fa,fm
def interpolation(f,a,b,fa,fb):
"""given a function and an interval [a, b] in which f changes sign,
return a new interval [x,y] in which one endpoint is found by
interpolation and f changes sign"""
x = (a - b) * fa / (fb - fa) + a
fx = f(x)
if sign(fx) == sign(fa):
return x,b,fx,fb,x
else:
return a,x,fa,fx,x
def NR(f,df,x):
""""
Newton Raphson method, given a function and its derivative, and an
initial estimate use Newton-Raphson to return an improved estimate
"""
return x - f(x)/df(x)
def test():
f = sample_cubic
df = Dsample_cubic
A, B = 1, 2
loops = 10
solution = 1.618033988749895
print("Bisection")
a, b= A, B
fa = f(a)
fb = f(b)
for i in range(loops):
a, b, fa, fb = bisection(f,a,b,fa,fb)
print( a, b, 100*abs(a - solution)/solution )
print()
print("interpolation")
a, b =A, B
fa, fb = f(a), f(b)
for i in range(loops):
a, b,fa,fb,x = interpolation(f,a,b,fa,fb)
print(x, 100*abs(x-solution)/solution)
print()
print("Newton Raphson")
x = A
for i in range(loops):
x = NR(f,df,x)
print(x, 100*abs(x-solution)/solution)
test()
| gpl-3.0 |
HPPTECH/hpp_IOSTressTest | Refer/IOST_OLD_SRC/IOST_0.17/Libs/IOST_AboutDialog.py | 2 | 2723 | #!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : IOST_AboutDialog.py
# Date : Sep 21, 2016
# Author : HuuHoang Nguyen
# Contact : [email protected]
# : [email protected]
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import re
import operator
import sys
import base64
import time
from IOST_Prepare import IOST_Prepare
from IOST_Config import *
import gtk
import gtk.glade
class IOST_AboutDialog():
def __init__(self, glade_filename, window_name, object_name ,main_builder):
"This is a function get of Diaglog Help -> About Window"
self.IOST_AboutDialog_window_name = window_name
self.IOST_AboutDialog_object_name = object_name
if not main_builder:
self.IOST_AboutDialog_Builder = gtk.Builder()
self.IOST_AboutDialog_Builder.add_from_file(glade_filename)
self.IOST_AboutDialog_Builder.connect_signals(self)
else:
self.IOST_AboutDialog_Builder = main_builder
self.IOST_Objs[window_name][window_name+ object_name] = self.IOST_AboutDialog_Builder.get_object(window_name+object_name)
self.IOST_Objs[window_name][window_name+ object_name].set_version(self.IOST_Data["ProjectVersion"])
def Run(self, window_name, object_name):
self.IOST_Objs[window_name][window_name+object_name].run()
self.IOST_Objs[window_name][window_name+object_name].hide()
def ActiveLink(self, object_name):
self.IOST_Objs[self.IOST_AboutDialog_window_name][self.IOST_AboutDialog_window_name+ self.IOST_AboutDialog_object_name].hide()
def on_IOST_WHelpAbout_destroy(self, object, data=None):
""
self.IOST_Objs[self.IOST_AboutDialog_window_name][self.IOST_AboutDialog_window_name+self.IOST_AboutDialog_object_name].hide()
def on_IOST_WHelpAbout_DialogActionArea_destroy(self, object, data=None):
""
self.IOST_Objs[self.IOST_AboutDialog_window_name][self.IOST_AboutDialog_window_name+self.IOST_AboutDialog_object_name].hide()
def on_IOST_WHelpAbout_button_press_event(self, widget, event, data=None):
""
        self.IOST_Objs[self.IOST_AboutDialog_window_name][self.IOST_AboutDialog_window_name + self.IOST_AboutDialog_object_name].hide()
def on_IOST_WHelpAbout_DialogVB_button_press_event(self, widget, event, data=None):
""
        self.IOST_Objs[self.IOST_AboutDialog_window_name][self.IOST_AboutDialog_window_name + self.IOST_AboutDialog_object_name].hide()
| mit |
pietern/caffe2 | caffe2/python/helpers/conv.py | 3 | 11062 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package conv
# Module caffe2.python.helpers.conv
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags
def _ConvBase(
model,
is_nd,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
WeightInitializer=None,
BiasInitializer=None,
group=1,
transform_inputs=None,
use_cudnn=False,
order="NCHW",
cudnn_exhaustive_search=False,
ws_nbytes_limit=None,
**kwargs
):
kernels = []
if is_nd:
if not isinstance(kernel, list):
kernels = [kernel]
else:
kernels = kernel
else:
if isinstance(kernel, list):
            assert len(kernel) == 2, "Conv supports only a 2D kernel."
kernels = kernel
else:
kernels = [kernel] * 2
requested_engine = kwargs.get('engine')
if requested_engine is not None:
if use_cudnn and requested_engine != 'CUDNN':
raise ValueError(
'When use_cudnn=True, the only engine you can specify is '
'"CUDNN"')
elif not use_cudnn and requested_engine == 'CUDNN':
raise ValueError(
'When use_cudnn=False, the only engine you can specify is '
'""')
if use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = cudnn_exhaustive_search
if ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = ws_nbytes_limit
use_bias =\
False if ("no_bias" in kwargs and kwargs["no_bias"]) else True
blob_out = blob_out or model.net.NextName()
weight_shape = [dim_out]
if order == "NCHW":
weight_shape.append(int(dim_in / group))
weight_shape.extend(kernels)
else:
weight_shape.extend(kernels)
weight_shape.append(int(dim_in / group))
WeightInitializer = initializers.update_initializer(
WeightInitializer, weight_init, ("XavierFill", {})
)
BiasInitializer = initializers.update_initializer(
BiasInitializer, bias_init, ("ConstantFill", {})
)
if not model.init_params:
WeightInitializer = initializers.ExternalInitializer()
BiasInitializer = initializers.ExternalInitializer()
weight = model.create_param(
param_name=blob_out + '_w',
shape=weight_shape,
initializer=WeightInitializer,
tags=ParameterTags.WEIGHT
)
if use_bias:
bias = model.create_param(
param_name=blob_out + '_b',
shape=[dim_out, ],
initializer=BiasInitializer,
tags=ParameterTags.BIAS
)
if use_bias:
inputs = [blob_in, weight, bias]
else:
inputs = [blob_in, weight]
if transform_inputs is not None:
transform_inputs(model, blob_out, inputs)
# For the operator, we no longer need to provide the no_bias field
# because it can automatically figure this out from the number of
# inputs.
if 'no_bias' in kwargs:
del kwargs['no_bias']
if group != 1:
kwargs['group'] = group
if is_nd:
return model.net.Conv(
inputs,
blob_out,
kernels=kernels,
order=order,
**kwargs)
else:
if isinstance(kernel, list):
return model.net.Conv(
inputs,
blob_out,
kernel_h=kernel[0],
kernel_w=kernel[1],
order=order,
**kwargs)
else:
return model.net.Conv(
inputs,
blob_out,
kernel=kernel,
order=order,
**kwargs)
def conv_nd(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
WeightInitializer=None,
BiasInitializer=None,
group=1,
transform_inputs=None,
order="NCHW",
**kwargs
):
"""N-dimensional convolution for inputs with NCHW storage order.
"""
assert order == "NCHW", "ConvNd only supported for NCHW storage."
return _ConvBase(model, True, blob_in, blob_out, dim_in, dim_out, kernel,
weight_init, bias_init, WeightInitializer, BiasInitializer,
group, transform_inputs, order=order, **kwargs)
def conv(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
WeightInitializer=None,
BiasInitializer=None,
group=1,
transform_inputs=None,
**kwargs
):
"""2-dimensional convolution.
"""
return _ConvBase(model, False, blob_in, blob_out, dim_in, dim_out, kernel,
weight_init, bias_init, WeightInitializer, BiasInitializer,
group, transform_inputs, **kwargs)
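# Illustrative usage sketch (not part of the original helpers): the model, blob
# names and dimensions below are assumptions for demonstration only.
#
#   from caffe2.python import model_helper
#   model = model_helper.ModelHelper(name="example_net")
#   conv1 = conv(model, 'data', 'conv1', dim_in=3, dim_out=16, kernel=3)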
def conv_transpose(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
use_cudnn=False,
order="NCHW",
cudnn_exhaustive_search=False,
ws_nbytes_limit=None,
**kwargs
):
"""ConvTranspose.
"""
weight_init = weight_init if weight_init else ('XavierFill', {})
bias_init = bias_init if bias_init else ('ConstantFill', {})
blob_out = blob_out or model.net.NextName()
weight_shape = (
[dim_in, dim_out, kernel, kernel]
if order == "NCHW" else [dim_in, kernel, kernel, dim_out]
)
if model.init_params:
weight = model.param_init_net.__getattr__(weight_init[0])(
[],
blob_out + '_w',
shape=weight_shape,
**weight_init[1]
)
bias = model.param_init_net.__getattr__(bias_init[0])(
[],
blob_out + '_b',
shape=[dim_out, ],
**bias_init[1]
)
else:
weight = core.ScopedBlobReference(
blob_out + '_w', model.param_init_net)
bias = core.ScopedBlobReference(
blob_out + '_b', model.param_init_net)
model.AddParameter(weight, ParameterTags.WEIGHT)
model.AddParameter(bias, ParameterTags.BIAS)
if use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = cudnn_exhaustive_search
if ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = ws_nbytes_limit
return model.net.ConvTranspose(
[blob_in, weight, bias],
blob_out,
kernel=kernel,
order=order,
**kwargs
)
def group_conv(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
group=1,
**kwargs
):
"""Group Convolution.
This is essentially the same as Conv with a group argument passed in.
We specialize this for backward interface compatibility.
"""
return conv(model, blob_in, blob_out, dim_in, dim_out, kernel,
weight_init=weight_init, bias_init=bias_init,
group=group, **kwargs)
def group_conv_deprecated(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
group=1,
use_cudnn=False,
order="NCHW",
cudnn_exhaustive_search=False,
ws_nbytes_limit=None,
**kwargs
):
"""GroupConvolution's deprecated interface.
This is used to simulate a group convolution via split and concat. You
should always use the new group convolution in your new code.
"""
weight_init = weight_init if weight_init else ('XavierFill', {})
bias_init = bias_init if bias_init else ('ConstantFill', {})
use_bias = False if ("no_bias" in kwargs and kwargs["no_bias"]) else True
if use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = cudnn_exhaustive_search
if ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = ws_nbytes_limit
if dim_in % group:
raise ValueError("dim_in should be divisible by group.")
if dim_out % group:
raise ValueError("dim_out should be divisible by group.")
splitted_blobs = model.net.DepthSplit(
blob_in,
['_' + blob_out + '_gconv_split_' + str(i) for i in range(group)],
dimensions=[int(dim_in / group) for i in range(group)],
order=order
)
weight_shape = (
[dim_out / group, dim_in / group, kernel, kernel]
if order == "NCHW" else
[dim_out / group, kernel, kernel, dim_in / group]
)
# Make sure that the shapes are of int format. Especially for py3 where
# int division gives float output.
weight_shape = [int(v) for v in weight_shape]
conv_blobs = []
for i in range(group):
if model.init_params:
weight = model.param_init_net.__getattr__(weight_init[0])(
[],
blob_out + '_gconv_%d_w' % i,
shape=weight_shape,
**weight_init[1]
)
if use_bias:
bias = model.param_init_net.__getattr__(bias_init[0])(
[],
blob_out + '_gconv_%d_b' % i,
shape=[int(dim_out / group)],
**bias_init[1]
)
else:
weight = core.ScopedBlobReference(
blob_out + '_gconv_%d_w' % i, model.param_init_net)
if use_bias:
bias = core.ScopedBlobReference(
blob_out + '_gconv_%d_b' % i, model.param_init_net)
model.AddParameter(weight, ParameterTags.WEIGHT)
if use_bias:
model.AddParameter(bias, ParameterTags.BIAS)
if use_bias:
inputs = [weight, bias]
else:
inputs = [weight]
if 'no_bias' in kwargs:
del kwargs['no_bias']
conv_blobs.append(
splitted_blobs[i].Conv(
inputs,
blob_out + '_gconv_%d' % i,
kernel=kernel,
order=order,
**kwargs
)
)
concat, concat_dims = model.net.Concat(
conv_blobs,
[blob_out,
"_" + blob_out + "_concat_dims"],
order=order
)
return concat
| apache-2.0 |
KurtDeGreeff/infernal-twin | build/pip/build/lib.linux-i686-2.7/pip/_vendor/requests/packages/chardet/hebrewprober.py | 2929 | 13359 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
| gpl-3.0 |
diofeher/django-nfa | django/core/management/commands/runserver.py | 16 | 3339 | from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import os
import sys
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.'),
make_option('--adminmedia', dest='admin_media_path', default='',
help='Specifies the directory from which to serve admin media.'),
)
help = "Starts a lightweight Web server for development."
args = '[optional port number, or ipaddr:port]'
# Validation is called explicitly each time the server is reloaded.
requires_model_validation = False
def handle(self, addrport='', *args, **options):
import django
from django.core.servers.basehttp import run, AdminMediaHandler, WSGIServerException
from django.core.handlers.wsgi import WSGIHandler
if args:
raise CommandError('Usage is runserver %s' % self.args)
if not addrport:
addr = ''
port = '8000'
else:
try:
addr, port = addrport.split(':')
except ValueError:
addr, port = '', addrport
if not addr:
addr = '127.0.0.1'
if not port.isdigit():
raise CommandError("%r is not a valid port number." % port)
use_reloader = options.get('use_reloader', True)
admin_media_path = options.get('admin_media_path', '')
shutdown_message = options.get('shutdown_message', '')
quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
def inner_run():
from django.conf import settings
print "Validating models..."
self.validate(display_num_errors=True)
print "\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE)
print "Development server is running at http://%s:%s/" % (addr, port)
print "Quit the server with %s." % quit_command
try:
path = admin_media_path or django.__path__[0] + '/contrib/admin/media'
handler = AdminMediaHandler(WSGIHandler(), path)
run(addr, int(port), handler)
except WSGIServerException, e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
13: "You don't have permission to access that port.",
98: "That port is already in use.",
99: "That IP address can't be assigned-to.",
}
try:
error_text = ERRORS[e.args[0].args[0]]
except (AttributeError, KeyError):
error_text = str(e)
sys.stderr.write(self.style.ERROR("Error: %s" % error_text) + '\n')
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
print shutdown_message
sys.exit(0)
if use_reloader:
from django.utils import autoreload
autoreload.main(inner_run)
else:
inner_run()
| bsd-3-clause |
yufengg/tensorflow | tensorflow/contrib/boosted_trees/python/ops/model_ops.py | 17 | 5122 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model ops python wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.python.ops import gen_model_ops
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_deserialize
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_serialize
# pylint: disable=unused-import
from tensorflow.contrib.boosted_trees.python.ops.gen_model_ops import tree_ensemble_stamp_token
# pylint: enable=unused-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver
ops.NotDifferentiable("TreeEnsembleVariable")
ops.NotDifferentiable("TreeEnsembleSerialize")
ops.NotDifferentiable("TreeEnsembleDeserialize")
class TreeEnsembleVariableSavable(saver.BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for TreeEnsembleVariable."""
def __init__(self, tree_ensemble_handle, create_op, name):
"""Creates a TreeEnsembleVariableSavable object.
Args:
tree_ensemble_handle: handle to the tree ensemble variable.
create_op: the op to initialize the variable.
name: the name to save the tree ensemble variable under.
"""
stamp_token, ensemble_config = tree_ensemble_serialize(tree_ensemble_handle)
# slice_spec is useful for saving a slice from a variable.
# It's not meaningful the tree ensemble variable. So we just pass an empty
# value.
slice_spec = ""
specs = [
saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec,
name + "_stamp"),
saver.BaseSaverBuilder.SaveSpec(ensemble_config, slice_spec,
name + "_config"),
]
super(TreeEnsembleVariableSavable,
self).__init__(tree_ensemble_handle, specs, name)
self._tree_ensemble_handle = tree_ensemble_handle
self._create_op = create_op
def restore(self, restored_tensors, unused_restored_shapes):
"""Restores the associated tree ensemble from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore. Not meaningful for trees.
Returns:
The operation that restores the state of the tree ensemble variable.
"""
with ops.control_dependencies([self._create_op]):
return tree_ensemble_deserialize(
self._tree_ensemble_handle,
stamp_token=restored_tensors[0],
tree_ensemble_config=restored_tensors[1])
def tree_ensemble_variable(stamp_token,
tree_ensemble_config,
name,
container=None):
r"""Creates a tree ensemble model and returns a handle to it.
Args:
stamp_token: The initial stamp token value for the ensemble resource.
tree_ensemble_config: A `Tensor` of type `string`.
Serialized proto of the tree ensemble.
name: A name for the ensemble variable.
container: An optional `string`. Defaults to `""`.
Returns:
A `Tensor` of type mutable `string`. The handle to the tree ensemble.
"""
with ops.name_scope(name, "TreeEnsembleVariable") as name:
resource_handle = gen_model_ops.decision_tree_ensemble_resource_handle_op(
container, shared_name=name, name=name)
create_op = gen_model_ops.create_tree_ensemble_variable(
resource_handle, stamp_token, tree_ensemble_config)
is_initialized_op = gen_model_ops.tree_ensemble_is_initialized_op(
resource_handle)
# Adds the variable to the savable list.
saveable = TreeEnsembleVariableSavable(resource_handle, create_op,
resource_handle.name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
resources.register_resource(resource_handle, create_op, is_initialized_op)
return resource_handle
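# Usage sketch (illustrative, not part of the original module): with an empty
# serialized ensemble config, a handle can be created and its backing resource
# initialized before use. The session handling below is an assumption for
# demonstration only.
#
#   handle = tree_ensemble_variable(
#       stamp_token=0, tree_ensemble_config='', name='ensemble')
#   init_op = resources.initialize_resources(resources.shared_resources())
#   with tf.Session() as sess:  # assumes `import tensorflow as tf`
#     sess.run(init_op)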
# Conditionally load ops, they might already be statically linked in.
try:
_model_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_model_ops.so"))
except (errors.NotFoundError, IOError):
print("Error loading _model_ops.so")
| apache-2.0 |
aavanian/bokeh | examples/app/crossfilter/main.py | 5 | 2462 | import pandas as pd
from bokeh.layouts import row, widgetbox
from bokeh.models import Select
from bokeh.palettes import Spectral5
from bokeh.plotting import curdoc, figure
from bokeh.sampledata.autompg import autompg_clean as df
df = df.copy()
SIZES = list(range(6, 22, 3))
COLORS = Spectral5
N_SIZES = len(SIZES)
N_COLORS = len(COLORS)
# data cleanup
df.cyl = df.cyl.astype(str)
df.yr = df.yr.astype(str)
del df['name']
columns = sorted(df.columns)
discrete = [x for x in columns if df[x].dtype == object]
continuous = [x for x in columns if x not in discrete]
def create_figure():
xs = df[x.value].values
ys = df[y.value].values
x_title = x.value.title()
y_title = y.value.title()
kw = dict()
if x.value in discrete:
kw['x_range'] = sorted(set(xs))
if y.value in discrete:
kw['y_range'] = sorted(set(ys))
kw['title'] = "%s vs %s" % (x_title, y_title)
p = figure(plot_height=600, plot_width=800, tools='pan,box_zoom,hover,reset', **kw)
p.xaxis.axis_label = x_title
p.yaxis.axis_label = y_title
if x.value in discrete:
p.xaxis.major_label_orientation = pd.np.pi / 4
sz = 9
if size.value != 'None':
if len(set(df[size.value])) > N_SIZES:
groups = pd.qcut(df[size.value].values, N_SIZES, duplicates='drop')
else:
groups = pd.Categorical(df[size.value])
sz = [SIZES[xx] for xx in groups.codes]
c = "#31AADE"
if color.value != 'None':
        if len(set(df[color.value])) > N_COLORS:
groups = pd.qcut(df[color.value].values, N_COLORS, duplicates='drop')
else:
groups = pd.Categorical(df[color.value])
c = [COLORS[xx] for xx in groups.codes]
p.circle(x=xs, y=ys, color=c, size=sz, line_color="white", alpha=0.6, hover_color='white', hover_alpha=0.5)
return p
def update(attr, old, new):
layout.children[1] = create_figure()
x = Select(title='X-Axis', value='mpg', options=columns)
x.on_change('value', update)
y = Select(title='Y-Axis', value='hp', options=columns)
y.on_change('value', update)
size = Select(title='Size', value='None', options=['None'] + continuous)
size.on_change('value', update)
color = Select(title='Color', value='None', options=['None'] + continuous)
color.on_change('value', update)
controls = widgetbox([x, y, color, size], width=200)
layout = row(controls, create_figure())
curdoc().add_root(layout)
curdoc().title = "Crossfilter"
| bsd-3-clause |
espadrine/opera | chromium/src/tools/swarm_client/isolateserver_archive.py | 2 | 14494 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Archives a set of files to a server."""
import binascii
import cStringIO
import hashlib
import itertools
import logging
import optparse
import os
import sys
import time
import urllib
import zlib
import run_isolated
import run_test_cases
# The minimum size of files to upload directly to the blobstore.
MIN_SIZE_FOR_DIRECT_BLOBSTORE = 20 * 1024
# The number of files to check the isolate server per /contains query. The
# number here is a trade-off; the more per request, the lower the effect of HTTP
# round trip latency and TCP-level chattiness. On the other hand, larger values
# cause longer lookups, increasing the initial latency to start uploading, which
# is especially an issue for large files. This value is optimized for the "few
# thousands files to look up with minimal number of large files missing" case.
ITEMS_PER_CONTAINS_QUERY = 100
# A list of already compressed extension types that should not receive any
# compression before being uploaded.
ALREADY_COMPRESSED_TYPES = [
'7z', 'avi', 'cur', 'gif', 'h264', 'jar', 'jpeg', 'jpg', 'pdf', 'png',
'wav', 'zip'
]
def randomness():
"""Generates low-entropy randomness for MIME encoding.
Exists so it can be mocked out in unit tests.
"""
return str(time.time())
def encode_multipart_formdata(fields, files,
mime_mapper=lambda _: 'application/octet-stream'):
"""Encodes a Multipart form data object.
Args:
    fields: a sequence of (name, value) elements for
regular form fields.
files: a sequence of (name, filename, value) elements for data to be
uploaded as files.
mime_mapper: function to return the mime type from the filename.
Returns:
content_type: for httplib.HTTP instance
body: for httplib.HTTP instance
"""
boundary = hashlib.md5(randomness()).hexdigest()
body_list = []
for (key, value) in fields:
if isinstance(key, unicode):
      key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
body_list.append('--' + boundary)
body_list.append('Content-Disposition: form-data; name="%s"' % key)
body_list.append('')
body_list.append(value)
body_list.append('--' + boundary)
body_list.append('')
for (key, filename, value) in files:
if isinstance(key, unicode):
      key = key.encode('utf-8')
if isinstance(filename, unicode):
      filename = filename.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
body_list.append('--' + boundary)
body_list.append('Content-Disposition: form-data; name="%s"; '
'filename="%s"' % (key, filename))
body_list.append('Content-Type: %s' % mime_mapper(filename))
body_list.append('')
body_list.append(value)
body_list.append('--' + boundary)
body_list.append('')
if body_list:
body_list[-2] += '--'
body = '\r\n'.join(body_list)
content_type = 'multipart/form-data; boundary=%s' % boundary
return content_type, body
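# Example (illustrative values only): encoding one regular field and one file
# upload. The boundary comes from randomness(), so the exact body differs
# between runs.
#
#   content_type, body = encode_multipart_formdata(
#       fields=[('token', 'abc123')],
#       files=[('content', 'data.bin', 'raw file bytes')])
#   # content_type -> 'multipart/form-data; boundary=<md5 hexdigest>'
#   # body         -> the '\r\n'-joined multipart payload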
def sha1_file(filepath):
"""Calculates the SHA-1 of a file without reading it all in memory at once."""
digest = hashlib.sha1()
with open(filepath, 'rb') as f:
while True:
# Read in 1mb chunks.
chunk = f.read(1024*1024)
if not chunk:
break
digest.update(chunk)
return digest.hexdigest()
def url_open(url, **kwargs):
result = run_isolated.url_open(url, **kwargs)
if not result:
# If we get no response from the server, assume it is down and raise an
# exception.
raise run_isolated.MappingError('Unable to connect to server %s' % url)
return result
def upload_hash_content_to_blobstore(
generate_upload_url, data, hash_key, content):
"""Uploads the given hash contents directly to the blobsotre via a generated
url.
Arguments:
generate_upload_url: The url to get the new upload url from.
data: extra POST data.
hash_key: sha1 of the uncompressed version of content.
content: The contents to upload. Must fit in memory for now.
"""
logging.debug('Generating url to directly upload file to blobstore')
assert isinstance(hash_key, str), hash_key
assert isinstance(content, str), (hash_key, content)
# TODO(maruel): Support large files. This would require streaming support.
content_type, body = encode_multipart_formdata(
data, [('content', hash_key, content)])
for attempt in xrange(run_isolated.URL_OPEN_MAX_ATTEMPTS):
# Retry HTTP 50x here.
response = run_isolated.url_open(generate_upload_url, data=data)
if not response:
raise run_isolated.MappingError(
'Unable to connect to server %s' % generate_upload_url)
upload_url = response.read()
# Do not retry this request on HTTP 50x. Regenerate an upload url each time
# since uploading "consumes" the upload url.
result = run_isolated.url_open(
upload_url, data=body, content_type=content_type, retry_50x=False)
if result:
return result.read()
if attempt != run_isolated.URL_OPEN_MAX_ATTEMPTS - 1:
run_isolated.HttpService.sleep_before_retry(attempt, None)
raise run_isolated.MappingError(
'Unable to connect to server %s' % generate_upload_url)
class UploadRemote(run_isolated.Remote):
def __init__(self, namespace, base_url, token):
self.namespace = str(namespace)
self._token = token
super(UploadRemote, self).__init__(base_url)
def get_file_handler(self, base_url):
base_url = str(base_url)
def upload_file(content, hash_key):
# TODO(maruel): Detect failures.
hash_key = str(hash_key)
content_url = base_url.rstrip('/') + '/content/'
if len(content) > MIN_SIZE_FOR_DIRECT_BLOBSTORE:
url = '%sgenerate_blobstore_url/%s/%s' % (
content_url, self.namespace, hash_key)
# self._token is stored already quoted but it is unnecessary here, and
# only here.
data = [('token', urllib.unquote(self._token))]
upload_hash_content_to_blobstore(url, data, hash_key, content)
else:
url = '%sstore/%s/%s?token=%s' % (
content_url, self.namespace, hash_key, self._token)
url_open(url, data=content, content_type='application/octet-stream')
return upload_file
def check_files_exist_on_server(query_url, queries):
"""Queries the server to see which files from this batch already exist there.
Arguments:
    queries: The hash files to potentially upload to the server.
Returns:
missing_files: list of files that are missing on the server.
"""
logging.info('Checking existence of %d files...', len(queries))
body = ''.join(
(binascii.unhexlify(meta_data['h']) for (_, meta_data) in queries))
assert (len(body) % 20) == 0, repr(body)
response = url_open(
query_url, data=body, content_type='application/octet-stream').read()
if len(queries) != len(response):
raise run_isolated.MappingError(
'Got an incorrect number of responses from the server. Expected %d, '
'but got %d' % (len(queries), len(response)))
missing_files = [
queries[i] for i, flag in enumerate(response) if flag == chr(0)
]
logging.info('Queried %d files, %d cache hit',
len(queries), len(queries) - len(missing_files))
return missing_files
def compression_level(filename):
"""Given a filename calculates the ideal compression level to use."""
file_ext = os.path.splitext(filename)[1].lower()
# TODO(csharp): Profile to find what compression level works best.
return 0 if file_ext in ALREADY_COMPRESSED_TYPES else 7
def read_and_compress(filepath, level):
"""Reads a file and returns its content gzip compressed."""
compressor = zlib.compressobj(level)
compressed_data = cStringIO.StringIO()
with open(filepath, 'rb') as f:
while True:
chunk = f.read(run_isolated.ZIPPED_FILE_CHUNK)
if not chunk:
break
compressed_data.write(compressor.compress(chunk))
compressed_data.write(compressor.flush(zlib.Z_FINISH))
value = compressed_data.getvalue()
compressed_data.close()
return value
def zip_and_trigger_upload(infile, metadata, upload_function):
# TODO(csharp): Fix crbug.com/150823 and enable the touched logic again.
# if not metadata['T']:
compressed_data = read_and_compress(infile, compression_level(infile))
priority = (
run_isolated.Remote.HIGH if metadata.get('priority', '1') == '0'
else run_isolated.Remote.MED)
return upload_function(priority, compressed_data, metadata['h'], None)
def batch_files_for_check(infiles):
"""Splits list of files to check for existence on the server into batches.
Each batch corresponds to a single 'exists?' query to the server.
Yields:
batches: list of batches, each batch is a list of files.
"""
  # TODO(maruel): Make this adaptive, e.g. only query a few, like 10 in one
# request, for the largest files, since they are the ones most likely to be
# missing, then batch larger requests (up to 500) for the tail since they are
# likely to be present.
next_queries = []
items = ((k, v) for k, v in infiles.iteritems() if 's' in v)
for relfile, metadata in sorted(items, key=lambda x: -x[1]['s']):
next_queries.append((relfile, metadata))
if len(next_queries) == ITEMS_PER_CONTAINS_QUERY:
yield next_queries
next_queries = []
if next_queries:
yield next_queries
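# Example (illustrative; assumes ITEMS_PER_CONTAINS_QUERY were 2 just for the
# demonstration). Batches are emitted sorted by descending file size:
#
#   infiles = {
#       'a.bin': {'s': 300, 'h': 'aa' * 20},
#       'b.bin': {'s': 200, 'h': 'bb' * 20},
#       'c.bin': {'s': 100, 'h': 'cc' * 20},
#   }
#   batches = list(batch_files_for_check(infiles))
#   # -> [[('a.bin', ...), ('b.bin', ...)], [('c.bin', ...)]]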
def get_files_to_upload(contains_hash_url, infiles):
"""Yields files that are missing on the server."""
with run_isolated.ThreadPool(1, 16, 0, prefix='get_files_to_upload') as pool:
for files in batch_files_for_check(infiles):
pool.add_task(0, check_files_exist_on_server, contains_hash_url, files)
for missing_file in itertools.chain.from_iterable(pool.iter_results()):
yield missing_file
def upload_sha1_tree(base_url, indir, infiles, namespace):
"""Uploads the given tree to the given url.
Arguments:
    base_url: The base url, it is assumed that |base_url|/has/ can be used to
query if an element was already uploaded, and |base_url|/store/
can be used to upload a new element.
indir: Root directory the infiles are based in.
    infiles: dict of files to upload from |indir| to |base_url|.
namespace: The namespace to use on the server.
"""
logging.info('upload tree(base_url=%s, indir=%s, files=%d)' %
(base_url, indir, len(infiles)))
assert base_url.startswith('http'), base_url
base_url = base_url.rstrip('/')
# TODO(maruel): Make this request much earlier asynchronously while the files
# are being enumerated.
token = urllib.quote(url_open(base_url + '/content/get_token').read())
# Create a pool of workers to zip and upload any files missing from
# the server.
num_threads = run_test_cases.num_processors()
zipping_pool = run_isolated.ThreadPool(min(2, num_threads),
num_threads, 0, 'zip')
remote_uploader = UploadRemote(namespace, base_url, token)
# Starts the zip and upload process for files that are missing
# from the server.
contains_hash_url = '%s/content/contains/%s?token=%s' % (
base_url, namespace, token)
uploaded = []
for relfile, metadata in get_files_to_upload(contains_hash_url, infiles):
infile = os.path.join(indir, relfile)
zipping_pool.add_task(0, zip_and_trigger_upload, infile, metadata,
remote_uploader.add_item)
uploaded.append((relfile, metadata))
logging.info('Waiting for all files to finish zipping')
zipping_pool.join()
zipping_pool.close()
logging.info('All files zipped.')
logging.info('Waiting for all files to finish uploading')
# Will raise if any exception occurred.
remote_uploader.join()
remote_uploader.close()
logging.info('All files are uploaded')
total = len(infiles)
total_size = sum(metadata.get('s', 0) for metadata in infiles.itervalues())
logging.info(
'Total: %6d, %9.1fkb',
total,
sum(m.get('s', 0) for m in infiles.itervalues()) / 1024.)
cache_hit = set(infiles.iterkeys()) - set(x[0] for x in uploaded)
cache_hit_size = sum(infiles[i].get('s', 0) for i in cache_hit)
logging.info(
'cache hit: %6d, %9.1fkb, %6.2f%% files, %6.2f%% size',
len(cache_hit),
cache_hit_size / 1024.,
len(cache_hit) * 100. / total,
cache_hit_size * 100. / total_size if total_size else 0)
cache_miss = uploaded
cache_miss_size = sum(infiles[i[0]].get('s', 0) for i in cache_miss)
logging.info(
'cache miss: %6d, %9.1fkb, %6.2f%% files, %6.2f%% size',
len(cache_miss),
cache_miss_size / 1024.,
len(cache_miss) * 100. / total,
cache_miss_size * 100. / total_size if total_size else 0)
return 0
def main(args):
run_isolated.disable_buffering()
parser = optparse.OptionParser(
usage='%prog [options] <file1..fileN> or - to read from stdin',
description=sys.modules[__name__].__doc__)
parser.add_option('-r', '--remote', help='Remote server to archive to')
parser.add_option(
'-v', '--verbose',
action='count', default=0,
help='Use multiple times to increase verbosity')
parser.add_option('--namespace', default='default-gzip',
help='The namespace to use on the server.')
options, files = parser.parse_args(args)
levels = [logging.ERROR, logging.INFO, logging.DEBUG]
logging.basicConfig(
level=levels[min(len(levels)-1, options.verbose)],
format='[%(threadName)s] %(asctime)s,%(msecs)03d %(levelname)5s'
' %(module)15s(%(lineno)3d): %(message)s',
datefmt='%H:%M:%S')
if files == ['-']:
files = sys.stdin.readlines()
if not files:
parser.error('Nothing to upload')
if not options.remote:
parser.error('Nowhere to send. Please specify --remote')
# Load the necessary metadata. This is going to be rewritten eventually to be
# more efficient.
infiles = dict(
(
f,
{
's': os.stat(f).st_size,
'h': sha1_file(f),
}
)
for f in files)
with run_isolated.Profiler('Archive'):
return upload_sha1_tree(
base_url=options.remote,
indir=os.getcwd(),
infiles=infiles,
namespace=options.namespace)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
dobbymoodge/origin | vendor/github.com/getsentry/raven-go/docs/_sentryext/sentryext.py | 36 | 25388 | import re
import os
import sys
import json
import posixpath
from itertools import chain
from urlparse import urljoin
from docutils import nodes
from docutils.io import StringOutput
from docutils.nodes import document, section
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.environment import url_re
from sphinx.domains import Domain, ObjType
from sphinx.directives import ObjectDescription
from sphinx.util.osutil import relative_uri
from sphinx.util.compat import Directive
from sphinx.util.docfields import Field, TypedField
from sphinx.builders.html import StandaloneHTMLBuilder, DirectoryHTMLBuilder
_http_method_re = re.compile(r'^\s*:http-method:\s+(.*?)$(?m)')
_http_path_re = re.compile(r'^\s*:http-path:\s+(.*?)$(?m)')
_edition_re = re.compile(r'^(\s*)..\s+sentry:edition::\s*(.*?)$')
_docedition_re = re.compile(r'^..\s+sentry:docedition::\s*(.*?)$')
_url_var_re = re.compile(r'\{(.*?)\}')
EXTERNAL_DOCS_URL = 'https://docs.getsentry.com/hosted/'
API_BASE_URL = 'https://api.getsentry.com/'
def iter_url_parts(path):
last = 0
for match in _url_var_re.finditer(path):
before = path[last:match.start()]
if before:
yield False, before
yield True, match.group(1)
last = match.end()
after = path[last:]
if after:
yield False, after
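# Example (sketch): splitting an API path into literal and variable segments.
#
#   list(iter_url_parts('/projects/{org}/{project}/'))
#   # -> [(False, '/projects/'), (True, 'org'), (False, '/'),
#   #     (True, 'project'), (False, '/')]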
def resolve_toctree(env, docname, builder, toctree, collapse=False):
def _toctree_add_classes(node):
for subnode in node.children:
if isinstance(subnode, (addnodes.compact_paragraph,
nodes.list_item,
nodes.bullet_list)):
_toctree_add_classes(subnode)
elif isinstance(subnode, nodes.reference):
# for <a>, identify which entries point to the current
# document and therefore may not be collapsed
if subnode['refuri'] == docname:
list_item = subnode.parent.parent
if not subnode['anchorname']:
# give the whole branch a 'current' class
# (useful for styling it differently)
branchnode = subnode
while branchnode:
branchnode['classes'].append('current')
branchnode = branchnode.parent
# mark the list_item as "on current page"
if subnode.parent.parent.get('iscurrent'):
# but only if it's not already done
return
while subnode:
subnode['iscurrent'] = True
subnode = subnode.parent
# Now mark all siblings as well and also give the
# innermost expansion an extra class.
list_item['classes'].append('active')
for node in list_item.parent.children:
node['classes'].append('relevant')
def _entries_from_toctree(toctreenode, parents, subtree=False):
refs = [(e[0], e[1]) for e in toctreenode['entries']]
entries = []
for (title, ref) in refs:
refdoc = None
if url_re.match(ref):
raise NotImplementedError('Not going to implement this (url)')
elif ref == 'env':
raise NotImplementedError('Not going to implement this (env)')
else:
if ref in parents:
env.warn(ref, 'circular toctree references '
'detected, ignoring: %s <- %s' %
(ref, ' <- '.join(parents)))
continue
refdoc = ref
toc = env.tocs[ref].deepcopy()
env.process_only_nodes(toc, builder, ref)
if title and toc.children and len(toc.children) == 1:
child = toc.children[0]
for refnode in child.traverse(nodes.reference):
if refnode['refuri'] == ref and \
not refnode['anchorname']:
refnode.children = [nodes.Text(title)]
if not toc.children:
# empty toc means: no titles will show up in the toctree
env.warn_node(
'toctree contains reference to document %r that '
'doesn\'t have a title: no link will be generated'
% ref, toctreenode)
# delete everything but the toplevel title(s)
# and toctrees
for toplevel in toc:
# nodes with length 1 don't have any children anyway
if len(toplevel) > 1:
subtrees = toplevel.traverse(addnodes.toctree)
toplevel[1][:] = subtrees
# resolve all sub-toctrees
for subtocnode in toc.traverse(addnodes.toctree):
i = subtocnode.parent.index(subtocnode) + 1
for item in _entries_from_toctree(subtocnode, [refdoc] +
parents, subtree=True):
subtocnode.parent.insert(i, item)
i += 1
subtocnode.parent.remove(subtocnode)
entries.extend(toc.children)
if not subtree:
ret = nodes.bullet_list()
ret += entries
return [ret]
return entries
tocentries = _entries_from_toctree(toctree, [])
if not tocentries:
return None
newnode = addnodes.compact_paragraph('', '')
newnode.extend(tocentries)
newnode['toctree'] = True
_toctree_add_classes(newnode)
for refnode in newnode.traverse(nodes.reference):
if not url_re.match(refnode['refuri']):
refnode.parent.parent['classes'].append('ref-' + refnode['refuri'])
refnode['refuri'] = builder.get_relative_uri(
docname, refnode['refuri']) + refnode['anchorname']
return newnode
def make_link_builder(app, base_page):
def link_builder(edition, to_current=False):
here = app.builder.get_target_uri(base_page)
if to_current:
uri = relative_uri(here, '../' + edition + '/' +
here.lstrip('/')) or './'
else:
root = app.builder.get_target_uri(app.env.config.master_doc) or './'
uri = relative_uri(here, root) or ''
if app.builder.name in ('sentryhtml', 'html'):
uri = (posixpath.dirname(uri or '.') or '.').rstrip('/') + \
'/../' + edition + '/index.html'
else:
uri = uri.rstrip('/') + '/../' + edition + '/'
return uri
return link_builder
def html_page_context(app, pagename, templatename, context, doctree):
# toc_parts = get_rendered_toctree(app.builder, pagename)
# context['full_toc'] = toc_parts['main']
def build_toc(split_toc=None):
return get_rendered_toctree(app.builder, pagename, collapse=False,
split_toc=split_toc)
context['build_toc'] = build_toc
context['link_to_edition'] = make_link_builder(app, pagename)
def render_sitemap():
return get_rendered_toctree(app.builder, 'sitemap',
collapse=False)['main']
context['render_sitemap'] = render_sitemap
context['sentry_doc_variant'] = app.env.config.sentry_doc_variant
def extract_toc(fulltoc, selectors):
entries = []
for refnode in fulltoc.traverse(nodes.reference):
container = refnode.parent.parent
if any(cls[:4] == 'ref-' and cls[4:] in selectors
for cls in container['classes']):
parent = container.parent
new_parent = parent.deepcopy()
del new_parent.children[:]
new_parent += container
entries.append(new_parent)
parent.remove(container)
if not parent.children:
parent.parent.remove(parent)
newnode = addnodes.compact_paragraph('', '')
newnode.extend(entries)
newnode['toctree'] = True
return newnode
def get_rendered_toctree(builder, docname, collapse=True, split_toc=None):
fulltoc = build_full_toctree(builder, docname, collapse=collapse)
rv = {}
def _render_toc(node):
return builder.render_partial(node)['fragment']
if split_toc:
for key, selectors in split_toc.iteritems():
rv[key] = _render_toc(extract_toc(fulltoc, selectors))
rv['main'] = _render_toc(fulltoc)
return rv
def build_full_toctree(builder, docname, collapse=True):
env = builder.env
doctree = env.get_doctree(env.config.master_doc)
toctrees = []
for toctreenode in doctree.traverse(addnodes.toctree):
toctrees.append(resolve_toctree(env, docname, builder, toctreenode,
collapse=collapse))
if not toctrees:
return None
result = toctrees[0]
for toctree in toctrees[1:]:
if toctree:
result.extend(toctree.children)
env.resolve_references(result, docname, builder)
return result
def parse_rst(state, content_offset, doc):
node = nodes.section()
# hack around title style bookkeeping
surrounding_title_styles = state.memo.title_styles
surrounding_section_level = state.memo.section_level
state.memo.title_styles = []
state.memo.section_level = 0
state.nested_parse(doc, content_offset, node, match_titles=1)
state.memo.title_styles = surrounding_title_styles
state.memo.section_level = surrounding_section_level
return node.children
def find_cached_api_json(env, filename):
return os.path.join(env.srcdir, '_apicache', filename)
def api_url_rule(text):
def add_url_thing(rv, value):
for is_var, part in iter_url_parts(value):
if is_var:
part = '{%s}' % part
node = nodes.emphasis(part, part)
else:
node = nodes.inline(part, part)
rv.append(node)
container = nodes.inline(classes=['url'])
domain_part = nodes.inline(classes=['domain', 'skip-latex'])
# add_url_thing(domain_part, API_BASE_URL.rstrip('/'))
container += domain_part
add_url_thing(container, text)
rv = nodes.inline(classes=['urlwrapper'])
rv += container
return rv
class URLPathField(Field):
def make_entry(self, fieldarg, content):
text = u''.join(x.rawsource for x in content)
return fieldarg, api_url_rule(text)
class AuthField(Field):
def make_entry(self, fieldarg, content):
rv = []
flags = set(x.strip() for x in
u''.join(x.rawsource for x in content).split(',')
if x.strip())
if 'required' in flags:
rv.append('required')
elif 'optional' in flags:
rv.append('optional')
else:
rv.append('unauthenticated')
if 'user-context-needed' in flags:
rv.append('user context needed')
text = ', '.join(rv)
node = nodes.inline(text, text)
return fieldarg, node
class ApiEndpointDirective(ObjectDescription):
option_spec = {
'noindex': directives.flag
}
doc_field_types = [
Field('http_method', label='Method', has_arg=False,
names=('http-method',)),
URLPathField('http_path', label='Path', has_arg=False,
names=('http-path',)),
TypedField('query_parameter', label='Query Parameters',
names=('qparam', 'query-parameter'),
typerolename='obj', typenames=('qparamtype',),
can_collapse=True),
TypedField('path_parameter', label='Path Parameters',
names=('pparam', 'path-parameter'),
typerolename='obj', typenames=('pparamtype',),
can_collapse=True),
TypedField('body_parameter', label='Parameters',
names=('param', 'parameter'),
typerolename='obj', typenames=('paramtype',),
can_collapse=True),
Field('returnvalue', label='Returns', has_arg=False,
names=('returns', 'return')),
Field('returntype', label='Return type', has_arg=False,
names=('rtype',)),
AuthField('auth', label='Authentication', has_arg=False,
names=('auth',)),
]
def needs_arglist(self):
return False
def handle_signature(self, sig, signode):
name = sig.strip()
fullname = name
content = '\n'.join(self.content)
method = _http_method_re.search(content)
path = _http_path_re.search(content)
if method and path:
prefix = method.group(1)
signode += addnodes.desc_type(prefix + ' ', prefix + ' ')
signode += api_url_rule(path.group(1))
return fullname
class ApiScenarioDirective(Directive):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
def get_scenario_info(self):
ident = self.arguments[0].encode('ascii', 'replace')
with open(find_cached_api_json(self.state.document.settings.env,
'scenarios/%s.json' % ident)) as f:
return json.load(f)
def iter_body(self, data, is_json=True):
if data is None:
return
if is_json:
data = json.dumps(data, indent=2)
for line in data.splitlines():
yield line.rstrip()
def write_request(self, doc, request_info):
doc.append('.. class:: api-request', '')
doc.append('', '')
doc.append('.. sourcecode:: http', '')
doc.append('', '')
doc.append(' %s %s HTTP/1.1' % (
request_info['method'],
request_info['path'],
), '')
special_headers = [
('Authorization', 'Basic ___ENCODED_API_KEY___'),
('Host', 'app.getsentry.com'),
]
for key, value in chain(special_headers,
sorted(request_info['headers'].items())):
doc.append(' %s: %s' % (key, value), '')
doc.append('', '')
for item in self.iter_body(request_info['data'],
request_info['is_json']):
doc.append(' ' + item, '')
def write_response(self, doc, response_info):
doc.append('.. class:: api-response', '')
doc.append('', '')
doc.append('.. sourcecode:: http', '')
doc.append('', '')
doc.append(' HTTP/1.1 %s %s' % (
response_info['status'],
response_info['reason'],
), '')
for key, value in sorted(response_info['headers'].items()):
doc.append(' %s: %s' % (key.title(), value), '')
doc.append('', '')
for item in self.iter_body(response_info['data'],
response_info['is_json']):
doc.append(' ' + item, '')
def run(self):
doc = ViewList()
info = self.get_scenario_info()
for request in info['requests']:
self.write_request(doc, request['request'])
doc.append('', '')
self.write_response(doc, request['response'])
doc.append('', '')
return parse_rst(self.state, self.content_offset, doc)
class SentryDomain(Domain):
name = 'sentry'
label = 'Sentry'
object_types = {
'api-endpoint': ObjType('api-endpoint', 'api-endpoint', 'obj'),
'type': ObjType('type', 'type', 'obj'),
}
directives = {
'api-endpoint': ApiEndpointDirective,
'api-scenario': ApiScenarioDirective,
}
def preprocess_source(app, docname, source):
source_lines = source[0].splitlines()
def _find_block(indent, lineno):
block_indent = len(indent.expandtabs())
rv = []
actual_indent = None
while lineno < end:
line = source_lines[lineno]
if not line.strip():
rv.append(u'')
else:
expanded_line = line.expandtabs()
indent = len(expanded_line) - len(expanded_line.lstrip())
if indent > block_indent:
if actual_indent is None or indent < actual_indent:
actual_indent = indent
rv.append(line)
else:
break
lineno += 1
if rv:
rv.append(u'')
if actual_indent:
rv = [x[actual_indent:] for x in rv]
return rv, lineno
result = []
lineno = 0
end = len(source_lines)
while lineno < end:
line = source_lines[lineno]
match = _edition_re.match(line)
if match is None:
# Skip sentry:docedition. We don't want those.
match = _docedition_re.match(line)
if match is None:
result.append(line)
lineno += 1
continue
lineno += 1
indent, tags = match.groups()
tags = set(x.strip() for x in tags.split(',') if x.strip())
should_include = app.env.config.sentry_doc_variant in tags
block_lines, lineno = _find_block(indent, lineno)
if should_include:
result.extend(block_lines)
source[:] = [u'\n'.join(result)]
def builder_inited(app):
    # XXX: this currently means things only stay referenced after a
# deletion of a link after a clean build :(
if not hasattr(app.env, 'sentry_referenced_docs'):
app.env.sentry_referenced_docs = {}
def track_references(app, doctree):
docname = app.env.temp_data['docname']
rd = app.env.sentry_referenced_docs
for toctreenode in doctree.traverse(addnodes.toctree):
for e in toctreenode['entries']:
rd.setdefault(str(e[1]), set()).add(docname)
def is_referenced(docname, references):
if docname == 'index':
return True
seen = set([docname])
to_process = set(references.get(docname) or ())
while to_process:
if 'index' in to_process:
return True
next = to_process.pop()
seen.add(next)
for backlink in references.get(next) or ():
if backlink in seen:
continue
else:
to_process.add(backlink)
return False
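# Example (sketch): 'api/errors' is reachable from 'index' through 'guide',
# while a document nobody links to is not:
#
#   refs = {'guide': set(['index']), 'api/errors': set(['guide'])}
#   is_referenced('api/errors', refs)  # -> True
#   is_referenced('orphan', refs)      # -> False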
class SphinxBuilderMixin(object):
build_wizard_fragment = False
@property
def add_permalinks(self):
return not self.build_wizard_fragment
def get_target_uri(self, *args, **kwargs):
rv = super(SphinxBuilderMixin, self).get_target_uri(*args, **kwargs)
if self.build_wizard_fragment:
rv = urljoin(EXTERNAL_DOCS_URL, rv)
return rv
def get_relative_uri(self, from_, to, typ=None):
if self.build_wizard_fragment:
return self.get_target_uri(to, typ)
return super(SphinxBuilderMixin, self).get_relative_uri(
from_, to, typ)
def write_doc(self, docname, doctree):
original_field_limit = self.docsettings.field_name_limit
try:
self.docsettings.field_name_limit = 120
if is_referenced(docname, self.app.env.sentry_referenced_docs):
return super(SphinxBuilderMixin, self).write_doc(docname, doctree)
else:
print 'skipping because unreferenced'
finally:
self.docsettings.field_name_limit = original_field_limit
def __iter_wizard_files(self):
for dirpath, dirnames, filenames in os.walk(self.srcdir,
followlinks=True):
dirnames[:] = [x for x in dirnames if x[:1] not in '_.']
for filename in filenames:
if filename == 'sentry-doc-config.json':
full_path = os.path.join(self.srcdir, dirpath)
base_path = full_path[len(self.srcdir):].strip('/\\') \
.replace(os.path.sep, '/')
yield os.path.join(full_path, filename), base_path
def __build_wizard_section(self, base_path, snippets):
trees = {}
rv = []
def _build_node(node):
original_header_level = self.docsettings.initial_header_level
# bump initial header level to two
self.docsettings.initial_header_level = 2
            # indicate that we're building for the wizard fragments.
# This changes url generation and more.
self.build_wizard_fragment = True
# Embed pygments colors as inline styles
original_args = self.highlighter.formatter_args
self.highlighter.formatter_args = original_args.copy()
self.highlighter.formatter_args['noclasses'] = True
try:
sub_doc = document(self.docsettings,
doctree.reporter)
sub_doc += node
destination = StringOutput(encoding='utf-8')
self.current_docname = docname
self.docwriter.write(sub_doc, destination)
self.docwriter.assemble_parts()
rv.append(self.docwriter.parts['fragment'])
finally:
self.build_wizard_fragment = False
self.highlighter.formatter_args = original_args
self.docsettings.initial_header_level = original_header_level
for snippet in snippets:
if '#' not in snippet:
snippet_path = snippet
section_name = None
else:
snippet_path, section_name = snippet.split('#', 1)
docname = posixpath.join(base_path, snippet_path)
if docname in trees:
doctree = trees.get(docname)
else:
doctree = self.env.get_and_resolve_doctree(docname, self)
trees[docname] = doctree
if section_name is None:
_build_node(next(iter(doctree.traverse(section))))
else:
for sect in doctree.traverse(section):
if section_name in sect['ids']:
_build_node(sect)
return u'\n\n'.join(rv)
def __write_wizard(self, data, base_path):
for uid, framework_data in data.get('wizards', {}).iteritems():
try:
body = self.__build_wizard_section(base_path,
framework_data['snippets'])
except IOError as e:
print >> sys.stderr, 'Failed to build wizard "%s" (%s)' % (uid, e)
continue
fn = os.path.join(self.outdir, '_wizards', '%s.json' % uid)
try:
os.makedirs(os.path.dirname(fn))
except OSError:
pass
doc_link = framework_data.get('doc_link')
if doc_link is not None:
doc_link = urljoin(EXTERNAL_DOCS_URL,
posixpath.join(base_path, doc_link))
with open(fn, 'w') as f:
json.dump({
'name': framework_data.get('name') or uid.title(),
'is_framework': framework_data.get('is_framework', False),
'doc_link': doc_link,
'client_lib': framework_data.get('client_lib'),
'body': body
}, f)
f.write('\n')
def __write_wizards(self):
for filename, base_path in self.__iter_wizard_files():
with open(filename) as f:
data = json.load(f)
self.__write_wizard(data, base_path)
def finish(self):
super(SphinxBuilderMixin, self).finish()
self.__write_wizards()
class SentryStandaloneHTMLBuilder(SphinxBuilderMixin, StandaloneHTMLBuilder):
name = 'sentryhtml'
class SentryDirectoryHTMLBuilder(SphinxBuilderMixin, DirectoryHTMLBuilder):
name = 'sentrydirhtml'
def setup(app):
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
app.add_domain(SentryDomain)
app.connect('builder-inited', builder_inited)
app.connect('html-page-context', html_page_context)
app.connect('source-read', preprocess_source)
app.connect('doctree-read', track_references)
app.add_builder(SentryStandaloneHTMLBuilder)
app.add_builder(SentryDirectoryHTMLBuilder)
app.add_config_value('sentry_doc_variant', None, 'env')
def activate():
"""Changes the config to something that the sentry doc infrastructure
expects.
"""
frm = sys._getframe(1)
globs = frm.f_globals
globs.setdefault('sentry_doc_variant',
os.environ.get('SENTRY_DOC_VARIANT', 'self'))
globs['extensions'] = list(globs.get('extensions') or ()) + ['sentryext']
globs['primary_domain'] = 'std'
globs['exclude_patterns'] = list(globs.get('exclude_patterns')
or ()) + ['_sentryext']
| apache-2.0 |
2014c2g18/c2g18 | w2/static/Brython2.0.0-20140209-164925/Lib/types.py | 756 | 3167 | """
Define names for built-in types that aren't directly accessible as a builtin.
"""
import sys
# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "__next__" attributes instead.
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None) # Same as FunctionType
CodeType = type(_f.__code__)
MappingProxyType = type(type.__dict__)
SimpleNamespace = type(sys.implementation)
def _g():
yield 1
GeneratorType = type(_g())
class _C:
def _m(self): pass
MethodType = type(_C()._m)
BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
ModuleType = type(sys)
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
TracebackType = type(tb)
FrameType = type(tb.tb_frame)
tb = None; del tb
# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.__code__)
MemberDescriptorType = type(FunctionType.__globals__)
del sys, _f, _g, _C, # Not for export
# Provide a PEP 3115 compliant mechanism for class creation
def new_class(name, bases=(), kwds=None, exec_body=None):
"""Create a class object dynamically using the appropriate metaclass."""
meta, ns, kwds = prepare_class(name, bases, kwds)
if exec_body is not None:
exec_body(ns)
return meta(name, bases, ns, **kwds)
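# Example (sketch): build a class dynamically; exec_body receives the
# namespace prepared by the metaclass and may populate it in place.
#
#   Point = new_class('Point', (object,),
#                     exec_body=lambda ns: ns.update(x=0, y=0))
#   Point().x  # -> 0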
def prepare_class(name, bases=(), kwds=None):
"""Call the __prepare__ method of the appropriate metaclass.
Returns (metaclass, namespace, kwds) as a 3-tuple
*metaclass* is the appropriate metaclass
*namespace* is the prepared class namespace
*kwds* is an updated copy of the passed in kwds argument with any
'metaclass' entry removed. If no kwds argument is passed in, this will
be an empty dict.
"""
if kwds is None:
kwds = {}
else:
kwds = dict(kwds) # Don't alter the provided mapping
if 'metaclass' in kwds:
meta = kwds.pop('metaclass')
else:
if bases:
meta = type(bases[0])
else:
meta = type
if isinstance(meta, type):
# when meta is a type, we first determine the most-derived metaclass
# instead of invoking the initial candidate directly
meta = _calculate_meta(meta, bases)
if hasattr(meta, '__prepare__'):
ns = meta.__prepare__(name, bases, **kwds)
else:
ns = {}
return meta, ns, kwds
def _calculate_meta(meta, bases):
"""Calculate the most derived metaclass."""
winner = meta
for base in bases:
base_meta = type(base)
if issubclass(winner, base_meta):
continue
if issubclass(base_meta, winner):
winner = base_meta
continue
# else:
raise TypeError("metaclass conflict: "
"the metaclass of a derived class "
"must be a (non-strict) subclass "
"of the metaclasses of all its bases")
return winner
| gpl-2.0 |
pythonchelle/opencomparison | apps/apiv1/tests/test_resources.py | 3 | 1333 | import json
from django.test import TestCase
from django.core.urlresolvers import reverse
from apiv1.tests import data
class ResourcesV1Tests(TestCase):
base_kwargs = {'api_name': 'v1'}
def setUp(self):
data.load()
def test_01_category(self):
kwargs = {'resource_name': 'category'}
kwargs.update(self.base_kwargs)
# check 200's
list_url = reverse('api_dispatch_list', kwargs=kwargs)
response = self.client.get(list_url)
self.assertEqual(response.status_code, 200)
kwargs['pk'] = 'apps'
cat_url = reverse('api_dispatch_detail', kwargs=kwargs)
self.assertTrue(cat_url in response.content)
response = self.client.get(cat_url)
self.assertEqual(response.status_code, 200)
def test_02_grid(self):
kwargs = {'resource_name': 'grid'}
kwargs.update(self.base_kwargs)
# check 200's
list_url = reverse('api_dispatch_list', kwargs=kwargs)
response = self.client.get(list_url)
self.assertEqual(response.status_code, 200)
kwargs['pk'] = 'testing'
grid_url = reverse('api_dispatch_detail', kwargs=kwargs)
self.assertTrue(grid_url in response.content)
response = self.client.get(grid_url)
self.assertEqual(response.status_code, 200)
| mit |
martinblech/pyfpm | pyfpm/pattern.py | 2 | 10947 | """
This module holds the actual pattern implementations.
End users should not normally have to deal with it, except for constructing
patterns programmatically without making use of the pattern syntax parser.
"""
import re
try:
# python 2.x base string
_basestring = basestring
except NameError:
# python 3.x base string
_basestring = str
class Match(object):
"""
    Represents the result of successfully matching a pattern against an
object. The `ctx` attribute is a :class:`dict` that contains the value for
each bound name in the pattern, if any.
"""
def __init__(self, ctx=None, value=None):
if ctx is None:
ctx = {}
self.ctx = ctx
self.value = value
def __eq__(self, other):
return (isinstance(other, Match) and
self.__dict__ == other.__dict__)
def __repr__(self):
return 'Match(%s)' % self.ctx
class Pattern(object):
"""
Base Pattern class. Abstracts the behavior common to all pattern types,
such as name bindings, conditionals and operator overloading for combining
several patterns.
"""
def __init__(self):
self.bound_name = None
self.condition = None
def match(self, other, ctx=None):
"""
Match this pattern against an object. Operator: `<<`.
:param other: the object this pattern should be matched against.
:param ctx: optional context. If none, an empty one will be
automatically created.
:type ctx: dict
:returns: a :class:`Match` if successful, `None` otherwise.
"""
match = self._does_match(other, ctx)
if match:
ctx = match.ctx
value = match.value or other
if self.bound_name:
if ctx is None:
ctx = {}
try:
previous = ctx[self.bound_name]
if previous != value:
return None
except KeyError:
ctx[self.bound_name] = value
if self.condition is None or self.condition(**ctx):
return Match(ctx)
return None
def __lshift__(self, other):
return self.match(other)
def bind(self, name):
"""Bind this pattern to the given name. Operator: `%`."""
self.bound_name = name
return self
def __mod__(self, name):
return self.bind(name)
def if_(self, condition):
"""
Add a boolean condition to this pattern. Operator: `/`.
:param condition: must accept the match context as keyword
arguments and return a boolean-ish value.
:type condition: callable
"""
self.condition = condition
return self
def __div__(self, condition):
return self.if_(condition)
def __truediv__(self, condition):
return self.if_(condition)
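    # Example (sketch): bind the matched value to 'x' and only accept
    # positive integers; the condition receives the match context as
    # keyword arguments.
    #
    #   p = InstanceOfPattern(int).bind('x').if_(lambda x: x > 0)
    #   p.match(3)   # -> Match({'x': 3})
    #   p.match(-1)  # -> None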
def multiply(self, n):
"""
Build a :class:`ListPattern` that matches `n` instances of this pattern.
Operator: `*`.
Example:
>>> p = EqualsPattern(1).multiply(3)
>>> p.match((1, 1, 1))
Match({})
"""
return build(*([self]*n))
def __mul__(self, length):
return self.multiply(length)
def __rmul__(self, length):
return self.multiply(length)
def or_with(self, other):
"""
Build a new :class:`OrPattern` with this or the other pattern.
Operator: `|`.
Example:
>>> p = EqualsPattern(1).or_with(InstanceOfPattern(str))
>>> p.match('hello')
Match({})
>>> p.match(1)
Match({})
>>> p.match(2)
"""
patterns = []
for pattern in (self, other):
if isinstance(pattern, OrPattern):
patterns.extend(pattern.patterns)
else:
patterns.append(pattern)
return OrPattern(*patterns)
def __or__(self, other):
return self.or_with(other)
def head_tail_with(self, other):
"""
Head-tail concatenate this pattern with the other. The lhs pattern will
be the head and the other will be the tail. Operator: `+`.
Example:
>>> p = InstanceOfPattern(int).head_tail_with(ListPattern())
>>> p.match([1])
Match({})
>>> p.match([1, 2])
"""
return ListPattern(self, other)
def __add__(self, other):
return self.head_tail_with(other)
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join('='.join((str(k), repr(v))) for (k, v) in
self.__dict__.items() if v))
class AnyPattern(Pattern):
"""Pattern that matches anything."""
def _does_match(self, other, ctx):
return Match(ctx)
class EqualsPattern(Pattern):
"""Pattern that only matches objects that equal the given object."""
def __init__(self, obj):
super(EqualsPattern, self).__init__()
self.obj = obj
def _does_match(self, other, ctx):
if self.obj == other:
return Match(ctx)
else:
return None
class InstanceOfPattern(Pattern):
"""Pattern that only matches instances of the given class."""
def __init__(self, cls):
super(InstanceOfPattern, self).__init__()
self.cls = cls
def _does_match(self, other, ctx):
if isinstance(other, self.cls):
return Match(ctx)
else:
return None
_CompiledRegex = type(re.compile(''))
class RegexPattern(Pattern):
"""Pattern that only matches strings that match the given regex."""
def __init__(self, regex):
super(RegexPattern, self).__init__()
if not isinstance(regex, _CompiledRegex):
regex = re.compile(regex)
self.regex = regex
def _does_match(self, other, ctx):
re_match = self.regex.match(other)
if re_match:
return Match(ctx, re_match.groups())
return None
class ListPattern(Pattern):
"""Pattern that only matches iterables whose head matches `head_pattern` and
whose tail matches `tail_pattern`"""
def __init__(self, head_pattern=None, tail_pattern=None):
super(ListPattern, self).__init__()
if head_pattern is not None and tail_pattern is None:
tail_pattern = ListPattern()
self.head_pattern = head_pattern
self.tail_pattern = tail_pattern
def head_tail_with(self, other):
return ListPattern(self.head_pattern,
self.tail_pattern.head_tail_with(other))
def _does_match(self, other, ctx):
try:
if (self.head_pattern is None and
self.tail_pattern is None and
len(other) == 0):
return Match(ctx)
except TypeError:
return None
if isinstance(other, _basestring):
return None
try:
head, tail = other[0], other[1:]
except (IndexError, TypeError):
return None
if self.head_pattern is not None:
match = self.head_pattern.match(head, ctx)
if match:
ctx = match.ctx
match = self.tail_pattern.match(tail, ctx)
if match:
ctx = match.ctx
else:
return None
else:
return None
else:
if len(other):
return None
return Match(ctx)
class NamedTuplePattern(Pattern):
"""Pattern that only matches named tuples of the given class and whose
contents match the given patterns."""
def __init__(self, casecls, *initpatterns):
super(NamedTuplePattern, self).__init__()
self.casecls_pattern = InstanceOfPattern(casecls)
if (len(initpatterns) == 1 and
isinstance(initpatterns[0], ListPattern)):
self.initargs_pattern = initpatterns[0]
else:
self.initargs_pattern = build(*initpatterns, **dict(is_list=True))
def _does_match(self, other, ctx):
match = self.casecls_pattern.match(other, ctx)
if not match:
return None
ctx = match.ctx
return self.initargs_pattern.match(other, ctx)
class OrPattern(Pattern):
"""Pattern that matches whenever any of the inner patterns match."""
def __init__(self, *patterns):
if len(patterns) < 2:
raise ValueError('need at least two patterns')
super(OrPattern, self).__init__()
self.patterns = patterns
def _does_match(self, other, ctx):
for pattern in self.patterns:
if ctx is not None:
ctx_ = ctx.copy()
else:
ctx_ = None
match = pattern.match(other, ctx_)
if match:
return match
return None
def build(*args, **kwargs):
"""
Shorthand pattern factory.
Examples:
>>> build() == AnyPattern()
True
>>> build(1) == EqualsPattern(1)
True
>>> build('abc') == EqualsPattern('abc')
True
>>> build(str) == InstanceOfPattern(str)
True
>>> build(re.compile('.*')) == RegexPattern('.*')
True
>>> build(()) == build([]) == ListPattern()
True
>>> build([1]) == build((1,)) == ListPattern(EqualsPattern(1),
... ListPattern())
True
>>> build(int, str, 'a') == ListPattern(InstanceOfPattern(int),
... ListPattern(InstanceOfPattern(str),
... ListPattern(EqualsPattern('a'))))
True
>>> try:
... from collections import namedtuple
... MyTuple = namedtuple('MyTuple', 'a b c')
... build(MyTuple(1, 2, 3)) == NamedTuplePattern(MyTuple, 1, 2, 3)
... except ImportError:
... True
True
"""
arglen = len(args)
if arglen > 1:
head, tail = args[0], args[1:]
return ListPattern(build(head), build(*tail, **(dict(is_list=True))))
if arglen == 0:
return AnyPattern()
(arg,) = args
if kwargs.get('is_list', False):
return ListPattern(build(arg))
if isinstance(arg, Pattern):
return arg
if isinstance(arg, _CompiledRegex):
return RegexPattern(arg)
if isinstance(arg, tuple) and hasattr(arg, '_fields'):
return NamedTuplePattern(arg.__class__, *map(build, arg))
if isinstance(arg, type):
return InstanceOfPattern(arg)
if isinstance(arg, (tuple, list)):
if len(arg) == 0:
return ListPattern()
return build(*arg, **(dict(is_list=True)))
return EqualsPattern(arg)
| mit |
MarcosCommunity/odoo | comunity_modules/product_prices_update/wizard/wizard_update_prices.py | 3 | 7297 | # -*- coding: utf-8 -*-
from openerp import fields, models, api, _
from openerp.exceptions import Warning
from openerp import tools
class prices_update_wizard(models.TransientModel):
_name = 'product.prices_update_wizard'
price_type = fields.Selection(
[('list_price', 'Sale Price'), ('standard_price', 'Cost Price')],
required=True,
string='Price Type')
    price_discount = fields.Float('Price Discount')
price_surcharge = fields.Float(
        'Price Surcharge', help='Specify the fixed amount to add or subtract (if negative) to the amount calculated with the discount.')
price_round = fields.Float('Price Rounding', help="Sets the price so that it is a multiple of this value.\n"
"Rounding is applied after the discount and before the surcharge.\n"
"To have prices that end in 9.99, set rounding 10, surcharge -0.01"
)
check = fields.Boolean('Check before changing')
@api.multi
def change_prices(self, context=None):
active_ids = context.get('active_ids', [])
products_vals = []
if not active_ids:
raise Warning(_('You must select at least one product'))
if self.check is True:
actions = self.env.ref(
'product_prices_update.action_prices_update_wizard_result')
if actions:
action_read = actions.read()[0]
action_read['context'] = {
'product_tmpl_ids': active_ids,
'price_type': self.price_type,
'price_discount': self.price_discount,
'price_surcharge': self.price_surcharge,
'price_round': self.price_round,
}
return action_read
else:
for prodct in self.env['product.template'].browse(active_ids):
if self.price_type == 'list_price':
old_price = prodct.list_price
elif self.price_type == 'standard_price':
old_price = prodct.standard_price
else:
raise Warning(
_('Price type "%s" is not implemented') % (self.price_type))
new_price = self.calc_new_price(
old_price, self.price_discount,
self.price_surcharge, self.price_round)
vals = {
'product_tmpl': prodct,
'new_price': new_price,
}
products_vals.append(vals)
return self.update_prices(products_vals, self.price_type)
@api.model
def update_prices(self, products_vals, price_type):
product_ids = []
change_price_obj = self.pool.get("stock.change.standard.price")
for line in products_vals:
if line['product_tmpl'].cost_method == u'average' and self.price_type == 'standard_price':
new_change = change_price_obj.create(self.env.cr, self.env.uid, {"new_price": line['new_price']}, context=self.env.context)
context = {'active_id': line['product_tmpl'].id,
'active_ids': [line['product_tmpl'].id],
'active_model': 'product.template',
'lang': 'es_DO',
'params': {'_push_me': False,
'action': 176,
'limit': 80,
'model': 'product.template',
'page': 0,
'view_type': 'list'},
'search_disable_custom_filters': True,
'tz': 'America/Santo_Domingo',
'uid': 1}
change_price_obj.change_price(self.env.cr, self.env.uid, new_change, context=context)
else:
line['product_tmpl'].write({price_type: line['new_price']})
product_ids.append(line['product_tmpl'].id)
return {
'type': 'ir.actions.act_window',
'name': _('Products'),
'res_model': 'product.template',
'view_type': 'form',
'view_mode': 'tree,form',
'domain': [('id', 'in', product_ids)],
'target': 'current',
'nodestroy': True,
}
@api.model
def calc_new_price(
self, old_price, price_discount, price_surcharge, price_round):
new_price = old_price * \
(1.0 + (price_discount or 0.0))
if price_round:
new_price = tools.float_round(
new_price, precision_rounding=price_round)
if price_surcharge:
new_price += price_surcharge
return new_price
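    # Worked example (illustrative values): price_discount=-0.10,
    # price_round=10 and price_surcharge=-0.01 turn an old price of 110.0
    # into a ".99"-style price:
    #   110.0 * (1.0 + -0.10) = 99.0 -> rounded to a multiple of 10 -> 100.0
    #   100.0 + (-0.01) -> 99.99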
class prices_update_wizard_result_detail(models.TransientModel):
_name = 'product.prices_update_wizard_result_detail'
result_id = fields.Many2one(
'product.prices_update_wizard_result', 'Result')
product_tmpl_id = fields.Many2one(
'product.template', 'Product Template',
readonly=True)
old_price = fields.Float(
'Old Price',
readonly=True)
new_price = fields.Float(
'New Price',
required=True
)
class prices_update_wizard_result(models.TransientModel):
_name = 'product.prices_update_wizard_result'
@api.model
def _get_details(self):
ret = []
price_discount = self._context.get('price_discount', 0.0)
price_surcharge = self._context.get('price_surcharge', 0.0)
price_round = self._context.get('price_round', 0.0)
product_tmpl_ids = self._context.get('product_tmpl_ids', [])
price_type = self._context.get('price_type', False)
for product_tmpl in self.env['product.template'].browse(
product_tmpl_ids):
if price_type == 'list_price':
old_price = product_tmpl.list_price
elif price_type == 'standard_price':
old_price = product_tmpl.standard_price
else:
raise Warning(
_('Price type "%s" is not implemented') % (price_type))
vals = {
'product_tmpl_id': product_tmpl.id,
'old_price': old_price,
'new_price': self.env[
'product.prices_update_wizard'].calc_new_price(
old_price, price_discount,
price_surcharge, price_round),
}
ret.append(vals)
return ret
detail_ids = fields.One2many(
'product.prices_update_wizard_result_detail',
'result_id',
string='Products Detail',
default=_get_details,
)
@api.multi
def confirm(self):
products_vals = []
price_type = self._context.get('price_type', False)
for line in self.detail_ids:
vals = {
'product_tmpl': line.product_tmpl_id,
'new_price': line.new_price,
}
products_vals.append(vals)
return self.env['product.prices_update_wizard'].update_prices(
products_vals, price_type)
| agpl-3.0 |
orospakr/peephole | peephole/peephole_client.py | 1 | 5019 | #!/usr/bin/env python
# Peephole - a D-Bus service providing access to small LCD panels
# Copyright (C) 2007-2008 Andrew Clunis
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gobject
from gettext import gettext as _
import dbus
import dbus.service
import dbus.mainloop.glib
from dbus.exceptions import DBusException
import logging
import struct
import sys
import os
from optparse import OptionParser
from peephole.peepholed import PEEPHOLE_WELL_KNOWN_NAME
from peephole.dbus_settings import *
def getButtons(selected_lcd, bus):
button_paths = []
buttons = {}
button_paths = selected_lcd.GetButtons()
for path in button_paths:
button_proxy = bus.get_object(PEEPHOLE_WELL_KNOWN_NAME, path)
button = dbus.Interface(button_proxy, dbus_interface=BUTTON_INTERFACE)
button_name = button.GetName()
buttons[button_name] = button
return buttons
def main():
usage = "%prog: [--lcd=LCD], needs one of [--list] [--print-buttons]"
parser = OptionParser(usage)
parser.add_option("-L", "--lcd", dest="lcd",
help="LCD to interact with")
parser.add_option("-l", "--list", action="store_true",
dest="list",
help="Print list of LCDs in the system")
parser.add_option("-b", "--print-buttons", action="store_true",
dest="print_buttons",
help="Print button events on stdout as they occur")
parser.add_option("-B", "--button", dest="button",
help="Button to interact with, used with --set-button-backlight")
parser.add_option("-O", "--button-backlight-on", dest="button_backlight", action="store_true",
help="Turn on button's (specified by --button) backlight")
parser.add_option("-o", "--button-backlight-off", dest="button_backlight", action="store_false",
help="Turn off button's (specified by --button) backlight")
(options, args) = parser.parse_args()
if not (options.list or options.print_buttons or (options.button_backlight is not None)):
parser.error("You must specify an option.")
mainloop = gobject.MainLoop()
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
peep_proxy = bus.get_object(PEEPHOLE_WELL_KNOWN_NAME,
PEEPHOLE_PATH)
peep = dbus.Interface(peep_proxy, dbus_interface=PEEPHOLE_INTERFACE)
try:
lcd_paths = peep.GetLCDs()
except DBusException, e:
print "\nPeephole D-Bus service is unavailable. Possible causes: \n\
1. Missing D-Bus activation configuration -- alternatively, the daemon may \n\
also be invoked manually. \n\
2. Missing security policy (see README) \n\
3. Daemon was started, but no LCDs were detected."
sys.exit(-1)
lcds = {}
for path in lcd_paths:
lcd_proxy = bus.get_object(PEEPHOLE_WELL_KNOWN_NAME, path)
lcd = dbus.Interface(lcd_proxy, dbus_interface=LCD_INTERFACE)
lcd_name = lcd.GetName()
lcds[lcd_name] = lcd
if options.list:
for name, lcd in lcds.items():
print name
sys.exit(0)
selected_lcd = None
if options.lcd is not None:
if options.lcd not in lcds:
parser.error("That LCD does not exist.")
selected_lcd = lcds[options.lcd]
print "Selected: '%s'" % options.lcd
else:
for name, l in lcds.items():
print "Fell back to default LCD: '%s'" % name
selected_lcd = l
break
buttons = getButtons(selected_lcd, bus)
if options.button_backlight is not None:
if options.button is None:
parser.error("You must specify --button")
if options.button not in buttons:
parser.error("That button does not exist.")
button = buttons[options.button]
button.SetBacklight(options.button_backlight)
if options.print_buttons:
class PressReceiver(object):
def __init__(self, button, name):
self.button = button
self.name = name
def pressed(self):
print self.name
for name, btn in buttons.items():
receiver = PressReceiver(btn, name)
btn.connect_to_signal("Pressed", receiver.pressed)
mainloop.run()
sys.exit(0)
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
puchake/market-teller | src/data_handling/set_assembly.py | 1 | 7628 | import numpy as np
import datetime as dt
from os import listdir, path
def gather_mats(
split_mat, avg_5_mat, avg_25_mat, avg_50_mat, dates_mat, min_year
):
"""
Collects chosen columns from split and avg matrices and adds dates_mat
indicator data for each row (each day).
:param split_mat: original company data matrix
:param avg_5_mat: matrix with EMA of length 5 of closing prices
:param avg_25_mat: matrix with EMA of length 25 of closing prices
:param avg_50_mat: matrix with EMA of length 50 of closing prices
    :param dates_mat: matrix of profit indicators for each date
    :param min_year: earliest year present in the companies data, used as the
        row offset when indexing dates_mat
    :return: matrix of gathered data
"""
# Gather matrix columns indices.
gather_split_i = 0
gather_avg_5_i = 1
gather_avg_25_i = 2
gather_avg_50_i = 3
gather_volume_i = 4
gather_dates_indicator_i = 5
# Indices of date fragment columns in split matrix.
dates_indices = [1, 2, 3]
# Indices of elements in dates matrix.
all_i = 0
profit_i = 1
# Index of close price column and volume column.
close_i = 5
volume_i = 6
    # Number of gathered values. The original close price, the 3 averages,
    # the volume and the dates profit indicator will be collected.
gathered_row_len = 6
# Create gathered mat with row count of avg_50_mat as it is the shortest
# of all input matrices.
gathered_mat = np.zeros([avg_50_mat.shape[0], gathered_row_len])
for i in range(avg_50_mat.shape[0]):
# Gather split, avg_5, avg_25, avg_50 and volume columns.
gathered_mat[-(i + 1), gather_split_i] = split_mat[-(i + 1), close_i]
gathered_mat[-(i + 1), gather_avg_5_i] = avg_5_mat[-(i + 1), close_i]
gathered_mat[-(i + 1), gather_avg_25_i] = avg_25_mat[-(i + 1), close_i]
gathered_mat[-(i + 1), gather_avg_50_i] = avg_50_mat[-(i + 1), close_i]
gathered_mat[-(i + 1), gather_volume_i] = split_mat[-(i + 1), volume_i]
# Construct the date of current row and access dates matrix indicator.
date = dt.date(*(split_mat[-(i + 1), dates_indices].astype(np.int32)))
all_count = dates_mat[
date.year - min_year, date.month - 1,
date.day - 1, all_i
]
profit_count = dates_mat[
date.year - min_year, date.month - 1,
date.day - 1, profit_i
]
# Set indicator column element of current row to calculated indicator.
gathered_mat[-(i + 1), gather_dates_indicator_i] = profit_count / \
all_count
return gathered_mat
def label_mat(mat):
"""
Assign labels to each row of gathered matrix.
:param mat: previously gathered matrix
:return: labels for gathered matrix rows
"""
# Index and range of average used for labeling.
gather_avg_25_i = 2
avg_range = 25
# Labels for rising and falling price.
rising_i = 1
falling_i = 0
num_classes = 2
labels = np.zeros([mat.shape[0] - avg_range + 1, num_classes])
for i in range(mat.shape[0] - avg_range + 1):
# If average 25 day price rises after 24 days assign rising label, else
# assign falling label.
if mat[i, gather_avg_25_i] < mat[i + avg_range - 1, gather_avg_25_i]:
labels[i, rising_i] = 1.0
else:
labels[i, falling_i] = 1.0
return labels
def normalize_mat(mat):
"""
Bring all values in matrix to around -1, 1 range with mean 0.
:param mat: matrix of gathered data
:return: normalized matrix
"""
# Gather matrix columns indices.
gather_split_i = 0
gather_avg_5_i = 1
gather_avg_25_i = 2
gather_avg_50_i = 3
gather_volume_i = 4
gather_dates_indicator_i = 5
# Normalize prices. We want to keep relationship between prices
# (eg. avg_5 > split) untouched, so we use single set of max and mean for
# split and all averages.
prices_indices = [
gather_split_i, gather_avg_5_i, gather_avg_25_i, gather_avg_50_i
]
mat[:, prices_indices] /= np.max(mat[:, prices_indices])
mat[:, prices_indices] *= 2
mat[:, prices_indices] -= np.mean(mat[:, prices_indices])
# Normalize volume.
mat[:, gather_volume_i] /= np.max(mat[:, gather_volume_i])
mat[:, gather_volume_i] *= 2
mat[:, gather_volume_i] -= np.mean(mat[:, gather_volume_i])
# Subtract 1.0 from dates indicator multiplied by 2.0 as it is already in
# range 0.0, 1.0 and we don't want characteristic values to vary between
# matrices as it is data outside of one company scope.
dates_indicator_mean = 1.0
mat[:, gather_dates_indicator_i] *= 2
mat[:, gather_dates_indicator_i] -= dates_indicator_mean
return mat
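# Worked illustration (toy numbers, not project data) of the scaling done in
# normalize_mat above: each group of columns is divided by its max, doubled
# and mean-centred, which lands values roughly in the -1..1 range with mean 0.
#
#   >>> col = np.array([5.0, 10.0, 20.0])
#   >>> col = col / col.max() * 2          # [0.5, 1.0, 2.0]
#   >>> col - col.mean()                   # approx. [-0.67, -0.17, 0.83]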
def assemble_set(
split_in_dir_path, avg_5_in_dir_path, avg_25_in_dir_path,
avg_50_in_dir_path, dates_mat_path, min_year,
data_out_dir_path, labels_out_dir_path
):
"""
Gathers companies data, labels and normalizes it.
:param split_in_dir_path: path to dir containing split matrices
:param avg_5_in_dir_path: path to avg_5 matrices in dir
:param avg_25_in_dir_path: path to avg_25 matrices in dir
:param avg_50_in_dir_path: path to avg_50 matrices in dir
:param dates_mat_path: path to dates matrix
:param min_year: min year contained in companies data
:param data_out_dir_path: path to data output dir
:param labels_out_dir_path: path to labels output dir
:return: -
"""
# Minimal size of the gathered matrix.
labeling_range = 25
# Load dates matrix.
dates_mat = np.load(dates_mat_path)
for filename in listdir(split_in_dir_path):
# If company matrix exists in all variants.
if path.isfile(avg_5_in_dir_path + "/" + filename) and \
path.isfile(avg_25_in_dir_path + "/" + filename) and \
path.isfile(avg_50_in_dir_path + "/" + filename):
# Load all matrices.
split_mat = np.load(split_in_dir_path + "/" + filename)
avg_5_mat = np.load(avg_5_in_dir_path + "/" + filename)
avg_25_mat = np.load(avg_25_in_dir_path + "/" + filename)
avg_50_mat = np.load(avg_50_in_dir_path + "/" + filename)
# Gather data from them, label it and normalize if we have
# enough data to label it.
if avg_50_mat.shape[0] >= labeling_range:
gathered_mat = gather_mats(
split_mat, avg_5_mat, avg_25_mat,
avg_50_mat, dates_mat, min_year
)
labels = label_mat(gathered_mat)
labeled_rows = labels.shape[0]
normalized_mat = normalize_mat(gathered_mat[:labeled_rows])
# Save results.
np.save(data_out_dir_path + "/" + filename, normalized_mat)
np.save(labels_out_dir_path + "/" + filename, labels)
def main():
"""
Main function of this script.
:return: -
"""
# Path used in assembly and previously discovered min year value.
split_in_dir_path = "../../data/split"
avg_5_in_dir_path = "../../data/averaged_5"
avg_25_in_dir_path = "../../data/averaged_25"
avg_50_in_dir_path = "../../data/averaged_50"
dates_mat_path = "../../data/dates_matrix/dates_matrix.npy"
min_year = 1962
data_out_dir_path = "../../data/rnn_set/data"
labels_out_dir_path = "../../data/rnn_set/labels"
assemble_set(
split_in_dir_path, avg_5_in_dir_path, avg_25_in_dir_path,
avg_50_in_dir_path, dates_mat_path, min_year,
data_out_dir_path, labels_out_dir_path
)
if __name__ == "__main__":
main() | mit |
FlaPer87/django-nonrel | django/dispatch/dispatcher.py | 19 | 8313 | import weakref
from django.dispatch import saferef
WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref)
def _make_id(target):
if hasattr(target, 'im_func'):
return (id(target.im_self), id(target.im_func))
return id(target)
class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
        { receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
if weak is True, then receiver must be weak-referencable (more
precisely saferef.safeRef() must be able to create a reference
to the receiver).
Receivers must be able to accept keyword arguments.
If receivers have a dispatch_uid attribute, the receiver will
not be added if another receiver already exists with that
dispatch_uid.
sender
                The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
                Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.DEBUG:
import inspect
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
# Not all callables are inspectable with getargspec, so we'll
# try a couple different ways but in the end fall back on assuming
# it is -- we don't want to prevent registration of valid but weird
# callables.
try:
argspec = inspect.getargspec(receiver)
except TypeError:
try:
argspec = inspect.getargspec(receiver.__call__)
except (TypeError, AttributeError):
argspec = None
if argspec:
assert argspec[2] is not None, \
"Signal receivers must accept keyword arguments (**kwargs)."
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
receiver = saferef.safeRef(receiver, onDelete=self._remove_receiver)
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
def disconnect(self, receiver=None, sender=None, weak=True, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
weak
The weakref state to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
for index in xrange(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
del self.receivers[index]
break
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop, so it is quite possible to not have all
        receivers called if a receiver raises an error.
Arguments:
sender
                The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers:
return responses
for receiver in self._live_receivers(_make_id(sender)):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
                The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
if any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver.
"""
responses = []
if not self.receivers:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(_make_id(sender)):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception, err:
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _live_receivers(self, senderkey):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, then returning only
live receivers.
"""
none_senderkey = _make_id(None)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == none_senderkey or r_senderkey == senderkey:
if isinstance(receiver, WEAKREF_TYPES):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
receivers.append(receiver)
else:
receivers.append(receiver)
return receivers
def _remove_receiver(self, receiver):
"""
Remove dead receivers from connections.
"""
to_remove = []
for key, connected_receiver in self.receivers:
if connected_receiver == receiver:
to_remove.append(key)
for key in to_remove:
for idx, (r_key, _) in enumerate(self.receivers):
if r_key == key:
del self.receivers[idx]
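# Hedged usage sketch (not part of Django itself): how a Signal built from this
# module is typically declared, connected and fired. The signal name, receiver
# and sender value below are illustrative only.
#
#   pizza_done = Signal(providing_args=["size"])
#
#   def report_size(sender, size=None, **kwargs):
#       print "%s finished a pizza of size %s" % (sender, size)
#
#   pizza_done.connect(report_size)
#   pizza_done.send(sender="Pizzeria", size=42)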
| bsd-3-clause |
margaret/python-datamuse | datamuse/test_api.py | 1 | 1445 | import unittest
import datamuse
from datamuse import Datamuse
class DatamuseTestCase(unittest.TestCase):
def setUp(self):
self.api = Datamuse()
self.max = 5
# words endpoint
def test_sounds_like(self):
args = {'sl': 'orange', 'max': self.max}
data = self.api.words(**args)
        self.assertIsInstance(data, list)
print("sounds like", data)
def test_rhymes(self):
args = {'rel_rhy': 'orange', 'max': self.max}
data = self.api.words(**args)
self.assertTrue(len(data) <= self.max)
print("rhyme", data)
def test_near_rhymes(self):
args = {'rel_nry': 'orange', 'max': self.max}
data = self.api.words(**args)
self.assertTrue(len(data) <= self.max)
print("near rhyme", data)
def test_bad_request(self):
args = {'foo':42}
with self.assertRaises(ValueError):
data = self.api.words(**args)
def test_set_max(self):
        self.assertEqual(self.api.max, 100)
self.api.set_max_default(10)
self.assertEquals(self.api.max, 10)
data = self.api.words(ml='ringing in the ears')
self.assertEquals(len(data), 10)
def test_set_max_error(self):
with self.assertRaises(ValueError):
self.api.set_max_default(-2)
self.api.set_max_default(0)
self.api.set_max_default(1001)
if __name__ == "__main__":
unittest.main()
| mit |
jetskijoe/headphones | lib/unidecode/x0d5.py | 253 | 4680 | data = (
'pyuk', # 0x00
'pyut', # 0x01
'pyup', # 0x02
'pyuh', # 0x03
'peu', # 0x04
'peug', # 0x05
'peugg', # 0x06
'peugs', # 0x07
'peun', # 0x08
'peunj', # 0x09
'peunh', # 0x0a
'peud', # 0x0b
'peul', # 0x0c
'peulg', # 0x0d
'peulm', # 0x0e
'peulb', # 0x0f
'peuls', # 0x10
'peult', # 0x11
'peulp', # 0x12
'peulh', # 0x13
'peum', # 0x14
'peub', # 0x15
'peubs', # 0x16
'peus', # 0x17
'peuss', # 0x18
'peung', # 0x19
'peuj', # 0x1a
'peuc', # 0x1b
'peuk', # 0x1c
'peut', # 0x1d
'peup', # 0x1e
'peuh', # 0x1f
'pyi', # 0x20
'pyig', # 0x21
'pyigg', # 0x22
'pyigs', # 0x23
'pyin', # 0x24
'pyinj', # 0x25
'pyinh', # 0x26
'pyid', # 0x27
'pyil', # 0x28
'pyilg', # 0x29
'pyilm', # 0x2a
'pyilb', # 0x2b
'pyils', # 0x2c
'pyilt', # 0x2d
'pyilp', # 0x2e
'pyilh', # 0x2f
'pyim', # 0x30
'pyib', # 0x31
'pyibs', # 0x32
'pyis', # 0x33
'pyiss', # 0x34
'pying', # 0x35
'pyij', # 0x36
'pyic', # 0x37
'pyik', # 0x38
'pyit', # 0x39
'pyip', # 0x3a
'pyih', # 0x3b
'pi', # 0x3c
'pig', # 0x3d
'pigg', # 0x3e
'pigs', # 0x3f
'pin', # 0x40
'pinj', # 0x41
'pinh', # 0x42
'pid', # 0x43
'pil', # 0x44
'pilg', # 0x45
'pilm', # 0x46
'pilb', # 0x47
'pils', # 0x48
'pilt', # 0x49
'pilp', # 0x4a
'pilh', # 0x4b
'pim', # 0x4c
'pib', # 0x4d
'pibs', # 0x4e
'pis', # 0x4f
'piss', # 0x50
'ping', # 0x51
'pij', # 0x52
'pic', # 0x53
'pik', # 0x54
'pit', # 0x55
'pip', # 0x56
'pih', # 0x57
'ha', # 0x58
'hag', # 0x59
'hagg', # 0x5a
'hags', # 0x5b
'han', # 0x5c
'hanj', # 0x5d
'hanh', # 0x5e
'had', # 0x5f
'hal', # 0x60
'halg', # 0x61
'halm', # 0x62
'halb', # 0x63
'hals', # 0x64
'halt', # 0x65
'halp', # 0x66
'halh', # 0x67
'ham', # 0x68
'hab', # 0x69
'habs', # 0x6a
'has', # 0x6b
'hass', # 0x6c
'hang', # 0x6d
'haj', # 0x6e
'hac', # 0x6f
'hak', # 0x70
'hat', # 0x71
'hap', # 0x72
'hah', # 0x73
'hae', # 0x74
'haeg', # 0x75
'haegg', # 0x76
'haegs', # 0x77
'haen', # 0x78
'haenj', # 0x79
'haenh', # 0x7a
'haed', # 0x7b
'hael', # 0x7c
'haelg', # 0x7d
'haelm', # 0x7e
'haelb', # 0x7f
'haels', # 0x80
'haelt', # 0x81
'haelp', # 0x82
'haelh', # 0x83
'haem', # 0x84
'haeb', # 0x85
'haebs', # 0x86
'haes', # 0x87
'haess', # 0x88
'haeng', # 0x89
'haej', # 0x8a
'haec', # 0x8b
'haek', # 0x8c
'haet', # 0x8d
'haep', # 0x8e
'haeh', # 0x8f
'hya', # 0x90
'hyag', # 0x91
'hyagg', # 0x92
'hyags', # 0x93
'hyan', # 0x94
'hyanj', # 0x95
'hyanh', # 0x96
'hyad', # 0x97
'hyal', # 0x98
'hyalg', # 0x99
'hyalm', # 0x9a
'hyalb', # 0x9b
'hyals', # 0x9c
'hyalt', # 0x9d
'hyalp', # 0x9e
'hyalh', # 0x9f
'hyam', # 0xa0
'hyab', # 0xa1
'hyabs', # 0xa2
'hyas', # 0xa3
'hyass', # 0xa4
'hyang', # 0xa5
'hyaj', # 0xa6
'hyac', # 0xa7
'hyak', # 0xa8
'hyat', # 0xa9
'hyap', # 0xaa
'hyah', # 0xab
'hyae', # 0xac
'hyaeg', # 0xad
'hyaegg', # 0xae
'hyaegs', # 0xaf
'hyaen', # 0xb0
'hyaenj', # 0xb1
'hyaenh', # 0xb2
'hyaed', # 0xb3
'hyael', # 0xb4
'hyaelg', # 0xb5
'hyaelm', # 0xb6
'hyaelb', # 0xb7
'hyaels', # 0xb8
'hyaelt', # 0xb9
'hyaelp', # 0xba
'hyaelh', # 0xbb
'hyaem', # 0xbc
'hyaeb', # 0xbd
'hyaebs', # 0xbe
'hyaes', # 0xbf
'hyaess', # 0xc0
'hyaeng', # 0xc1
'hyaej', # 0xc2
'hyaec', # 0xc3
'hyaek', # 0xc4
'hyaet', # 0xc5
'hyaep', # 0xc6
'hyaeh', # 0xc7
'heo', # 0xc8
'heog', # 0xc9
'heogg', # 0xca
'heogs', # 0xcb
'heon', # 0xcc
'heonj', # 0xcd
'heonh', # 0xce
'heod', # 0xcf
'heol', # 0xd0
'heolg', # 0xd1
'heolm', # 0xd2
'heolb', # 0xd3
'heols', # 0xd4
'heolt', # 0xd5
'heolp', # 0xd6
'heolh', # 0xd7
'heom', # 0xd8
'heob', # 0xd9
'heobs', # 0xda
'heos', # 0xdb
'heoss', # 0xdc
'heong', # 0xdd
'heoj', # 0xde
'heoc', # 0xdf
'heok', # 0xe0
'heot', # 0xe1
'heop', # 0xe2
'heoh', # 0xe3
'he', # 0xe4
'heg', # 0xe5
'hegg', # 0xe6
'hegs', # 0xe7
'hen', # 0xe8
'henj', # 0xe9
'henh', # 0xea
'hed', # 0xeb
'hel', # 0xec
'helg', # 0xed
'helm', # 0xee
'helb', # 0xef
'hels', # 0xf0
'helt', # 0xf1
'help', # 0xf2
'helh', # 0xf3
'hem', # 0xf4
'heb', # 0xf5
'hebs', # 0xf6
'hes', # 0xf7
'hess', # 0xf8
'heng', # 0xf9
'hej', # 0xfa
'hec', # 0xfb
'hek', # 0xfc
'het', # 0xfd
'hep', # 0xfe
'heh', # 0xff
)
| gpl-3.0 |
KiChjang/servo | tests/wpt/web-platform-tests/tools/third_party/h2/examples/fragments/client_https_setup_fragment.py | 14 | 3887 | # -*- coding: utf-8 -*-
"""
Client HTTPS Setup
~~~~~~~~~~~~~~~~~~
This example code fragment demonstrates how to set up a HTTP/2 client that
negotiates HTTP/2 using NPN and ALPN. For the sake of maximum explanatory value
this code uses the synchronous, low-level sockets API: however, if you're not
using sockets directly (e.g. because you're using asyncio), you should focus on
the set up required for the SSLContext object. For other concurrency libraries
you may need to use other setup (e.g. for Twisted you'll need to use
IProtocolNegotiationFactory).
This code requires Python 3.5 or later.
"""
import h2.connection
import socket
import ssl
def establish_tcp_connection():
"""
This function establishes a client-side TCP connection. How it works isn't
very important to this example. For the purpose of this example we connect
to localhost.
"""
return socket.create_connection(('localhost', 443))
def get_http2_ssl_context():
"""
This function creates an SSLContext object that is suitably configured for
HTTP/2. If you're working with Python TLS directly, you'll want to do the
exact same setup as this function does.
"""
# Get the basic context from the standard library.
ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
# RFC 7540 Section 9.2: Implementations of HTTP/2 MUST use TLS version 1.2
# or higher. Disable TLS 1.1 and lower.
ctx.options |= (
ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
)
# RFC 7540 Section 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable
# compression.
ctx.options |= ssl.OP_NO_COMPRESSION
# RFC 7540 Section 9.2.2: "deployments of HTTP/2 that use TLS 1.2 MUST
# support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256". In practice, the
# blocklist defined in this section allows only the AES GCM and ChaCha20
# cipher suites with ephemeral key negotiation.
ctx.set_ciphers("ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20")
# We want to negotiate using NPN and ALPN. ALPN is mandatory, but NPN may
# be absent, so allow that. This setup allows for negotiation of HTTP/1.1.
ctx.set_alpn_protocols(["h2", "http/1.1"])
try:
ctx.set_npn_protocols(["h2", "http/1.1"])
except NotImplementedError:
pass
return ctx
def negotiate_tls(tcp_conn, context):
"""
Given an established TCP connection and a HTTP/2-appropriate TLS context,
this function:
1. wraps TLS around the TCP connection.
2. confirms that HTTP/2 was negotiated and, if it was not, throws an error.
"""
# Note that SNI is mandatory for HTTP/2, so you *must* pass the
# server_hostname argument.
tls_conn = context.wrap_socket(tcp_conn, server_hostname='localhost')
# Always prefer the result from ALPN to that from NPN.
# You can only check what protocol was negotiated once the handshake is
# complete.
negotiated_protocol = tls_conn.selected_alpn_protocol()
if negotiated_protocol is None:
negotiated_protocol = tls_conn.selected_npn_protocol()
if negotiated_protocol != "h2":
raise RuntimeError("Didn't negotiate HTTP/2!")
return tls_conn
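# Hedged sketch (not part of the original fragment): one way the "main loop"
# that main() below alludes to could issue a single GET and drain the response.
# The :authority/:path values are illustrative; error handling and flow-control
# acknowledgements are omitted for brevity.
def example_get_request(http2_connection, tls_connection):
    import h2.events

    stream_id = http2_connection.get_next_available_stream_id()
    http2_connection.send_headers(
        stream_id,
        [
            (':method', 'GET'),
            (':authority', 'localhost'),
            (':scheme', 'https'),
            (':path', '/'),
        ],
        end_stream=True,
    )
    tls_connection.sendall(http2_connection.data_to_send())

    # Keep reading TLS records and feeding them to h2 until our stream ends.
    response_ended = False
    while not response_ended:
        data = tls_connection.recv(65535)
        if not data:
            break
        for event in http2_connection.receive_data(data):
            if isinstance(event, h2.events.DataReceived):
                print(event.data)
            elif isinstance(event, h2.events.StreamEnded):
                response_ended = True
        tls_connection.sendall(http2_connection.data_to_send())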
def main():
# Step 1: Set up your TLS context.
context = get_http2_ssl_context()
# Step 2: Create a TCP connection.
connection = establish_tcp_connection()
# Step 3: Wrap the connection in TLS and validate that we negotiated HTTP/2
tls_connection = negotiate_tls(connection, context)
# Step 4: Create a client-side H2 connection.
http2_connection = h2.connection.H2Connection()
# Step 5: Initiate the connection
http2_connection.initiate_connection()
tls_connection.sendall(http2_connection.data_to_send())
# The TCP, TLS, and HTTP/2 handshakes are now complete. You can enter your
# main loop now.
| mpl-2.0 |
farhaanbukhsh/sympy | sympy/plotting/pygletplot/color_scheme.py | 85 | 12579 | from __future__ import print_function, division
from sympy import Basic, Symbol, symbols, lambdify
from util import interpolate, rinterpolate, create_bounds, update_bounds
from sympy.core.compatibility import range
class ColorGradient(object):
colors = [0.4, 0.4, 0.4], [0.9, 0.9, 0.9]
intervals = 0.0, 1.0
def __init__(self, *args):
if len(args) == 2:
self.colors = list(args)
self.intervals = [0.0, 1.0]
elif len(args) > 0:
if len(args) % 2 != 0:
raise ValueError("len(args) should be even")
self.colors = [args[i] for i in range(1, len(args), 2)]
self.intervals = [args[i] for i in range(0, len(args), 2)]
assert len(self.colors) == len(self.intervals)
def copy(self):
c = ColorGradient()
c.colors = [e[::] for e in self.colors]
c.intervals = self.intervals[::]
return c
def _find_interval(self, v):
m = len(self.intervals)
i = 0
while i < m - 1 and self.intervals[i] <= v:
i += 1
return i
def _interpolate_axis(self, axis, v):
i = self._find_interval(v)
v = rinterpolate(self.intervals[i - 1], self.intervals[i], v)
return interpolate(self.colors[i - 1][axis], self.colors[i][axis], v)
def __call__(self, r, g, b):
c = self._interpolate_axis
return c(0, r), c(1, g), c(2, b)
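# Hedged usage sketch (illustrative values): a two-colour gradient maps each
# component of a normalised (r, g, b) triple onto a value interpolated between
# the endpoint colours, which is how ColorScheme.apply_to_* uses it below.
#
#   >>> grad = ColorGradient([0.0, 0.0, 1.0], [1.0, 0.0, 0.0])
#   >>> grad(0.5, 0.5, 0.5)
#   (0.5, 0.0, 0.5)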
default_color_schemes = {} # defined at the bottom of this file
class ColorScheme(object):
def __init__(self, *args, **kwargs):
self.args = args
self.f, self.gradient = None, ColorGradient()
if len(args) == 1 and not isinstance(args[0], Basic) and callable(args[0]):
self.f = args[0]
elif len(args) == 1 and isinstance(args[0], str):
if args[0] in default_color_schemes:
cs = default_color_schemes[args[0]]
self.f, self.gradient = cs.f, cs.gradient.copy()
else:
self.f = lambdify('x,y,z,u,v', args[0])
else:
self.f, self.gradient = self._interpret_args(args, kwargs)
self._test_color_function()
if not isinstance(self.gradient, ColorGradient):
raise ValueError("Color gradient not properly initialized. "
"(Not a ColorGradient instance.)")
def _interpret_args(self, args, kwargs):
f, gradient = None, self.gradient
atoms, lists = self._sort_args(args)
s = self._pop_symbol_list(lists)
s = self._fill_in_vars(s)
# prepare the error message for lambdification failure
f_str = ', '.join(str(fa) for fa in atoms)
s_str = (str(sa) for sa in s)
s_str = ', '.join(sa for sa in s_str if sa.find('unbound') < 0)
f_error = ValueError("Could not interpret arguments "
"%s as functions of %s." % (f_str, s_str))
# try to lambdify args
if len(atoms) == 1:
fv = atoms[0]
try:
f = lambdify(s, [fv, fv, fv])
except TypeError:
raise f_error
elif len(atoms) == 3:
fr, fg, fb = atoms
try:
f = lambdify(s, [fr, fg, fb])
except TypeError:
raise f_error
else:
raise ValueError("A ColorScheme must provide 1 or 3 "
"functions in x, y, z, u, and/or v.")
# try to intrepret any given color information
if len(lists) == 0:
gargs = []
elif len(lists) == 1:
gargs = lists[0]
elif len(lists) == 2:
try:
(r1, g1, b1), (r2, g2, b2) = lists
except TypeError:
raise ValueError("If two color arguments are given, "
"they must be given in the format "
"(r1, g1, b1), (r2, g2, b2).")
gargs = lists
elif len(lists) == 3:
try:
(r1, r2), (g1, g2), (b1, b2) = lists
except Exception:
raise ValueError("If three color arguments are given, "
"they must be given in the format "
"(r1, r2), (g1, g2), (b1, b2). To create "
"a multi-step gradient, use the syntax "
"[0, colorStart, step1, color1, ..., 1, "
"colorEnd].")
gargs = [[r1, g1, b1], [r2, g2, b2]]
else:
raise ValueError("Don't know what to do with collection "
"arguments %s." % (', '.join(str(l) for l in lists)))
if gargs:
try:
gradient = ColorGradient(*gargs)
except Exception as ex:
raise ValueError(("Could not initialize a gradient "
"with arguments %s. Inner "
"exception: %s") % (gargs, str(ex)))
return f, gradient
def _pop_symbol_list(self, lists):
symbol_lists = []
for l in lists:
mark = True
for s in l:
if s is not None and not isinstance(s, Symbol):
mark = False
break
if mark:
lists.remove(l)
symbol_lists.append(l)
if len(symbol_lists) == 1:
return symbol_lists[0]
elif len(symbol_lists) == 0:
return []
else:
raise ValueError("Only one list of Symbols "
"can be given for a color scheme.")
def _fill_in_vars(self, args):
        defaults = symbols('x,y,z,u,v')
        v_error = ValueError("Could not find what to plot.")
if len(args) == 0:
return defaults
if not isinstance(args, (tuple, list)):
raise v_error
if len(args) == 0:
return defaults
for s in args:
if s is not None and not isinstance(s, Symbol):
raise v_error
# when vars are given explicitly, any vars
# not given are marked 'unbound' as to not
# be accidentally used in an expression
vars = [Symbol('unbound%i' % (i)) for i in range(1, 6)]
# interpret as t
if len(args) == 1:
vars[3] = args[0]
# interpret as u,v
elif len(args) == 2:
if args[0] is not None:
vars[3] = args[0]
if args[1] is not None:
vars[4] = args[1]
# interpret as x,y,z
elif len(args) >= 3:
# allow some of x,y,z to be
# left unbound if not given
if args[0] is not None:
vars[0] = args[0]
if args[1] is not None:
vars[1] = args[1]
if args[2] is not None:
vars[2] = args[2]
# interpret the rest as t
if len(args) >= 4:
vars[3] = args[3]
# ...or u,v
if len(args) >= 5:
vars[4] = args[4]
return vars
def _sort_args(self, args):
atoms, lists = [], []
for a in args:
if isinstance(a, (tuple, list)):
lists.append(a)
else:
atoms.append(a)
return atoms, lists
def _test_color_function(self):
if not callable(self.f):
raise ValueError("Color function is not callable.")
try:
result = self.f(0, 0, 0, 0, 0)
if len(result) != 3:
raise ValueError("length should be equal to 3")
except TypeError as te:
raise ValueError("Color function needs to accept x,y,z,u,v, "
"as arguments even if it doesn't use all of them.")
except AssertionError as ae:
raise ValueError("Color function needs to return 3-tuple r,g,b.")
except Exception as ie:
pass # color function probably not valid at 0,0,0,0,0
def __call__(self, x, y, z, u, v):
try:
return self.f(x, y, z, u, v)
except Exception as e:
return None
def apply_to_curve(self, verts, u_set, set_len=None, inc_pos=None):
"""
Apply this color scheme to a
set of vertices over a single
independent variable u.
"""
bounds = create_bounds()
cverts = list()
if callable(set_len):
set_len(len(u_set)*2)
# calculate f() = r,g,b for each vert
# and find the min and max for r,g,b
for _u in range(len(u_set)):
if verts[_u] is None:
cverts.append(None)
else:
x, y, z = verts[_u]
u, v = u_set[_u], None
c = self(x, y, z, u, v)
if c is not None:
c = list(c)
update_bounds(bounds, c)
cverts.append(c)
if callable(inc_pos):
inc_pos()
# scale and apply gradient
for _u in range(len(u_set)):
if cverts[_u] is not None:
for _c in range(3):
# scale from [f_min, f_max] to [0,1]
cverts[_u][_c] = rinterpolate(bounds[_c][0], bounds[_c][1],
cverts[_u][_c])
# apply gradient
cverts[_u] = self.gradient(*cverts[_u])
if callable(inc_pos):
inc_pos()
return cverts
def apply_to_surface(self, verts, u_set, v_set, set_len=None, inc_pos=None):
"""
Apply this color scheme to a
set of vertices over two
independent variables u and v.
"""
bounds = create_bounds()
cverts = list()
if callable(set_len):
set_len(len(u_set)*len(v_set)*2)
# calculate f() = r,g,b for each vert
# and find the min and max for r,g,b
for _u in range(len(u_set)):
column = list()
for _v in range(len(v_set)):
if verts[_u][_v] is None:
column.append(None)
else:
x, y, z = verts[_u][_v]
u, v = u_set[_u], v_set[_v]
c = self(x, y, z, u, v)
if c is not None:
c = list(c)
update_bounds(bounds, c)
column.append(c)
if callable(inc_pos):
inc_pos()
cverts.append(column)
# scale and apply gradient
for _u in range(len(u_set)):
for _v in range(len(v_set)):
if cverts[_u][_v] is not None:
# scale from [f_min, f_max] to [0,1]
for _c in range(3):
cverts[_u][_v][_c] = rinterpolate(bounds[_c][0],
bounds[_c][1], cverts[_u][_v][_c])
# apply gradient
cverts[_u][_v] = self.gradient(*cverts[_u][_v])
if callable(inc_pos):
inc_pos()
return cverts
def str_base(self):
return ", ".join(str(a) for a in self.args)
def __repr__(self):
return "%s" % (self.str_base())
x, y, z, t, u, v = symbols('x,y,z,t,u,v')
default_color_schemes['rainbow'] = ColorScheme(z, y, x)
default_color_schemes['zfade'] = ColorScheme(z, (0.4, 0.4, 0.97),
(0.97, 0.4, 0.4), (None, None, z))
default_color_schemes['zfade3'] = ColorScheme(z, (None, None, z),
[0.00, (0.2, 0.2, 1.0),
0.35, (0.2, 0.8, 0.4),
0.50, (0.3, 0.9, 0.3),
0.65, (0.4, 0.8, 0.2),
1.00, (1.0, 0.2, 0.2)])
default_color_schemes['zfade4'] = ColorScheme(z, (None, None, z),
[0.0, (0.3, 0.3, 1.0),
0.30, (0.3, 1.0, 0.3),
0.55, (0.95, 1.0, 0.2),
0.65, (1.0, 0.95, 0.2),
0.85, (1.0, 0.7, 0.2),
1.0, (1.0, 0.3, 0.2)])
| bsd-3-clause |
martinribelotta/micropython | drivers/onewire/onewire.py | 66 | 11789 | """
OneWire library ported to MicroPython by Jason Hildebrand.
TODO:
* implement and test parasite-power mode (as an init option)
* port the crc checks
The original upstream copyright and terms follow.
------------------------------------------------------------------------------
Copyright (c) 2007, Jim Studt (original old version - many contributors since)
OneWire has been maintained by Paul Stoffregen ([email protected]) since
January 2010.
26 Sept 2008 -- Robin James
Jim Studt's original library was modified by Josh Larios.
Tom Pollard, [email protected], contributed around May 20, 2008
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Much of the code was inspired by Derek Yerger's code, though I don't
think much of that remains. In any event that was..
(copyleft) 2006 by Derek Yerger - Free to distribute freely.
"""
import pyb
from pyb import disable_irq
from pyb import enable_irq
class OneWire:
def __init__(self, pin):
"""
Pass the data pin connected to your one-wire device(s), for example Pin('X1').
The one-wire protocol allows for multiple devices to be attached.
"""
self.data_pin = pin
self.write_delays = (1, 40, 40, 1)
self.read_delays = (1, 1, 40)
# cache a bunch of methods and attributes. This is necessary in _write_bit and
# _read_bit to achieve the timing required by the OneWire protocol.
self.cache = (pin.init, pin.value, pin.OUT_PP, pin.IN, pin.PULL_NONE)
pin.init(pin.IN, pin.PULL_UP)
def reset(self):
"""
Perform the onewire reset function.
Returns 1 if a device asserted a presence pulse, 0 otherwise.
If you receive 0, then check your wiring and make sure you are providing
power and ground to your devices.
"""
retries = 25
self.data_pin.init(self.data_pin.IN, self.data_pin.PULL_UP)
# We will wait up to 250uS for
# the bus to come high, if it doesn't then it is broken or shorted
# and we return a 0;
# wait until the wire is high... just in case
while True:
if self.data_pin.value():
break
retries -= 1
if retries == 0:
raise OSError("OneWire pin didn't go high")
pyb.udelay(10)
# pull the bus low for at least 480us
self.data_pin.low()
self.data_pin.init(self.data_pin.OUT_PP)
pyb.udelay(480)
# If there is a slave present, it should pull the bus low within 60us
i = pyb.disable_irq()
self.data_pin.init(self.data_pin.IN, self.data_pin.PULL_UP)
pyb.udelay(70)
presence = not self.data_pin.value()
pyb.enable_irq(i)
pyb.udelay(410)
return presence
def write_bit(self, value):
"""
Write a single bit.
"""
pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
self._write_bit(value, pin_init, pin_value, Pin_OUT_PP)
def _write_bit(self, value, pin_init, pin_value, Pin_OUT_PP):
"""
Write a single bit - requires cached methods/attributes be passed as arguments.
See also write_bit()
"""
d0, d1, d2, d3 = self.write_delays
udelay = pyb.udelay
if value:
# write 1
i = disable_irq()
pin_value(0)
pin_init(Pin_OUT_PP)
udelay(d0)
pin_value(1)
enable_irq(i)
udelay(d1)
else:
# write 0
i = disable_irq()
pin_value(0)
pin_init(Pin_OUT_PP)
udelay(d2)
pin_value(1)
enable_irq(i)
udelay(d3)
def write_byte(self, value):
"""
Write a byte. The pin will go tri-state at the end of the write to avoid
heating in a short or other mishap.
"""
pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
for i in range(8):
self._write_bit(value & 1, pin_init, pin_value, Pin_OUT_PP)
value >>= 1
pin_init(Pin_IN, Pin_PULL_UP)
def write_bytes(self, bytestring):
"""
Write a sequence of bytes.
"""
for byte in bytestring:
self.write_byte(byte)
def _read_bit(self, pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP):
"""
Read a single bit - requires cached methods/attributes be passed as arguments.
See also read_bit()
"""
d0, d1, d2 = self.read_delays
udelay = pyb.udelay
pin_init(Pin_IN, Pin_PULL_UP) # TODO why do we need this?
i = disable_irq()
pin_value(0)
pin_init(Pin_OUT_PP)
udelay(d0)
pin_init(Pin_IN, Pin_PULL_UP)
udelay(d1)
value = pin_value()
enable_irq(i)
udelay(d2)
return value
def read_bit(self):
"""
Read a single bit.
"""
pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
return self._read_bit(pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP)
def read_byte(self):
"""
Read a single byte and return the value as an integer.
See also read_bytes()
"""
pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
value = 0
for i in range(8):
bit = self._read_bit(pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP)
value |= bit << i
return value
def read_bytes(self, count):
"""
Read a sequence of N bytes.
The bytes are returned as a bytearray.
"""
s = bytearray(count)
for i in range(count):
s[i] = self.read_byte()
return s
def select_rom(self, rom):
"""
Select a specific device to talk to. Pass in rom as a bytearray (8 bytes).
"""
assert len(rom) == 8, "ROM must be 8 bytes"
self.reset()
self.write_byte(0x55) # ROM MATCH
self.write_bytes(rom)
def read_rom(self):
"""
Read the ROM - this works if there is only a single device attached.
"""
self.reset()
self.write_byte(0x33) # READ ROM
rom = self.read_bytes(8)
# TODO: check CRC of the ROM
return rom
def skip_rom(self):
"""
Send skip-rom command - this works if there is only one device attached.
"""
self.write_byte(0xCC) # SKIP ROM
def depower(self):
self.data_pin.init(self.data_pin.IN, self.data_pin.PULL_NONE)
def scan(self):
"""
Return a list of ROMs for all attached devices.
Each ROM is returned as a bytes object of 8 bytes.
"""
devices = []
self._reset_search()
while True:
rom = self._search()
if not rom:
return devices
devices.append(rom)
def _reset_search(self):
self.last_discrepancy = 0
self.last_device_flag = False
self.last_family_discrepancy = 0
self.rom = bytearray(8)
def _search(self):
# initialize for search
id_bit_number = 1
last_zero = 0
rom_byte_number = 0
rom_byte_mask = 1
search_result = 0
pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP = self.cache
# if the last call was not the last one
if not self.last_device_flag:
# 1-Wire reset
if not self.reset():
self._reset_search()
return None
# issue the search command
self.write_byte(0xF0)
# loop to do the search
while rom_byte_number < 8: # loop until through all ROM bytes 0-7
# read a bit and its complement
id_bit = self._read_bit(pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP)
cmp_id_bit = self._read_bit(pin_init, pin_value, Pin_OUT_PP, Pin_IN, Pin_PULL_UP)
# check for no devices on 1-wire
if (id_bit == 1) and (cmp_id_bit == 1):
break
else:
# all devices coupled have 0 or 1
if (id_bit != cmp_id_bit):
search_direction = id_bit # bit write value for search
else:
# if this discrepancy if before the Last Discrepancy
# on a previous next then pick the same as last time
if (id_bit_number < self.last_discrepancy):
search_direction = (self.rom[rom_byte_number] & rom_byte_mask) > 0
else:
# if equal to last pick 1, if not then pick 0
search_direction = (id_bit_number == self.last_discrepancy)
# if 0 was picked then record its position in LastZero
if search_direction == 0:
last_zero = id_bit_number
# check for Last discrepancy in family
if last_zero < 9:
self.last_family_discrepancy = last_zero
# set or clear the bit in the ROM byte rom_byte_number
# with mask rom_byte_mask
if search_direction == 1:
self.rom[rom_byte_number] |= rom_byte_mask
else:
self.rom[rom_byte_number] &= ~rom_byte_mask
# serial number search direction write bit
#print('sd', search_direction)
self.write_bit(search_direction)
# increment the byte counter id_bit_number
# and shift the mask rom_byte_mask
id_bit_number += 1
rom_byte_mask <<= 1
# if the mask is 0 then go to new SerialNum byte rom_byte_number and reset mask
if rom_byte_mask == 0x100:
rom_byte_number += 1
rom_byte_mask = 1
# if the search was successful then
if not (id_bit_number < 65):
# search successful so set last_discrepancy,last_device_flag,search_result
self.last_discrepancy = last_zero
# check for last device
if self.last_discrepancy == 0:
self.last_device_flag = True
search_result = True
# if no device found then reset counters so next 'search' will be like a first
if not search_result or not self.rom[0]:
self._reset_search()
return None
else:
return bytes(self.rom)
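# Hedged usage sketch (not part of the driver): typical interaction on a
# pyboard with the bus data line on pin 'Y12' and an external pull-up resistor.
# The pin name and the 0x44 (DS18B20 convert) command are illustrative.
#
#   ow = OneWire(pyb.Pin('Y12'))
#   if ow.reset():
#       for rom in ow.scan():       # one 8-byte ROM per attached device
#           ow.select_rom(rom)
#           ow.write_byte(0x44)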
| mit |
jashug/Lightning | serverutil.py | 1 | 2368 | """Utility functions for the server.
This includes the interface from the server implementation to the
payment channel and lightning network APIs.
requires_auth -- decorator which makes a view function require authentication
authenticate_before_request -- a before_request callback for auth
api_factory -- returns a flask Blueprint or equivalent, along with a decorator
               making functions available as RPCs, and a base class for
SQLAlchemy Declarative database models.
Signals:
WALLET_NOTIFY: sent when bitcoind tells us it has a transaction.
- tx = txid
BLOCK_NOTIFY: sent when bitcoind tells us it has a block
- block = block hash
"""
import os.path
from functools import wraps
from flask import Flask, current_app, Response, request, Blueprint
from blinker import Namespace
from jsonrpc.backend.flask import JSONRPCAPI
import bitcoin.core.serialize
from jsonrpcproxy import SmartDispatcher
app = Flask('lightningd')
SIGNALS = Namespace()
WALLET_NOTIFY = SIGNALS.signal('WALLET_NOTIFY')
BLOCK_NOTIFY = SIGNALS.signal('BLOCK_NOTIFY')
# Copied from http://flask.pocoo.org/snippets/8/
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
return (username == current_app.config['rpcuser'] and
password == current_app.config['rpcpassword'])
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(view):
"""Require basic authentication on requests to this view.
Also only accept requests from localhost.
"""
@wraps(view)
def decorated(*args, **kwargs):
"""Decorated version of view that checks authentication."""
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
if request.remote_addr != "127.0.0.1":
return Response("Access outside 127.0.0.1 forbidden", 403)
return view(*args, **kwargs)
return decorated
def authenticate_before_request():
"""before_request callback to perform authentication."""
return requires_auth(lambda: None)()
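# Hedged usage sketch (not part of the original module): a view protected by
# requires_auth. The endpoint body is illustrative; real routes are registered
# elsewhere (see api_factory in the module docstring).
@requires_auth
def _example_protected_view():
    """Trivial view body, only reachable with valid RPC credentials from localhost."""
    return Response("ok", 200)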
| mit |
jolevq/odoopub | addons/report_webkit/wizard/report_webkit_actions.py | 382 | 6537 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Vincent Renaville
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import fields, osv
class report_webkit_actions(osv.osv_memory):
_name = "report.webkit.actions"
_description = "Webkit Actions"
_columns = {
'print_button':fields.boolean('Add print button', help="Check this to add a Print action for this Report in the sidebar of the corresponding document types"),
'open_action':fields.boolean('Open added action', help="Check this to view the newly added internal print action after creating it (technical view) "),
}
_defaults = {
'print_button': lambda *a: True,
'open_action': lambda *a: False,
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
""" Changes the view dynamically
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: New arch of view.
"""
if not context: context = {}
res = super(report_webkit_actions, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
record_id = context and context.get('active_id', False) or False
active_model = context.get('active_model')
if not record_id or (active_model and active_model != 'ir.actions.report.xml'):
return res
report = self.pool['ir.actions.report.xml'].browse(
cr,
uid,
context.get('active_id'),
context=context
)
ir_values_obj = self.pool['ir.values']
ids = ir_values_obj.search(
cr,
uid,
[('value','=',report.type+','+str(context.get('active_id')))]
)
if ids:
res['arch'] = '''<form string="Add Print Buttons">
<label string="Report Action already exist for this report."/>
</form>
'''
return res
def do_action(self, cr, uid, ids, context=None):
""" This Function Open added Action.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of report.webkit.actions's ID
@param context: A standard dictionary
@return: Dictionary of ir.values form.
"""
if context is None:
context = {}
report_obj = self.pool['ir.actions.report.xml']
for current in self.browse(cr, uid, ids, context=context):
report = report_obj.browse(
cr,
uid,
context.get('active_id'),
context=context
)
if current.print_button:
ir_values_obj = self.pool['ir.values']
res = ir_values_obj.set(
cr,
uid,
'action',
'client_print_multi',
report.report_name,
[report.model],
'ir.actions.report.xml,%d' % context.get('active_id', False),
isobject=True
)
else:
ir_values_obj = self.pool['ir.values']
res = ir_values_obj.set(
cr,
uid,
'action',
'client_print_multi',
report.report_name,
[report.model,0],
'ir.actions.report.xml,%d' % context.get('active_id', False),
isobject=True
)
if res[0]:
if not current.open_action:
return {'type': 'ir.actions.act_window_close'}
return {
'name': _('Client Actions Connections'),
'view_type': 'form',
'view_mode': 'form',
'res_id' : res[0],
'res_model': 'ir.values',
'view_id': False,
'type': 'ir.actions.act_window',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
qaugustijn/stk-code | tools/update_characteristics.py | 16 | 2103 | #!/usr/bin/env python3
#
# SuperTuxKart - a fun racing game with go-kart
# Copyright (C) 2006-2015 SuperTuxKart-Team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# This script uses create_kart_properties.py to create code and then replaces
# the code in the source files. The parts in the source are marked with tags, that
# contain the argument that has to be passed to create_kart_properties.py.
# The script has to be run from the root directory of this project.
import os
import re
import subprocess
from create_kart_properties import functions
def main():
# Check, if it runs in the root directory
if not os.path.isfile("tools/update_characteristics.py"):
print("Please run this script in the root directory of the project.")
exit(1)
for operation, function in functions.items():
result = subprocess.Popen("tools/create_kart_properties.py " +
operation, shell = True,
stdout = subprocess.PIPE).stdout.read().decode('UTF-8')
with open("src/" + function[2], "r") as f:
text = f.read()
# Replace the text by using look behinds and look forwards
text = re.sub("(?<=/\* \<characteristics-start " + operation +
"\> \*/\\n)(.|\n)*(?=\\n\s*/\* <characteristics-end " + operation + "> \*/)", result, text)
with open("src/" + function[2], "w") as f:
f.write(text)
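# For reference, the marker pairs that the regular expression above matches in
# the C++ sources look like this (the operation name "getter" is illustrative):
#
#   /* <characteristics-start getter> */
#   ...generated code is replaced between the markers...
#   /* <characteristics-end getter> */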
if __name__ == '__main__':
main()
| gpl-3.0 |
mkuron/espresso | src/python/espressomd/lbboundaries.py | 1 | 2672 | # Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .script_interface import ScriptObjectRegistry, ScriptInterfaceHelper, script_interface_register
import espressomd.code_info
if any(i in espressomd.code_info.features() for i in ["LB_BOUNDARIES", "LB_BOUNDARIES_GPU"]):
@script_interface_register
class LBBoundaries(ScriptObjectRegistry):
"""
Creates a set of lattice Boltzmann boundaries.
"""
_so_name = "LBBoundaries::LBBoundaries"
def add(self, *args, **kwargs):
"""
Adds a boundary to the set.
            Either pass an existing LBBoundary object as the only argument,
            or pass keyword arguments from which a new LBBoundary is created.
"""
if len(args) == 1:
if isinstance(args[0], LBBoundary):
lbboundary = args[0]
else:
raise TypeError(
"Either a LBBoundary object or key-value pairs for the parameters of a LBBoundary object need to be passed.")
else:
lbboundary = LBBoundary(**kwargs)
self.call_method("add", object=lbboundary)
return lbboundary
def remove(self, lbboundary):
"""
Removes a boundary from the set.
Parameters
----------
lbboundary : :obj:`LBBoundary`
The boundary to be removed from the set.
"""
self.call_method("remove", object=lbboundary)
def clear(self):
"""
Removes all boundaries.
"""
self.call_method("clear")
def size(self):
return self.call_method("size")
def empty(self):
return self.call_method("empty")
@script_interface_register
class LBBoundary(ScriptInterfaceHelper):
"""
Creates a LB boundary.
"""
_so_name = "LBBoundaries::LBBoundary"
_so_bind_methods = ("get_force",)
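# Hedged usage sketch (requires ESPResSo built with LB_BOUNDARIES and a system
# with an active LB fluid; the shape parameters are illustrative):
#
#   import espressomd.shapes
#   wall = espressomd.shapes.Wall(normal=[1, 0, 0], dist=1)
#   system.lbboundaries.add(shape=wall, velocity=[0, 0, 0])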
| gpl-3.0 |
jeasoft/odoo | comunity_modules/house_booking/__openerp__.py | 4 | 1098 | # -*- coding: utf-8 -*-
{
'name' : 'Booking management',
'version' : '1.2',
'author' : 'Alicia FLOREZ & Sébastien CHAZALLET',
'category': 'Sales Management',
'summary': 'Management of house, guestroom or hotel bookings.',
'description' : """
Manage your bookings
====================
This module is used by :
- hotels
- guest houses
- guest rooms
Manage rental schedules, bookings, arrivals and departures.
Use it with its WebSite App and allow your customers to rent online !
In further versions, will manage quotations, invoices, and seasons.
""",
'website': 'http://www.inspyration.fr',
'images' : [],
'depends' : ['base', 'mail', 'crm'],
'data': [
'security/booking_security.xml',
'security/ir.model.access.csv',
'views/res_config_view.xml',
'views/booking_view.xml',
'report/voucher.xml',
'views/email.xml',
],
'js': [
],
'qweb' : [
],
'css':[
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': True,
}
| agpl-3.0 |
arokem/scipy | scipy/sparse/linalg/tests/test_norm.py | 1 | 5427 | """Test functions for the sparse.linalg.norm module
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import norm as npnorm
from numpy.testing import assert_allclose
from pytest import raises as assert_raises
import scipy.sparse
from scipy.sparse.linalg import norm as spnorm
class TestNorm(object):
def setup_method(self):
a = np.arange(9) - 4
b = a.reshape((3, 3))
self.b = scipy.sparse.csr_matrix(b)
def test_matrix_norm(self):
# Frobenius norm is the default
assert_allclose(spnorm(self.b), 7.745966692414834)
assert_allclose(spnorm(self.b, 'fro'), 7.745966692414834)
assert_allclose(spnorm(self.b, np.inf), 9)
assert_allclose(spnorm(self.b, -np.inf), 2)
assert_allclose(spnorm(self.b, 1), 7)
assert_allclose(spnorm(self.b, -1), 6)
# _multi_svd_norm is not implemented for sparse matrix
assert_raises(NotImplementedError, spnorm, self.b, 2)
assert_raises(NotImplementedError, spnorm, self.b, -2)
def test_matrix_norm_axis(self):
for m, axis in ((self.b, None), (self.b, (0, 1)), (self.b.T, (1, 0))):
assert_allclose(spnorm(m, axis=axis), 7.745966692414834)
assert_allclose(spnorm(m, 'fro', axis=axis), 7.745966692414834)
assert_allclose(spnorm(m, np.inf, axis=axis), 9)
assert_allclose(spnorm(m, -np.inf, axis=axis), 2)
assert_allclose(spnorm(m, 1, axis=axis), 7)
assert_allclose(spnorm(m, -1, axis=axis), 6)
def test_vector_norm(self):
v = [4.5825756949558398, 4.2426406871192848, 4.5825756949558398]
for m, a in (self.b, 0), (self.b.T, 1):
for axis in a, (a, ), a-2, (a-2, ):
assert_allclose(spnorm(m, 1, axis=axis), [7, 6, 7])
assert_allclose(spnorm(m, np.inf, axis=axis), [4, 3, 4])
assert_allclose(spnorm(m, axis=axis), v)
assert_allclose(spnorm(m, ord=2, axis=axis), v)
assert_allclose(spnorm(m, ord=None, axis=axis), v)
def test_norm_exceptions(self):
m = self.b
assert_raises(TypeError, spnorm, m, None, 1.5)
assert_raises(TypeError, spnorm, m, None, [2])
assert_raises(ValueError, spnorm, m, None, ())
assert_raises(ValueError, spnorm, m, None, (0, 1, 2))
assert_raises(ValueError, spnorm, m, None, (0, 0))
assert_raises(ValueError, spnorm, m, None, (0, 2))
assert_raises(ValueError, spnorm, m, None, (-3, 0))
assert_raises(ValueError, spnorm, m, None, 2)
assert_raises(ValueError, spnorm, m, None, -3)
assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', 0)
assert_raises(ValueError, spnorm, m, 'plate_of_shrimp', (0, 1))
class TestVsNumpyNorm(object):
_sparse_types = (
scipy.sparse.bsr_matrix,
scipy.sparse.coo_matrix,
scipy.sparse.csc_matrix,
scipy.sparse.csr_matrix,
scipy.sparse.dia_matrix,
scipy.sparse.dok_matrix,
scipy.sparse.lil_matrix,
)
_test_matrices = (
(np.arange(9) - 4).reshape((3, 3)),
[
[1, 2, 3],
[-1, 1, 4]],
[
[1, 0, 3],
[-1, 1, 4j]],
)
def test_sparse_matrix_norms(self):
for sparse_type in self._sparse_types:
for M in self._test_matrices:
S = sparse_type(M)
assert_allclose(spnorm(S), npnorm(M))
assert_allclose(spnorm(S, 'fro'), npnorm(M, 'fro'))
assert_allclose(spnorm(S, np.inf), npnorm(M, np.inf))
assert_allclose(spnorm(S, -np.inf), npnorm(M, -np.inf))
assert_allclose(spnorm(S, 1), npnorm(M, 1))
assert_allclose(spnorm(S, -1), npnorm(M, -1))
def test_sparse_matrix_norms_with_axis(self):
for sparse_type in self._sparse_types:
for M in self._test_matrices:
S = sparse_type(M)
for axis in None, (0, 1), (1, 0):
assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
for ord in 'fro', np.inf, -np.inf, 1, -1:
assert_allclose(spnorm(S, ord, axis=axis),
npnorm(M, ord, axis=axis))
# Some numpy matrix norms are allergic to negative axes.
for axis in (-2, -1), (-1, -2), (1, -2):
assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
assert_allclose(spnorm(S, 'f', axis=axis),
npnorm(M, 'f', axis=axis))
assert_allclose(spnorm(S, 'fro', axis=axis),
npnorm(M, 'fro', axis=axis))
def test_sparse_vector_norms(self):
for sparse_type in self._sparse_types:
for M in self._test_matrices:
S = sparse_type(M)
for axis in (0, 1, -1, -2, (0, ), (1, ), (-1, ), (-2, )):
assert_allclose(spnorm(S, axis=axis), npnorm(M, axis=axis))
for ord in None, 2, np.inf, -np.inf, 1, 0.5, 0.42:
assert_allclose(spnorm(S, ord, axis=axis),
npnorm(M, ord, axis=axis))
| bsd-3-clause |
dafei2015/hugula | Client/tools/site-packages/PIL/MicImagePlugin.py | 13 | 2228 | #
# The Python Imaging Library.
# $Id: MicImagePlugin.py 2134 2004-10-06 08:55:20Z fredrik $
#
# Microsoft Image Composer support for PIL
#
# Notes:
# uses TiffImagePlugin.py to read the actual image streams
#
# History:
# 97-01-20 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
import string
import Image, TiffImagePlugin
from OleFileIO import *
#
# --------------------------------------------------------------------
def _accept(prefix):
return prefix[:8] == MAGIC
##
# Image plugin for Microsoft's Image Composer file format.
class MicImageFile(TiffImagePlugin.TiffImageFile):
format = "MIC"
format_description = "Microsoft Image Composer"
def _open(self):
# read the OLE directory and see if this is a likely
# to be a Microsoft Image Composer file
try:
self.ole = OleFileIO(self.fp)
except IOError:
raise SyntaxError, "not an MIC file; invalid OLE file"
# find ACI subfiles with Image members (maybe not the
# best way to identify MIC files, but what the... ;-)
self.images = []
for file in self.ole.listdir():
if file[1:] and file[0][-4:] == ".ACI" and file[1] == "Image":
self.images.append(file)
# if we didn't find any images, this is probably not
# an MIC file.
if not self.images:
raise SyntaxError, "not an MIC file; no image entries"
self.__fp = self.fp
self.frame = 0
if len(self.images) > 1:
self.category = Image.CONTAINER
self.seek(0)
def seek(self, frame):
try:
filename = self.images[frame]
except IndexError:
raise EOFError, "no such frame"
self.fp = self.ole.openstream(filename)
TiffImagePlugin.TiffImageFile._open(self)
self.frame = frame
def tell(self):
return self.frame
#
# --------------------------------------------------------------------
Image.register_open("MIC", MicImageFile, _accept)
Image.register_extension("MIC", ".mic")
| mit |
pascalguru/florincoin | qa/rpc-tests/getblocktemplate_longpoll.py | 163 | 3683 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def check_array_result(object_array, to_match, expected):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
"""
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0:
raise AssertionError("No objects matched %s"%(str(to_match)))
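# Illustrative call (hypothetical data, not from the original test):
#   check_array_result([{"id": 1, "ok": True}], {"id": 1}, {"ok": True})
# matches the single object and passes; a mismatched expected value raises AssertionError.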
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = AuthServiceProxy(node.url, timeout=600)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateLPTest(BitcoinTestFramework):
'''
Test longpolling with getblocktemplate.
'''
def run_test(self):
print "Warning: this test will take about 70 seconds in the best case. Be patient."
self.nodes[0].setgenerate(True, 10)
templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
# Test 1: test that the longpolling wait if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].setgenerate(True, 1) # generate a block on another node
        # check that the longpoll thread exits now that a new block has been found
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
        self.nodes[0].setgenerate(True, 1) # generate a block on our own node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), Decimal("0.0"), Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
| mit |
liavkoren/djangoDev | tests/serializers/models.py | 29 | 3090 | # -*- coding: utf-8 -*-
"""
42. Serialization
``django.core.serializers`` provides interfaces to converting Django
``QuerySet`` objects to and from "flat" data (i.e. strings).
"""
from __future__ import unicode_literals
from decimal import Decimal
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
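# A minimal serialization sketch for the models below (illustrative, not part of
# the original test app; the "json" format and queryset are assumptions):
#
#   from django.core import serializers
#   data = serializers.serialize("json", Category.objects.all())
#   for deserialized in serializers.deserialize("json", data):
#       deserialized.save()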
@python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
author = models.ForeignKey(Author)
headline = models.CharField(max_length=50)
pub_date = models.DateTimeField()
categories = models.ManyToManyField(Category)
class Meta:
ordering = ('pub_date',)
def __str__(self):
return self.headline
@python_2_unicode_compatible
class AuthorProfile(models.Model):
author = models.OneToOneField(Author, primary_key=True)
date_of_birth = models.DateField()
def __str__(self):
return "Profile of %s" % self.author
@python_2_unicode_compatible
class Actor(models.Model):
name = models.CharField(max_length=20, primary_key=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Movie(models.Model):
actor = models.ForeignKey(Actor)
title = models.CharField(max_length=50)
price = models.DecimalField(max_digits=6, decimal_places=2, default=Decimal('0.00'))
class Meta:
ordering = ('title',)
def __str__(self):
return self.title
class Score(models.Model):
score = models.FloatField()
@python_2_unicode_compatible
class Team(object):
def __init__(self, title):
self.title = title
def __str__(self):
raise NotImplementedError("Not so simple")
def to_string(self):
return "%s" % self.title
class TeamField(six.with_metaclass(models.SubfieldBase, models.CharField)):
def __init__(self):
super(TeamField, self).__init__(max_length=100)
def get_db_prep_save(self, value, connection):
return six.text_type(value.title)
def to_python(self, value):
if isinstance(value, Team):
return value
return Team(value)
def value_to_string(self, obj):
return self._get_val_from_obj(obj).to_string()
def deconstruct(self):
name, path, args, kwargs = super(TeamField, self).deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
@python_2_unicode_compatible
class Player(models.Model):
name = models.CharField(max_length=50)
rank = models.IntegerField()
team = TeamField()
def __str__(self):
return '%s (%d) playing for %s' % (self.name, self.rank, self.team.to_string())
| bsd-3-clause |
jeremygillies-ea/data_returns_prototype | node_modules/node-sass/node_modules/pangyp/gyp/PRESUBMIT.py | 496 | 3373 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
PYLINT_BLACKLIST = [
# TODO: fix me.
# From SCons, not done in google style.
'test/lib/TestCmd.py',
'test/lib/TestCommon.py',
'test/lib/TestGyp.py',
# Needs style fix.
'pylib/gyp/generator/xcode.py',
]
PYLINT_DISABLED_WARNINGS = [
# TODO: fix me.
# Many tests include modules they don't use.
'W0611',
# Include order doesn't properly include local files?
'F0401',
# Some use of built-in names.
'W0622',
# Some unused variables.
'W0612',
# Operator not preceded/followed by space.
'C0323',
'C0322',
# Unnecessary semicolon.
'W0301',
# Unused argument.
'W0613',
# String has no effect (docstring in wrong place).
'W0105',
# Comma not followed by space.
'C0324',
# Access to a protected member.
'W0212',
# Bad indent.
'W0311',
# Line too long.
'C0301',
# Undefined variable.
'E0602',
# Not exception type specified.
'W0702',
# No member of that name.
'E1101',
# Dangerous default {}.
'W0102',
# Others, too many to sort.
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
'R0201', 'E0101', 'C0321',
# ************* Module copy
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
# Accept any year number from 2009 to the current year.
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
# The (c) is deprecated, but tolerate it until it's removed from all files.
license = (
r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': years_re,
}
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
import os
import sys
old_sys_path = sys.path
try:
sys.path = ['pylib', 'test/lib'] + sys.path
blacklist = PYLINT_BLACKLIST
if sys.platform == 'win32':
blacklist = [os.path.normpath(x).replace('\\', '\\\\')
for x in PYLINT_BLACKLIST]
report.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=blacklist,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return report
def GetPreferredTrySlaves():
return ['gyp-win32', 'gyp-win64', 'gyp-linux', 'gyp-mac', 'gyp-android']
| mit |
LaoZhongGu/kbengine | kbe/src/lib/python/Lib/symbol.py | 9 | 2023 | #! /usr/bin/env python3
"""Non-terminal symbols of Python grammar (from "graminit.h")."""
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# ./python Lib/symbol.py
#--start constants--
single_input = 256
file_input = 257
eval_input = 258
decorator = 259
decorators = 260
decorated = 261
funcdef = 262
parameters = 263
typedargslist = 264
tfpdef = 265
varargslist = 266
vfpdef = 267
stmt = 268
simple_stmt = 269
small_stmt = 270
expr_stmt = 271
testlist_star_expr = 272
augassign = 273
del_stmt = 274
pass_stmt = 275
flow_stmt = 276
break_stmt = 277
continue_stmt = 278
return_stmt = 279
yield_stmt = 280
raise_stmt = 281
import_stmt = 282
import_name = 283
import_from = 284
import_as_name = 285
dotted_as_name = 286
import_as_names = 287
dotted_as_names = 288
dotted_name = 289
global_stmt = 290
nonlocal_stmt = 291
assert_stmt = 292
compound_stmt = 293
if_stmt = 294
while_stmt = 295
for_stmt = 296
try_stmt = 297
with_stmt = 298
with_item = 299
except_clause = 300
suite = 301
test = 302
test_nocond = 303
lambdef = 304
lambdef_nocond = 305
or_test = 306
and_test = 307
not_test = 308
comparison = 309
comp_op = 310
star_expr = 311
expr = 312
xor_expr = 313
and_expr = 314
shift_expr = 315
arith_expr = 316
term = 317
factor = 318
power = 319
atom = 320
testlist_comp = 321
trailer = 322
subscriptlist = 323
subscript = 324
sliceop = 325
exprlist = 326
testlist = 327
dictorsetmaker = 328
classdef = 329
arglist = 330
argument = 331
comp_iter = 332
comp_for = 333
comp_if = 334
encoding_decl = 335
yield_expr = 336
#--end constants--
sym_name = {}
for _name, _value in list(globals().items()):
if type(_value) is type(0):
sym_name[_value] = _name
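# Illustrative lookups (not part of the generated file): sym_name[single_input]
# is 'single_input' and sym_name[257] is 'file_input'.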
def main():
import sys
import token
if len(sys.argv) == 1:
sys.argv = sys.argv + ["Include/graminit.h", "Lib/symbol.py"]
token.main()
if __name__ == "__main__":
main()
| lgpl-3.0 |
klebercode/lionsclub | eventi/subscriptions/tests/test_views_subscribe.py | 1 | 3025 | # coding: utf-8
from django.test import TestCase
from django.core.urlresolvers import reverse as r
from eventex.subscriptions.forms import SubscriptionForm
from eventex.subscriptions.models import Subscription
class SubscribeTest(TestCase):
def setUp(self):
self.resp = self.client.get(r('subscriptions:subscribe'))
def test_get(self):
"""
Get /inscricao/ must return status code 200.
"""
self.assertEqual(200, self.resp.status_code)
def test_template(self):
"""
Response should be rendered by template.
"""
self.assertTemplateUsed(self.resp, 'subscriptions/subscription_form.html')
def test_html(self):
"""
Html must contain input controls.
"""
self.assertContains(self.resp, '<form')
self.assertContains(self.resp, '<input', 7)
self.assertContains(self.resp, 'type="text"', 4)
self.assertContains(self.resp, 'type="email"')
self.assertContains(self.resp, 'type="submit"')
def test_csrf(self):
"""
Html must contain csrf token.
"""
self.assertContains(self.resp, 'csrfmiddlewaretoken')
def test_has_form(self):
"""
Context must have the subscription form.
"""
form = self.resp.context['form']
self.assertIsInstance(form, SubscriptionForm)
class SubscribePostTest(TestCase):
def setUp(self):
data = dict(name='Henrique Bastos', cpf='12345678901',
email='[email protected]', phone='21-96186180')
self.resp = self.client.post(r('subscriptions:subscribe'), data)
def test_post(self):
"""
Valid POST should redirect to /inscricao/1/.
"""
self.assertEqual(302, self.resp.status_code)
def test_save(self):
"""
Valid POST must be saved.
"""
self.assertTrue(Subscription.objects.exists())
class SubscribeInvalidPost(TestCase):
def setUp(self):
data = dict(name='Henrique Bastos', cpf='000000000012',
email='[email protected]', phone='21-96186180')
self.resp = self.client.post(r('subscriptions:subscribe'), data)
def test_post(self):
"""
Invalid POST should not redirect.
"""
self.assertEqual(200, self.resp.status_code)
def test_form_errors(self):
"""
Form must contain errors.
"""
self.assertTrue(self.resp.context['form'].errors)
def test_dont_save(self):
"""
Do not save data.
"""
self.assertFalse(Subscription.objects.exists())
class TemplateRegressionTest(TestCase):
def test_template_has_non_field_errors(self):
"""
Check if non_field_errors are shown in template.
"""
invalid_data = dict(name='Henrique Bastos', cpf='12345678901')
response = self.client.post(r('subscriptions:subscribe'), invalid_data)
self.assertContains(response, '<ul class="errorlist">') | mit |
Tennyson53/SUR | magnum/common/pythonk8sclient/client/models/V1beta3_NodeList.py | 15 | 2562 | #!/usr/bin/env python
"""
Copyright 2015 Reverb Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class V1beta3_NodeList(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'apiVersion': 'str',
'items': 'list[V1beta3_Node]',
'kind': 'str',
'resourceVersion': 'str',
'selfLink': 'str'
}
self.attributeMap = {
'apiVersion': 'apiVersion',
'items': 'items',
'kind': 'kind',
'resourceVersion': 'resourceVersion',
'selfLink': 'selfLink'
}
#version of the schema the object should have
self.apiVersion = None # str
#list of nodes
self.items = None # list[V1beta3_Node]
#kind of object, in CamelCase; cannot be updated
self.kind = None # str
#string that identifies the internal version of this object that can be used by clients to determine when objects have changed; populated by the system, read-only; value must be treated as opaque by clients and passed unmodified back to the server: https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api-conventions.md#concurrency-control-and-consistency
self.resourceVersion = None # str
#URL for the object; populated by the system, read-only
self.selfLink = None # str
| apache-2.0 |
ccellis/WHACK2016 | flask/lib/python2.7/site-packages/migrate/versioning/pathed.py | 146 | 2059 | """
A path/directory class.
"""
import os
import shutil
import logging
from migrate import exceptions
from migrate.versioning.config import *
from migrate.versioning.util import KeyedInstance
log = logging.getLogger(__name__)
class Pathed(KeyedInstance):
"""
A class associated with a path/directory tree.
Only one instance of this class may exist for a particular file;
__new__ will return an existing instance if possible
"""
parent = None
@classmethod
def _key(cls, path):
return str(path)
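    # Illustrative consequence of keying on str(path) (not in the original file):
    # constructing the same class twice with an equal path returns the cached
    # instance, e.g. Pathed('/tmp/repo') is Pathed('/tmp/repo') -> True.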
def __init__(self, path):
self.path = path
if self.__class__.parent is not None:
self._init_parent(path)
def _init_parent(self, path):
"""Try to initialize this object's parent, if it has one"""
parent_path = self.__class__._parent_path(path)
self.parent = self.__class__.parent(parent_path)
log.debug("Getting parent %r:%r" % (self.__class__.parent, parent_path))
self.parent._init_child(path, self)
def _init_child(self, child, path):
"""Run when a child of this object is initialized.
Parameters: the child object; the path to this object (its
parent)
"""
@classmethod
def _parent_path(cls, path):
"""
Fetch the path of this object's parent from this object's path.
"""
# os.path.dirname(), but strip directories like files (like
# unix basename)
#
# Treat directories like files...
if path[-1] == '/':
path = path[:-1]
ret = os.path.dirname(path)
return ret
@classmethod
def require_notfound(cls, path):
"""Ensures a given path does not already exist"""
if os.path.exists(path):
raise exceptions.PathFoundError(path)
@classmethod
def require_found(cls, path):
"""Ensures a given path already exists"""
if not os.path.exists(path):
raise exceptions.PathNotFoundError(path)
def __str__(self):
return self.path
| bsd-3-clause |
tomMoulard/python-projetcs | scripts3/say_chat.py | 1 | 2153 | __author__ = "Brian Lenihan <[email protected]"
__copyright__ = "Copyright (c) 2012 Python for Android Project"
__license__ = "Apache License, Version 2.0"
import logging
import sl4a
from pyxmpp2.jid import JID
from pyxmpp2.client import Client
from pyxmpp2.settings import XMPPSettings
from pyxmpp2.interfaces import XMPPFeatureHandler
from pyxmpp2.interfaces import EventHandler, event_handler, QUIT
from pyxmpp2.interfaces import message_stanza_handler
from pyxmpp2.streamevents import DisconnectedEvent
from pyxmpp2.ext.version import VersionProvider
logging.basicConfig(level = logging.INFO)
xmpp_trace = False
class SayChat(EventHandler, XMPPFeatureHandler):
def __init__(self):
self.droid = sl4a.Android()
settings = XMPPSettings({"software_name": "Say Chat"})
settings["jid"] = self.droid.dialogGetInput("Google Talk Username").result
settings["password"] = self.droid.dialogGetInput("Google Talk Password").result
settings["server"] = "talk.google.com"
settings["starttls"] = True
self.client = Client(
JID(settings["jid"]),
[self, VersionProvider(settings)],
settings)
def connect(self):
self.client.connect()
self.client.run()
def disconnect(self):
self.client.disconnect()
self.client.run(timeout = 2)
@message_stanza_handler()
def handle_message(self, stanza):
self.droid.ttsSpeak(
"{!s} says {!s}".format(stanza.from_jid.as_unicode(),
stanza.body))
return ""
@event_handler(DisconnectedEvent)
def handle_disconnected(self, event):
return QUIT
@event_handler()
def handle_all(self, event):
"""If it's not logged, it didn't happen."""
logging.info("-- {}".format(event))
def run(self):
try:
self.connect()
except KeyboardInterrupt:
self.disconnect()
if xmpp_trace:
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
for logger in ("pyxmpp2.IN", "pyxmpp2.OUT"):
logger = logging.getLogger(logger)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.propagate = False
saychat = SayChat()
saychat.run()
| apache-2.0 |
thawatchai/mrkimontour | appengine-django/lib/django/conf/locale/nb/formats.py | 504 | 1766 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| gpl-2.0 |
vlinhd11/vlinhd11-android-scripting | python/src/Lib/test/pydocfodder.py | 194 | 6329 | """Something just to look at via pydoc."""
import types
class A_classic:
"A classic class."
def A_method(self):
"Method defined in A."
def AB_method(self):
"Method defined in A and B."
def AC_method(self):
"Method defined in A and C."
def AD_method(self):
"Method defined in A and D."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
class B_classic(A_classic):
"A classic class, derived from A_classic."
def AB_method(self):
"Method defined in A and B."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def B_method(self):
"Method defined in B."
def BC_method(self):
"Method defined in B and C."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
class C_classic(A_classic):
"A classic class, derived from A_classic."
def AC_method(self):
"Method defined in A and C."
def ABC_method(self):
"Method defined in A, B and C."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BC_method(self):
"Method defined in B and C."
def BCD_method(self):
"Method defined in B, C and D."
def C_method(self):
"Method defined in C."
def CD_method(self):
"Method defined in C and D."
class D_classic(B_classic, C_classic):
"A classic class, derived from B_classic and C_classic."
def AD_method(self):
"Method defined in A and D."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
def CD_method(self):
"Method defined in C and D."
def D_method(self):
"Method defined in D."
class A_new(object):
"A new-style class."
def A_method(self):
"Method defined in A."
def AB_method(self):
"Method defined in A and B."
def AC_method(self):
"Method defined in A and C."
def AD_method(self):
"Method defined in A and D."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def A_classmethod(cls, x):
"A class method defined in A."
A_classmethod = classmethod(A_classmethod)
def A_staticmethod():
"A static method defined in A."
A_staticmethod = staticmethod(A_staticmethod)
def _getx(self):
"A property getter function."
def _setx(self, value):
"A property setter function."
def _delx(self):
"A property deleter function."
A_property = property(fdel=_delx, fget=_getx, fset=_setx,
doc="A sample property defined in A.")
A_int_alias = int
class B_new(A_new):
"A new-style class, derived from A_new."
def AB_method(self):
"Method defined in A and B."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def B_method(self):
"Method defined in B."
def BC_method(self):
"Method defined in B and C."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
class C_new(A_new):
"A new-style class, derived from A_new."
def AC_method(self):
"Method defined in A and C."
def ABC_method(self):
"Method defined in A, B and C."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BC_method(self):
"Method defined in B and C."
def BCD_method(self):
"Method defined in B, C and D."
def C_method(self):
"Method defined in C."
def CD_method(self):
"Method defined in C and D."
class D_new(B_new, C_new):
"""A new-style class, derived from B_new and C_new.
"""
def AD_method(self):
"Method defined in A and D."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
def CD_method(self):
"Method defined in C and D."
def D_method(self):
"Method defined in D."
class FunkyProperties(object):
"""From SF bug 472347, by Roeland Rengelink.
Property getters etc may not be vanilla functions or methods,
and this used to make GUI pydoc blow up.
"""
def __init__(self):
self.desc = {'x':0}
class get_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst):
print 'Get called', self, inst
return inst.desc[self.attr]
class set_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst, val):
print 'Set called', self, inst, val
inst.desc[self.attr] = val
class del_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst):
print 'Del called', self, inst
del inst.desc[self.attr]
x = property(get_desc('x'), set_desc('x'), del_desc('x'), 'prop x')
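# Illustrative behaviour of the class above (not in the original file): accessing
# FunkyProperties().x goes through get_desc('x') and returns 0, printing the
# 'Get called ...' diagnostic as a side effect.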
submodule = types.ModuleType(__name__ + '.submodule',
"""A submodule, which should appear in its parent's summary""")
| apache-2.0 |
frreiss/tensorflow-fred | tensorflow/python/autograph/pyct/static_analysis/__init__.py | 27 | 1375 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Static information resolution.
This module contains utilities to help annotate AST nodes with as much runtime
information as can be possibly extracted without actually executing the code,
under that assumption that the context in which the code will run is known.
Overall, the different analyses have the functions listed below:
* activity: inventories symbols read, written to, params, etc. at different
levels
* liveness, reaching_definitions: dataflow analyses based on the program's CFG
and using the symbol information gathered by activity analysis
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| apache-2.0 |
ramielrowe/magnum | magnum/common/pythonk8sclient/client/models/V1beta3_ServiceStatus.py | 15 | 1172 | #!/usr/bin/env python
"""
Copyright 2015 Reverb Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class V1beta3_ServiceStatus(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
}
self.attributeMap = {
}
| apache-2.0 |
manjunaths/tensorflow | tensorflow/contrib/framework/python/framework/tensor_util.py | 37 | 13232 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
__all__ = [
'assert_same_float_dtype',
'assert_scalar',
'assert_scalar_int',
'convert_to_tensor_or_sparse_tensor',
'is_tensor',
'reduce_sum_n',
'remove_squeezable_dimensions',
'with_shape',
'with_same_shape']
convert_to_tensor_or_sparse_tensor = (
sparse_tensor.convert_to_tensor_or_sparse_tensor)
def _assert_same_base_type(items, expected_type=None):
r"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type
def assert_same_float_dtype(tensors=None, dtype=None):
"""Validate and return float type based on `tensors` and `dtype`.
For ops such as matrix multiplication, inputs and weights must be of the
same float type. This function validates that all `tensors` are the same type,
validates that type is `dtype` (if supplied), and returns the type. Type must
be `dtypes.float32` or `dtypes.float64`. If neither `tensors` nor
`dtype` is supplied, default to `dtypes.float32`.
Args:
tensors: Tensors of input values. Can include `None` elements, which will be
ignored.
dtype: Expected type.
Returns:
Validated type.
Raises:
ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
float.
"""
if tensors:
dtype = _assert_same_base_type(tensors, dtype)
if not dtype:
dtype = dtypes.float32
elif not dtype.is_floating:
raise ValueError('Expected float, got %s.' % dtype)
return dtype
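# Illustrative usage (assumed inputs, not from the original file):
#   assert_same_float_dtype([ops.convert_to_tensor(1.0), ops.convert_to_tensor(2.0)])
#   # returns dtypes.float32; mixing a float64 tensor in would raise ValueError.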
def assert_scalar_int(tensor, name=None):
"""Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
Args:
tensor: `Tensor` to test.
name: Name of the op and of the new `Tensor` if one is created.
Returns:
`tensor`, for chaining.
Raises:
ValueError: if `tensor` is not 0-D, of type `tf.int32` or `tf.int64`.
"""
with ops.name_scope(name, 'assert_scalar_int', [tensor]) as name_scope:
tensor = ops.convert_to_tensor(tensor)
data_type = tensor.dtype
if data_type.base_dtype not in [dtypes.int32, dtypes.int64]:
raise ValueError('Unexpected type %s for %s.' % (data_type, tensor.name))
return assert_scalar(tensor, name=name_scope)
def assert_scalar(tensor, name=None):
with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:
tensor = ops.convert_to_tensor(tensor, name=name_scope)
shape = tensor.get_shape()
if shape.ndims != 0:
raise ValueError('Unexpected shape %s for %s.' % (shape, tensor.name))
return tensor
def reduce_sum_n(tensors, name=None):
"""Reduce tensors to a scalar sum.
This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
adds them via `tf.add_n`.
Args:
tensors: List of tensors, all of the same numeric type.
name: Tensor name, and scope for all other ops.
Returns:
Total loss tensor, or None if no losses have been configured.
Raises:
ValueError: if `losses` is missing or empty.
"""
if not tensors:
raise ValueError('No tensors provided.')
with ops.name_scope(name, 'reduce_sum_n', tensors) as name_scope:
tensors = [
math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
if len(tensors) == 1:
return tensors[0]
return math_ops.add_n(tensors, name=name_scope)
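# Sketch of the reduction above (assumed inputs): reduce_sum_n([t1, t2]) first
# collapses each tensor with reduce_sum, then add_n's the results, so tensors
# [1., 2.] and [3.] yield a scalar 6.0.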
def remove_squeezable_dimensions(predictions, labels, name=None):
"""Squeeze last dim if ranks of `predictions` and `labels` differ by 1.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
labels: Label values, a `Tensor` whose dimensions match `predictions`.
name: Name of the op.
Returns:
Tuple of `predictions` and `labels`, possibly with last dim squeezed.
"""
with ops.name_scope(name, 'remove_squeezable_dimensions',
[predictions, labels]):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
labels_shape = labels.get_shape()
labels_rank = labels_shape.ndims
if (labels_rank is not None) and (predictions_rank is not None):
# Use static rank.
rank_diff = predictions_rank - labels_rank
if rank_diff == -1:
labels = array_ops.squeeze(labels, [-1])
elif rank_diff == 1:
predictions = array_ops.squeeze(predictions, [-1])
return predictions, labels
# Use dynamic rank.
rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
if (predictions_rank is None) or (
predictions_shape.dims[-1].is_compatible_with(1)):
predictions = control_flow_ops.cond(
math_ops.equal(1, rank_diff),
lambda: array_ops.squeeze(predictions, [-1]),
lambda: predictions)
if (labels_rank is None) or (
labels_shape.dims[-1].is_compatible_with(1)):
labels = control_flow_ops.cond(
math_ops.equal(-1, rank_diff),
lambda: array_ops.squeeze(labels, [-1]),
lambda: labels)
return predictions, labels
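# Shape behaviour sketch (assumed shapes, not from the original file): predictions
# of shape [batch, 1] with labels of shape [batch] differ in rank by 1, so the
# trailing dimension of predictions is squeezed to [batch]; equal-rank inputs are
# returned unchanged.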
def _all_equal(tensor0, tensor1):
with ops.name_scope('all_equal', values=[tensor0, tensor1]) as scope:
return math_ops.reduce_all(
math_ops.equal(tensor0, tensor1, name='equal'), name=scope)
def _is_rank(expected_rank, actual_tensor):
"""Returns whether actual_tensor's rank is expected_rank.
Args:
expected_rank: Integer defining the expected rank, or tensor of same.
actual_tensor: Tensor to test.
Returns:
New tensor.
"""
with ops.name_scope('is_rank', values=[actual_tensor]) as scope:
expected = ops.convert_to_tensor(expected_rank, name='expected')
actual = array_ops.rank(actual_tensor, name='actual')
return math_ops.equal(expected, actual, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
"""Returns whether actual_tensor's shape is expected_shape.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_tensor: Tensor to test.
actual_shape: Shape of actual_tensor, if we already have it.
Returns:
New tensor.
"""
with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
if actual_shape is None:
actual_shape = array_ops.shape(actual_tensor, name='actual')
shape_equal = _all_equal(
ops.convert_to_tensor(expected_shape, name='expected'),
actual_shape)
return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
"""Asserts actual_tensor's shape is expected_shape.
Args:
expected_shape: List of integers defining the expected shape, or tensor of
same.
actual_tensor: Tensor to test.
Returns:
New assert tensor.
"""
with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
actual_shape = array_ops.shape(actual_tensor, name='actual')
is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
return control_flow_ops.Assert(
is_shape, [
'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
expected_shape,
actual_shape
], name=scope)
def with_same_shape(expected_tensor, tensor):
"""Assert tensors are the same shape, from the same graph.
Args:
expected_tensor: Tensor with expected shape.
tensor: Tensor of actual values.
Returns:
Tuple of (actual_tensor, label_tensor), possibly with assert ops added.
"""
with ops.name_scope('%s/' % tensor.op.name, values=[expected_tensor, tensor]):
tensor_shape = expected_tensor.get_shape()
expected_shape = (
tensor_shape.as_list() if tensor_shape.is_fully_defined()
else array_ops.shape(expected_tensor, name='expected_shape'))
return with_shape(expected_shape, tensor)
def is_tensor(x):
"""Check for tensor types.
Check whether an object is a tensor. Equivalent to
`isinstance(x, [tf.Tensor, tf.SparseTensor, tf.Variable])`.
Args:
x: An python object to check.
Returns:
`True` if `x` is a tensor, `False` if not.
"""
tensor_types = (ops.Tensor, sparse_tensor.SparseTensor, variables.Variable)
return isinstance(x, tensor_types)
def with_shape(expected_shape, tensor):
"""Asserts tensor has expected shape.
If tensor shape and expected_shape, are fully defined, assert they match.
Otherwise, add assert op that will validate the shape when tensor is
evaluated, and set shape on tensor.
Args:
expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
of same.
tensor: Tensor whose shape we're validating.
Returns:
tensor, perhaps with a dependent assert operation.
Raises:
ValueError: if tensor has an invalid shape.
"""
if isinstance(tensor, sparse_tensor.SparseTensor):
raise ValueError('SparseTensor not supported.')
# Shape type must be 1D int32.
if is_tensor(expected_shape):
if expected_shape.dtype.base_dtype != dtypes.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
if isinstance(expected_shape, (list, tuple)):
if not expected_shape:
expected_shape = np.asarray([], dtype=np.int32)
else:
np_expected_shape = np.asarray(expected_shape)
expected_shape = (
np.asarray(expected_shape, dtype=np.int32)
if np_expected_shape.dtype == np.int64 else np_expected_shape)
if isinstance(expected_shape, np.ndarray):
if expected_shape.ndim > 1:
raise ValueError(
'Invalid rank %s for shape %s expected of tensor %s.' % (
expected_shape.ndim, expected_shape, tensor.name))
if expected_shape.dtype != np.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
actual_shape = tensor.get_shape()
if not actual_shape.is_fully_defined() or is_tensor(expected_shape):
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
if not is_tensor(expected_shape) and (len(expected_shape) < 1):
# TODO(irving): Remove scalar special case
return array_ops.reshape(tensor, [])
with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
result = array_ops.identity(tensor)
if not is_tensor(expected_shape):
result.set_shape(expected_shape)
return result
if (not is_tensor(expected_shape) and
not actual_shape.is_compatible_with(expected_shape)):
if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
# TODO(irving): Remove scalar special case.
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
return array_ops.reshape(tensor, [])
raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
tensor.name, expected_shape, actual_shape))
return tensor
| apache-2.0 |
cwisecarver/osf.io | scripts/osfstorage/migrate_to_generic.py | 5 | 6466 | from __future__ import unicode_literals
import sys
import logging
import datetime
from django.utils import timezone
from modularodm import Q
from modularodm.storage.base import KeyExistsException
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
from website.files import models
from website.app import init_app
from addons.osfstorage import model as osfstorage_model
NOW = timezone.now()
logger = logging.getLogger(__name__)
def paginated(model, query=None, increment=200):
last_id = ''
pages = (model.find(query).count() / increment) + 1
for i in xrange(pages):
q = Q('_id', 'gt', last_id)
if query:
q &= query
page = list(model.find(q).limit(increment))
for item in page:
yield item
if page:
last_id = item._id
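# Usage sketch (illustrative; the model and page size are assumptions):
#   for settings in paginated(osfstorage_model.OsfStorageNodeSettings, increment=100):
#       process(settings)  # pages are fetched by advancing past the last seen _id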
def do_migration():
logger.info('Migration: OsfStorageFileNode -> FileNode')
migrate_filenodes()
logger.info('Migration: OsfStorageTrashedFileNode -> TrashedFileNode')
migrate_trashedfilenodes()
logger.info('Checking that all Files have been migrated...')
diff = osfstorage_model.OsfStorageFileNode.find().count() - models.FileNode.find().count()
if diff > 0:
        logger.error('Missing {} FileNodes; canceling transaction'.format(diff))
raise Exception('{} unmigrated FileNodes'.format(diff))
logger.info('Checking that all File versions have been migrated...')
diff = osfstorage_model.OsfStorageFileVersion.find().count() - models.FileVersion.find().count()
if diff != 0:
logger.error('{} OsfStorageFileVersions did not get migrated'.format(diff))
logger.error('This is most likely because they are orphaned')
logger.error('This is not a show stopper; The migration was still successful')
else:
logger.info('Migration successful')
def migrate_trashedfilenodes():
for trashed in osfstorage_model.OsfStorageTrashedFileNode.find():
logger.debug('Migrating OsfStorageTrashedFileNode {}'.format(trashed._id))
if trashed.node_settings is None:
logger.warning('OsfStorageTrashedFileNode {} has no node_settings; skipping'.format(trashed._id))
continue
parent_id = trashed.to_storage()['parent']
parent = osfstorage_model.OsfStorageTrashedFileNode.load(parent_id) or osfstorage_model.OsfStorageFileNode.load(parent_id)
if parent:
if isinstance(parent, osfstorage_model.OsfStorageFileNode):
parent = (parent._id, 'storedfilenode')
else:
parent = (parent._id, 'trashedfilenode')
models.TrashedFileNode(
_id=trashed._id,
versions=translate_versions(trashed.versions),
node=trashed.node_settings.owner,
parent=parent,
is_file=trashed.kind == 'file',
provider='osfstorage',
name=trashed.name,
path='/' + trashed._id + ('' if trashed.kind == 'file' else '/'),
materialized_path=''
).save()
def migrate_filenodes():
for node_settings in paginated(osfstorage_model.OsfStorageNodeSettings):
if node_settings.owner is None:
logger.warning('NodeSettings {} has no parent; skipping'.format(node_settings._id))
continue
logger.info('Migrating files for {!r}'.format(node_settings.owner))
listing = []
for filenode in osfstorage_model.OsfStorageFileNode.find(Q('node_settings', 'eq', node_settings._id)):
logger.debug('Migrating OsfStorageFileNode {}'.format(filenode._id))
versions = translate_versions(filenode.versions)
if filenode.is_file and not filenode.node.is_deleted:
if not filenode.versions:
logger.warning('File {!r} has no versions'.format(filenode))
elif not versions:
logger.warning('{!r} is a file with no translatable versions'.format(filenode))
new_node = models.StoredFileNode(
_id=filenode._id,
versions=versions,
node=node_settings.owner,
parent=None if not filenode.parent else filenode.parent._id,
is_file=filenode.kind == 'file',
provider='osfstorage',
name=filenode.name,
last_touched=NOW
)
# Wrapped's save will populate path and materialized_path
new_node.save()
listing.append(new_node)
assert node_settings.get_root()
for x in listing:
# Make sure everything transfered properly
if x.to_storage()['parent']:
assert x.parent, '{!r}\'s parent {} does not exist'.format(x, x.to_storage()['parent'])
def translate_versions(versions):
translated = []
for index, version in enumerate(versions):
if version is None:
raise Exception('Version {} missing from database'.format(version))
if not version.metadata or not version.location:
logger.error('Version {} missing metadata or location'.format(version))
continue
translated.append(translate_version(version, index))
return translated
def translate_version(version, index):
version = models.FileVersion(
_id=version._id,
creator=version.creator,
identifier=index + 1,
date_created=version.date_created,
location=version.location,
metadata=version.metadata,
size=version.size,
content_type=version.content_type,
date_modified=version.date_modified,
)
try:
version.save()
except KeyExistsException:
version = models.FileVersion.load(version._id)
return version
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
with TokuTransaction():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
main(dry=dry)
| apache-2.0 |
storborg/manhattan | manhattan/tests/test_middleware.py | 1 | 5386 | from __future__ import absolute_import, division, print_function
import re
from unittest import TestCase
from webob import Request, Response
from webtest import TestApp, TestRequest
from manhattan.middleware import ManhattanMiddleware
from manhattan.record import Record
from manhattan.log.memory import MemoryLog
class SampleApp(object):
def __call__(self, environ, start_response):
req = Request(environ)
if req.path_info.endswith('.txt'):
s = 'Hello %s' % req.path_info
resp = Response(s)
resp.content_type = 'text/plain'
elif req.path_info.endswith('.iter'):
resp = Response()
s = 'Hello %s' % req.path_info.encode('ascii')
def app_iter(sample):
for piece in ('<html><body>', sample, '</body>', '</html>'):
yield piece
self.consumed_iter = True
yield ' '
self.consumed_iter = False
resp.content_type = 'text/html'
resp.app_iter = app_iter(s)
else:
s = '<html><body><h1>Hello %s</h1></body></html>' % req.path_info
resp = Response(s)
resp.content_type = 'text/html'
return resp(environ, start_response)
log = MemoryLog()
host_map = {'localhost': 3,
'example.com': 5}
inner_app = SampleApp()
wrapped_app = ManhattanMiddleware(inner_app, log, 'secret', host_map=host_map)
app = TestApp(wrapped_app)
class TestMiddleware(TestCase):
def setUp(self):
app.reset()
log.purge()
def process(self):
records = list(log.process())
self.assertEqual(len(records), 1)
record = Record.from_list(records[0][0])
return record
def test_request(self):
resp = app.get('/')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertEqual(record.site_id, '3')
first_vid = record.vid
m = re.search('<img (.+)src="(.+)" alt="" />', resp.body)
pixel_path = m.group(2)
resp = app.get(pixel_path)
self.assertEqual(resp.content_type, 'image/gif')
record = self.process()
self.assertEqual(record.key, 'pixel')
self.assertEqual(record.site_id, '3')
self.assertEqual(first_vid, record.vid)
resp = app.get('/foo')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertTrue(record.url.endswith('/foo'))
self.assertEqual(record.site_id, '3')
self.assertEqual(first_vid, record.vid)
def test_host_map(self):
resp = app.get('/hello', extra_environ={'HTTP_HOST': 'example.com'})
self.assertEqual(resp.content_type, 'text/html')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertTrue(record.url.endswith('/hello'))
self.assertEqual(record.site_id, '5')
def test_unknown_host(self):
resp = app.get('/somepage',
extra_environ={'HTTP_HOST':
'supercalifragilicious.com'})
self.assertEqual(resp.content_type, 'text/html')
record = self.process()
self.assertEqual(record.key, 'page')
self.assertTrue(record.url.endswith('/somepage'))
self.assertEqual(record.site_id, '0')
def test_pixel_req(self):
resp = app.get('/vpixel.gif')
self.assertEqual(resp.content_type, 'image/gif',
'An html response should have a pixel tag.')
def test_non_html_pixel(self):
resp = app.get('/non-html-page.txt')
self.assertNotIn('/vpixel.gif', resp.body,
'A non-html response should not have a pixel tag.')
def test_generator_response(self):
req = Request.blank('/quux.iter')
resp = req.get_response(wrapped_app)
self.assertFalse(inner_app.consumed_iter,
'The generator response has been buffered by '
'middleware before instead of being returned as an '
'iterable.')
self.assertIn('/vpixel.gif', resp.body)
self.assertTrue(inner_app.consumed_iter)
def test_latin1_user_agent(self):
# Example user agent is latin1-encoded, so should be preserved.
sample_ua = '\xc0 \xe0 hello'
app.get('/somepage', extra_environ={'HTTP_USER_AGENT': sample_ua})
record = self.process()
self.assertEqual(record.user_agent, sample_ua.decode('latin1'))
def test_nongetpost_methods_not_processed(self):
app.put('/somepage')
app.delete('/somepage')
app.options('/somepage')
records = list(log.process())
self.assertEqual(len(records), 0)
def test_safari_top_sites_not_counted(self):
app.get('/blah', headers={'X-Purpose': 'preview'})
records = list(log.process())
self.assertEqual(len(records), 0)
def test_signature_mangled(self):
app.get('/')
orig_cookie = app.cookies['manhattan']
# truncate the last 4 chars, which will blow the sig
bad_cookie = orig_cookie[:-4]
bad_request = TestRequest.blank('/', cookies={'manhattan': bad_cookie})
app.request(bad_request)
new_cookie = app.cookies['manhattan']
self.assertNotEqual(bad_cookie, new_cookie)
| mit |
kubeflow/pipelines | components/arena/docker/job_generator.py | 3 | 2817 | import argparse
import datetime
import json
import os
import sys
import logging
import requests
import subprocess
import six
import time
import yaml
from subprocess import Popen,PIPE
from shlex import split
from utils import *
# Generate common options
def generate_options(args):
gpus = args.gpus
cpu = args.cpu
memory = args.memory
tensorboard = args.tensorboard
output_data = args.output_data
data = args.data
env = args.env
tensorboard_image = args.tensorboard_image
tensorboard = str2bool(args.tensorboard)
log_dir = args.log_dir
sync_source = args.sync_source
options = []
if gpus > 0:
options.extend(['--gpus', str(gpus)])
if cpu != '0':
options.extend(['--cpu', str(cpu)])
if memory != '0':
options.extend(['--memory', str(memory)])
if tensorboard_image != "tensorflow/tensorflow:1.12.0":
options.extend(['--tensorboardImage', tensorboard_image])
if tensorboard:
options.append("--tensorboard")
if os.path.isdir(args.log_dir):
options.extend(['--logdir', args.log_dir])
else:
logging.info("skip log dir :{0}".format(args.log_dir))
if len(data) > 0:
for d in data:
if ":" in d:
options.append("--data={0}".format(d))
else:
logging.info("--data={0} is illegal, skip.".format(d))
if len(env) > 0:
for e in env:
if "=" in e:
options.append("--env={0}".format(e))
else:
logging.info("--env={0} is illegal, skip.".format(e))
if len(args.workflow_name) > 0:
options.append("--env=WORKFLOW_NAME={0}".format(args.workflow_name))
if len(args.step_name) > 0:
options.append("--env=STEP_NAME={0}".format(args.step_name))
if len(sync_source) > 0:
if not sync_source.endswith(".git"):
raise ValueError("sync_source must be an http git url")
options.extend(['--sync-mode','git'])
options.extend(['--sync-source',sync_source])
return options
# Generate standalone job
def generate_job_command(args):
name = args.name
image = args.image
commandArray = [
'arena', 'submit', 'tfjob',
'--name={0}'.format(name),
'--image={0}'.format(image),
]
commandArray.extend(generate_options(args))
return commandArray, "tfjob"
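# Illustrative result (argument values are assumptions): with args.name="mnist-1"
# and args.image="tensorflow/tensorflow:1.12.0", generate_job_command(args) returns
# (['arena', 'submit', 'tfjob', '--name=mnist-1',
#   '--image=tensorflow/tensorflow:1.12.0', ...common options...], 'tfjob')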
# Generate mpi job
def generate_mpjob_command(args):
name = args.name
workers = args.workers
image = args.image
rdma = args.rdma
commandArray = [
'arena', 'submit', 'mpijob',
'--name={0}'.format(name),
'--workers={0}'.format(workers),
'--image={0}'.format(image),
]
if rdma.lower() == "true":
commandArray.append("--rdma")
commandArray.extend(generate_options(args))
return commandArray, "mpijob"
| apache-2.0 |
cgstudiomap/cgstudiomap | main/parts/odoo/addons/account_analytic_plans/__openerp__.py | 264 | 3114 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Multiple Analytic Plans',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """
This module allows the use of several analytic plans according to the general journal.
=======================================================================================
Here multiple analytic lines are created when the invoice or the entries
are confirmed.
For example, you can define the following analytic structure:
-------------------------------------------------------------
* **Projects**
* Project 1
+ SubProj 1.1
+ SubProj 1.2
* Project 2
* **Salesman**
* Eric
* Fabien
Here, we have two plans: Projects and Salesman. An invoice line must be able to write analytic entries in the 2 plans: SubProj 1.1 and Fabien. The amount can also be split.
The following example is for an invoice that touches the two subprojects and assigned to one salesman:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**Plan1:**
* SubProject 1.1 : 50%
* SubProject 1.2 : 50%
**Plan2:**
Eric: 100%
So when this line of invoice will be confirmed, it will generate 3 analytic lines,for one account entry.
The analytic plan validates the minimum and maximum percentage at the time of creation of distribution models.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/accounting',
'depends': ['account', 'account_analytic_default'],
'data': [
'security/account_analytic_plan_security.xml',
'security/ir.model.access.csv',
'account_analytic_plans_view.xml',
'account_analytic_plans_report.xml',
'wizard/analytic_plan_create_model_view.xml',
'wizard/account_crossovered_analytic_view.xml',
'views/report_crossoveredanalyticplans.xml',
'views/account_analytic_plans.xml',
],
'demo': [],
'test': ['test/acount_analytic_plans_report.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
HybridF5/jacket | jacket/db/migration.py | 1 | 1220 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Database setup and migration commands."""
from oslo_log import log as logging
from jacket.db.sqlalchemy import migration
LOG = logging.getLogger(__name__)
IMPL = migration
def db_sync(version=None, database='main'):
"""Migrate the database to `version` or the most recent version."""
return IMPL.db_sync(version=version, database=database)
def db_version(database='main'):
"""Display the current database version."""
return IMPL.db_version(database=database)
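# Illustrative usage sketch (assumed call sites, not part of this module):
#   db_sync(database='main')       # upgrade the 'main' database to the latest revision
#   current = db_version('main')   # report the current schema revision of the 'main' database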
| apache-2.0 |
rahulguptakota/paper-To-Reviewer-Matching-System | citeSentClassifier_gurki.py | 1 | 9088 | import xml.etree.ElementTree as ET
import re
import time
import os, csv
from nltk.tokenize import sent_tokenize
from textblob.classifiers import NaiveBayesClassifier
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from sklearn import naive_bayes
from random import shuffle
import numpy as np
import scipy as sc
import matplotlib.pyplot as plt
from prettyprint import pp
import os, re, pickle
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, precision_score, recall_score, classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC, NuSVC, SVC
from sklearn.grid_search import GridSearchCV
from datetime import datetime as dt
from ipy_table import *
def testClassifier(x_train, y_train, x_test, y_test, clf, name):
"""
    This method first trains the classifier on the training data
    and then tests the trained classifier on the test data.
    Finally, it reports some metrics on the classifier's performance.
Parameters
----------
x_train: np.ndarray
train data matrix
y_train: list
train data label
x_test: np.ndarray
test data matrix
y_test: list
test data label
    clf: sklearn classifier object implementing fit() and predict() methods
    name: str
        label used in log output and as the filename prefix for the pickled classifier
Returns
-------
metrics: list
[training time, testing time, recall and precision for every class, macro-averaged F1 score]
"""
print(name)
metrics = []
start = dt.now()
clf.fit(x_train, y_train)
end = dt.now()
print 'training time: ', (end - start)
pickle.dump( clf, open( name+".p", "wb" ) )
# add training time to metrics
metrics.append(end-start)
start = dt.now()
yhat = clf.predict(x_test)
end = dt.now()
print 'testing time: ', (end - start)
# add testing time to metrics
metrics.append(end-start)
print 'classification report: '
# print classification_report(y_test, yhat)
pp(classification_report(y_test, yhat))
print 'f1 score'
print f1_score(y_test, yhat, average='macro')
print 'accuracy score'
print accuracy_score(y_test, yhat)
precision = precision_score(y_test, yhat, average=None)
recall = recall_score(y_test, yhat, average=None)
# add precision and recall values to metrics
for p, r in zip(precision, recall):
metrics.append(p)
metrics.append(r)
#add macro-averaged F1 score to metrics
metrics.append(f1_score(y_test, yhat, average='macro'))
print 'confusion matrix:'
print confusion_matrix(y_test, yhat)
# plotting the confusion matrix
plt.imshow(confusion_matrix(y_test, yhat), interpolation='nearest')
# plt.show()
return metrics
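# Illustrative usage sketch, mirroring the calls made further below in this script:
#   bnb = BernoulliNB()
#   bnb_metrics = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, bnb, "bernoulliNB")
# The returned list holds [training time, testing time, per-class precision/recall, macro-averaged F1].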
stop_words = set(stopwords.words('english'))
clfrNB = naive_bayes.MultinomialNB()
train = []
test = []
rootDir = './data_label'
one_label = 0
zero_label = 0
ps = PorterStemmer()
for dirName, subdirList, fileList in os.walk(rootDir, topdown=False):
try:
# print(dirName)
fo = open(dirName + "/citeSents.csv", "r")
except:
continue
lines = fo.readlines()
for line in lines:
line = line.strip().lower()
# print(line)
splitsent = line.split(",,")
# print(splitsent)
word_tokens = word_tokenize(splitsent[0])
if splitsent[1] != '1' and splitsent[1] != '0' :
print(splitsent)
elif splitsent[1] == "1":
one_label += 1
else:
zero_label += 1
filtered_sentence = [w for w in word_tokens if not w in stop_words]
line = " ".join(filtered_sentence)
stemmed = [ps.stem(word) for word in line.split()]
stemmed = filter(lambda x: not(len(x)<3 or re.findall(r"[0-9]+",x)) , stemmed)
stemmed = list(stemmed)
line = " ".join(stemmed)
# print(line)
train.append((line, splitsent[1]))
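# Illustrative sketch of the preprocessing above (hypothetical input sentence):
#   "the authors extend the 2005 parser model" -> stop words dropped, numeric and
#   short tokens filtered out, remaining words Porter-stemmed -> roughly
#   "author extend parser model"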
shuffle(train)
# testindex = int(len(train)*4/5)
# test = train[testindex:]
# train = train[:testindex]
train_arr = []
# test_arr = []
train_lbl = []
# test_lbl = []
for x in train:
train_arr.append(x[0])
train_lbl.append(x[1])
# for x in test:
# test_arr.append(x[0])
# test_lbl.append(x[1])
vectorizer = CountVectorizer()
vectorizer.fit(train_arr)
pickle.dump(vectorizer, open("vectorizer.p", "wb"))
train_mat = vectorizer.transform(train_arr)
print train_mat
# print train_mat.shape
# test_mat = vectorizer.transform(test_arr)
# print test_mat.shape
tfidf = TfidfTransformer()
tfidf.fit(train_mat)
pickle.dump(tfidf, open("tfidf.p", "wb"))
train_tfmat = tfidf.transform(train_mat)
print train_tfmat.shape
print train_tfmat[0]
# test_tfmat = tfidf.transform(test_mat)
# print test_tfmat.shape
testindex = int(len(train)*4/5)
test_tfmat = train_tfmat[testindex:]
test_lbl = train_lbl[testindex:]
train_tfmat = train_tfmat[:testindex]
train_lbl = train_lbl[:testindex]
metrics_dict = []
bnb = BernoulliNB()
bnb_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, bnb, "bernoulliNB")
metrics_dict.append({'name':'BernoulliNB', 'metrics':bnb_me})
gnb = GaussianNB()
gnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, gnb, "guassianNB")
metrics_dict.append({'name':'GaussianNB', 'metrics':gnb_me})
mnb = MultinomialNB()
mnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, mnb, "MultinomialNB")
metrics_dict.append({'name':'MultinomialNB', 'metrics':mnb_me})
for nn in [5]:
print 'knn with ', nn, ' neighbors'
knn = KNeighborsClassifier(n_neighbors=nn)
knn_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, knn, "knn"+str(nn))
metrics_dict.append({'name':'5NN', 'metrics':knn_me})
print ' '
print("linear SVM starts:")
lsvm = LinearSVC( class_weight={'1': 1, '0' : 1})
lsvm_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, lsvm, "linearSVM")
metrics_dict.append({'name':'LinearSVM', 'metrics':lsvm_me})
rbfsvm = SVC(kernel = 'poly',degree=2,coef0=1 ,class_weight={'1': zero_label, '0' : one_label})
rbfsvm_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, rbfsvm, "rbfSVM")
metrics_dict.append({'name':'SVM with RBF kernel', 'metrics':rbfsvm_me})
bnb_params = {'alpha': [a*0.1 for a in range(0,11)]}
bnb_clf = GridSearchCV(BernoulliNB(), bnb_params, cv=10)
bnb_clf.fit(train_tfmat, train_lbl)
print 'best parameters'
print bnb_clf.best_params_
best_bnb = BernoulliNB(alpha=bnb_clf.best_params_['alpha'])
best_bnb_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, best_bnb,"bernoulliNB")
metrics_dict.append({'name':'Best BernoulliNB', 'metrics':best_bnb_me})
best_gnb = GaussianNB()
best_gnb_me = testClassifier(train_tfmat.toarray(), train_lbl, test_tfmat.toarray(), test_lbl, best_gnb, "guassianNB")
metrics_dict.append({'name':'Best GaussianNB', 'metrics':best_gnb_me})
mbn_params = {'alpha': [a*0.1 for a in range(0,11)]}
mbn_clf = GridSearchCV(MultinomialNB(), mbn_params, cv=10)
mbn_clf.fit(train_tfmat, train_lbl)
print 'best parameters'
print mbn_clf.best_params_
best_mbn = MultinomialNB(alpha=mbn_clf.best_params_['alpha'])
best_mbn_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, best_mbn, "MultinomialNB")
metrics_dict.append({'name':'Best MultinomialNB', 'metrics':best_mbn_me})
print metrics_dict
# knn_params = {'n_neighbors': range(1,21), 'weights': ['uniform', 'distance'], 'algorithm': ['ball_tree', 'kd_tree'],
# 'leaf_size': [15, 30, 50, 100], 'p': [1,2]}
# knn_clf = GridSearchCV(KNeighborsClassifier(), knn_params, cv=10)
# knn_clf.fit(train_tfmat, train_lbl)
# print 'best parameters'
# print knn_clf.best_params_
# best_knn = KNeighborsClassifier(n_neighbors=knn_clf.best_params_['n_neighbors'], weights=knn_clf.best_params_['weights'],
# algorithm=knn_clf.best_params_['algorithm'], leaf_size=knn_clf.best_params_['leaf_size'])
# best_knn_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, best_knn)
# metrics_dict.append({'name':'Best KNN', 'metrics':best_knn_me})
# nusvm = NuSVC()
# nusvm_me = testClassifier(train_tfmat, train_lbl, test_tfmat, test_lbl, nusvm)
# metrics_dict.append({'name':'nuSVM', 'metrics':nusvm_me})
# traindata = [data[0] for data in train]
# trainlabel = [data[1] for data in train]
# clfrNB.fit(traindata, trainlabel)
# print(test)
# cl = NaiveBayesClassifier(train)
# print(cl.classify("It is also possible to focus on non-compositional compounds, a key point in bilingual applications (CITATION; CITATION; Lin, 99)")) # "pos"
# print(cl.classify("I don't like their pizza.")) # "neg"
# for item in test:
# if(cl.classify(item[0]) == '1'):
# print(item, cl.classify(item[0]))
# print(cl.accuracy(test))
# print(cl.show_informative_features(100))
# print(train)
| mit |
cfe-lab/MiCall | micall/utils/find_missing_samples.py | 1 | 2703 | import re
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import logging
from csv import DictReader
from pathlib import Path
from micall.utils.sample_sheet_parser import sample_sheet_parser
logger = logging.getLogger(__name__)
def parse_args():
parser = ArgumentParser(
description="Look for samples that didn't get processed.",
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('start_folder',
nargs='?',
default='/media/raw_data/MiSeq/runs',
help='a run folder, or a parent of many run folders',
type=Path)
parser.add_argument('--skip_mid_hcv',
action='store_true',
help="Don't report missing samples with the MidHCV project.")
return parser.parse_args()
def process_run(run_folder: Path, skip_mid_hcv: bool):
if not (run_folder / 'needsprocessing').exists():
return False
if (run_folder / 'errorprocessing').exists():
return True
sample_sheet_path = run_folder / 'SampleSheet.csv'
with sample_sheet_path.open() as f:
try:
run_info = sample_sheet_parser(f)
except Exception:
raise RuntimeError(f'Failed to process run {run_folder.name}.')
sample_names = set(run_info['Data'])
if skip_mid_hcv:
sample_names = {sample_name
for sample_name in sample_names
if not re.match(r'.*MidHCV_S\d+$', sample_name)}
cascade_path = run_folder / 'Results' / 'version_7.9' / 'cascade.csv'
with cascade_path.open() as f:
reader = DictReader(f)
cascade_samples = {row['sample'] for row in reader}
missing_samples = sample_names - cascade_samples
if missing_samples:
logger.error('Missing samples in run %s: %s',
run_folder.name,
sorted(missing_samples))
return True
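# Illustrative sketch of the MidHCV filter above (hypothetical sample names):
#   'Sample1-Proj_S1'    -> kept
#   'Sample2-MidHCV_S12' -> dropped from the expected set when --skip_mid_hcv is passed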
def process_runs(runs_folder: Path, skip_mid_hcv: bool):
for file_path in sorted(runs_folder.iterdir()):
if file_path.is_dir():
# noinspection PyBroadException
try:
process_run(file_path, skip_mid_hcv)
except Exception:
logger.warning('Run %s failed.', file_path.name, exc_info=True)
def main():
logging.basicConfig(level=logging.INFO,
format='%(asctime)s[%(levelname)s]%(name)s: %(message)s')
logger.info('Starting.')
args = parse_args()
if not process_run(args.start_folder, args.skip_mid_hcv):
process_runs(args.start_folder, args.skip_mid_hcv)
logger.info('Done.')
main()
| agpl-3.0 |
haroldl/homeworklog | django/contrib/auth/tests/permissions.py | 231 | 1654 | try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.contrib.auth.management import create_permissions
from django.contrib.auth import models as auth_models
from django.contrib.contenttypes import models as contenttypes_models
from django.core.management import call_command
from django.test import TestCase
class TestAuthPermissions(TestCase):
def tearDown(self):
# These tests mess with content types, but content type lookups
# are cached, so we need to make sure the effects of this test
# are cleaned up.
contenttypes_models.ContentType.objects.clear_cache()
def test_permission_register_order(self):
"""Test that the order of registered permissions doesn't break"""
# Changeset 14413 introduced a regression in the ordering of
# newly created permissions for objects. When loading a fixture
# after the initial creation (such as during unit tests), the
# expected IDs for the permissions may not match up, leading to
# SQL errors. This is ticket 14731
# Start with a clean slate and build the permissions as we
# expect to see them in the fixtures.
auth_models.Permission.objects.all().delete()
contenttypes_models.ContentType.objects.all().delete()
create_permissions(auth_models, [], verbosity=0)
create_permissions(contenttypes_models, [], verbosity=0)
stderr = StringIO()
call_command('loaddata', 'test_permissions.json',
verbosity=0, commit=False, stderr=stderr)
self.assertEqual(stderr.getvalue(), '')
| bsd-3-clause |
ruuk/script.module.youtube.dl | lib/youtube_dl/extractor/streamable.py | 53 | 3891 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
)
class StreamableIE(InfoExtractor):
_VALID_URL = r'https?://streamable\.com/(?:[es]/)?(?P<id>\w+)'
_TESTS = [
{
'url': 'https://streamable.com/dnd1',
'md5': '3e3bc5ca088b48c2d436529b64397fef',
'info_dict': {
'id': 'dnd1',
'ext': 'mp4',
'title': 'Mikel Oiarzabal scores to make it 0-3 for La Real against Espanyol',
'thumbnail': r're:https?://.*\.jpg$',
'uploader': 'teabaker',
'timestamp': 1454964157.35115,
'upload_date': '20160208',
'duration': 61.516,
'view_count': int,
}
},
# older video without bitrate, width/height, etc. info
{
'url': 'https://streamable.com/moo',
'md5': '2cf6923639b87fba3279ad0df3a64e73',
'info_dict': {
'id': 'moo',
'ext': 'mp4',
'title': '"Please don\'t eat me!"',
'thumbnail': r're:https?://.*\.jpg$',
'timestamp': 1426115495,
'upload_date': '20150311',
'duration': 12,
'view_count': int,
}
},
{
'url': 'https://streamable.com/e/dnd1',
'only_matching': True,
},
{
'url': 'https://streamable.com/s/okkqk/drxjds',
'only_matching': True,
}
]
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<iframe[^>]+src=(?P<q1>[\'"])(?P<src>(?:https?:)?//streamable\.com/(?:(?!\1).+))(?P=q1)',
webpage)
if mobj:
return mobj.group('src')
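    # Illustrative sketch (hypothetical markup, not taken from the _TESTS above):
    #   StreamableIE._extract_url('<iframe src="https://streamable.com/e/dnd1"></iframe>')
    # would return 'https://streamable.com/e/dnd1'.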
def _real_extract(self, url):
video_id = self._match_id(url)
# Note: Using the ajax API, as the public Streamable API doesn't seem
# to return video info like the title properly sometimes, and doesn't
# include info like the video duration
video = self._download_json(
'https://ajax.streamable.com/videos/%s' % video_id, video_id)
# Format IDs:
# 0 The video is being uploaded
# 1 The video is being processed
# 2 The video has at least one file ready
# 3 The video is unavailable due to an error
status = video.get('status')
if status != 2:
raise ExtractorError(
'This video is currently unavailable. It may still be uploading or processing.',
expected=True)
title = video.get('reddit_title') or video['title']
formats = []
for key, info in video['files'].items():
if not info.get('url'):
continue
formats.append({
'format_id': key,
'url': self._proto_relative_url(info['url']),
'width': int_or_none(info.get('width')),
'height': int_or_none(info.get('height')),
'filesize': int_or_none(info.get('size')),
'fps': int_or_none(info.get('framerate')),
'vbr': float_or_none(info.get('bitrate'), 1000)
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': video.get('description'),
'thumbnail': self._proto_relative_url(video.get('thumbnail_url')),
'uploader': video.get('owner', {}).get('user_name'),
'timestamp': float_or_none(video.get('date_added')),
'duration': float_or_none(video.get('duration')),
'view_count': int_or_none(video.get('plays')),
'formats': formats
}
| gpl-2.0 |
mmarchini/python-mingus | mingus/extra/tunings.py | 10 | 26880 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# mingus - Music theory Python package, tunings module.
# Copyright (C) 2009, Bart Spaans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Dozens of standard tunings, a StringTuning class and some functions to help
you search through them."""
from mingus.containers.note import Note
from mingus.containers.note_container import NoteContainer
from mingus.core.mt_exceptions import RangeError
import mingus.core.notes as notes
class StringTuning(object):
"""A class to store and work with tunings and fingerings."""
def __init__(self, instrument, description, tuning):
"""Create a new StringTuning instance.
The instrument and description parameters should be strings; tuning
should be a list of strings or a list of lists of strings that
denote courses.
See tunings.add_tuning for examples.
"""
self.instrument = instrument
self.tuning = []
# convert to Note
for x in tuning:
if type(x) == list:
self.tuning.append([Note(n) for n in x])
else:
self.tuning.append(Note(x))
self.description = description
def count_strings(self):
"""Return the number of strings."""
return len(self.tuning)
def count_courses(self):
"""Return the average number of courses per string."""
c = 0
for x in self.tuning:
if type(x) == list:
c += len(x)
else:
c += 1
return float(c) / len(self.tuning)
def find_frets(self, note, maxfret=24):
"""Return a list with for each string the fret on which the note is
played or None if it can't be played on that particular string.
The maxfret parameter is the highest fret that can be played; note
should either be a string or a Note object.
Example:
>>> t = tunings.StringTuning('test', 'test', ['A-3', 'E-4'])
        >>> t.find_frets(Note('C-4'))
        [3, None]
        >>> t.find_frets(Note('A-4'))
        [12, 5]
"""
result = []
if type(note) == str:
note = Note(note)
for x in self.tuning:
if type(x) == list:
base = x[0]
else:
base = x
diff = base.measure(note)
if 0 <= diff <= maxfret:
result.append(diff)
else:
result.append(None)
return result
def find_fingering(self, notes, max_distance=4, not_strings=[]):
"""Return a list [(string, fret)] of possible fingerings for
'notes'.
The notes parameter should be a list of strings or Notes or a
NoteContainer; max_distance denotes the maximum distance between
frets; not_strings can be used to disclude certain strings and is
used internally to recurse.
Example:
>>> t = tunings.StringTuning('test', 'test', ['A-3', 'E-4', 'A-5'])
>>> t.find_fingering(['E-4', 'B-4'])
[[(0, 7), (1, 7)], [(1, 0), (0, 14)]]
"""
if notes is None:
return []
if len(notes) == 0:
return []
first = notes[0]
notes = notes[1:]
frets = self.find_frets(first)
result = []
for (string, fret) in enumerate(frets):
if fret is not None and string not in not_strings:
if len(notes) > 0:
# recursively find fingerings for
# remaining notes
r = self.find_fingering(notes, max_distance, not_strings
+ [string])
if r != []:
for f in r:
result.append([(string, fret)] + f)
else:
result.append([(string, fret)])
# filter impossible fingerings and sort
res = []
for r in result:
(min, max) = (1000, -1)
frets = 0
for (string, fret) in r:
if fret > max:
max = fret
if fret < min and fret != 0:
min = fret
frets += fret
if 0 <= max - min < max_distance or min == 1000 or max == -1:
res.append((frets, r))
return [r for (_, r) in sorted(res)]
def find_chord_fingering(self, notes, max_distance=4, maxfret=18,
max_fingers=4, return_best_as_NoteContainer=False):
"""Return a list of fret lists that are considered possible fingerings.
This function only looks at and matches on the note _names_ so it
does more than find_fingering.
Example:
>>> t = tunings.get_tuning('guitar', 'standard', 6, 1)
>>> t.find_chord_fingering(NoteContainer().from_chord('Am'))
[[0, 0, 2, 2, 1, 0], [0, 3, 2, 2, 1, 0], ......]
"""
def follow(string, next, name, prev=-1):
"""Follow the fret 'next' on 'string'; build result on the way."""
if string >= len(self.tuning) - 1:
return [[(next, name)]]
result = []
cur = res[string][next]
if cur != []:
for y in cur[1]:
for sub in follow(string + 1, y[0], y[1]):
if prev < 0:
result.append([(next, name)] + sub)
else:
if sub[0][0] == 0 or abs(sub[0][0] - prev)\
< max_distance:
result.append([(next, name)] + sub)
for s in follow(string + 1, maxfret + 1, None, next):
result.append([(next, name)] + s)
return [[(next, name)]] if result == [] else result
def make_lookup_table():
"""Prepare the lookup table.
table[string][fret] = (name, dest_frets)
"""
res = [[[] for x in xrange(maxfret + 2)] for x in
xrange(len(self.tuning) - 1)]
for x in xrange(0, len(self.tuning) - 1):
addedNone = -1
next = fretdict[x + 1]
for (fret, name) in fretdict[x]:
for (f2, n2) in next:
if n2 != name and (f2 == 0 or abs(fret - f2)
< max_distance):
if res[x][fret] != []:
res[x][fret][1].append((f2, n2))
else:
res[x][fret] = (name, [(f2, n2)])
if addedNone < x:
if res[x][maxfret + 1] != []:
res[x][maxfret + 1][1].append((f2, n2))
else:
res[x][maxfret + 1] = (None, [(f2, n2)])
addedNone = x
return res
# Convert to NoteContainer if necessary
n = notes
if notes != [] and type(notes) == list and type(notes[0]) == str:
n = NoteContainer(notes)
# Check number of note names.
notenames = [x.name for x in n]
if len(notenames) == 0 or len(notenames) > len(self.tuning):
return []
# Make string-fret dictionary
fretdict = []
for x in xrange(0, len(self.tuning)):
fretdict.append(self.find_note_names(notes, x, maxfret))
# Build table
res = make_lookup_table()
# Build result using table
result = []
# For each fret on the first string
for (i, y) in enumerate(res[0]):
if y != []:
(yname, next) = (y[0], y[1])
# For each destination fret in y
for (fret, name) in next:
# For each followed result
for s in follow(1, fret, name):
subresult = [(i, yname)] + s
# Get boundaries
(mi, ma, names) = (1000, -1000, [])
for (f, n) in subresult:
if n is not None:
if f != 0 and f <= mi:
mi = f
if f != 0 and f >= ma:
ma = f
names.append(n)
# Enforce boundaries
if abs(ma - mi) < max_distance:
# Check if all note
# names are present
covered = True
for n in notenames:
if n not in names:
covered = False
# Add to result
if covered and names != []:
result.append([y[0] if y[1]
is not None else y[1] for y in
subresult])
# Return semi-sorted list
s = sorted(result, key=lambda x: sum([t if t is not None else 1000
for (i, t) in enumerate(x)]))
s = filter(lambda a: fingers_needed(a) <= max_fingers, s)
if not return_best_as_NoteContainer:
return s
else:
rnotes = self.frets_to_NoteContainer(s[0])
for (i, x) in enumerate(rnotes):
if x.string < len(self.tuning) - 1:
if res[x.string][x.fret] != []:
rnotes[i].name = res[x.string][x.fret][0]
return rnotes
def frets_to_NoteContainer(self, fingering):
"""Convert a list such as returned by find_fret to a NoteContainer."""
res = []
for (string, fret) in enumerate(fingering):
if fret is not None:
res.append(self.get_Note(string, fret))
return NoteContainer(res)
def find_note_names(self, notelist, string=0, maxfret=24):
"""Return a list [(fret, notename)] in ascending order.
Notelist should be a list of Notes, note-strings or a NoteContainer.
Example:
>>> t = tunings.StringTuning('test', 'test', ['A-3', 'A-4'])
>>> t.find_note_names(['A', 'C', 'E'], 0, 12)
[(0, 'E'), (5, 'A'), (8, 'C'), (12, 'E')]
"""
n = notelist
if notelist != [] and type(notelist[0]) == str:
n = NoteContainer(notelist)
result = []
names = [x.name for x in n]
int_notes = [notes.note_to_int(x) for x in names]
# Base of the string
s = int(self.tuning[string]) % 12
for x in xrange(0, maxfret + 1):
if (s + x) % 12 in int_notes:
result.append((x, names[int_notes.index((s + x) % 12)]))
return result
def get_Note(self, string=0, fret=0, maxfret=24):
"""Return the Note on 'string', 'fret'.
Throw a RangeError if either the fret or string is unplayable.
Examples:
>>> t = tunings.StringTuning('test', 'test', ['A-3', 'A-4'])
        >>> t.get_Note(0, 0)
'A-3'
>>> t.get_Note(0, 1)
'A#-3'
>>> t.get_Note(1, 0)
'A-4'
"""
if 0 <= string < self.count_strings():
if 0 <= fret <= maxfret:
s = self.tuning[string]
if type(s) == list:
s = s[0]
n = Note(int(s) + fret)
n.string = string
n.fret = fret
return n
else:
raise RangeError("Fret '%d' on string '%d' is out of range"
% (string, fret))
else:
raise RangeError("String '%d' out of range" % string)
def fingers_needed(fingering):
"""Return the number of fingers needed to play the given fingering."""
split = False # True if an open string must be played, thereby making any
# subsequent strings impossible to bar with the index finger
indexfinger = False # True if the index finger was already accounted for
# in the count
minimum = min(finger for finger in fingering if finger) # the index finger
# plays the lowest
# finger position
result = 0
for finger in reversed(fingering):
if finger == 0: # an open string is played
split = True # subsequent strings are impossible to bar with the
# index finger
else:
if not split and finger == minimum: # if an open string hasn't been
# played and this is a job for
# the index finger:
if not indexfinger: # if the index finger hasn't been accounted
# for:
result += 1
indexfinger = True # index finger has now been accounted for
else:
result += 1
return result
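# Illustrative sketch (example fingering assumed, not part of the original module):
# for a six-string open Am shape [0, 0, 2, 2, 1, 0], fingers_needed returns 3:
# each of the three fretted strings needs its own finger, open strings need none,
# and the open first string prevents an index-finger barre from helping.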
# The index
_known = {}
def add_tuning(instrument, description, tuning):
"""Add a new tuning to the index.
The instrument and description parameters should be strings; tuning
should be a list of strings or a list of lists to denote courses.
Example:
>>> std_strings = ['E-2', 'A-2', 'D-3', 'G-3', 'B-3', 'E-4']
    >>> tunings.add_tuning('Guitar', 'standard', std_strings)
    >>> tw_strings = [['E-2', 'E-3'], ['A-2', 'A-3'], ...........]
    >>> tunings.add_tuning('Guitar', 'twelve string', tw_strings)
"""
t = StringTuning(instrument, description, tuning)
if _known.has_key(str.upper(instrument)):
_known[str.upper(instrument)][1][str.upper(description)] = t
else:
_known[str.upper(instrument)] = (instrument,
{str.upper(description): t})
def get_tuning(instrument, description, nr_of_strings=None, nr_of_courses=None):
"""Get the first tuning that satisfies the constraints.
The instrument and description arguments are treated like
case-insensitive prefixes. So search for 'bass' is the same is
'Bass Guitar'.
Example:
>>> tunings.get_tuning('guitar', 'standard')
<tunings.StringTuning instance at 0x139ac20>
"""
searchi = str.upper(instrument)
searchd = str.upper(description)
keys = _known.keys()
for x in keys:
if (searchi not in keys and x.find(searchi) == 0 or searchi in keys and
x == searchi):
for (desc, tun) in _known[x][1].iteritems():
if desc.find(searchd) == 0:
if nr_of_strings is None and nr_of_courses is None:
return tun
elif nr_of_strings is not None and nr_of_courses is None:
if tun.count_strings() == nr_of_strings:
return tun
elif nr_of_strings is None and nr_of_courses is not None:
if tun.count_courses() == nr_of_courses:
return tun
else:
if tun.count_courses() == nr_of_courses\
and tun.count_strings() == nr_of_strings:
return tun
def get_tunings(instrument=None, nr_of_strings=None, nr_of_courses=None):
"""Search tunings on instrument, strings, courses or a combination.
The instrument is actually treated like a case-insensitive prefix. So
asking for 'bass' yields the same tunings as 'Bass Guitar'; the string
'ba' yields all the instruments starting with 'ba'.
Example:
    >>> tunings.get_tunings(nr_of_strings=4)
>>> tunings.get_tunings('bass')
"""
search = ''
if instrument is not None:
search = str.upper(instrument)
result = []
keys = _known.keys()
inkeys = search in keys
for x in keys:
if (instrument is None or not inkeys and x.find(search) == 0 or
inkeys and search == x):
if nr_of_strings is None and nr_of_courses is None:
result += _known[x][1].values()
elif nr_of_strings is not None and nr_of_courses is None:
result += [y for y in _known[x][1].itervalues()
if y.count_strings() == nr_of_strings]
elif nr_of_strings is None and nr_of_courses is not None:
result += [y for y in _known[x][1].itervalues()
if y.count_courses() == nr_of_courses]
else:
result += [y for y in _known[x][1].itervalues()
if y.count_strings() == nr_of_strings
and y.count_courses() == nr_of_courses]
return result
def get_instruments():
"""Return a sorted list of instruments that have string tunings defined
for them."""
return sorted([_known[upname][0] for upname in _known])
add_tuning('Baglamas (Greek)', 'Modal D standard tuning', [['D-4', 'D-5'],
['A-4', 'A-4'], ['D-5', 'D-5']])
add_tuning('Bajo quinto', 'Standard tuning.', [['A-2', 'A-1'], ['D-3', 'D-2'],
['G-2', 'G-2'], ['C-3', 'C-3'], ['F-3', 'F-3']])
add_tuning('Bajo Sexto', 'Standard tuning', [
['E-2', 'E-1'],
['A-2', 'A-1'],
['D-3', 'D-2'],
['G-2', 'G-2'],
['C-3', 'C-3'],
['F-3', 'F-3'],
])
add_tuning('Bandola Oriental', 'Standard tuning.', [['G-3', 'G-3'], ['D-4',
'D-4'], ['A-4', 'A-4'], ['E-5', 'E-5']])
add_tuning('Banjo (bass)',
           'A cello banjo is sometimes called a "bass banjo", but there are true bass banjos as well'
, ['E-1', 'A-1', 'D-2', 'G-2'])
add_tuning('Banjo (cello)', 'Standard tuning. Same as cello and mandocello',
['C-2', 'G-2', 'D-3', 'A-3'])
add_tuning('Banjo (tenor)', 'Standard tenor jazz tuning', ['C-3', 'G-3', 'D-4',
'A-4'])
add_tuning('Banjo (tenor)', 'Irish tenor tuning', ['G-2', 'D-3', 'A-3', 'E-4'])
add_tuning('Banjo (5-string)', 'Open G tuning', ['G-4', 'D-3', 'G-3', 'B-3',
'D-4'])
add_tuning('Baritone guitar', 'Standard 5th lower tuning', [
'A-1',
'D-2',
'G-2',
'C-3',
'E-3',
'A-3',
])
add_tuning('Baritone guitar', 'Octave lower tuning', [
'E-1',
'A-1',
'D-2',
'G-2',
'B-2',
'E-3',
])
add_tuning('Bass guitar', 'Standard 4-string tuning', ['E-1', 'A-1', 'D-2',
'G-2'])
add_tuning('Bass guitar', 'Standard 5-string tuning', ['B-0', 'E-1', 'A-1',
'D-2', 'G-2'])
add_tuning('Bass guitar', 'Alternate 5-string tuning', ['E-1', 'A-1', 'D-2',
'G-2', 'C-3'])
add_tuning('Bass guitar', 'Standard 6-string tuning', [
'B-0',
'E-1',
'A-1',
'D-2',
'G-2',
'C-3',
])
add_tuning('Cello', 'Standard tuning', ['C-2', 'G-2', 'D-3', 'A-3'])
add_tuning('Cello', '"5th Suite" tuning', ['C-2', 'G-2', 'D-3', 'G-3'])
add_tuning('Cello banjo', 'Standard tuning', ['C-2', 'G-2', 'D-3', 'A-3'])
add_tuning('Charango', 'Standard C6 tuning. 3rd course is an octave pair.',
[['G-4', 'G-4'], ['C-4', 'C-4'], ['E-5', 'E-4'], ['A-4', 'A-4'],
['E-5', 'E-5']])
add_tuning('Charangon', 'F6 tuning', [['C-4', 'C-4'], ['F-4', 'F-4'], ['A-5',
'A-4'], ['D-5', 'D-5'], ['A-5', 'A-5']])
add_tuning('Charangon', 'G6 tuning', [['D-4', 'D-4'], ['G-4', 'G-4'], ['B-5',
'B-4'], ['E-5', 'E-5'], ['B-5', 'B-5']])
add_tuning('Cuatro', 'Standard tuning', [['B-3', 'B-2'], ['E-4', 'E-3'], ['A-3'
, 'A-3'], ['D-4', 'D-4'], ['G-4', 'G-4']])
add_tuning('Double bass', 'Orchestral tuning', ['E-1', 'A-1', 'D-2', 'G-2'])
add_tuning('Dulcimer',
'Ionian Tuning (The traditional dulcimer is fretted diatonically whole, whole, half, whole, whole, half, whole. )'
, ['A-3', 'A-3', 'D-3'])
add_tuning('Dulcimer', 'Mixolydian Tuning', ['D-4', 'A-3', 'D-3'])
add_tuning('Dulcimer', 'Dorian Tuning', ['G-3', 'A-3', 'D-3'])
add_tuning('Dulcimer', 'Aeolian Tuning', ['C-4', 'A-3', 'D-3'])
add_tuning('Fiddle', 'Standard tuning', ['G-3', 'D-4', 'A-4', 'E-5'])
add_tuning('Fiddle', 'Cajun tuning', ['F-3', 'C-4', 'G-4', 'F-5'])
add_tuning('Fiddle', 'Open G tuning', ['G-3', 'D-4', 'G-4', 'B-4'])
add_tuning('Fiddle', 'Sawmill tuning', ['G-3', 'D-4', 'G-4', 'D-5'])
add_tuning('Fiddle', '"Gee-dad"', ['G-3', 'D-4', 'A-4', 'D-5'])
add_tuning('Fiddle', 'Open D tuning', ['D-3', 'D-4', 'A-4', 'D-5'])
add_tuning('Fiddle', 'Old-timey D tuning', ['A-3', 'D-4', 'A-4', 'E-5'])
add_tuning('Fiddle', 'Cross Tuning, High bass, high counter', ['A-3', 'E-4',
'A-4', 'E-5'])
add_tuning('Gadulka', '3 playing strings, with up to 10 sympathetic strings.',
['A-3', 'E-3', 'A-4'])
add_tuning('Greek Bouzouki', 'Standard F6 tuning', [['C-3', 'C-4'], ['F-3',
'F-4'], ['A-3', 'A-3'], ['D-4', 'D-4']])
add_tuning('Greek Bouzouki', 'Standard F6 tuning', [['D-3', 'D-4'], ['A-3',
'A-3'], ['D-4', 'D-4']])
add_tuning('Guitar', 'Standard tuning', [
'E-2',
'A-2',
'D-3',
'G-3',
'B-3',
'E-4',
])
add_tuning('Guitar', '*DADGAD* Dsus4 tuning', [
'D-2',
'A-2',
'D-3',
'G-3',
'A-3',
'D-4',
])
add_tuning('Guitar', 'Double drop D tuning', [
'D-2',
'A-2',
'D-3',
'G-3',
'B-3',
'D-4',
])
add_tuning('Guitar', 'Drop D tuning', [
'D-2',
'A-2',
'D-3',
'G-3',
'B-3',
'E-4',
])
add_tuning('Guitar', 'Open C major tuning', [
'C-2',
'G-2',
'C-3',
'G-3',
'C-3',
'E-4',
])
add_tuning('Guitar', 'Open E minor tuning', [
'E-2',
'B-2',
'E-3',
'G-3',
'B-3',
'E-4',
])
add_tuning('Guitar', 'Open G major tuning', [
'D-2',
'G-2',
'D-3',
'G-3',
'B-3',
'D-4',
])
add_tuning('Guitar',
'Standard tuning. Some players tune the second course G string to unison to minimize breakage.'
, [
['E-2', 'E-3'],
['A-2', 'A-3'],
['D-3', 'D-4'],
['G-3', 'G-4'],
['B-3', 'B-3'],
['E-4', 'E-4'],
])
add_tuning('Guitar Banjo', 'Standard guitar tuning', [
'E-2',
'A-2',
'D-3',
'G-3',
'B-3',
'E-4',
])
add_tuning("Guitarrón", 'Standard tuning', [
'A-1',
'D-2',
'G-2',
'C-3',
'E-3',
'A-2',
])
add_tuning('Huapanguera', '', ['G-2', ['D-3', 'D-4'], ['G-3', 'G-3'], ['B-3',
'B-3'], 'E-3'])
add_tuning('Irish bouzouki', 'Irish tuning (octaves)', [['G-3', 'G-2'], ['D-4',
'D-3'], ['A-3', 'A-3'], ['D-4', 'D-4']])
add_tuning('Irish bouzouki', 'Irish tuning (unison pairs)', [['G-2', 'G-2'],
['D-3', 'D-3'], ['A-3', 'A-3'], ['D-4', 'D-4']])
add_tuning('Irish bouzouki', '"Mandolin" tuning (octaves)', [['G-3', 'G-2'],
['D-4', 'D-3'], ['A-3', 'A-3'], ['E-4', 'E-4']])
add_tuning('Irish bouzouki', '"Mandolin" tuning (unison pairs)', [['G-2', 'G-2'
], ['D-3', 'D-3'], ['A-3', 'A-3'], ['E-4', 'E-4']])
add_tuning('Irish bouzouki', 'Modal D tuning (octaves)', [['A-3', 'A-2'], ['D-4'
, 'D-3'], ['A-3', 'A-3'], ['D-4', 'D-4']])
add_tuning('Irish bouzouki', 'Modal D tuning (unison pairs)', [['A-2', 'A-2'],
['D-3', 'D-3'], ['A-3', 'A-3'], ['D-4', 'D-4']])
add_tuning('Mandobass', 'Standard tuning', ['E-1', 'A-1', 'D-2', 'G-2'])
add_tuning('Mandola',
'Standard tuning. Pitched a 5th below mandolin tuning. Known in Europe as the tenor mandola.'
, [['C-3', 'C-3'], ['G-3', 'G-3'], ['D-4', 'D-4'], ['A-4', 'A-4']])
add_tuning('Mandocello', 'Standard tuning. Pitched an octave below the mandola.'
, [['C-2', 'C-2'], ['G-2', 'G-2'], ['D-3', 'D-3'], ['A-3', 'A-3']])
add_tuning('Mandolin', 'Standard tuning', [['G-3', 'G-3'], ['D-4', 'D-4'],
['A-4', 'A-4'], ['E-5', 'E-5']])
add_tuning('Mandolin (piccolo)', 'Standard tuning', [['C-4', 'C-4'], ['G-4',
'G-4'], ['D-5', 'D-5'], ['A-5', 'A-5']])
add_tuning('Mandolin (Octave)',
'Standard tuning. Known in Europe as the octave mandola. Pitched an octave below the mandolin.'
, [['G-2', 'G-2'], ['D-3', 'D-3'], ['A-3', 'A-3'], ['E-4', 'E-4']])
add_tuning('Mejorana', 'Standard tuning', ['D-4', 'A-4', 'A-3', 'B-3', 'E-4'])
add_tuning('Mejorana', 'Alternative tuning', ['D-4', 'G-4', 'G-3', 'B-3', 'E-3'
])
add_tuning('Octave Guitar', 'see *Soprano guitar*', [
'E-3',
'A-4',
'D-4',
'G-4',
'B-4',
'E-5',
])
add_tuning('Requinto', 'Standard tuning', [
'A-2',
'D-3',
'G-3',
'C-4',
'E-4',
'A-4',
])
add_tuning('Ronroco', 'Standard C6 tuning (tuned an octave below the charango).'
, [['G-3', 'G-3'], ['C-3', 'C-3'], ['E-4', 'E-3'], ['A-3', 'A-3'],
['E-4', 'E-4']])
add_tuning('Soprano guitar', 'Standard tuning', [
'E-3',
'A-4',
'D-4',
'G-4',
'B-4',
'E-5',
])
add_tuning('Taro patch',
'Standard C6 tuning. The taro patch is a double-string ukulele.',
[['G-3', 'G-4'], ['C-3', 'C-4'], ['E-4', 'E-4'], ['A-4', 'A-4']])
add_tuning('Tenor guitar', 'Standard tuning.', ['C-3', 'G-3', 'D-4', 'A-4'])
add_tuning('Tiple', 'Standard Colombian G6 tuning.', [['D-4', 'D-3', 'D-4'],
['G-4', 'G-3', 'G-4'], ['B-3', 'B-3', 'B-3'], ['E-4', 'E-4', 'E-4']])
add_tuning('Tres', 'Standard C major tuning', [['G-4', 'G-3'], ['C-4', 'C-4'],
['E-4', 'E-3']])
add_tuning('Ukulele', 'Standard C6 tuning for soprano, concert and tenor.',
['G-4', 'C-4', 'E-4', 'A-4'])
add_tuning('Viola', 'Standard tuning. Pitched a 5th below the violin.', ['C-3',
'G-3', 'D-4', 'A-4'])
add_tuning('Violin', 'Standard tuning', ['G-3', 'D-4', 'A-4', 'E-5'])
add_tuning('Violin', 'Cajun tuning to accompany accordion', ['F-3', 'C-4', 'G-4'
, 'D-5'])
add_tuning('Walaycho', 'F6 tuning', [['C-4', 'C-4'], ['F-4', 'F-4'], ['A-5',
'A-4'], ['D-5', 'D-5'], ['A-5', 'A-5']])
add_tuning('Walaycho', 'G6 tuning', [['D-4', 'D-4'], ['G-4', 'G-4'], ['B-5',
'B-4'], ['E-5', 'E-5'], ['B-5', 'B-5']])
| gpl-3.0 |
rizzatti/luigi | luigi/tools/range.py | 12 | 31004 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Produces contiguous completed ranges of recurring tasks.
See RangeDaily and RangeHourly for basic usage.
Caveat - if gaps accumulate and their causes (e.g. missing dependencies) go
unmonitored/unmitigated, this will eventually keep retrying the same gaps
over and over and make no progress to more recent times. (See 'task_limit' and
'reverse' parameters.)
TODO foolproof against that kind of misuse?
"""
import itertools
import functools
import logging
import warnings
import operator
import re
import time
from datetime import datetime, timedelta
from luigi import six
import luigi
from luigi.parameter import ParameterException
from luigi.target import FileSystemTarget
from luigi.task import Register, flatten_output
logger = logging.getLogger('luigi-interface')
class RangeEvent(luigi.Event): # Not sure if subclassing currently serves a purpose. Stringly typed, events are.
"""
Events communicating useful metrics.
COMPLETE_COUNT would normally be nondecreasing, and its derivative would
describe performance (how many instances complete
invocation-over-invocation).
COMPLETE_FRACTION reaching 1 would be a telling event in case of a backfill
with defined start and stop. Would not be strikingly useful for a typical
recurring task without stop defined, fluctuating close to 1.
DELAY is measured from the first found missing datehour till (current time
+ hours_forward), or till stop if it is defined. In hours for Hourly.
TBD different units for other frequencies?
TODO any different for reverse mode? From first missing till last missing?
From last gap till stop?
"""
COMPLETE_COUNT = "event.tools.range.complete.count"
COMPLETE_FRACTION = "event.tools.range.complete.fraction"
DELAY = "event.tools.range.delay"
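# Illustrative sketch (assumed wiring, not part of this module): the metrics emitted
# below can be observed with an ordinary Luigi event handler, for example:
#   @luigi.Task.event_handler(RangeEvent.DELAY)
#   def on_range_delay(task_family, delay):
#       logger.warning('%s is %d jobs behind', task_family, delay)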
class RangeBase(luigi.WrapperTask):
"""
Produces a contiguous completed range of a recurring task.
Made for the common use case where a task is parameterized by e.g.
DateParameter, and assurance is needed that any gaps arising from downtime
are eventually filled.
Emits events that one can use to monitor gaps and delays.
At least one of start and stop needs to be specified.
(This is quite an abstract base class for subclasses with different
datetime parameter class, e.g. DateParameter, DateHourParameter, ..., and
different parameter naming, e.g. days_back/forward, hours_back/forward,
..., as well as different documentation wording, for good user experience.)
Subclasses will need to use the ``of`` parameter when overriding methods.
"""
# TODO lift the single parameter constraint by passing unknown parameters through WrapperTask?
of = luigi.TaskParameter(
description="task name to be completed. The task must take a single datetime parameter")
of_params = luigi.DictParameter(default=dict(), description="Arguments to be provided to the 'of' class when instantiating")
# The common parameters 'start' and 'stop' have type (e.g. DateParameter,
# DateHourParameter) dependent on the concrete subclass, cumbersome to
# define here generically without dark magic. Refer to the overrides.
start = luigi.Parameter()
stop = luigi.Parameter()
reverse = luigi.BoolParameter(
default=False,
description="specifies the preferred order for catching up. False - work from the oldest missing outputs onward; True - from the newest backward")
task_limit = luigi.IntParameter(
default=50,
description="how many of 'of' tasks to require. Guards against scheduling insane amounts of tasks in one go")
# TODO overridable exclude_datetimes or something...
now = luigi.IntParameter(
default=None,
description="set to override current time. In seconds since epoch")
param_name = luigi.Parameter(
default=None,
description="parameter name used to pass in parameterized value. Defaults to None, meaning use first positional parameter",
positional=False)
@property
def of_cls(self):
"""
DONT USE. Will be deleted soon. Use ``self.of``!
"""
if isinstance(self.of, six.string_types):
            warnings.warn('When using Range programmatically, do not pass "of" param as string!')
return Register.get_task_cls(self.of)
return self.of
# a bunch of datetime arithmetic building blocks that need to be provided in subclasses
def datetime_to_parameter(self, dt):
raise NotImplementedError
def parameter_to_datetime(self, p):
raise NotImplementedError
def datetime_to_parameters(self, dt):
"""
Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
"""
raise NotImplementedError
def parameters_to_datetime(self, p):
"""
Given a dictionary of parameters, will extract the ranged task parameter value
"""
raise NotImplementedError
def moving_start(self, now):
"""
Returns a datetime from which to ensure contiguousness in the case when
start is None or unfeasibly far back.
"""
raise NotImplementedError
def moving_stop(self, now):
"""
Returns a datetime till which to ensure contiguousness in the case when
stop is None or unfeasibly far forward.
"""
raise NotImplementedError
def finite_datetimes(self, finite_start, finite_stop):
"""
Returns the individual datetimes in interval [finite_start, finite_stop)
for which task completeness should be required, as a sorted list.
"""
raise NotImplementedError
def _emit_metrics(self, missing_datetimes, finite_start, finite_stop):
"""
For consistent metrics one should consider the entire range, but
it is open (infinite) if stop or start is None.
Hence make do with metrics respective to the finite simplification.
"""
datetimes = self.finite_datetimes(
finite_start if self.start is None else min(finite_start, self.parameter_to_datetime(self.start)),
finite_stop if self.stop is None else max(finite_stop, self.parameter_to_datetime(self.stop)))
delay_in_jobs = len(datetimes) - datetimes.index(missing_datetimes[0]) if datetimes and missing_datetimes else 0
self.trigger_event(RangeEvent.DELAY, self.of_cls.task_family, delay_in_jobs)
expected_count = len(datetimes)
complete_count = expected_count - len(missing_datetimes)
self.trigger_event(RangeEvent.COMPLETE_COUNT, self.of_cls.task_family, complete_count)
self.trigger_event(RangeEvent.COMPLETE_FRACTION, self.of_cls.task_family, float(complete_count) / expected_count if expected_count else 1)
def _format_datetime(self, dt):
return self.datetime_to_parameter(dt)
def _format_range(self, datetimes):
param_first = self._format_datetime(datetimes[0])
param_last = self._format_datetime(datetimes[-1])
return '[%s, %s]' % (param_first, param_last)
def _instantiate_task_cls(self, param):
return self.of(**self._task_parameters(param))
@property
def _param_name(self):
if self.param_name is None:
return next(x[0] for x in self.of.get_params() if x[1].positional)
else:
return self.param_name
def _task_parameters(self, param):
kwargs = dict(**self.of_params)
kwargs[self._param_name] = param
return kwargs
def requires(self):
# cache because we anticipate a fair amount of computation
if hasattr(self, '_cached_requires'):
return self._cached_requires
if not self.start and not self.stop:
raise ParameterException("At least one of start and stop needs to be specified")
if not self.start and not self.reverse:
raise ParameterException("Either start needs to be specified or reverse needs to be True")
if self.start and self.stop and self.start > self.stop:
raise ParameterException("Can't have start > stop")
# TODO check overridden complete() and exists()
now = datetime.utcfromtimestamp(time.time() if self.now is None else self.now)
moving_start = self.moving_start(now)
finite_start = moving_start if self.start is None else max(self.parameter_to_datetime(self.start), moving_start)
moving_stop = self.moving_stop(now)
finite_stop = moving_stop if self.stop is None else min(self.parameter_to_datetime(self.stop), moving_stop)
datetimes = self.finite_datetimes(finite_start, finite_stop) if finite_start <= finite_stop else []
if datetimes:
logger.debug('Actually checking if range %s of %s is complete',
self._format_range(datetimes), self.of_cls.task_family)
missing_datetimes = sorted(self._missing_datetimes(datetimes))
logger.debug('Range %s lacked %d of expected %d %s instances',
self._format_range(datetimes), len(missing_datetimes), len(datetimes), self.of_cls.task_family)
else:
missing_datetimes = []
logger.debug('Empty range. No %s instances expected', self.of_cls.task_family)
self._emit_metrics(missing_datetimes, finite_start, finite_stop)
if self.reverse:
required_datetimes = missing_datetimes[-self.task_limit:]
else:
required_datetimes = missing_datetimes[:self.task_limit]
if required_datetimes:
logger.debug('Requiring %d missing %s instances in range %s',
len(required_datetimes), self.of_cls.task_family, self._format_range(required_datetimes))
if self.reverse:
required_datetimes.reverse() # TODO priorities, so that within the batch tasks are ordered too
self._cached_requires = [self._instantiate_task_cls(self.datetime_to_parameter(d)) for d in required_datetimes]
return self._cached_requires
def missing_datetimes(self, finite_datetimes):
"""
Override in subclasses to do bulk checks.
Returns a sorted list.
This is a conservative base implementation that brutally checks completeness, instance by instance.
Inadvisable as it may be slow.
"""
return [d for d in finite_datetimes if not self._instantiate_task_cls(self.datetime_to_parameter(d)).complete()]
def _missing_datetimes(self, finite_datetimes):
"""
Backward compatible wrapper. Will be deleted eventually (stated on Dec 2015)
"""
try:
return self.missing_datetimes(finite_datetimes)
except TypeError as ex:
if 'missing_datetimes()' in repr(ex):
warnings.warn('In your Range* subclass, missing_datetimes() should only take 1 argument (see latest docs)')
return self.missing_datetimes(self.of_cls, finite_datetimes)
else:
raise
class RangeDailyBase(RangeBase):
"""
Produces a contiguous completed range of a daily recurring task.
"""
start = luigi.DateParameter(
default=None,
description="beginning date, inclusive. Default: None - work backward forever (requires reverse=True)")
stop = luigi.DateParameter(
default=None,
description="ending date, exclusive. Default: None - work forward forever")
days_back = luigi.IntParameter(
default=100, # slightly more than three months
description=("extent to which contiguousness is to be assured into "
"past, in days from current time. Prevents infinite loop "
"when start is none. If the dataset has limited retention"
" (i.e. old outputs get removed), this should be set "
"shorter to that, too, to prevent the oldest outputs "
"flapping. Increase freely if you intend to process old "
"dates - worker's memory is the limit"))
days_forward = luigi.IntParameter(
default=0,
description="extent to which contiguousness is to be assured into future, in days from current time. Prevents infinite loop when stop is none")
def datetime_to_parameter(self, dt):
return dt.date()
def parameter_to_datetime(self, p):
return datetime(p.year, p.month, p.day)
def datetime_to_parameters(self, dt):
"""
Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
"""
return self._task_parameters(dt.date())
def parameters_to_datetime(self, p):
"""
Given a dictionary of parameters, will extract the ranged task parameter value
"""
dt = p[self._param_name]
return datetime(dt.year, dt.month, dt.day)
def moving_start(self, now):
return now - timedelta(days=self.days_back)
def moving_stop(self, now):
return now + timedelta(days=self.days_forward)
def finite_datetimes(self, finite_start, finite_stop):
"""
Simply returns the points in time that correspond to turn of day.
"""
date_start = datetime(finite_start.year, finite_start.month, finite_start.day)
dates = []
for i in itertools.count():
t = date_start + timedelta(days=i)
if t >= finite_stop:
return dates
if t >= finite_start:
dates.append(t)
class RangeHourlyBase(RangeBase):
"""
Produces a contiguous completed range of an hourly recurring task.
"""
start = luigi.DateHourParameter(
default=None,
description="beginning datehour, inclusive. Default: None - work backward forever (requires reverse=True)")
stop = luigi.DateHourParameter(
default=None,
description="ending datehour, exclusive. Default: None - work forward forever")
hours_back = luigi.IntParameter(
default=100 * 24, # slightly more than three months
description=("extent to which contiguousness is to be assured into "
"past, in hours from current time. Prevents infinite "
"loop when start is none. If the dataset has limited "
"retention (i.e. old outputs get removed), this should "
"be set shorter to that, too, to prevent the oldest "
"outputs flapping. Increase freely if you intend to "
"process old dates - worker's memory is the limit"))
# TODO always entire interval for reprocessings (fixed start and stop)?
hours_forward = luigi.IntParameter(
default=0,
description="extent to which contiguousness is to be assured into future, in hours from current time. Prevents infinite loop when stop is none")
def datetime_to_parameter(self, dt):
return dt
def parameter_to_datetime(self, p):
return p
def datetime_to_parameters(self, dt):
"""
Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
"""
return self._task_parameters(dt)
def parameters_to_datetime(self, p):
"""
Given a dictionary of parameters, will extract the ranged task parameter value
"""
return p[self._param_name]
def moving_start(self, now):
return now - timedelta(hours=self.hours_back)
def moving_stop(self, now):
return now + timedelta(hours=self.hours_forward)
def finite_datetimes(self, finite_start, finite_stop):
"""
Simply returns the points in time that correspond to whole hours.
"""
datehour_start = datetime(finite_start.year, finite_start.month, finite_start.day, finite_start.hour)
datehours = []
for i in itertools.count():
t = datehour_start + timedelta(hours=i)
if t >= finite_stop:
return datehours
if t >= finite_start:
datehours.append(t)
def _format_datetime(self, dt):
return luigi.DateHourParameter().serialize(dt)
class RangeByMinutesBase(RangeBase):
"""
Produces a contiguous completed range of an recurring tasks separated a specified number of minutes.
"""
start = luigi.DateMinuteParameter(
default=None,
description="beginning date-hour-minute, inclusive. Default: None - work backward forever (requires reverse=True)")
stop = luigi.DateMinuteParameter(
default=None,
description="ending date-hour-minute, exclusive. Default: None - work forward forever")
minutes_back = luigi.IntParameter(
default=60*24, # one day
description=("extent to which contiguousness is to be assured into "
"past, in minutes from current time. Prevents infinite "
"loop when start is none. If the dataset has limited "
"retention (i.e. old outputs get removed), this should "
"be set shorter to that, too, to prevent the oldest "
"outputs flapping. Increase freely if you intend to "
"process old dates - worker's memory is the limit"))
minutes_forward = luigi.IntParameter(
default=0,
description="extent to which contiguousness is to be assured into future, "
"in minutes from current time. Prevents infinite loop when stop is none")
minutes_interval = luigi.IntParameter(
default=1,
description="separation between events in minutes. It must evenly divide 60"
)
def datetime_to_parameter(self, dt):
return dt
def parameter_to_datetime(self, p):
return p
def datetime_to_parameters(self, dt):
"""
Given a date-time, will produce a dictionary of of-params combined with the ranged task parameter
"""
return self._task_parameters(dt)
def parameters_to_datetime(self, p):
"""
Given a dictionary of parameters, will extract the ranged task parameter value
"""
dt = p[self._param_name]
return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute)
def moving_start(self, now):
return now - timedelta(minutes=self.minutes_back)
def moving_stop(self, now):
return now + timedelta(minutes=self.minutes_forward)
def finite_datetimes(self, finite_start, finite_stop):
"""
Simply returns the points in time that correspond to a whole number of minutes intervals.
"""
# Validate that the minutes_interval can divide 60 and it is greater than 0 and lesser than 60
if not (0 < self.minutes_interval < 60):
raise ParameterException('minutes-interval must be within 0..60')
if (60 / self.minutes_interval) * self.minutes_interval != 60:
raise ParameterException('minutes-interval does not evenly divide 60')
# start of a complete interval, e.g. 20:13 and the interval is 5 -> 20:10
start_minute = int(finite_start.minute/self.minutes_interval)*self.minutes_interval
datehour_start = datetime(
year=finite_start.year,
month=finite_start.month,
day=finite_start.day,
hour=finite_start.hour,
minute=start_minute)
datehours = []
for i in itertools.count():
t = datehour_start + timedelta(minutes=i*self.minutes_interval)
if t >= finite_stop:
return datehours
if t >= finite_start:
datehours.append(t)
def _format_datetime(self, dt):
return luigi.DateMinuteParameter().serialize(dt)
def _constrain_glob(glob, paths, limit=5):
"""
Tweaks glob into a list of more specific globs that together still cover paths and not too much extra.
Saves us minutes long listings for long dataset histories.
Specifically, in this implementation the leftmost occurrences of "[0-9]"
give rise to a few separate globs that each specialize the expression to
digits that actually occur in paths.
"""
def digit_set_wildcard(chars):
"""
Makes a wildcard expression for the set, a bit readable, e.g. [1-5].
"""
chars = sorted(chars)
if len(chars) > 1 and ord(chars[-1]) - ord(chars[0]) == len(chars) - 1:
return '[%s-%s]' % (chars[0], chars[-1])
else:
return '[%s]' % ''.join(chars)
current = {glob: paths}
while True:
pos = list(current.keys())[0].find('[0-9]')
if pos == -1:
# no wildcard expressions left to specialize in the glob
return list(current.keys())
char_sets = {}
for g, p in six.iteritems(current):
char_sets[g] = sorted(set(path[pos] for path in p))
if sum(len(s) for s in char_sets.values()) > limit:
return [g.replace('[0-9]', digit_set_wildcard(char_sets[g]), 1) for g in current]
for g, s in six.iteritems(char_sets):
for c in s:
new_glob = g.replace('[0-9]', c, 1)
new_paths = list(filter(lambda p: p[pos] == c, current[g]))
current[new_glob] = new_paths
del current[g]
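# Illustrative sketch of _constrain_glob (hypothetical paths, not from this module):
# given glob '/data/2015-01-01/[0-9][0-9]' and existing paths only for hours 00..09,
# the first hour digit collapses to '0' while specializing the second would exceed
# the limit, so roughly ['/data/2015-01-01/0[0-9]'] is returned.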
def most_common(items):
"""
Stand-in for collections.Counter.most_common(1) (Counter is only available from Python 2.7); returns an (item, count) tuple.
"""
counts = {}
for i in items:
counts.setdefault(i, 0)
counts[i] += 1
return max(six.iteritems(counts), key=operator.itemgetter(1))
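# e.g. most_common(['a', 'b', 'a']) returns ('a', 2) -- the winning item together with
# its count, mirroring collections.Counter(items).most_common(1)[0].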
def _get_per_location_glob(tasks, outputs, regexes):
"""
Builds a glob listing existing output paths.
Esoteric reverse engineering, but worth it given that (compared to an
equivalent contiguousness guarantee by naive complete() checks)
requests to the filesystem are cut by orders of magnitude, and users
don't even have to retrofit existing tasks anyhow.
"""
paths = [o.path for o in outputs]
# naive, because some matches could be confused by numbers earlier
# in path, e.g. /foo/fifa2000k/bar/2000-12-31/00
matches = [r.search(p) for r, p in zip(regexes, paths)]
for m, p, t in zip(matches, paths, tasks):
if m is None:
raise NotImplementedError("Couldn't deduce datehour representation in output path %r of task %s" % (p, t))
n_groups = len(matches[0].groups())
# the most common position of every group is likely
# to be conclusive hit or miss
positions = [most_common((m.start(i), m.end(i)) for m in matches)[0] for i in range(1, n_groups + 1)]
glob = list(paths[0]) # FIXME sanity check that it's the same for all paths
for start, end in positions:
glob = glob[:start] + ['[0-9]'] * (end - start) + glob[end:]
# chop off the last path item
# (wouldn't need to if `hadoop fs -ls -d` equivalent were available)
return ''.join(glob).rsplit('/', 1)[0]
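# Worked illustration (hypothetical output path): for an hourly task whose outputs look
# like '/var/data/2014-01-31T12/output.tsv' and a regex capturing (%Y).*(%m).*(%d).*(%H),
# each captured span is replaced by one '[0-9]' per digit and the trailing path item is
# dropped, yielding the glob
# '/var/data/[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]T[0-9][0-9]'.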
def _get_filesystems_and_globs(datetime_to_task, datetime_to_re):
"""
Yields a (filesystem, glob) tuple per every output location of task.
The task can have one or several FileSystemTarget outputs.
For convenience, the task can be a luigi.WrapperTask,
in which case outputs of all its dependencies are considered.
"""
# probe some scattered datetimes that are unlikely to all occur in paths, other than as genuine representations of the datetime parameter
# TODO limit to [self.start, self.stop) so messages are less confusing? Done trivially it can kill correctness
sample_datetimes = [datetime(y, m, d, h) for y in range(2000, 2050, 10) for m in range(1, 4) for d in range(5, 8) for h in range(21, 24)]
regexes = [re.compile(datetime_to_re(d)) for d in sample_datetimes]
sample_tasks = [datetime_to_task(d) for d in sample_datetimes]
sample_outputs = [flatten_output(t) for t in sample_tasks]
for o, t in zip(sample_outputs, sample_tasks):
if len(o) != len(sample_outputs[0]):
raise NotImplementedError("Outputs must be consistent over time, sorry; was %r for %r and %r for %r" % (o, t, sample_outputs[0], sample_tasks[0]))
# TODO fall back on requiring last couple of days? to avoid astonishing blocking when changes like that are deployed
# erm, actually it's not hard to test entire hours_back..hours_forward and split into consistent subranges FIXME?
for target in o:
if not isinstance(target, FileSystemTarget):
raise NotImplementedError("Output targets must be instances of FileSystemTarget; was %r for %r" % (target, t))
for o in zip(*sample_outputs): # transposed, so here we're iterating over logical outputs, not datetimes
glob = _get_per_location_glob(sample_tasks, o, regexes)
yield o[0].fs, glob
def _list_existing(filesystem, glob, paths):
"""
Get all the paths that do in fact exist. Returns a set of all existing paths.
Takes a luigi.target.FileSystem object, a str which represents a glob and
a list of strings representing paths.
"""
globs = _constrain_glob(glob, paths)
time_start = time.time()
listing = []
for g in sorted(globs):
logger.debug('Listing %s', g)
if filesystem.exists(g):
listing.extend(filesystem.listdir(g))
logger.debug('%d %s listings took %f s to return %d items',
len(globs), filesystem.__class__.__name__, time.time() - time_start, len(listing))
return set(listing)
def infer_bulk_complete_from_fs(datetimes, datetime_to_task, datetime_to_re):
"""
Efficiently determines missing datetimes by filesystem listing.
The current implementation works for the common case of a task writing
output to a FileSystemTarget whose path is built using strftime with format
like '...%Y...%m...%d...%H...', without custom complete() or exists().
(Eventually Luigi could have ranges of completion as first-class citizens.
Then this listing business could be factored away/be provided for
explicitly in target API or some kind of a history server.)
"""
filesystems_and_globs_by_location = _get_filesystems_and_globs(datetime_to_task, datetime_to_re)
paths_by_datetime = [[o.path for o in flatten_output(datetime_to_task(d))] for d in datetimes]
listing = set()
for (f, g), p in zip(filesystems_and_globs_by_location, zip(*paths_by_datetime)): # transposed, so here we're iterating over logical outputs, not datetimes
listing |= _list_existing(f, g, p)
# quickly learn everything that's missing
missing_datetimes = []
for d, p in zip(datetimes, paths_by_datetime):
if not set(p) <= listing:
missing_datetimes.append(d)
return missing_datetimes
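# Illustration (hypothetical layout): if each date writes two outputs, e.g.
# '/out/a/2014-01-05' and '/out/b/2014-01-05', then 2014-01-05 only counts as complete
# when both paths appear in the combined glob listings; a date with any expected path
# absent from the listing is reported back as missing.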
class RangeDaily(RangeDailyBase):
"""Efficiently produces a contiguous completed range of a daily recurring
task that takes a single DateParameter.
Benefits from ``bulk_complete`` information to efficiently cover gaps; falls back
to inferring completeness from an output filesystem listing to facilitate the
common case usage.
Convenient to use even from command line, like:
.. code-block:: console
luigi --module your.module RangeDaily --of YourActualTask --start 2014-01-01
"""
def missing_datetimes(self, finite_datetimes):
try:
cls_with_params = functools.partial(self.of, **self.of_params)
complete_parameters = self.of.bulk_complete.__func__(cls_with_params, map(self.datetime_to_parameter, finite_datetimes))
return set(finite_datetimes) - set(map(self.parameter_to_datetime, complete_parameters))
except NotImplementedError:
return infer_bulk_complete_from_fs(
finite_datetimes,
lambda d: self._instantiate_task_cls(self.datetime_to_parameter(d)),
lambda d: d.strftime('(%Y).*(%m).*(%d)'))
class RangeHourly(RangeHourlyBase):
"""Efficiently produces a contiguous completed range of an hourly recurring
task that takes a single DateHourParameter.
Benefits from bulk_complete information to efficiently cover gaps.
Falls back to infer it from output filesystem listing to facilitate the
common case usage.
Convenient to use even from command line, like:
.. code-block:: console
luigi --module your.module RangeHourly --of YourActualTask --start 2014-01-01T00
"""
def missing_datetimes(self, finite_datetimes):
try:
# TODO: Why is there a list() here but not for the RangeDaily??
cls_with_params = functools.partial(self.of, **self.of_params)
complete_parameters = self.of.bulk_complete.__func__(cls_with_params, list(map(self.datetime_to_parameter, finite_datetimes)))
return set(finite_datetimes) - set(map(self.parameter_to_datetime, complete_parameters))
except NotImplementedError:
return infer_bulk_complete_from_fs(
finite_datetimes,
lambda d: self._instantiate_task_cls(self.datetime_to_parameter(d)),
lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H)'))
class RangeByMinutes(RangeByMinutesBase):
"""Efficiently produces a contiguous completed range of an recurring
task every interval minutes that takes a single DateMinuteParameter.
Benefits from bulk_complete information to efficiently cover gaps.
Falls back to infer it from output filesystem listing to facilitate the
common case usage.
Convenient to use even from command line, like:
.. code-block:: console
luigi --module your.module RangeByMinutes --of YourActualTask --start 2014-01-01T0123
"""
def missing_datetimes(self, finite_datetimes):
try:
cls_with_params = functools.partial(self.of, **self.of_params)
complete_parameters = self.of.bulk_complete.__func__(cls_with_params, map(self.datetime_to_parameter, finite_datetimes))
return set(finite_datetimes) - set(map(self.parameter_to_datetime, complete_parameters))
except NotImplementedError:
return infer_bulk_complete_from_fs(
finite_datetimes,
lambda d: self._instantiate_task_cls(self.datetime_to_parameter(d)),
lambda d: d.strftime('(%Y).*(%m).*(%d).*(%H).*(%M)'))
| apache-2.0 |