repo_name | path | copies | size | content | license
---|---|---|---|---|---|
mcardillo55/django | django/contrib/gis/geos/prototypes/topology.py | 338 | 2145 | """
This module houses the GEOS ctypes prototype functions for the
topological operations on geometries.
"""
from ctypes import c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_minus_one, check_string,
)
from django.contrib.gis.geos.prototypes.geom import geos_char_p
class Topology(GEOSFuncFactory):
"For GEOS unary topology functions."
argtypes = [GEOM_PTR]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
# Topology Routines
geos_boundary = Topology('GEOSBoundary')
geos_buffer = Topology('GEOSBuffer', argtypes=[GEOM_PTR, c_double, c_int])
geos_centroid = Topology('GEOSGetCentroid')
geos_convexhull = Topology('GEOSConvexHull')
geos_difference = Topology('GEOSDifference', argtypes=[GEOM_PTR, GEOM_PTR])
geos_envelope = Topology('GEOSEnvelope')
geos_intersection = Topology('GEOSIntersection', argtypes=[GEOM_PTR, GEOM_PTR])
geos_linemerge = Topology('GEOSLineMerge')
geos_pointonsurface = Topology('GEOSPointOnSurface')
geos_preservesimplify = Topology('GEOSTopologyPreserveSimplify', argtypes=[GEOM_PTR, c_double])
geos_simplify = Topology('GEOSSimplify', argtypes=[GEOM_PTR, c_double])
geos_symdifference = Topology('GEOSSymDifference', argtypes=[GEOM_PTR, GEOM_PTR])
geos_union = Topology('GEOSUnion', argtypes=[GEOM_PTR, GEOM_PTR])
geos_cascaded_union = GEOSFuncFactory('GEOSUnionCascaded', argtypes=[GEOM_PTR], restype=GEOM_PTR)
# GEOSRelate returns a string, not a geometry.
geos_relate = GEOSFuncFactory(
'GEOSRelate', argtypes=[GEOM_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
# Linear referencing routines
geos_project = GEOSFuncFactory(
'GEOSProject', argtypes=[GEOM_PTR, GEOM_PTR], restype=c_double, errcheck=check_minus_one
)
geos_interpolate = Topology('GEOSInterpolate', argtypes=[GEOM_PTR, c_double])
geos_project_normalized = GEOSFuncFactory(
'GEOSProjectNormalized', argtypes=[GEOM_PTR, GEOM_PTR], restype=c_double, errcheck=check_minus_one
)
geos_interpolate_normalized = Topology('GEOSInterpolateNormalized', argtypes=[GEOM_PTR, c_double])
| bsd-3-clause |
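The `Topology` factory above makes wrapping further geometry-in/geometry-out GEOS routines a one-liner. A hedged sketch, not part of the original file: it assumes `Topology`, `GEOM_PTR` and `c_double` from the module above are in scope, and a linked GEOS build that exports `GEOSUnaryUnion` and `GEOSSnap` (GEOS 3.3+).

# Illustrative only -- these extra prototypes are assumptions, not part of topology.py.
geos_unaryunion = Topology('GEOSUnaryUnion')  # GEOM_PTR -> GEOM_PTR, checked by check_geom
geos_snap = Topology('GEOSSnap', argtypes=[GEOM_PTR, GEOM_PTR, c_double])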
nburn42/tensorflow | tensorflow/python/training/tensorboard_logging_test.py | 132 | 4456 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.tensorboard_logging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import tempfile
import time
from tensorflow.core.util import event_pb2
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer
from tensorflow.python.training import tensorboard_logging
class EventLoggingTest(test.TestCase):
def setUp(self):
self._work_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
self._sw = writer.FileWriter(self._work_dir)
tensorboard_logging.set_summary_writer(self._sw)
self.addCleanup(shutil.rmtree, self._work_dir)
# Stop the clock to avoid test flakiness.
now = time.time()
time._real_time = time.time
time.time = lambda: now
# Mock out logging calls so we can verify that the right number of messages
# get logged.
self.logged_message_count = 0
self._actual_log = logging.log
def mockLog(*args, **kwargs):
self.logged_message_count += 1
self._actual_log(*args, **kwargs)
logging.log = mockLog
def tearDown(self):
time.time = time._real_time
logging.log = self._actual_log
def assertLoggedMessagesAre(self, expected_messages):
self._sw.close()
event_paths = glob.glob(os.path.join(self._work_dir, "event*"))
    # If the test runs multiple times in the same directory we can have
# more than one matching event file. We only want to read the last one.
self.assertTrue(event_paths)
event_reader = summary_iterator.summary_iterator(event_paths[-1])
# Skip over the version event.
next(event_reader)
for level, message in expected_messages:
event = next(event_reader)
self.assertEqual(event.wall_time, time.time())
self.assertEqual(event.log_message.level, level)
self.assertEqual(event.log_message.message, message)
def testBasic(self):
tensorboard_logging.set_summary_writer(self._sw)
tensorboard_logging.error("oh no!")
tensorboard_logging.error("for%s", "mat")
self.assertLoggedMessagesAre([(event_pb2.LogMessage.ERROR, "oh no!"),
(event_pb2.LogMessage.ERROR, "format")])
self.assertEqual(2, self.logged_message_count)
def testVerbosity(self):
tensorboard_logging.set_summary_writer(self._sw)
tensorboard_logging.set_verbosity(tensorboard_logging.ERROR)
tensorboard_logging.warn("warn")
tensorboard_logging.error("error")
tensorboard_logging.set_verbosity(tensorboard_logging.DEBUG)
tensorboard_logging.debug("debug")
self.assertLoggedMessagesAre([(event_pb2.LogMessage.ERROR, "error"),
(event_pb2.LogMessage.DEBUGGING, "debug")])
    # All messages should be logged because the tensorboard_logging verbosity doesn't
# affect logging verbosity.
self.assertEqual(3, self.logged_message_count)
def testBadVerbosity(self):
with self.assertRaises(ValueError):
tensorboard_logging.set_verbosity("failure")
with self.assertRaises(ValueError):
tensorboard_logging.log("bad", "dead")
def testNoSummaryWriter(self):
"""Test that logging without a SummaryWriter succeeds."""
tensorboard_logging.set_summary_writer(None)
tensorboard_logging.warn("this should work")
self.assertEqual(1, self.logged_message_count)
def testSummaryWriterFailsAfterClear(self):
tensorboard_logging._clear_summary_writer()
with self.assertRaises(RuntimeError):
tensorboard_logging.log(tensorboard_logging.ERROR, "failure")
if __name__ == "__main__":
test.main()
| apache-2.0 |
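The clock-freezing trick in `setUp()` above is handy on its own; here is a minimal standalone sketch of the same monkeypatching pattern (not TensorFlow-specific):

import time

def freeze_time():
    # Pin time.time() to its current value and return a restore callable.
    real_time = time.time
    now = real_time()
    time.time = lambda: now
    return lambda: setattr(time, 'time', real_time)

restore = freeze_time()
assert time.time() == time.time()  # the frozen clock never advances
restore()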
Bourneer/scrapy | scrapy/utils/console.py | 70 | 2754 | from functools import wraps
from collections import OrderedDict
def _embed_ipython_shell(namespace={}, banner=''):
"""Start an IPython Shell"""
try:
from IPython.terminal.embed import InteractiveShellEmbed
from IPython.terminal.ipapp import load_default_config
except ImportError:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
from IPython.frontend.terminal.ipapp import load_default_config
@wraps(_embed_ipython_shell)
def wrapper(namespace=namespace, banner=''):
config = load_default_config()
shell = InteractiveShellEmbed(
banner1=banner, user_ns=namespace, config=config)
shell()
return wrapper
def _embed_bpython_shell(namespace={}, banner=''):
"""Start a bpython shell"""
import bpython
@wraps(_embed_bpython_shell)
def wrapper(namespace=namespace, banner=''):
bpython.embed(locals_=namespace, banner=banner)
return wrapper
def _embed_standard_shell(namespace={}, banner=''):
"""Start a standard python shell"""
import code
try: # readline module is only available on unix systems
import readline
except ImportError:
pass
else:
import rlcompleter
readline.parse_and_bind("tab:complete")
@wraps(_embed_standard_shell)
def wrapper(namespace=namespace, banner=''):
code.interact(banner=banner, local=namespace)
return wrapper
DEFAULT_PYTHON_SHELLS = OrderedDict([
('ipython', _embed_ipython_shell),
('bpython', _embed_bpython_shell),
( 'python', _embed_standard_shell),
])
def get_shell_embed_func(shells=None, known_shells=None):
"""Return the first acceptable shell-embed function
from a given list of shell names.
"""
if shells is None: # list, preference order of shells
shells = DEFAULT_PYTHON_SHELLS.keys()
if known_shells is None: # available embeddable shells
known_shells = DEFAULT_PYTHON_SHELLS.copy()
for shell in shells:
if shell in known_shells:
try:
# function test: run all setup code (imports),
                # but don't fall into the shell
return known_shells[shell]()
except ImportError:
continue
def start_python_console(namespace=None, banner='', shells=None):
"""Start Python console bound to the given namespace.
Readline support and tab completion will be used on Unix, if available.
"""
if namespace is None:
namespace = {}
try:
shell = get_shell_embed_func(shells)
if shell is not None:
shell(namespace=namespace, banner=banner)
except SystemExit: # raised when using exit() in python code.interact
pass
| bsd-3-clause |
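A hypothetical use of the module above: request a shell from an explicit preference list and pre-bind a variable into its namespace (`demo` is a stand-in object, not a Scrapy API):

from scrapy.utils.console import start_python_console

demo = {'answer': 42}
# Falls back through the list until an importable shell is found.
start_python_console(namespace={'demo': demo},
                     banner='`demo` is available here',
                     shells=['ipython', 'bpython', 'python'])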
ptisserand/ansible | hacking/tests/gen_distribution_version_testcase.py | 63 | 1905 | #!/usr/bin/env python
"""
This script generates test cases for test_distribution_version.py.
To do so it outputs the relevant files from /etc/*release, the output of platform.dist() and the current ansible_facts regarding the distribution version.
This assumes a working ansible version in the path.
"""
import platform
import os.path
import subprocess
import json
import sys
filelist = [
'/etc/oracle-release',
'/etc/slackware-version',
'/etc/redhat-release',
'/etc/vmware-release',
'/etc/openwrt_release',
'/etc/system-release',
'/etc/alpine-release',
'/etc/release',
'/etc/arch-release',
'/etc/os-release',
'/etc/SuSE-release',
'/etc/gentoo-release',
'/etc/os-release',
'/etc/lsb-release',
'/etc/altlinux-release',
'/etc/os-release',
'/etc/coreos/update.conf',
'/usr/lib/os-release',
]
fcont = {}
for f in filelist:
if os.path.exists(f):
s = os.path.getsize(f)
if s > 0 and s < 10000:
with open(f) as fh:
fcont[f] = fh.read()
dist = platform.dist()
facts = ['distribution', 'distribution_version', 'distribution_release', 'distribution_major_version', 'os_family']
try:
ansible_out = subprocess.check_output(
['ansible', 'localhost', '-m', 'setup'])
except subprocess.CalledProcessError as e:
print("ERROR: ansible run failed, output was: \n")
print(e.output)
sys.exit(e.returncode)
parsed = json.loads(ansible_out[ansible_out.index('{'):])
ansible_facts = {}
for fact in facts:
try:
ansible_facts[fact] = parsed['ansible_facts']['ansible_' + fact]
except:
ansible_facts[fact] = "N/A"
nicename = ansible_facts['distribution'] + ' ' + ansible_facts['distribution_version']
output = {
'name': nicename,
'input': fcont,
'platform.dist': dist,
'result': ansible_facts,
}
print(json.dumps(output, indent=4))
| gpl-3.0 |
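The script above writes a single JSON test case to stdout. A small sketch of consuming that output; the `case.json` filename is hypothetical (e.g. from `gen_distribution_version_testcase.py > case.json`):

import json

with open('case.json') as fh:
    case = json.load(fh)
print(case['name'])                 # e.g. "Ubuntu 16.04"
for path in sorted(case['input']):
    print('%s: %d bytes' % (path, len(case['input'][path])))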
mezz64/home-assistant | homeassistant/components/volkszaehler/sensor.py | 19 | 4129 | """Support for consuming values for the Volkszaehler API."""
from datetime import timedelta
import logging
from volkszaehler import Volkszaehler
from volkszaehler.exceptions import VolkszaehlerApiConnectionError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PORT,
ENERGY_WATT_HOUR,
POWER_WATT,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_UUID = "uuid"
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "Volkszaehler"
DEFAULT_PORT = 80
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
SENSOR_TYPES = {
"average": ["Average", POWER_WATT, "mdi:power-off"],
"consumption": ["Consumption", ENERGY_WATT_HOUR, "mdi:power-plug"],
"max": ["Max", POWER_WATT, "mdi:arrow-up"],
"min": ["Min", POWER_WATT, "mdi:arrow-down"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_UUID): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_MONITORED_CONDITIONS, default=["average"]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Volkszaehler sensors."""
host = config[CONF_HOST]
name = config[CONF_NAME]
port = config[CONF_PORT]
uuid = config[CONF_UUID]
conditions = config[CONF_MONITORED_CONDITIONS]
session = async_get_clientsession(hass)
vz_api = VolkszaehlerData(
Volkszaehler(hass.loop, session, uuid, host=host, port=port)
)
await vz_api.async_update()
if vz_api.api.data is None:
raise PlatformNotReady
dev = []
for condition in conditions:
dev.append(VolkszaehlerSensor(vz_api, name, condition))
async_add_entities(dev, True)
class VolkszaehlerSensor(Entity):
"""Implementation of a Volkszaehler sensor."""
def __init__(self, vz_api, name, sensor_type):
"""Initialize the Volkszaehler sensor."""
self.vz_api = vz_api
self._name = name
self.type = sensor_type
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return "{} {}".format(self._name, SENSOR_TYPES[self.type][0])
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return SENSOR_TYPES[self.type][1]
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self.vz_api.available
@property
def state(self):
"""Return the state of the resources."""
return self._state
async def async_update(self):
"""Get the latest data from REST API."""
await self.vz_api.async_update()
if self.vz_api.api.data is not None:
self._state = round(getattr(self.vz_api.api, self.type), 2)
class VolkszaehlerData:
"""The class for handling the data retrieval from the Volkszaehler API."""
def __init__(self, api):
"""Initialize the data object."""
self.api = api
self.available = True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from the Volkszaehler REST API."""
try:
await self.api.get_data()
self.available = True
except VolkszaehlerApiConnectionError:
_LOGGER.error("Unable to fetch data from the Volkszaehler API")
self.available = False
| apache-2.0 |
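For reference, a dict of the shape that `PLATFORM_SCHEMA` above validates; the UUID is a placeholder, and `host`, `port` and `name` fall back to the declared defaults when omitted:

example_config = {
    'platform': 'volkszaehler',
    'uuid': '01234567-89ab-cdef-0123-456789abcdef',  # hypothetical channel UUID
    'host': 'localhost',                             # DEFAULT_HOST
    'port': 80,                                      # DEFAULT_PORT
    'name': 'Volkszaehler',                          # DEFAULT_NAME
    'monitored_conditions': ['average', 'min', 'max'],
}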
gvnn3/PCS | scripts/ptptimes.py | 1 | 3112 | #!/usr/bin/env python
# Copyright (c) 2012-2016, Neville-Neil Consulting
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Neville-Neil Consulting nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: George V. Neville-Neil
#
# Description: Script displays all the times found in various PTP packets.
from pcs.packets.ptp import *
from pcs.packets.ptp_common import Common
from pcs.packets.ipv4 import ipv4
from pcs.packets.udpv4 import udpv4
import pcs
import datetime
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file",
dest="file", default=None,
help="File to read from.")
parser.add_option("-n", "--natural",
action="store_true",
dest="natural", default=False,
help="human readable time.")
(options, args) = parser.parse_args()
file = pcs.PcapConnector(options.file)
packet = file.readpkt()
    while packet != None:
        # A bare `continue` would retest the same packet forever, so advance
        # to the next packet before skipping ones without a payload.
        if packet.data == None or packet.data.data == None:
            packet = file.readpkt()
            continue
if packet.data.data.data != None:
if (options.natural == True):
ts = datetime.datetime.fromtimestamp(packet.data.data.timestamp)
ms = ts.microsecond / 1000
msecond = ts.strftime("%H:%M:%S")
msecond += (".%d") % ms
print msecond
else:
print packet.data.data.timestamp
print packet.data.data.data
if packet.data.data.data.data != None:
print packet.data.data.data.data
packet = file.readpkt()
if __name__ == "__main__":
main()
| bsd-3-clause |
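The `--natural` branch above boils down to this timestamp formatting; a standalone sketch with a made-up timestamp:

import datetime

stamp = 1340000000.123456  # example epoch seconds, as read from a PTP packet
ts = datetime.datetime.fromtimestamp(stamp)
print("%s.%d" % (ts.strftime("%H:%M:%S"), ts.microsecond // 1000))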
alex-robbins/micropython | examples/hwapi/soft_pwm.py | 43 | 1367 | import utime
from hwconfig import LED
# Using sleep_ms() gives pretty poor PWM resolution and
# brightness control, but we use it in the attempt to
# make this demo portable to even more boards (e.g. to
# those which don't provide sleep_us(), or provide it, but
# not precisely, as would be the case on non-realtime OSes).
# We otherwise use a 20ms period, to keep the frequency at
# least 50Hz and avoid visible flickering (you may still see
# it if you're unlucky).
def pwm_cycle(led, duty, cycles):
duty_off = 20 - duty
for i in range(cycles):
if duty:
led.on()
utime.sleep_ms(duty)
if duty_off:
led.off()
utime.sleep_ms(duty_off)
# At a duty setting of 1, an LED is still pretty bright, while
# at duty 0 it's off. This makes for a rather unsmooth transition and
# breaks the fade effect. So, we avoid the value 0 and oscillate between
# 1 and 20. Actually, the highest values like 19 and 20 are also
# barely distinguishable (both are too bright and burn
# your eye). So, an improvement to the visible effect would be to use
# more steps (at least 10x), and thus a higher frequency, and use a
# range which includes 1 but excludes values at the top.
while True:
# Fade in
for i in range(1, 21):
pwm_cycle(LED, i, 2)
# Fade out
for i in range(20, 0, -1):
pwm_cycle(LED, i, 2)
| mit |
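The numbers in the comments above work out as follows: the period is duty + duty_off = 20 ms, i.e. 50 Hz, with 20 brightness steps of 5% each. A quick check:

PERIOD_MS = 20
for duty in (1, 10, 19, 20):
    print("duty=%2d -> %3d%% on at %d Hz" % (duty, duty * 100 // PERIOD_MS, 1000 // PERIOD_MS))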
dentaku65/plugin.video.sod | servers/ultramegabit.py | 42 | 1290 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for ultramegabit
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[ultramegabit.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
return video_urls
# Finds this server's videos in the passed-in text
def find_videos(data):
encontrados = set()
devuelve = []
# http://ultramegabit.com/file/
patronvideos = '(ultramegabit.com/file/details/[^"]+)'
logger.info("[ultramegabit.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[ultramegabit]"
url ='http://'+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'ultramegabit' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
| gpl-3.0 |
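The pattern in `find_videos()` above can be exercised in isolation; `abc123` is made-up sample data:

import re

data = '<a href="http://ultramegabit.com/file/details/abc123">mirror</a>'
patronvideos = '(ultramegabit.com/file/details/[^"]+)'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
print(['http://' + m for m in matches])
# -> ['http://ultramegabit.com/file/details/abc123']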
Celtoys/pycgen | pycgen.py | 1 | 5990 |
#
# Copyright 2014 Celtoys Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
#
# Compiles and executes the Python code that the generator code has access to.
# Returns the global python environment that must be used between all code runs in a file.
#
def CreatePythonExecEnvironment():
prologue = """
import os
import inspect
# Empty string for output
g_EmitOutput = ""
def EmitStr(string):
globals()["g_EmitOutput"] += string
def EmitLn(line):
globals()["g_EmitOutput"] += line
globals()["g_EmitOutput"] += os.linesep
def EmitRepl(generic, repl):
# Remove leading/trailing newlines
generic = generic.lstrip(os.linesep)
generic = generic.rstrip(os.linesep)
# Split into old value and replacement values
r = repl.split(":")
old_val = r[0]
new_vals = r[1]
# Iterate all replacement values and emit them
vals = new_vals.split(",")
for v in vals:
EmitLn(generic.replace(old_val, v))
def EmitFmt(line):
# Get local variables from calling frame
# This is available in CPython but not guaranteed to be available in other Python implementations
calling_frame = inspect.currentframe().f_back
locals = calling_frame.f_locals
# Remove leading/trailing newlines
line = line.lstrip(os.linesep)
line = line.rstrip(os.linesep)
# Format with the unpacked calling frame local variables
EmitStr(line.format(**locals))
"""
# Compile the prologue and return the environment it creates
exec_globals = { }
prologue_compiled = compile(prologue, "<prologue>", "exec")
exec(prologue_compiled, exec_globals)
return exec_globals
#
# Opens a file and returns it as an array of lines
#
def OpenFile(filename):
try:
f = open(filename, "rb")
lines = f.readlines()
return lines
except:
return None
#
# Strips all leading whitespace on a line and treats it as the indent
#
def GetIndent(line):
lstrip = line.lstrip()
indent_size = len(line) - len(lstrip)
return line[0:indent_size]
#
# Executes python code at runtime within the given environment.
# Assumes that the code will be doing nothing but code emits so returns the
# emit output from the python environment passed in.
#
def ExecPythonLines(lines, filename, start_line, python_env):
if len(lines) == 0:
return None
# Use the indent from the starting line to prepare all the other lines
start_indent = GetIndent(lines[0])
indent_size = len(start_indent)
# Strip leading indentation from all lines
for i in range(0, len(lines)):
line = lines[i]
# Lines that don't share the same indent could be an error...
if not line.startswith(start_indent):
# ...unless they're empty lines
line = line.lstrip()
if len(line) == 0:
lines[i] = ""
continue
print(filename + "(" + str(start_line + i) + "): Bad leading whitespace indent")
return None
lines[i] = line[indent_size:]
# Run python code with shared global state for entire file
python_env["g_EmitOutput"] = ""
code = "".join(lines)
exec(code, python_env)
output = python_env["g_EmitOutput"].splitlines()
if len(output) == 0:
return None
# Prefix the output with indentation inferred from the generator
output = [ start_indent + line for line in output ]
output = os.linesep.join(output) + os.linesep
return output
#
# Quick and dirty, line-by-line parsing of an input file looking for Python generator code-blocks
# to execute. Returns the modified file as a set of lines.
#
def ParseInputFile(python_env, filename):
lines = OpenFile(filename)
if lines == None:
return None
new_lines = [ ]
in_emit_output = False
in_python_code = False
python_lines = [ ]
python_start_line = 0
python_indent = 0
for i in range(0, len(lines)):
line = lines[i]
line_lstrip = line.lstrip()
# Detect and skip start and end of emit output from previous runs
if line_lstrip.startswith("//$pycgen-begin"):
in_emit_output = True
continue
if in_emit_output and line_lstrip.startswith("//$pycgen-end"):
in_emit_output = False
continue
# Copy lines from old to new, ignoring emit output from previous runs
if not in_emit_output:
new_lines.append(line)
# Detect start of new python code block
if line_lstrip.startswith("/*$pycgen"):
in_python_code = True
python_lines = [ ]
python_start_line = i + 2 # +1 for skip over "pycgen", +1 for 1-based indexing
python_indent = GetIndent(line)
continue
# Detect end of python code block
if in_python_code and line_lstrip.startswith("*/"):
in_python_code = False
output = ExecPythonLines(python_lines, filename, python_start_line, python_env)
if output != None:
new_lines.append(python_indent + "//$pycgen-begin" + os.linesep)
new_lines += output
new_lines.append(python_indent + "//$pycgen-end" + os.linesep)
# Copy lines in python code blocks
if in_python_code:
python_lines.append(line)
return new_lines
# Parse command-line arguments
if len(sys.argv) != 3:
print("Use: pycgen.py <input_filename> <output_filename>")
sys.exit(1)
input_filename = os.path.abspath(sys.argv[1])
output_filename = sys.argv[2]
# Parse the input file and generate any required code
python_env = CreatePythonExecEnvironment()
output_lines = ParseInputFile(python_env, input_filename)
if output_lines == None:
print("ERROR: Couldn't open file " + input_filename)
sys.exit(1)
# Write the result
try:
output_file = open(output_filename, "wb")
output_file.writelines(output_lines)
except:
print("ERROR: Couldn't write to file " + output_filename)
sys.exit(1)
| apache-2.0 |
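A minimal input sketch for the tool above (file names hypothetical). Given an `in.h` containing the block below, `pycgen.py in.h out.h` leaves the `/*$pycgen ... */` block in place and inserts the emitted lines between `//$pycgen-begin` and `//$pycgen-end` markers:

example_input = '''\
/*$pycgen
EmitRepl("T clamp_T(T v, T lo, T hi);", "T:int,float")
*/
'''
# Expected generated section in out.h:
#   //$pycgen-begin
#   int clamp_int(int v, int lo, int hi);
#   float clamp_float(float v, float lo, float hi);
#   //$pycgen-end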
kyleknap/boto | boto/ec2/instancestatus.py | 181 | 6854 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Details(dict):
"""
A dict object that contains name/value pairs which provide
more detailed information about the status of the system
or the instance.
"""
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'name':
self._name = value
elif name == 'status':
self[self._name] = value
else:
setattr(self, name, value)
class Event(object):
"""
A status event for an instance.
:ivar code: A string indicating the event type.
:ivar description: A string describing the reason for the event.
:ivar not_before: A datestring describing the earliest time for
the event.
:ivar not_after: A datestring describing the latest time for
the event.
"""
def __init__(self, code=None, description=None,
not_before=None, not_after=None):
self.code = code
self.description = description
self.not_before = not_before
self.not_after = not_after
def __repr__(self):
return 'Event:%s' % self.code
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'code':
self.code = value
elif name == 'description':
self.description = value
elif name == 'notBefore':
self.not_before = value
elif name == 'notAfter':
self.not_after = value
else:
setattr(self, name, value)
class Status(object):
"""
A generic Status object used for system status and instance status.
:ivar status: A string indicating overall status.
:ivar details: A dict containing name-value pairs which provide
more details about the current status.
"""
def __init__(self, status=None, details=None):
self.status = status
if not details:
details = Details()
self.details = details
def __repr__(self):
return 'Status:%s' % self.status
def startElement(self, name, attrs, connection):
if name == 'details':
return self.details
return None
def endElement(self, name, value, connection):
if name == 'status':
self.status = value
else:
setattr(self, name, value)
class EventSet(list):
def startElement(self, name, attrs, connection):
if name == 'item':
event = Event()
self.append(event)
return event
else:
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class InstanceStatus(object):
"""
Represents an EC2 Instance status as reported by
DescribeInstanceStatus request.
:ivar id: The instance identifier.
:ivar zone: The availability zone of the instance.
:ivar events: A list of events relevant to the instance.
:ivar state_code: An integer representing the current state
of the instance.
:ivar state_name: A string describing the current state
of the instance.
:ivar system_status: A Status object that reports impaired
functionality that stems from issues related to the systems
that support an instance, such as such as hardware failures
and network connectivity problems.
:ivar instance_status: A Status object that reports impaired
functionality that arises from problems internal to the instance.
"""
def __init__(self, id=None, zone=None, events=None,
state_code=None, state_name=None):
self.id = id
self.zone = zone
self.events = events
self.state_code = state_code
self.state_name = state_name
self.system_status = Status()
self.instance_status = Status()
def __repr__(self):
return 'InstanceStatus:%s' % self.id
def startElement(self, name, attrs, connection):
if name == 'eventsSet':
self.events = EventSet()
return self.events
elif name == 'systemStatus':
return self.system_status
elif name == 'instanceStatus':
return self.instance_status
else:
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.id = value
elif name == 'availabilityZone':
self.zone = value
elif name == 'code':
self.state_code = int(value)
elif name == 'name':
self.state_name = value
else:
setattr(self, name, value)
class InstanceStatusSet(list):
"""
A list object that contains the results of a call to
DescribeInstanceStatus request. Each element of the
list will be an InstanceStatus object.
:ivar next_token: If the response was truncated by
the EC2 service, the next_token attribute of the
object will contain the string that needs to be
passed in to the next request to retrieve the next
set of results.
"""
def __init__(self, connection=None):
list.__init__(self)
self.connection = connection
self.next_token = None
def startElement(self, name, attrs, connection):
if name == 'item':
status = InstanceStatus()
self.append(status)
return status
else:
return None
def endElement(self, name, value, connection):
if name == 'nextToken':
self.next_token = value
setattr(self, name, value)
| mit |
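These classes are filled in by boto's EC2 connection while parsing the DescribeInstanceStatus response; a hedged usage sketch (the region name is an example):

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
statuses = conn.get_all_instance_status()  # returns an InstanceStatusSet
for status in statuses:
    print('%s %s %s system=%s instance=%s' % (
        status.id, status.zone, status.state_name,
        status.system_status.status, status.instance_status.status))
if statuses.next_token:
    print('truncated; pass next_token to the next call for more results')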
chromium2014/src | build/android/pylib/base/base_test_result_unittest.py | 134 | 2817 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for TestRunResults."""
import unittest
from pylib.base.base_test_result import BaseTestResult
from pylib.base.base_test_result import TestRunResults
from pylib.base.base_test_result import ResultType
class TestTestRunResults(unittest.TestCase):
def setUp(self):
self.p1 = BaseTestResult('p1', ResultType.PASS, log='pass1')
other_p1 = BaseTestResult('p1', ResultType.PASS)
self.p2 = BaseTestResult('p2', ResultType.PASS)
self.f1 = BaseTestResult('f1', ResultType.FAIL, log='failure1')
self.c1 = BaseTestResult('c1', ResultType.CRASH, log='crash1')
self.u1 = BaseTestResult('u1', ResultType.UNKNOWN)
self.tr = TestRunResults()
self.tr.AddResult(self.p1)
self.tr.AddResult(other_p1)
self.tr.AddResult(self.p2)
self.tr.AddResults(set([self.f1, self.c1, self.u1]))
def testGetAll(self):
self.assertFalse(
self.tr.GetAll().symmetric_difference(
[self.p1, self.p2, self.f1, self.c1, self.u1]))
def testGetPass(self):
self.assertFalse(self.tr.GetPass().symmetric_difference(
[self.p1, self.p2]))
def testGetNotPass(self):
self.assertFalse(self.tr.GetNotPass().symmetric_difference(
[self.f1, self.c1, self.u1]))
def testGetAddTestRunResults(self):
tr2 = TestRunResults()
other_p1 = BaseTestResult('p1', ResultType.PASS)
f2 = BaseTestResult('f2', ResultType.FAIL)
tr2.AddResult(other_p1)
tr2.AddResult(f2)
tr2.AddTestRunResults(self.tr)
self.assertFalse(
tr2.GetAll().symmetric_difference(
[self.p1, self.p2, self.f1, self.c1, self.u1, f2]))
def testGetLogs(self):
log_print = ('[FAIL] f1:\n'
'failure1\n'
'[CRASH] c1:\n'
'crash1')
self.assertEqual(self.tr.GetLogs(), log_print)
def testGetShortForm(self):
short_print = ('ALL: 5 PASS: 2 FAIL: 1 '
'CRASH: 1 TIMEOUT: 0 UNKNOWN: 1 ')
self.assertEqual(self.tr.GetShortForm(), short_print)
def testGetGtestForm(self):
gtest_print = ('[==========] 5 tests ran.\n'
'[ PASSED ] 2 tests.\n'
'[ FAILED ] 3 tests, listed below:\n'
'[ FAILED ] f1\n'
'[ FAILED ] c1 (CRASHED)\n'
'[ FAILED ] u1 (UNKNOWN)\n'
'\n'
'3 FAILED TESTS')
self.assertEqual(gtest_print, self.tr.GetGtestForm())
def testRunPassed(self):
self.assertFalse(self.tr.DidRunPass())
tr2 = TestRunResults()
self.assertTrue(tr2.DidRunPass())
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
skudriashev/incubator-airflow | tests/contrib/hooks/test_jdbc_hook.py | 12 | 1690 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mock import Mock
from mock import patch
from airflow import configuration
from airflow.hooks.jdbc_hook import JdbcHook
from airflow import models
from airflow.utils import db
jdbc_conn_mock = Mock(
name="jdbc_conn"
)
class TestJdbcHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='jdbc_default', conn_type='jdbc',
host='jdbc://localhost/', port=443,
extra='{"extra__jdbc__drv_path": "/path1/test.jar,/path2/t.jar2", "extra__jdbc__drv_clsname": "com.driver.main"}'))
@patch("airflow.hooks.jdbc_hook.jaydebeapi.connect", autospec=True,
return_value=jdbc_conn_mock)
def test_jdbc_conn_connection(self, jdbc_mock):
jdbc_hook = JdbcHook()
jdbc_conn = jdbc_hook.get_conn()
self.assertTrue(jdbc_mock.called)
self.assertIsInstance(jdbc_conn, Mock)
self.assertEqual(jdbc_conn.name, jdbc_mock.return_value.name)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
Lokke/eden | modules/feedparser.py | 22 | 155418 | """Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.1.3"
__license__ = """
Copyright (c) 2010-2013 Kurt McKee <[email protected]>
Copyright (c) 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>",
"Bernd Schlapsi <https://github.com/brot>",]
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
#try:
# # Python 3.1 introduces bytes.maketrans and simultaneously
# # deprecates string.maketrans; use bytes.maketrans if possible
# _maketrans = bytes.maketrans
#except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
#_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
_base64decode = base64.decodestring
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
#try:
# if bytes is str:
# # In Python 2.5 and below, bytes doesn't exist (NameError)
# # In Python 2.6 and above, bytes and str are the same type
# raise NameError
#except NameError:
# Python 2
def _s2bytes(s):
return s
def _l2bytes(l):
return ''.join(map(chr, l))
#else:
# # Python 3
# def _s2bytes(s):
# return bytes(s, 'utf8')
# def _l2bytes(l):
# return bytes(l)
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import itertools
import re
import struct
import time
import types
import urllib
import urllib2
import urlparse
import warnings
from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
from xml.sax.saxutils import escape as _xmlescape
except ImportError:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
for char, entity in entities:
data = data.replace(char, entity)
return data
else:
try:
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
except xml.sax.SAXReaderNotAvailable:
_XML_AVAILABLE = 0
else:
_XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing and content santizing
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
# Returning a new object in the calling thread's context
                # resolves a thread-safety issue.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
#try:
# import iconv_codec
#except ImportError:
# pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
#try:
# import chardet
#except ImportError:
chardet = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
SUPPORTED_VERSIONS = {'': u'unknown',
'rss090': u'RSS 0.90',
'rss091n': u'RSS 0.91 (Netscape)',
'rss091u': u'RSS 0.91 (Userland)',
'rss092': u'RSS 0.92',
'rss093': u'RSS 0.93',
'rss094': u'RSS 0.94',
'rss20': u'RSS 2.0',
'rss10': u'RSS 1.0',
'rss': u'RSS (unknown version)',
'atom01': u'Atom 0.1',
'atom02': u'Atom 0.2',
'atom03': u'Atom 0.3',
'atom10': u'Atom 1.0',
'atom': u'Atom (unknown version)',
'cdf': u'CDF',
}
class FeedParserDict(dict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError, "object doesn't have key 'category'"
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel']==u'license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if not dict.__contains__(self, 'updated') and \
dict.__contains__(self, 'published'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. This fallback will be removed in a future version "
"of feedparser.", DeprecationWarning)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if not dict.__contains__(self, 'updated_parsed') and \
dict.__contains__(self, 'published_parsed'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return value
return self[key]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError, "object has no attribute '%s'" % key
def __hash__(self):
return id(self)
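# Note: the keymap above makes legacy key names interchangeable with their
# modern equivalents on parse results, e.g. for a parsed result `d`:
#   d['items']         is  d['entries']
#   d.feed.tagline     is  d.feed.subtitle
#   entry.date_parsed  is  entry.updated_parsed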
_cp1252 = {
128: unichr(8364), # euro sign
130: unichr(8218), # single low-9 quotation mark
131: unichr( 402), # latin small letter f with hook
132: unichr(8222), # double low-9 quotation mark
133: unichr(8230), # horizontal ellipsis
134: unichr(8224), # dagger
135: unichr(8225), # double dagger
136: unichr( 710), # modifier letter circumflex accent
137: unichr(8240), # per mille sign
138: unichr( 352), # latin capital letter s with caron
139: unichr(8249), # single left-pointing angle quotation mark
140: unichr( 338), # latin capital ligature oe
142: unichr( 381), # latin capital letter z with caron
145: unichr(8216), # left single quotation mark
146: unichr(8217), # right single quotation mark
147: unichr(8220), # left double quotation mark
148: unichr(8221), # right double quotation mark
149: unichr(8226), # bullet
150: unichr(8211), # en dash
151: unichr(8212), # em dash
152: unichr( 732), # small tilde
153: unichr(8482), # trade mark sign
154: unichr( 353), # latin small letter s with caron
155: unichr(8250), # single right-pointing angle quotation mark
156: unichr( 339), # latin small ligature oe
158: unichr( 382), # latin small letter z with caron
159: unichr( 376), # latin capital letter y with diaeresis
}
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
if not isinstance(uri, unicode):
uri = uri.decode('utf-8', 'ignore')
try:
uri = urlparse.urljoin(base, uri)
except ValueError:
uri = u''
if not isinstance(uri, unicode):
return uri.decode('utf-8', 'ignore')
return uri
class _FeedParserMixin:
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://www.georss.org/georss': 'georss',
'http://www.opengis.net/gml': 'gml',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://podlove.org/simple-chapters': 'psc',
}
_matchnamespaces = {}
can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
html_types = [u'text/html', u'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
# georss
self.ingeometry = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or u''
self.lang = baselang or None
self.svgOK = 0
self.title_depth = -1
self.depth = 0
# psc_chapters_flag prevents multiple psc_chapters from being
# captured in a single entry or item. The transition states are
# None -> True -> False. psc_chapter elements will only be
# captured while it is True.
self.psc_chapters_flag = None
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
# A map of the following form:
# {
# object_that_value_is_set_on: {
# property_name: depth_of_node_property_was_extracted_from,
# other_property: depth_of_node_property_was_extracted_from,
# },
# }
self.property_depth_map = {}
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
v = v.replace('&', '&')
if not isinstance(v, unicode):
v = v.decode('utf-8')
return (k, v)
def unknown_starttag(self, tag, attrs):
# increment depth counter
self.depth += 1
# normalize attrs
attrs = map(self._normalize_attributes, attrs)
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if not isinstance(baseuri, unicode):
baseuri = baseuri.decode(self.encoding, 'ignore')
# ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javascript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
            if tag.find(':') != -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg':
self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
        if tag.find(':') != -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
        # special hack for better tracking of empty textinput/image elements in ill-formed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
                # No attributes, so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
# match namespaces
        if tag.find(':') != -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK:
self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK:
raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
self.depth -= 1
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
if not self.elementstack:
return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
if not self.elementstack:
return
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities:
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try:
name2codepoint[ref]
except KeyError:
text = '&%s;' % ref
else:
text = unichr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == u'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
# We have an incomplete CDATA block.
return k
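    # For illustration: given rawdata holding '<![CDATA[AT&T]]>' at offset i,
    # the inner text 'AT&T' is escaped by _xmlescape to 'AT&amp;T', pushed
    # through handle_data, and parsing resumes just past the closing ']]>'.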
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = u'text/plain'
elif contentType == 'html':
contentType = u'text/html'
elif contentType == 'xhtml':
contentType = u'application/xhtml+xml'
return contentType
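    # e.g. mapContentType(u'HTML') -> u'text/html' and mapContentType('text')
    # -> u'text/plain', while an already-qualified MIME type such as
    # u'application/atom+xml' passes through unchanged.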
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if not self.version:
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
self.version = u'rss090'
elif loweruri == 'http://purl.org/rss/1.0/':
self.version = u'rss10'
elif loweruri == 'http://www.w3.org/2005/atom':
self.version = u'atom10'
        if loweruri.find(u'backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
uri = u'http://backend.userland.com/rss'
loweruri = uri
if loweruri in self._matchnamespaces:
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or u'', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expectingText, pieces = self.elementstack.pop()
if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if not isinstance(v, unicode):
pieces[i] = v.decode('utf-8')
output = u''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
# do not resolve guid elements with isPermalink="false"
if not element == 'id' or self.guidislink:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
if self.lookslikehtml(output):
self.contentparams['type'] = u'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
if self.encoding and not isinstance(output, unicode):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
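        # e.g. the mojibake u'\xc3\xa9' (the UTF-8 bytes for u'\xe9' mistakenly
        # decoded as iso-8859-1) round-trips back to the intended u'\xe9' here;
        # text that isn't latin-1-encodable raises and is left untouched.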
# map win-1252 extensions to the proper code points
if isinstance(output, unicode):
output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
# converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
# unhandled character references. fix this special case.
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
if old_value_depth is None or self.depth <= old_value_depth:
self.property_depth_map[self.entries[-1]][element] = self.depth
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang:
self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',s)):
return
# all entities must have been defined as valid HTML entities
if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
return
return 1
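    # Doctest-style sketch of the heuristic (assuming the default sanitizer
    # element and entity tables):
    #   >>> _FeedParserMixin.lookslikehtml(u'<p>one &amp; two</p>')
    #   1
    #   >>> _FeedParserMixin.lookslikehtml(u'no markup here')  # no tag/entity
    #   >>> _FeedParserMixin.lookslikehtml(u'<script>x()</script>')  # bad tag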
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
        if colonpos != -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith(u'text/'):
return 0
if self.contentparams['type'].endswith(u'+xml'):
return 0
if self.contentparams['type'].endswith(u'/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': u'rss091u',
'0.92': u'rss092',
'0.93': u'rss093',
'0.94': u'rss094'}
        # If we're here then this is an RSS feed.
        # If we don't have a version or have a version that starts with something
        # other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith(u'rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = u'rss20'
else:
self.version = u'rss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
def _cdf_common(self, attrsD):
if 'lastmod' in attrsD:
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if 'href' in attrsD:
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': u'atom01',
'0.2': u'atom02',
'0.3': u'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = u'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.title_depth = -1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.title_depth = -1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
# Append a new FeedParserDict when expecting an author
context = self._getContext()
context.setdefault('authors', [])
context['authors'].append(FeedParserDict())
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
context.setdefault('authors', [FeedParserDict()])
context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = u'%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, u'')
author = author.replace(u'()', u'')
author = author.replace(u'<>', u'')
                author = author.replace(u'&lt;&gt;', u'')
author = author.strip()
if author and (author[0] == u'('):
author = author[1:]
if author and (author[-1] == u')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
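    # e.g. (hypothetical value) an author of u'Jane Doe (jane@example.org)' is
    # split here into author_detail = {'name': u'Jane Doe',
    # 'email': u'jane@example.org'}; the regex above also tolerates a trailing
    # '?subject=...' mailto query on the address.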
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, u'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, u'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.title_depth = -1
self.psc_chapters_flag = None
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
_start_pubdate = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value), overwrite=True)
_end_dcterms_issued = _end_published
_end_issued = _end_published
_end_pubdate = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_dc_date = _start_updated
_start_lastbuilddate = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value, overwrite=True)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_dc_date = _end_updated
_end_lastbuilddate = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value), overwrite=True)
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
# geospatial location, or "where", from georss.org
def _start_georssgeom(self, attrsD):
self.push('geometry', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_point = _start_georssgeom
_start_georss_line = _start_georssgeom
_start_georss_polygon = _start_georssgeom
_start_georss_box = _start_georssgeom
def _save_where(self, geometry):
context = self._getContext()
context['where'].update(geometry)
def _end_georss_point(self):
geometry = _parse_georss_point(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_line(self):
geometry = _parse_georss_line(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _end_georss_polygon(self):
this = self.pop('geometry')
geometry = _parse_georss_polygon(this)
if geometry:
self._save_where(geometry)
def _end_georss_box(self):
geometry = _parse_georss_box(self.pop('geometry'))
if geometry:
self._save_where(geometry)
def _start_where(self, attrsD):
self.push('where', 0)
context = self._getContext()
context['where'] = FeedParserDict()
_start_georss_where = _start_where
def _parse_srs_attrs(self, attrsD):
srsName = attrsD.get('srsname')
try:
srsDimension = int(attrsD.get('srsdimension', '2'))
except ValueError:
srsDimension = 2
context = self._getContext()
context['where']['srsName'] = srsName
context['where']['srsDimension'] = srsDimension
def _start_gml_point(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 1
self.push('geometry', 0)
def _start_gml_linestring(self, attrsD):
self._parse_srs_attrs(attrsD)
self.ingeometry = 'linestring'
self.push('geometry', 0)
def _start_gml_polygon(self, attrsD):
self._parse_srs_attrs(attrsD)
self.push('geometry', 0)
def _start_gml_exterior(self, attrsD):
self.push('geometry', 0)
def _start_gml_linearring(self, attrsD):
self.ingeometry = 'polygon'
self.push('geometry', 0)
def _start_gml_pos(self, attrsD):
self.push('pos', 0)
def _end_gml_pos(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_georss_point(this, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
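    # Note on the swap: EPSG codes in _geogCS denote geographic coordinate
    # systems (e.g. 4326), which list latitude first, so swap=True has the
    # point parser reorder coordinates; projected systems keep their order.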
def _start_gml_poslist(self, attrsD):
self.push('pos', 0)
def _end_gml_poslist(self):
this = self.pop('pos')
context = self._getContext()
srsName = context['where'].get('srsName')
srsDimension = context['where'].get('srsDimension', 2)
swap = True
if srsName and "EPSG" in srsName:
epsg = int(srsName.split(":")[-1])
swap = bool(epsg in _geogCS)
geometry = _parse_poslist(
this, self.ingeometry, swap=swap, dims=srsDimension)
if geometry:
self._save_where(geometry)
def _end_geom(self):
self.ingeometry = 0
self.pop('geometry')
_end_gml_point = _end_geom
_end_gml_linestring = _end_geom
_end_gml_linearring = _end_geom
_end_gml_exterior = _end_geom
_end_gml_polygon = _end_geom
def _end_where(self):
self.pop('where')
_end_georss_where = _end_where
# end geospatial
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href'] = value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
def _start_category(self, attrsD):
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _start_media_category(self, attrsD):
attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrsD)
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split(','):
if term.strip():
self._addTag(term.strip(), u'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
_end_media_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', u'alternate')
if attrsD['rel'] == u'self':
attrsD.setdefault('type', u'application/atom+xml')
else:
attrsD.setdefault('type', u'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
if not (self.inentry and self.inimage):
context['links'].append(FeedParserDict(attrsD))
if 'href' in attrsD:
expectingText = 0
if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
def _end_link(self):
value = self.pop('link')
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
_start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and 'link' not in self._getContext())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
_end_id = _end_guid
def _start_title(self, attrsD):
if self.svgOK:
return self.unknown_starttag('title', attrsD.items())
self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
if self.svgOK:
return
value = self.popContent('title')
if not value:
return
self.title_depth = self.depth
_end_dc_title = _end_title
def _end_media_title(self):
title_depth = self.title_depth
self._end_title()
self.title_depth = title_depth
def _start_description(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
_start_media_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
_end_media_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, u'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if 'generator_detail' in context:
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel'] = u'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
def _start_source(self, attrsD):
if 'url' in attrsD:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrsD[u'url']
self.push('source', 1)
self.insource = 1
self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, u'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, u'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
value = self.popContent('content')
if copyToSummary:
self._save('summary', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
if attrsD.get('href'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
elif attrsD.get('url'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
# Convert 'yes' -> True, 'clean' to False, and any other value to None
# False and None both evaluate as False, so the difference can be ignored
# by applications that only need to know if the content is explicit.
self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
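    # The tuple indexing above leans on booleans being ints: (value == 'yes'
    # and 2) yields 2 for 'yes', (value == 'clean') contributes 1 for 'clean',
    # and anything else leaves index 0, selecting None from (None, False, True).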
def _start_media_group(self, attrsD):
# don't do anything, but don't break the enclosed tags either
pass
def _start_media_credit(self, attrsD):
context = self._getContext()
context.setdefault('media_credit', [])
context['media_credit'].append(attrsD)
self.push('credit', 1)
def _end_media_credit(self):
credit = self.pop('credit')
        if credit is not None and credit.strip():
context = self._getContext()
context['media_credit'][-1]['content'] = credit
def _start_media_restriction(self, attrsD):
context = self._getContext()
context.setdefault('media_restriction', attrsD)
self.push('restriction', 1)
def _end_media_restriction(self):
restriction = self.pop('restriction')
        if restriction is not None and restriction.strip():
context = self._getContext()
context['media_restriction']['content'] = restriction
def _start_media_license(self, attrsD):
context = self._getContext()
context.setdefault('media_license', attrsD)
self.push('license', 1)
def _end_media_license(self):
license = self.pop('license')
        if license is not None and license.strip():
context = self._getContext()
context['media_license']['content'] = license
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
def _start_media_thumbnail(self, attrsD):
context = self._getContext()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
        if url is not None and url.strip():
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
self.push('media_player', 0)
self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
def _start_newlocation(self, attrsD):
self.push('newlocation', 1)
def _end_newlocation(self):
url = self.pop('newlocation')
context = self._getContext()
# don't set newlocation if the context isn't right
if context is not self.feeddata:
return
context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
def _start_psc_chapters(self, attrsD):
if self.psc_chapters_flag is None:
# Transition from None -> True
self.psc_chapters_flag = True
attrsD['chapters'] = []
self._getContext()['psc_chapters'] = FeedParserDict(attrsD)
def _end_psc_chapters(self):
# Transition from True -> False
self.psc_chapters_flag = False
def _start_psc_chapter(self, attrsD):
if self.psc_chapters_flag:
start = self._getAttribute(attrsD, 'start')
attrsD['start_parsed'] = _parse_psc_chapter_start(start)
context = self._getContext()['psc_chapters']
context['chapters'].append(FeedParserDict(attrsD))
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
self.decls = {}
def startPrefixMapping(self, prefix, uri):
if not uri:
return
# Jython uses '' instead of None; standardize on None
prefix = prefix or None
self.trackNamespace(prefix, uri)
if prefix and uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
            if lowernamespace.find(u'backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
namespace = u'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
            if givenprefix and (prefix is None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
                raise UndeclaredNamespace("'%s' is not associated with a namespace" % givenprefix)
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
for (namespace, attrlocalname), attrvalue in attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
localname = str(localname).lower()
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
# drv_libxml2 calls warning() in some cases
warning = error
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
'source', 'track', 'wbr'
])
def __init__(self, encoding, _type):
self.encoding = encoding
self._type = _type
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
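    # e.g. the shorttag '<br/>' is rewritten as '<br />', while '<span/>'
    # (which has no void form in HTML) is expanded to '<span></span>'.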
# By declaring these methods and overriding their compiled code
# with the code from sgmllib, the original code will execute in
# feedparser's scope instead of sgmllib's. This means that the
# `tagfind` and `charref` regular expressions will be found as
# they're declared above, not as they're declared in sgmllib.
def goahead(self, i):
pass
goahead.func_code = sgmllib.SGMLParser.goahead.func_code
def __parse_starttag(self, i):
pass
__parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
def parse_starttag(self,i):
j = self.__parse_starttag(i)
if self._type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
try:
bytes
if bytes is str:
raise NameError
self.encoding = self.encoding + u'_INVALID_PYTHON_3'
except NameError:
if self.encoding and isinstance(data, unicode):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs:
return attrs
# utility method to be called by descendants
attrs = dict([(k.lower(), v) for k, v in attrs]).items()
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
                value=value.replace('&gt;','>').replace('&lt;','<').replace('&quot;','"')
                value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if not isinstance(value, unicode):
value = value.decode(self.encoding, 'ignore')
try:
# Currently, in Python 3 the key is already a str, and cannot be decoded again
uattrs.append((unicode(key, self.encoding), value))
except TypeError:
uattrs.append((key, value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs = strattrs.encode(self.encoding)
except (UnicodeEncodeError, LookupError):
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%s%s />' % (tag, strattrs))
else:
self.pieces.append('<%s%s>' % (tag, strattrs))
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%s>" % tag)
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
ref = ref.lower()
if ref.startswith('x'):
value = int(ref[1:], 16)
else:
value = int(ref)
if value in _cp1252:
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%s;' % ref)
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
if ref in name2codepoint or ref == 'apos':
self.pieces.append('&%s;' % ref)
else:
            self.pieces.append('&amp;%s' % ref)
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%s-->' % text)
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%s>' % text)
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%s>' % text)
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
def parse_declaration(self, i):
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
# escape the doctype declaration and continue parsing
            self.handle_data('&lt;')
return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding, entities):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
_BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
self.entities=entities
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if not self.contentparams.get('type', u'xml').endswith(u'xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
        return data
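    # e.g. the numeric reference '&#38;' is first normalized to the named form
    # '&amp;'; only in escaped (non-xml) content is it then decoded to a
    # literal '&', while xml content keeps the entities encoded.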
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = set([('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src'),
('video', 'poster')])
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _RelativeURIResolver(baseURI, encoding, _type)
p.feed(htmlSource)
return p.output()
def _makeSafeAbsoluteURI(base, rel=None):
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
return _urljoin(base, rel or u'')
if not base:
return rel or u''
if not rel:
try:
scheme = urlparse.urlparse(base)[0]
except ValueError:
return u''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return u''
uri = _urljoin(base, rel)
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return u''
return uri
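# Doctest-style sketch (assuming the default ACCEPTABLE_URI_SCHEMES, which
# allows 'http' but not 'javascript'):
#   >>> _makeSafeAbsoluteURI(u'http://example.org/feed.xml', u'/img/logo.png')
#   u'http://example.org/img/logo.png'
#   >>> _makeSafeAbsoluteURI(u'http://example.org/', u'javascript:alert(1)')
#   u''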
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel',
'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing',
'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span',
'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang'])
unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
acceptable_css_properties = set(['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width'])
# survey of common keywords found in feeds
acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow'])
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'semantics'])
mathml_attributes = set(['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'])
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use'])
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
'stop-color', 'stop-opacity', 'strikethrough-position',
'strikethrough-thickness', 'stroke', 'stroke-dasharray',
'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
'y2', 'zoomAndPan'])
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity'])
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if not tag in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag=='svg':
attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
if tag=='math':
attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
        # choose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
elif not tag in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if filter(lambda (n,v): n.startswith('xlink:'),attrs):
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
# make sure the uri uses an acceptable uri scheme
if key == u'href':
value = _makeSafeAbsoluteURI(value)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value:
clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK:
self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK:
self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value:
continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
def parse_comment(self, i, report=1):
ret = _BaseHTMLProcessor.parse_comment(self, i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _HTMLSanitizer(encoding, _type)
    # Escape CDATA sections so their contents are also run through the sanitizer.
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
p.feed(htmlSource)
data = p.output()
data = data.strip().replace('\r\n', '\n')
return data
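# Hedged usage sketch (not in the original source): with SGML support
# available, one would expect something like
#   _sanitizeHTML('<p onclick="evil()">hi<script>alert(1)</script></p>',
#                 'utf-8', 'text/html')
# to come back as '<p>hi</p>' -- the event-handler attribute is dropped,
# and both the <script> element and its character data are suppressed.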
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
# The default implementation just raises HTTPError.
# Forget that.
fp.status = code
return fp
def http_error_301(self, req, fp, code, msg, hdrs):
result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
code, msg, hdrs)
result.status = code
result.newurl = result.geturl()
return result
# The default implementations in urllib2.HTTPRedirectHandler
# are identical, so hardcoding a http_error_301 call above
# won't affect anything
http_error_300 = http_error_301
http_error_302 = http_error_301
http_error_303 = http_error_301
http_error_307 = http_error_301
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth.
        # If both conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
if base64 is None or 'Authorization' not in req.headers \
or 'WWW-Authenticate' not in headers:
return self.http_error_default(req, fp, code, msg, headers)
auth = _base64decode(req.headers['Authorization'].split(' ')[1])
user, passw = auth.split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
    If the request_headers argument is supplied, it is a dictionary of HTTP
    request headers that will override the values generated by FeedParser.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if isinstance(url_file_stream_or_string, basestring) \
and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
# Deal with the feed URI scheme
if url_file_stream_or_string.startswith('feed:http'):
url_file_stream_or_string = url_file_stream_or_string[5:]
elif url_file_stream_or_string.startswith('feed:'):
url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
if not agent:
agent = USER_AGENT
# Test for inline user:password credentials for HTTP basic auth
auth = None
if base64 and not url_file_stream_or_string.startswith('ftp:'):
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.standard_b64encode(user_passwd).strip()
# iri support
if isinstance(url_file_stream_or_string, unicode):
url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
# try to open with urllib2 (to use optional headers)
request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string, 'rb')
except (IOError, UnicodeEncodeError, TypeError):
# if url_file_stream_or_string is a unicode object that
# cannot be converted to the encoding returned by
# sys.getfilesystemencoding(), a UnicodeEncodeError
# will be thrown
# If url_file_stream_or_string is a string that contains NULL
# (such as an XML document encoded in UTF-32), TypeError will
# be thrown.
pass
# treat url_file_stream_or_string as string
if isinstance(url_file_stream_or_string, unicode):
return _StringIO(url_file_stream_or_string.encode('utf-8'))
return _StringIO(url_file_stream_or_string)
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urlparse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = u''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urlparse.urlunsplit(parts)
else:
return url
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
request = urllib2.Request(url)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if isinstance(modified, basestring):
modified = _parse_date(modified)
elif isinstance(modified, datetime.datetime):
modified = modified.utctimetuple()
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
# use this for whatever -- cookies, special headers, etc
# [('Cookie','Something'),('x-special-header','Another Value')]
for header_name, header_value in request_headers.items():
request.add_header(header_name, header_value)
request.add_header('A-IM', 'feed') # RFC 3229 support
return request
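# Hedged example of the conditional-GET plumbing above (the URL and etag
# values are placeholders): calling
#   _build_urllib2_request('http://example.org/feed', USER_AGENT,
#                          '"abc123"', None, None, None, {})
# should yield a urllib2.Request carrying If-None-Match: "abc123", an
# Accept-encoding header matching the available codecs, and
# 'A-IM: feed' to advertise RFC 3229 delta encoding.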
def _parse_psc_chapter_start(start):
FORMAT = r'^((\d{2}):)?(\d{2}):(\d{2})(\.(\d{3}))?$'
    match = re.compile(FORMAT).match(start)
    if match is None:
        return None
    _, h, m, s, _, ms = match.groups()
h, m, s, ms = (int(h or 0), int(m), int(s), int(ms or 0))
return datetime.timedelta(0, h*60*60 + m*60 + s, ms*1000)
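# Illustrative expectations (not in the original source):
#   _parse_psc_chapter_start('01:02:03.500')
# should yield datetime.timedelta(hours=1, minutes=2, seconds=3,
# milliseconds=500), while a string without two-digit fields, such as
# '1:2', fails the pattern and returns None.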
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
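# Hedged sketch of this extension point (the handler below is
# hypothetical, not part of feedparser): a custom parser takes the raw
# date string and returns a 9-tuple in GMT, or None to pass.
#
#   def _parse_date_my_format(dateString):
#       m = re.match(r'(\d{4})\.(\d{2})\.(\d{2})$', dateString)
#       if not m:
#           return None
#       return time.strptime('%s-%s-%s' % m.groups(), '%Y-%m-%d')
#   registerDateHandler(_parse_date_my_format)
#
# Because handlers are inserted at position 0, the most recently
# registered handler is tried first by _parse_date() below.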
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(\.(?P<fracsecond>\d+))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
try:
del tmpl
except NameError:
pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
del regex
except NameError:
pass
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m:
break
if not m:
return
if m.span() == (0, 0):
return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params:
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
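# Illustrative expectation (not in the original source):
#   _parse_date_iso8601('20040105')
# should return a struct_time for 2004-01-05. Note the result passes
# through time.localtime(time.mktime(...)), so it is normalized by the
# local C library rather than computed purely in UTC.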
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m:
return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
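# Both Korean handlers normalize to a W3DTF string with a fixed +09:00
# (KST) offset and delegate to _parse_date_w3dtf. Sketch (not original):
# the OnBlog form u'2004\ub144 05\uc6d4 28\uc77c 01:31:15' is rewritten
# as '2004-05-28T01:31:15+09:00' before parsing.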
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m:
return
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
    u'm\u00e1jus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m or m.group(2) not in _hungarian_months:
return None
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
timezonenames = {
'ut': 0, 'gmt': 0, 'z': 0,
'adt': -3, 'ast': -4, 'at': -4,
'edt': -4, 'est': -5, 'et': -5,
'cdt': -5, 'cst': -6, 'ct': -6,
'mdt': -6, 'mst': -7, 'mt': -7,
'pdt': -7, 'pst': -8, 'pt': -8,
'a': -1, 'n': 1,
'm': -12, 'y': 12,
}
# W3 date and time format parser
# http://www.w3.org/TR/NOTE-datetime
# Also supports MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (basically, allow a space as a date/time/timezone separator)
def _parse_date_w3dtf(datestr):
if not datestr.strip():
return None
parts = datestr.lower().split('t')
if len(parts) == 1:
# This may be a date only, or may be an MSSQL-style date
parts = parts[0].split()
if len(parts) == 1:
# Treat this as a date only
parts.append('00:00:00z')
elif len(parts) > 2:
return None
date = parts[0].split('-', 2)
if not date or len(date[0]) != 4:
return None
# Ensure that `date` has 3 elements. Using '1' sets the default
# month to January and the default day to the 1st of the month.
date.extend(['1'] * (3 - len(date)))
try:
year, month, day = [int(i) for i in date]
except ValueError:
# `date` may have more than 3 elements or may contain
# non-integer strings.
return None
if parts[1].endswith('z'):
parts[1] = parts[1][:-1]
parts.append('z')
# Append the numeric timezone offset, if any, to parts.
# If this is an MSSQL-style date then parts[2] already contains
# the timezone information, so `append()` will not affect it.
# Add 1 to each value so that if `find()` returns -1 it will be
# treated as False.
loc = parts[1].find('-') + 1 or parts[1].find('+') + 1 or len(parts[1]) + 1
loc = loc - 1
parts.append(parts[1][loc:])
parts[1] = parts[1][:loc]
time = parts[1].split(':', 2)
# Ensure that time has 3 elements. Using '0' means that the
# minutes and seconds, if missing, will default to 0.
time.extend(['0'] * (3 - len(time)))
tzhour = 0
tzmin = 0
if parts[2][:1] in ('-', '+'):
try:
tzhour = int(parts[2][1:3])
tzmin = int(parts[2][4:])
except ValueError:
return None
if parts[2].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[2], 0)
try:
hour, minute, second = [int(float(i)) for i in time]
except ValueError:
return None
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, ValueError):
# IronPython throws ValueErrors instead of OverflowErrors
return None
registerDateHandler(_parse_date_w3dtf)
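# Sketch of expected behavior (not in the original source): a date-only
# string is padded with a midnight UTC time, so
#   _parse_date_w3dtf('2003-12-31')
# parses like '2003-12-31T00:00:00Z', while
#   _parse_date_w3dtf('2003-12-31T10:14:55-08:00')
# applies the offset and returns 18:14:55 UTC as a 9-tuple.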
def _parse_date_rfc822(date):
"""Parse RFC 822 dates and times
http://tools.ietf.org/html/rfc822#section-5
There are some formatting differences that are accounted for:
1. Years may be two or four digits.
2. The month and day can be swapped.
3. Additional timezone names are supported.
4. A default time and timezone are assumed if only a date is present.
"""
daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
months = {
'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
}
parts = date.lower().split()
if len(parts) < 5:
# Assume that the time and timezone are missing
parts.extend(('00:00:00', '0000'))
# Remove the day name
if parts[0][:3] in daynames:
parts = parts[1:]
if len(parts) < 5:
# If there are still fewer than five parts, there's not enough
# information to interpret this
return None
try:
day = int(parts[0])
except ValueError:
# Check if the day and month are swapped
if months.get(parts[0][:3]):
try:
day = int(parts[1])
except ValueError:
return None
else:
parts[1] = parts[0]
else:
return None
month = months.get(parts[1][:3])
if not month:
return None
try:
year = int(parts[2])
except ValueError:
return None
# Normalize two-digit years:
# Anything in the 90's is interpreted as 1990 and on
# Anything 89 or less is interpreted as 2089 or before
if len(parts[2]) <= 2:
year += (1900, 2000)[year < 90]
timeparts = parts[3].split(':')
timeparts = timeparts + ([0] * (3 - len(timeparts)))
try:
(hour, minute, second) = map(int, timeparts)
except ValueError:
return None
tzhour = 0
tzmin = 0
# Strip 'Etc/' from the timezone
if parts[4].startswith('etc/'):
parts[4] = parts[4][4:]
# Normalize timezones that start with 'gmt':
# GMT-05:00 => -0500
# GMT => GMT
if parts[4].startswith('gmt'):
parts[4] = ''.join(parts[4][3:].split(':')) or 'gmt'
# Handle timezones like '-0500', '+0500', and 'EST'
if parts[4] and parts[4][0] in ('-', '+'):
try:
tzhour = int(parts[4][1:3])
tzmin = int(parts[4][3:])
except ValueError:
return None
if parts[4].startswith('-'):
tzhour = tzhour * -1
tzmin = tzmin * -1
else:
tzhour = timezonenames.get(parts[4], 0)
# Create the datetime object and timezone delta objects
try:
stamp = datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in a UTC 9-tuple
try:
return (stamp - delta).utctimetuple()
except (OverflowError, ValueError):
# IronPython throws ValueErrors instead of OverflowErrors
return None
registerDateHandler(_parse_date_rfc822)
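# Illustrative expectations (not in the original source):
#   _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 GMT')
# returns the corresponding UTC 9-tuple, and a bare '01 Jan 2004' is
# accepted with an assumed time of 00:00:00 and an assumed GMT zone.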
_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
def _parse_date_asctime(dt):
"""Parse asctime-style dates"""
dayname, month, day, remainder = dt.split(None, 3)
# Convert month and day into zero-padded integers
month = '%02i ' % (_months.index(month.lower()) + 1)
day = '%02i ' % (int(day),)
dt = month + day + remainder
return time.strptime(dt, '%m %d %H:%M:%S %Y')[:-1] + (0, )
registerDateHandler(_parse_date_asctime)
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
m = _my_date_pattern.search(aDateString)
if m is None:
return None
dow, year, month, day, hour, minute, second, tz = m.groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])
ZERO_BYTES = _l2bytes([0x00, 0x00])
# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')
# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
def convert_to_utf8(http_headers, data):
'''Detect and convert the character encoding to UTF-8.
http_headers is a dictionary
data is a raw string (not Unicode)'''
# This is so much trickier than it sounds, it's not even funny.
# According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
# is application/xml, application/*+xml,
# application/xml-external-parsed-entity, or application/xml-dtd,
# the encoding given in the charset parameter of the HTTP Content-Type
# takes precedence over the encoding given in the XML prefix within the
# document, and defaults to 'utf-8' if neither are specified. But, if
# the HTTP Content-Type is text/xml, text/*+xml, or
# text/xml-external-parsed-entity, the encoding given in the XML prefix
# within the document is ALWAYS IGNORED and only the encoding given in
# the charset parameter of the HTTP Content-Type header should be
# respected, and it defaults to 'us-ascii' if not specified.
# Furthermore, discussion on the atom-syntax mailing list with the
# author of RFC 3023 leads me to the conclusion that any document
# served with a Content-Type of text/* and no charset parameter
# must be treated as us-ascii. (We now do this.) And also that it
# must always be flagged as non-well-formed. (We now do this too.)
# If Content-Type is unspecified (input was local file or non-HTTP source)
# or unrecognized (server just got it totally wrong), then go by the
# encoding given in the XML prefix of the document and default to
# 'iso-8859-1' as per the HTTP specification (RFC 2616).
# Then, assuming we didn't find a character encoding in the HTTP headers
# (and the HTTP Content-type allowed us to look in the body), we need
# to sniff the first few bytes of the XML data and try to determine
# whether the encoding is ASCII-compatible. Section F of the XML
# specification shows the way here:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# If the sniffed encoding is not ASCII-compatible, we need to make it
# ASCII compatible so that we can sniff further into the XML declaration
# to find the encoding attribute, which will tell us the true encoding.
# Of course, none of this guarantees that we will be able to parse the
# feed in the declared character encoding (assuming it was declared
# correctly, which many are not). iconv_codec can help a lot;
# you should definitely install it if you can.
# http://cjkpython.i18n.org/
bom_encoding = u''
xml_encoding = u''
rfc3023_encoding = u''
# Look at the first few bytes of the document to guess what
# its encoding may be. We only need to decode enough of the
# document that we can use an ASCII-compatible regular
# expression to search for an XML encoding declaration.
# The heuristic follows the XML specification, section F:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# Check for BOMs first.
if data[:4] == codecs.BOM_UTF32_BE:
bom_encoding = u'utf-32be'
data = data[4:]
elif data[:4] == codecs.BOM_UTF32_LE:
bom_encoding = u'utf-32le'
data = data[4:]
elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
bom_encoding = u'utf-16be'
data = data[2:]
elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
bom_encoding = u'utf-16le'
data = data[2:]
elif data[:3] == codecs.BOM_UTF8:
bom_encoding = u'utf-8'
data = data[3:]
# Check for the characters '<?xm' in several encodings.
elif data[:4] == EBCDIC_MARKER:
bom_encoding = u'cp037'
elif data[:4] == UTF16BE_MARKER:
bom_encoding = u'utf-16be'
elif data[:4] == UTF16LE_MARKER:
bom_encoding = u'utf-16le'
elif data[:4] == UTF32BE_MARKER:
bom_encoding = u'utf-32be'
elif data[:4] == UTF32LE_MARKER:
bom_encoding = u'utf-32le'
tempdata = data
try:
if bom_encoding:
tempdata = data.decode(bom_encoding).encode('utf-8')
except (UnicodeDecodeError, LookupError):
# feedparser recognizes UTF-32 encodings that aren't
# available in Python 2.4 and 2.5, so it's possible to
# encounter a LookupError during decoding.
xml_encoding_match = None
else:
xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
# Normalize the xml_encoding if necessary.
if bom_encoding and (xml_encoding in (
u'u16', u'utf-16', u'utf16', u'utf_16',
u'u32', u'utf-32', u'utf32', u'utf_32',
u'iso-10646-ucs-2', u'iso-10646-ucs-4',
u'csucs4', u'csunicode', u'ucs-2', u'ucs-4'
)):
xml_encoding = bom_encoding
# Find the HTTP Content-Type and, hopefully, a character
# encoding provided by the server. The Content-Type is used
# to choose the "correct" encoding among the BOM encoding,
# XML declaration encoding, and HTTP encoding, following the
# heuristic defined in RFC 3023.
http_content_type = http_headers.get('content-type') or ''
http_content_type, params = cgi.parse_header(http_content_type)
http_encoding = params.get('charset', '').replace("'", "")
if not isinstance(http_encoding, unicode):
http_encoding = http_encoding.decode('utf-8', 'ignore')
acceptable_content_type = 0
application_content_types = (u'application/xml', u'application/xml-dtd',
u'application/xml-external-parsed-entity')
text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith(u'application/') and
http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or xml_encoding or u'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith(u'text/') and
http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or u'us-ascii'
elif http_content_type.startswith(u'text/'):
rfc3023_encoding = http_encoding or u'us-ascii'
elif http_headers and 'content-type' not in http_headers:
rfc3023_encoding = xml_encoding or u'iso-8859-1'
else:
rfc3023_encoding = xml_encoding or u'utf-8'
# gb18030 is a superset of gb2312, so always replace gb2312
# with gb18030 for greater compatibility.
if rfc3023_encoding.lower() == u'gb2312':
rfc3023_encoding = u'gb18030'
if xml_encoding.lower() == u'gb2312':
xml_encoding = u'gb18030'
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
error = None
if http_headers and (not acceptable_content_type):
if 'content-type' in http_headers:
msg = '%s is not an XML media type' % http_headers['content-type']
else:
msg = 'no Content-type specified'
error = NonXMLContentType(msg)
# determine character encoding
known_encoding = 0
chardet_encoding = None
tried_encodings = []
if chardet:
chardet_encoding = chardet.detect(data)['encoding']
if not chardet_encoding:
chardet_encoding = ''
if not isinstance(chardet_encoding, unicode):
chardet_encoding = unicode(chardet_encoding, 'ascii', 'ignore')
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM,
    # then the chardet guess and a few common fallback encodings
for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'):
if not proposed_encoding:
continue
if proposed_encoding in tried_encodings:
continue
tried_encodings.append(proposed_encoding)
try:
data = data.decode(proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = 1
# Update the encoding in the opening XML processing instruction.
new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
if RE_XML_DECLARATION.search(data):
data = RE_XML_DECLARATION.sub(new_declaration, data)
else:
data = new_declaration + u'\n' + data
data = data.encode('utf-8')
break
# if still no luck, give up
if not known_encoding:
error = CharacterEncodingUnknown(
'document encoding unknown, I tried ' +
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
(rfc3023_encoding, xml_encoding))
rfc3023_encoding = u''
elif proposed_encoding != rfc3023_encoding:
error = CharacterEncodingOverride(
'document declared as %s, but parsed as %s' %
(rfc3023_encoding, proposed_encoding))
rfc3023_encoding = proposed_encoding
return data, rfc3023_encoding, error
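# Illustrative expectation (not in the original source): a feed served as
# 'text/xml' with no charset parameter defaults to us-ascii per RFC 3023;
# if its body actually contains UTF-8 multibyte sequences, the us-ascii
# decode fails, the utf-8 fallback succeeds, and the returned error is a
# CharacterEncodingOverride noting that the document was declared as
# us-ascii but parsed as utf-8.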
# Match XML entity declarations.
# Example: <!ENTITY copyright "&#169;">
RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
# Match safe entity declarations.
# This will allow numeric character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "&#179;"
# Example: copyright "&#169;"
# Forbidden: explode1 "&explode2;&explode2;"
RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
def replace_doctype(data):
'''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document with a replaced DOCTYPE
'''
# Divide the document into two groups by finding the location
# of the first element that doesn't begin with '<?' or '<!'.
start = re.search(_s2bytes('<\w'), data)
start = start and start.start() or -1
head, data = data[:start+1], data[start+1:]
# Save and then remove all of the ENTITY declarations.
entity_results = RE_ENTITY_PATTERN.findall(head)
head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
# Find the DOCTYPE declaration and check the feed type.
doctype_results = RE_DOCTYPE_PATTERN.findall(head)
doctype = doctype_results and doctype_results[0] or _s2bytes('')
if _s2bytes('netscape') in doctype.lower():
version = u'rss091n'
else:
version = None
# Re-insert the safe ENTITY declarations if a DOCTYPE was found.
replacement = _s2bytes('')
if len(doctype_results) == 1 and entity_results:
match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
safe_entities = filter(match_safe_entities, entity_results)
if safe_entities:
replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
+ _s2bytes('>\n<!ENTITY ').join(safe_entities) \
+ _s2bytes('>\n]>')
data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
# Precompute the safe entities for the loose parser.
safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
return version, data, safe_entities
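# Hedged sketch (not in the original source): for a Netscape RSS 0.91
# document such as
#   <!DOCTYPE rss PUBLIC "-//Netscape Communications//DTD RSS 0.91//EN" ...>
# replace_doctype() reports version u'rss091n', strips the DOCTYPE, and
# re-inserts only those ENTITY declarations matching the safe pattern,
# also returning them as a dict for the loose parser.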
# GeoRSS geometry parsers. Each return a dict with 'type' and 'coordinates'
# items, or None in the case of a parsing error.
def _parse_poslist(value, geom_type, swap=True, dims=2):
if geom_type == 'linestring':
return _parse_georss_line(value, swap, dims)
elif geom_type == 'polygon':
ring = _parse_georss_line(value, swap, dims)
return {'type': u'Polygon', 'coordinates': (ring['coordinates'],)}
else:
return None
def _gen_georss_coords(value, swap=True, dims=2):
# A generator of (lon, lat) pairs from a string of encoded GeoRSS
# coordinates. Converts to floats and swaps order.
latlons = itertools.imap(float, value.strip().replace(',', ' ').split())
nxt = latlons.next
while True:
t = [nxt(), nxt()][::swap and -1 or 1]
if dims == 3:
t.append(nxt())
yield tuple(t)
def _parse_georss_point(value, swap=True, dims=2):
# A point contains a single latitude-longitude pair, separated by
# whitespace. We'll also handle comma separators.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {u'type': u'Point', u'coordinates': coords[0]}
except (IndexError, ValueError):
return None
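# Illustrative expectation (not in the original source): GeoRSS encodes
# latitude first, so with the default swap=True
#   _parse_georss_point('45.256 -71.92')
# should yield {'type': u'Point', 'coordinates': (-71.92, 45.256)},
# i.e. (lon, lat) order as used by GeoJSON-style consumers.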
def _parse_georss_line(value, swap=True, dims=2):
# A line contains a space separated list of latitude-longitude pairs in
# WGS84 coordinate reference system, with each pair separated by
# whitespace. There must be at least two pairs.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {u'type': u'LineString', u'coordinates': coords}
except (IndexError, ValueError):
return None
def _parse_georss_polygon(value, swap=True, dims=2):
# A polygon contains a space separated list of latitude-longitude pairs,
# with each pair separated by whitespace. There must be at least four
# pairs, with the last being identical to the first (so a polygon has a
# minimum of three actual points).
try:
ring = list(_gen_georss_coords(value, swap, dims))
except (IndexError, ValueError):
return None
if len(ring) < 4:
return None
return {u'type': u'Polygon', u'coordinates': (ring,)}
def _parse_georss_box(value, swap=True, dims=2):
# A bounding box is a rectangular region, often used to define the extents
    # of a map or a rough area of interest. A box contains two space separated
# latitude-longitude pairs, with each pair separated by whitespace. The
# first pair is the lower corner, the second is the upper corner.
try:
coords = list(_gen_georss_coords(value, swap, dims))
return {u'type': u'Box', u'coordinates': tuple(coords)}
except (IndexError, ValueError):
return None
# end geospatial parsers
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
'''Parse a feed from a URL, file, stream, or string.
request_headers, if given, is a dict from http header name to value to add
to the request; this overrides internally generated values.
'''
if handlers is None:
handlers = []
if request_headers is None:
request_headers = {}
if response_headers is None:
response_headers = {}
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
result['bozo'] = 0
if not isinstance(handlers, list):
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = None
f = None
if hasattr(f, 'headers'):
result['headers'] = dict(f.headers)
# overwrite existing headers using response_headers
if 'headers' in result:
result['headers'].update(response_headers)
elif response_headers:
result['headers'] = copy.deepcopy(response_headers)
# lowercase all of the HTTP headers for comparisons per RFC 2616
if 'headers' in result:
http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
else:
http_headers = {}
# if feed is gzip-compressed, decompress it
if f and data and http_headers:
if gzip and 'gzip' in http_headers.get('content-encoding', ''):
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except (IOError, struct.error), e:
# IOError can occur if the gzip header is bad.
# struct.error can occur if the data is damaged.
result['bozo'] = 1
result['bozo_exception'] = e
if isinstance(e, struct.error):
# A gzip header was found but the data is corrupt.
# Ideally, we should re-request the feed without the
# 'Accept-encoding: gzip' header, but we don't.
data = None
elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
try:
data = zlib.decompress(data)
except zlib.error, e:
try:
# The data may have no headers and no checksum.
data = zlib.decompress(data, -15)
except zlib.error, e:
result['bozo'] = 1
result['bozo_exception'] = e
# save HTTP headers
if http_headers:
if 'etag' in http_headers:
etag = http_headers.get('etag', u'')
if not isinstance(etag, unicode):
etag = etag.decode('utf-8', 'ignore')
if etag:
result['etag'] = etag
if 'last-modified' in http_headers:
modified = http_headers.get('last-modified', u'')
if modified:
result['modified'] = modified
result['modified_parsed'] = _parse_date(modified)
if hasattr(f, 'url'):
if not isinstance(f.url, unicode):
result['href'] = f.url.decode('utf-8', 'ignore')
else:
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'close'):
f.close()
if data is None:
return result
# Stop processing if the server sent HTTP 304 Not Modified.
if getattr(f, 'code', 0) == 304:
result['version'] = u''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
data, result['encoding'], error = convert_to_utf8(http_headers, data)
    use_strict_parser = bool(result['encoding'])
if error is not None:
result['bozo'] = 1
result['bozo_exception'] = error
result['version'], data, entities = replace_doctype(data)
# Ensure that baseuri is an absolute URI using an acceptable URI scheme.
contentloc = http_headers.get('content-location', u'')
href = result.get('href', u'')
baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
baselang = http_headers.get('content-language', None)
if not isinstance(baselang, unicode) and baselang is not None:
baselang = baselang.decode('utf-8', 'ignore')
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
try:
# disable downloading external doctype references, if possible
saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
except xml.sax.SAXNotSupportedException:
pass
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
try:
saxparser.parse(source)
except xml.sax.SAXException, e:
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser and _SGML_AVAILABLE:
feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
feedparser.feed(data.decode('utf-8', 'replace'))
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
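# Typical usage sketch (the URL is a placeholder, not from the original
# source):
#   d = parse('http://example.org/feed.xml')
#   d['bozo']                      # 1 if the feed was ill-formed in any way
#   d.feed.get('title')            # FeedParserDict allows attribute access
#   titles = [e.get('title') for e in d.entries]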
# The list of EPSG codes for geographic (latitude/longitude) coordinate
# systems to support decoding of GeoRSS GML profiles.
_geogCS = [
3819, 3821, 3824, 3889, 3906, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008,
4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4018, 4019, 4020, 4021, 4022,
4023, 4024, 4025, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036,
4041, 4042, 4043, 4044, 4045, 4046, 4047, 4052, 4053, 4054, 4055, 4075, 4081,
4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 4130, 4131, 4132,
4133, 4134, 4135, 4136, 4137, 4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145,
4146, 4147, 4148, 4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158,
4159, 4160, 4161, 4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171,
4172, 4173, 4174, 4175, 4176, 4178, 4179, 4180, 4181, 4182, 4183, 4184, 4185,
4188, 4189, 4190, 4191, 4192, 4193, 4194, 4195, 4196, 4197, 4198, 4199, 4200,
4201, 4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213,
4214, 4215, 4216, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227,
4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240,
4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253,
4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266,
4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279,
4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4291, 4292, 4293,
4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4306, 4307,
4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4322,
4324, 4326, 4463, 4470, 4475, 4483, 4490, 4555, 4558, 4600, 4601, 4602, 4603,
4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616,
4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629,
4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642,
4643, 4644, 4645, 4646, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665,
4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678,
4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691,
4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704,
4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717,
4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730,
4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743,
4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756,
4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4801, 4802, 4803, 4804,
4805, 4806, 4807, 4808, 4809, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4818,
4819, 4820, 4821, 4823, 4824, 4901, 4902, 4903, 4904, 4979 ]
| mit |
xzYue/odoo | addons/account/wizard/account_invoice_state.py | 340 | 2875 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_invoice_confirm(osv.osv_memory):
"""
    This wizard will confirm all the selected draft invoices
"""
_name = "account.invoice.confirm"
_description = "Confirm the selected invoices"
def invoice_confirm(self, cr, uid, ids, context=None):
if context is None:
context = {}
active_ids = context.get('active_ids', []) or []
proxy = self.pool['account.invoice']
for record in proxy.browse(cr, uid, active_ids, context=context):
if record.state not in ('draft', 'proforma', 'proforma2'):
raise osv.except_osv(_('Warning!'), _("Selected invoice(s) cannot be confirmed as they are not in 'Draft' or 'Pro-Forma' state."))
record.signal_workflow('invoice_open')
return {'type': 'ir.actions.act_window_close'}
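    # Hedged usage sketch (standard Odoo wizard plumbing; the ids below are
    # placeholders): invoked from an invoice list view, the selection
    # arrives via the context, e.g.
    #   self.pool['account.invoice.confirm'].invoice_confirm(
    #       cr, uid, [], context={'active_ids': [7, 8]})
    # which fires the 'invoice_open' workflow signal on each selected
    # draft or pro-forma invoice.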
class account_invoice_cancel(osv.osv_memory):
"""
    This wizard will cancel all the selected invoices.
    If the journal does not allow cancelling entries, a warning message is raised.
"""
_name = "account.invoice.cancel"
_description = "Cancel the Selected Invoices"
def invoice_cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
proxy = self.pool['account.invoice']
active_ids = context.get('active_ids', []) or []
for record in proxy.browse(cr, uid, active_ids, context=context):
if record.state in ('cancel','paid'):
raise osv.except_osv(_('Warning!'), _("Selected invoice(s) cannot be cancelled as they are already in 'Cancelled' or 'Done' state."))
record.signal_workflow('invoice_cancel')
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
CompMusic/essentia | test/src/unittest/standard/test_unaryoperator.py | 10 | 3671 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestUnaryOperator(TestCase):
testInput = [1,2,3,4,3.4,-5.0008, 100034]
def testEmpty(self):
self.assertEqualVector(UnaryOperator()([]), [])
def testOne(self):
self.assertEqualVector(UnaryOperator(type="identity")([101]), [101])
def testAbs(self):
self.assertAlmostEqualVector(UnaryOperator(type="abs")(self.testInput),
[1,2,3,4,3.4,5.0008,100034])
def testLog10(self):
self.assertAlmostEqualVector(
UnaryOperator(type="log10")(self.testInput),
[0., 0.30103001, 0.4771212637, 0.60206002, 0.5314789414, -30., 5.0001478195])
def testLog(self):
self.assertAlmostEqualVector(
UnaryOperator(type="log")(self.testInput),
[0., 0.6931471825, 1.0986123085, 1.3862943649, 1.223775506, -69.0775527954, 11.5132656097])
def testLn(self):
self.assertAlmostEqualVector(UnaryOperator(type="ln")(self.testInput),
[0, 0.693147181, 1.098612289, 1.386294361, 1.223775432, -69.07755279, 11.513265407])
def testLin2Db(self):
self.assertAlmostEqualVector(
UnaryOperator(type="lin2db")(self.testInput),
[0., 3.01029992, 4.77121258, 6.02059984, 5.3147893, -90., 50.00147629])
def testDb2Lin(self):
        # remove the last element because it is so large that it causes
        # an overflow
self.assertAlmostEqualVector(
UnaryOperator(type="db2lin")(self.testInput[:-1]),
[1.25892544, 1.58489323, 1.99526227, 2.51188636, 2.18776178, 0.3161695],
2e-7)
def testSine(self):
self.assertAlmostEqualVector(UnaryOperator(type="sin")(self.testInput),
[0.841470985, 0.909297427, 0.141120008, -0.756802495, -0.255541102, 0.958697038, -0.559079868], 1e-6)
def testCosine(self):
self.assertAlmostEqualVector(UnaryOperator(type="cos")(self.testInput),
[0.540302306, -0.416146837, -0.989992497, -0.653643621, -0.966798193, 0.284429234, 0.829113805], 1e-6)
def testSqrt(self):
# first take abs so we won't take sqrt of a negative (that test comes later)
absInput = UnaryOperator(type="abs")(self.testInput)
self.assertAlmostEqualVector(UnaryOperator(type="sqrt")(absInput),
[1, 1.414213562, 1.732050808, 2, 1.843908891, 2.236246856, 316.281520168])
def testSqrtNegative(self):
self.assertComputeFails(UnaryOperator(type="sqrt"),([0, -1, 1]))
def testSquare(self):
self.assertAlmostEqualVector(UnaryOperator(type="square")(self.testInput),
[1, 4, 9, 16, 11.56, 25.0080006, 10006801156])
def testInvalidParam(self):
self.assertConfigureFails(UnaryOperator(), {'type':'exp'})
suite = allTests(TestUnaryOperator)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 |
murraymeehan/marsyas | scripts/createMarSystem.py | 3 | 2266 | #!/usr/bin/env python
'''
Script to generate skeleton files for a new MarSystem.
Usage:
createMarSystem.py NameOfNewMarSystem
This will create the files NameOfNewMarSystem.h and NameOfNewMarSystem.cpp
if the current directory.
'''
import os
import sys
def create_from_template(template_file, template_name, target_file, target_name):
'''
Create a MarSystem based on a given template.
Copy the lines from the given template_file to target_file and replace
the occurences of template_name to target_name.
Also remove the '//' style comments, but not the '/* */' and '///' ones.
'''
# If file already exists: quit.
if os.path.exists(target_file):
raise ValueError('file "%s" already exists' % target_file)
template = open(template_file, 'r')
target = open(target_file, 'w')
for line in template:
# Skip comment lines starting with '// '
# (other comments like '/* */' and '///' are kept.)
if line.strip() == '//' or line.strip()[:3] == '// ':
continue
# Replace the template_name with the target_name
line = line.replace(template_name, target_name)
# Replace the upper case versions too (for the #define stuff)
line = line.replace(template_name.upper(), target_name.upper())
target.write(line)
template.close()
target.close()
if __name__ == '__main__':
# Get target name from command line.
if len(sys.argv) == 2:
target_name = sys.argv[1]
else:
print >>sys.stderr, 'usage: %s MarSystemName' % sys.argv[0]
sys.exit(1)
# Get the base directory of Marsyas
marsyas_base_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
print 'Marsyas base directory:', marsyas_base_dir
# The template file.
template_name = 'MarSystemTemplateBasic'
for ext in ['.h', '.cpp']:
# Construct the file names.
template_file = os.path.join(marsyas_base_dir, 'src', 'marsyas', template_name + ext)
target_file = target_name + ext
# Do the copy and replace stuff.
create_from_template(template_file, template_name, target_file, target_name)
print 'created "%s" from template "%s"' % (target_file, template_file)
| gpl-2.0 |
jcoady9/python-for-android | python3-alpha/python3-src/Lib/test/test_os.py | 47 | 46096 | # As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# portable than they had been thought to be.
import os
import errno
import unittest
import warnings
import sys
import signal
import subprocess
import time
import shutil
from test import support
import contextlib
import mmap
import uuid
# Detect whether we're on a Linux system that uses the (now outdated
# and unmaintained) linuxthreads threading library. There's an issue
# when combining linuxthreads with a failed execv call: see
# http://bugs.python.org/issue4970.
if (hasattr(os, "confstr_names") and
"CS_GNU_LIBPTHREAD_VERSION" in os.confstr_names):
libpthread = os.confstr("CS_GNU_LIBPTHREAD_VERSION")
    USING_LINUXTHREADS = libpthread.startswith("linuxthreads")
else:
    USING_LINUXTHREADS = False
# Tests creating TESTFN
class FileTests(unittest.TestCase):
def setUp(self):
if os.path.exists(support.TESTFN):
os.unlink(support.TESTFN)
tearDown = setUp
def test_access(self):
f = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
os.close(f)
self.assertTrue(os.access(support.TESTFN, os.W_OK))
def test_closerange(self):
first = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
# We must allocate two consecutive file descriptors, otherwise
# it will mess up other file descriptors (perhaps even the three
# standard ones).
second = os.dup(first)
try:
retries = 0
while second != first + 1:
os.close(first)
retries += 1
if retries > 10:
# XXX test skipped
self.skipTest("couldn't allocate two consecutive fds")
first, second = second, os.dup(second)
finally:
os.close(second)
# close a fd that is open, and one that isn't
os.closerange(first, first + 2)
self.assertRaises(OSError, os.write, first, b"a")
@support.cpython_only
def test_rename(self):
path = support.TESTFN
old = sys.getrefcount(path)
self.assertRaises(TypeError, os.rename, path, 0)
new = sys.getrefcount(path)
self.assertEqual(old, new)
def test_read(self):
with open(support.TESTFN, "w+b") as fobj:
fobj.write(b"spam")
fobj.flush()
fd = fobj.fileno()
os.lseek(fd, 0, 0)
s = os.read(fd, 4)
self.assertEqual(type(s), bytes)
self.assertEqual(s, b"spam")
def test_write(self):
# os.write() accepts bytes- and buffer-like objects but not strings
fd = os.open(support.TESTFN, os.O_CREAT | os.O_WRONLY)
self.assertRaises(TypeError, os.write, fd, "beans")
os.write(fd, b"bacon\n")
os.write(fd, bytearray(b"eggs\n"))
os.write(fd, memoryview(b"spam\n"))
os.close(fd)
with open(support.TESTFN, "rb") as fobj:
self.assertEqual(fobj.read().splitlines(),
[b"bacon", b"eggs", b"spam"])
def write_windows_console(self, *args):
retcode = subprocess.call(args,
# use a new console to not flood the test output
creationflags=subprocess.CREATE_NEW_CONSOLE,
# use a shell to hide the console window (SW_HIDE)
shell=True)
self.assertEqual(retcode, 0)
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the Windows console')
def test_write_windows_console(self):
# Issue #11395: the Windows console returns an error (12: not enough
# space error) on writing into stdout if stdout mode is binary and the
# length is greater than 66,000 bytes (or less, depending on heap
# usage).
code = "print('x' * 100000)"
self.write_windows_console(sys.executable, "-c", code)
self.write_windows_console(sys.executable, "-u", "-c", code)
def fdopen_helper(self, *args):
fd = os.open(support.TESTFN, os.O_RDONLY)
f = os.fdopen(fd, *args)
f.close()
def test_fdopen(self):
fd = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
os.close(fd)
self.fdopen_helper()
self.fdopen_helper('r')
self.fdopen_helper('r', 100)
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
def setUp(self):
os.mkdir(support.TESTFN)
self.fname = os.path.join(support.TESTFN, "f1")
f = open(self.fname, 'wb')
f.write(b"ABC")
f.close()
def tearDown(self):
os.unlink(self.fname)
os.rmdir(support.TESTFN)
def check_stat_attributes(self, fname):
if not hasattr(os, "stat"):
return
import stat
result = os.stat(fname)
# Make sure direct access works
self.assertEqual(result[stat.ST_SIZE], 3)
self.assertEqual(result.st_size, 3)
# Make sure all the attributes are there
members = dir(result)
for name in dir(stat):
if name[:3] == 'ST_':
attr = name.lower()
if name.endswith("TIME"):
def trunc(x): return int(x)
else:
def trunc(x): return x
self.assertEqual(trunc(getattr(result, attr)),
result[getattr(stat, name)])
self.assertIn(attr, members)
try:
result[200]
self.fail("No exception thrown")
except IndexError:
pass
# Make sure that assignment fails
try:
result.st_mode = 1
self.fail("No exception thrown")
except AttributeError:
pass
try:
result.st_rdev = 1
self.fail("No exception thrown")
except (AttributeError, TypeError):
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the stat_result constructor with a too-short tuple.
try:
result2 = os.stat_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_stat_attributes(self):
self.check_stat_attributes(self.fname)
def test_stat_attributes_bytes(self):
try:
fname = self.fname.encode(sys.getfilesystemencoding())
except UnicodeEncodeError:
self.skipTest("cannot encode %a for the filesystem" % self.fname)
self.check_stat_attributes(fname)
def test_statvfs_attributes(self):
if not hasattr(os, "statvfs"):
return
try:
result = os.statvfs(self.fname)
except OSError as e:
# On AtheOS, glibc always returns ENOSYS
if e.errno == errno.ENOSYS:
return
# Make sure direct access works
self.assertEqual(result.f_bfree, result[3])
# Make sure all the attributes are there.
members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files',
'ffree', 'favail', 'flag', 'namemax')
for value, member in enumerate(members):
self.assertEqual(getattr(result, 'f_' + member), result[value])
# Make sure that assignment really fails
try:
result.f_bfree = 1
self.fail("No exception thrown")
except AttributeError:
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the constructor with a too-short tuple.
try:
result2 = os.statvfs_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
# Use the constructor with a too-long tuple.
try:
result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_utime_dir(self):
delta = 1000000
st = os.stat(support.TESTFN)
# round to int, because some systems may support sub-second
# time stamps in stat, but not in utime.
os.utime(support.TESTFN, (st.st_atime, int(st.st_mtime-delta)))
st2 = os.stat(support.TESTFN)
self.assertEqual(st2.st_mtime, int(st.st_mtime-delta))
# Restrict test to Win32, since there is no guarantee other
# systems support centiseconds
if sys.platform == 'win32':
def get_file_system(path):
root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
import ctypes
kernel32 = ctypes.windll.kernel32
buf = ctypes.create_unicode_buffer("", 100)
if kernel32.GetVolumeInformationW(root, None, 0, None, None, None, buf, len(buf)):
return buf.value
if get_file_system(support.TESTFN) == "NTFS":
def test_1565150(self):
t1 = 1159195039.25
os.utime(self.fname, (t1, t1))
self.assertEqual(os.stat(self.fname).st_mtime, t1)
def test_large_time(self):
t1 = 5000000000 # some day in 2128
os.utime(self.fname, (t1, t1))
self.assertEqual(os.stat(self.fname).st_mtime, t1)
def test_1686475(self):
# Verify that an open file can be stat'ed
try:
os.stat(r"c:\pagefile.sys")
except WindowsError as e:
if e.errno == 2: # file does not exist; cannot run test
return
self.fail("Could not stat pagefile.sys")
from test import mapping_tests
class EnvironTests(mapping_tests.BasicTestMappingProtocol):
"""check that os.environ object conform to mapping protocol"""
type2test = None
def setUp(self):
self.__save = dict(os.environ)
if os.supports_bytes_environ:
self.__saveb = dict(os.environb)
for key, value in self._reference().items():
os.environ[key] = value
def tearDown(self):
os.environ.clear()
os.environ.update(self.__save)
if os.supports_bytes_environ:
os.environb.clear()
os.environb.update(self.__saveb)
def _reference(self):
return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"}
def _empty_mapping(self):
os.environ.clear()
return os.environ
# Bug 1110478
def test_update2(self):
os.environ.clear()
if os.path.exists("/bin/sh"):
os.environ.update(HELLO="World")
with os.popen("/bin/sh -c 'echo $HELLO'") as popen:
value = popen.read().strip()
self.assertEqual(value, "World")
def test_os_popen_iter(self):
if os.path.exists("/bin/sh"):
with os.popen(
"/bin/sh -c 'echo \"line1\nline2\nline3\"'") as popen:
it = iter(popen)
self.assertEqual(next(it), "line1\n")
self.assertEqual(next(it), "line2\n")
self.assertEqual(next(it), "line3\n")
self.assertRaises(StopIteration, next, it)
# Verify environ keys and values from the OS are of the
# correct str type.
def test_keyvalue_types(self):
for key, val in os.environ.items():
self.assertEqual(type(key), str)
self.assertEqual(type(val), str)
def test_items(self):
for key, value in self._reference().items():
self.assertEqual(os.environ.get(key), value)
# Issue 7310
def test___repr__(self):
"""Check that the repr() of os.environ looks like environ({...})."""
env = os.environ
self.assertEqual(repr(env), 'environ({{{}}})'.format(', '.join(
'{!r}: {!r}'.format(key, value)
for key, value in env.items())))
def test_get_exec_path(self):
defpath_list = os.defpath.split(os.pathsep)
test_path = ['/monty', '/python', '', '/flying/circus']
test_env = {'PATH': os.pathsep.join(test_path)}
saved_environ = os.environ
try:
os.environ = dict(test_env)
# Test that defaulting to os.environ works.
self.assertSequenceEqual(test_path, os.get_exec_path())
self.assertSequenceEqual(test_path, os.get_exec_path(env=None))
finally:
os.environ = saved_environ
# No PATH environment variable
self.assertSequenceEqual(defpath_list, os.get_exec_path({}))
# Empty PATH environment variable
self.assertSequenceEqual(('',), os.get_exec_path({'PATH':''}))
# Supplied PATH environment variable
self.assertSequenceEqual(test_path, os.get_exec_path(test_env))
if os.supports_bytes_environ:
# env cannot contain 'PATH' and b'PATH' keys
try:
# ignore BytesWarning warning
with warnings.catch_warnings(record=True):
mixed_env = {'PATH': '1', b'PATH': b'2'}
except BytesWarning:
# mixed_env cannot be created with python -bb
pass
else:
self.assertRaises(ValueError, os.get_exec_path, mixed_env)
# bytes key and/or value
self.assertSequenceEqual(os.get_exec_path({b'PATH': b'abc'}),
['abc'])
self.assertSequenceEqual(os.get_exec_path({b'PATH': 'abc'}),
['abc'])
self.assertSequenceEqual(os.get_exec_path({'PATH': b'abc'}),
['abc'])
@unittest.skipUnless(os.supports_bytes_environ,
"os.environb required for this test.")
def test_environb(self):
# os.environ -> os.environb
value = 'euro\u20ac'
try:
value_bytes = value.encode(sys.getfilesystemencoding(),
'surrogateescape')
except UnicodeEncodeError:
msg = "U+20AC character is not encodable to %s" % (
sys.getfilesystemencoding(),)
self.skipTest(msg)
os.environ['unicode'] = value
self.assertEqual(os.environ['unicode'], value)
self.assertEqual(os.environb[b'unicode'], value_bytes)
# os.environb -> os.environ
value = b'\xff'
os.environb[b'bytes'] = value
self.assertEqual(os.environb[b'bytes'], value)
value_str = value.decode(sys.getfilesystemencoding(), 'surrogateescape')
self.assertEqual(os.environ['bytes'], value_str)
class WalkTests(unittest.TestCase):
"""Tests for os.walk()."""
def test_traversal(self):
import os
from os.path import join
# Build:
# TESTFN/
# TEST1/ a file kid and two directory kids
# tmp1
# SUB1/ a file kid and a directory kid
# tmp2
# SUB11/ no kids
# SUB2/ a file kid and a dirsymlink kid
# tmp3
# link/ a symlink to TESTFN.2
# TEST2/
# tmp4 a lone file
walk_path = join(support.TESTFN, "TEST1")
sub1_path = join(walk_path, "SUB1")
sub11_path = join(sub1_path, "SUB11")
sub2_path = join(walk_path, "SUB2")
tmp1_path = join(walk_path, "tmp1")
tmp2_path = join(sub1_path, "tmp2")
tmp3_path = join(sub2_path, "tmp3")
link_path = join(sub2_path, "link")
t2_path = join(support.TESTFN, "TEST2")
tmp4_path = join(support.TESTFN, "TEST2", "tmp4")
# Create stuff.
os.makedirs(sub11_path)
os.makedirs(sub2_path)
os.makedirs(t2_path)
for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path:
f = open(path, "w")
f.write("I'm " + path + " and proud of it. Blame test_os.\n")
f.close()
if support.can_symlink():
os.symlink(os.path.abspath(t2_path), link_path)
sub2_tree = (sub2_path, ["link"], ["tmp3"])
else:
sub2_tree = (sub2_path, [], ["tmp3"])
# Walk top-down.
all = list(os.walk(walk_path))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: TESTFN, SUB1, SUB11, SUB2
# flipped: TESTFN, SUB2, SUB1, SUB11
flipped = all[0][1][0] != "SUB1"
all[0][1].sort()
self.assertEqual(all[0], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 + flipped], (sub11_path, [], []))
self.assertEqual(all[3 - 2 * flipped], sub2_tree)
# Prune the search.
all = []
for root, dirs, files in os.walk(walk_path):
all.append((root, dirs, files))
# Don't descend into SUB1.
if 'SUB1' in dirs:
# Note that this also mutates the dirs we appended to all!
dirs.remove('SUB1')
self.assertEqual(len(all), 2)
self.assertEqual(all[0], (walk_path, ["SUB2"], ["tmp1"]))
self.assertEqual(all[1], sub2_tree)
# Walk bottom-up.
all = list(os.walk(walk_path, topdown=False))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: SUB11, SUB1, SUB2, TESTFN
# flipped: SUB2, SUB11, SUB1, TESTFN
flipped = all[3][1][0] != "SUB1"
all[3][1].sort()
self.assertEqual(all[3], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[flipped], (sub11_path, [], []))
self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 - 2 * flipped], sub2_tree)
if support.can_symlink():
# Walk, following symlinks.
for root, dirs, files in os.walk(walk_path, followlinks=True):
if root == link_path:
self.assertEqual(dirs, [])
self.assertEqual(files, ["tmp4"])
break
else:
self.fail("Didn't follow symlink with followlinks=True")
def tearDown(self):
# Tear everything down. This is a decent use for bottom-up on
# Windows, which doesn't have a recursive delete command. The
# (not so) subtlety is that rmdir will fail unless the dir's
# kids are removed first, so bottom up is essential.
for root, dirs, files in os.walk(support.TESTFN, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
dirname = os.path.join(root, name)
if not os.path.islink(dirname):
os.rmdir(dirname)
else:
os.remove(dirname)
os.rmdir(support.TESTFN)
class MakedirTests(unittest.TestCase):
def setUp(self):
os.mkdir(support.TESTFN)
def test_makedir(self):
base = support.TESTFN
path = os.path.join(base, 'dir1', 'dir2', 'dir3')
os.makedirs(path) # Should work
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4')
os.makedirs(path)
# Try paths with a '.' in them
self.assertRaises(OSError, os.makedirs, os.curdir)
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir)
os.makedirs(path)
path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4',
'dir5', 'dir6')
os.makedirs(path)
def test_exist_ok_existing_directory(self):
path = os.path.join(support.TESTFN, 'dir1')
mode = 0o777
old_mask = os.umask(0o022)
os.makedirs(path, mode)
self.assertRaises(OSError, os.makedirs, path, mode)
self.assertRaises(OSError, os.makedirs, path, mode, exist_ok=False)
self.assertRaises(OSError, os.makedirs, path, 0o776, exist_ok=True)
os.makedirs(path, mode=mode, exist_ok=True)
os.umask(old_mask)
def test_exist_ok_existing_regular_file(self):
base = support.TESTFN
path = os.path.join(support.TESTFN, 'dir1')
f = open(path, 'w')
f.write('abc')
f.close()
self.assertRaises(OSError, os.makedirs, path)
self.assertRaises(OSError, os.makedirs, path, exist_ok=False)
self.assertRaises(OSError, os.makedirs, path, exist_ok=True)
os.remove(path)
def tearDown(self):
path = os.path.join(support.TESTFN, 'dir1', 'dir2', 'dir3',
'dir4', 'dir5', 'dir6')
# If the tests failed, the bottom-most directory ('../dir6')
# may not have been created, so we look for the outermost directory
# that exists.
while not os.path.exists(path) and path != support.TESTFN:
path = os.path.dirname(path)
os.removedirs(path)
class DevNullTests(unittest.TestCase):
def test_devnull(self):
with open(os.devnull, 'wb') as f:
f.write(b'hello')
f.close()
with open(os.devnull, 'rb') as f:
self.assertEqual(f.read(), b'')
class URandomTests(unittest.TestCase):
def test_urandom(self):
try:
self.assertEqual(len(os.urandom(1)), 1)
self.assertEqual(len(os.urandom(10)), 10)
self.assertEqual(len(os.urandom(100)), 100)
self.assertEqual(len(os.urandom(1000)), 1000)
except NotImplementedError:
pass
@contextlib.contextmanager
def _execvpe_mockup(defpath=None):
"""
Stubs out execv and execve functions when used as context manager.
Records exec calls. The mock execv and execve functions always raise an
exception as they would normally never return.
"""
# A list of tuples containing (function name, first arg, args)
# of calls to execv or execve that have been made.
calls = []
def mock_execv(name, *args):
calls.append(('execv', name, args))
raise RuntimeError("execv called")
def mock_execve(name, *args):
calls.append(('execve', name, args))
raise OSError(errno.ENOTDIR, "execve called")
try:
orig_execv = os.execv
orig_execve = os.execve
orig_defpath = os.defpath
os.execv = mock_execv
os.execve = mock_execve
if defpath is not None:
os.defpath = defpath
yield calls
finally:
os.execv = orig_execv
os.execve = orig_execve
os.defpath = orig_defpath
class ExecTests(unittest.TestCase):
@unittest.skipIf(USING_LINUXTHREADS,
"avoid triggering a linuxthreads bug: see issue #4970")
def test_execvpe_with_bad_program(self):
self.assertRaises(OSError, os.execvpe, 'no such app-',
['no such app-'], None)
def test_execvpe_with_bad_arglist(self):
self.assertRaises(ValueError, os.execvpe, 'notepad', [], None)
@unittest.skipUnless(hasattr(os, '_execvpe'),
"No internal os._execvpe function to test.")
def _test_internal_execvpe(self, test_type):
program_path = os.sep + 'absolutepath'
if test_type is bytes:
program = b'executable'
fullpath = os.path.join(os.fsencode(program_path), program)
native_fullpath = fullpath
arguments = [b'progname', 'arg1', 'arg2']
else:
program = 'executable'
arguments = ['progname', 'arg1', 'arg2']
fullpath = os.path.join(program_path, program)
if os.name != "nt":
native_fullpath = os.fsencode(fullpath)
else:
native_fullpath = fullpath
env = {'spam': 'beans'}
# test os._execvpe() with an absolute path
with _execvpe_mockup() as calls:
self.assertRaises(RuntimeError,
os._execvpe, fullpath, arguments)
self.assertEqual(len(calls), 1)
self.assertEqual(calls[0], ('execv', fullpath, (arguments,)))
# test os._execvpe() with a relative path:
# os.get_exec_path() returns defpath
with _execvpe_mockup(defpath=program_path) as calls:
self.assertRaises(OSError,
os._execvpe, program, arguments, env=env)
self.assertEqual(len(calls), 1)
self.assertSequenceEqual(calls[0],
('execve', native_fullpath, (arguments, env)))
# test os._execvpe() with a relative path:
# os.get_exec_path() reads the 'PATH' variable
with _execvpe_mockup() as calls:
env_path = env.copy()
if test_type is bytes:
env_path[b'PATH'] = program_path
else:
env_path['PATH'] = program_path
self.assertRaises(OSError,
os._execvpe, program, arguments, env=env_path)
self.assertEqual(len(calls), 1)
self.assertSequenceEqual(calls[0],
('execve', native_fullpath, (arguments, env_path)))
def test_internal_execvpe_str(self):
self._test_internal_execvpe(str)
if os.name != "nt":
self._test_internal_execvpe(bytes)
class Win32ErrorTests(unittest.TestCase):
def test_rename(self):
self.assertRaises(WindowsError, os.rename, support.TESTFN, support.TESTFN+".bak")
def test_remove(self):
self.assertRaises(WindowsError, os.remove, support.TESTFN)
def test_chdir(self):
self.assertRaises(WindowsError, os.chdir, support.TESTFN)
def test_mkdir(self):
f = open(support.TESTFN, "w")
try:
self.assertRaises(WindowsError, os.mkdir, support.TESTFN)
finally:
f.close()
os.unlink(support.TESTFN)
def test_utime(self):
self.assertRaises(WindowsError, os.utime, support.TESTFN, None)
def test_chmod(self):
self.assertRaises(WindowsError, os.chmod, support.TESTFN, 0)
class TestInvalidFD(unittest.TestCase):
singles = ["fchdir", "dup", "fdopen", "fdatasync", "fstat",
"fstatvfs", "fsync", "tcgetpgrp", "ttyname"]
#singles.append("close")
    # We omit close because it doesn't raise an exception on some platforms.
def get_single(f):
def helper(self):
if hasattr(os, f):
self.check(getattr(os, f))
return helper
for f in singles:
locals()["test_"+f] = get_single(f)
def check(self, f, *args):
try:
f(support.make_bad_fd(), *args)
except OSError as e:
self.assertEqual(e.errno, errno.EBADF)
else:
self.fail("%r didn't raise a OSError with a bad file descriptor"
% f)
def test_isatty(self):
if hasattr(os, "isatty"):
self.assertEqual(os.isatty(support.make_bad_fd()), False)
def test_closerange(self):
if hasattr(os, "closerange"):
fd = support.make_bad_fd()
# Make sure none of the descriptors we are about to close are
# currently valid (issue 6542).
for i in range(10):
try: os.fstat(fd+i)
except OSError:
pass
else:
break
if i < 2:
raise unittest.SkipTest(
"Unable to acquire a range of invalid file descriptors")
self.assertEqual(os.closerange(fd, fd + i-1), None)
def test_dup2(self):
if hasattr(os, "dup2"):
self.check(os.dup2, 20)
def test_fchmod(self):
if hasattr(os, "fchmod"):
self.check(os.fchmod, 0)
def test_fchown(self):
if hasattr(os, "fchown"):
self.check(os.fchown, -1, -1)
def test_fpathconf(self):
if hasattr(os, "fpathconf"):
self.check(os.fpathconf, "PC_NAME_MAX")
def test_ftruncate(self):
if hasattr(os, "ftruncate"):
self.check(os.ftruncate, 0)
def test_lseek(self):
if hasattr(os, "lseek"):
self.check(os.lseek, 0, 0)
def test_read(self):
if hasattr(os, "read"):
self.check(os.read, 1)
def test_tcsetpgrpt(self):
if hasattr(os, "tcsetpgrp"):
self.check(os.tcsetpgrp, 0)
def test_write(self):
if hasattr(os, "write"):
self.check(os.write, b" ")
class LinkTests(unittest.TestCase):
def setUp(self):
self.file1 = support.TESTFN
self.file2 = os.path.join(support.TESTFN + "2")
def tearDown(self):
for file in (self.file1, self.file2):
if os.path.exists(file):
os.unlink(file)
def _test_link(self, file1, file2):
with open(file1, "w") as f1:
f1.write("test")
os.link(file1, file2)
with open(file1, "r") as f1, open(file2, "r") as f2:
self.assertTrue(os.path.sameopenfile(f1.fileno(), f2.fileno()))
def test_link(self):
self._test_link(self.file1, self.file2)
def test_link_bytes(self):
self._test_link(bytes(self.file1, sys.getfilesystemencoding()),
bytes(self.file2, sys.getfilesystemencoding()))
def test_unicode_name(self):
try:
os.fsencode("\xf1")
except UnicodeError:
raise unittest.SkipTest("Unable to encode for this platform.")
self.file1 += "\xf1"
self.file2 = self.file1 + "2"
self._test_link(self.file1, self.file2)
if sys.platform != 'win32':
class Win32ErrorTests(unittest.TestCase):
pass
class PosixUidGidTests(unittest.TestCase):
if hasattr(os, 'setuid'):
def test_setuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setuid, 0)
self.assertRaises(OverflowError, os.setuid, 1<<32)
if hasattr(os, 'setgid'):
def test_setgid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setgid, 0)
self.assertRaises(OverflowError, os.setgid, 1<<32)
if hasattr(os, 'seteuid'):
def test_seteuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.seteuid, 0)
self.assertRaises(OverflowError, os.seteuid, 1<<32)
if hasattr(os, 'setegid'):
def test_setegid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setegid, 0)
self.assertRaises(OverflowError, os.setegid, 1<<32)
if hasattr(os, 'setreuid'):
def test_setreuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setreuid, 0, 0)
self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)
def test_setreuid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setreuid(-1,-1);sys.exit(0)'])
if hasattr(os, 'setregid'):
def test_setregid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setregid, 0, 0)
self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
self.assertRaises(OverflowError, os.setregid, 0, 1<<32)
def test_setregid_neg1(self):
# Needs to accept -1. We run this in a subprocess to avoid
# altering the test runner's process state (issue8045).
subprocess.check_call([
sys.executable, '-c',
'import os,sys;os.setregid(-1,-1);sys.exit(0)'])
class Pep383Tests(unittest.TestCase):
def setUp(self):
if support.TESTFN_UNENCODABLE:
self.dir = support.TESTFN_UNENCODABLE
else:
self.dir = support.TESTFN
self.bdir = os.fsencode(self.dir)
bytesfn = []
def add_filename(fn):
try:
fn = os.fsencode(fn)
except UnicodeEncodeError:
return
bytesfn.append(fn)
add_filename(support.TESTFN_UNICODE)
if support.TESTFN_UNENCODABLE:
add_filename(support.TESTFN_UNENCODABLE)
if not bytesfn:
self.skipTest("couldn't create any non-ascii filename")
self.unicodefn = set()
os.mkdir(self.dir)
try:
for fn in bytesfn:
f = open(os.path.join(self.bdir, fn), "w")
f.close()
fn = os.fsdecode(fn)
if fn in self.unicodefn:
raise ValueError("duplicate filename")
self.unicodefn.add(fn)
except:
shutil.rmtree(self.dir)
raise
def tearDown(self):
shutil.rmtree(self.dir)
def test_listdir(self):
expected = self.unicodefn
found = set(os.listdir(self.dir))
self.assertEqual(found, expected)
def test_open(self):
for fn in self.unicodefn:
f = open(os.path.join(self.dir, fn), 'rb')
f.close()
def test_stat(self):
for fn in self.unicodefn:
os.stat(os.path.join(self.dir, fn))
else:
class PosixUidGidTests(unittest.TestCase):
pass
class Pep383Tests(unittest.TestCase):
pass
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32KillTests(unittest.TestCase):
def _kill(self, sig):
# Start sys.executable as a subprocess and communicate from the
# subprocess to the parent that the interpreter is ready. When it
# becomes ready, send *sig* via os.kill to the subprocess and check
# that the return code is equal to *sig*.
import ctypes
from ctypes import wintypes
import msvcrt
# Since we can't access the contents of the process' stdout until the
# process has exited, use PeekNamedPipe to see what's inside stdout
# without waiting. This is done so we can tell that the interpreter
# is started and running at a point where it could handle a signal.
PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
PeekNamedPipe.restype = wintypes.BOOL
PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle
ctypes.POINTER(ctypes.c_char), # stdout buf
wintypes.DWORD, # Buffer size
ctypes.POINTER(wintypes.DWORD), # bytes read
ctypes.POINTER(wintypes.DWORD), # bytes avail
ctypes.POINTER(wintypes.DWORD)) # bytes left
msg = "running"
proc = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('{}');"
"sys.stdout.flush();"
"input()".format(msg)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
self.addCleanup(proc.stdout.close)
self.addCleanup(proc.stderr.close)
self.addCleanup(proc.stdin.close)
count, max = 0, 100
while count < max and proc.poll() is None:
# Create a string buffer to store the result of stdout from the pipe
buf = ctypes.create_string_buffer(len(msg))
# Obtain the text currently in proc.stdout
# Bytes read/avail/left are left as NULL and unused
rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()),
buf, ctypes.sizeof(buf), None, None, None)
self.assertNotEqual(rslt, 0, "PeekNamedPipe failed")
if buf.value:
self.assertEqual(msg, buf.value.decode())
break
time.sleep(0.1)
count += 1
else:
self.fail("Did not receive communication from the subprocess")
os.kill(proc.pid, sig)
self.assertEqual(proc.wait(), sig)
def test_kill_sigterm(self):
# SIGTERM doesn't mean anything special, but make sure it works
self._kill(signal.SIGTERM)
def test_kill_int(self):
# os.kill on Windows can take an int which gets set as the exit code
self._kill(100)
def _kill_with_event(self, event, name):
tagname = "test_os_%s" % uuid.uuid1()
m = mmap.mmap(-1, 1, tagname)
m[0] = 0
# Run a script which has console control handling enabled.
proc = subprocess.Popen([sys.executable,
os.path.join(os.path.dirname(__file__),
"win_console_handler.py"), tagname],
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
# Let the interpreter startup before we send signals. See #3137.
count, max = 0, 100
while count < max and proc.poll() is None:
if m[0] == 1:
break
time.sleep(0.1)
count += 1
else:
# Forcefully kill the process if we weren't able to signal it.
os.kill(proc.pid, signal.SIGINT)
self.fail("Subprocess didn't finish initialization")
os.kill(proc.pid, event)
# proc.send_signal(event) could also be done here.
# Allow time for the signal to be passed and the process to exit.
time.sleep(0.5)
if not proc.poll():
# Forcefully kill the process if we weren't able to signal it.
os.kill(proc.pid, signal.SIGINT)
self.fail("subprocess did not stop on {}".format(name))
@unittest.skip("subprocesses aren't inheriting CTRL+C property")
def test_CTRL_C_EVENT(self):
from ctypes import wintypes
import ctypes
# Make a NULL value by creating a pointer with no argument.
NULL = ctypes.POINTER(ctypes.c_int)()
SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int),
wintypes.BOOL)
SetConsoleCtrlHandler.restype = wintypes.BOOL
# Calling this with NULL and FALSE causes the calling process to
# handle CTRL+C, rather than ignore it. This property is inherited
# by subprocesses.
SetConsoleCtrlHandler(NULL, 0)
self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT")
def test_CTRL_BREAK_EVENT(self):
self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
@support.skip_unless_symlink
class Win32SymlinkTests(unittest.TestCase):
filelink = 'filelinktest'
filelink_target = os.path.abspath(__file__)
dirlink = 'dirlinktest'
dirlink_target = os.path.dirname(filelink_target)
missing_link = 'missing link'
def setUp(self):
assert os.path.exists(self.dirlink_target)
assert os.path.exists(self.filelink_target)
assert not os.path.exists(self.dirlink)
assert not os.path.exists(self.filelink)
assert not os.path.exists(self.missing_link)
def tearDown(self):
if os.path.exists(self.filelink):
os.remove(self.filelink)
if os.path.exists(self.dirlink):
os.rmdir(self.dirlink)
if os.path.lexists(self.missing_link):
os.remove(self.missing_link)
def test_directory_link(self):
os.symlink(self.dirlink_target, self.dirlink)
self.assertTrue(os.path.exists(self.dirlink))
self.assertTrue(os.path.isdir(self.dirlink))
self.assertTrue(os.path.islink(self.dirlink))
self.check_stat(self.dirlink, self.dirlink_target)
def test_file_link(self):
os.symlink(self.filelink_target, self.filelink)
self.assertTrue(os.path.exists(self.filelink))
self.assertTrue(os.path.isfile(self.filelink))
self.assertTrue(os.path.islink(self.filelink))
self.check_stat(self.filelink, self.filelink_target)
def _create_missing_dir_link(self):
'Create a "directory" link to a non-existent target'
linkname = self.missing_link
if os.path.lexists(linkname):
os.remove(linkname)
target = r'c:\\target does not exist.29r3c740'
assert not os.path.exists(target)
target_is_dir = True
os.symlink(target, linkname, target_is_dir)
def test_remove_directory_link_to_missing_target(self):
self._create_missing_dir_link()
# For compatibility with Unix, os.remove will check the
# directory status and call RemoveDirectory if the symlink
# was created with target_is_dir==True.
os.remove(self.missing_link)
@unittest.skip("currently fails; consider for improvement")
def test_isdir_on_directory_link_to_missing_target(self):
self._create_missing_dir_link()
# consider having isdir return true for directory links
self.assertTrue(os.path.isdir(self.missing_link))
@unittest.skip("currently fails; consider for improvement")
def test_rmdir_on_directory_link_to_missing_target(self):
self._create_missing_dir_link()
# consider allowing rmdir to remove directory links
os.rmdir(self.missing_link)
def check_stat(self, link, target):
self.assertEqual(os.stat(link), os.stat(target))
self.assertNotEqual(os.lstat(link), os.stat(link))
bytes_link = os.fsencode(link)
self.assertEqual(os.stat(bytes_link), os.stat(target))
self.assertNotEqual(os.lstat(bytes_link), os.stat(bytes_link))
def test_12084(self):
level1 = os.path.abspath(support.TESTFN)
level2 = os.path.join(level1, "level2")
level3 = os.path.join(level2, "level3")
try:
os.mkdir(level1)
os.mkdir(level2)
os.mkdir(level3)
file1 = os.path.abspath(os.path.join(level1, "file1"))
with open(file1, "w") as f:
f.write("file1")
orig_dir = os.getcwd()
try:
os.chdir(level2)
link = os.path.join(level2, "link")
os.symlink(os.path.relpath(file1), "link")
self.assertIn("link", os.listdir(os.getcwd()))
# Check os.stat calls from the same dir as the link
self.assertEqual(os.stat(file1), os.stat("link"))
# Check os.stat calls from a dir below the link
os.chdir(level1)
self.assertEqual(os.stat(file1),
os.stat(os.path.relpath(link)))
# Check os.stat calls from a dir above the link
os.chdir(level3)
self.assertEqual(os.stat(file1),
os.stat(os.path.relpath(link)))
finally:
os.chdir(orig_dir)
except OSError as err:
self.fail(err)
finally:
os.remove(file1)
shutil.rmtree(level1)
class FSEncodingTests(unittest.TestCase):
def test_nop(self):
self.assertEqual(os.fsencode(b'abc\xff'), b'abc\xff')
self.assertEqual(os.fsdecode('abc\u0141'), 'abc\u0141')
def test_identity(self):
# assert fsdecode(fsencode(x)) == x
for fn in ('unicode\u0141', 'latin\xe9', 'ascii'):
try:
bytesfn = os.fsencode(fn)
except UnicodeEncodeError:
continue
self.assertEqual(os.fsdecode(bytesfn), fn)
class PidTests(unittest.TestCase):
@unittest.skipUnless(hasattr(os, 'getppid'), "test needs os.getppid")
def test_getppid(self):
p = subprocess.Popen([sys.executable, '-c',
'import os; print(os.getppid())'],
stdout=subprocess.PIPE)
stdout, _ = p.communicate()
# We are the parent of our subprocess
self.assertEqual(int(stdout), os.getpid())
# The introduction of this TestCase caused at least two different errors on
# *nix buildbots. Temporarily skip this to let the buildbots move along.
@unittest.skip("Skip due to platform/environment differences on *NIX buildbots")
@unittest.skipUnless(hasattr(os, 'getlogin'), "test needs os.getlogin")
class LoginTests(unittest.TestCase):
def test_getlogin(self):
user_name = os.getlogin()
self.assertNotEqual(len(user_name), 0)
def test_main():
support.run_unittest(
FileTests,
StatAttributeTests,
EnvironTests,
WalkTests,
MakedirTests,
DevNullTests,
URandomTests,
ExecTests,
Win32ErrorTests,
TestInvalidFD,
PosixUidGidTests,
Pep383Tests,
Win32KillTests,
Win32SymlinkTests,
FSEncodingTests,
PidTests,
LoginTests,
LinkTests,
)
if __name__ == "__main__":
test_main()
| apache-2.0 |
h00dy/Diamond | src/collectors/redisstat/redisstat.py | 22 | 10479 | # coding=utf-8
"""
Collects data from one or more Redis Servers
#### Dependencies
* redis
#### Notes
The collector is named redisstat because of an import issue: the Python
client library is called redis, so a collector module with the same name
would shadow it. The odd name doesn't affect how the collector is used.
Example config file RedisCollector.conf
```
enabled=True
host=redis.example.com
port=16379
auth=PASSWORD
```
or for multi-instance mode:
```
enabled=True
instances = nick1@host1:port1, nick2@host2:port2/PASSWORD, ...
```
For connecting via unix sockets, provide the path prefixed with ``unix:``
instead of the host, e.g.
```
enabled=True
host=unix:/var/run/redis/redis.sock
```
or
```
enabled = True
instances = nick3@unix:/var/run/redis.sock:/PASSWORD
```
In that case, for disambiguation there must be a colon ``:`` before the slash
``/`` followed by the password.
Note: when using the host/port config mode, the port number is used in
the metric key. When using the multi-instance mode, the nick will be used.
If not specified, the port will be used. In case of unix sockets, the base name
without file extension (i.e. in the aforementioned examples ``redis``)
is the default metric key.
"""
import diamond.collector
import time
import os
try:
import redis
except ImportError:
redis = None
SOCKET_PREFIX = 'unix:'
SOCKET_PREFIX_LEN = len(SOCKET_PREFIX)
class RedisCollector(diamond.collector.Collector):
_DATABASE_COUNT = 16
_DEFAULT_DB = 0
_DEFAULT_HOST = 'localhost'
_DEFAULT_PORT = 6379
_DEFAULT_SOCK_TIMEOUT = 5
_KEYS = {'clients.blocked': 'blocked_clients',
'clients.connected': 'connected_clients',
'clients.longest_output_list': 'client_longest_output_list',
'cpu.parent.sys': 'used_cpu_sys',
'cpu.children.sys': 'used_cpu_sys_children',
'cpu.parent.user': 'used_cpu_user',
'cpu.children.user': 'used_cpu_user_children',
'hash_max_zipmap.entries': 'hash_max_zipmap_entries',
'hash_max_zipmap.value': 'hash_max_zipmap_value',
'keys.evicted': 'evicted_keys',
'keys.expired': 'expired_keys',
'keyspace.hits': 'keyspace_hits',
'keyspace.misses': 'keyspace_misses',
'last_save.changes_since': 'changes_since_last_save',
'last_save.time': 'last_save_time',
'memory.internal_view': 'used_memory',
'memory.external_view': 'used_memory_rss',
'memory.fragmentation_ratio': 'mem_fragmentation_ratio',
'process.commands_processed': 'total_commands_processed',
'process.connections_received': 'total_connections_received',
'process.uptime': 'uptime_in_seconds',
'pubsub.channels': 'pubsub_channels',
'pubsub.patterns': 'pubsub_patterns',
'slaves.connected': 'connected_slaves',
'slaves.last_io': 'master_last_io_seconds_ago'}
_RENAMED_KEYS = {'last_save.changes_since': 'rdb_changes_since_last_save',
'last_save.time': 'rdb_last_save_time'}
def process_config(self):
super(RedisCollector, self).process_config()
instance_list = self.config['instances']
        # configobj turns a single-element list into a str; convert it back
if isinstance(instance_list, basestring):
instance_list = [instance_list]
# process original single redis instance
if len(instance_list) == 0:
host = self.config['host']
port = int(self.config['port'])
auth = self.config['auth']
if auth is not None:
instance_list.append('%s:%d/%s' % (host, port, auth))
else:
instance_list.append('%s:%d' % (host, port))
self.instances = {}
for instance in instance_list:
if '@' in instance:
(nickname, hostport) = instance.split('@', 1)
else:
nickname = None
hostport = instance
if hostport.startswith(SOCKET_PREFIX):
unix_socket, __, port_auth = hostport[
SOCKET_PREFIX_LEN:].partition(':')
auth = port_auth.partition('/')[2] or None
if nickname is None:
nickname = os.path.splitext(
os.path.basename(unix_socket))[0]
self.instances[nickname] = (self._DEFAULT_HOST,
self._DEFAULT_PORT,
unix_socket,
auth)
else:
if '/' in hostport:
parts = hostport.split('/')
hostport = parts[0]
auth = parts[1]
else:
auth = None
if ':' in hostport:
if hostport[0] == ':':
host = self._DEFAULT_HOST
port = int(hostport[1:])
else:
parts = hostport.split(':')
host = parts[0]
port = int(parts[1])
else:
host = hostport
port = self._DEFAULT_PORT
if nickname is None:
nickname = str(port)
self.instances[nickname] = (host, port, None, auth)
self.log.debug("Configured instances: %s" % self.instances.items())
def get_default_config_help(self):
config_help = super(RedisCollector, self).get_default_config_help()
config_help.update({
'host': 'Hostname to collect from',
'port': 'Port number to collect from',
'timeout': 'Socket timeout',
'db': '',
            'auth': 'Password used to authenticate with Redis, if required',
'databases': 'how many database instances to collect',
'instances': "Redis addresses, comma separated, syntax:"
+ " nick1@host:port, nick2@:port or nick3@host"
})
return config_help
def get_default_config(self):
"""
Return default config
:rtype: dict
"""
config = super(RedisCollector, self).get_default_config()
config.update({
'host': self._DEFAULT_HOST,
'port': self._DEFAULT_PORT,
'timeout': self._DEFAULT_SOCK_TIMEOUT,
'db': self._DEFAULT_DB,
'auth': None,
'databases': self._DATABASE_COUNT,
'path': 'redis',
'instances': [],
})
return config
def _client(self, host, port, unix_socket, auth):
"""Return a redis client for the configuration.
:param str host: redis host
:param int port: redis port
:rtype: redis.Redis
"""
db = int(self.config['db'])
timeout = int(self.config['timeout'])
try:
cli = redis.Redis(host=host, port=port,
db=db, socket_timeout=timeout, password=auth,
unix_socket_path=unix_socket)
cli.ping()
return cli
except Exception, ex:
self.log.error("RedisCollector: failed to connect to %s:%i. %s.",
unix_socket or host, port, ex)
def _precision(self, value):
"""Return the precision of the number
:param str value: The value to find the precision of
:rtype: int
"""
value = str(value)
decimal = value.rfind('.')
if decimal == -1:
return 0
return len(value) - decimal - 1
def _publish_key(self, nick, key):
"""Return the full key for the partial key.
:param str nick: Nickname for Redis instance
:param str key: The key name
:rtype: str
"""
return '%s.%s' % (nick, key)
def _get_info(self, host, port, unix_socket, auth):
"""Return info dict from specified Redis instance
:param str host: redis host
:param int port: redis port
:rtype: dict
"""
client = self._client(host, port, unix_socket, auth)
if client is None:
return None
info = client.info()
del client
return info
def collect_instance(self, nick, host, port, unix_socket, auth):
"""Collect metrics from a single Redis instance
:param str nick: nickname of redis instance
:param str host: redis host
:param int port: redis port
:param str unix_socket: unix socket, if applicable
:param str auth: authentication password
"""
# Connect to redis and get the info
info = self._get_info(host, port, unix_socket, auth)
if info is None:
return
# The structure should include the port for multiple instances per
# server
data = dict()
# Iterate over the top level keys
for key in self._KEYS:
if self._KEYS[key] in info:
data[key] = info[self._KEYS[key]]
# Iterate over renamed keys for 2.6 support
for key in self._RENAMED_KEYS:
if self._RENAMED_KEYS[key] in info:
data[key] = info[self._RENAMED_KEYS[key]]
        # Look for database-specific stats
for dbnum in range(0, int(self.config.get('databases',
self._DATABASE_COUNT))):
db = 'db%i' % dbnum
if db in info:
for key in info[db]:
data['%s.%s' % (db, key)] = info[db][key]
# Time since last save
for key in ['last_save_time', 'rdb_last_save_time']:
if key in info:
data['last_save.time_since'] = int(time.time()) - info[key]
# Publish the data to graphite
for key in data:
self.publish(self._publish_key(nick, key),
data[key],
precision=self._precision(data[key]),
metric_type='GAUGE')
def collect(self):
"""Collect the stats from the redis instance and publish them.
"""
if redis is None:
self.log.error('Unable to import module redis')
return {}
for nick in self.instances.keys():
(host, port, unix_socket, auth) = self.instances[nick]
self.collect_instance(nick, host, int(port), unix_socket, auth)
| mit |
JoachimVandersmissen/CodingSolutions | python/PythonForEveryone/chapter3/22.py | 1 | 1199 | month = int(input("Please enter your birthday month"))
day = int(input("Please enter your birthday day"))
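# Note: the cusp dates below follow one common convention; exact sign
# boundaries shift by about a day depending on the year and source.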
if month == 1:
if day < 20:
print("Capricorn")
else:
print("Aquarius")
if month == 2:
if day < 20:
print("Aquarius")
else:
print("Pisces")
if month == 3:
if day < 21:
print("Pisces")
else:
print("Aries")
if month == 4:
if day < 21:
print("Aries")
else:
print("Taurus")
if month == 5:
if day < 21:
print("Taurus")
else:
print("Gemini")
if month == 6:
if day < 22:
print("Gemini")
else:
print("Cancer")
if month == 7:
if day < 23:
print("Cancer")
else:
print("Leo")
if month == 8:
if day < 24:
print("Leo")
else:
print("Virgo")
if month == 9:
if day < 23:
print("Virgo")
else:
print("Libra")
if month == 10:
if day < 24:
print("Libra")
else:
print("Scorpio")
if month == 11:
if day < 23:
print("Scorpio")
else:
print("Sagittarius")
if month == 12:
if day < 23:
print("Sagittarius")
else:
print("Capricorn")
| apache-2.0 |
slohse/ansible | lib/ansible/modules/network/vyos/vyos_facts.py | 37 | 8854 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_facts
version_added: "2.2"
author: "Nathaniel Case (@qalthos)"
short_description: Collect facts from remote devices running VyOS
description:
- Collects a base set of device facts from a remote device that
is running VyOS. This module prepends all of the
base network fact keys with U(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: vyos
notes:
- Tested against VYOS 1.1.7
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, default, config, and neighbors. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: "!config"
"""
EXAMPLES = """
- name: collect all facts from the device
vyos_facts:
gather_subset: all
- name: collect only the config and default facts
vyos_facts:
gather_subset: config
- name: collect everything except the config
vyos_facts:
gather_subset: "!config"
"""
RETURN = """
ansible_net_config:
description: The running-config from the device
returned: when config is configured
type: str
ansible_net_commits:
description: The set of available configuration revisions
returned: when present
type: list
ansible_net_hostname:
description: The configured system hostname
returned: always
type: str
ansible_net_model:
description: The device model string
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the device
returned: always
type: str
ansible_net_version:
description: The version of the software running
returned: always
type: str
ansible_net_neighbors:
description: The set of LLDP neighbors
returned: when interface is configured
type: list
ansible_net_gather_subset:
description: The list of subsets gathered by the module
returned: always
type: list
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.vyos.vyos import run_commands
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
class FactsBase(object):
COMMANDS = frozenset()
def __init__(self, module):
self.module = module
self.facts = dict()
self.responses = None
def populate(self):
self.responses = run_commands(self.module, list(self.COMMANDS))
class Default(FactsBase):
COMMANDS = [
'show version',
'show host name',
]
def populate(self):
super(Default, self).populate()
data = self.responses[0]
self.facts['version'] = self.parse_version(data)
self.facts['serialnum'] = self.parse_serialnum(data)
self.facts['model'] = self.parse_model(data)
self.facts['hostname'] = self.responses[1]
def parse_version(self, data):
match = re.search(r'Version:\s*(.*)', data)
if match:
return match.group(1)
def parse_model(self, data):
match = re.search(r'HW model:\s*(\S+)', data)
if match:
return match.group(1)
def parse_serialnum(self, data):
match = re.search(r'HW S/N:\s+(\S+)', data)
if match:
return match.group(1)
class Config(FactsBase):
COMMANDS = [
'show configuration commands',
'show system commit',
]
def populate(self):
super(Config, self).populate()
self.facts['config'] = self.responses
commits = self.responses[1]
entries = list()
entry = None
for line in commits.split('\n'):
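            # Commit lines look roughly like (assumed format):
            # '0  2018-01-01 12:00:00 by vyos via cli'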
match = re.match(r'(\d+)\s+(.+)by(.+)via(.+)', line)
if match:
if entry:
entries.append(entry)
entry = dict(revision=match.group(1),
datetime=match.group(2),
by=str(match.group(3)).strip(),
via=str(match.group(4)).strip(),
comment=None)
else:
entry['comment'] = line.strip()
self.facts['commits'] = entries
class Neighbors(FactsBase):
COMMANDS = [
'show lldp neighbors',
'show lldp neighbors detail',
]
def populate(self):
super(Neighbors, self).populate()
all_neighbors = self.responses[0]
if 'LLDP not configured' not in all_neighbors:
neighbors = self.parse(
self.responses[1]
)
self.facts['neighbors'] = self.parse_neighbors(neighbors)
def parse(self, data):
parsed = list()
values = None
for line in data.split('\n'):
if not line:
continue
elif line[0] == ' ':
values += '\n%s' % line
elif line.startswith('Interface'):
if values:
parsed.append(values)
values = line
if values:
parsed.append(values)
return parsed
def parse_neighbors(self, data):
facts = dict()
for item in data:
interface = self.parse_interface(item)
host = self.parse_host(item)
port = self.parse_port(item)
if interface not in facts:
facts[interface] = list()
facts[interface].append(dict(host=host, port=port))
return facts
def parse_interface(self, data):
match = re.search(r'^Interface:\s+(\S+),', data)
return match.group(1)
def parse_host(self, data):
match = re.search(r'SysName:\s+(.+)$', data, re.M)
if match:
return match.group(1)
def parse_port(self, data):
match = re.search(r'PortDescr:\s+(.+)$', data, re.M)
if match:
return match.group(1)
FACT_SUBSETS = dict(
default=Default,
neighbors=Neighbors,
config=Config
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
argument_spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
argument_spec.update(vyos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
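    # 'all' expands to every known subset; a leading '!' marks a subset for
    # exclusion. Exclusions are applied after the loop, and the 'default'
    # facts are always collected.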
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Subset must be one of [%s], got %s' %
(', '.join(VALID_SUBSETS), subset))
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
| gpl-3.0 |
cespare/pastedown | vendor/pygments/pygments/lexers/_luabuiltins.py | 26 | 6863 | # -*- coding: utf-8 -*-
"""
pygments.lexers._luabuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of lua functions
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'basic': ['_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getfenv',
'getmetatable',
'ipairs',
'load',
'loadfile',
'loadstring',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawset',
'select',
'setfenv',
'setmetatable',
'tonumber',
'tostring',
'type',
'unpack',
'xpcall'],
'coroutine': ['coroutine.create',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'],
'debug': ['debug.debug',
'debug.getfenv',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.setfenv',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.traceback'],
'io': ['io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.tmpfile',
'io.type',
'io.write'],
'math': ['math.abs',
'math.acos',
'math.asin',
'math.atan2',
'math.atan',
'math.ceil',
'math.cosh',
'math.cos',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log10',
'math.log',
'math.max',
'math.min',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sinh',
'math.sin',
'math.sqrt',
'math.tanh',
'math.tan'],
'modules': ['module',
'require',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.seeall'],
'os': ['os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'],
'string': ['string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.rep',
'string.reverse',
'string.sub',
'string.upper'],
'table': ['table.concat',
'table.insert',
'table.maxn',
'table.remove',
'table.sort']}
if __name__ == '__main__':
import re
import urllib
import pprint
# you can't generally find out what module a function belongs to if you
# have only its name. Because of this, here are some callback functions
    # that recognize if a given function belongs to a specific module
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
def is_in_modules_module(name):
if name in ['require', 'module'] or name.startswith('package'):
return True
else:
return False
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urllib.urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
for line in f:
m = r.match(line)
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urllib.urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
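        # e.g. 'coroutine.create' -> 'coroutine' (via callback),
        # 'string.sub' -> 'string', 'print' -> 'basic'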
for mod, cb in module_callbacks().iteritems():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
def regenerate(filename, modules):
f = open(filename)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
f = open(filename, 'w')
f.write(header)
f.write('MODULES = %s\n\n' % pprint.pformat(modules))
f.write(footer)
f.close()
def run():
version = get_newest_version()
print '> Downloading function index for Lua %s' % version
functions = get_lua_functions(version)
print '> %d functions found:' % len(functions)
modules = {}
for full_function_name in functions:
print '>> %s' % full_function_name
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
regenerate(__file__, modules)
run()
| mit |
adsorensen/girder | tests/cases/path_utilities_test.py | 3 | 2095 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import unittest
from girder.utility import path
strings = [
('abcd', 'abcd'),
('/', '\/'),
('\\', '\\\\'),
('/\\', '\/\\\\'),
('\\//\\', '\\\\\/\/\\\\'),
('a\\\\b//c\\d', 'a\\\\\\\\b\/\/c\\\\d')
]
paths = [
('abcd', ['abcd']),
('/abcd', ['', 'abcd']),
('/ab/cd/ef/gh', ['', 'ab', 'cd', 'ef', 'gh']),
('/ab/cd//', ['', 'ab', 'cd', '', '']),
('ab\\/cd', ['ab/cd']),
('ab\/c/d', ['ab/c', 'd']),
('ab\//cd', ['ab/', 'cd']),
('ab/\/cd', ['ab', '/cd']),
('ab\\\\/cd', ['ab\\', 'cd']),
('ab\\\\/\\\\cd', ['ab\\', '\\cd']),
('ab\\\\\\/\\\\cd', ['ab\\/\\cd']),
('/\\\\abcd\\\\/', ['', '\\abcd\\', '']),
('/\\\\\\\\/\\//\\\\', ['', '\\\\', '/', '\\'])
]
class TestPathUtilities(unittest.TestCase):
"""Tests the girder.utility.path module."""
def testEncodeStrings(self):
for raw, encoded in strings:
self.assertEqual(path.encode(raw), encoded)
def testDecodeStrings(self):
for raw, encoded in strings:
self.assertEqual(path.decode(encoded), raw)
def testSplitPath(self):
for pth, tokens in paths:
self.assertEqual(path.split(pth), tokens)
def testJoinTokens(self):
for pth, tokens in paths:
self.assertEqual(path.join(tokens), pth)
| apache-2.0 |
Dreamsolution/django-auth-policy | testsite/views.py | 1 | 2666 | import logging
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponse
from django.template.response import TemplateResponse
from django.utils.http import is_safe_url
from django.shortcuts import resolve_url
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login
from django.contrib.sites.shortcuts import get_current_site
from django_auth_policy.decorators import login_not_required
from django_auth_policy.forms import StrictAuthenticationForm
logger = logging.getLogger(__name__)
@sensitive_post_parameters()
@csrf_protect
@never_cache
def login(request, template_name='registration/login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=StrictAuthenticationForm,
current_app=None, extra_context=None):
"""
Displays the login form and handles the login action.
This view is the same as the Django 1.6 login view but uses the
StrictAuthenticationForm
"""
redirect_to = request.POST.get(redirect_field_name,
request.GET.get(redirect_field_name, ''))
if request.method == "POST":
form = authentication_form(request, data=request.POST)
if form.is_valid():
# Ensure the user-originating redirection url is safe.
if not is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
# Okay, security check complete. Log the user in.
auth_login(request, form.get_user())
return HttpResponseRedirect(redirect_to)
else:
form = authentication_form(request)
current_site = get_current_site(request)
context = {
'form': form,
redirect_field_name: redirect_to,
'site': current_site,
'site_name': current_site.name,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context,
current_app=current_app)
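# A hedged wiring sketch (an assumed urls.py, not part of this module):
#   from django.conf.urls import url
#   from testsite import views
#   urlpatterns = [url(r'^login/$', views.login, name='login')]
# which routes login POSTs through the StrictAuthenticationForm above.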
@login_required
def login_required_view(request):
""" View used in tests
"""
return HttpResponse('ok')
@login_not_required
def login_not_required_view(request):
""" View used in tests
"""
return HttpResponse('login not required!')
def another_view(request):
""" View used in tests
"""
return HttpResponse('another view')
| bsd-3-clause |
vlegoff/tsunami | src/test/secondaires/crafting/test_talent.py | 1 | 2655 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier définissant les unittest de talents de guilde."""
import unittest
from test.primaires.joueur.static.joueur import ManipulationJoueur
class TestTalent(ManipulationJoueur, unittest.TestCase):
"""Unittest des talents de guilde."""
def test_creation(self):
"""Test l'ajout de talents à des guildes vides."""
joueur = self.creer_joueur("simple", "Fiche")
        # Create a guild with two talents
guilde = importeur.crafting.creer_guilde("forgerons")
guilde.ajouter_rang("apprenti")
guilde.ajouter_talent("forge", "talent de la forge", False)
guilde.ajouter_talent("ciseau", "art du ciseau", False)
guilde.ouvrir()
avant = joueur.points_apprentissage_max
        # Joining the guild should grant 100 additional learning points
guilde.rejoindre(joueur)
apres = joueur.points_apprentissage_max
self.assertEqual(avant + 100, apres)
        # Clean up after the test
importeur.crafting.supprimer_guilde("forgerons")
self.supprimer_joueur(joueur)
| bsd-3-clause |
FelixZYY/gyp | pylib/gyp/common.py | 34 | 19170 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
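# A small usage sketch of the decorator above (illustrative only, assuming
# hashable arguments; unhashable ones would raise a TypeError on the cache
# lookup):
#
# @memoize
# def Fib(n):
#   return n if n < 2 else Fib(n - 1) + Fib(n - 2)
#
# Repeated calls with the same arguments return the cached result instead of
# recomputing it.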
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# Convert to normalized (and therefore absolute paths).
path = os.path.realpath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
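# Illustrative example (hedged, since both arguments are first resolved with
# os.path.realpath against the current directory):
#   RelativePath('foo/bar/baz.gyp', 'foo/qux') -> '../bar/baz.gyp'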
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
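# A hedged illustration of the quoting and escaping rules above:
#   EncodePOSIXShellArgument('ab c')  -> "ab c"   (space triggers quoting)
#   EncodePOSIXShellArgument('a"b')   -> a\"b     (quote is escaped, no wrap)
#   EncodePOSIXShellList(['echo', '$HOME dir']) -> echo "$HOME dir"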
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer:
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
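# Typical use (an illustrative sketch, not prescribed by this module):
#   out = WriteOnDiff('build.ninja')
#   out.write(contents)          # contents: the new file body
#   out.close()                  # leaves build.ninja untouched if unchanged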
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
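# e.g. (illustrative) uniquer([1, 2, 1, 3]) -> [1, 2, 3]; passing
# idfun=str.lower de-dupes case-insensitively while keeping the first
# spelling seen.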
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
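# Usage sketch (illustrative): insertion order is preserved, so
#   s = OrderedSet('abracadabra')
#   list(s)  -> ['a', 'b', 'r', 'c', 'd']
#   s.pop()  -> 'd'  (the most recently added key)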
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
      return re.findall(r'\$\(([^)]+)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
    ['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
| bsd-3-clause |
deka108/meas_deka | assessment/utilities/answer_formatter.py | 2 | 3442 | """
# Name: check_answer_api/utilities/answer_formatter.py
# Description:
# Created by: Martinus Alexander
# Date Created: Nov 30, 2016
# Last Modified: Dec 21, 2016
# Modified by: Martinus Alexander
"""
import re
'''
Splitting a sequence of characters into a list of terms.
Used to improve the trigonometric proving assessment.
This supports the first additional feature: find whether the user wrote the
important step.
Approach: split the expression into a list of terms, then compare each term.
'''
def split_term(expression):
# End of expression
if len(expression) == 0:
return []
    # Find the number of bracket pairs that must be consumed for this term;
    # bracket_left tracks how many pairs remain to be matched
if re.match(r"^\\frac", expression):
bracket_left = 2
elif re.match(r"^\\(sin|cos|tan|cot|sec|csc|log)\^", expression):
bracket_left = 2
elif re.match(r"^\\(sin|cos|tan|cot|sec|csc|log)", expression):
bracket_left = 1
else:
bracket_left = 0
    # Find the index of the first opening bracket (0 when no bracket is
    # needed); this is the starting index to explore
if bracket_left > 0:
start_position = expression.index("{")
else:
start_position = 0
end_position = start_position
counter = 1
# Find the location of closing bracket
while counter > 0 and bracket_left > 0:
end_position = end_position + 1
# Increment or decrement according to the current character
if expression[end_position] == '{':
counter = counter + 1
elif expression[end_position] == '}':
counter = counter - 1
# One pair of bracket has been found
if counter == 0:
bracket_left = bracket_left - 1
    # Include the trailing characters up to the next delimiter (+ or -)
while end_position < len(expression) - 1:
if ((expression[end_position + 1] == '+') or
(expression[end_position + 1] == '-')):
break
else:
end_position = end_position + 1
result = expression[0:end_position + 1]
    # Remove the unnecessary positive sign (+) at the beginning
if result.startswith('+'):
result = result[1:]
# Wrap into list
result_list = [result]
return (result_list + split_term(expression[end_position + 1:]))
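# A hedged example of the splitting behaviour (illustrative only):
#   split_term(r"\sin{x}+\cos{y}") -> [r"\sin{x}", r"\cos{y}"]
# The leading "+" of the second term is stripped by the check above, while a
# "-" sign stays attached to its term.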
'''
Splitting a sequence of characters into a list of answers.
Delimiter = "|"
'''
def split_answer(expression):
return expression.split("|")
"""
Split an expression into two parts, LHS and RHS
Can handle the plane geometry as well.
"""
def split_LHS_RHS(expression):
# Remove space
expression = expression.replace(" ", "")
    split_expr = re.split(r"=|\\bot|\\parallel", expression)
if len(split_expr) != 2:
        # Error: the expression does not contain exactly one delimiter
        return None, None, None
# Find delimiter
if "\bot" in expression:
delimiter = "\bot"
elif "\parallel" in expression:
delimiter = "\parallel"
else:
delimiter = "="
LHS = split_expr[0]
RHS = split_expr[1]
return LHS, RHS, delimiter
def split_plane_geometry_term(expression):
LHS, RHS, delimiter = split_LHS_RHS(expression)
if LHS is None and RHS is None:
# Error when splitting RHS and RHS
return None
LHS_split = split_term(LHS)
RHS_split = split_term(RHS)
result_dict = {}
result_dict['LHS'] = LHS_split
result_dict['RHS'] = RHS_split
result_dict['delimiter'] = delimiter
return result_dict
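# Illustrative usage (hedged, relying on the raw-string delimiters above):
#   split_plane_geometry_term(r"AB \bot CD")
#   -> {'LHS': ['AB'], 'RHS': ['CD'], 'delimiter': r'\bot'}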
| apache-2.0 |
xodus7/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/batch_normalization_test.py | 25 | 9994 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BatchNorm Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.batch_normalization import BatchNormalization
from tensorflow.contrib.distributions.python.ops.bijectors.invert import Invert
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class BatchNormTest(test_util.VectorDistributionTestHelpers,
test.TestCase):
def _reduction_axes(self, input_shape, event_dims):
if isinstance(event_dims, int):
event_dims = [event_dims]
ndims = len(input_shape)
# Convert event_dims to non-negative indexing.
event_dims = list(event_dims)
for idx, x in enumerate(event_dims):
if x < 0:
event_dims[idx] = ndims + x
return tuple(i for i in range(ndims) if i not in event_dims)
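  # For example (illustrative): with input_shape=(5, 2, 4) and
  # event_dims=[-1], the event axis resolves to 2, so the reduction
  # axes returned are (0, 1) and statistics are taken over those axes.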
def testForwardInverse(self):
"""Tests forward and backward passes with different event shapes.
input_shape: Tuple of shapes for input tensor.
event_dims: Tuple of dimension indices that will be normalized.
training: Boolean of whether bijector runs in training or inference mode.
"""
params = [
((5*2, 4), [-1], False),
((5, 2, 4), [-1], False),
((5, 2, 4), [1, 2], False),
((5, 2, 4), [0, 1], False),
((5*2, 4), [-1], True),
((5, 2, 4), [-1], True),
((5, 2, 4), [1, 2], True),
((5, 2, 4), [0, 1], True)
]
for input_shape, event_dims, training in params:
x_ = np.arange(5 * 4 * 2).astype(np.float32).reshape(input_shape)
with self.cached_session() as sess:
x = constant_op.constant(x_)
# When training, memorize the exact mean of the last
# minibatch that it normalized (instead of moving average assignment).
layer = normalization.BatchNormalization(
axis=event_dims, momentum=0., epsilon=0.)
batch_norm = BatchNormalization(
batchnorm_layer=layer, training=training)
# Minibatch statistics are saved only after norm_x has been computed.
norm_x = batch_norm.inverse(x)
with ops.control_dependencies(batch_norm.batchnorm.updates):
moving_mean = array_ops.identity(batch_norm.batchnorm.moving_mean)
moving_var = array_ops.identity(batch_norm.batchnorm.moving_variance)
denorm_x = batch_norm.forward(array_ops.identity(norm_x))
fldj = batch_norm.forward_log_det_jacobian(
x, event_ndims=len(event_dims))
# Use identity to invalidate cache.
ildj = batch_norm.inverse_log_det_jacobian(
array_ops.identity(denorm_x), event_ndims=len(event_dims))
variables.global_variables_initializer().run()
# Update variables.
norm_x_ = sess.run(norm_x)
[
norm_x_,
moving_mean_,
moving_var_,
denorm_x_,
ildj_,
fldj_,
] = sess.run([
norm_x,
moving_mean,
moving_var,
denorm_x,
ildj,
fldj,
])
self.assertEqual("batch_normalization", batch_norm.name)
reduction_axes = self._reduction_axes(input_shape, event_dims)
keepdims = len(event_dims) > 1
expected_batch_mean = np.mean(
x_, axis=reduction_axes, keepdims=keepdims)
expected_batch_var = np.var(x_, axis=reduction_axes, keepdims=keepdims)
if training:
# When training=True, values become normalized across batch dim and
# original values are recovered after de-normalizing.
zeros = np.zeros_like(norm_x_)
self.assertAllClose(np.mean(zeros, axis=reduction_axes),
np.mean(norm_x_, axis=reduction_axes))
self.assertAllClose(expected_batch_mean, moving_mean_)
self.assertAllClose(expected_batch_var, moving_var_)
self.assertAllClose(x_, denorm_x_, atol=1e-5)
# Since moving statistics are set to batch statistics after
# normalization, ildj and -fldj should match.
self.assertAllClose(ildj_, -fldj_)
# ildj is computed with minibatch statistics.
expected_ildj = np.sum(np.log(1.) - .5 * np.log(
expected_batch_var + batch_norm.batchnorm.epsilon))
self.assertAllClose(expected_ildj, ildj_)
else:
# When training=False, moving_mean, moving_var remain at their
# initialized values (0., 1.), resulting in no scale/shift (a small
# shift occurs if epsilon > 0.)
self.assertAllClose(x_, norm_x_)
self.assertAllClose(x_, denorm_x_, atol=1e-5)
# ildj is computed with saved statistics.
expected_ildj = np.sum(
np.log(1.) - .5 * np.log(1. + batch_norm.batchnorm.epsilon))
self.assertAllClose(expected_ildj, ildj_)
def testMaximumLikelihoodTraining(self):
# Test Maximum Likelihood training with default bijector.
with self.cached_session() as sess:
base_dist = distributions.MultivariateNormalDiag(loc=[0., 0.])
batch_norm = BatchNormalization(training=True)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=base_dist,
bijector=batch_norm)
target_dist = distributions.MultivariateNormalDiag(loc=[1., 2.])
target_samples = target_dist.sample(100)
dist_samples = dist.sample(3000)
loss = -math_ops.reduce_mean(dist.log_prob(target_samples))
with ops.control_dependencies(batch_norm.batchnorm.updates):
train_op = adam.AdamOptimizer(1e-2).minimize(loss)
moving_mean = array_ops.identity(batch_norm.batchnorm.moving_mean)
moving_var = array_ops.identity(batch_norm.batchnorm.moving_variance)
variables.global_variables_initializer().run()
for _ in range(3000):
sess.run(train_op)
[
dist_samples_,
moving_mean_,
moving_var_
] = sess.run([
dist_samples,
moving_mean,
moving_var
])
self.assertAllClose([1., 2.], np.mean(dist_samples_, axis=0), atol=5e-2)
self.assertAllClose([1., 2.], moving_mean_, atol=5e-2)
self.assertAllClose([1., 1.], moving_var_, atol=5e-2)
def testLogProb(self):
with self.cached_session() as sess:
layer = normalization.BatchNormalization(epsilon=0.)
batch_norm = BatchNormalization(batchnorm_layer=layer, training=False)
base_dist = distributions.MultivariateNormalDiag(loc=[0., 0.])
dist = transformed_distribution_lib.TransformedDistribution(
distribution=base_dist,
bijector=batch_norm,
validate_args=True)
samples = dist.sample(int(1e5))
# No volume distortion since training=False, bijector is initialized
# to the identity transformation.
base_log_prob = base_dist.log_prob(samples)
dist_log_prob = dist.log_prob(samples)
variables.global_variables_initializer().run()
base_log_prob_, dist_log_prob_ = sess.run([base_log_prob, dist_log_prob])
self.assertAllClose(base_log_prob_, dist_log_prob_)
def testMutuallyConsistent(self):
# BatchNorm bijector is only mutually consistent when training=False.
dims = 4
with self.cached_session() as sess:
layer = normalization.BatchNormalization(epsilon=0.)
batch_norm = BatchNormalization(batchnorm_layer=layer, training=False)
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=batch_norm,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=2.,
center=0.,
rtol=0.02)
def testInvertMutuallyConsistent(self):
# BatchNorm bijector is only mutually consistent when training=False.
dims = 4
with self.cached_session() as sess:
layer = normalization.BatchNormalization(epsilon=0.)
batch_norm = Invert(
BatchNormalization(batchnorm_layer=layer, training=False))
dist = transformed_distribution_lib.TransformedDistribution(
distribution=normal_lib.Normal(loc=0., scale=1.),
bijector=batch_norm,
event_shape=[dims],
validate_args=True)
self.run_test_sample_consistent_log_prob(
sess_run_fn=sess.run,
dist=dist,
num_samples=int(1e5),
radius=2.,
center=0.,
rtol=0.02)
if __name__ == "__main__":
test.main()
| apache-2.0 |
rallylee/gem5 | tests/configs/realview-switcheroo-atomic.py | 64 | 2427 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.objects import *
from arm_generic import *
import switcheroo
root = LinuxArmFSSwitcheroo(
mem_class=SimpleMemory,
cpu_classes=(AtomicSimpleCPU, AtomicSimpleCPU)
).create_root()
# Setup a custom test method that uses the switcheroo tester that
# switches between CPU models.
run_test = switcheroo.run_test
| bsd-3-clause |
Juraci/tempest | tempest/tests/test_list_tests.py | 34 | 1824 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import six
import subprocess
from tempest.tests import base
class TestTestList(base.TestCase):
def test_testr_list_tests_no_errors(self):
# Remove unit test discover path from env to test tempest tests
test_env = os.environ.copy()
test_env.pop('OS_TEST_PATH')
import_failures = []
p = subprocess.Popen(['testr', 'list-tests'], stdout=subprocess.PIPE,
env=test_env)
ids, err = p.communicate()
self.assertEqual(0, p.returncode,
"test discovery failed, one or more files cause an "
"error on import %s" % ids)
ids = six.text_type(ids).split('\n')
for test_id in ids:
            if re.match(r'(\w+\.){3}\w+', test_id):
if not test_id.startswith('tempest.'):
parts = test_id.partition('tempest')
fail_id = parts[1] + parts[2]
import_failures.append(fail_id)
error_message = ("The following tests have import failures and aren't"
" being run with test filters %s" % import_failures)
self.assertFalse(import_failures, error_message)
| apache-2.0 |
vasyarv/edx-platform | lms/djangoapps/shoppingcart/processors/CyberSource.py | 142 | 19828 | """
Implementation the CyberSource credit card processor.
IMPORTANT: CyberSource will deprecate this version of the API ("Hosted Order Page") in September 2014.
We are keeping this implementation in the code-base for now, but we should
eventually replace this module with the newer implementation (in `CyberSource2.py`)
To enable this implementation, add the following to Django settings:
CC_PROCESSOR_NAME = "CyberSource"
CC_PROCESSOR = {
"CyberSource": {
"SHARED_SECRET": "<shared secret>",
"MERCHANT_ID": "<merchant ID>",
"SERIAL_NUMBER": "<serial number>",
"PURCHASE_ENDPOINT": "<purchase endpoint>"
}
}
"""
import time
import hmac
import binascii
import re
import json
from collections import OrderedDict, defaultdict
from decimal import Decimal, InvalidOperation
from hashlib import sha1
from textwrap import dedent
from django.conf import settings
from django.utils.translation import ugettext as _
from edxmako.shortcuts import render_to_string
from shoppingcart.models import Order
from shoppingcart.processors.exceptions import *
from shoppingcart.processors.helpers import get_processor_config
from microsite_configuration import microsite
def process_postpay_callback(params, **kwargs):
"""
    The top-level entry point of this module.
This function is handed the callback request after the customer has entered the CC info and clicked "buy"
on the external Hosted Order Page.
It is expected to verify the callback and determine if the payment was successful.
It returns {'success':bool, 'order':Order, 'error_html':str}
If successful this function must have the side effect of marking the order purchased and calling the
purchased_callbacks of the cart items.
If unsuccessful this function should not have those side effects but should try to figure out why and
return a helpful-enough error message in error_html.
"""
try:
verify_signatures(params)
result = payment_accepted(params)
if result['accepted']:
# SUCCESS CASE first, rest are some sort of oddity
record_purchase(params, result['order'])
return {'success': True,
'order': result['order'],
'error_html': ''}
else:
return {'success': False,
'order': result['order'],
'error_html': get_processor_decline_html(params)}
except CCProcessorException as error:
return {'success': False,
'order': None, # due to exception we may not have the order
'error_html': get_processor_exception_html(error)}
def processor_hash(value):
"""
Performs the base64(HMAC_SHA1(key, value)) used by CyberSource Hosted Order Page
"""
shared_secret = get_processor_config().get('SHARED_SECRET', '')
hash_obj = hmac.new(shared_secret.encode('utf-8'), value.encode('utf-8'), sha1)
return binascii.b2a_base64(hash_obj.digest())[:-1] # last character is a '\n', which we don't want
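# Illustrative note (hedged): because SHA-1 digests are 20 bytes, the value
# returned above is always a 28-character base64 string; the slice drops the
# trailing newline that binascii.b2a_base64 appends.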
def sign(params, signed_fields_key='orderPage_signedFields', full_sig_key='orderPage_signaturePublic'):
"""
params needs to be an ordered dict, b/c cybersource documentation states that order is important.
Reverse engineered from PHP version provided by cybersource
"""
merchant_id = get_processor_config().get('MERCHANT_ID', '')
order_page_version = get_processor_config().get('ORDERPAGE_VERSION', '7')
serial_number = get_processor_config().get('SERIAL_NUMBER', '')
params['merchantID'] = merchant_id
params['orderPage_timestamp'] = int(time.time() * 1000)
params['orderPage_version'] = order_page_version
params['orderPage_serialNumber'] = serial_number
fields = u",".join(params.keys())
values = u",".join([u"{0}={1}".format(i, params[i]) for i in params.keys()])
fields_sig = processor_hash(fields)
values += u",signedFieldsPublicSignature=" + fields_sig
params[full_sig_key] = processor_hash(values)
params[signed_fields_key] = fields
return params
def verify_signatures(params, signed_fields_key='signedFields', full_sig_key='signedDataPublicSignature'):
"""
Verify the signatures accompanying the POST back from Cybersource Hosted Order Page
returns silently if verified
raises CCProcessorSignatureException if not verified
"""
signed_fields = params.get(signed_fields_key, '').split(',')
data = u",".join([u"{0}={1}".format(k, params.get(k, '')) for k in signed_fields])
signed_fields_sig = processor_hash(params.get(signed_fields_key, ''))
data += u",signedFieldsPublicSignature=" + signed_fields_sig
returned_sig = params.get(full_sig_key, '')
if processor_hash(data) != returned_sig:
raise CCProcessorSignatureException()
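# A hedged round-trip sketch (names chosen for illustration): parameters
# signed with sign() verify cleanly when the matching key names are passed,
# e.g.
#   signed = sign(OrderedDict(amount='10.00'))
#   verify_signatures(signed,
#                     signed_fields_key='orderPage_signedFields',
#                     full_sig_key='orderPage_signaturePublic')
# raises CCProcessorSignatureException if any signed field was tampered with.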
def render_purchase_form_html(cart, **kwargs):
"""
Renders the HTML of the hidden POST form that must be used to initiate a purchase with CyberSource
"""
return render_to_string('shoppingcart/cybersource_form.html', {
'action': get_purchase_endpoint(),
'params': get_signed_purchase_params(cart),
})
def get_signed_purchase_params(cart, **kwargs):
return sign(get_purchase_params(cart))
def get_purchase_params(cart):
total_cost = cart.total_cost
amount = "{0:0.2f}".format(total_cost)
cart_items = cart.orderitem_set.all()
params = OrderedDict()
params['amount'] = amount
params['currency'] = cart.currency
params['orderPage_transactionType'] = 'sale'
params['orderNumber'] = "{0:d}".format(cart.id)
return params
def get_purchase_endpoint():
return get_processor_config().get('PURCHASE_ENDPOINT', '')
def payment_accepted(params):
"""
Check that cybersource has accepted the payment
params: a dictionary of POST parameters returned by CyberSource in their post-payment callback
returns: true if the payment was correctly accepted, for the right amount
false if the payment was not accepted
raises: CCProcessorDataException if the returned message did not provide required parameters
CCProcessorWrongAmountException if the amount charged is different than the order amount
"""
#make sure required keys are present and convert their values to the right type
valid_params = {}
for key, key_type in [('orderNumber', int),
('orderCurrency', str),
('decision', str)]:
if key not in params:
raise CCProcessorDataException(
_("The payment processor did not return a required parameter: {0}").format(key)
)
try:
valid_params[key] = key_type(params[key])
except ValueError:
raise CCProcessorDataException(
_("The payment processor returned a badly-typed value {0} for param {1}.").format(params[key], key)
)
try:
order = Order.objects.get(id=valid_params['orderNumber'])
except Order.DoesNotExist:
raise CCProcessorDataException(_("The payment processor accepted an order whose number is not in our system."))
if valid_params['decision'] == 'ACCEPT':
try:
# Moved reading of charged_amount here from the valid_params loop above because
# only 'ACCEPT' messages have a 'ccAuthReply_amount' parameter
charged_amt = Decimal(params['ccAuthReply_amount'])
except InvalidOperation:
raise CCProcessorDataException(
_("The payment processor returned a badly-typed value {0} for param {1}.").format(
params['ccAuthReply_amount'], 'ccAuthReply_amount'
)
)
if charged_amt == order.total_cost and valid_params['orderCurrency'] == order.currency:
return {'accepted': True,
'amt_charged': charged_amt,
'currency': valid_params['orderCurrency'],
'order': order}
else:
raise CCProcessorWrongAmountException(
_("The amount charged by the processor {0} {1} is different than the total cost of the order {2} {3}.")
.format(
charged_amt,
valid_params['orderCurrency'],
order.total_cost,
order.currency
)
)
else:
return {'accepted': False,
'amt_charged': 0,
'currency': 'usd',
'order': order}
def record_purchase(params, order):
"""
Record the purchase and run purchased_callbacks
"""
ccnum_str = params.get('card_accountNumber', '')
m = re.search("\d", ccnum_str)
if m:
ccnum = ccnum_str[m.start():]
else:
ccnum = "####"
order.purchase(
first=params.get('billTo_firstName', ''),
last=params.get('billTo_lastName', ''),
street1=params.get('billTo_street1', ''),
street2=params.get('billTo_street2', ''),
city=params.get('billTo_city', ''),
state=params.get('billTo_state', ''),
country=params.get('billTo_country', ''),
postalcode=params.get('billTo_postalCode', ''),
ccnum=ccnum,
cardtype=CARDTYPE_MAP[params.get('card_cardType', 'UNKNOWN')],
processor_reply_dump=json.dumps(params)
)
def get_processor_decline_html(params):
"""Have to parse through the error codes to return a helpful message"""
# see if we have an override in the microsites
payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
msg = _(
"Sorry! Our payment processor did not accept your payment. "
"The decision they returned was {decision_text}, "
"and the reason was {reason_text}. "
"You were not charged. "
"Please try a different form of payment. "
"Contact us with payment-related questions at {email}."
)
formatted = msg.format(
decision_text='<span class="decision">{}</span>'.format(params['decision']),
reason_text='<span class="reason">{code}:{msg}</span>'.format(
code=params['reasonCode'], msg=REASONCODE_MAP[params['reasonCode']],
),
email=payment_support_email,
)
return '<p class="error_msg">{}</p>'.format(formatted)
def get_processor_exception_html(exception):
"""Return error HTML associated with exception"""
# see if we have an override in the microsites
payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
if isinstance(exception, CCProcessorDataException):
msg = _(
"Sorry! Our payment processor sent us back a payment confirmation "
"that had inconsistent data!"
"We apologize that we cannot verify whether the charge went through "
"and take further action on your order."
"The specific error message is: {error_message}. "
"Your credit card may possibly have been charged. "
"Contact us with payment-specific questions at {email}."
)
formatted = msg.format(
error_message='<span class="exception_msg">{msg}</span>'.format(
msg=exception.message,
),
email=payment_support_email,
)
return '<p class="error_msg">{}</p>'.format(formatted)
elif isinstance(exception, CCProcessorWrongAmountException):
msg = _(
"Sorry! Due to an error your purchase was charged for "
"a different amount than the order total! "
"The specific error message is: {error_message}. "
"Your credit card has probably been charged. "
"Contact us with payment-specific questions at {email}."
)
formatted = msg.format(
error_message='<span class="exception_msg">{msg}</span>'.format(
msg=exception.message,
),
email=payment_support_email,
)
return '<p class="error_msg">{}</p>'.format(formatted)
elif isinstance(exception, CCProcessorSignatureException):
msg = _(
"Sorry! Our payment processor sent us back a corrupted message "
"regarding your charge, so we are unable to validate that "
"the message actually came from the payment processor. "
"The specific error message is: {error_message}. "
"We apologize that we cannot verify whether the charge went through "
"and take further action on your order. "
"Your credit card may possibly have been charged. "
"Contact us with payment-specific questions at {email}."
)
formatted = msg.format(
error_message='<span class="exception_msg">{msg}</span>'.format(
msg=exception.message,
),
email=payment_support_email,
)
return '<p class="error_msg">{}</p>'.format(formatted)
# fallthrough case, which basically never happens
return '<p class="error_msg">EXCEPTION!</p>'
CARDTYPE_MAP = defaultdict(lambda: "UNKNOWN")
CARDTYPE_MAP.update(
{
'001': 'Visa',
'002': 'MasterCard',
'003': 'American Express',
'004': 'Discover',
'005': 'Diners Club',
'006': 'Carte Blanche',
'007': 'JCB',
'014': 'EnRoute',
'021': 'JAL',
'024': 'Maestro',
'031': 'Delta',
'033': 'Visa Electron',
'034': 'Dankort',
'035': 'Laser',
'036': 'Carte Bleue',
'037': 'Carta Si',
'042': 'Maestro',
'043': 'GE Money UK card'
}
)
REASONCODE_MAP = defaultdict(lambda: "UNKNOWN REASON")
REASONCODE_MAP.update(
{
'100': _('Successful transaction.'),
'101': _('The request is missing one or more required fields.'),
'102': _('One or more fields in the request contains invalid data.'),
'104': dedent(_(
"""
The merchantReferenceCode sent with this authorization request matches the
merchantReferenceCode of another authorization request that you sent in the last 15 minutes.
Possible fix: retry the payment after 15 minutes.
""")),
'150': _('Error: General system failure. Possible fix: retry the payment after a few minutes.'),
'151': dedent(_(
"""
Error: The request was received but there was a server timeout.
This error does not include timeouts between the client and the server.
Possible fix: retry the payment after some time.
""")),
'152': dedent(_(
"""
Error: The request was received, but a service did not finish running in time
Possible fix: retry the payment after some time.
""")),
'201': _('The issuing bank has questions about the request. Possible fix: retry with another form of payment'),
'202': dedent(_(
"""
Expired card. You might also receive this if the expiration date you
provided does not match the date the issuing bank has on file.
Possible fix: retry with another form of payment
""")),
'203': dedent(_(
"""
General decline of the card. No other information provided by the issuing bank.
Possible fix: retry with another form of payment
""")),
'204': _('Insufficient funds in the account. Possible fix: retry with another form of payment'),
# 205 was Stolen or lost card. Might as well not show this message to the person using such a card.
'205': _('Unknown reason'),
'207': _('Issuing bank unavailable. Possible fix: retry again after a few minutes'),
'208': dedent(_(
"""
Inactive card or card not authorized for card-not-present transactions.
Possible fix: retry with another form of payment
""")),
'210': _('The card has reached the credit limit. Possible fix: retry with another form of payment'),
'211': _('Invalid card verification number. Possible fix: retry with another form of payment'),
# 221 was The customer matched an entry on the processor's negative file.
# Might as well not show this message to the person using such a card.
'221': _('Unknown reason'),
'231': _('Invalid account number. Possible fix: retry with another form of payment'),
'232': dedent(_(
"""
The card type is not accepted by the payment processor.
Possible fix: retry with another form of payment
""")),
'233': _('General decline by the processor. Possible fix: retry with another form of payment'),
'234': _(
"There is a problem with our CyberSource merchant configuration. Please let us know at {0}"
).format(settings.PAYMENT_SUPPORT_EMAIL),
# reason code 235 only applies if we are processing a capture through the API. so we should never see it
'235': _('The requested amount exceeds the originally authorized amount.'),
'236': _('Processor Failure. Possible fix: retry the payment'),
# reason code 238 only applies if we are processing a capture through the API. so we should never see it
'238': _('The authorization has already been captured'),
# reason code 239 only applies if we are processing a capture or credit through the API,
# so we should never see it
'239': _('The requested transaction amount must match the previous transaction amount.'),
'240': dedent(_(
"""
The card type sent is invalid or does not correlate with the credit card number.
Possible fix: retry with the same card or another form of payment
""")),
# reason code 241 only applies when we are processing a capture or credit through the API,
# so we should never see it
'241': _('The request ID is invalid.'),
# reason code 242 occurs if there was not a previously successful authorization request or
# if the previously successful authorization has already been used by another capture request.
# This reason code only applies when we are processing a capture through the API
# so we should never see it
'242': dedent(_(
"""
You requested a capture through the API, but there is no corresponding, unused authorization record.
""")),
# we should never see 243
'243': _('The transaction has already been settled or reversed.'),
# reason code 246 applies only if we are processing a void through the API. so we should never see it
'246': dedent(_(
"""
The capture or credit is not voidable because the capture or credit information has already been
submitted to your processor. Or, you requested a void for a type of transaction that cannot be voided.
""")),
# reason code 247 applies only if we are processing a void through the API. so we should never see it
'247': _('You requested a credit for a capture that was previously voided'),
'250': dedent(_(
"""
Error: The request was received, but there was a timeout at the payment processor.
Possible fix: retry the payment.
""")),
'520': dedent(_(
"""
The authorization request was approved by the issuing bank but declined by CyberSource.'
Possible fix: retry with a different form of payment.
""")),
}
)
| agpl-3.0 |
Gamebasis/3DGamebasisServer | GameData/blender-2.71-windows64/2.71/python/lib/plat-unixware7/STROPTS.py | 106 | 6524 | # Generated by h2py from /usr/include/sys/stropts.h
# Included from sys/types.h
def quad_low(x): return x.val[0]
ADT_EMASKSIZE = 8
SHRT_MIN = -32768
SHRT_MAX = 32767
INT_MIN = (-2147483647-1)
INT_MAX = 2147483647
LONG_MIN = (-2147483647-1)
LONG_MAX = 2147483647
OFF32_MAX = LONG_MAX
ISTAT_ASSERTED = 0
ISTAT_ASSUMED = 1
ISTAT_NONE = 2
OFF_MAX = OFF32_MAX
CLOCK_MAX = LONG_MAX
P_MYID = (-1)
P_MYHOSTID = (-1)
# Included from sys/select.h
FD_SETSIZE = 4096
NBBY = 8
NULL = 0
# Included from sys/conf.h
D_NEW = 0x00
D_OLD = 0x01
D_DMA = 0x02
D_BLKOFF = 0x400
D_LFS = 0x8000
D_STR = 0x0800
D_MOD = 0x1000
D_PSEUDO = 0x2000
D_RANDOM = 0x4000
D_HOT = 0x10000
D_SEEKNEG = 0x04
D_TAPE = 0x08
D_NOBRKUP = 0x10
D_INITPUB = 0x20
D_NOSPECMACDATA = 0x40
D_RDWEQ = 0x80
SECMASK = (D_INITPUB|D_NOSPECMACDATA|D_RDWEQ)
DAF_REQDMA = 0x1
DAF_PHYSREQ = 0x2
DAF_PRE8 = 0x4
DAF_STATIC = 0x8
DAF_STR = 0x10
D_MP = 0x100
D_UPF = 0x200
ROOTFS_NAMESZ = 7
FMNAMESZ = 8
MCD_VERSION = 1
DI_BCBP = 0
DI_MEDIA = 1
# Included from sys/secsys.h
ES_MACOPENLID = 1
ES_MACSYSLID = 2
ES_MACROOTLID = 3
ES_PRVINFO = 4
ES_PRVSETCNT = 5
ES_PRVSETS = 6
ES_MACADTLID = 7
ES_PRVID = 8
ES_TPGETMAJOR = 9
SA_EXEC = 0o01
SA_WRITE = 0o02
SA_READ = 0o04
SA_SUBSIZE = 0o10
# Included from sys/stropts_f.h
X_STR = (ord('S')<<8)
X_I_BASE = (X_STR|0o200)
X_I_NREAD = (X_STR|0o201)
X_I_PUSH = (X_STR|0o202)
X_I_POP = (X_STR|0o203)
X_I_LOOK = (X_STR|0o204)
X_I_FLUSH = (X_STR|0o205)
X_I_SRDOPT = (X_STR|0o206)
X_I_GRDOPT = (X_STR|0o207)
X_I_STR = (X_STR|0o210)
X_I_SETSIG = (X_STR|0o211)
X_I_GETSIG = (X_STR|0o212)
X_I_FIND = (X_STR|0o213)
X_I_LINK = (X_STR|0o214)
X_I_UNLINK = (X_STR|0o215)
X_I_PEEK = (X_STR|0o217)
X_I_FDINSERT = (X_STR|0o220)
X_I_SENDFD = (X_STR|0o221)
X_I_RECVFD = (X_STR|0o222)
# Included from unistd.h
# Included from sys/unistd.h
R_OK = 0o04
W_OK = 0o02
X_OK = 0o01
F_OK = 000
EFF_ONLY_OK = 0o10
EX_OK = 0o20
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
_SC_ARG_MAX = 1
_SC_CHILD_MAX = 2
_SC_CLK_TCK = 3
_SC_NGROUPS_MAX = 4
_SC_OPEN_MAX = 5
_SC_JOB_CONTROL = 6
_SC_SAVED_IDS = 7
_SC_VERSION = 8
_SC_PASS_MAX = 9
_SC_LOGNAME_MAX = 10
_SC_PAGESIZE = 11
_SC_PAGE_SIZE = _SC_PAGESIZE
_SC_XOPEN_VERSION = 12
_SC_NACLS_MAX = 13
_SC_NPROCESSORS_CONF = 14
_SC_NPROCESSORS_ONLN = 15
_SC_NPROCESSES = 39
_SC_TOTAL_MEMORY = 40
_SC_USEABLE_MEMORY = 41
_SC_GENERAL_MEMORY = 42
_SC_DEDICATED_MEMORY = 43
_SC_NCGS_CONF = 44
_SC_NCGS_ONLN = 45
_SC_MAX_CPUS_PER_CG = 46
_SC_CG_SIMPLE_IMPL = 47
_SC_CACHE_LINE = 48
_SC_SYSTEM_ID = 49
_SC_THREADS = 51
_SC_THREAD_ATTR_STACKADDR = 52
_SC_THREAD_ATTR_STACKSIZE = 53
_SC_THREAD_DESTRUCTOR_ITERATIONS = 54
_SC_THREAD_KEYS_MAX = 55
_SC_THREAD_PRIORITY_SCHEDULING = 56
_SC_THREAD_PRIO_INHERIT = 57
_SC_THREAD_PRIO_PROTECT = 58
_SC_THREAD_STACK_MIN = 59
_SC_THREAD_PROCESS_SHARED = 60
_SC_THREAD_SAFE_FUNCTIONS = 61
_SC_THREAD_THREADS_MAX = 62
_SC_KERNEL_VM = 63
_SC_TZNAME_MAX = 320
_SC_STREAM_MAX = 321
_SC_XOPEN_CRYPT = 323
_SC_XOPEN_ENH_I18N = 324
_SC_XOPEN_SHM = 325
_SC_XOPEN_XCU_VERSION = 327
_SC_AES_OS_VERSION = 330
_SC_ATEXIT_MAX = 331
_SC_2_C_BIND = 350
_SC_2_C_DEV = 351
_SC_2_C_VERSION = 352
_SC_2_CHAR_TERM = 353
_SC_2_FORT_DEV = 354
_SC_2_FORT_RUN = 355
_SC_2_LOCALEDEF = 356
_SC_2_SW_DEV = 357
_SC_2_UPE = 358
_SC_2_VERSION = 359
_SC_BC_BASE_MAX = 370
_SC_BC_DIM_MAX = 371
_SC_BC_SCALE_MAX = 372
_SC_BC_STRING_MAX = 373
_SC_COLL_WEIGHTS_MAX = 380
_SC_EXPR_NEST_MAX = 381
_SC_LINE_MAX = 382
_SC_RE_DUP_MAX = 383
_SC_IOV_MAX = 390
_SC_NPROC_CONF = 391
_SC_NPROC_ONLN = 392
_SC_XOPEN_UNIX = 400
_SC_SEMAPHORES = 440
_CS_PATH = 1
__O_CS_HOSTNAME = 2
_CS_RELEASE = 3
_CS_VERSION = 4
__O_CS_MACHINE = 5
__O_CS_ARCHITECTURE = 6
_CS_HW_SERIAL = 7
__O_CS_HW_PROVIDER = 8
_CS_SRPC_DOMAIN = 9
_CS_INITTAB_NAME = 10
__O_CS_SYSNAME = 11
_CS_LFS_CFLAGS = 20
_CS_LFS_LDFLAGS = 21
_CS_LFS_LIBS = 22
_CS_LFS_LINTFLAGS = 23
_CS_LFS64_CFLAGS = 24
_CS_LFS64_LDFLAGS = 25
_CS_LFS64_LIBS = 26
_CS_LFS64_LINTFLAGS = 27
_CS_ARCHITECTURE = 100
_CS_BUSTYPES = 101
_CS_HOSTNAME = 102
_CS_HW_PROVIDER = 103
_CS_KERNEL_STAMP = 104
_CS_MACHINE = 105
_CS_OS_BASE = 106
_CS_OS_PROVIDER = 107
_CS_SYSNAME = 108
_CS_USER_LIMIT = 109
_PC_LINK_MAX = 1
_PC_MAX_CANON = 2
_PC_MAX_INPUT = 3
_PC_NAME_MAX = 4
_PC_PATH_MAX = 5
_PC_PIPE_BUF = 6
_PC_NO_TRUNC = 7
_PC_VDISABLE = 8
_PC_CHOWN_RESTRICTED = 9
_PC_FILESIZEBITS = 10
_POSIX_VERSION = 199009
_XOPEN_VERSION = 4
GF_PATH = "/etc/group"
PF_PATH = "/etc/passwd"
F_ULOCK = 0
F_LOCK = 1
F_TLOCK = 2
F_TEST = 3
_POSIX_JOB_CONTROL = 1
_POSIX_SAVED_IDS = 1
_POSIX_VDISABLE = 0
NULL = 0
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
_XOPEN_UNIX = 1
_XOPEN_ENH_I18N = 1
_XOPEN_XPG4 = 1
_POSIX2_C_VERSION = 199209
_POSIX2_VERSION = 199209
_XOPEN_XCU_VERSION = 4
_POSIX_SEMAPHORES = 1
_POSIX_THREADS = 1
_POSIX_THREAD_ATTR_STACKADDR = 1
_POSIX_THREAD_ATTR_STACKSIZE = 1
_POSIX_THREAD_PRIORITY_SCHEDULING = 1
_POSIX_THREAD_PROCESS_SHARED = 1
_POSIX_THREAD_SAFE_FUNCTIONS = 1
_POSIX2_C_BIND = 1
_POSIX2_CHAR_TERM = 1
_POSIX2_FORT_RUN = 1
_POSIX2_LOCALEDEF = 1
_POSIX2_UPE = 1
_LFS_ASYNCHRONOUS_IO = 1
_LFS_LARGEFILE = 1
_LFS64_ASYNCHRONOUS_IO = 1
_LFS64_LARGEFILE = 1
_LFS64_STDIO = 1
FMNAMESZ = 8
SNDZERO = 0x001
SNDPIPE = 0x002
RNORM = 0x000
RMSGD = 0x001
RMSGN = 0x002
RMODEMASK = 0x003
RPROTDAT = 0x004
RPROTDIS = 0x008
RPROTNORM = 0x010
RPROTMASK = 0x01c
FLUSHR = 0x01
FLUSHW = 0x02
FLUSHRW = 0x03
FLUSHBAND = 0x04
S_INPUT = 0x0001
S_HIPRI = 0x0002
S_OUTPUT = 0x0004
S_MSG = 0x0008
S_ERROR = 0x0010
S_HANGUP = 0x0020
S_RDNORM = 0x0040
S_WRNORM = S_OUTPUT
S_RDBAND = 0x0080
S_WRBAND = 0x0100
S_BANDURG = 0x0200
RS_HIPRI = 0x01
MSG_HIPRI = 0x01
MSG_ANY = 0x02
MSG_BAND = 0x04
MSG_DISCARD = 0x08
MSG_PEEKIOCTL = 0x10
MORECTL = 1
MOREDATA = 2
MUXID_ALL = (-1)
ANYMARK = 0x01
LASTMARK = 0x02
STR = (ord('S')<<8)
I_NREAD = (STR|0o1)
I_PUSH = (STR|0o2)
I_POP = (STR|0o3)
I_LOOK = (STR|0o4)
I_FLUSH = (STR|0o5)
I_SRDOPT = (STR|0o6)
I_GRDOPT = (STR|0o7)
I_STR = (STR|0o10)
I_SETSIG = (STR|0o11)
I_GETSIG = (STR|0o12)
I_FIND = (STR|0o13)
I_LINK = (STR|0o14)
I_UNLINK = (STR|0o15)
I_PEEK = (STR|0o17)
I_FDINSERT = (STR|0o20)
I_SENDFD = (STR|0o21)
I_RECVFD = (STR|0o22)
I_E_RECVFD = (STR|0o16)
I_RECVFD = (STR|0o16)
I_RECVFD = (STR|0o22)
I_SWROPT = (STR|0o23)
I_GWROPT = (STR|0o24)
I_LIST = (STR|0o25)
I_PLINK = (STR|0o26)
I_PUNLINK = (STR|0o27)
I_FLUSHBAND = (STR|0o34)
I_CKBAND = (STR|0o35)
I_GETBAND = (STR|0o36)
I_ATMARK = (STR|0o37)
I_SETCLTIME = (STR|0o40)
I_GETCLTIME = (STR|0o41)
I_CANPUT = (STR|0o42)
I_S_RECVFD = (STR|0o43)
I_STATS = (STR|0o44)
I_BIGPIPE = (STR|0o45)
I_GETTP = (STR|0o46)
INFTIM = -1
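# These constants mirror the C header values one-to-one.  A minimal usage
# sketch (illustrative only; assumes fd is an open STREAMS descriptor on a
# platform that supports these ioctls):
#
#   import fcntl
#   fcntl.ioctl(fd, I_FLUSH, FLUSHRW)   # flush both read and write queues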
| gpl-3.0 |
varunkamra/kuma | vendor/packages/translate/tools/pocount.py | 24 | 12909 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2003-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Count strings and words for supported localization files.
These include: XLIFF, TMX, Gettext PO and MO, Qt .ts and .qm, Wordfast TM, etc.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/pocount.html
for examples and usage instructions.
"""
from __future__ import print_function
import logging
import os
import sys
from argparse import ArgumentParser
from translate.storage import factory, statsdb
logger = logging.getLogger(__name__)
# define style constants
style_full, style_csv, style_short_strings, style_short_words = range(4)
# default output style
default_style = style_full
def calcstats_old(filename):
"""This is the previous implementation of calcstats() and is left for
    comparison and debugging purposes."""
# ignore totally blank or header units
try:
store = factory.getobject(filename)
except ValueError as e:
logger.warning(e)
return {}
units = filter(lambda unit: unit.istranslatable(), store.units)
translated = translatedmessages(units)
fuzzy = fuzzymessages(units)
review = filter(lambda unit: unit.isreview(), units)
untranslated = untranslatedmessages(units)
wordcounts = dict(map(lambda unit: (unit, statsdb.wordsinunit(unit)), units))
sourcewords = lambda elementlist: sum(map(lambda unit: wordcounts[unit][0], elementlist))
targetwords = lambda elementlist: sum(map(lambda unit: wordcounts[unit][1], elementlist))
stats = {}
# units
stats["translated"] = len(translated)
stats["fuzzy"] = len(fuzzy)
stats["untranslated"] = len(untranslated)
stats["review"] = len(review)
stats["total"] = stats["translated"] + \
stats["fuzzy"] + \
stats["untranslated"]
# words
stats["translatedsourcewords"] = sourcewords(translated)
stats["translatedtargetwords"] = targetwords(translated)
stats["fuzzysourcewords"] = sourcewords(fuzzy)
stats["untranslatedsourcewords"] = sourcewords(untranslated)
stats["reviewsourcewords"] = sourcewords(review)
stats["totalsourcewords"] = stats["translatedsourcewords"] + \
stats["fuzzysourcewords"] + \
stats["untranslatedsourcewords"]
return stats
def calcstats(filename):
statscache = statsdb.StatsCache()
return statscache.filetotals(filename, extended=True)
def summarize(title, stats, style=style_full, indent=8, incomplete_only=False):
"""Print summary for a .po file in specified format.
:param title: name of .po file
    :param stats: dictionary of translation statistics for the file specified
:param indent: indentation of the 2nd column (length of longest filename)
:param incomplete_only: omit fully translated files
:type incomplete_only: Boolean
:rtype: Boolean
:return: 1 if counting incomplete files (incomplete_only=True) and the
file is completely translated, 0 otherwise
"""
    def percent(numerator, denominator):
        if denominator == 0:
            return 0
        else:
            return numerator * 100 / denominator
if incomplete_only and (stats["total"] == stats["translated"]):
return 1
if (style == style_csv):
print("%s, " % title, end=' ')
print("%d, %d, %d," % (stats["translated"],
stats["translatedsourcewords"],
stats["translatedtargetwords"]), end=' ')
print("%d, %d," % (stats["fuzzy"], stats["fuzzysourcewords"]), end=' ')
print("%d, %d," % (stats["untranslated"],
stats["untranslatedsourcewords"]), end=' ')
print("%d, %d" % (stats["total"], stats["totalsourcewords"]), end=' ')
if stats["review"] > 0:
print(", %d, %d" % (stats["review"], stats["reviewsourdcewords"]), end=' ')
print()
elif (style == style_short_strings):
spaces = " " * (indent - len(title))
print("%s%s strings: total: %d\t| %dt\t%df\t%du\t| %d%%t\t%d%%f\t%d%%u" % (
title, spaces,
stats["total"], stats["translated"], stats["fuzzy"], stats["untranslated"],
percent(stats["translated"], stats["total"]),
percent(stats["fuzzy"], stats["total"]),
percent(stats["untranslated"], stats["total"])))
elif (style == style_short_words):
spaces = " " * (indent - len(title))
print("%s%s source words: total: %d\t| %dt\t%df\t%du\t| %d%%t\t%d%%f\t%d%%u" % (
title, spaces,
stats["totalsourcewords"], stats["translatedsourcewords"], stats["fuzzysourcewords"], stats["untranslatedsourcewords"],
percent(stats["translatedsourcewords"], stats["totalsourcewords"]),
percent(stats["fuzzysourcewords"], stats["totalsourcewords"]),
percent(stats["untranslatedsourcewords"], stats["totalsourcewords"])))
else: # style == style_full
print(title)
print("type strings words (source) words (translation)")
print("translated: %5d (%3d%%) %10d (%3d%%) %15d" % (
stats["translated"],
percent(stats["translated"], stats["total"]),
stats["translatedsourcewords"],
percent(stats["translatedsourcewords"], stats["totalsourcewords"]),
stats["translatedtargetwords"]))
print("fuzzy: %5d (%3d%%) %10d (%3d%%) n/a" % (
stats["fuzzy"],
percent(stats["fuzzy"], stats["total"]),
stats["fuzzysourcewords"],
percent(stats["fuzzysourcewords"], stats["totalsourcewords"])))
print("untranslated: %5d (%3d%%) %10d (%3d%%) n/a" % (
stats["untranslated"],
percent(stats["untranslated"], stats["total"]),
stats["untranslatedsourcewords"],
percent(stats["untranslatedsourcewords"], stats["totalsourcewords"])))
print("Total: %5d %17d %22d" % (
stats["total"],
stats["totalsourcewords"],
stats["translatedtargetwords"]))
if "extended" in stats:
print("")
for state, e_stats in stats["extended"].iteritems():
print("%s: %5d (%3d%%) %10d (%3d%%) %15d" % (
state, e_stats["units"], percent(e_stats["units"], stats["total"]),
e_stats["sourcewords"], percent(e_stats["sourcewords"], stats["totalsourcewords"]),
e_stats["targetwords"]))
if stats["review"] > 0:
print("review: %5d %17d n/a" % (
stats["review"], stats["reviewsourcewords"]))
print()
return 0
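# An illustrative call of the helpers above (the stats dictionary keys are the
# ones produced by calcstats(); the file name is hypothetical):
#
#   stats = calcstats("locale/af/messages.po")
#   skipped = summarize("locale/af/messages.po", stats, style=style_short_words)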
def fuzzymessages(units):
return filter(lambda unit: unit.isfuzzy() and unit.target, units)
def translatedmessages(units):
return filter(lambda unit: unit.istranslated(), units)
def untranslatedmessages(units):
return filter(lambda unit: not (unit.istranslated() or unit.isfuzzy()) and unit.source, units)
class summarizer:
def __init__(self, filenames, style=default_style, incomplete_only=False):
self.totals = {}
self.filecount = 0
self.longestfilename = 0
self.style = style
self.incomplete_only = incomplete_only
self.complete_count = 0
if (self.style == style_csv):
print("""Filename, Translated Messages, Translated Source Words, Translated
Target Words, Fuzzy Messages, Fuzzy Source Words, Untranslated Messages,
Untranslated Source Words, Total Messages, Total Source Words,
Review Messages, Review Source Words""")
if (self.style == style_short_strings or self.style == style_short_words):
for filename in filenames: # find longest filename
if (len(filename) > self.longestfilename):
self.longestfilename = len(filename)
for filename in filenames:
if not os.path.exists(filename):
logger.error("cannot process %s: does not exist", filename)
continue
elif os.path.isdir(filename):
self.handledir(filename)
else:
self.handlefile(filename)
if self.filecount > 1 and (self.style == style_full):
if self.incomplete_only:
summarize("TOTAL (incomplete only):", self.totals,
incomplete_only=True)
print("File count (incomplete): %5d" % (self.filecount - self.complete_count))
else:
summarize("TOTAL:", self.totals, incomplete_only=False)
print("File count: %5d" % (self.filecount))
print()
def updatetotals(self, stats):
"""Update self.totals with the statistics in stats."""
for key in stats.keys():
if key == "extended":
# FIXME: calculate extended totals
continue
            if key not in self.totals:
self.totals[key] = 0
self.totals[key] += stats[key]
def handlefile(self, filename):
try:
stats = calcstats(filename)
self.updatetotals(stats)
self.complete_count += summarize(filename, stats, self.style,
self.longestfilename,
self.incomplete_only)
self.filecount += 1
except Exception: # This happens if we have a broken file.
logger.error(sys.exc_info()[1])
def handlefiles(self, dirname, filenames):
for filename in filenames:
pathname = os.path.join(dirname, filename)
if os.path.isdir(pathname):
self.handledir(pathname)
else:
self.handlefile(pathname)
def handledir(self, dirname):
path, name = os.path.split(dirname)
if name in ["CVS", ".svn", "_darcs", ".git", ".hg", ".bzr"]:
return
entries = os.listdir(dirname)
self.handlefiles(dirname, entries)
def main():
parser = ArgumentParser()
parser.add_argument("--incomplete", action="store_true", default=False,
dest="incomplete_only",
help="skip 100%% translated files.")
if sys.version_info[:2] <= (2, 6):
# Python 2.6 using argparse from PyPI cannot define a mutually
# exclusive group as a child of a group, but it works if it is a child
# of the parser. We lose the group title but the functionality works.
# See https://code.google.com/p/argparse/issues/detail?id=90
megroup = parser.add_mutually_exclusive_group()
else:
output_group = parser.add_argument_group("Output format")
megroup = output_group.add_mutually_exclusive_group()
megroup.add_argument("--full", action="store_const", const=style_full,
dest="style", default=style_full,
help="(default) statistics in full, verbose format")
megroup.add_argument("--csv", action="store_const", const=style_csv,
dest="style",
help="statistics in CSV format")
megroup.add_argument("--short", action="store_const", const=style_short_strings,
dest="style",
help="same as --short-strings")
megroup.add_argument("--short-strings", action="store_const",
const=style_short_strings, dest="style",
help="statistics of strings in short format - one line per file")
megroup.add_argument("--short-words", action="store_const",
const=style_short_words, dest="style",
help="statistics of words in short format - one line per file")
parser.add_argument("files", nargs="+")
args = parser.parse_args()
logging.basicConfig(format="%(name)s: %(levelname)s: %(message)s")
summarizer(args.files, args.style, args.incomplete_only)
if __name__ == '__main__':
main()
| mpl-2.0 |
kingvuplus/BH-SH4 | mytest.py | 2 | 16609 | import sys, os
if os.path.isfile("/usr/lib/enigma2/python/enigma.zip"):
sys.path.append("/usr/lib/enigma2/python/enigma.zip")
from Tools.Profile import profile, profile_final
profile("PYTHON_START")
import Tools.RedirectOutput
import enigma
import eConsoleImpl
import eBaseImpl
enigma.eTimer = eBaseImpl.eTimer
enigma.eSocketNotifier = eBaseImpl.eSocketNotifier
enigma.eConsoleAppContainer = eConsoleImpl.eConsoleAppContainer
from traceback import print_exc
profile("SimpleSummary")
from Screens import InfoBar
from Screens.SimpleSummary import SimpleSummary
from sys import stdout, exc_info
profile("Bouquets")
from Components.config import config, configfile, ConfigText, ConfigYesNo, ConfigInteger, NoSave
config.misc.load_unlinked_userbouquets = ConfigYesNo(default=True)
def setLoadUnlinkedUserbouquets(configElement):
enigma.eDVBDB.getInstance().setLoadUnlinkedUserbouquets(configElement.value)
config.misc.load_unlinked_userbouquets.addNotifier(setLoadUnlinkedUserbouquets)
enigma.eDVBDB.getInstance().reloadBouquets()
profile("ParentalControl")
import Components.ParentalControl
Components.ParentalControl.InitParentalControl()
profile("LOAD:Navigation")
from Navigation import Navigation
profile("LOAD:skin")
from skin import readSkin
profile("LOAD:Tools")
from Tools.Directories import InitFallbackFiles, resolveFilename, SCOPE_PLUGINS, SCOPE_CURRENT_SKIN
InitFallbackFiles()
profile("config.misc")
config.misc.radiopic = ConfigText(default = resolveFilename(SCOPE_CURRENT_SKIN, "radio.mvi"))
config.misc.blackradiopic = ConfigText(default = resolveFilename(SCOPE_CURRENT_SKIN, "black.mvi"))
config.misc.useTransponderTime = ConfigYesNo(default=True)
config.misc.startCounter = ConfigInteger(default=0) # number of e2 starts...
config.misc.standbyCounter = NoSave(ConfigInteger(default=0)) # number of standby
config.misc.DeepStandby = NoSave(ConfigYesNo(default=False)) # detect deepstandby
config.misc.RestartUI = ConfigYesNo(default=False) # detect user interface restart
config.misc.epgcache_filename = ConfigText(default = "/hdd/epg.dat")
def setEPGCachePath(configElement):
enigma.eEPGCache.getInstance().setCacheFile(configElement.value)
#demo code for use of standby enter leave callbacks
#def leaveStandby():
# print "!!!!!!!!!!!!!!!!!leave standby"
#def standbyCountChanged(configElement):
# print "!!!!!!!!!!!!!!!!!enter standby num", configElement.value
# from Screens.Standby import inStandby
# inStandby.onClose.append(leaveStandby)
#config.misc.standbyCounter.addNotifier(standbyCountChanged, initial_call = False)
####################################################
def useTransponderTimeChanged(configElement):
enigma.eDVBLocalTimeHandler.getInstance().setUseDVBTime(configElement.value)
config.misc.useTransponderTime.addNotifier(useTransponderTimeChanged)
profile("Twisted")
try:
import twisted.python.runtime
import e2reactor
e2reactor.install()
from twisted.internet import reactor
def runReactor():
reactor.run(installSignalHandlers=False)
except ImportError:
print "twisted not available"
def runReactor():
enigma.runMainloop()
profile("LOAD:Plugin")
# initialize autorun plugins and plugin menu entries
from Components.PluginComponent import plugins
profile("LOAD:Wizard")
from Screens.Wizard import wizardManager
from Screens.StartWizard import *
import Screens.Rc
from Tools.BoundFunction import boundFunction
from Plugins.Plugin import PluginDescriptor
profile("misc")
had = dict()
def dump(dir, p = ""):
if isinstance(dir, dict):
for (entry, val) in dir.items():
dump(val, p + "(dict)/" + entry)
if hasattr(dir, "__dict__"):
for name, value in dir.__dict__.items():
if not had.has_key(str(value)):
had[str(value)] = 1
dump(value, p + "/" + str(name))
else:
print p + "/" + str(name) + ":" + str(dir.__class__) + "(cycle)"
else:
print p + ":" + str(dir)
# + ":" + str(dir.__class__)
# display
profile("LOAD:ScreenGlobals")
from Screens.Globals import Globals
from Screens.SessionGlobals import SessionGlobals
from Screens.Screen import Screen
profile("Screen")
Screen.global_screen = Globals()
# Session.open:
# * push current active dialog ('current_dialog') onto stack
# * call execEnd for this dialog
# * clear in_exec flag
# * hide screen
# * instantiate new dialog into 'current_dialog'
# * create screens, components
# * read, apply skin
# * create GUI for screen
# * call execBegin for new dialog
# * set in_exec
# * show gui screen
# * call components' / screen's onExecBegin
# ... screen is active, until it calls 'close'...
# Session.close:
# * assert in_exec
# * save return value
# * start deferred close handler ('onClose')
# * execEnd
# * clear in_exec
# * hide screen
# .. a moment later:
# Session.doClose:
# * destroy screen
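# A minimal sketch of the lifecycle described above (MessageBox stands in for
# any Screen subclass; the callback receives whatever arguments the dialog
# passed to close()):
#
#   def answered(result=None):
#       print "dialog closed with", result
#   session.openWithCallback(answered, MessageBox, _("Really quit?"))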
class Session:
def __init__(self, desktop = None, summary_desktop = None, navigation = None):
self.desktop = desktop
self.summary_desktop = summary_desktop
self.nav = navigation
self.delay_timer = enigma.eTimer()
self.delay_timer.callback.append(self.processDelay)
self.current_dialog = None
self.dialog_stack = [ ]
self.summary_stack = [ ]
self.summary = None
self.in_exec = False
self.screen = SessionGlobals(self)
for p in plugins.getPlugins(PluginDescriptor.WHERE_SESSIONSTART):
try:
p(reason=0, session=self)
except:
print "Plugin raised exception at WHERE_SESSIONSTART"
import traceback
traceback.print_exc()
def processDelay(self):
callback = self.current_dialog.callback
retval = self.current_dialog.returnValue
if self.current_dialog.isTmp:
self.current_dialog.doClose()
# dump(self.current_dialog)
del self.current_dialog
else:
del self.current_dialog.callback
self.popCurrent()
if callback is not None:
callback(*retval)
def execBegin(self, first=True, do_show = True):
assert not self.in_exec
self.in_exec = True
c = self.current_dialog
# when this is an execbegin after a execend of a "higher" dialog,
# popSummary already did the right thing.
if first:
self.instantiateSummaryDialog(c)
c.saveKeyboardMode()
c.execBegin()
# when execBegin opened a new dialog, don't bother showing the old one.
if c == self.current_dialog and do_show:
c.show()
def execEnd(self, last=True):
assert self.in_exec
self.in_exec = False
self.current_dialog.execEnd()
self.current_dialog.restoreKeyboardMode()
self.current_dialog.hide()
if last:
self.current_dialog.removeSummary(self.summary)
self.popSummary()
def instantiateDialog(self, screen, *arguments, **kwargs):
return self.doInstantiateDialog(screen, arguments, kwargs, self.desktop)
def deleteDialog(self, screen):
screen.hide()
screen.doClose()
def instantiateSummaryDialog(self, screen, **kwargs):
self.pushSummary()
summary = screen.createSummary() or SimpleSummary
arguments = (screen,)
self.summary = self.doInstantiateDialog(summary, arguments, kwargs, self.summary_desktop)
self.summary.show()
screen.addSummary(self.summary)
def doInstantiateDialog(self, screen, arguments, kwargs, desktop):
# create dialog
dlg = screen(self, *arguments, **kwargs)
if dlg is None:
return
# read skin data
readSkin(dlg, None, dlg.skinName, desktop)
# create GUI view of this dialog
dlg.setDesktop(desktop)
dlg.applySkin()
return dlg
def pushCurrent(self):
if self.current_dialog is not None:
self.dialog_stack.append((self.current_dialog, self.current_dialog.shown))
self.execEnd(last=False)
def popCurrent(self):
if self.dialog_stack:
(self.current_dialog, do_show) = self.dialog_stack.pop()
self.execBegin(first=False, do_show=do_show)
else:
self.current_dialog = None
def execDialog(self, dialog):
self.pushCurrent()
self.current_dialog = dialog
self.current_dialog.isTmp = False
self.current_dialog.callback = None # would cause re-entrancy problems.
self.execBegin()
def openWithCallback(self, callback, screen, *arguments, **kwargs):
dlg = self.open(screen, *arguments, **kwargs)
dlg.callback = callback
return dlg
def open(self, screen, *arguments, **kwargs):
if self.dialog_stack and not self.in_exec:
raise RuntimeError("modal open are allowed only from a screen which is modal!")
# ...unless it's the very first screen.
self.pushCurrent()
dlg = self.current_dialog = self.instantiateDialog(screen, *arguments, **kwargs)
dlg.isTmp = True
dlg.callback = None
self.execBegin()
return dlg
def close(self, screen, *retval):
if not self.in_exec:
print "close after exec!"
return
# be sure that the close is for the right dialog!
# if it's not, you probably closed after another dialog
# was opened. this can happen if you open a dialog
# onExecBegin, and forget to do this only once.
# after close of the top dialog, the underlying will
# gain focus again (for a short time), thus triggering
# the onExec, which opens the dialog again, closing the loop.
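        # A common guard against that loop (a sketch; the flag name is
        # hypothetical):
        #
        #   def execBeginCallback(self):
        #       if not self.dialog_opened:
        #           self.dialog_opened = True
        #           self.session.open(SomeScreen)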
assert screen == self.current_dialog
self.current_dialog.returnValue = retval
self.delay_timer.start(0, 1)
self.execEnd()
def pushSummary(self):
if self.summary is not None:
self.summary.hide()
self.summary_stack.append(self.summary)
self.summary = None
def popSummary(self):
if self.summary is not None:
self.summary.doClose()
self.summary = self.summary_stack.pop()
if self.summary is not None:
self.summary.show()
profile("Standby,PowerKey")
import Screens.Standby
from Screens.Menu import MainMenu, mdom
from GlobalActions import globalActionMap
class PowerKey:
""" PowerKey stuff - handles the powerkey press and powerkey release actions"""
def __init__(self, session):
self.session = session
globalActionMap.actions["power_down"]=self.powerdown
globalActionMap.actions["power_up"]=self.powerup
globalActionMap.actions["power_long"]=self.powerlong
globalActionMap.actions["deepstandby"]=self.shutdown # frontpanel long power button press
globalActionMap.actions["discrete_off"]=self.standby
self.standbyblocked = 1
def MenuClosed(self, *val):
self.session.infobar = None
def shutdown(self):
print "PowerOff - Now!"
if not Screens.Standby.inTryQuitMainloop and self.session.current_dialog and self.session.current_dialog.ALLOW_SUSPEND:
self.session.open(Screens.Standby.TryQuitMainloop, 1)
def powerlong(self):
if Screens.Standby.inTryQuitMainloop or (self.session.current_dialog and not self.session.current_dialog.ALLOW_SUSPEND):
return
self.doAction(action = config.usage.on_long_powerpress.value)
def doAction(self, action):
self.standbyblocked = 1
if action == "shutdown":
self.shutdown()
elif action == "show_menu":
print "Show shutdown Menu"
root = mdom.getroot()
for x in root.findall("menu"):
y = x.find("id")
if y is not None:
id = y.get("val")
if id and id == "shutdown":
self.session.infobar = self
menu_screen = self.session.openWithCallback(self.MenuClosed, MainMenu, x)
menu_screen.setTitle(_("Standby / restart"))
return
elif action == "standby":
self.standby()
def powerdown(self):
self.standbyblocked = 0
def powerup(self):
if self.standbyblocked == 0:
self.doAction(action = config.usage.on_short_powerpress.value)
def standby(self):
if not Screens.Standby.inStandby and self.session.current_dialog and self.session.current_dialog.ALLOW_SUSPEND and self.session.in_exec:
self.session.open(Screens.Standby.Standby)
profile("Scart")
from Screens.Scart import Scart
class AutoScartControl:
def __init__(self, session):
self.force = False
self.current_vcr_sb = enigma.eAVSwitch.getInstance().getVCRSlowBlanking()
if self.current_vcr_sb and config.av.vcrswitch.value:
self.scartDialog = session.instantiateDialog(Scart, True)
else:
self.scartDialog = session.instantiateDialog(Scart, False)
config.av.vcrswitch.addNotifier(self.recheckVCRSb)
enigma.eAVSwitch.getInstance().vcr_sb_notifier.get().append(self.VCRSbChanged)
def recheckVCRSb(self, configElement):
self.VCRSbChanged(self.current_vcr_sb)
def VCRSbChanged(self, value):
#print "vcr sb changed to", value
self.current_vcr_sb = value
if config.av.vcrswitch.value or value > 2:
if value:
self.scartDialog.showMessageBox()
else:
self.scartDialog.switchToTV()
profile("Load:CI")
from enigma import eDVBCIInterfaces
from Screens.Ci import CiHandler
profile("Load:VolumeControl")
from Components.VolumeControl import VolumeControl
def runScreenTest():
config.misc.startCounter.value += 1
config.misc.startCounter.save()
profile("readPluginList")
plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
profile("Init:Session")
nav = Navigation()
session = Session(desktop = enigma.getDesktop(0), summary_desktop = enigma.getDesktop(1), navigation = nav)
CiHandler.setSession(session)
screensToRun = [ p.__call__ for p in plugins.getPlugins(PluginDescriptor.WHERE_WIZARD) ]
profile("wizards")
screensToRun += wizardManager.getWizards()
screensToRun.append((100, InfoBar.InfoBar))
screensToRun.sort()
enigma.ePythonConfigQuery.setQueryFunc(configfile.getResolvedKey)
# eDVBCIInterfaces.getInstance().setDescrambleRules(0 # Slot Number
# ,( ["1:0:1:24:4:85:C00000:0:0:0:"], #service_list
# ["PREMIERE"], #provider_list,
# [] #caid_list
# ));
def runNextScreen(session, screensToRun, *result):
if result:
enigma.quitMainloop(*result)
return
screen = screensToRun[0][1]
args = screensToRun[0][2:]
if screensToRun:
session.openWithCallback(boundFunction(runNextScreen, session, screensToRun[1:]), screen, *args)
else:
session.open(screen, *args)
config.misc.epgcache_filename.addNotifier(setEPGCachePath)
runNextScreen(session, screensToRun)
profile("Init:VolumeControl")
vol = VolumeControl(session)
profile("Init:PowerKey")
power = PowerKey(session)
# we need session.scart to access it from within menu.xml
session.scart = AutoScartControl(session)
profile("Init:Trashcan")
import Tools.Trashcan
Tools.Trashcan.init(session)
profile("RunReactor")
profile_final()
runReactor()
profile("wakeup")
from time import time, strftime, localtime
from Tools.StbHardware import setFPWakeuptime, getFPWakeuptime, setRTCtime
#get currentTime
nowTime = time()
wakeupList = [
x for x in ((session.nav.RecordTimer.getNextRecordingTime(), 0),
(session.nav.RecordTimer.getNextZapTime(isWakeup=True), 1),
(plugins.getNextWakeupTime(), 2))
if x[0] != -1
]
wakeupList.sort()
if wakeupList:
from time import strftime
startTime = wakeupList[0]
if (startTime[0] - nowTime) < 270: # no time to switch box back on
wptime = nowTime + 30 # so switch back on in 30 seconds
else:
wptime = startTime[0] - 240
if not config.misc.useTransponderTime.value:
print "dvb time sync disabled... so set RTC now to current linux time!", strftime("%Y/%m/%d %H:%M", localtime(nowTime))
setRTCtime(nowTime)
print "set wakeup time to", strftime("%Y/%m/%d %H:%M", localtime(wptime))
setFPWakeuptime(wptime)
profile("stopService")
session.nav.stopService()
profile("nav shutdown")
session.nav.shutdown()
profile("configfile.save")
configfile.save()
from Screens import InfoBarGenerics
InfoBarGenerics.saveResumePoints()
return 0
profile("Init:skin")
import skin
skin.loadSkinData(enigma.getDesktop(0))
profile("InputDevice")
import Components.InputDevice
Components.InputDevice.InitInputDevices()
import Components.InputHotplug
profile("SetupDevices")
import Components.SetupDevices
Components.SetupDevices.InitSetupDevices()
profile("AVSwitch")
import Components.AVSwitch
Components.AVSwitch.InitAVSwitch()
profile("RecordingConfig")
import Components.RecordingConfig
Components.RecordingConfig.InitRecordingConfig()
profile("UsageConfig")
import Components.UsageConfig
Components.UsageConfig.InitUsageConfig()
profile("keymapparser")
import keymapparser
keymapparser.readKeymap(config.usage.keymap.value)
profile("Network")
import Components.Network
Components.Network.InitNetwork()
profile("LCD")
import Components.Lcd
Components.Lcd.InitLcd()
profile("RFMod")
import Components.RFmod
Components.RFmod.InitRFmod()
profile("Init:CI")
import Screens.Ci
Screens.Ci.InitCiConfig()
profile("RcModel")
import Components.RcModel
#from enigma import dump_malloc_stats
#t = eTimer()
#t.callback.append(dump_malloc_stats)
#t.start(1000)
# first, setup a screen
try:
runScreenTest()
plugins.shutdown()
Components.ParentalControl.parentalControl.save()
except:
print 'EXCEPTION IN PYTHON STARTUP CODE:'
print '-'*60
print_exc(file=stdout)
enigma.quitMainloop(5)
print '-'*60
| gpl-2.0 |
tdtrask/ansible | test/units/modules/cloud/amazon/test_lambda.py | 57 | 11980 | #
# (c) 2017 Michael De La Rue
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
import copy
import pytest
from ansible.compat.tests.mock import MagicMock, Mock, patch
from ansible.module_utils import basic
from units.modules.utils import set_module_args
boto3 = pytest.importorskip("boto3")
# lambda is a keyword so we have to hack this.
_temp = __import__("ansible.modules.cloud.amazon.lambda")
lda = getattr(_temp.modules.cloud.amazon, "lambda")
base_lambda_config = {
'FunctionName': 'lambda_name',
'Role': 'arn:aws:iam::987654321012:role/lambda_basic_execution',
'Handler': 'lambda_python.my_handler',
'Description': 'this that the other',
'Timeout': 3,
'MemorySize': 128,
'Runtime': 'python2.7',
'CodeSha256': 'AqMZ+xptM7aC9VXu+5jyp1sqO+Nj4WFMNzQxtPMP2n8=',
}
one_change_lambda_config = copy.copy(base_lambda_config)
one_change_lambda_config['Timeout'] = 4
two_change_lambda_config = copy.copy(one_change_lambda_config)
two_change_lambda_config['Role'] = 'arn:aws:iam::987654321012:role/lambda_advanced_execution'
code_change_lambda_config = copy.copy(base_lambda_config)
code_change_lambda_config['CodeSha256'] = 'P+Zy8U4T4RiiHWElhL10VBKj9jw4rSJ5bm/TiW+4Rts='
base_module_args = {
"region": "us-west-1",
"name": "lambda_name",
"state": "present",
"zip_file": "test/units/modules/cloud/amazon/fixtures/thezip.zip",
"runtime": 'python2.7',
"role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
"memory_size": 128,
"timeout": 3,
"handler": 'lambda_python.my_handler'
}
module_args_with_environment = dict(base_module_args, environment_variables={
"variable_name": "variable_value"
})
def make_mock_no_connection_connection(config):
"""return a mock of ansible's boto3_conn ready to return a mock AWS API client"""
lambda_client_double = MagicMock()
lambda_client_double.get_function.configure_mock(
return_value=False
)
lambda_client_double.update_function_configuration.configure_mock(
return_value={
'Version': 1
}
)
fake_boto3_conn = Mock(return_value=lambda_client_double)
return (fake_boto3_conn, lambda_client_double)
def make_mock_connection(config):
"""return a mock of ansible's boto3_conn ready to return a mock AWS API client"""
lambda_client_double = MagicMock()
lambda_client_double.get_function.configure_mock(
return_value={
'Configuration': config
}
)
lambda_client_double.update_function_configuration.configure_mock(
return_value={
'Version': 1
}
)
fake_boto3_conn = Mock(return_value=lambda_client_double)
return (fake_boto3_conn, lambda_client_double)
class AnsibleFailJson(Exception):
pass
def fail_json_double(*args, **kwargs):
"""works like fail_json but returns module results inside exception instead of stdout"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
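# A minimal sketch of how fail_json_double is used (it mirrors
# test_warn_region_not_specified below): patch it over AnsibleModule.fail_json
# and inspect the exception payload instead of parsing stdout.
#
#   with patch.object(basic.AnsibleModule, 'fail_json', fail_json_double):
#       try:
#           lda.main()
#       except AnsibleFailJson as e:
#           result = e.args[0]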
# TODO: def test_handle_different_types_in_config_params():
def test_create_lambda_if_not_exist():
set_module_args(base_module_args)
(boto3_conn_double, lambda_client_double) = make_mock_no_connection_connection(code_change_lambda_config)
with patch.object(lda, 'boto3_conn', boto3_conn_double):
try:
lda.main()
except SystemExit:
pass
# guard against calling other than for a lambda connection (e.g. IAM)
assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
        "unexpectedly updated lambda configuration when the function should only have been created"
    assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
        "updated lambda function code when the function should only have been created"
    assert(len(lambda_client_double.create_function.mock_calls) > 0), \
        "failed to call create_function"
(create_args, create_kwargs) = lambda_client_double.create_function.call_args
assert (len(create_kwargs) > 0), "expected create called with keyword args, none found"
try:
# For now I assume that we should NOT send an empty environment. It might
# be okay / better to explicitly send an empty environment. However `None'
# is not acceptable - mikedlr
create_kwargs["Environment"]
raise(Exception("Environment sent to boto when none expected"))
except KeyError:
pass # We are happy, no environment is fine
def test_update_lambda_if_code_changed():
set_module_args(base_module_args)
(boto3_conn_double, lambda_client_double) = make_mock_connection(code_change_lambda_config)
with patch.object(lda, 'boto3_conn', boto3_conn_double):
try:
lda.main()
except SystemExit:
pass
# guard against calling other than for a lambda connection (e.g. IAM)
assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
"unexpectedly updatede lambda configuration when only code changed"
assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
"lambda function update called multiple times when only one time should be needed"
assert(len(lambda_client_double.update_function_code.mock_calls) > 1), \
"failed to update lambda function when code changed"
# 3 because after uploading we call into the return from mock to try to find what function version
# was returned so the MagicMock actually sees two calls for one update.
assert(len(lambda_client_double.update_function_code.mock_calls) < 3), \
"lambda function code update called multiple times when only one time should be needed"
def test_update_lambda_if_config_changed():
set_module_args(base_module_args)
(boto3_conn_double, lambda_client_double) = make_mock_connection(two_change_lambda_config)
with patch.object(lda, 'boto3_conn', boto3_conn_double):
try:
lda.main()
except SystemExit:
pass
# guard against calling other than for a lambda connection (e.g. IAM)
assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
"failed to update lambda function when configuration changed"
assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
"lambda function update called multiple times when only one time should be needed"
assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
"updated lambda code when no change should have happened"
def test_update_lambda_if_only_one_config_item_changed():
set_module_args(base_module_args)
(boto3_conn_double, lambda_client_double) = make_mock_connection(one_change_lambda_config)
with patch.object(lda, 'boto3_conn', boto3_conn_double):
try:
lda.main()
except SystemExit:
pass
# guard against calling other than for a lambda connection (e.g. IAM)
assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
"failed to update lambda function when configuration changed"
assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
"lambda function update called multiple times when only one time should be needed"
assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
"updated lambda code when no change should have happened"
def test_update_lambda_if_added_environment_variable():
set_module_args(module_args_with_environment)
(boto3_conn_double, lambda_client_double) = make_mock_connection(base_lambda_config)
with patch.object(lda, 'boto3_conn', boto3_conn_double):
try:
lda.main()
except SystemExit:
pass
# guard against calling other than for a lambda connection (e.g. IAM)
assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
"failed to update lambda function when configuration changed"
assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
"lambda function update called multiple times when only one time should be needed"
assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
"updated lambda code when no change should have happened"
(update_args, update_kwargs) = lambda_client_double.update_function_configuration.call_args
assert (len(update_kwargs) > 0), "expected update configuration called with keyword args, none found"
assert update_kwargs['Environment']['Variables'] == module_args_with_environment['environment_variables']
def test_dont_update_lambda_if_nothing_changed():
set_module_args(base_module_args)
(boto3_conn_double, lambda_client_double) = make_mock_connection(base_lambda_config)
with patch.object(lda, 'boto3_conn', boto3_conn_double):
try:
lda.main()
except SystemExit:
pass
# guard against calling other than for a lambda connection (e.g. IAM)
assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
"updated lambda function when no configuration changed"
assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
"updated lambda code when no change should have happened"
def test_warn_region_not_specified():
set_module_args({
"name": "lambda_name",
"state": "present",
# Module is called without a region causing error
# "region": "us-east-1",
"zip_file": "test/units/modules/cloud/amazon/fixtures/thezip.zip",
"runtime": 'python2.7',
"role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
"handler": 'lambda_python.my_handler'})
get_aws_connection_info_double = Mock(return_value=(None, None, None))
with patch.object(lda, 'get_aws_connection_info', get_aws_connection_info_double):
with patch.object(basic.AnsibleModule, 'fail_json', fail_json_double):
try:
lda.main()
except AnsibleFailJson as e:
result = e.args[0]
assert("region must be specified" in result['msg'])
| gpl-3.0 |
qwertyezi/Test | text/freeline_core/gradle_inc_build.py | 5 | 43598 | # -*- coding:utf8 -*-
from __future__ import print_function
import os
import shutil
import android_tools
from build_commands import CompileCommand, IncAaptCommand, IncJavacCommand, IncDexCommand
from builder import IncrementalBuilder, Builder
from gradle_tools import get_project_info, GradleDirectoryFinder, GradleSyncClient, GradleSyncTask, \
GradleCleanCacheTask, GradleMergeDexTask, get_sync_native_file_path, fix_package_name, DataBindingProcessor, \
DatabindingDirectoryLookUp
from task import find_root_tasks, find_last_tasks, Task
from utils import get_file_content, write_file_content, is_windows_system, cexec, load_json_cache, get_md5, \
write_json_cache
from tracing import Tracing
from exceptions import FreelineException
class GradleIncBuilder(IncrementalBuilder):
def __init__(self, changed_files, config, task_engine, project_info=None):
IncrementalBuilder.__init__(self, changed_files, config, task_engine, builder_name="gradle_inc_builder")
self._project_info = project_info
self._tasks_dictionary = {}
self._module_dependencies = {}
self._all_modules = []
self._is_art = False
self._module_dir_map = {}
self._has_res_changed = self.__is_any_modules_have_res_changed()
self._changed_modules = self.__changed_modules()
self._original_changed_files = dict(changed_files)
def check_build_environment(self):
if not self._project_info:
self._project_info = get_project_info(self._config)
self._all_modules = self._project_info.keys()
for item in self._project_info.values():
self._module_dir_map[item['name']] = item['relative_dir']
for key, value in self._project_info.iteritems():
self._module_dependencies[key] = [item for item in value['local_module_dep']]
self._is_art = android_tools.get_device_sdk_version_by_adb(Builder.get_adb(self._config)) > 20
        # merge all modified resource files into the main module's resources
self.__merge_res_files()
self.__merge_native_files()
def generate_sorted_build_tasks(self):
"""
sort build tasks according to the module's dependency
:return: None
"""
for module in self._all_modules:
task = android_tools.AndroidIncrementalBuildTask(module, self.__setup_inc_command(module))
self._tasks_dictionary[module] = task
for module in self._all_modules:
task = self._tasks_dictionary[module]
for dep in self._module_dependencies[module]:
task.add_parent_task(self._tasks_dictionary[dep])
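        # A tiny sketch of the wiring above (module names are hypothetical):
        # if module 'app' depends on module 'lib', the loop produces
        #
        #   tasks['app'].add_parent_task(tasks['lib'])
        #
        # so the task engine builds 'lib' before 'app'.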
def __setup_inc_command(self, module):
return GradleCompileCommand(module, self.__setup_invoker(module))
def __setup_invoker(self, module):
return GradleIncBuildInvoker(module, self._project_info[module]['path'], self._config,
self._changed_files['projects'][module], self._project_info[module], self._is_art,
all_module_info=self._project_info, module_dir_map=self._module_dir_map,
is_any_modules_have_res_changed=self._has_res_changed,
changed_modules=self._changed_modules)
def __merge_res_files(self):
main_res = self._changed_files['projects'][self._config['main_project_name']]
for module, file_dict in self._changed_files['projects'].iteritems():
if module == self._config['main_project_name']:
continue
for key, files in file_dict.iteritems():
if key == 'res' or key == 'assets':
main_res[key].extend(files)
self._changed_files['projects'][self._config['main_project_name']] = main_res
def __merge_native_files(self):
so_files = []
for module, file_dict in self._changed_files['projects'].iteritems():
for key, files in file_dict.iteritems():
if key == 'so':
for m in range(len(files)):
self.debug('append {} to native queue'.format(files[m]))
so_files.append(files[m])
if len(so_files) > 0:
from zipfile import ZipFile
with ZipFile(get_sync_native_file_path(self._config['build_cache_dir']), "w") as nativeZip:
for m in range(len(so_files)):
nativeZip.write(so_files[m])
def __is_any_modules_have_res_changed(self):
for key, value in self._changed_files['projects'].iteritems():
if len(value['res']) > 0:
                self.debug('found res changes in module {}'.format(key))
return True
return False
def __changed_modules(self):
modules = []
for module, file_dict in self._changed_files['projects'].iteritems():
            if len(file_dict['src']) > 0 or len(file_dict['res']) > 0 or len(file_dict['assets']) > 0:
modules.append(module)
return modules
def incremental_build(self):
merge_dex_task = GradleMergeDexTask(self._config['build_cache_dir'], self._all_modules, self._project_info)
aapt_task = GradleAaptTask(self.__setup_invoker(self._config['main_project_name']),
self._original_changed_files, self._changed_files)
task_list = self._tasks_dictionary.values()
last_tasks = find_last_tasks(task_list)
for rtask in find_root_tasks(task_list):
aapt_task.add_child_task(rtask)
clean_cache_task = GradleCleanCacheTask(self._config['build_cache_dir'], self._project_info)
sync_client = GradleSyncClient(self._is_art, self._config, self._project_info, self._all_modules)
connect_task = android_tools.ConnectDeviceTask(sync_client)
sync_task = GradleSyncTask(sync_client, self._config['build_cache_dir'])
update_stat_task = android_tools.UpdateStatTask(self._config, self._changed_files['projects'])
map(lambda task: task.add_child_task(merge_dex_task), last_tasks)
connect_task.add_child_task(sync_task)
merge_dex_task.add_child_task(sync_task)
sync_task.add_child_task(clean_cache_task)
clean_cache_task.add_child_task(update_stat_task)
# self._task_engine.add_root_task(find_root_tasks(task_list))
self._task_engine.add_root_task(aapt_task)
self._task_engine.add_root_task(connect_task)
self._task_engine.start()
class GradleAaptTask(Task):
def __init__(self, invoker, original_changed_files, changed_files):
Task.__init__(self, 'gradle_aapt_task')
self._invoker = invoker
self._original_changed_files = original_changed_files
self._changed_files_ref = changed_files
def execute(self):
should_run_res_task = self._invoker.check_res_task()
if not should_run_res_task:
self.debug('no need to execute')
return
self.debug('start to execute aapt command...')
self._invoker.fill_dependant_jars()
self._invoker.check_ids_change()
with Tracing("generate_id_keeper_files"):
self._invoker.generate_r_file()
# self._invoker.backup_res_files()
with Tracing("incremental_databinding_process"):
self._invoker.process_databinding(self._original_changed_files, self._changed_files_ref)
with Tracing("run_incremental_aapt_task"):
self._invoker.run_aapt_task()
with Tracing("check_other_modules_resources"):
self._invoker.check_other_modules_resources()
self._invoker.recover_original_file_path()
class GradleCompileCommand(CompileCommand):
def __init__(self, module, invoker):
self._module = module
CompileCommand.__init__(self, 'gradle_{}_compile_command'.format(module), invoker)
def _setup(self):
# self.add_command(GradleIncAaptCommand(self._module, self._invoker))
self.add_command(GradleIncJavacCommand(self._module, self._invoker))
self.add_command(GradleIncDexCommand(self._module, self._invoker))
def execute(self):
map(lambda command: command.execute(), self.command_list)
class GradleIncAaptCommand(IncAaptCommand):
def __init__(self, module_name, invoker):
IncAaptCommand.__init__(self, module_name, invoker)
def execute(self):
should_run_res_task = self._invoker.check_res_task()
if not should_run_res_task:
self.debug('no need to execute')
return
self.debug('start to execute aapt command...')
self._invoker.fill_dependant_jars()
self._invoker.check_ids_change()
self._invoker.generate_r_file()
# self._invoker.backup_res_files()
self._invoker.run_aapt_task()
class GradleIncJavacCommand(IncJavacCommand):
def __init__(self, module_name, invoker):
IncJavacCommand.__init__(self, module_name, invoker)
def execute(self):
self._invoker.check_r_md5() # check if R.java has changed
# self._invoker.check_other_modules_resources()
should_run_javac_task = self._invoker.check_javac_task()
if not should_run_javac_task:
self.debug('no need to execute')
return
self.debug('start to execute javac command...')
self._invoker.append_r_file()
self._invoker.fill_classpaths()
self._invoker.fill_extra_javac_args()
self._invoker.clean_dex_cache()
self._invoker.run_apt_only()
self._invoker.run_javac_task()
self._invoker.run_retrolambda()
class GradleIncDexCommand(IncDexCommand):
def __init__(self, module_name, invoker):
IncDexCommand.__init__(self, module_name, invoker)
def execute(self):
should_run_dex_task = self._invoker.check_dex_task()
if not should_run_dex_task:
self.debug('no need to execute')
return
self.debug('start to execute dex command...')
self._invoker.run_dex_task()
class GradleIncBuildInvoker(android_tools.AndroidIncBuildInvoker):
def __init__(self, module_name, path, config, changed_files, module_info, is_art, all_module_info=None,
module_dir_map=None, is_any_modules_have_res_changed=False, changed_modules=None):
android_tools.AndroidIncBuildInvoker.__init__(self, module_name, path, config, changed_files, module_info,
is_art=is_art)
self._all_module_info = all_module_info
self._module_dir_map = module_dir_map
self._is_any_modules_have_res_changed = is_any_modules_have_res_changed
self._changed_modules = changed_modules
self._merged_res_paths = []
self._merged_res_paths.append(self._finder.get_backup_res_dir())
self._replace_mapper = {}
self._is_retrolambda_enabled = 'retrolambda' in self._config and self._name in self._config['retrolambda'] \
and self._config['retrolambda'][self._name]['enabled']
self._is_databinding_enabled = 'databinding_modules' in self._config and self._name in self._config[
'databinding_modules']
self._is_dagger_enabled = 'apt_libraries' in self._config and self._config['apt_libraries']['dagger']
self._apt_output_dir = None
for mname in self._all_module_info.keys():
if mname in self._config['project_source_sets']:
self._merged_res_paths.extend(self._config['project_source_sets'][mname]['main_res_directory'])
self._merged_res_paths.extend(self._config['project_source_sets'][mname]['main_assets_directory'])
def before_execute(self):
self._finder = GradleDirectoryFinder(self._name, self._module_path, self._cache_dir,
package_name=self._module_info['packagename'], config=self._config)
def check_res_task(self):
if self._name != self._config['main_project_name']:
self.debug('skip {} aapt task'.format(self._name))
return False
return android_tools.AndroidIncBuildInvoker.check_res_task(self)
def fill_dependant_jars(self):
self._res_dependencies = self._module_info['dep_jar_path']
def process_databinding(self, original_changed_files, changed_files_ref):
if 'databinding' in self._config:
if self._config['databinding_modules'] == 0:
self.debug('no modules for processing databinding')
return
databinding_config = self._config['databinding']
DatabindingDirectoryLookUp.load_path_map(self._config['build_cache_dir'])
            processor = DataBindingProcessor(self._config)
for module_config in databinding_config:
module_name = module_config['name']
if module_name in original_changed_files['projects']:
resources_files = original_changed_files['projects'][module_config['name']]['res']
if len(resources_files) == 0:
                        self.debug('module {} has no resource files changed'.format(module_name))
continue
changed_files_map = {}
res_dirs = self._config['project_source_sets'][module_name]['main_res_directory']
                    # TODO: detect changed files that match no res directory
for path in resources_files:
for rdir in res_dirs:
if path.startswith(rdir):
if rdir in changed_files_map:
changed_files_map[rdir].append(path)
else:
changed_files_map[rdir] = [path]
break
for rdir in changed_files_map.keys():
output_res_dir = DatabindingDirectoryLookUp.find_target_res_path(rdir)
output_java_dir = DatabindingDirectoryLookUp.find_target_java_path(rdir)
output_layoutinfo_dir = DatabindingDirectoryLookUp.get_merged_layoutinfo_dir(self._cache_dir)
if output_res_dir and output_java_dir and output_layoutinfo_dir:
changed_files_list = changed_files_map[rdir]
                            processor.process_module_databinding(module_config, rdir, output_res_dir,
output_layoutinfo_dir, output_java_dir,
self._config['sdk_directory'],
changed_files=changed_files_list)
# replace file path
for path in changed_files_list:
new_path = path.replace(rdir, output_res_dir)
self._merged_res_paths.append(output_res_dir) # append new path prefix
self.debug('replace {} with output path: {}'.format(path, new_path))
self._replace_mapper[new_path] = path
self._changed_files['res'].remove(path)
self._changed_files['res'].append(new_path)
# mark java compiler
if os.path.exists(output_layoutinfo_dir):
has_layoutinfo = False
for name in os.listdir(output_layoutinfo_dir):
if name.endswith('.xml'):
has_layoutinfo = True
break
if has_layoutinfo:
info_file = os.path.join(output_java_dir, 'android', 'databinding', 'layouts',
'DataBindingInfo.java')
if os.path.exists(info_file):
append_files = [info_file]
                                        append_files.extend(processor.extract_related_java_files(module_name,
output_layoutinfo_dir))
if 'apt' not in changed_files_ref['projects'][module_name]:
changed_files_ref['projects'][module_name]['apt'] = []
for fpath in append_files:
self.debug('add {} to {} module'.format(fpath, module_name))
changed_files_ref['projects'][module_name]['apt'].append(fpath)
if not android_tools.is_src_changed(self._config['build_cache_dir']):
android_tools.mark_src_changed(self._config['build_cache_dir'])
def _get_aapt_args(self):
aapt_args = [self._aapt, 'package', '-f', '-I',
os.path.join(self._config['compile_sdk_directory'], 'android.jar'),
'-M', fix_package_name(self._config, self._finder.get_dst_manifest_path())]
for rdir in self._config['project_source_sets'][self._name]['main_res_directory']:
if os.path.exists(rdir):
aapt_args.append('-S')
aapt_args.append(DatabindingDirectoryLookUp.find_target_res_path(rdir))
for rdir in self._module_info['local_dep_res_path']:
if os.path.exists(rdir):
aapt_args.append('-S')
aapt_args.append(DatabindingDirectoryLookUp.find_target_res_path(rdir))
for resdir in self._module_info['dep_res_path']:
if os.path.exists(resdir):
aapt_args.append('-S')
aapt_args.append(resdir)
if 'extra_dep_res_paths' in self._config and self._config['extra_dep_res_paths'] is not None:
arr = self._config['extra_dep_res_paths']
for path in arr:
path = path.strip()
if os.path.isdir(path):
aapt_args.append('-S')
aapt_args.append(path)
aapt_args.append('-S')
aapt_args.append(self._finder.get_backup_res_dir())
freeline_assets_dir = os.path.join(self._config['build_cache_dir'], 'freeline-assets')
aapt_args.append('-A')
aapt_args.append(freeline_assets_dir)
for adir in self._config['project_source_sets'][self._name]['main_assets_directory']:
if os.path.exists(adir):
aapt_args.append('-A')
aapt_args.append(adir)
for adir in self._module_info['local_dep_assets_path']:
if os.path.exists(adir):
aapt_args.append('-A')
aapt_args.append(adir)
for adir in self._module_info['dep_assets_path']:
if os.path.exists(adir):
aapt_args.append('-A')
aapt_args.append(adir)
gen_path = self._finder.get_backup_dir()
aapt_args.append('--custom-package')
aapt_args.append(self._config['package'])
aapt_args.append('-m')
aapt_args.append('-J')
aapt_args.append(gen_path)
aapt_args.append('--auto-add-overlay')
aapt_args.append('-P')
aapt_args.append(self._finder.get_public_xml_path())
final_changed_list = self._parse_changed_list()
if is_windows_system():
final_changed_list = [fpath.replace('\\', '/') for fpath in final_changed_list]
final_changed_list_chain = ':'.join(final_changed_list)
aapt_args.append('-F')
aapt_args.append(self._finder.get_dst_res_pack_path(self._name))
aapt_args.append('--debug-mode')
aapt_args.append('--auto-add-overlay')
aapt_args.append('--no-version-vectors')
if len(final_changed_list_chain) > 0 and self._is_art:
aapt_args.append('--buildIncrement')
aapt_args.append(final_changed_list_chain)
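            # note: the '--resoucres-md5-cache-path' spelling below is kept as-is;
            # it presumably matches the flag name expected by freeline's customized aapt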
aapt_args.append('--resoucres-md5-cache-path')
aapt_args.append(os.path.join(self._cache_dir, "arsc_cache.dat"))
aapt_args.append('--ignore-assets')
aapt_args.append('public_id.xml:public.xml:*.bak:.*')
if 'ignore_resource_ids' in self._config and len(self._config['ignore_resource_ids']) > 0 and not is_windows_system():
aapt_args.append('--ignore-ids')
aapt_args.append(':'.join(self._config['ignore_resource_ids']))
return aapt_args, final_changed_list
def recover_original_file_path(self):
copylist = list(self._changed_files['res'])
for fpath in copylist:
if fpath in self._replace_mapper:
self._changed_files['res'].remove(fpath)
self._changed_files['res'].append(self._replace_mapper[fpath])
def check_other_modules_resources(self):
if self._name == self._config['main_project_name'] and self._all_module_info is not None:
changed_modules = self._changed_modules
if len(changed_modules) > 0:
self.__modify_main_r()
for module in changed_modules:
fpath = self.__modify_other_modules_r(self._all_module_info[module]['packagename'])
self.debug('modify {}'.format(fpath))
def __modify_main_r(self):
main_r_fpath = os.path.join(self._finder.get_backup_dir(),
self._module_info['packagename'].replace('.', os.sep), 'R.java')
self.debug('modify {}'.format(main_r_fpath))
buf = GradleIncBuildInvoker.remove_final_tag(get_file_content(main_r_fpath))
buf = android_tools.fix_unicode_parse_error(buf, main_r_fpath)
write_file_content(main_r_fpath, buf)
target_main_r_dir = os.path.join(self.__get_freeline_backup_r_dir(),
self._module_info['packagename'].replace('.', os.sep))
if not os.path.exists(target_main_r_dir):
os.makedirs(target_main_r_dir)
target_main_r_path = os.path.join(target_main_r_dir, 'R.java')
self.debug('copy {} to {}'.format(main_r_fpath, target_main_r_path))
shutil.copy(main_r_fpath, target_main_r_path)
def append_r_file(self):
if self._name != self._config['main_project_name']:
backupdir = self.__get_freeline_backup_r_dir()
main_r_path = os.path.join(backupdir, self._config['package'].replace('.', os.sep), 'R.java')
            # if main_r_path exists, resources were modified, so R.java needs to be added to the classpath
if os.path.exists(main_r_path):
pns = [self._config['package'], self._module_info['packagename']]
for m in self._module_info['local_module_dep']:
pns.append(self._all_module_info[m]['packagename'])
for pn in pns:
rpath = os.path.join(backupdir, pn.replace('.', os.sep), 'R.java')
if os.path.exists(rpath) and rpath not in self._changed_files['src']:
self._changed_files['src'].append(rpath)
self.debug('add R.java to changed list: ' + rpath)
elif pn == self._module_info['packagename']:
fpath = self.__modify_other_modules_r(pn)
self.debug('modify {}'.format(fpath))
if fpath and os.path.exists(fpath):
self._changed_files['src'].append(fpath)
self.debug('add R.java to changed list: ' + fpath)
else:
if is_windows_system():
main_r_path = os.path.join(self._finder.get_backup_dir(),
self._module_info['packagename'].replace('.', os.sep), 'R.java')
if os.path.exists(main_r_path):
content = android_tools.fix_unicode_parse_error(get_file_content(main_r_path), main_r_path)
write_file_content(main_r_path, content)
def fill_classpaths(self):
# classpaths:
# 1. patch classes
# 2. dependent modules' patch classes
# 3. android.jar
# 4. third party jars
# 5. generated classes in build directory
patch_classes_cache_dir = self._finder.get_patch_classes_cache_dir()
self._classpaths.append(patch_classes_cache_dir)
self._classpaths.append(self._finder.get_dst_classes_dir())
for module in self._module_info['local_module_dep']:
finder = GradleDirectoryFinder(module, self._module_dir_map[module], self._cache_dir)
self._classpaths.append(finder.get_patch_classes_cache_dir())
# add main module classes dir to classpath to generate databinding files
main_module_name = self._config['main_project_name']
if self._name != main_module_name and self._is_databinding_enabled:
finder = GradleDirectoryFinder(main_module_name, self._module_dir_map[main_module_name], self._cache_dir,
config=self._config)
self._classpaths.append(finder.get_dst_classes_dir())
self._classpaths.append(os.path.join(self._config['compile_sdk_directory'], 'android.jar'))
self._classpaths.extend(self._module_info['dep_jar_path'])
        # remove classes in the build directory whose java source no longer exists
srcdirs = self._config['project_source_sets'][self._name]['main_src_directory']
for dirpath, dirnames, files in os.walk(patch_classes_cache_dir):
for fn in files:
if self._is_r_file_changed and self._module_info['packagename'] + '.R.' in fn:
android_tools.delete_class(dirpath, fn.replace('.class', ''))
if fn.endswith('.class') and '$' not in fn and 'R.' not in fn and 'Manifest.' not in fn:
cp = os.path.join(dirpath, fn)
java_src = cp.replace('.class', '.java').split('classes' + os.path.sep)[1]
                    existence = False
for src_dir in srcdirs:
if os.path.exists(os.path.join(src_dir, java_src)):
existence = True
break
if not existence:
android_tools.delete_class(dirpath, fn.replace('.class', ''))
def fill_extra_javac_args(self):
if 'apt' in self._config and self._name in self._config['apt'] and self._config['apt'][self._name]['enabled']:
apt_config = self._config['apt'][self._name]
self._apt_output_dir = apt_config['aptOutput']
apt_args = ['-s', apt_config['aptOutput']]
if apt_config['processor']:
apt_args.append('-processor')
apt_args.append(apt_config['processor'])
if not apt_config['disableDiscovery']:
apt_args.append('-processorpath')
apt_args.append(apt_config['processorPath'])
apt_args.extend(apt_config['aptArgs'])
self._extra_javac_args.extend(apt_args)
elif self._is_databinding_enabled:
if self._name == self._config['main_project_name']:
apt_output = os.path.join(self._config['build_directory'], 'generated', 'source', 'apt',
self._config['product_flavor'], 'debug')
else:
apt_output = os.path.join(self._config['build_directory'], 'generated', 'source', 'apt', 'release')
self._apt_output_dir = apt_output
if not os.path.exists(apt_output):
os.makedirs(apt_output)
if self._config['databinding_compiler_jar'] != '':
self.debug('add compiler jar to classpath: {}'.format(self._config['databinding_compiler_jar']))
self._module_info['dep_jar_path'].append(self._config['databinding_compiler_jar'])
apt_args = ['-s', apt_output, '-processorpath', os.pathsep.join(self._module_info['dep_jar_path'])]
self._extra_javac_args.extend(apt_args)
def run_apt_only(self):
if self._is_databinding_enabled and self._should_run_databinding_apt():
apt_args = self._generate_java_compile_args(extra_javac_args_enabled=True)
self.debug('apt exec: ' + ' '.join(apt_args))
output, err, code = cexec(apt_args, callback=None)
if code != 0:
raise FreelineException('apt compile failed.', '{}\n{}'.format(output, err))
if self._apt_output_dir and os.path.exists(self._apt_output_dir):
apt_cache_path = os.path.join(self._config['build_cache_dir'], 'apt_files_stat_cache.json')
if os.path.exists(apt_cache_path):
apt_cache = load_json_cache(apt_cache_path)
for dirpath, dirnames, files in os.walk(self._apt_output_dir):
for fn in files:
fpath = os.path.join(dirpath, fn)
if apt_cache and self._name in apt_cache:
if fpath in apt_cache[self._name]:
new_md5 = get_md5(fpath)
if new_md5 != apt_cache[self._name][fpath]['md5']:
self.debug('detect new md5 value, add apt file to change list: {}'.format(fpath))
self._changed_files['src'].append(fpath)
else:
self.debug('find new apt file, add to change list: {}'.format(fpath))
self._changed_files['src'].append(fpath)
else:
self.debug('apt cache not found, add to change list: {}'.format(fpath))
self._changed_files['src'].append(fpath)
def run_javac_task(self):
if self._is_only_r_changed() and not self._is_other_modules_has_src_changed:
self._is_need_javac = False
android_tools.clean_src_changed_flag(self._cache_dir)
            self.debug('apt process did not generate new files, ignoring javac task.')
return
extra_javac_args_enabled = not (self._is_databinding_enabled and self._should_run_databinding_apt())
javacargs = self._generate_java_compile_args(extra_javac_args_enabled=extra_javac_args_enabled)
self.debug('javac exec: ' + ' '.join(javacargs))
output, err, code = cexec(javacargs, callback=None)
if code != 0:
raise FreelineException('incremental javac compile failed.', '{}\n{}'.format(output, err))
else:
if self._is_r_file_changed:
old_r_file = self._finder.get_dst_r_path(config=self._config)
new_r_file = android_tools.DirectoryFinder.get_r_file_path(self._finder.get_backup_dir())
if old_r_file and new_r_file:
shutil.copyfile(new_r_file, old_r_file)
self.debug('copy {} to {}'.format(new_r_file, old_r_file))
def _should_run_databinding_apt(self):
if 'apt' in self._changed_files:
for fpath in self._changed_files['apt']:
if fpath.endswith('DataBindingInfo.java'):
return True
return False
def _generate_java_compile_args(self, extra_javac_args_enabled=False):
javacargs = [self._javac]
arguments = ['-encoding', 'UTF-8', '-g']
if not self._is_retrolambda_enabled:
arguments.extend(['-target', '1.7', '-source', '1.7'])
arguments.append('-cp')
arguments.append(os.pathsep.join(self._classpaths))
for fpath in self._changed_files['src']:
arguments.append(fpath)
if extra_javac_args_enabled:
if 'apt' in self._changed_files:
for fpath in self._changed_files['apt']:
arguments.append(fpath)
filter_tags = []
if self._is_databinding_enabled:
filter_tags.extend(['BindingAdapter', 'BindingConversion', 'Bindable'])
if self._is_dagger_enabled:
filter_tags.extend(['DaggerComponent', 'DaggerModule'])
files = self._get_apt_related_files(filter_tags=filter_tags)
for fpath in files:
if fpath and os.path.exists(fpath) and fpath not in self._changed_files['src']:
if 'apt' in self._changed_files and fpath in self._changed_files['apt']:
continue
self.debug('add apt related file: {}'.format(fpath))
arguments.append(fpath)
arguments.extend(self._extra_javac_args)
arguments.append('-d')
arguments.append(self._finder.get_patch_classes_cache_dir())
# ref: https://support.microsoft.com/en-us/kb/830473
if is_windows_system():
arguments_length = sum(map(len, arguments))
if arguments_length > 8000:
argument_file_path = os.path.join(self._finder.get_module_cache_dir(), 'javac_args_file')
self.debug('arguments length: {} > 8000, save args to {}'.format(arguments_length, argument_file_path))
if os.path.exists(argument_file_path):
os.remove(argument_file_path)
arguments_content = ' '.join(arguments)
self.debug('javac arguments: ' + arguments_content)
write_file_content(argument_file_path, arguments_content)
arguments = ['@{}'.format(argument_file_path)]
javacargs.extend(arguments)
return javacargs
def _get_apt_related_files(self, filter_tags=None):
path = self._get_apt_related_files_cache_path()
if os.path.exists(path):
return load_json_cache(path)
else:
info_path = os.path.join(self._cache_dir, 'freeline_annotation_info.json')
if os.path.exists(info_path):
info_cache = load_json_cache(info_path)
related_files = []
for anno, files in info_cache.iteritems():
if filter_tags and anno not in filter_tags:
self.debug('ignore annotation: {}'.format(anno))
continue
for info in files:
if info['module'] == self._name or info['module'] in self._module_info['local_module_dep']:
if 'java_path' in info and info['java_path']:
related_files.append(info['java_path'])
write_json_cache(self._get_apt_related_files_cache_path(), related_files)
return related_files
return []
def _append_new_related_files(self):
related_files = self._get_apt_related_files()
def append_files(file_list):
for fpath in file_list:
if fpath and fpath not in related_files:
self.debug('add new related file: {}'.format(fpath))
related_files.append(fpath)
append_files(self._changed_files['src'])
append_files(self._changed_files['apt'])
write_json_cache(self._get_apt_related_files_cache_path(), related_files)
def _get_apt_related_files_cache_path(self):
return os.path.join(self._cache_dir, 'apt_related_files_cache.json')
def run_retrolambda(self):
if self._is_need_javac and self._is_retrolambda_enabled:
lambda_config = self._config['retrolambda'][self._name]
target_dir = self._finder.get_patch_classes_cache_dir()
jar_args = [Builder.get_java(self._config),
'-Dretrolambda.inputDir={}'.format(target_dir),
'-Dretrolambda.outputDir={}'.format(target_dir)]
if lambda_config['supportIncludeFiles']:
files_stat_path = os.path.join(self._cache_dir, self._name, 'lambda_files_stat.json')
include_files = []
if os.path.exists(files_stat_path):
files_stat = load_json_cache(files_stat_path)
else:
files_stat = {}
for dirpath, dirnames, files in os.walk(target_dir):
for fn in files:
fpath = os.path.join(dirpath, fn)
if fpath not in files_stat:
include_files.append(fpath)
self.debug('incremental build new lambda file: {}'.format(fpath))
else:
if os.path.getmtime(fpath) > files_stat[fpath]['mtime']:
include_files.append(fpath)
self.debug('incremental build lambda file: {}'.format(fpath))
include_files_param = os.pathsep.join(include_files)
if len(include_files_param) > 3496:
include_files_path = os.path.join(self._cache_dir, self._name, 'retrolambda_inc.list')
self.__save_parms_to_file(include_files_path, include_files)
jar_args.append('-Dretrolambda.includedFile={}'.format(include_files_path))
else:
jar_args.append('-Dretrolambda.includedFiles={}'.format(include_files_param))
lambda_classpaths = [target_dir, lambda_config['rtJar']]
lambda_classpaths.extend(self._classpaths)
param = os.pathsep.join(lambda_classpaths)
if lambda_config['supportIncludeFiles'] and len(param) > 3496:
classpath_file = os.path.join(self._cache_dir, self._name, 'retrolambda_classpaths.path')
self.__save_parms_to_file(classpath_file, lambda_classpaths)
jar_args.append('-Dretrolambda.classpathFile={}'.format(classpath_file))
else:
jar_args.append('-Dretrolambda.classpath={}'.format(param))
jar_args.append('-cp')
jar_args.append(lambda_config['targetJar'])
jar_args.append(lambda_config['mainClass'])
self.debug('retrolambda exec: ' + ' '.join(jar_args))
output, err, code = cexec(jar_args, callback=None)
if code != 0:
raise FreelineException('retrolambda compile failed.', '{}\n{}'.format(output, err))
if lambda_config['supportIncludeFiles']:
for fpath in include_files:
if fpath not in files_stat:
files_stat[fpath] = {}
files_stat[fpath]['mtime'] = os.path.getmtime(fpath)
write_json_cache(files_stat_path, files_stat)
self.debug('save lambda files stat to {}'.format(files_stat_path))
def __save_parms_to_file(self, path, params):
if os.path.exists(path):
os.remove(path)
content = ''
for param in params:
content += param + '\n'
write_file_content(path, content)
self.debug('save retrolambda params to {}'.format(path))
def _get_res_incremental_dst_path(self, fpath):
if 'assets' + os.sep in fpath:
return os.path.join(self._finder.get_base_gen_dir(), 'assets', 'debug', fpath.split('assets' + os.sep)[1])
elif 'res' + os.sep in fpath:
return os.path.join(self._finder.get_res_dir(), fpath.split('res' + os.sep)[1])
def _parse_changed_list(self):
changed_list = []
for rfile in self._changed_files['res']:
if rfile not in changed_list:
changed_list.append(self._get_res_relative_path(rfile))
for afile in self._changed_files['assets']:
if afile not in changed_list:
changed_list.append(self._get_res_relative_path(afile))
return changed_list
def _get_res_relative_path(self, res):
if res.startswith('res') or res.startswith('AndroidManifest.xml'):
return res
def path_fix(path):
return path if path.endswith(os.sep) else path + os.sep
for respath in self._merged_res_paths:
respath = path_fix(respath)
if res.startswith(respath):
index = respath.strip(os.sep).rfind(os.sep)
if index >= 0:
res_dir_name = respath[index + 1:].strip(os.sep)
relative_path = os.path.join(res_dir_name, res.replace(respath, ''))
self.debug("find relative path: {}".format(relative_path))
return relative_path
self.debug('relative path not found: {}'.format(res))
return None
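    # Illustrative example (hypothetical paths): with '/proj/app/src/main/res/'
    # registered in self._merged_res_paths, a changed file
    # '/proj/app/src/main/res/layout/a.xml' resolves to 'res/layout/a.xml'.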
def __get_freeline_backup_r_dir(self):
dirpath = os.path.join(self._cache_dir, 'freeline-backup-r')
if not os.path.exists(dirpath):
os.makedirs(dirpath)
return dirpath
def __modify_other_modules_r(self, package_name, finder=None):
if not finder:
finder = self._finder
r_path = android_tools.find_r_file(finder.get_dst_r_dir(), package_name=package_name)
if r_path and os.path.exists(r_path):
target_dir = os.path.join(self.__get_freeline_backup_r_dir(), package_name.replace('.', os.sep))
if not os.path.exists(target_dir):
os.makedirs(target_dir)
target_path = os.path.join(target_dir, 'R.java')
if not os.path.exists(target_path):
self.debug('copy {} to {}'.format(r_path, target_path))
shutil.copy(r_path, target_path)
content = get_file_content(target_path)
content = GradleIncBuildInvoker.remove_final_tag(content)
content = GradleIncBuildInvoker.extend_main_r(content, self._config['package'])
content = android_tools.fix_unicode_parse_error(content, target_path)
write_file_content(target_path, content)
return target_path
def __find_res_in_which_module(self, res_path):
for module in self._all_module_info.keys():
# rdir = android_tools.get_res_dir(module)
res_dirs = self._config['project_source_sets'][module]['main_res_directory']
for rdir in res_dirs:
if rdir is not None:
if res_path.startswith(rdir) or rdir in res_path:
return module
return None
@staticmethod
def remove_final_tag(content):
content = content.replace('public final class', 'public class').replace('public static final class',
'public static class')
return content
@staticmethod
def extend_main_r(content, main_package_name):
import re
result = re.findall(r'''public static class (.*) \{''', content)
for tag in result:
content = content.replace('class ' + tag + ' {',
'class ' + tag + ' extends ' + main_package_name + '.R.' + tag + ' {')
return content
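    # Illustrative effect of the two helpers above on a hypothetical R.java line,
    # assuming the main package is 'com.example.app':
    #   'public static final class id {'
    #     -> remove_final_tag -> 'public static class id {'
    #     -> extend_main_r    -> 'public static class id extends com.example.app.R.id {'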
| apache-2.0 |
davidcox/glumpy | demos/demo-heightmap-2.py | 1 | 3540 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2010 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy, glumpy
import OpenGL.GL as gl
class Mesh(object):
def __init__(self, n=64):
self.indices = numpy.zeros((n-1,n-1,4), dtype=numpy.float32)
self.vertices = numpy.zeros((n,n,3), dtype=numpy.float32)
self.texcoords= numpy.zeros((n,n,2), dtype=numpy.float32)
for xi in range(n):
for yi in range(n):
x,y,z = xi/float(n-1), yi/float(n-1), 0
self.vertices[xi,yi] = x-0.5,y-0.5,z
self.texcoords[xi,yi] = x,y
for yi in range(n-1):
for xi in range(n-1):
i = yi*n + xi
self.indices[xi,yi] = i,i+1,i+n+1,i+n
def draw(self):
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY);
gl.glVertexPointerf(self.vertices)
gl.glTexCoordPointerf(self.texcoords)
gl.glDrawElementsus(gl.GL_QUADS, self.indices)
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glDisableClientState(gl.GL_TEXTURE_COORD_ARRAY);
if __name__ == '__main__':
window = glumpy.Window(width=800,height=600)
trackball = glumpy.Trackball(60,30,0.75)
mesh = Mesh(64)
# def func3(x,y):
# return (1-x/2+x**5+y**3)*numpy.exp(-x**2-y**2)
# dx, dy = .01, .01
# x = numpy.arange(-3.0, 3.0, dx, dtype=numpy.float32)
# y = numpy.arange(-3.0, 3.0, dy, dtype=numpy.float32)
# Z = func3(*numpy.meshgrid(x, y))
n = 64.0
X = numpy.empty((n,n), dtype=numpy.float32)
X.flat = numpy.arange(n)*2*numpy.pi/n*2
Y = numpy.empty((n,n), dtype=numpy.float32)
Y.flat = numpy.arange(n)*2*numpy.pi/n*2
Y = numpy.transpose(Y)
Z = numpy.sin(X) + numpy.cos(Y)
I = glumpy.Image(Z, interpolation='bilinear', cmap=glumpy.colormap.Hot,
gridsize= (31.0,31.0,10.0), elevation=0.25)
def draw_background():
viewport = gl.glGetIntegerv(gl.GL_VIEWPORT)
gl.glDisable (gl.GL_LIGHTING)
gl.glDisable (gl.GL_DEPTH_TEST)
gl.glPolygonMode (gl.GL_FRONT_AND_BACK, gl.GL_FILL)
gl.glBegin(gl.GL_QUADS)
#gl.glColor(0.75,0.75,1.0)
gl.glColor(1.0,1.0,0.75)
gl.glVertex(0,0,-1)
gl.glVertex(viewport[2],0,-1)
gl.glColor(1.0,1.0,1.0)
gl.glVertex(viewport[2],viewport[3],0)
gl.glVertex(0,viewport[3],0)
gl.glEnd()
@window.event
def on_draw():
gl.glClearColor(1,1,1,1)
window.clear()
draw_background()
trackball.push()
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glTranslatef(0,0,-0.125)
gl.glColor4f(1,1,1,1)
I.shader.bind(I.texture,I._lut)
mesh.draw()
I.shader.unbind()
trackball.pop()
@window.event
def on_mouse_drag(x, y, dx, dy, button):
trackball.drag_to(x,y,dx,dy)
@window.event
def on_mouse_scroll(x, y, dx, dy):
trackball.zoom_to(x,y,dx,dy)
@window.timer(60.0)
def update(dt):
global X,Y
X += numpy.pi/150.
Y += numpy.pi/200.
Z[...] = numpy.sin(X) + numpy.cos(Y)
I.update()
window.draw()
window.mainloop()
| bsd-3-clause |
zhuwenping/python-for-android | python-modules/twisted/twisted/words/test/test_jabberxmppstringprep.py | 57 | 4450 | # Copyright (c) 2005 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.words.protocols.jabber.xmpp_stringprep import nodeprep, resourceprep, nameprep, crippled
class XMPPStringPrepTest(unittest.TestCase):
"""
The nodeprep stringprep profile is similar to the resourceprep profile,
but does an extra mapping of characters (table B.2) and disallows
more characters (table C.1.1 and eight extra punctuation characters).
Due to this similarity, the resourceprep tests are more extensive, and
    the nodeprep tests only address the mappings and additional restrictions.
The nameprep profile is nearly identical to the nameprep implementation in
L{encodings.idna}, but that implementation assumes the C{UseSTD4ASCIIRules}
flag to be false. This implementation assumes it to be true, and restricts
the allowed set of characters. The tests here only check for the
differences.
"""
def testResourcePrep(self):
self.assertEquals(resourceprep.prepare(u'resource'), u'resource')
self.assertNotEquals(resourceprep.prepare(u'Resource'), u'resource')
self.assertEquals(resourceprep.prepare(u' '), u' ')
if crippled:
return
self.assertEquals(resourceprep.prepare(u'Henry \u2163'), u'Henry IV')
self.assertEquals(resourceprep.prepare(u'foo\xad\u034f\u1806\u180b'
u'bar\u200b\u2060'
u'baz\ufe00\ufe08\ufe0f\ufeff'),
u'foobarbaz')
self.assertEquals(resourceprep.prepare(u'\u00a0'), u' ')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u1680')
self.assertEquals(resourceprep.prepare(u'\u2000'), u' ')
self.assertEquals(resourceprep.prepare(u'\u200b'), u'')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u0010\u007f')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u0085')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u180e')
self.assertEquals(resourceprep.prepare(u'\ufeff'), u'')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\uf123')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U000f1234')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U0010f234')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U0008fffe')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U0010ffff')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\udf42')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\ufffd')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u2ff5')
self.assertEquals(resourceprep.prepare(u'\u0341'), u'\u0301')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u200e')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u202a')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U000e0001')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U000e0042')
self.assertRaises(UnicodeError, resourceprep.prepare, u'foo\u05bebar')
self.assertRaises(UnicodeError, resourceprep.prepare, u'foo\ufd50bar')
#self.assertEquals(resourceprep.prepare(u'foo\ufb38bar'),
# u'foo\u064ebar')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\u06271')
self.assertEquals(resourceprep.prepare(u'\u06271\u0628'),
u'\u06271\u0628')
self.assertRaises(UnicodeError, resourceprep.prepare, u'\U000e0002')
def testNodePrep(self):
self.assertEquals(nodeprep.prepare(u'user'), u'user')
self.assertEquals(nodeprep.prepare(u'User'), u'user')
self.assertRaises(UnicodeError, nodeprep.prepare, u'us&er')
def testNamePrep(self):
self.assertEquals(nameprep.prepare(u'example.com'), u'example.com')
self.assertEquals(nameprep.prepare(u'Example.com'), u'example.com')
self.assertRaises(UnicodeError, nameprep.prepare, u'[email protected]')
self.assertRaises(UnicodeError, nameprep.prepare, u'-example.com')
self.assertRaises(UnicodeError, nameprep.prepare, u'example-.com')
if crippled:
return
self.assertEquals(nameprep.prepare(u'stra\u00dfe.example.com'),
u'strasse.example.com')
| apache-2.0 |
sumanau7/Ele_CC_Sumanau | lib/IPython/extensions/storemagic.py | 9 | 8183 | # -*- coding: utf-8 -*-
"""
%store magic for lightweight persistence.
Stores variables, aliases and macros in IPython's database.
To automatically restore stored variables at startup, add this to your
:file:`ipython_config.py` file::
c.StoreMagics.autorestore = True
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2012, The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import inspect, os, sys, textwrap
# Our own
from IPython.core.error import UsageError
from IPython.core.magic import Magics, magics_class, line_magic
from traitlets import Bool
from IPython.utils.py3compat import string_types
#-----------------------------------------------------------------------------
# Functions and classes
#-----------------------------------------------------------------------------
def restore_aliases(ip):
staliases = ip.db.get('stored_aliases', {})
for k,v in staliases.items():
#print "restore alias",k,v # dbg
#self.alias_table[k] = v
ip.alias_manager.define_alias(k,v)
def refresh_variables(ip):
db = ip.db
for key in db.keys('autorestore/*'):
# strip autorestore
justkey = os.path.basename(key)
try:
obj = db[key]
except KeyError:
print("Unable to restore variable '%s', ignoring (use %%store -d to forget!)" % justkey)
print("The error was:", sys.exc_info()[0])
else:
#print "restored",justkey,"=",obj #dbg
ip.user_ns[justkey] = obj
def restore_dhist(ip):
ip.user_ns['_dh'] = ip.db.get('dhist',[])
def restore_data(ip):
refresh_variables(ip)
restore_aliases(ip)
restore_dhist(ip)
@magics_class
class StoreMagics(Magics):
"""Lightweight persistence for python variables.
Provides the %store magic."""
autorestore = Bool(False, config=True, help=
"""If True, any %store-d variables will be automatically restored
when IPython starts.
"""
)
def __init__(self, shell):
super(StoreMagics, self).__init__(shell=shell)
self.shell.configurables.append(self)
if self.autorestore:
restore_data(self.shell)
@line_magic
def store(self, parameter_s=''):
"""Lightweight persistence for python variables.
Example::
In [1]: l = ['hello',10,'world']
In [2]: %store l
In [3]: exit
(IPython session is closed and started again...)
ville@badger:~$ ipython
In [1]: l
NameError: name 'l' is not defined
In [2]: %store -r
In [3]: l
Out[3]: ['hello', 10, 'world']
Usage:
* ``%store`` - Show list of all variables and their current
values
* ``%store spam`` - Store the *current* value of the variable spam
to disk
* ``%store -d spam`` - Remove the variable and its value from storage
* ``%store -z`` - Remove all variables from storage
* ``%store -r`` - Refresh all variables from store (overwrite
current vals)
* ``%store -r spam bar`` - Refresh specified variables from store
(delete current val)
* ``%store foo >a.txt`` - Store value of foo to new file a.txt
* ``%store foo >>a.txt`` - Append value of foo to file a.txt
It should be noted that if you change the value of a variable, you
need to %store it again if you want to persist the new value.
Note also that the variables will need to be pickleable; most basic
python types can be safely %store'd.
Also aliases can be %store'd across sessions.
"""
opts,argsl = self.parse_options(parameter_s,'drz',mode='string')
args = argsl.split(None,1)
ip = self.shell
db = ip.db
# delete
if 'd' in opts:
try:
todel = args[0]
except IndexError:
raise UsageError('You must provide the variable to forget')
else:
try:
del db['autorestore/' + todel]
except:
raise UsageError("Can't delete variable '%s'" % todel)
# reset
elif 'z' in opts:
for k in db.keys('autorestore/*'):
del db[k]
elif 'r' in opts:
if args:
for arg in args:
try:
obj = db['autorestore/' + arg]
except KeyError:
print("no stored variable %s" % arg)
else:
ip.user_ns[arg] = obj
else:
restore_data(ip)
# run without arguments -> list variables & values
elif not args:
vars = db.keys('autorestore/*')
vars.sort()
if vars:
size = max(map(len, vars))
else:
size = 0
print('Stored variables and their in-db values:')
fmt = '%-'+str(size)+'s -> %s'
get = db.get
for var in vars:
justkey = os.path.basename(var)
                # print the first 50 characters of each variable's repr
print(fmt % (justkey, repr(get(var, '<unavailable>'))[:50]))
# default action - store the variable
else:
# %store foo >file.txt or >>file.txt
if len(args) > 1 and args[1].startswith('>'):
fnam = os.path.expanduser(args[1].lstrip('>').lstrip())
if args[1].startswith('>>'):
fil = open(fnam, 'a')
else:
fil = open(fnam, 'w')
obj = ip.ev(args[0])
print("Writing '%s' (%s) to file '%s'." % (args[0],
obj.__class__.__name__, fnam))
if not isinstance (obj, string_types):
from pprint import pprint
pprint(obj, fil)
else:
fil.write(obj)
if not obj.endswith('\n'):
fil.write('\n')
fil.close()
return
# %store foo
try:
obj = ip.user_ns[args[0]]
except KeyError:
# it might be an alias
name = args[0]
try:
cmd = ip.alias_manager.retrieve_alias(name)
except ValueError:
raise UsageError("Unknown variable '%s'" % name)
staliases = db.get('stored_aliases',{})
staliases[name] = cmd
db['stored_aliases'] = staliases
print("Alias stored: %s (%s)" % (name, cmd))
return
else:
modname = getattr(inspect.getmodule(obj), '__name__', '')
if modname == '__main__':
print(textwrap.dedent("""\
Warning:%s is %s
Proper storage of interactively declared classes (or instances
of those classes) is not possible! Only instances
of classes in real modules on file system can be %%store'd.
""" % (args[0], obj) ))
return
#pickled = pickle.dumps(obj)
db[ 'autorestore/' + args[0] ] = obj
print("Stored '%s' (%s)" % (args[0], obj.__class__.__name__))
def load_ipython_extension(ip):
"""Load the extension in IPython."""
ip.register_magics(StoreMagics)
| apache-2.0 |
h2oai/h2o | py/testdir_release/c3/test_c3_exec_copy.py | 9 | 4111 | import unittest, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_glm, h2o_common, h2o_exec as h2e
import h2o_print
DO_GLM = True
LOG_MACHINE_STATS = False
# fails during exec env push ..second import has to do a key delete (the first)
DO_DOUBLE_IMPORT = False
print "Assumes you ran ../build_for_clone.py in this directory"
print "Using h2o-nodes.json. Also the sandbox dir"
class releaseTest(h2o_common.ReleaseCommon, unittest.TestCase):
def sub_c3_nongz_fvec_long(self, csvFilenameList):
# a kludge
h2o.setup_benchmark_log()
bucket = 'home-0xdiag-datasets'
importFolderPath = 'manyfiles-nflx'
print "Using nongz'ed files in", importFolderPath
if LOG_MACHINE_STATS:
benchmarkLogging = ['cpu', 'disk', 'network']
else:
benchmarkLogging = []
pollTimeoutSecs = 120
retryDelaySecs = 10
for trial, (csvFilepattern, csvFilename, totalBytes, timeoutSecs) in enumerate(csvFilenameList):
csvPathname = importFolderPath + "/" + csvFilepattern
if DO_DOUBLE_IMPORT:
(importResult, importPattern) = h2i.import_only(bucket=bucket, path=csvPathname, schema='local')
importFullList = importResult['files']
importFailList = importResult['fails']
print "\n Problem if this is not empty: importFailList:", h2o.dump_json(importFailList)
# this accumulates performance stats into a benchmark log over multiple runs
# good for tracking whether we're getting slower or faster
h2o.cloudPerfH2O.change_logfile(csvFilename)
h2o.cloudPerfH2O.message("")
h2o.cloudPerfH2O.message("Parse " + csvFilename + " Start--------------------------------")
start = time.time()
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='local',
hex_key="A.hex", timeoutSecs=timeoutSecs,
retryDelaySecs=retryDelaySecs,
pollTimeoutSecs=pollTimeoutSecs,
benchmarkLogging=benchmarkLogging)
elapsed = time.time() - start
print "Parse #", trial, "completed in", "%6.2f" % elapsed, "seconds.", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "Parse result['destination_key']:", parseResult['destination_key']
h2o_cmd.columnInfoFromInspect(parseResult['destination_key'], exceptionOnMissingValues=False)
fileMBS = (totalBytes/1e6)/elapsed
msg = '{!s} jvms, {!s}GB heap, {:s} {:s} {:6.2f} MB/sec for {:.2f} secs'.format(
len(h2o.nodes), h2o.nodes[0].java_heap_GB, csvFilepattern, csvFilename, fileMBS, elapsed)
print msg
h2o.cloudPerfH2O.message(msg)
h2o_cmd.checkKeyDistribution()
# are the unparsed keys slowing down exec?
h2i.delete_keys_at_all_nodes(pattern="manyfile")
execExpr = 'B.hex=A.hex'
h2e.exec_expr(execExpr=execExpr, timeoutSecs=180)
h2o_cmd.checkKeyDistribution()
execExpr = 'C.hex=B.hex'
h2e.exec_expr(execExpr=execExpr, timeoutSecs=180)
h2o_cmd.checkKeyDistribution()
execExpr = 'D.hex=C.hex'
h2e.exec_expr(execExpr=execExpr, timeoutSecs=180)
h2o_cmd.checkKeyDistribution()
#***********************************************************************
# these will be tracked individual by jenkins, which is nice
#***********************************************************************
def test_c3_exec_copy(self):
avgMichalSize = 237270000
csvFilenameList= [
("*[1][0-4][0-9].dat", "file_50_A.dat", 50 * avgMichalSize, 1800),
]
self.sub_c3_nongz_fvec_long(csvFilenameList)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
marcuskelly/recover | Lib/site-packages/alembic/testing/plugin/bootstrap.py | 43 | 1646 | """
Bootstrapper for nose/pytest plugins.
The entire rationale for this system is to get the modules in plugin/
imported without importing all of the supporting library, so that we can
set up things for testing before coverage starts.
The rationale for all of plugin/ being *in* the supporting library in the
first place is so that the testing and plugin suite is available to other
libraries, mainly external SQLAlchemy and Alembic dialects, to make use
of the same test environment and standard suites available to
SQLAlchemy/Alembic themselves without the need to ship/install a separate
package outside of SQLAlchemy.
NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
this should be removable when Alembic targets SQLAlchemy 1.0.0.
"""
import os
import sys
bootstrap_file = locals()['bootstrap_file']
to_bootstrap = locals()['to_bootstrap']
def load_file_as_module(name):
path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name)
if sys.version_info >= (3, 3):
from importlib import machinery
mod = machinery.SourceFileLoader(name, path).load_module()
else:
import imp
mod = imp.load_source(name, path)
return mod
if to_bootstrap == "pytest":
sys.modules["alembic_plugin_base"] = load_file_as_module("plugin_base")
sys.modules["alembic_pytestplugin"] = load_file_as_module("pytestplugin")
elif to_bootstrap == "nose":
sys.modules["alembic_plugin_base"] = load_file_as_module("plugin_base")
sys.modules["alembic_noseplugin"] = load_file_as_module("noseplugin")
else:
raise Exception("unknown bootstrap: %s" % to_bootstrap) # noqa
| bsd-2-clause |
tgsd96/gargnotes | venv/lib/python2.7/site-packages/django/contrib/admin/utils.py | 62 | 16223 | from __future__ import unicode_literals
import datetime
import decimal
from django.contrib.auth import get_permission_codename
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils import six
from django.utils.translation import ungettext
from django.core.urlresolvers import reverse, NoReverseMatch
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
field_name = lookup_path.split('__', 1)[0]
field = opts.get_field_by_name(field_name)[0]
if hasattr(field, 'get_path_info') and any(path.m2m for path in field.get_path_info()):
return True
return False
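# Illustrative example (hypothetical models): for a Book model with a
# ManyToManyField named 'authors', lookup_needs_distinct(Book._meta, 'authors__name')
# returns True, since traversing the m2m relation can yield duplicate rows.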
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and the string literals 'false' and '0'
if key.endswith('__isnull'):
if value.lower() in ('', 'false', '0'):
value = False
else:
value = True
return value
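# Illustrative examples (not part of the original module):
#   prepare_lookup_value('id__in', '1,2,3')        -> ['1', '2', '3']
#   prepare_lookup_value('pub_date__isnull', '0')  -> False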
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' and similarly problematic characters.
Similar to urllib.quote, except that the quoting is slightly different so
that it doesn't get automatically unquoted by the Web browser.
"""
if not isinstance(s, six.string_types):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten(fields):
"""Returns a list which is a single level of flattening of the
original list."""
flat = []
for field in fields:
if isinstance(field, (list, tuple)):
flat.extend(field)
else:
flat.append(field)
return flat
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
field_names.extend(
flatten(opts['fields'])
)
return field_names
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
if has_admin:
try:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.model_name),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
# Change url doesn't exist -- don't display link to edit
return no_edit_link
p = '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{0}: <a href="{1}">{2}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return no_edit_link
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source=None, source_attr=None, **kwargs):
for obj in objs:
if source_attr and not source_attr.endswith('+'):
related_name = source_attr % {
'class': source._meta.model_name,
'app_label': source._meta.app_label,
}
self.add_edge(getattr(obj, related_name), obj)
else:
self.add_edge(None, obj)
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
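    # Illustrative shape of the result (hypothetical data): deleting an Author
    # that has two Books might nest as ['Author: a', ['Book: b1', 'Book: b2']].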
def can_fast_delete(self, *args, **kwargs):
"""
We always want to load the objects into memory so that we can display
them to the user in confirm page.
"""
return False
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = opts.get_field(name)
except models.FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and
hasattr(model_admin, name) and
not name == '__str__' and
not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
Returns a sensible label for a field name. The name can be a callable,
property (but not created with @property decorator) or the name of an
object's attribute, as well as a genuine fields. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = model._meta.get_field_by_name(name)[0]
try:
label = field.verbose_name
except AttributeError:
# field is likely a RelatedObject
label = field.opts.verbose_name
except models.FieldDoesNotExist:
if name == "__unicode__":
label = force_text(model._meta.verbose_name)
attr = six.text_type
elif name == "__str__":
label = force_str(model._meta.verbose_name)
attr = bytes
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif (isinstance(attr, property) and
hasattr(attr, "fget") and
hasattr(attr.fget, "short_description")):
label = attr.fget.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
help_text = ""
try:
field_data = model._meta.get_field_by_name(name)
except models.FieldDoesNotExist:
pass
else:
field = field_data[0]
if hasattr(field, 'help_text'):
help_text = field.help_text
return smart_text(help_text)
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
else:
return smart_text(value)
def display_for_value(value, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if boolean:
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_text(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if hasattr(field, 'get_path_info'):
return field.get_path_info()[-1].to_opts.model
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field, model, direct, m2m = parent._meta.get_field_by_name(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces) - 1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
if direct:
related_name = field.related_query_name()
parent = field.rel.to
else:
related_name = field.field.name
parent = field.model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field_by_name(piece)[0])
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
def get_limit_choices_to_from_path(model, path):
""" Return Q object for limiting choices if applicable.
If final model in path is linked via a ForeignKey or ManyToManyField which
has a ``limit_choices_to`` attribute, return it as a Q object.
"""
fields = get_fields_from_path(model, path)
fields = remove_trailing_data_field(fields)
get_limit_choices_to = (
fields and hasattr(fields[-1], 'rel') and
getattr(fields[-1].rel, 'get_limit_choices_to', None))
if not get_limit_choices_to:
return models.Q() # empty Q
limit_choices_to = get_limit_choices_to()
if isinstance(limit_choices_to, models.Q):
return limit_choices_to # already a Q
else:
return models.Q(**limit_choices_to) # convert dict to Q
| mit |
hollabaq86/haikuna-matata | env/lib/python2.7/site-packages/flask/signals.py | 123 | 2209 | # -*- coding: utf-8 -*-
"""
flask.signals
~~~~~~~~~~~~~
Implements signals based on blinker if available, otherwise
falls silently back to a noop.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
signals_available = False
try:
from blinker import Namespace
signals_available = True
except ImportError:
class Namespace(object):
def signal(self, name, doc=None):
return _FakeSignal(name, doc)
class _FakeSignal(object):
"""If blinker is unavailable, create a fake class with the same
interface that allows sending of signals but will fail with an
error on anything else. Instead of doing anything on send, it
will just ignore the arguments and do nothing instead.
"""
def __init__(self, name, doc=None):
self.name = name
self.__doc__ = doc
def _fail(self, *args, **kwargs):
raise RuntimeError('signalling support is unavailable '
'because the blinker library is '
'not installed.')
send = lambda *a, **kw: None
connect = disconnect = has_receivers_for = receivers_for = \
temporarily_connected_to = connected_to = _fail
del _fail
# The namespace for code signals. If you are not Flask code, do
# not put signals in here. Create your own namespace instead.
_signals = Namespace()
# Core signals. For usage examples grep the source code or consult
# the API documentation in docs/api.rst as well as docs/signals.rst
template_rendered = _signals.signal('template-rendered')
before_render_template = _signals.signal('before-render-template')
request_started = _signals.signal('request-started')
request_finished = _signals.signal('request-finished')
request_tearing_down = _signals.signal('request-tearing-down')
got_request_exception = _signals.signal('got-request-exception')
appcontext_tearing_down = _signals.signal('appcontext-tearing-down')
appcontext_pushed = _signals.signal('appcontext-pushed')
appcontext_popped = _signals.signal('appcontext-popped')
message_flashed = _signals.signal('message-flashed')
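# Minimal usage sketch (assumes the blinker library is installed, so the signals
# above are live blinker signals rather than the _FakeSignal fallback):
#
#   from flask import Flask, template_rendered
#
#   def log_template(sender, template, context, **extra):
#       print('rendered %s' % template.name)
#
#   app = Flask(__name__)
#   template_rendered.connect(log_template, app)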
| mit |
CoolCloud/aliyun-cli | aliyuncli/advance/userConfigHandler.py | 11 | 1850 | __author__ = 'zhaoyang.szy'
import os,sys
import response
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import aliyunExtensionCliHandler
class ConfigCmd:
showConfig = 'showConfig'
importConfig = 'importConfig'
exportConfig = 'exportConfig'
name = '--filename'
class ConfigHandler:
def __init__(self):
self.extensionCliHandler = aliyunExtensionCliHandler.aliyunExtensionCliHandler()
def getConfigHandlerCmd(self):
return [ConfigCmd.showConfig,ConfigCmd.importConfig,ConfigCmd.exportConfig]
def getConfigHandlerOptions(self):
return [ConfigCmd.name]
def showConfig(self):
_credentialsPath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.credentials)
_configurePath = os.path.join(self.extensionCliHandler.aliyunConfigurePath,self.extensionCliHandler.configure)
config = dict()
configContent = dict()
        credentialsContent = dict()
if os.path.exists(_configurePath):
for line in open(_configurePath):
line = line.strip('\n')
if line.find('=') > 0:
list = line.split("=",1)
configContent[list[0]] = list[1]
else:
pass
config['configure'] = configContent
if os.path.exists(_credentialsPath):
for line in open(_credentialsPath):
line = line.strip('\n')
if line.find('=') > 0:
list = line.split("=",1)
credentialsContent[list[0]] = list[1]
else:
pass
        config['credentials'] = credentialsContent
response.display_response("showConfigure",config,'table')
    def importConfig(self):
        pass
    def exportConfig(self):
        pass
if __name__ == "__main__":
handler = ConfigHandler()
handler.showConfig()
| apache-2.0 |
40223144/2015cdafinal | static/Brython3.1.0-20150301-090019/Lib/stat.py | 765 | 4304 | """Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# Indices for stat struct members in the tuple returned by os.stat()
ST_MODE = 0
ST_INO = 1
ST_DEV = 2
ST_NLINK = 3
ST_UID = 4
ST_GID = 5
ST_SIZE = 6
ST_ATIME = 7
ST_MTIME = 8
ST_CTIME = 9
# Extract bits from the mode
def S_IMODE(mode):
"""Return the portion of the file's mode that can be set by
os.chmod().
"""
return mode & 0o7777
def S_IFMT(mode):
"""Return the portion of the file's mode that describes the
file type.
"""
return mode & 0o170000
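# Illustrative split of a mode value (e.g. a regular file with mode 0o100644):
#   S_IFMT(0o100644)  -> 0o100000  (S_IFREG)
#   S_IMODE(0o100644) -> 0o644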
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFBLK = 0o060000 # block device
S_IFREG = 0o100000 # regular file
S_IFIFO = 0o010000 # fifo (named pipe)
S_IFLNK = 0o120000 # symbolic link
S_IFSOCK = 0o140000 # socket file
# Functions to test for each file type
def S_ISDIR(mode):
"""Return True if mode is from a directory."""
return S_IFMT(mode) == S_IFDIR
def S_ISCHR(mode):
"""Return True if mode is from a character special device file."""
return S_IFMT(mode) == S_IFCHR
def S_ISBLK(mode):
"""Return True if mode is from a block special device file."""
return S_IFMT(mode) == S_IFBLK
def S_ISREG(mode):
"""Return True if mode is from a regular file."""
return S_IFMT(mode) == S_IFREG
def S_ISFIFO(mode):
"""Return True if mode is from a FIFO (named pipe)."""
return S_IFMT(mode) == S_IFIFO
def S_ISLNK(mode):
"""Return True if mode is from a symbolic link."""
return S_IFMT(mode) == S_IFLNK
def S_ISSOCK(mode):
"""Return True if mode is from a socket."""
return S_IFMT(mode) == S_IFSOCK
# Names for permission bits
S_ISUID = 0o4000 # set UID bit
S_ISGID = 0o2000 # set GID bit
S_ENFMT = S_ISGID # file locking enforcement
S_ISVTX = 0o1000 # sticky bit
S_IREAD = 0o0400 # Unix V7 synonym for S_IRUSR
S_IWRITE = 0o0200 # Unix V7 synonym for S_IWUSR
S_IEXEC = 0o0100 # Unix V7 synonym for S_IXUSR
S_IRWXU = 0o0700 # mask for owner permissions
S_IRUSR = 0o0400 # read by owner
S_IWUSR = 0o0200 # write by owner
S_IXUSR = 0o0100 # execute by owner
S_IRWXG = 0o0070 # mask for group permissions
S_IRGRP = 0o0040 # read by group
S_IWGRP = 0o0020 # write by group
S_IXGRP = 0o0010 # execute by group
S_IRWXO = 0o0007 # mask for others (not in group) permissions
S_IROTH = 0o0004 # read by others
S_IWOTH = 0o0002 # write by others
S_IXOTH = 0o0001 # execute by others
# Names for file flags
UF_NODUMP = 0x00000001 # do not dump file
UF_IMMUTABLE = 0x00000002 # file may not be changed
UF_APPEND = 0x00000004 # file may only be appended to
UF_OPAQUE = 0x00000008 # directory is opaque when viewed through a union stack
UF_NOUNLINK = 0x00000010 # file may not be renamed or deleted
UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed
UF_HIDDEN = 0x00008000 # OS X: file should not be displayed
SF_ARCHIVED = 0x00010000 # file may be archived
SF_IMMUTABLE = 0x00020000 # file may not be changed
SF_APPEND = 0x00040000 # file may only be appended to
SF_NOUNLINK = 0x00100000 # file may not be renamed or deleted
SF_SNAPSHOT = 0x00200000 # file is a snapshot file
_filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((S_IRUSR, "r"),),
((S_IWUSR, "w"),),
((S_IXUSR|S_ISUID, "s"),
(S_ISUID, "S"),
(S_IXUSR, "x")),
((S_IRGRP, "r"),),
((S_IWGRP, "w"),),
((S_IXGRP|S_ISGID, "s"),
(S_ISGID, "S"),
(S_IXGRP, "x")),
((S_IROTH, "r"),),
((S_IWOTH, "w"),),
((S_IXOTH|S_ISVTX, "t"),
(S_ISVTX, "T"),
(S_IXOTH, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form '-rwxrwxrwx'."""
perm = []
for table in _filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
| gpl-3.0 |
ksachs/invenio | modules/bibformat/lib/elements/bfe_url.py | 39 | 1484 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints full-text URLs
"""
__revision__ = "$Id$"
def format_element(bfo, style, separator='; '):
"""
This is the default format for formatting full-text URLs.
    @param style: CSS class of the link
    @param separator: the separator between urls.
"""
urls_u = bfo.fields("8564_u")
if style != "":
        style = 'class="' + style + '" '
urls = ['<a '+ style + \
'href="' + url + '">' + url +'</a>'
for url in urls_u]
return separator.join(urls)
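# For illustration (hypothetical field values): with two URLs in 8564_u and
# style="fulltext", the element renders roughly as
#   <a class="fulltext" href="http://ex.org/a.pdf">http://ex.org/a.pdf</a>; <a ...>...</a>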
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
| gpl-2.0 |
bright-sparks/chromium-spacewalk | build/win/reorder-imports.py | 103 | 1807 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import optparse
import os
import shutil
import subprocess
import sys
def reorder_imports(input_dir, output_dir, architecture):
"""Run swapimports.exe on the initial chrome.exe, and write to the output
directory. Also copy over any related files that might be needed
(pdbs, manifests etc.).
"""
input_image = os.path.join(input_dir, 'chrome.exe')
output_image = os.path.join(output_dir, 'chrome.exe')
swap_exe = os.path.join(
__file__,
'..\\..\\..\\third_party\\syzygy\\binaries\\exe\\swapimport.exe')
args = [swap_exe, '--input-image=%s' % input_image,
'--output-image=%s' % output_image, '--overwrite', '--no-logo']
if architecture == 'x64':
    args.append('--x64')
    args.append('chrome_elf.dll')
subprocess.call(args)
for fname in glob.iglob(os.path.join(input_dir, 'chrome.exe.*')):
shutil.copy(fname, os.path.join(output_dir, os.path.basename(fname)))
return 0
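# Example invocation (hypothetical build paths):
#   python reorder-imports.py -i out\Release -o out\Release\final -a x64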
def main(argv):
usage = 'reorder_imports.py -i <input_dir> -o <output_dir> -a <target_arch>'
parser = optparse.OptionParser(usage=usage)
parser.add_option('-i', '--input', help='reorder chrome.exe in DIR',
metavar='DIR')
parser.add_option('-o', '--output', help='write new chrome.exe to DIR',
metavar='DIR')
parser.add_option('-a', '--arch', help='architecture of build (optional)',
default='ia32')
opts, args = parser.parse_args()
if not opts.input or not opts.output:
    parser.error('Please provide an input and output directory')
return reorder_imports(opts.input, opts.output, opts.arch)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
slz/delidded-kernel-5.1.1-note3 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or a network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
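# Example invocation (illustrative; assumes the relevant net/irq/napi/skb
# tracepoints were recorded first):
#   perf script -s netdev-times.py tx dev=eth0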
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
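# e.g. diff_msec(1000000, 4500000) == 3.5  (nsec timestamps -> msec interval)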
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
ChronoMonochrome/android_external_chromium_org | native_client_sdk/src/build_tools/verify_ppapi.py | 62 | 5922 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script for PPAPI's PRESUBMIT.py to detect if additions or removals of
PPAPI interfaces have been propagated to the Native Client libraries (.dsc
files).
For example, if a user adds "ppapi/c/foo.h", we check that the interface has
been added to "native_client_sdk/src/libraries/ppapi/library.dsc".
"""
import optparse
import os
import sys
from build_paths import PPAPI_DIR, SRC_DIR, SDK_LIBRARY_DIR
import parse_dsc
class VerifyException(Exception):
def __init__(self, lib_path, expected, unexpected):
self.expected = expected
self.unexpected = unexpected
msg = 'In %s:\n' % lib_path
if expected:
msg += ' these files are missing and should be added:\n'
for filename in sorted(expected):
msg += ' %s\n' % filename
if unexpected:
msg += ' these files no longer exist and should be removed:\n'
for filename in sorted(unexpected):
msg += ' %s\n' % filename
Exception.__init__(self, msg)
def PartitionFiles(filenames):
c_filenames = set()
cpp_filenames = set()
private_filenames = set()
for filename in filenames:
if os.path.splitext(filename)[1] not in ('.cc', '.h'):
continue
parts = filename.split(os.sep)
if 'private' in filename:
if 'flash' in filename:
continue
private_filenames.add(filename)
elif parts[0:2] == ['ppapi', 'c']:
      if len(parts) >= 3 and parts[2] in ('documentation', 'trusted'):
continue
c_filenames.add(filename)
elif (parts[0:2] == ['ppapi', 'cpp'] or
parts[0:2] == ['ppapi', 'utility']):
      if len(parts) >= 3 and parts[2] in ('documentation', 'trusted'):
continue
cpp_filenames.add(filename)
else:
continue
return {
'ppapi': c_filenames,
'ppapi_cpp': cpp_filenames,
'ppapi_cpp_private': private_filenames
}
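# For illustration (example paths only), PartitionFiles() buckets changes as:
#   ppapi/c/ppb_core.h          -> 'ppapi'
#   ppapi/cpp/instance.cc       -> 'ppapi_cpp'
#   ppapi/c/private/ppb_x.h     -> 'ppapi_cpp_private'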
def GetDirectoryList(directory_path, relative_to):
result = []
for root, _, files in os.walk(directory_path):
rel_root = os.path.relpath(root, relative_to)
if rel_root == '.':
rel_root = ''
for base_name in files:
result.append(os.path.join(rel_root, base_name))
return result
def GetDscSourcesAndHeaders(dsc):
result = []
for headers_info in dsc.get('HEADERS', []):
result.extend(headers_info['FILES'])
for targets_info in dsc.get('TARGETS', []):
result.extend(targets_info['SOURCES'])
return result
def GetChangedAndRemovedFilenames(modified_filenames, directory_list):
changed = set()
removed = set()
directory_list_set = set(directory_list)
for filename in modified_filenames:
if filename in directory_list_set:
# We can't know if a file was added (that would require knowing the
# previous state of the working directory). Instead, we assume that a
# changed file may have been added, and check it accordingly.
changed.add(filename)
else:
removed.add(filename)
return changed, removed
def GetDscFilenameFromLibraryName(lib_name):
return os.path.join(SDK_LIBRARY_DIR, lib_name, 'library.dsc')
def Verify(dsc_filename, dsc_sources_and_headers, changed_filenames,
removed_filenames):
expected_filenames = set()
unexpected_filenames = set()
for filename in changed_filenames:
basename = os.path.basename(filename)
if basename not in dsc_sources_and_headers:
expected_filenames.add(filename)
for filename in removed_filenames:
basename = os.path.basename(filename)
if basename in dsc_sources_and_headers:
unexpected_filenames.add(filename)
if expected_filenames or unexpected_filenames:
raise VerifyException(dsc_filename, expected_filenames,
unexpected_filenames)
def VerifyOrPrintError(dsc_filename, dsc_sources_and_headers, changed_filenames,
removed_filenames, is_private=False):
try:
Verify(dsc_filename, dsc_sources_and_headers, changed_filenames,
removed_filenames)
except VerifyException as e:
should_fail = True
if is_private and e.expected:
# For ppapi_cpp_private, we don't fail if there are expected filenames...
# we may not want to include them. We still want to fail if there are
# unexpected filenames, though.
sys.stderr.write('>>> WARNING: private interface files changed. '
'Should they be added to the Native Client SDK? <<<\n')
if not e.unexpected:
should_fail = False
sys.stderr.write(str(e) + '\n')
if should_fail:
return False
return True
def main(args):
usage = '%prog <file>...'
description = __doc__
parser = optparse.OptionParser(usage=usage, description=description)
args = parser.parse_args(args)[1]
if not args:
parser.error('Expected a PPAPI header or source file.')
retval = 0
lib_files = PartitionFiles(args)
directory_list = GetDirectoryList(PPAPI_DIR, relative_to=SRC_DIR)
for lib_name, filenames in lib_files.iteritems():
if not filenames:
continue
changed_filenames, removed_filenames = \
GetChangedAndRemovedFilenames(filenames, directory_list)
dsc_filename = GetDscFilenameFromLibraryName(lib_name)
dsc = parse_dsc.LoadProject(dsc_filename)
dsc_sources_and_headers = GetDscSourcesAndHeaders(dsc)
# Use the relative path to the .dsc to make the error messages shorter.
rel_dsc_filename = os.path.relpath(dsc_filename, SRC_DIR)
is_private = lib_name == 'ppapi_cpp_private'
if not VerifyOrPrintError(rel_dsc_filename, dsc_sources_and_headers,
changed_filenames, removed_filenames,
is_private=is_private):
retval = 1
return retval
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
jtimberman/omnibus | source/libxml2-2.7.7/python/generator.py | 24 | 47525 | #!/usr/bin/python -u
#
# generate python wrappers from the XML API description
#
functions = {}
enums = {} # { enumType: { enumConstant: enumValue } }
import os
import sys
import string
if __name__ == "__main__":
# launched as a script
srcPref = os.path.dirname(sys.argv[0])
else:
# imported
srcPref = os.path.dirname(__file__)
#######################################################################
#
# That part is purely the API acquisition phase from the
# XML API description
#
#######################################################################
import os
import xml.sax
debug = 0
def getparser():
# Attach parser to an unmarshalling object. return both objects.
target = docParser()
parser = xml.sax.make_parser()
parser.setContentHandler(target)
return parser, target
class docParser(xml.sax.handler.ContentHandler):
def __init__(self):
self._methodname = None
self._data = []
self.in_function = 0
self.startElement = self.start
self.endElement = self.end
self.characters = self.data
def close(self):
if debug:
print "close"
def getmethodname(self):
return self._methodname
def data(self, text):
if debug:
print "data %s" % text
self._data.append(text)
def start(self, tag, attrs):
if debug:
print "start %s, %s" % (tag, attrs)
if tag == 'function':
self._data = []
self.in_function = 1
self.function = None
self.function_cond = None
self.function_args = []
self.function_descr = None
self.function_return = None
self.function_file = None
if attrs.has_key('name'):
self.function = attrs['name']
if attrs.has_key('file'):
self.function_file = attrs['file']
elif tag == 'cond':
self._data = []
elif tag == 'info':
self._data = []
elif tag == 'arg':
if self.in_function == 1:
self.function_arg_name = None
self.function_arg_type = None
self.function_arg_info = None
if attrs.has_key('name'):
self.function_arg_name = attrs['name']
if attrs.has_key('type'):
self.function_arg_type = attrs['type']
if attrs.has_key('info'):
self.function_arg_info = attrs['info']
elif tag == 'return':
if self.in_function == 1:
self.function_return_type = None
self.function_return_info = None
self.function_return_field = None
if attrs.has_key('type'):
self.function_return_type = attrs['type']
if attrs.has_key('info'):
self.function_return_info = attrs['info']
if attrs.has_key('field'):
self.function_return_field = attrs['field']
elif tag == 'enum':
enum(attrs['type'],attrs['name'],attrs['value'])
def end(self, tag):
if debug:
print "end %s" % tag
if tag == 'function':
if self.function != None:
function(self.function, self.function_descr,
self.function_return, self.function_args,
self.function_file, self.function_cond)
self.in_function = 0
elif tag == 'arg':
if self.in_function == 1:
self.function_args.append([self.function_arg_name,
self.function_arg_type,
self.function_arg_info])
elif tag == 'return':
if self.in_function == 1:
self.function_return = [self.function_return_type,
self.function_return_info,
self.function_return_field]
elif tag == 'info':
str = ''
for c in self._data:
str = str + c
if self.in_function == 1:
self.function_descr = str
elif tag == 'cond':
str = ''
for c in self._data:
str = str + c
if self.in_function == 1:
self.function_cond = str
def function(name, desc, ret, args, file, cond):
functions[name] = (desc, ret, args, file, cond)
def enum(type, name, value):
if not enums.has_key(type):
enums[type] = {}
enums[type][name] = value
#######################################################################
#
# Some filtering rules to drop functions/types which should not
# be exposed as-is on the Python interface
#
#######################################################################
skipped_modules = {
'xmlmemory': None,
'DOCBparser': None,
'SAX': None,
'hash': None,
'list': None,
'threads': None,
# 'xpointer': None,
}
skipped_types = {
'int *': "usually a return type",
'xmlSAXHandlerPtr': "not the proper interface for SAX",
'htmlSAXHandlerPtr': "not the proper interface for SAX",
'xmlRMutexPtr': "thread specific, skipped",
'xmlMutexPtr': "thread specific, skipped",
'xmlGlobalStatePtr': "thread specific, skipped",
'xmlListPtr': "internal representation not suitable for python",
'xmlBufferPtr': "internal representation not suitable for python",
'FILE *': None,
}
#######################################################################
#
# Table of remapping to/from the python type or class to the C
# counterpart.
#
#######################################################################
py_types = {
'void': (None, None, None, None),
'int': ('i', None, "int", "int"),
'long': ('l', None, "long", "long"),
'double': ('d', None, "double", "double"),
'unsigned int': ('i', None, "int", "int"),
'xmlChar': ('c', None, "int", "int"),
'unsigned char *': ('z', None, "charPtr", "char *"),
'char *': ('z', None, "charPtr", "char *"),
'const char *': ('z', None, "charPtrConst", "const char *"),
'xmlChar *': ('z', None, "xmlCharPtr", "xmlChar *"),
'const xmlChar *': ('z', None, "xmlCharPtrConst", "const xmlChar *"),
'xmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlDtdPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlDtdPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlDtd *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlDtd *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlAttrPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlAttrPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlAttr *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlAttr *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlEntityPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlEntityPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlEntity *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlEntity *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlElementPtr': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'const xmlElementPtr': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'xmlElement *': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'const xmlElement *': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'xmlAttributePtr': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'const xmlAttributePtr': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'xmlAttribute *': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'const xmlAttribute *': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'xmlNsPtr': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'const xmlNsPtr': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'xmlNs *': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'const xmlNs *': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'xmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const xmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'xmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const xmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'htmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const htmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'htmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const htmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'htmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const htmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'htmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const htmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlXPathContextPtr': ('O', "xmlXPathContext", "xmlXPathContextPtr", "xmlXPathContextPtr"),
'xmlXPathContext *': ('O', "xpathContext", "xmlXPathContextPtr", "xmlXPathContextPtr"),
'xmlXPathParserContextPtr': ('O', "xmlXPathParserContext", "xmlXPathParserContextPtr", "xmlXPathParserContextPtr"),
'xmlParserCtxtPtr': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'xmlParserCtxt *': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'htmlParserCtxtPtr': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'htmlParserCtxt *': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'xmlValidCtxtPtr': ('O', "ValidCtxt", "xmlValidCtxtPtr", "xmlValidCtxtPtr"),
'xmlCatalogPtr': ('O', "catalog", "xmlCatalogPtr", "xmlCatalogPtr"),
'FILE *': ('O', "File", "FILEPtr", "FILE *"),
'xmlURIPtr': ('O', "URI", "xmlURIPtr", "xmlURIPtr"),
'xmlErrorPtr': ('O', "Error", "xmlErrorPtr", "xmlErrorPtr"),
'xmlOutputBufferPtr': ('O', "outputBuffer", "xmlOutputBufferPtr", "xmlOutputBufferPtr"),
'xmlParserInputBufferPtr': ('O', "inputBuffer", "xmlParserInputBufferPtr", "xmlParserInputBufferPtr"),
'xmlRegexpPtr': ('O', "xmlReg", "xmlRegexpPtr", "xmlRegexpPtr"),
'xmlTextReaderLocatorPtr': ('O', "xmlTextReaderLocator", "xmlTextReaderLocatorPtr", "xmlTextReaderLocatorPtr"),
'xmlTextReaderPtr': ('O', "xmlTextReader", "xmlTextReaderPtr", "xmlTextReaderPtr"),
'xmlRelaxNGPtr': ('O', "relaxNgSchema", "xmlRelaxNGPtr", "xmlRelaxNGPtr"),
'xmlRelaxNGParserCtxtPtr': ('O', "relaxNgParserCtxt", "xmlRelaxNGParserCtxtPtr", "xmlRelaxNGParserCtxtPtr"),
'xmlRelaxNGValidCtxtPtr': ('O', "relaxNgValidCtxt", "xmlRelaxNGValidCtxtPtr", "xmlRelaxNGValidCtxtPtr"),
'xmlSchemaPtr': ('O', "Schema", "xmlSchemaPtr", "xmlSchemaPtr"),
'xmlSchemaParserCtxtPtr': ('O', "SchemaParserCtxt", "xmlSchemaParserCtxtPtr", "xmlSchemaParserCtxtPtr"),
'xmlSchemaValidCtxtPtr': ('O', "SchemaValidCtxt", "xmlSchemaValidCtxtPtr", "xmlSchemaValidCtxtPtr"),
}
py_return_types = {
'xmlXPathObjectPtr': ('O', "foo", "xmlXPathObjectPtr", "xmlXPathObjectPtr"),
}
unknown_types = {}
foreign_encoding_args = (
'htmlCreateMemoryParserCtxt',
'htmlCtxtReadMemory',
'htmlParseChunk',
'htmlReadMemory',
'xmlCreateMemoryParserCtxt',
'xmlCtxtReadMemory',
'xmlCtxtResetPush',
'xmlParseChunk',
'xmlParseMemory',
'xmlReadMemory',
'xmlRecoverMemory',
)
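# For the functions listed above, the first string argument is emitted with
# the "t#" ParseTuple format (buffer pointer + explicit length) instead of
# "z", so byte buffers in foreign encodings cross into C without being
# truncated at an embedded NUL.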
#######################################################################
#
# This part writes the C <-> Python stubs libxml2-py.[ch] and
# the table libxml2-export.c to add when registrering the Python module
#
#######################################################################
# Class methods which are written by hand in libxml.c but the Python-level
# code is still automatically generated (so they are not in skip_function()).
skip_impl = (
'xmlSaveFileTo',
'xmlSaveFormatFileTo',
)
def skip_function(name):
if name[0:12] == "xmlXPathWrap":
return 1
if name == "xmlFreeParserCtxt":
return 1
if name == "xmlCleanupParser":
return 1
if name == "xmlFreeTextReader":
return 1
# if name[0:11] == "xmlXPathNew":
# return 1
# the next function is defined in libxml.c
if name == "xmlRelaxNGFreeValidCtxt":
return 1
if name == "xmlFreeValidCtxt":
return 1
if name == "xmlSchemaFreeValidCtxt":
return 1
#
# Those are skipped because the Const version is used of the bindings
# instead.
#
if name == "xmlTextReaderBaseUri":
return 1
if name == "xmlTextReaderLocalName":
return 1
if name == "xmlTextReaderName":
return 1
if name == "xmlTextReaderNamespaceUri":
return 1
if name == "xmlTextReaderPrefix":
return 1
if name == "xmlTextReaderXmlLang":
return 1
if name == "xmlTextReaderValue":
return 1
if name == "xmlOutputBufferClose": # handled by by the superclass
return 1
if name == "xmlOutputBufferFlush": # handled by by the superclass
return 1
if name == "xmlErrMemory":
return 1
if name == "xmlValidBuildContentModel":
return 1
if name == "xmlValidateElementDecl":
return 1
if name == "xmlValidateAttributeDecl":
return 1
return 0
def print_function_wrapper(name, output, export, include):
global py_types
global unknown_types
global functions
global skipped_modules
try:
(desc, ret, args, file, cond) = functions[name]
except:
print "failed to get function %s infos"
return
if skipped_modules.has_key(file):
return 0
if skip_function(name) == 1:
return 0
if name in skip_impl:
# Don't delete the function entry in the caller.
return 1
c_call = ""
format=""
format_args=""
c_args=""
c_return=""
c_convert=""
num_bufs=0
for arg in args:
        # Strip any "const " qualifier so the py_types lookup below matches
if arg[1][0:6] == "const ":
arg[1] = arg[1][6:]
c_args = c_args + " %s %s;\n" % (arg[1], arg[0])
if py_types.has_key(arg[1]):
(f, t, n, c) = py_types[arg[1]]
if (f == 'z') and (name in foreign_encoding_args) and (num_bufs == 0):
f = 't#'
if f != None:
format = format + f
if t != None:
format_args = format_args + ", &pyobj_%s" % (arg[0])
c_args = c_args + " PyObject *pyobj_%s;\n" % (arg[0])
c_convert = c_convert + \
" %s = (%s) Py%s_Get(pyobj_%s);\n" % (arg[0],
arg[1], t, arg[0])
else:
format_args = format_args + ", &%s" % (arg[0])
if f == 't#':
format_args = format_args + ", &py_buffsize%d" % num_bufs
c_args = c_args + " int py_buffsize%d;\n" % num_bufs
num_bufs = num_bufs + 1
if c_call != "":
c_call = c_call + ", "
c_call = c_call + "%s" % (arg[0])
else:
if skipped_types.has_key(arg[1]):
return 0
if unknown_types.has_key(arg[1]):
lst = unknown_types[arg[1]]
lst.append(name)
else:
unknown_types[arg[1]] = [name]
return -1
if format != "":
format = format + ":%s" % (name)
if ret[0] == 'void':
if file == "python_accessor":
if args[1][1] == "char *" or args[1][1] == "xmlChar *":
c_call = "\n if (%s->%s != NULL) xmlFree(%s->%s);\n" % (
args[0][0], args[1][0], args[0][0], args[1][0])
c_call = c_call + " %s->%s = (%s)xmlStrdup((const xmlChar *)%s);\n" % (args[0][0],
args[1][0], args[1][1], args[1][0])
else:
c_call = "\n %s->%s = %s;\n" % (args[0][0], args[1][0],
args[1][0])
else:
c_call = "\n %s(%s);\n" % (name, c_call)
ret_convert = " Py_INCREF(Py_None);\n return(Py_None);\n"
elif py_types.has_key(ret[0]):
(f, t, n, c) = py_types[ret[0]]
c_return = " %s c_retval;\n" % (ret[0])
if file == "python_accessor" and ret[2] != None:
c_call = "\n c_retval = %s->%s;\n" % (args[0][0], ret[2])
else:
c_call = "\n c_retval = %s(%s);\n" % (name, c_call)
ret_convert = " py_retval = libxml_%sWrap((%s) c_retval);\n" % (n,c)
ret_convert = ret_convert + " return(py_retval);\n"
elif py_return_types.has_key(ret[0]):
(f, t, n, c) = py_return_types[ret[0]]
c_return = " %s c_retval;\n" % (ret[0])
c_call = "\n c_retval = %s(%s);\n" % (name, c_call)
ret_convert = " py_retval = libxml_%sWrap((%s) c_retval);\n" % (n,c)
ret_convert = ret_convert + " return(py_retval);\n"
else:
if skipped_types.has_key(ret[0]):
return 0
if unknown_types.has_key(ret[0]):
lst = unknown_types[ret[0]]
lst.append(name)
else:
unknown_types[ret[0]] = [name]
return -1
if cond != None and cond != "":
include.write("#if %s\n" % cond)
export.write("#if %s\n" % cond)
output.write("#if %s\n" % cond)
include.write("PyObject * ")
include.write("libxml_%s(PyObject *self, PyObject *args);\n" % (name))
export.write(" { (char *)\"%s\", libxml_%s, METH_VARARGS, NULL },\n" %
(name, name))
if file == "python":
# Those have been manually generated
if cond != None and cond != "":
include.write("#endif\n")
export.write("#endif\n")
output.write("#endif\n")
return 1
if file == "python_accessor" and ret[0] != "void" and ret[2] is None:
# Those have been manually generated
if cond != None and cond != "":
include.write("#endif\n")
export.write("#endif\n")
output.write("#endif\n")
return 1
output.write("PyObject *\n")
output.write("libxml_%s(PyObject *self ATTRIBUTE_UNUSED," % (name))
output.write(" PyObject *args")
if format == "":
output.write(" ATTRIBUTE_UNUSED")
output.write(") {\n")
if ret[0] != 'void':
output.write(" PyObject *py_retval;\n")
if c_return != "":
output.write(c_return)
if c_args != "":
output.write(c_args)
if format != "":
output.write("\n if (!PyArg_ParseTuple(args, (char *)\"%s\"%s))\n" %
(format, format_args))
output.write(" return(NULL);\n")
if c_convert != "":
output.write(c_convert)
output.write(c_call)
output.write(ret_convert)
output.write("}\n\n")
if cond != None and cond != "":
include.write("#endif /* %s */\n" % cond)
export.write("#endif /* %s */\n" % cond)
output.write("#endif /* %s */\n" % cond)
return 1
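# Sketch (whitespace approximate) of what the generator above emits for a
# simple API entry such as "xmlDocPtr xmlParseFile(const char *filename)":
#
#   PyObject *
#   libxml_xmlParseFile(PyObject *self ATTRIBUTE_UNUSED, PyObject *args) {
#       PyObject *py_retval;
#       xmlDocPtr c_retval;
#       char * filename;
#
#       if (!PyArg_ParseTuple(args, (char *)"z:xmlParseFile", &filename))
#           return(NULL);
#
#       c_retval = xmlParseFile(filename);
#       py_retval = libxml_xmlDocPtrWrap((xmlDocPtr) c_retval);
#       return(py_retval);
#   }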
def buildStubs():
global py_types
global py_return_types
global unknown_types
try:
f = open(os.path.join(srcPref,"libxml2-api.xml"))
data = f.read()
(parser, target) = getparser()
parser.feed(data)
parser.close()
except IOError, msg:
try:
f = open(os.path.join(srcPref,"..","doc","libxml2-api.xml"))
data = f.read()
(parser, target) = getparser()
parser.feed(data)
parser.close()
except IOError, msg:
print file, ":", msg
sys.exit(1)
n = len(functions.keys())
print "Found %d functions in libxml2-api.xml" % (n)
py_types['pythonObject'] = ('O', "pythonObject", "pythonObject", "pythonObject")
try:
f = open(os.path.join(srcPref,"libxml2-python-api.xml"))
data = f.read()
(parser, target) = getparser()
parser.feed(data)
parser.close()
except IOError, msg:
print file, ":", msg
print "Found %d functions in libxml2-python-api.xml" % (
len(functions.keys()) - n)
nb_wrap = 0
failed = 0
skipped = 0
include = open("libxml2-py.h", "w")
include.write("/* Generated */\n\n")
export = open("libxml2-export.c", "w")
export.write("/* Generated */\n\n")
wrapper = open("libxml2-py.c", "w")
wrapper.write("/* Generated */\n\n")
wrapper.write("#include <Python.h>\n")
wrapper.write("#include <libxml/xmlversion.h>\n")
wrapper.write("#include <libxml/tree.h>\n")
wrapper.write("#include <libxml/xmlschemastypes.h>\n")
wrapper.write("#include \"libxml_wrap.h\"\n")
wrapper.write("#include \"libxml2-py.h\"\n\n")
for function in functions.keys():
ret = print_function_wrapper(function, wrapper, export, include)
if ret < 0:
failed = failed + 1
del functions[function]
if ret == 0:
skipped = skipped + 1
del functions[function]
if ret == 1:
nb_wrap = nb_wrap + 1
include.close()
export.close()
wrapper.close()
print "Generated %d wrapper functions, %d failed, %d skipped\n" % (nb_wrap,
failed, skipped)
print "Missing type converters: "
for type in unknown_types.keys():
print "%s:%d " % (type, len(unknown_types[type])),
print
#######################################################################
#
# This part writes part of the Python front-end classes based on
# mapping rules between types and classes and also based on function
# renaming to get consistent function names at the Python level
#
#######################################################################
#
# The types automatically remapped to generated classes
#
classes_type = {
"xmlNodePtr": ("._o", "xmlNode(_obj=%s)", "xmlNode"),
"xmlNode *": ("._o", "xmlNode(_obj=%s)", "xmlNode"),
"xmlDocPtr": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"xmlDocPtr *": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"htmlDocPtr": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"htmlxmlDocPtr *": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"xmlAttrPtr": ("._o", "xmlAttr(_obj=%s)", "xmlAttr"),
"xmlAttr *": ("._o", "xmlAttr(_obj=%s)", "xmlAttr"),
"xmlNsPtr": ("._o", "xmlNs(_obj=%s)", "xmlNs"),
"xmlNs *": ("._o", "xmlNs(_obj=%s)", "xmlNs"),
"xmlDtdPtr": ("._o", "xmlDtd(_obj=%s)", "xmlDtd"),
"xmlDtd *": ("._o", "xmlDtd(_obj=%s)", "xmlDtd"),
"xmlEntityPtr": ("._o", "xmlEntity(_obj=%s)", "xmlEntity"),
"xmlEntity *": ("._o", "xmlEntity(_obj=%s)", "xmlEntity"),
"xmlElementPtr": ("._o", "xmlElement(_obj=%s)", "xmlElement"),
"xmlElement *": ("._o", "xmlElement(_obj=%s)", "xmlElement"),
"xmlAttributePtr": ("._o", "xmlAttribute(_obj=%s)", "xmlAttribute"),
"xmlAttribute *": ("._o", "xmlAttribute(_obj=%s)", "xmlAttribute"),
"xmlXPathContextPtr": ("._o", "xpathContext(_obj=%s)", "xpathContext"),
"xmlXPathContext *": ("._o", "xpathContext(_obj=%s)", "xpathContext"),
"xmlXPathParserContext *": ("._o", "xpathParserContext(_obj=%s)", "xpathParserContext"),
"xmlXPathParserContextPtr": ("._o", "xpathParserContext(_obj=%s)", "xpathParserContext"),
"xmlParserCtxtPtr": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"xmlParserCtxt *": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"htmlParserCtxtPtr": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"htmlParserCtxt *": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"xmlValidCtxtPtr": ("._o", "ValidCtxt(_obj=%s)", "ValidCtxt"),
"xmlCatalogPtr": ("._o", "catalog(_obj=%s)", "catalog"),
"xmlURIPtr": ("._o", "URI(_obj=%s)", "URI"),
"xmlErrorPtr": ("._o", "Error(_obj=%s)", "Error"),
"xmlOutputBufferPtr": ("._o", "outputBuffer(_obj=%s)", "outputBuffer"),
"xmlParserInputBufferPtr": ("._o", "inputBuffer(_obj=%s)", "inputBuffer"),
"xmlRegexpPtr": ("._o", "xmlReg(_obj=%s)", "xmlReg"),
"xmlTextReaderLocatorPtr": ("._o", "xmlTextReaderLocator(_obj=%s)", "xmlTextReaderLocator"),
"xmlTextReaderPtr": ("._o", "xmlTextReader(_obj=%s)", "xmlTextReader"),
'xmlRelaxNGPtr': ('._o', "relaxNgSchema(_obj=%s)", "relaxNgSchema"),
'xmlRelaxNGParserCtxtPtr': ('._o', "relaxNgParserCtxt(_obj=%s)", "relaxNgParserCtxt"),
'xmlRelaxNGValidCtxtPtr': ('._o', "relaxNgValidCtxt(_obj=%s)", "relaxNgValidCtxt"),
'xmlSchemaPtr': ("._o", "Schema(_obj=%s)", "Schema"),
'xmlSchemaParserCtxtPtr': ("._o", "SchemaParserCtxt(_obj=%s)", "SchemaParserCtxt"),
'xmlSchemaValidCtxtPtr': ("._o", "SchemaValidCtxt(_obj=%s)", "SchemaValidCtxt"),
}
converter_type = {
"xmlXPathObjectPtr": "xpathObjectRet(%s)",
}
primary_classes = ["xmlNode", "xmlDoc"]
classes_ancestor = {
"xmlNode" : "xmlCore",
"xmlDtd" : "xmlNode",
"xmlDoc" : "xmlNode",
"xmlAttr" : "xmlNode",
"xmlNs" : "xmlNode",
"xmlEntity" : "xmlNode",
"xmlElement" : "xmlNode",
"xmlAttribute" : "xmlNode",
"outputBuffer": "ioWriteWrapper",
"inputBuffer": "ioReadWrapper",
"parserCtxt": "parserCtxtCore",
"xmlTextReader": "xmlTextReaderCore",
"ValidCtxt": "ValidCtxtCore",
"SchemaValidCtxt": "SchemaValidCtxtCore",
"relaxNgValidCtxt": "relaxNgValidCtxtCore",
}
classes_destructors = {
"parserCtxt": "xmlFreeParserCtxt",
"catalog": "xmlFreeCatalog",
"URI": "xmlFreeURI",
# "outputBuffer": "xmlOutputBufferClose",
"inputBuffer": "xmlFreeParserInputBuffer",
"xmlReg": "xmlRegFreeRegexp",
"xmlTextReader": "xmlFreeTextReader",
"relaxNgSchema": "xmlRelaxNGFree",
"relaxNgParserCtxt": "xmlRelaxNGFreeParserCtxt",
"relaxNgValidCtxt": "xmlRelaxNGFreeValidCtxt",
"Schema": "xmlSchemaFree",
"SchemaParserCtxt": "xmlSchemaFreeParserCtxt",
"SchemaValidCtxt": "xmlSchemaFreeValidCtxt",
"ValidCtxt": "xmlFreeValidCtxt",
}
functions_noexcept = {
"xmlHasProp": 1,
"xmlHasNsProp": 1,
"xmlDocSetRootElement": 1,
"xmlNodeGetNs": 1,
"xmlNodeGetNsDefs": 1,
"xmlNextElementSibling": 1,
"xmlPreviousElementSibling": 1,
"xmlFirstElementChild": 1,
"xmlLastElementChild": 1,
}
reference_keepers = {
"xmlTextReader": [('inputBuffer', 'input')],
"relaxNgValidCtxt": [('relaxNgSchema', 'schema')],
"SchemaValidCtxt": [('Schema', 'schema')],
}
function_classes = {}
function_classes["None"] = []
def nameFixup(name, classe, type, file):
listname = classe + "List"
ll = len(listname)
l = len(classe)
if name[0:l] == listname:
func = name[l:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:12] == "xmlParserGet" and file == "python_accessor":
func = name[12:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:12] == "xmlParserSet" and file == "python_accessor":
func = name[12:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:10] == "xmlNodeGet" and file == "python_accessor":
func = name[10:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:9] == "xmlURIGet" and file == "python_accessor":
func = name[9:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:9] == "xmlURISet" and file == "python_accessor":
func = name[6:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:11] == "xmlErrorGet" and file == "python_accessor":
func = name[11:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:17] == "xmlXPathParserGet" and file == "python_accessor":
func = name[17:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:11] == "xmlXPathGet" and file == "python_accessor":
func = name[11:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:11] == "xmlXPathSet" and file == "python_accessor":
func = name[8:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:15] == "xmlOutputBuffer" and file != "python":
func = name[15:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:20] == "xmlParserInputBuffer" and file != "python":
func = name[20:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:9] == "xmlRegexp" and file == "xmlregexp":
func = "regexp" + name[9:]
elif name[0:6] == "xmlReg" and file == "xmlregexp":
func = "regexp" + name[6:]
elif name[0:20] == "xmlTextReaderLocator" and file == "xmlreader":
func = name[20:]
elif name[0:18] == "xmlTextReaderConst" and file == "xmlreader":
func = name[18:]
elif name[0:13] == "xmlTextReader" and file == "xmlreader":
func = name[13:]
elif name[0:12] == "xmlReaderNew" and file == "xmlreader":
func = name[9:]
elif name[0:11] == "xmlACatalog":
func = name[11:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:l] == classe:
func = name[l:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:7] == "libxml_":
func = name[7:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:6] == "xmlGet":
func = name[6:]
func = string.lower(func[0:1]) + func[1:]
elif name[0:3] == "xml":
func = name[3:]
func = string.lower(func[0:1]) + func[1:]
else:
func = name
if func[0:5] == "xPath":
func = "xpath" + func[5:]
elif func[0:4] == "xPtr":
func = "xpointer" + func[4:]
elif func[0:8] == "xInclude":
func = "xinclude" + func[8:]
elif func[0:2] == "iD":
func = "ID" + func[2:]
elif func[0:3] == "uRI":
func = "URI" + func[3:]
elif func[0:4] == "uTF8":
func = "UTF8" + func[4:]
elif func[0:3] == 'sAX':
func = "SAX" + func[3:]
return func
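# e.g. nameFixup("xmlNodeGetContent", "xmlNode", "xmlNodePtr", "tree")
# returns "getContent": the class prefix is stripped and the first letter
# of the remainder is lower-cased, giving consistent Python method names.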
def functionCompare(info1, info2):
(index1, func1, name1, ret1, args1, file1) = info1
(index2, func2, name2, ret2, args2, file2) = info2
if file1 == file2:
if func1 < func2:
return -1
if func1 > func2:
return 1
if file1 == "python_accessor":
return -1
if file2 == "python_accessor":
return 1
if file1 < file2:
return -1
if file1 > file2:
return 1
return 0
def writeDoc(name, args, indent, output):
if functions[name][0] is None or functions[name][0] == "":
return
val = functions[name][0]
val = string.replace(val, "NULL", "None")
output.write(indent)
output.write('"""')
while len(val) > 60:
if val[0] == " ":
val = val[1:]
continue
str = val[0:60]
i = string.rfind(str, " ")
if i < 0:
i = 60
str = val[0:i]
val = val[i:]
output.write(str)
output.write('\n ')
output.write(indent)
output.write(val)
output.write(' """\n')
def buildWrappers():
global ctypes
global py_types
global py_return_types
global unknown_types
global functions
global function_classes
global classes_type
global classes_list
    global converter_type
    global primary_classes
    global classes_ancestor
global classes_destructors
global functions_noexcept
for type in classes_type.keys():
function_classes[classes_type[type][2]] = []
#
# Build the list of C types to look for ordered to start
# with primary classes
#
ctypes = []
classes_list = []
ctypes_processed = {}
classes_processed = {}
for classe in primary_classes:
classes_list.append(classe)
classes_processed[classe] = ()
for type in classes_type.keys():
tinfo = classes_type[type]
if tinfo[2] == classe:
ctypes.append(type)
ctypes_processed[type] = ()
for type in classes_type.keys():
if ctypes_processed.has_key(type):
continue
tinfo = classes_type[type]
if not classes_processed.has_key(tinfo[2]):
classes_list.append(tinfo[2])
classes_processed[tinfo[2]] = ()
ctypes.append(type)
ctypes_processed[type] = ()
for name in functions.keys():
found = 0
(desc, ret, args, file, cond) = functions[name]
for type in ctypes:
classe = classes_type[type][2]
if name[0:3] == "xml" and len(args) >= 1 and args[0][1] == type:
found = 1
func = nameFixup(name, classe, type, file)
info = (0, func, name, ret, args, file)
function_classes[classe].append(info)
elif name[0:3] == "xml" and len(args) >= 2 and args[1][1] == type \
and file != "python_accessor":
found = 1
func = nameFixup(name, classe, type, file)
info = (1, func, name, ret, args, file)
function_classes[classe].append(info)
elif name[0:4] == "html" and len(args) >= 1 and args[0][1] == type:
found = 1
func = nameFixup(name, classe, type, file)
info = (0, func, name, ret, args, file)
function_classes[classe].append(info)
elif name[0:4] == "html" and len(args) >= 2 and args[1][1] == type \
and file != "python_accessor":
found = 1
func = nameFixup(name, classe, type, file)
info = (1, func, name, ret, args, file)
function_classes[classe].append(info)
if found == 1:
continue
if name[0:8] == "xmlXPath":
continue
if name[0:6] == "xmlStr":
continue
if name[0:10] == "xmlCharStr":
continue
func = nameFixup(name, "None", file, file)
info = (0, func, name, ret, args, file)
function_classes['None'].append(info)
classes = open("libxml2class.py", "w")
txt = open("libxml2class.txt", "w")
txt.write(" Generated Classes for libxml2-python\n\n")
txt.write("#\n# Global functions of the module\n#\n\n")
if function_classes.has_key("None"):
flist = function_classes["None"]
flist.sort(functionCompare)
oldfile = ""
for info in flist:
(index, func, name, ret, args, file) = info
if file != oldfile:
classes.write("#\n# Functions from module %s\n#\n\n" % file)
txt.write("\n# functions from module %s\n" % file)
oldfile = file
classes.write("def %s(" % func)
txt.write("%s()\n" % func)
n = 0
for arg in args:
if n != 0:
classes.write(", ")
classes.write("%s" % arg[0])
n = n + 1
classes.write("):\n")
writeDoc(name, args, ' ', classes)
for arg in args:
if classes_type.has_key(arg[1]):
classes.write(" if %s is None: %s__o = None\n" %
(arg[0], arg[0]))
classes.write(" else: %s__o = %s%s\n" %
(arg[0], arg[0], classes_type[arg[1]][0]))
if ret[0] != "void":
classes.write(" ret = ")
else:
classes.write(" ")
classes.write("libxml2mod.%s(" % name)
n = 0
for arg in args:
if n != 0:
classes.write(", ")
classes.write("%s" % arg[0])
if classes_type.has_key(arg[1]):
classes.write("__o")
n = n + 1
classes.write(")\n")
if ret[0] != "void":
if classes_type.has_key(ret[0]):
#
# Raise an exception
#
if functions_noexcept.has_key(name):
classes.write(" if ret is None:return None\n")
elif string.find(name, "URI") >= 0:
classes.write(
" if ret is None:raise uriError('%s() failed')\n"
% (name))
elif string.find(name, "XPath") >= 0:
classes.write(
" if ret is None:raise xpathError('%s() failed')\n"
% (name))
elif string.find(name, "Parse") >= 0:
classes.write(
" if ret is None:raise parserError('%s() failed')\n"
% (name))
else:
classes.write(
" if ret is None:raise treeError('%s() failed')\n"
% (name))
classes.write(" return ")
classes.write(classes_type[ret[0]][1] % ("ret"))
classes.write("\n")
else:
classes.write(" return ret\n")
classes.write("\n")
txt.write("\n\n#\n# Set of classes of the module\n#\n\n")
for classname in classes_list:
if classname == "None":
pass
else:
if classes_ancestor.has_key(classname):
txt.write("\n\nClass %s(%s)\n" % (classname,
classes_ancestor[classname]))
classes.write("class %s(%s):\n" % (classname,
classes_ancestor[classname]))
classes.write(" def __init__(self, _obj=None):\n")
if classes_ancestor[classname] == "xmlCore" or \
classes_ancestor[classname] == "xmlNode":
classes.write(" if type(_obj).__name__ != ")
classes.write("'PyCObject':\n")
classes.write(" raise TypeError, ")
classes.write("'%s needs a PyCObject argument'\n" % \
classname)
if reference_keepers.has_key(classname):
rlist = reference_keepers[classname]
for ref in rlist:
classes.write(" self.%s = None\n" % ref[1])
classes.write(" self._o = _obj\n")
classes.write(" %s.__init__(self, _obj=_obj)\n\n" % (
classes_ancestor[classname]))
if classes_ancestor[classname] == "xmlCore" or \
classes_ancestor[classname] == "xmlNode":
classes.write(" def __repr__(self):\n")
format = "<%s (%%s) object at 0x%%x>" % (classname)
classes.write(" return \"%s\" %% (self.name, long(pos_id (self)))\n\n" % (
format))
else:
txt.write("Class %s()\n" % (classname))
classes.write("class %s:\n" % (classname))
classes.write(" def __init__(self, _obj=None):\n")
if reference_keepers.has_key(classname):
list = reference_keepers[classname]
for ref in list:
classes.write(" self.%s = None\n" % ref[1])
classes.write(" if _obj != None:self._o = _obj;return\n")
classes.write(" self._o = None\n\n")
destruct=None
if classes_destructors.has_key(classname):
classes.write(" def __del__(self):\n")
classes.write(" if self._o != None:\n")
classes.write(" libxml2mod.%s(self._o)\n" %
classes_destructors[classname])
classes.write(" self._o = None\n\n")
destruct=classes_destructors[classname]
flist = function_classes[classname]
flist.sort(functionCompare)
oldfile = ""
for info in flist:
(index, func, name, ret, args, file) = info
#
            # Do not expose the class destructors as methods,
            # to avoid double frees
#
if name == destruct:
continue
if file != oldfile:
if file == "python_accessor":
classes.write(" # accessors for %s\n" % (classname))
txt.write(" # accessors\n")
else:
classes.write(" #\n")
classes.write(" # %s functions from module %s\n" % (
classname, file))
txt.write("\n # functions from module %s\n" % file)
classes.write(" #\n\n")
oldfile = file
classes.write(" def %s(self" % func)
txt.write(" %s()\n" % func)
n = 0
for arg in args:
if n != index:
classes.write(", %s" % arg[0])
n = n + 1
classes.write("):\n")
writeDoc(name, args, ' ', classes)
n = 0
for arg in args:
if classes_type.has_key(arg[1]):
if n != index:
classes.write(" if %s is None: %s__o = None\n" %
(arg[0], arg[0]))
classes.write(" else: %s__o = %s%s\n" %
(arg[0], arg[0], classes_type[arg[1]][0]))
n = n + 1
if ret[0] != "void":
classes.write(" ret = ")
else:
classes.write(" ")
classes.write("libxml2mod.%s(" % name)
n = 0
for arg in args:
if n != 0:
classes.write(", ")
if n != index:
classes.write("%s" % arg[0])
if classes_type.has_key(arg[1]):
classes.write("__o")
else:
classes.write("self")
if classes_type.has_key(arg[1]):
classes.write(classes_type[arg[1]][0])
n = n + 1
classes.write(")\n")
if ret[0] != "void":
if classes_type.has_key(ret[0]):
#
# Raise an exception
#
if functions_noexcept.has_key(name):
classes.write(
" if ret is None:return None\n")
elif string.find(name, "URI") >= 0:
classes.write(
" if ret is None:raise uriError('%s() failed')\n"
% (name))
elif string.find(name, "XPath") >= 0:
classes.write(
" if ret is None:raise xpathError('%s() failed')\n"
% (name))
elif string.find(name, "Parse") >= 0:
classes.write(
" if ret is None:raise parserError('%s() failed')\n"
% (name))
else:
classes.write(
" if ret is None:raise treeError('%s() failed')\n"
% (name))
#
# generate the returned class wrapper for the object
#
classes.write(" __tmp = ")
classes.write(classes_type[ret[0]][1] % ("ret"))
classes.write("\n")
#
                    # Sometimes one needs to keep references to the source
                    # class in the returned class object.
                    # See reference_keepers for the list.
#
tclass = classes_type[ret[0]][2]
if reference_keepers.has_key(tclass):
list = reference_keepers[tclass]
for pref in list:
if pref[0] == classname:
classes.write(" __tmp.%s = self\n" %
pref[1])
#
# return the class
#
classes.write(" return __tmp\n")
elif converter_type.has_key(ret[0]):
#
# Raise an exception
#
if functions_noexcept.has_key(name):
classes.write(
" if ret is None:return None")
elif string.find(name, "URI") >= 0:
classes.write(
" if ret is None:raise uriError('%s() failed')\n"
% (name))
elif string.find(name, "XPath") >= 0:
classes.write(
" if ret is None:raise xpathError('%s() failed')\n"
% (name))
elif string.find(name, "Parse") >= 0:
classes.write(
" if ret is None:raise parserError('%s() failed')\n"
% (name))
else:
classes.write(
" if ret is None:raise treeError('%s() failed')\n"
% (name))
classes.write(" return ")
classes.write(converter_type[ret[0]] % ("ret"))
classes.write("\n")
else:
classes.write(" return ret\n")
classes.write("\n")
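    # Similarly, an instance method emitted above unwraps self with its
    # converter suffix (e.g. "._o"), rewraps class-typed results, and copies
    # back any registered reference keepers. A sketch with hypothetical names:
    #
    #     def children(self):
    #         ret = libxml2mod.xmlGetChildren(self._o)
    #         if ret is None:raise treeError('xmlGetChildren() failed')
    #         __tmp = xmlNode(_obj=ret)
    #         __tmp.doc = self  # emitted only for reference_keepers entries
    #         return __tmp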
#
# Generate enum constants
#
for type,enum in enums.items():
classes.write("# %s\n" % type)
items = enum.items()
items.sort(lambda i1,i2: cmp(long(i1[1]),long(i2[1])))
for name,value in items:
classes.write("%s = %s\n" % (name,value))
classes.write("\n")
txt.close()
classes.close()
buildStubs()
buildWrappers()
| apache-2.0 |
alexthered/kienhoc-platform | common/lib/xmodule/xmodule/open_ended_grading_classes/peer_grading_service.py | 107 | 6601 | import logging
import dogstats_wrapper as dog_stats_api
from .grading_service_module import GradingService
from opaque_keys.edx.keys import UsageKey
log = logging.getLogger(__name__)
class PeerGradingService(GradingService):
"""
Interface with the grading controller for peer grading
"""
METRIC_NAME = 'edxapp.open_ended_grading.peer_grading_service'
def __init__(self, config, render_template):
config['render_template'] = render_template
super(PeerGradingService, self).__init__(config)
self.url = config['url'] + config['peer_grading']
self.login_url = self.url + '/login/'
self.get_next_submission_url = self.url + '/get_next_submission/'
self.save_grade_url = self.url + '/save_grade/'
self.is_student_calibrated_url = self.url + '/is_student_calibrated/'
self.show_calibration_essay_url = self.url + '/show_calibration_essay/'
self.save_calibration_essay_url = self.url + '/save_calibration_essay/'
self.get_problem_list_url = self.url + '/get_problem_list/'
self.get_notifications_url = self.url + '/get_notifications/'
self.get_data_for_location_url = self.url + '/get_data_for_location/'
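        # For example (hypothetical config values): with
        # config['url'] = 'http://grader' and
        # config['peer_grading'] = '/peer_grading', save_grade_url ends up
        # as 'http://grader/peer_grading/save_grade/'.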
def get_data_for_location(self, problem_location, student_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
params = {'location': problem_location, 'student_id': student_id}
result = self.get(self.get_data_for_location_url, params)
self._record_result('get_data_for_location', result)
for key in result.keys():
if key in ('success', 'error', 'version'):
continue
dog_stats_api.histogram(
self._metric_name('get_data_for_location.{}'.format(key)),
result[key],
)
return result
def get_next_submission(self, problem_location, grader_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
result = self._render_rubric(self.get(
self.get_next_submission_url,
{
'location': problem_location,
'grader_id': grader_id
}
))
self._record_result('get_next_submission', result)
return result
def save_grade(self, **kwargs):
data = kwargs
data.update({'rubric_scores_complete': True})
result = self.post(self.save_grade_url, data)
self._record_result('save_grade', result)
return result
def is_student_calibrated(self, problem_location, grader_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
params = {'problem_id': problem_location, 'student_id': grader_id}
result = self.get(self.is_student_calibrated_url, params)
self._record_result(
'is_student_calibrated',
result,
tags=['calibrated:{}'.format(result.get('calibrated'))]
)
return result
def show_calibration_essay(self, problem_location, grader_id):
if isinstance(problem_location, UsageKey):
problem_location = problem_location.to_deprecated_string()
params = {'problem_id': problem_location, 'student_id': grader_id}
result = self._render_rubric(self.get(self.show_calibration_essay_url, params))
self._record_result('show_calibration_essay', result)
return result
def save_calibration_essay(self, **kwargs):
data = kwargs
data.update({'rubric_scores_complete': True})
result = self.post(self.save_calibration_essay_url, data)
        self._record_result('save_calibration_essay', result)
return result
def get_problem_list(self, course_id, grader_id):
params = {'course_id': course_id.to_deprecated_string(), 'student_id': grader_id}
result = self.get(self.get_problem_list_url, params)
if 'problem_list' in result:
for problem in result['problem_list']:
problem['location'] = course_id.make_usage_key_from_deprecated_string(problem['location'])
self._record_result('get_problem_list', result)
dog_stats_api.histogram(
self._metric_name('get_problem_list.result.length'),
len(result.get('problem_list', [])),
)
return result
def get_notifications(self, course_id, grader_id):
params = {'course_id': course_id.to_deprecated_string(), 'student_id': grader_id}
result = self.get(self.get_notifications_url, params)
self._record_result(
'get_notifications',
result,
tags=['needs_to_peer_grade:{}'.format(result.get('student_needs_to_peer_grade'))]
)
return result
class MockPeerGradingService(object):
"""
This is a mock peer grading service that can be used for unit tests
without making actual service calls to the grading controller
"""
def get_next_submission(self, problem_location, grader_id):
return {
'success': True,
'submission_id': 1,
'submission_key': "",
'student_response': 'Sample student response.',
'prompt': 'Sample submission prompt.',
'rubric': 'Placeholder text for the full rubric.',
'max_score': 4
}
def save_grade(self, **kwargs):
return {'success': True}
def is_student_calibrated(self, problem_location, grader_id):
return {'success': True, 'calibrated': True}
def show_calibration_essay(self, problem_location, grader_id):
return {'success': True,
'submission_id': 1,
'submission_key': '',
'student_response': 'Sample student response.',
'prompt': 'Sample submission prompt.',
'rubric': 'Placeholder text for the full rubric.',
'max_score': 4}
def save_calibration_essay(self, **kwargs):
return {'success': True, 'actual_score': 2}
def get_problem_list(self, course_id, grader_id):
return {'success': True,
'problem_list': [
]}
def get_data_for_location(self, problem_location, student_id):
return {
"version": 1,
"count_graded": 3,
"count_required": 3,
"success": True,
"student_sub_count": 1,
'submissions_available': 0,
}
| agpl-3.0 |
kishikawakatsumi/Mozc-for-iOS | src/third_party/gyp/pylib/gyp/generator/android.py | 3 | 45146 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
import subprocess
generator_default_variables = {
'OS': 'android',
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.so',
'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
'LIB_DIR': '$(obj).$(TOOLSET)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(RULE_SOURCES)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
# Boolean to declare that this target does not want its name mangled.
'android_unmangled_name',
]
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
ALL_MODULES_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""
header = """\
# This file is generated by gyp; do not edit.
"""
android_standard_include_paths = set([
# JNI_H_INCLUDE in build/core/binary.mk
'dalvik/libnativehelper/include/nativehelper',
# from SRC_HEADERS in build/core/config.mk
'system/core/include',
'hardware/libhardware/include',
'hardware/libhardware_legacy/include',
'hardware/ril/include',
'dalvik/libnativehelper/include',
'frameworks/native/include',
'frameworks/native/opengl/include',
'frameworks/base/include',
'frameworks/base/opengl/include',
'frameworks/base/native/include',
'external/skia/include',
# TARGET_C_INCLUDES in build/core/combo/TARGET_linux-arm.mk
'bionic/libc/arch-arm/include',
'bionic/libc/include',
'bionic/libstdc++/include',
'bionic/libc/kernel/common',
'bionic/libc/kernel/arch-arm',
'bionic/libm/include',
'bionic/libm/include/arm',
'bionic/libthread_db/include',
])
# Map gyp target types to Android module classes.
MODULE_CLASSES = {
'static_library': 'STATIC_LIBRARIES',
'shared_library': 'SHARED_LIBRARIES',
'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
return make.COMPILABLE_EXTENSIONS.get(ext) == 'cxx'
def Sourceify(path):
"""Convert a path to its source directory form. The Android backend does not
support options.generator_output, so this function is a noop."""
return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
"""AndroidMkWriter packages up the writing of one target-specific Android.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, android_top_dir):
self.android_top_dir = android_top_dir
def Write(self, qualified_target, relative_target, base_path, output_filename,
spec, configs, part_of_all, write_alias_target):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
relative_target: qualified target name relative to the root
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for
this target
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.relative_target = relative_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
self.android_module = self.ComputeAndroidModule(spec)
(self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
self.output = self.output_binary = self.ComputeOutput(spec)
# Standard header.
self.WriteLn('include $(CLEAR_VARS)\n')
# Module class and name.
self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
self.WriteLn('LOCAL_MODULE := ' + self.android_module)
# Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
# The library module classes fail if the stem is set. ComputeOutputParts
# makes sure that stem == modulename in these cases.
if self.android_stem != self.android_module:
self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
self.WriteLn('LOCAL_MODULE_TAGS := optional')
if self.toolset == 'host':
self.WriteLn('LOCAL_IS_HOST_MODULE := true')
self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)')
else:
self.WriteLn('LOCAL_MODULE_TARGET_ARCH := '
'$(TARGET_$(GYP_VAR_PREFIX)ARCH)')
# Grab output directories; needed for Actions and Rules.
if self.toolset == 'host':
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))')
else:
self.WriteLn('gyp_intermediate_dir := '
'$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))')
self.WriteLn('gyp_shared_intermediate_dir := '
'$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))')
self.WriteLn()
# List files this target depends on so that actions/rules/copies/sources
# can depend on the list.
# TODO: doesn't pull in things through transitive link deps; needed?
target_dependencies = [x[1] for x in deps if x[0] == 'path']
self.WriteLn('# Make sure our deps are built first.')
self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
local_pathify=True)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs)
# GYP generated outputs.
self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)
# Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
# on both our dependency targets and our generated files.
self.WriteLn('# Make sure our deps and generated files are built first.')
self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
'$(GYP_GENERATED_OUTPUTS)')
self.WriteLn()
# Sources.
if spec.get('sources', []) or extra_sources:
self.WriteSources(spec, configs, extra_sources)
self.WriteTarget(spec, configs, deps, link_deps, part_of_all,
write_alias_target)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = ('path', self.output_binary)
# Update global list of link dependencies.
if self.type == 'static_library':
target_link_deps[qualified_target] = ('static', self.android_module)
elif self.type == 'shared_library':
target_link_deps[qualified_target] = ('shared', self.android_module)
self.fp.close()
return self.android_module
def WriteActions(self, actions, extra_sources, extra_outputs):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
"""
for action in actions:
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Action for target "%s" writes output to local path '
'"%s".' % (self.target, out))
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
# Prepare the actual command.
command = gyp.common.EncodePOSIXShellList(action['action'])
if 'message' in action:
quiet_cmd = 'Gyp action: %s ($@)' % action['message']
else:
quiet_cmd = 'Gyp action: %s ($@)' % name
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the gyp_*
# variables for the action rule with an absolute version so that the
# output goes in the right place.
# Only write the gyp_* rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# Android's envsetup.sh adds a number of directories to the path including
# the built host binary directory. This causes actions/rules invoked by
# gyp to sometimes use these instead of system versions, e.g. bison.
# The built host binaries may not be suitable, and can cause errors.
# So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
# set by envsetup.
self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
% main_output)
# Don't allow spaces in input/output filenames, but make an exception for
# filenames which start with '$(' since it's okay for there to be spaces
# inside of make function/macro invocations.
for input in inputs:
if not input.startswith('$(') and ' ' in input:
raise gyp.common.GypError(
'Action input filename "%s" in target %s contains a space' %
(input, self.target))
for output in outputs:
if not output.startswith('$(') and ' ' in output:
raise gyp.common.GypError(
'Action output filename "%s" in target %s contains a space' %
(output, self.target))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, ' '.join(map(self.LocalPathify, inputs))))
self.WriteLn('\t@echo "%s"' % quiet_cmd)
self.WriteLn('\t$(hide)%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))
extra_outputs += outputs
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
"""
if len(rules) == 0:
return
for rule in rules:
if len(rule.get('rule_sources', [])) == 0:
continue
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
rule['rule_name']))
self.WriteLn('\n### Generated for rule "%s":' % name)
self.WriteLn('# "%s":' % rule)
inputs = rule.get('inputs')
for rule_source in rule.get('rule_sources', []):
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Rule for target %s writes output to local path %s'
% (self.target, out))
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
extra_outputs += outputs
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.extend(outputs)
components = []
for component in rule['action']:
component = self.ExpandInputRoot(component, rule_source_root,
rule_source_dirname)
if '$(RULE_SOURCES)' in component:
component = component.replace('$(RULE_SOURCES)',
rule_source)
components.append(component)
command = gyp.common.EncodePOSIXShellList(components)
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
if dirs:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
# We set up a rule to build the first output, and then set up
# a rule for each additional output to depend on the first.
outputs = map(self.LocalPathify, outputs)
main_output = outputs[0]
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# See explanation in WriteActions.
self.WriteLn('%s: export PATH := '
'$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)
main_output_deps = self.LocalPathify(rule_source)
if inputs:
main_output_deps += ' '
main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
(main_output, main_output_deps))
self.WriteLn('\t%s\n' % command)
for output in outputs[1:]:
# Make each output depend on the main output, with an empty command
# to force make to notice that the mtime has changed.
self.WriteLn('%s: %s ;' % (output, main_output))
self.WriteLn()
self.WriteLn()
def WriteCopies(self, copies, extra_outputs):
"""Write Makefile code for any 'copies' from the gyp input.
    extra_outputs: a list that will be filled in with any outputs of these
                   copies (used to make other pieces dependent on them)
"""
self.WriteLn('### Generated for copy rule.')
variable = make.StringToMakefileVariable(self.relative_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# The Android build system does not allow generation of files into the
# source tree. The destination should start with a variable, which will
# typically be $(gyp_intermediate_dir) or
# $(gyp_shared_intermediate_dir). Note that we can't use an assertion
# because some of the gyp tests depend on this.
if not copy['destination'].startswith('$'):
print ('WARNING: Copy rule for target %s writes output to '
'local path %s' % (self.target, copy['destination']))
# LocalPathify() calls normpath, stripping trailing slashes.
path = Sourceify(self.LocalPathify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
filename)))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
(output, path))
self.WriteLn('\t@echo Copying: $@')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
self.WriteLn()
outputs.append(output)
self.WriteLn('%s = %s' % (variable,
' '.join(map(make.QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteSourceFlags(self, spec, configs):
"""Write out the flags and include paths used to compile source files for
the current target.
Args:
spec, configs: input from gyp.
"""
for configname, config in sorted(configs.iteritems()):
extracted_includes = []
self.WriteLn('\n# Flags passed to both C and C++ files.')
cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
config.get('cflags', []) + config.get('cflags_c', []))
extracted_includes.extend(includes_from_cflags)
self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)
self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
prefix='-D', quoter=make.EscapeCppDefine)
self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
includes = list(config.get('include_dirs', []))
includes.extend(extracted_includes)
includes = map(Sourceify, map(self.LocalPathify, includes))
includes = self.NormalizeIncludePaths(includes)
self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)
self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)
self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
'$(MY_DEFS_$(GYP_CONFIGURATION))')
# Undefine ANDROID for host modules
# TODO: the source code should not use macro ANDROID to tell if it's host
# or target module.
if self.toolset == 'host':
self.WriteLn('# Undefine ANDROID for host modules')
self.WriteLn('LOCAL_CFLAGS += -UANDROID')
self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
'$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
# Android uses separate flags for assembly file invocations, but gyp expects
# the same CFLAGS to be applied:
self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)')
def WriteSources(self, spec, configs, extra_sources):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
We need to handle shared_intermediate directory source files as
a special case by copying them to the intermediate directory and
    treating them as generated sources. Otherwise the Android build
rules won't pick them up.
Args:
spec, configs: input from gyp.
extra_sources: Sources generated from Actions or Rules.
"""
sources = filter(make.Compilable, spec.get('sources', []))
generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
extra_sources = filter(make.Compilable, extra_sources)
# Determine and output the C++ extension used by these sources.
# We simply find the first C++ file and use that extension.
all_sources = sources + extra_sources
local_cpp_extension = '.cpp'
for source in all_sources:
(root, ext) = os.path.splitext(source)
if IsCPPExtension(ext):
local_cpp_extension = ext
break
if local_cpp_extension != '.cpp':
self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)
# We need to move any non-generated sources that are coming from the
# shared intermediate directory out of LOCAL_SRC_FILES and put them
# into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
# that don't match our local_cpp_extension, since Android will only
# generate Makefile rules for a single LOCAL_CPP_EXTENSION.
local_files = []
for source in sources:
(root, ext) = os.path.splitext(source)
if '$(gyp_shared_intermediate_dir)' in source:
extra_sources.append(source)
elif '$(gyp_intermediate_dir)' in source:
extra_sources.append(source)
elif IsCPPExtension(ext) and ext != local_cpp_extension:
extra_sources.append(source)
else:
local_files.append(os.path.normpath(os.path.join(self.path, source)))
# For any generated source, if it is coming from the shared intermediate
# directory then we add a Make rule to copy them to the local intermediate
# directory first. This is because the Android LOCAL_GENERATED_SOURCES
# must be in the local module intermediate directory for the compile rules
# to work properly. If the file has the wrong C++ extension, then we add
# a rule to copy that to intermediates and use the new version.
final_generated_sources = []
    # If a source file gets copied, we still need to add the original source
    # directory as a header search path, because GCC searches for headers in
    # the directory that contains the source file by default.
origin_src_dirs = []
for source in extra_sources:
local_file = source
if not '$(gyp_intermediate_dir)/' in local_file:
basename = os.path.basename(local_file)
local_file = '$(gyp_intermediate_dir)/' + basename
(root, ext) = os.path.splitext(local_file)
if IsCPPExtension(ext) and ext != local_cpp_extension:
local_file = root + local_cpp_extension
if local_file != source:
self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
self.WriteLn('\tmkdir -p $(@D); cp $< $@')
origin_src_dirs.append(os.path.dirname(source))
final_generated_sources.append(local_file)
# We add back in all of the non-compilable stuff to make sure that the
# make rules have dependencies on them.
final_generated_sources.extend(generated_not_sources)
self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')
origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')
self.WriteList(local_files, 'LOCAL_SRC_FILES')
# Write out the flags used to compile the source; this must be done last
# so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
"""Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names.
"""
if int(spec.get('android_unmangled_name', 0)):
assert self.type != 'shared_library' or self.target.startswith('lib')
return self.target
if self.type == 'shared_library':
# For reasons of convention, the Android build system requires that all
# shared library modules are named 'libfoo' when generating -l flags.
prefix = 'lib_'
else:
prefix = ''
if spec['toolset'] == 'host':
suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'
else:
suffix = '_gyp'
if self.path:
middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target))
else:
middle = make.StringToMakefileVariable(self.target)
return ''.join([prefix, middle, suffix])
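  # For illustration (hypothetical target): a target-toolset target 'foo'
  # defined under path 'base' yields the module name 'base_foo_gyp', or
  # 'lib_base_foo_gyp' if it is a shared library.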
def ComputeOutputParts(self, spec):
"""Return the 'output basename' of a gyp spec, split into filename + ext.
Android libraries must be named the same thing as their module name,
otherwise the linker can't find them, so product_name and so on must be
ignored if we are building a library, and the "lib" prepending is
not done for Android.
"""
assert self.type != 'loadable_module' # TODO: not supported?
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.a'
elif self.type == 'shared_library':
target = self.ComputeAndroidModule(spec)
target_ext = '.so'
elif self.type == 'none':
target_ext = '.stamp'
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
if self.type != 'static_library' and self.type != 'shared_library':
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
target_stem = target_prefix + target
return (target_stem, target_ext)
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
    E.g., a shared-library target will produce '<android module name>.so'
    (see ComputeOutputParts; loadable_module is not supported here).
"""
return ''.join(self.ComputeOutputParts(spec))
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
    E.g., a target-toolset shared-library target will produce
    '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)/<module name>.so'
"""
if self.type == 'executable':
# We install host executables into shared_intermediate_dir so they can be
# run by gyp rules that refer to PRODUCT_DIR.
path = '$(gyp_shared_intermediate_dir)'
elif self.type == 'shared_library':
if self.toolset == 'host':
path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)'
else:
path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)'
else:
# Other targets just get built into their intermediate dir.
if self.toolset == 'host':
path = ('$(call intermediates-dir-for,%s,%s,true,,'
'$(GYP_HOST_VAR_PREFIX))' % (self.android_class,
self.android_module))
else:
path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))'
% (self.android_class, self.android_module))
assert spec.get('product_dir') is None # TODO: not supported?
return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeIncludePaths(self, include_paths):
""" Normalize include_paths.
Convert absolute paths to relative to the Android top directory;
filter out include paths that are already brought in by the Android build
system.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
"""
normalized = []
for path in include_paths:
if path[0] == '/':
path = gyp.common.RelativePath(path, self.android_top_dir)
# Filter out the Android standard search path.
if path not in android_standard_include_paths:
normalized.append(path)
return normalized
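  # E.g. (hypothetical paths, with the Android tree rooted at '/src/android'):
  # '/src/android/external/foo/include' normalizes to 'external/foo/include',
  # while '/src/android/external/skia/include' is dropped entirely because it
  # is already on the standard Android search path.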
def ExtractIncludesFromCFlags(self, cflags):
"""Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
      A tuple of lists: (clean_cflags, include_paths). "-I.." is trimmed.
"""
clean_cflags = []
include_paths = []
for flag in cflags:
if flag.startswith('-I'):
include_paths.append(flag[2:])
else:
clean_cflags.append(flag)
return (clean_cflags, include_paths)
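  # E.g. (hypothetical flags):
  #   ExtractIncludesFromCFlags(['-Wall', '-Iexternal/foo'])
  #   -> (['-Wall'], ['external/foo'])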
def ComputeAndroidLibraryModuleNames(self, libraries):
"""Compute the Android module names from libraries, ie spec.get('libraries')
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules)
"""
static_lib_modules = []
dynamic_lib_modules = []
for libs in libraries:
# Libs can have multiple words.
for lib in libs.split():
# Filter the system libraries, which are added by default by the Android
# build system.
if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
lib.endswith('libgcc.a')):
continue
match = re.search(r'([^/]+)\.a$', lib)
if match:
static_lib_modules.append(match.group(1))
continue
match = re.search(r'([^/]+)\.so$', lib)
if match:
dynamic_lib_modules.append(match.group(1))
continue
# "-lstlport" -> libstlport
if lib.startswith('-l'):
if lib.endswith('_static'):
static_lib_modules.append('lib' + lib[2:])
else:
dynamic_lib_modules.append('lib' + lib[2:])
return (static_lib_modules, dynamic_lib_modules)
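  # E.g. (hypothetical libraries value):
  #   ['-lm -lstlport_static', 'out/libfoo.a', '-lcutils']
  #   -> (['libstlport_static', 'libfoo'], ['libcutils'])
  # ('-lm' is filtered out as a default system library.)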
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTargetFlags(self, spec, configs, link_deps):
"""Write Makefile code to specify the link flags and library dependencies.
spec, configs: input from gyp.
link_deps: link dependency list; see ComputeDeps()
"""
for configname, config in sorted(configs.iteritems()):
ldflags = list(config.get('ldflags', []))
self.WriteLn('')
self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
self.WriteLn('\nLOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))')
# Libraries (i.e. -lfoo)
libraries = gyp.common.uniquer(spec.get('libraries', []))
static_libs, dynamic_libs = self.ComputeAndroidLibraryModuleNames(
libraries)
# Link dependencies (i.e. libfoo.a, libfoo.so)
static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
self.WriteLn('')
self.WriteList(static_libs + static_link_deps,
'LOCAL_STATIC_LIBRARIES')
self.WriteLn('# Enable grouping to fix circular references')
self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
self.WriteLn('')
self.WriteList(dynamic_libs + shared_link_deps,
'LOCAL_SHARED_LIBRARIES')
def WriteTarget(self, spec, configs, deps, link_deps, part_of_all,
write_alias_target):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
part_of_all: flag indicating this target is part of 'all'
write_alias_target: flag indicating whether to create short aliases for this
target
"""
self.WriteLn('### Rules for final target.')
if self.type != 'none':
self.WriteTargetFlags(spec, configs, link_deps)
# Add to the set of targets which represent the gyp 'all' target. We use the
# name 'gyp_all_modules' as the Android build system doesn't allow the use
# of the Make target 'all' and because 'all_modules' is the equivalent of
# the Make target 'all' on Android.
if part_of_all and write_alias_target:
self.WriteLn('# Add target alias to "gyp_all_modules" target.')
self.WriteLn('.PHONY: gyp_all_modules')
self.WriteLn('gyp_all_modules: %s' % self.android_module)
self.WriteLn('')
# Add an alias from the gyp target name to the Android module name. This
# simplifies manual builds of the target, and is required by the test
# framework.
if self.target != self.android_module and write_alias_target:
self.WriteLn('# Alias gyp target name.')
self.WriteLn('.PHONY: %s' % self.target)
self.WriteLn('%s: %s' % (self.target, self.android_module))
self.WriteLn('')
# Add the command to trigger build of the target type depending
# on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
# NOTE: This has to come last!
modifier = ''
if self.toolset == 'host':
modifier = 'HOST_'
if self.type == 'static_library':
self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
elif self.type == 'shared_library':
self.WriteLn('LOCAL_PRELINK_MODULE := false')
self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
elif self.type == 'executable':
# Executables are for build and test purposes only, so they're installed
# to a directory that doesn't get included in the system image.
self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
else:
self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
if self.toolset == 'target':
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)')
else:
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)')
self.WriteLn()
self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
self.WriteLn()
self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) touch $@')
self.WriteLn()
self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=')
def WriteList(self, value_list, variable=None, prefix='',
quoter=make.QuoteIfNecessary, local_pathify=False):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
if local_pathify:
value_list = [self.LocalPathify(l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def LocalPathify(self, path):
"""Convert a subdirectory-relative path into a normalized path which starts
with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
Absolute paths, or paths that contain variables, are just normalized."""
if '$(' in path or os.path.isabs(path):
# path is not a file in the project tree in this case, but calling
# normpath is still important for trimming trailing slashes.
return os.path.normpath(path)
local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
local_path = os.path.normpath(local_path)
# Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
# - i.e. that the resulting path is still inside the project tree. The
# path may legitimately have ended up containing just $(LOCAL_PATH), though,
# so we don't look for a slash.
assert local_path.startswith('$(LOCAL_PATH)'), (
        'Path %s attempts to escape from gyp path %s!' % (path, self.path))
return local_path
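  # E.g. (hypothetical), with self.path 'base': 'foo/bar.c' becomes
  # '$(LOCAL_PATH)/base/foo/bar.c', while '$(gyp_intermediate_dir)/x.c' and
  # absolute paths are merely normalized.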
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return os.path.normpath(path)
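  # E.g. (hypothetical rule output): the template
  # '$(gyp_intermediate_dir)/%(INPUT_ROOT)s.cc' with rule source
  # 'src/idl/foo.idl' expands to '$(gyp_intermediate_dir)/foo.cc'.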
def PerformBuild(data, configurations, params):
# The android backend only supports the default configuration.
options = params['options']
makefile = os.path.abspath(os.path.join(options.toplevel_dir,
'GypAndroid.mk'))
env = dict(os.environ)
env['ONE_SHOT_MAKEFILE'] = makefile
arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules']
print 'Building: %s' % arguments
subprocess.check_call(arguments, env=env)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
limit_to_target_all = generator_flags.get('limit_to_target_all', False)
write_alias_targets = generator_flags.get('write_alias_targets', True)
android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'GypAndroid' + options.suffix + '.mk'
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
assert not options.generator_output, (
'The Android backend does not support options.generator_output.')
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(header)
# We set LOCAL_PATH just once, here, to the top of the project tree. This
# allows all the other paths we use to be relative to the Android.mk file,
# as the Android build system expects.
root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
android_modules = {}
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
relative_build_file = gyp.common.RelativePath(build_file,
options.toplevel_dir)
build_files.add(relative_build_file)
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
part_of_all = (qualified_target in needed_targets and
not int(spec.get('suppress_wildcard', False)))
if limit_to_target_all and not part_of_all:
continue
relative_target = gyp.common.QualifiedTarget(relative_build_file, target,
toolset)
writer = AndroidMkWriter(android_top_dir)
android_module = writer.Write(qualified_target, relative_target, base_path,
output_file, spec, configs,
part_of_all=part_of_all,
write_alias_target=write_alias_targets)
if android_module in android_modules:
print ('ERROR: Android module names must be unique. The following '
'targets both generate Android module name %s.\n %s\n %s' %
(android_module, android_modules[android_module],
qualified_target))
return
android_modules[android_module] = qualified_target
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration)
root_makefile.write('GYP_VAR_PREFIX ?=\n')
root_makefile.write('GYP_HOST_VAR_PREFIX ?=\n')
root_makefile.write('GYP_HOST_MULTILIB ?=\n')
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
root_makefile.write('\n')
if write_alias_targets:
root_makefile.write(ALL_MODULES_FOOTER)
root_makefile.close()
| apache-2.0 |
bev-a-tron/pledgeservice | lib/stripe/error.py | 17 | 1339 | # Exceptions
class StripeError(Exception):
def __init__(self, message=None, http_body=None, http_status=None,
json_body=None):
super(StripeError, self).__init__(message)
if http_body and hasattr(http_body, 'decode'):
try:
http_body = http_body.decode('utf-8')
            except Exception:
http_body = ('<Could not decode body as utf-8. '
'Please report to [email protected]>')
self.http_body = http_body
self.http_status = http_status
self.json_body = json_body
class APIError(StripeError):
pass
class APIConnectionError(StripeError):
pass
class CardError(StripeError):
def __init__(self, message, param, code, http_body=None,
http_status=None, json_body=None):
super(CardError, self).__init__(message,
http_body, http_status, json_body)
self.param = param
self.code = code
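# Illustrative handling (hypothetical API call and logger), showing the
# extra attributes CardError carries beyond the message:
#
#     try:
#         charge = create_charge(amount=1000, currency='usd')
#     except CardError as e:
#         log.warning('card declined: param=%s code=%s', e.param, e.code)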
class InvalidRequestError(StripeError):
def __init__(self, message, param, http_body=None,
http_status=None, json_body=None):
super(InvalidRequestError, self).__init__(
message, http_body, http_status, json_body)
self.param = param
class AuthenticationError(StripeError):
pass
| agpl-3.0 |
d40223223/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/optparse.py | 728 | 60616 | """A powerful, extensible, and easy-to-use option parser.
By Greg Ward <[email protected]>
Originally distributed as Optik.
For support, use the [email protected] mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
Simple usage example:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
"""
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
from gettext import gettext, ngettext
except ImportError:
def gettext(message):
return message
def ngettext(singular, plural, n):
if n == 1:
return singular
return plural
_ = gettext
class OptParseError (Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class OptionError (OptParseError):
"""
Raised if an Option instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
"""
Raised if conflicting options are added to an OptionParser.
"""
class OptionValueError (OptParseError):
"""
Raised if an invalid option value is encountered on the command
line.
"""
class BadOptionError (OptParseError):
"""
Raised if an invalid option is seen on the command line.
"""
def __init__(self, opt_str):
self.opt_str = opt_str
def __str__(self):
return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
"""
Raised if an ambiguous option is seen on the command line.
"""
def __init__(self, opt_str, possibilities):
BadOptionError.__init__(self, opt_str)
self.possibilities = possibilities
def __str__(self):
return (_("ambiguous option: %s (%s?)")
% (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
parser : OptionParser
the controlling OptionParser instance
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output (pass None to constructor for
this value to be taken from the $COLUMNS environment variable)
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
default_tag : str
text to replace with each option's default value, "%default"
by default. Set to false value to disable default value expansion.
option_strings : { Option : str }
maps Option instances to the snippet of help text explaining
the syntax of that option, e.g. "-h, --help" or
"-fFILE, --file=FILE"
_short_opt_fmt : str
format string controlling how short options with values are
printed in help text. Must be either "%s%s" ("-fFILE") or
"%s %s" ("-f FILE"), because those are the two syntaxes that
Optik supports.
_long_opt_fmt : str
similar but for long options; must be either "%s %s" ("--file FILE")
or "%s=%s" ("--file=FILE").
"""
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
self.help_position = self.max_help_position = max_help_position
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
def set_parser(self, parser):
self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
def indent(self):
self.current_indent += self.indent_increment
self.level += 1
def dedent(self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage(self, usage):
raise NotImplementedError("subclasses must implement")
def format_heading(self, heading):
raise NotImplementedError("subclasses must implement")
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = self.width - self.current_indent
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
def expand_default(self, option):
if self.parser is None or not self.default_tag:
return option.help
default_value = self.parser.defaults.get(option.dest)
if default_value is NO_DEFAULT or default_value is None:
default_value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(default_value))
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = textwrap.wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = self.width - self.help_position
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
"""Format help with indented section bodies.
"""
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return _("Usage: %s\n") % usage
def format_heading(self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
"""Format help with underlined section headers.
"""
def __init__(self,
indent_increment=0,
max_help_position=24,
width=None,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return "%s %s\n" % (self.format_heading(_("Usage")), usage)
def format_heading(self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
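# Hedged examples of the radix detection above (int() accepts the "0x"
# prefix when base 16 is given, so only the "0b" prefix needs stripping):
#
#   >>> _parse_num("0x1f", int)
#   31
#   >>> _parse_num("0b101", int)
#   5
#   >>> _parse_num("010", int)
#   8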
def _parse_int(val):
return _parse_num(val, int)
_builtin_cvt = { "int" : (_parse_int, _("integer")),
"long" : (_parse_int, _("integer")),
"float" : (float, _("floating-point")),
"complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
_("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
_("option %s: invalid choice: %r (choose from %s)")
% (opt, value, choices))
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. which may consume an argument from the command line.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of actions which *require* a value type, ie. that
# always consume an argument from the command line.
ALWAYS_TYPED_ACTIONS = ("store",
"append")
# The set of actions which take a 'const' attribute.
CONST_ACTIONS = ("store_const",
"append_const")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex": check_builtin,
"choice" : check_choice,
}
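# A hedged sketch of what a custom checker could look like for a
# hypothetical "size" type (not part of this module); it follows the
# check(option, opt, value) signature documented above:
#
#   def check_size(option, opt, value):
#       try:
#           scale = 1024 if value[-1:] in ("k", "K") else 1
#           return int(value.rstrip("kK")) * scale
#       except ValueError:
#           raise OptionValueError(
#               "option %s: invalid size value: %r" % (opt, value))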
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
def __init__(self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple.
# Have to be set now, in case no option strings are supplied.
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings(self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = [opt for opt in opts if opt]
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings(self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs(self, attrs):
for attr in self.ATTRS:
if attr in attrs:
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
attrs = sorted(attrs.keys())
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs),
self)
# -- Constructor validation methods --------------------------------
def _check_action(self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type(self):
if self.type is None:
if self.action in self.ALWAYS_TYPED_ACTIONS:
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
# Allow type objects or builtin type conversion functions
# (int, str, etc.) as an alternative to their names. (The
# complicated check of builtins is only necessary for
# Python 2.1 and earlier, and is short-circuited by the
# first check on modern Pythons.)
import builtins
if ( isinstance(self.type, type) or
(hasattr(self.type, "__name__") and
getattr(builtins, self.type.__name__, None) is self.type) ):
self.type = self.type.__name__
if self.type == "str":
self.type = "string"
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif not isinstance(self.choices, (tuple, list)):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest(self):
# No destination given, and we need one for this action. The
# self.type check is for callbacks that take a value.
takes_value = (self.action in self.STORE_ACTIONS or
self.type is not None)
if self.dest is None and takes_value:
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const(self):
if self.action not in self.CONST_ACTIONS and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs(self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback(self):
if self.action == "callback":
if not callable(self.callback):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
not isinstance(self.callback_args, tuple)):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
not isinstance(self.callback_kwargs, dict)):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__(self):
return "/".join(self._short_opts + self._long_opts)
__repr__ = _repr
def takes_value(self):
return self.type is not None
def get_opt_string(self):
if self._long_opts:
return self._long_opts[0]
else:
return self._short_opts[0]
# -- Processing methods --------------------------------------------
def check_value(self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "append_const":
values.ensure_value(dest, []).append(self.const)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
parser.exit()
elif action == "version":
parser.print_version()
parser.exit()
else:
raise ValueError("unknown action %r" % self.action)
return 1
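# Illustrative sketch of the "count" action above: with an option like
# Option("-v", action="count", dest="verbose"), parsing "-vvv" calls
# ensure_value(dest, 0) + 1 three times, leaving values.verbose == 3.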
# class Option
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
class Values:
def __init__(self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __str__(self):
return str(self.__dict__)
__repr__ = _repr
def __eq__(self, other):
if isinstance(other, Values):
return self.__dict__ == other.__dict__
elif isinstance(other, dict):
return self.__dict__ == other
else:
return NotImplemented
def _update_careful(self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if attr in dict:
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose(self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update(self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError("invalid update mode: %r" % mode)
def read_module(self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file(self, filename, mode="careful"):
vars = {}
exec(open(filename).read(), vars)
self._update(vars, mode)
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
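# Minimal usage sketch (hypothetical attribute names):
#
#   >>> v = Values({"verbose": True})
#   >>> v.verbose
#   True
#   >>> v.ensure_value("count", 0)
#   0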
class OptionContainer:
"""
Abstract base class.
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
option_list : [Option]
the list of Option objects contained by this OptionContainer
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
has multiple short option strings, it will appear in this
dictionary multiple times. [1]
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary. [1]
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination [1]
[1] These mappings are common to (shared by) all components of the
controlling OptionParser, where they are initially created.
"""
def __init__(self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings(self):
# For use by OptionParser constructor -- create the master
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings(self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler(self, handler):
if handler not in ("error", "resolve"):
raise ValueError("invalid conflict_resolution value %r" % handler)
self.conflict_handler = handler
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def destroy(self):
"""see OptionParser.destroy()."""
del self._short_opt
del self._long_opt
del self.defaults
# -- Option-adding methods -----------------------------------------
def _check_conflict(self, option):
conflict_opts = []
for opt in option._short_opts:
if opt in self._short_opt:
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if opt in self._long_opt:
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "error":
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve":
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if isinstance(args[0], str):
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError("not an Option instance: %r" % option)
else:
raise TypeError("invalid arguments")
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif option.dest not in self.defaults:
self.defaults[option.dest] = None
return option
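# Typical usage sketch of the two supported call forms (illustrative
# option names):
#
#   parser.add_option("-f", "--file", dest="filename",
#                     help="write output to FILE", metavar="FILE")
#   parser.add_option(Option("-q", "--quiet", action="store_false",
#                            dest="verbose", default=True))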
def add_options(self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option(self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option(self, opt_str):
return (opt_str in self._short_opt or
opt_str in self._long_opt)
def remove_option(self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help(self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
if option.help is not SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description(self, formatter):
return formatter.format_description(self.get_description())
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__(self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list(self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title(self, title):
self.title = title
def destroy(self):
"""see OptionParser.destroy()."""
OptionContainer.destroy(self)
del self.option_list
# -- Help-formatting methods ---------------------------------------
def format_help(self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
description : string
A paragraph of text giving a brief overview of your program.
optparse reformats this paragraph to fit the current terminal
width and prints it when the user requests help (after usage,
but before the list of options).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
"""Set parsing to not stop on the first non-option, allowing
interspersing switches with command arguments. This is the
default behavior. See also disable_interspersed_args() and the
class documentation description of the attribute
allow_interspersed_args."""
self.allow_interspersed_args = True
def disable_interspersed_args(self):
"""Set parsing to stop on the first non-option. Use this if
you have a command processor which runs another command that
has options of its own and you want to make sure these options
don't get confused.
"""
self.allow_interspersed_args = False
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, str):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if isinstance(args[0], str):
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError("not an OptionGroup instance: %r" % group)
if group.parser is not self:
raise ValueError("invalid OptionGroup (wrong parser)")
else:
raise TypeError("invalid arguments")
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError) as err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
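# Minimal end-to-end sketch of parse_args() (hypothetical options):
#
#   >>> parser = OptionParser()
#   >>> opt = parser.add_option("-q", action="store_false",
#   ...                         dest="verbose", default=True)
#   >>> options, args = parser.parse_args(["-q", "input.txt"])
#   >>> options.verbose, args
#   (False, ['input.txt'])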
def check_values(self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
it is an unambiguous abbreviation for. Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error(_("%s option does not take a value") % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
raise BadOptionError(opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
self.error(ngettext(
"%(option)s option requires %(number)d argument",
"%(option)s option requires %(number)d arguments",
nargs) % {"option": opt, "number": nargs})
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
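# Illustrative sketch of short-option clustering: with -x a flag and -f
# taking a value, the cluster "-xfout.txt" sets x, then treats the
# remaining characters "out.txt" as the value for -f and stops consuming
# the rest of the cluster.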
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
sys.exit(status)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
def print_usage(self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
'file' (default stdout). Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if self.usage:
print(self.get_usage(), file=file)
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
def print_version(self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
'file' (default stdout). As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if self.version:
print(self.get_version(), file=file)
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(_("Options")))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
file.write(self.format_help())
# class OptionParser
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'words', raise BadOptionError.
"""
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
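# Hedged doctest-style sketch:
#
#   >>> _match_abbrev("--verb", {"--verbose": None, "--version": None})
#   '--verbose'
#
# while "--ver" would raise AmbiguousOptionError, since both keys share
# that prefix.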
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
| gpl-3.0 |
Saturn/livestreamer | src/livestreamer/stream/akamaihd.py | 37 | 7880 | import base64
import io
import hashlib
import hmac
import random
from .stream import Stream
from .wrappers import StreamIOThreadWrapper, StreamIOIterWrapper
from ..buffers import Buffer
from ..compat import str, bytes, urlparse
from ..exceptions import StreamError
from ..utils import swfdecompress
from ..packages.flashmedia import FLV, FLVError
from ..packages.flashmedia.tag import ScriptData
class TokenGenerator(object):
def __init__(self, stream):
self.stream = stream
def generate(self):
raise NotImplementedError
class Auth3TokenGenerator(TokenGenerator):
def generate(self):
if not self.stream.swf:
raise StreamError("A SWF URL is required to create session token")
res = self.stream.session.http.get(self.stream.swf,
exception=StreamError)
data = swfdecompress(res.content)
md5 = hashlib.md5()
md5.update(data)
data = bytes(self.stream.sessionid, "ascii") + md5.digest()
sig = hmac.new(b"foo", data, hashlib.sha1)
b64 = base64.encodestring(sig.digest())
token = str(b64, "ascii").replace("\n", "")
return token
def cache_bust_string(length):
rval = ""
for i in range(length):
rval += chr(65 + int(round(random.random() * 25)))
return rval
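# Illustrative sketch: cache_bust_string(5) returns a random string of
# five uppercase ASCII letters, e.g. "QKZJA", used below as the "r" and
# "g" request parameters to defeat intermediary caches.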
class AkamaiHDStreamIO(io.IOBase):
Version = "2.5.8"
FlashVersion = "LNX 11,1,102,63"
StreamURLFormat = "{host}/{streamname}"
ControlURLFormat = "{host}/control/{streamname}"
ControlData = b":)"
TokenGenerators = {
"c11e59dea648d56e864fc07a19f717b9": Auth3TokenGenerator
}
StatusComplete = 3
StatusError = 4
Errors = {
1: "Stream not found",
2: "Track not found",
3: "Seek out of bounds",
4: "Authentication failed",
5: "DVR disabled",
6: "Invalid bitrate test"
}
def __init__(self, session, url, swf=None, seek=None):
parsed = urlparse(url)
self.session = session
self.logger = self.session.logger.new_module("stream.akamaihd")
self.host = ("{scheme}://{netloc}").format(scheme=parsed.scheme, netloc=parsed.netloc)
self.streamname = parsed.path[1:]
self.swf = swf
self.seek = seek
def open(self):
self.guid = cache_bust_string(12)
self.islive = None
self.sessionid = None
self.flv = None
self.buffer = Buffer()
self.completed_handshake = False
url = self.StreamURLFormat.format(host=self.host, streamname=self.streamname)
params = self._create_params(seek=self.seek)
self.logger.debug("Opening host={host} streamname={streamname}",
host=self.host, streamname=self.streamname)
try:
res = self.session.http.get(url, stream=True, params=params)
self.fd = StreamIOIterWrapper(res.iter_content(8192))
except Exception as err:
raise StreamError(str(err))
self.handshake(self.fd)
return self
def handshake(self, fd):
try:
self.flv = FLV(fd)
except FLVError as err:
raise StreamError(str(err))
self.buffer.write(self.flv.header.serialize())
self.logger.debug("Attempting to handshake")
for i, tag in enumerate(self.flv):
if i == 10:
raise StreamError("No OnEdge metadata in FLV after 10 tags, probably not a AkamaiHD stream")
self.process_tag(tag, exception=StreamError)
if self.completed_handshake:
self.logger.debug("Handshake successful")
break
def process_tag(self, tag, exception=IOError):
if isinstance(tag.data, ScriptData) and tag.data.name == "onEdge":
self._on_edge(tag.data.value, exception=exception)
self.buffer.write(tag.serialize())
def send_token(self, token):
headers = { "x-Akamai-Streaming-SessionToken": token }
self.logger.debug("Sending new session token")
self.send_control("sendingNewToken", headers=headers,
swf=self.swf)
def send_control(self, cmd, headers=None, **params):
if not headers:
headers = {}
url = self.ControlURLFormat.format(host=self.host,
streamname=self.streamname)
headers["x-Akamai-Streaming-SessionID"] = self.sessionid
params = self._create_params(cmd=cmd, **params)
return self.session.http.post(url,
headers=headers,
params=params,
data=self.ControlData,
exception=StreamError)
def read(self, size=-1):
if not (self.flv and self.fd):
return b""
if self.buffer.length:
return self.buffer.read(size)
else:
return self.fd.read(size)
def _create_params(self, **extra):
params = dict(v=self.Version, fp=self.FlashVersion,
r=cache_bust_string(5), g=self.guid)
params.update(extra)
return params
def _generate_session_token(self, data64):
swfdata = base64.decodestring(bytes(data64, "ascii"))
md5 = hashlib.md5()
md5.update(swfdata)
hash = md5.hexdigest()
if hash in self.TokenGenerators:
generator = self.TokenGenerators[hash](self)
return generator.generate()
else:
raise StreamError(("No token generator available for hash '{0}'").format(hash))
def _on_edge(self, data, exception=IOError):
def updateattr(attr, key):
if key in data:
setattr(self, attr, data[key])
self.logger.debug("onEdge data")
for key, val in data.items():
if isinstance(val, str):
val = val[:50]
self.logger.debug(" {key}={val}",
key=key, val=val)
updateattr("islive", "isLive")
updateattr("sessionid", "session")
updateattr("status", "status")
updateattr("streamname", "streamName")
if self.status == self.StatusComplete:
self.flv = None
elif self.status == self.StatusError:
errornum = data["errorNumber"]
if errornum in self.Errors:
msg = self.Errors[errornum]
else:
msg = "Unknown error"
raise exception("onEdge error: " + msg)
if not self.completed_handshake:
if "data64" in data:
sessiontoken = self._generate_session_token(data["data64"])
else:
sessiontoken = None
self.send_token(sessiontoken)
self.completed_handshake = True
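# Hedged sketch of the onEdge payload shape implied by the handler above
# (field names taken from the updateattr() calls; values illustrative):
#
#   {"isLive": True, "session": "abc123", "status": 1,
#    "streamName": "example_stream", "data64": "<base64 SWF data>"}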
class AkamaiHDStream(Stream):
"""
Implements the AkamaiHD Adaptive Streaming protocol
*Attributes:*
- :attr:`url` URL to the stream
- :attr:`swf` URL to a SWF used by the handshake protocol
- :attr:`seek` Position to seek to when opening the stream
"""
__shortname__ = "akamaihd"
def __init__(self, session, url, swf=None, seek=None):
Stream.__init__(self, session)
self.seek = seek
self.swf = swf
self.url = url
def __repr__(self):
return ("<AkamaiHDStream({0!r}, "
"swf={1!r})>".format(self.url, self.swf))
def __json__(self):
return dict(type=AkamaiHDStream.shortname(),
url=self.url, swf=self.swf)
def open(self):
stream = AkamaiHDStreamIO(self.session, self.url,
self.swf, self.seek)
return StreamIOThreadWrapper(self.session, stream.open())
__all__ = ["AkamaiHDStream"]
| bsd-2-clause |
mgymrek/lobstr-code | scripts/lobSTR_capillary_comparator.py | 1 | 6675 | #!/usr/bin/env python
"""
Compare capillary vs. lobSTR calls
This script is part of lobSTR_validation_suite.sh and
is not meant to be called directly.
"""
import argparse
import numpy as np
import pandas as pd
import sys
from scipy.stats import pearsonr
def ConvertSample(x):
"""
Convert HGDP samples numbers to standard format
HGDPXXXXX
"""
num = x.split("_")[1]
zeros = 5-len(num)
return "HGDP"+"0"*zeros + num
def LoadCapillaryFromStru(capfile, convfile):
"""
Input:
capfile: filename for .stru file
convfile: filename giving illumina sample id->HGDP id
Output: data frame with capillary calls
Construct data frame with:
marker
sample
allele1.cap
allele2.cap
Use sample names converted to Illumina format
Ignore alleles that are -9,-9
"""
# Load conversions
conv = pd.read_csv(convfile, sep="\t")
converter = dict(zip(conv.hgdp, conv.sample))
# Load genotypes
markers = []
samples = []
allele1s = []
allele2s = []
f = open(capfile, "r")
marker_names = f.readline().strip().split()
line = f.readline()
while line != "":
items = line.strip().split()
ident = "HGDP_%s"%items[0]
pop_code = items[1]
pop_name = items[2]
geo = items[3]
geo2 = items[4]
alleles1 = items[5:]
line = f.readline() # get second allele for the individual
items = line.strip().split()
ident2 = "HGDP_%s"%items[0]
if ident != ident2:
sys.stderr.write("ERROR parsing .stru file for individual %s\n"%items[0])
sys.exit(1)
alleles2 = items[5:]
sample = converter.get(ConvertSample(ident), "NA")
if sample != "NA":
for i in range(len(alleles1)):
if str(alleles1[i]) != "-9" and str(alleles2[i]) != "-9":
markers.append(marker_names[i])
samples.append(sample)
allele1s.append(int(alleles1[i]))
allele2s.append(int(alleles2[i]))
line = f.readline()
return pd.DataFrame({"marker": markers, "sample": samples, \
"allele1.cap": allele1s, "allele2.cap": allele2s})
def GetAllele(x, allele_num):
if allele_num == 1:
al = x["allele1.cap"]
else: al = x["allele2.cap"]
raw_allele = ((al-x["effective_product_size"])/x["period"])*x["period"]
corr_allele = raw_allele - x["correction"]
return corr_allele
def GetDosage(a1, a2):
if str(a1) == "." or str(a2) == ".": return "NA"
else: return (float(a1)+float(a2))*0.5
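# Illustrative examples of the helper above (hypothetical allele values):
#
#   >>> GetDosage(12, 14)
#   13.0
#   >>> GetDosage(".", 14)
#   'NA'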
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--lobSTR", help="Tab file with lobSTR calls created by lobSTR_vcf_to_tab.py", type=str, required=True)
parser.add_argument("--cap", help=".stru file with capillary calls", type=str, required=True)
parser.add_argument("--corrections", help="Tab file with Marshfield marker corrections", type=str, required=True)
parser.add_argument("--sample-conversions", help="Tab file with conversion between sample ids", type=str, required=True)
parser.add_argument("--output-stats", help="Output sample, call, and locus level stats to files with this prefix", type=str, required=False)
args = parser.parse_args()
LOBFILE = args.lobSTR
CAPFILE = args.cap
CORRFILE = args.corrections
CONVFILE = args.sample_conversions
# Load lobSTR calls and corrections
lob = pd.read_csv(LOBFILE, sep="\t")
corr = pd.read_csv(CORRFILE, sep="\t")
# Load capillary calls to data frame
cap = LoadCapillaryFromStru(CAPFILE, CONVFILE)
# Merge datasets
res = pd.merge(lob, corr, on=["chrom", "start"])
res = pd.merge(res, cap, on=["marker", "sample"])
res["allele1.cap.corr"] = res.apply(lambda x: GetAllele(x, 1), 1)
res["allele2.cap.corr"] = res.apply(lambda x: GetAllele(x, 2), 1)
res["correct"] = res.apply(lambda x: str(x["allele1"])==str(x["allele1.cap.corr"]) and \
str(x["allele2"])==str(x["allele2.cap.corr"]), 1)
res["dosage_lob"] = res.apply(lambda x: GetDosage(x["allele1"], x["allele2"]), 1)
res["dosage_cap"] = res.apply(lambda x: GetDosage(x["allele1.cap.corr"], x["allele2.cap.corr"]), 1)
##### Stats #####
if args.output_stats:
# Call level stats
res.to_csv(args.output_stats+".calllevel.tab", index=False, sep="\t")
# Sample level stats
sample_level = res.groupby("sample", as_index=False).agg({"DP": np.mean,
"Q": np.mean,
"start": len,
"correct": np.mean})
sample_level.to_csv(args.output_stats+".samplelevel.tab", index=False, sep="\t")
# Locus level stats
res["length"] = res["end_x"]-res["start"]+1
locus_level = res.groupby(["chrom","start"], as_index=False).agg({"length": np.mean,
"DP": np.mean,
"Q": np.mean,
"GT": len,
"SB": np.mean,
"DISTENDS": np.mean,
"correct": np.mean})
locus_level.to_csv(args.output_stats+".locuslevel.tab", index=False, sep="\t")
##### Results #####
sys.stdout.write("########## Results ########\n")
# Get stats about calls
num_samples = len(set(res[res["allele1"].apply(str)!="."]["sample"]))
num_markers = len(set(res[res["allele1"].apply(str)!="."]["marker"]))
num_nocalls = res[res["allele1"].apply(str)=="."].shape[0]
sys.stdout.write("# Samples: %s\n"%num_samples)
sys.stdout.write("# Markers: %s\n"%num_markers)
sys.stdout.write("# No call rate: %s\n"%(num_nocalls*1.0/res.shape[0]))
sys.stdout.write("# Number of calls compared: %s\n"%res.shape[0])
# Accuracy
acc = np.mean(res[res["allele1"].apply(str)!="."]["correct"])
sys.stdout.write("# Accuracy: %s\n"%acc)
# R2
dl = map(float, list(res[res["allele1"].apply(str)!="."]["dosage_lob"]))
dc = map(float, list(res[res["allele1"].apply(str)!="."]["dosage_cap"]))
r2 = pearsonr(dl, dc)[0]**2
sys.stdout.write("# R2: %s\n"%r2)
| gpl-3.0 |
itsjeyd/edx-platform | common/djangoapps/track/views/tests/test_segmentio.py | 19 | 22011 | """Ensure we can parse events sent to us from the Segment webhook integration"""
from datetime import datetime
import json
from ddt import ddt, data, unpack
from mock import sentinel
from nose.plugins.attrib import attr
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.test.utils import override_settings
from openedx.core.lib.tests.assertions.events import assert_event_matches
from track.middleware import TrackMiddleware
from track.tests import EventTrackingTestCase
from track.views import segmentio
SECRET = 'anything'
ENDPOINT = '/segmentio/test/event'
USER_ID = 10
MOBILE_SHIM_PROCESSOR = [
{'ENGINE': 'track.shim.LegacyFieldMappingProcessor'},
{'ENGINE': 'track.shim.PrefixedEventProcessor'},
]
def expect_failure_with_message(message):
"""Ensure the test raises an exception and does not emit an event"""
def test_decorator(func):
def test_decorated(self, *args, **kwargs):
self.assertRaisesRegexp(segmentio.EventValidationError, message, func, self, *args, **kwargs)
self.assert_no_events_emitted()
return test_decorated
return test_decorator
@attr(shard=3)
@ddt
@override_settings(
TRACKING_SEGMENTIO_WEBHOOK_SECRET=SECRET,
TRACKING_IGNORE_URL_PATTERNS=[ENDPOINT],
TRACKING_SEGMENTIO_ALLOWED_TYPES=['track'],
TRACKING_SEGMENTIO_DISALLOWED_SUBSTRING_NAMES=['.bi.'],
TRACKING_SEGMENTIO_SOURCE_MAP={'test-app': 'mobile'},
EVENT_TRACKING_PROCESSORS=MOBILE_SHIM_PROCESSOR,
)
class SegmentIOTrackingTestCase(EventTrackingTestCase):
"""Test processing of Segment events"""
def setUp(self):
super(SegmentIOTrackingTestCase, self).setUp()
self.maxDiff = None # pylint: disable=invalid-name
self.request_factory = RequestFactory()
def test_get_request(self):
request = self.request_factory.get(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 405)
self.assert_no_events_emitted()
@override_settings(
TRACKING_SEGMENTIO_WEBHOOK_SECRET=None
)
def test_no_secret_config(self):
request = self.request_factory.post(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def test_no_secret_provided(self):
request = self.request_factory.post(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def test_secret_mismatch(self):
request = self.create_request(key='y')
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def create_request(self, key=None, **kwargs):
"""Create a fake request that emulates a request from the Segment servers to ours"""
if key is None:
key = SECRET
request = self.request_factory.post(ENDPOINT + "?key=" + key, **kwargs)
if 'data' in kwargs:
request.json = json.loads(kwargs['data'])
return request
@data('identify', 'Group', 'Alias', 'Page', 'screen')
def test_segmentio_ignore_actions(self, action):
self.post_segmentio_event(action=action)
self.assert_no_events_emitted()
@data('edx.bi.some_name', 'EDX.BI.CAPITAL_NAME')
def test_segmentio_ignore_names(self, name):
self.post_segmentio_event(name=name)
self.assert_no_events_emitted()
def post_segmentio_event(self, **kwargs):
"""Post a fake Segment event to the view that processes it"""
request = self.create_request(
data=self.create_segmentio_event_json(**kwargs),
content_type='application/json'
)
segmentio.track_segmentio_event(request)
def create_segmentio_event(self, **kwargs):
"""Populate a fake Segment event with data of interest"""
action = kwargs.get('action', 'Track')
sample_event = {
"userId": kwargs.get('user_id', USER_ID),
"event": "Did something",
"properties": {
'name': kwargs.get('name', str(sentinel.name)),
'data': kwargs.get('data', {}),
'context': {
'course_id': kwargs.get('course_id') or '',
'app_name': 'edx.mobile.android',
}
},
"channel": 'server',
"context": {
"library": {
"name": kwargs.get('library_name', 'test-app'),
"version": "unknown"
},
"app": {
"version": "1.0.1",
},
'userAgent': str(sentinel.user_agent),
},
"receivedAt": "2014-08-27T16:33:39.100Z",
"timestamp": "2014-08-27T16:33:39.215Z",
"type": action.lower(),
"projectId": "u0j33yjkr8",
"messageId": "qy52hwp4",
"version": 2,
"integrations": {},
"options": {
"library": "unknown",
"providers": {}
},
"action": action
}
if 'context' in kwargs:
sample_event['properties']['context'].update(kwargs['context'])
return sample_event
def create_segmentio_event_json(self, **kwargs):
"""Return a json string containing a fake Segment event"""
return json.dumps(self.create_segmentio_event(**kwargs))
def test_segmentio_ignore_unknown_libraries(self):
self.post_segmentio_event(library_name='foo')
self.assert_no_events_emitted()
@expect_failure_with_message(segmentio.ERROR_USER_NOT_EXIST)
def test_no_user_for_user_id(self):
self.post_segmentio_event(user_id=40)
@expect_failure_with_message(segmentio.ERROR_INVALID_USER_ID)
def test_invalid_user_id(self):
self.post_segmentio_event(user_id='foobar')
@data('foo/bar/baz', 'course-v1:foo+bar+baz')
def test_success(self, course_id):
middleware = TrackMiddleware()
request = self.create_request(
data=self.create_segmentio_event_json(data={'foo': 'bar'}, course_id=course_id),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
# The middleware normally emits an event, make sure it doesn't in this case.
self.assert_no_events_emitted()
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': str(sentinel.name),
'name': str(sentinel.name),
'event': {'foo': 'bar'},
'agent': str(sentinel.user_agent),
'page': None,
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'application': {
'name': 'edx.mobile.android',
'version': '1.0.1',
},
'user_id': USER_ID,
'course_id': course_id,
'org_id': u'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
}
finally:
middleware.process_response(request, None)
assert_event_matches(expected_event, self.get_event())
def test_invalid_course_id(self):
request = self.create_request(
data=self.create_segmentio_event_json(course_id='invalid'),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
self.assert_events_emitted()
@expect_failure_with_message(segmentio.ERROR_MISSING_NAME)
def test_missing_name(self):
sample_event_raw = self.create_segmentio_event()
del sample_event_raw['properties']['name']
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_DATA)
def test_missing_data(self):
sample_event_raw = self.create_segmentio_event()
del sample_event_raw['properties']['data']
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_TIMESTAMP)
def test_missing_timestamp(self):
sample_event_raw = self.create_event_without_fields('timestamp')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_RECEIVED_AT)
def test_missing_received_at(self):
sample_event_raw = self.create_event_without_fields('receivedAt')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
def create_event_without_fields(self, *fields):
"""Create a fake event and remove some fields from it"""
event = self.create_segmentio_event()
for field in fields:
if field in event:
del event[field]
return event
def test_string_user_id(self):
User.objects.create(pk=USER_ID, username=str(sentinel.username))
self.post_segmentio_event(user_id=str(USER_ID))
self.assert_events_emitted()
def test_hiding_failure(self):
sample_event_raw = self.create_event_without_fields('timestamp')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
self.assert_no_events_emitted()
@data(
('edx.video.played', 'play_video'),
('edx.video.paused', 'pause_video'),
('edx.video.stopped', 'stop_video'),
('edx.video.loaded', 'load_video'),
('edx.video.position.changed', 'seek_video'),
('edx.video.transcript.shown', 'show_transcript'),
('edx.video.transcript.hidden', 'hide_transcript'),
)
@unpack
def test_video_event(self, name, event_type):
course_id = 'foo/bar/baz'
middleware = TrackMiddleware()
input_payload = {
'current_time': 132.134456,
'module_id': 'i4x://foo/bar/baz/some_module',
'code': 'mobile'
}
if name == 'edx.video.loaded':
            # We use the same input payload for all of these types of events, but the load video event is the only
            # one that is not actually expected to contain a "current time" field. So we remove it from the input
            # payload here.
del input_payload['current_time']
request = self.create_request(
data=self.create_segmentio_event_json(
name=name,
data=input_payload,
context={
'open_in_browser_url': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity/2',
'course_id': course_id,
'application': {
'name': 'edx.mobileapp.android',
'version': '29',
'component': 'videoplayer'
}
}),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': event_type,
'name': name,
'agent': str(sentinel.user_agent),
'page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity',
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'user_id': USER_ID,
'course_id': course_id,
'org_id': 'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'application': {
'name': 'edx.mobileapp.android',
'version': '29',
'component': 'videoplayer'
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
'event': {
'currentTime': 132.134456,
'id': 'i4x-foo-bar-baz-some_module',
'code': 'mobile'
}
}
if name == 'edx.video.loaded':
# We use the same expected payload for all of these types of events, but the load video event is the
# only one that is not actually expected to contain a "current time" field. So we remove it from the
# expected event here.
del expected_event['event']['currentTime']
finally:
middleware.process_response(request, None)
actual_event = self.get_event()
assert_event_matches(expected_event, actual_event)
@data(
# Verify positive slide case. Verify slide to onSlideSeek. Verify
# edx.video.seeked emitted from iOS v1.0.02 is changed to
# edx.video.position.changed.
(1, 1, "seek_type", "slide", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify negative slide case. Verify slide to onSlideSeek. Verify
# edx.video.seeked to edx.video.position.changed.
(-2, -2, "seek_type", "slide", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify +30 is changed to -30 which is incorrectly emitted in iOS
# v1.0.02. Verify skip to onSkipSeek
(30, -30, "seek_type", "skip", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify the correct case of -30 is also handled as well. Verify skip
# to onSkipSeek
(-30, -30, "seek_type", "skip", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.iOS', '1.0.02'),
# Verify positive slide case where onSkipSeek is changed to
# onSlideSkip. Verify edx.video.seeked emitted from Android v1.0.02 is
# changed to edx.video.position.changed.
(1, 1, "type", "onSkipSeek", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive slide case where onSkipSeek is changed to
# onSlideSkip. Verify edx.video.seeked emitted from Android v1.0.02 is
# changed to edx.video.position.changed.
(-2, -2, "type", "onSkipSeek", "onSlideSeek", "edx.video.seeked", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive skip case where onSkipSeek is not changed and does
# not become negative.
(30, 30, "type", "onSkipSeek", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02'),
# Verify positive skip case where onSkipSeek is not changed.
(-30, -30, "type", "onSkipSeek", "onSkipSeek", "edx.video.position.changed", "edx.video.position.changed", 'edx.mobileapp.android', '1.0.02')
)
@unpack
def test_previous_builds(self,
requested_skip_interval,
expected_skip_interval,
seek_type_key,
seek_type,
expected_seek_type,
name,
expected_name,
platform,
version,
):
"""
Test backwards compatibility of previous app builds
iOS version 1.0.02: Incorrectly emits the skip back 30 seconds as +30
instead of -30.
Android version 1.0.02: Skip and slide were both being returned as a
skip. Skip or slide is determined by checking if the skip time is == -30
Additionally, for both of the above mentioned versions, edx.video.seeked
was sent instead of edx.video.position.changed
"""
course_id = 'foo/bar/baz'
middleware = TrackMiddleware()
input_payload = {
"code": "mobile",
"new_time": 89.699177437,
"old_time": 119.699177437,
seek_type_key: seek_type,
"requested_skip_interval": requested_skip_interval,
'module_id': 'i4x://foo/bar/baz/some_module',
}
request = self.create_request(
data=self.create_segmentio_event_json(
name=name,
data=input_payload,
context={
'open_in_browser_url': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity/2',
'course_id': course_id,
'application': {
'name': platform,
'version': version,
'component': 'videoplayer'
}
},
),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'accept_language': '',
'referer': '',
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': "seek_video",
'name': expected_name,
'agent': str(sentinel.user_agent),
'page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity',
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'user_id': USER_ID,
'course_id': course_id,
'org_id': 'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
},
'app': {
'version': '1.0.1',
},
},
'application': {
'name': platform,
'version': version,
'component': 'videoplayer'
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
'event': {
"code": "mobile",
"new_time": 89.699177437,
"old_time": 119.699177437,
"type": expected_seek_type,
"requested_skip_interval": expected_skip_interval,
'id': 'i4x-foo-bar-baz-some_module',
}
}
finally:
middleware.process_response(request, None)
actual_event = self.get_event()
assert_event_matches(expected_event, actual_event)
| agpl-3.0 |
DickJC123/mxnet | example/profiler/profiler_matmul.py | 7 | 2449 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import argparse
import time
def parse_args():
parser = argparse.ArgumentParser(description='Set network parameters for benchmark test.')
parser.add_argument('--profile_filename', type=str, default='profile_matmul_20iter.json')
parser.add_argument('--iter_num', type=int, default=100)
parser.add_argument('--begin_profiling_iter', type=int, default=50)
parser.add_argument('--end_profiling_iter', type=int, default=70)
return parser.parse_args()
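# Example invocation (values mirror the defaults above; profiles iterations
# 50 through 70 of a 100-iteration run):
#   python profiler_matmul.py --iter_num 100 --begin_profiling_iter 50 \
#       --end_profiling_iter 70 --profile_filename profile_matmul_20iter.json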
args = parse_args()
if __name__ == '__main__':
mx.profiler.set_config(profile_symbolic=True, filename=args.profile_filename)
    print('profile file saved to {0}'.format(args.profile_filename))
A = mx.sym.Variable('A')
B = mx.sym.Variable('B')
C = mx.symbol.dot(A, B)
executor = C.simple_bind(mx.gpu(0), 'write', A=(4096, 4096), B=(4096, 4096))
a = mx.random.uniform(-1.0, 1.0, shape=(4096, 4096))
b = mx.random.uniform(-1.0, 1.0, shape=(4096, 4096))
a.copyto(executor.arg_dict['A'])
b.copyto(executor.arg_dict['B'])
flag = False
print("execution begin")
for i in range(args.iter_num):
if i == args.begin_profiling_iter:
t0 = time.process_time()
mx.profiler.set_state('run')
if i == args.end_profiling_iter:
t1 = time.process_time()
mx.profiler.set_state('stop')
executor.forward()
c = executor.outputs[0]
c.wait_to_read()
print("execution end")
duration = t1 - t0
print('duration: {0}s'.format(duration))
print(' {0}ms/operator'.format(duration*1000/args.iter_num))
| apache-2.0 |
sve-odoo/odoo | addons/sale_mrp/__openerp__.py | 61 | 1935 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sales and MRP Management',
'version': '1.0',
'category': 'Hidden',
'description': """
This module provides the facility to install the MRP and Sales modules at the same time.
====================================================================================
It is basically used when we want to keep track of production orders generated
from sales orders. It adds the sales name and sales reference on the production order.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/SO_to_MO.jpeg'],
'depends': ['mrp', 'sale_stock'],
'data': [
'security/ir.model.access.csv',
'sale_mrp_view.xml',
],
'demo': [],
'test':[
'test/cancellation_propagated.yml',
'test/sale_mrp.yml',
],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
BaluDontu/docker-volume-vsphere | esx_service/vsan_policy.py | 1 | 9052 | #!/usr/bin/env python
# Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Module for VSAN storage policy creation and configuration
import os
import logging
import shutil
import vmdk_utils
import vsan_info
import volume_kv as kv
import vsan_info
ERROR_NO_VSAN_DATASTORE = 'Error: VSAN datastore does not exist'
def create(name, content):
"""
Create a new storage policy and save it as dockvols/policies/name in
the VSAN datastore. If there are VSAN volumes currently using a policy with
the same name, creation will fail. Return a string on error and None on
success.
"""
datastore_path = vsan_info.get_vsan_dockvols_path()
if not datastore_path:
return ERROR_NO_VSAN_DATASTORE
policies_dir = make_policies_dir(datastore_path)
filename = os.path.join(policies_dir, name)
if os.path.isfile(filename):
return 'Error: Policy already exists'
if not validate_vsan_policy_string(content):
return 'Error: Invalid policy string'
return create_policy_file(filename, content)
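# Usage sketch for create() -- the policy string below is a hypothetical VSAN
# SPBM expression, and a VSAN datastore must actually be mounted:
#   err = create('allflash', '(("hostFailuresToTolerate" i1))')
#   if err:
#       print(err)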
def update(name, content):
"""
Update the content of an existing VSAN policy in the VSAN datastore.
Update the policy content in each VSAN object currently using the policy. If
a VSAN policy of the given name does not exist return an error string.
Return None on success.
"""
path = policy_path(name)
if not path:
return ERROR_NO_VSAN_DATASTORE
err = update_policy_file_content(path, content)
if err:
return err
return update_vsan_objects_with_policy(name, content)
def update_policy_file_content(path, content):
"""
Update the VSAN policy file content. Return an error msg or None on success.
"""
try:
with open(path) as f:
existing_content = f.read()
except OSError as e:
if not os.path.isfile(path):
return 'Error: Policy {0} does not exist'.format(
os.path.basename(path))
else:
return 'Error opening existing policy file {0}: {1}'.format(path, e)
if existing_content.strip() == content.strip():
return 'Error: New policy is identical to old policy. Ignoring.'
# Create a temporary file so we don't corrupt an existing policy file
tmpfile = '{0}.tmp'.format(path)
err = create_policy_file(tmpfile, content)
if err:
return err
# Copy the original policy file to a backup file (.old)
# The backup will be maintained in case the policy content is invalid, when
# attempting to apply it to existing volumes.
# Do an atomic rename of the tmpfile to the real policy file name
try:
shutil.copy(path, backup_policy_filename(path))
os.rename(tmpfile, path)
except OSError:
        print('Internal Error: Failed to update policy file contents: '
              '{0}'.format(path))
raise
return None
def update_vsan_objects_with_policy(name, content):
"""
Find all VSAN objects using the policy given by `name` and update the policy
contents in their objects. Returns an error string containing the list of
volumes that failed to update, or a msg if there were no volumes to update.
Returns None if all volumes were updated successfully.
Note: This function assumes datastore_path exists.
"""
update_count = 0
failed_updates = []
dockvols_path = vsan_info.get_vsan_dockvols_path()
print("This operation may take a while. Please be patient.")
for v in list_volumes_and_policies():
if v['policy'] == name:
volume_name = v['volume']
vmdk_path = os.path.join(dockvols_path, volume_name)
if vsan_info.set_policy(vmdk_path, content):
                update_count += 1
else:
failed_updates.append(volume_name)
if len(failed_updates) != 0:
if update_count == 0:
# All volumes failed to update, so reset the original policy
os.rename(policy_path(backup_policy_filename(name)),
policy_path(name))
else:
log_failed_updates(failed_updates, name)
return ('Successfully updated: {0} volumes.\n'
'Failed to update: {1} volumes'.format(update_count,
failed_updates))
# Remove old policy file on success
os.remove(policy_path(backup_policy_filename(name)))
return None
def backup_policy_filename(name):
""" Generate a .old file from a policy name or path """
return '{0}.old'.format(name)
def log_failed_updates(volumes, policy_name):
"""
During policy update, some volumes may fail to have their VSAN policies
updated. We create a file containing these volumes for debugging purposes.
"""
filename = policy_path('{0}.failed_volume_updates'.format(policy_name))
try:
with open(filename, 'w') as f:
            f.write('\n'.join(volumes))
f.write('\n')
except:
print("Failed to save volume names that failed to update to file."
"Please record them for future use.")
def make_policies_dir(datastore_path):
"""
Create the policies dir if it doesn't exist and return the path.
This function assumes that datastore_path is a VSAN datastore,
although it won't fail if it isn't.
"""
policies_dir = os.path.join(datastore_path, 'policies')
try:
os.mkdir(policies_dir)
except OSError:
pass
return policies_dir
def create_policy_file(filename, content):
"""
Create a storage policy file in filename. Returns an error string on
failure and None on success.
"""
try:
with open(filename, 'w') as f:
f.write(content)
f.write('\n')
except:
msg = 'Error: Failed to open {0} for writing'.format(filename)
logging.exception(msg)
if os.path.isfile(filename):
os.remove(filename)
return msg
return None
def delete(name):
"""
Remove a given policy. If the policy does not exist return an error string,
otherwise return None
"""
path = vsan_info.get_vsan_dockvols_path()
if not path:
return ERROR_NO_VSAN_DATASTORE
vmdk = policy_in_use(path, name)
if vmdk:
return 'Error: Cannot remove. Policy is in use by {0}'.format(vmdk)
try:
os.remove(policy_path(name))
except:
logging.exception("Failed to remove %s policy file", name)
return 'Error: {0} does not exist'.format(name)
return None
def get_policies():
""" Return a dict of all VSAN policy names to policy content. """
policies = {}
path = vsan_info.get_vsan_dockvols_path()
if not path:
return {}
path = make_policies_dir(path)
for name in os.listdir(path):
with open(os.path.join(path, name)) as f:
content = f.read()
policies[name] = content
return policies
def list_volumes_and_policies():
""" Return a list of vmdks and the policies in use"""
vmdks_and_policies = []
path = vsan_info.get_vsan_dockvols_path()
if not path:
return []
for vmdk in vmdk_utils.list_vmdks(path):
policy = kv_get_vsan_policy_name(os.path.join(path, vmdk))
vmdks_and_policies.append({'volume': vmdk, 'policy': policy})
return vmdks_and_policies
def policy_exists(name):
""" Check if the policy file exists """
return os.path.isfile(policy_path(name))
def policy_path(name):
"""
Return the path to a given policy file or None if VSAN datastore doesn't
exist
"""
path = vsan_info.get_vsan_dockvols_path()
if not path:
return None
return os.path.join(path, 'policies', name)
def kv_get_vsan_policy_name(path):
"""
Take a path for a vmdk and return a policy name if it exists or None if it
doesn't
"""
try:
return kv.getAll(path)[kv.VOL_OPTS][kv.VSAN_POLICY_NAME]
except:
return None
def policy_in_use(path, name):
"""
Check if a policy is in use by a VMDK and return the name of the first VMDK
using it if it is, None otherwise
"""
for vmdk in vmdk_utils.list_vmdks(path):
policy = kv_get_vsan_policy_name(os.path.join(path, vmdk))
if policy == name:
return vmdk
return None
def validate_vsan_policy_string(content):
"""
Stub for a function that validates the syntax of a vsan policy string
"""
return True
| apache-2.0 |
eugene373/Galaxy-S2-ICS | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
        ret += " last event: %s" % self.event.__repr__()
return ret
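# Design note: the RunqueueSnapshot methods above return fresh snapshots
# rather than mutating in place, so every TimeSlice can keep a stable view
# of each cpu's runqueue at that instant.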
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
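        # Binary search over the ordered, non-overlapping slices; returns
        # the index of the slice containing ts, or -1 if none does.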
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
Ditmar/plugin.video.pelisalacarta | servers/watchfreeinhd.py | 44 | 1983 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para watchfreeinhd
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[watchfreeinhd.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
    # Download the page; the user gets two buttons, "Download" or "Watch"
data = scrapertools.cache_page(page_url)
    # Fetch it again as if the "Watch" button had been pressed
# http://srv.hdplay.org/storage/flv/xGylz8.flv?token=703acade4b51aa6b26ad264327c4a4cf
data = scrapertools.cache_page(page_url,post="agree=")
patron = '<div id="playerHolder">[^<]+'
patron += '<a href="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if len(matches)>0:
video_urls.append( ["[watchfreeinhd]",matches[0] ] )
for video_url in video_urls:
logger.info("[watchfreeinhd.py] %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Find this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://www.watchfreeinhd.com/r0GUbN
patronvideos = '(http://www.watchfreeinhd.com/[A-Za-z0-9]+)'
logger.info("[watchfreeinhd.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[watchfreeinhd]"
url = match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'watchfreeinhd' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
| gpl-3.0 |
adaitche/luigi | test/task_test.py | 13 | 14484 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import doctest
import pickle
import six
import warnings
from helpers import unittest, LuigiTestCase
from datetime import datetime, timedelta
import luigi
import luigi.task
import luigi.util
import collections
from luigi.task_register import load_task
class DummyTask(luigi.Task):
param = luigi.Parameter()
bool_param = luigi.BoolParameter()
int_param = luigi.IntParameter()
float_param = luigi.FloatParameter()
date_param = luigi.DateParameter()
datehour_param = luigi.DateHourParameter()
timedelta_param = luigi.TimeDeltaParameter()
insignificant_param = luigi.Parameter(significant=False)
DUMMY_TASK_OK_PARAMS = dict(
param='test',
bool_param=True,
int_param=666,
float_param=123.456,
date_param=datetime(2014, 9, 13).date(),
datehour_param=datetime(2014, 9, 13, 9),
timedelta_param=timedelta(44), # doesn't support seconds
insignificant_param='test')
class DefaultInsignificantParamTask(luigi.Task):
insignificant_param = luigi.Parameter(significant=False, default='value')
necessary_param = luigi.Parameter(significant=False)
class TaskTest(unittest.TestCase):
def test_tasks_doctest(self):
doctest.testmod(luigi.task)
def test_task_to_str_to_task(self):
original = DummyTask(**DUMMY_TASK_OK_PARAMS)
other = DummyTask.from_str_params(original.to_str_params())
self.assertEqual(original, other)
def test_task_from_str_insignificant(self):
params = {'necessary_param': 'needed'}
original = DefaultInsignificantParamTask(**params)
other = DefaultInsignificantParamTask.from_str_params(params)
self.assertEqual(original, other)
def test_task_missing_necessary_param(self):
with self.assertRaises(luigi.parameter.MissingParameterException):
DefaultInsignificantParamTask.from_str_params({})
def test_external_tasks_loadable(self):
task = load_task("luigi", "ExternalTask", {})
assert(isinstance(task, luigi.ExternalTask))
def test_getpaths(self):
class RequiredTask(luigi.Task):
def output(self):
return luigi.LocalTarget("/path/to/target/file")
t = RequiredTask()
reqs = {}
reqs["bare"] = t
reqs["dict"] = {"key": t}
reqs["OrderedDict"] = collections.OrderedDict([("key", t)])
reqs["list"] = [t]
reqs["tuple"] = (t,)
reqs["generator"] = (t for _ in range(10))
struct = luigi.task.getpaths(reqs)
self.assertIsInstance(struct, dict)
self.assertIsInstance(struct["bare"], luigi.Target)
self.assertIsInstance(struct["dict"], dict)
self.assertIsInstance(struct["OrderedDict"], collections.OrderedDict)
self.assertIsInstance(struct["list"], list)
self.assertIsInstance(struct["tuple"], tuple)
self.assertTrue(hasattr(struct["generator"], "__iter__"))
def test_flatten(self):
flatten = luigi.task.flatten
self.assertEqual(sorted(flatten({'a': 'foo', 'b': 'bar'})), ['bar', 'foo'])
self.assertEqual(sorted(flatten(['foo', ['bar', 'troll']])), ['bar', 'foo', 'troll'])
self.assertEqual(flatten('foo'), ['foo'])
self.assertEqual(flatten(42), [42])
self.assertEqual(flatten((len(i) for i in ["foo", "troll"])), [3, 5])
self.assertRaises(TypeError, flatten, (len(i) for i in ["foo", "troll", None]))
def test_externalized_task_picklable(self):
task = luigi.task.externalize(luigi.Task())
pickled_task = pickle.dumps(task)
self.assertEqual(task, pickle.loads(pickled_task))
def test_no_unpicklable_properties(self):
task = luigi.Task()
task.set_tracking_url = lambda tracking_url: tracking_url
task.set_status_message = lambda message: message
with task.no_unpicklable_properties():
pickle.dumps(task)
self.assertIsNotNone(task.set_tracking_url)
self.assertIsNotNone(task.set_status_message)
tracking_url = task.set_tracking_url('http://test.luigi.com/')
self.assertEqual(tracking_url, 'http://test.luigi.com/')
message = task.set_status_message('message')
self.assertEqual(message, 'message')
def test_no_warn_if_param_types_ok(self):
with warnings.catch_warnings(record=True) as w:
DummyTask(**DUMMY_TASK_OK_PARAMS)
self.assertEqual(len(w), 0, msg='No warning should be raised when correct parameter types are used')
if six.PY3: # assertWarnsRegex was introduced in Python 3.2
def test_warn_on_non_str_param(self):
params = dict(**DUMMY_TASK_OK_PARAMS)
params['param'] = 42
with self.assertWarnsRegex(UserWarning, 'Parameter "param" with value "42" is not of type string.'):
DummyTask(**params)
def test_warn_on_non_timedelta_param(self):
params = dict(**DUMMY_TASK_OK_PARAMS)
class MockTimedelta(object):
days = 1
seconds = 1
params['timedelta_param'] = MockTimedelta()
with self.assertWarnsRegex(UserWarning, 'Parameter "timedelta_param" with value ".*" is not of type timedelta.'):
DummyTask(**params)
class ExternalizeTaskTest(LuigiTestCase):
def test_externalize_taskclass(self):
class MyTask(luigi.Task):
def run(self):
pass
self.assertIsNotNone(MyTask.run) # Assert what we believe
task_object = luigi.task.externalize(MyTask)()
self.assertIsNone(task_object.run)
self.assertIsNotNone(MyTask.run) # Check immutability
self.assertIsNotNone(MyTask().run) # Check immutability
def test_externalize_taskobject(self):
class MyTask(luigi.Task):
def run(self):
pass
task_object = luigi.task.externalize(MyTask())
self.assertIsNone(task_object.run)
self.assertIsNotNone(MyTask.run) # Check immutability
self.assertIsNotNone(MyTask().run) # Check immutability
def test_externalize_taskclass_readable_name(self):
class MyTask(luigi.Task):
def run(self):
pass
task_class = luigi.task.externalize(MyTask)
self.assertIsNot(task_class, MyTask)
self.assertIn("MyTask", task_class.__name__)
def test_externalize_taskclass_instance_cache(self):
class MyTask(luigi.Task):
def run(self):
pass
task_class = luigi.task.externalize(MyTask)
self.assertIsNot(task_class, MyTask)
self.assertIs(MyTask(), MyTask()) # Assert it have enabled the instance caching
self.assertIsNot(task_class(), MyTask()) # Now, they should not be the same of course
def test_externalize_same_id(self):
class MyTask(luigi.Task):
def run(self):
pass
task_normal = MyTask()
task_ext_1 = luigi.task.externalize(MyTask)()
task_ext_2 = luigi.task.externalize(MyTask())
self.assertEqual(task_normal.task_id, task_ext_1.task_id)
self.assertEqual(task_normal.task_id, task_ext_2.task_id)
def test_externalize_same_id_with_task_namespace(self):
# Dependent on the new behavior from spotify/luigi#1953
class MyTask(luigi.Task):
task_namespace = "something.domething"
def run(self):
pass
task_normal = MyTask()
task_ext_1 = luigi.task.externalize(MyTask())
task_ext_2 = luigi.task.externalize(MyTask)()
self.assertEqual(task_normal.task_id, task_ext_1.task_id)
self.assertEqual(task_normal.task_id, task_ext_2.task_id)
self.assertEqual(str(task_normal), str(task_ext_1))
self.assertEqual(str(task_normal), str(task_ext_2))
def test_externalize_same_id_with_luigi_namespace(self):
# Dependent on the new behavior from spotify/luigi#1953
luigi.namespace('lets.externalize')
class MyTask(luigi.Task):
def run(self):
pass
luigi.namespace()
task_normal = MyTask()
task_ext_1 = luigi.task.externalize(MyTask())
task_ext_2 = luigi.task.externalize(MyTask)()
self.assertEqual(task_normal.task_id, task_ext_1.task_id)
self.assertEqual(task_normal.task_id, task_ext_2.task_id)
self.assertEqual(str(task_normal), str(task_ext_1))
self.assertEqual(str(task_normal), str(task_ext_2))
def test_externalize_with_requires(self):
class MyTask(luigi.Task):
def run(self):
pass
@luigi.util.requires(luigi.task.externalize(MyTask))
class Requirer(luigi.Task):
def run(self):
pass
self.assertIsNotNone(MyTask.run) # Check immutability
self.assertIsNotNone(MyTask().run) # Check immutability
def test_externalize_doesnt_affect_the_registry(self):
class MyTask(luigi.Task):
pass
reg_orig = luigi.task_register.Register._get_reg()
luigi.task.externalize(MyTask)
reg_afterwards = luigi.task_register.Register._get_reg()
self.assertEqual(reg_orig, reg_afterwards)
def test_can_uniquely_command_line_parse(self):
class MyTask(luigi.Task):
pass
# This first check is just an assumption rather than assertion
self.assertTrue(self.run_locally(['MyTask']))
luigi.task.externalize(MyTask)
# Now we check we don't encounter "ambiguous task" issues
self.assertTrue(self.run_locally(['MyTask']))
# We do this once again, is there previously was a bug like this.
luigi.task.externalize(MyTask)
self.assertTrue(self.run_locally(['MyTask']))
class TaskNamespaceTest(LuigiTestCase):
def setup_tasks(self):
class Foo(luigi.Task):
pass
class FooSubclass(Foo):
pass
return (Foo, FooSubclass, self.go_mynamespace())
def go_mynamespace(self):
luigi.namespace("mynamespace")
class Foo(luigi.Task):
p = luigi.IntParameter()
class Bar(Foo):
task_namespace = "othernamespace" # namespace override
class Baz(Bar): # inherits namespace for Bar
pass
luigi.namespace()
return collections.namedtuple('mynamespace', 'Foo Bar Baz')(Foo, Bar, Baz)
def test_vanilla(self):
(Foo, FooSubclass, namespace_test_helper) = self.setup_tasks()
self.assertEqual(Foo.task_family, "Foo")
self.assertEqual(str(Foo()), "Foo()")
self.assertEqual(FooSubclass.task_family, "FooSubclass")
self.assertEqual(str(FooSubclass()), "FooSubclass()")
def test_namespace(self):
(Foo, FooSubclass, namespace_test_helper) = self.setup_tasks()
self.assertEqual(namespace_test_helper.Foo.task_family, "mynamespace.Foo")
self.assertEqual(str(namespace_test_helper.Foo(1)), "mynamespace.Foo(p=1)")
self.assertEqual(namespace_test_helper.Bar.task_namespace, "othernamespace")
self.assertEqual(namespace_test_helper.Bar.task_family, "othernamespace.Bar")
self.assertEqual(str(namespace_test_helper.Bar(1)), "othernamespace.Bar(p=1)")
self.assertEqual(namespace_test_helper.Baz.task_namespace, "othernamespace")
self.assertEqual(namespace_test_helper.Baz.task_family, "othernamespace.Baz")
self.assertEqual(str(namespace_test_helper.Baz(1)), "othernamespace.Baz(p=1)")
def test_uses_latest_namespace(self):
luigi.namespace('a')
class _BaseTask(luigi.Task):
pass
luigi.namespace('b')
class _ChildTask(_BaseTask):
pass
luigi.namespace() # Reset everything
child_task = _ChildTask()
self.assertEqual(child_task.task_family, 'b._ChildTask')
self.assertEqual(str(child_task), 'b._ChildTask()')
def test_with_scope(self):
luigi.namespace('wohoo', scope='task_test')
luigi.namespace('bleh', scope='')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='task_test')
luigi.namespace(scope='')
self.assertEqual(MyTask.get_task_namespace(), 'wohoo')
def test_with_scope_not_matching(self):
luigi.namespace('wohoo', scope='incorrect_namespace')
luigi.namespace('bleh', scope='')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='incorrect_namespace')
luigi.namespace(scope='')
self.assertEqual(MyTask.get_task_namespace(), 'bleh')
class AutoNamespaceTest(LuigiTestCase):
this_module = 'task_test'
def test_auto_namespace_global(self):
luigi.auto_namespace()
class MyTask(luigi.Task):
pass
luigi.namespace()
self.assertEqual(MyTask.get_task_namespace(), self.this_module)
def test_auto_namespace_scope(self):
luigi.auto_namespace(scope='task_test')
luigi.namespace('bleh', scope='')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='task_test')
luigi.namespace(scope='')
self.assertEqual(MyTask.get_task_namespace(), self.this_module)
def test_auto_namespace_not_matching(self):
luigi.auto_namespace(scope='incorrect_namespace')
luigi.namespace('bleh', scope='')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='incorrect_namespace')
luigi.namespace(scope='')
self.assertEqual(MyTask.get_task_namespace(), 'bleh')
def test_auto_namespace_not_matching_2(self):
luigi.auto_namespace(scope='incorrect_namespace')
class MyTask(luigi.Task):
pass
luigi.namespace(scope='incorrect_namespace')
self.assertEqual(MyTask.get_task_namespace(), '')
| apache-2.0 |
zhangkun456/TeamTalk | win-client/3rdParty/src/json/test/pyjsontestrunner.py | 257 | 2137 | # Simple implementation of a json test runner to run the test against json-py.
import sys
import os.path
import json
import types
if len(sys.argv) != 2:
print "Usage: %s input-json-file", sys.argv[0]
sys.exit(3)
input_path = sys.argv[1]
base_path = os.path.splitext(input_path)[0]
actual_path = base_path + '.actual'
rewrite_path = base_path + '.rewrite'
rewrite_actual_path = base_path + '.actual-rewrite'
def valueTreeToString( fout, value, path = '.' ):
ty = type(value)
if ty is types.DictType:
fout.write( '%s={}\n' % path )
suffix = path[-1] != '.' and '.' or ''
names = value.keys()
names.sort()
for name in names:
valueTreeToString( fout, value[name], path + suffix + name )
elif ty is types.ListType:
fout.write( '%s=[]\n' % path )
for index, childValue in zip( xrange(0,len(value)), value ):
valueTreeToString( fout, childValue, path + '[%d]' % index )
elif ty is types.StringType:
fout.write( '%s="%s"\n' % (path,value) )
elif ty is types.IntType:
fout.write( '%s=%d\n' % (path,value) )
elif ty is types.FloatType:
fout.write( '%s=%.16g\n' % (path,value) )
elif value is True:
fout.write( '%s=true\n' % path )
elif value is False:
fout.write( '%s=false\n' % path )
elif value is None:
fout.write( '%s=null\n' % path )
else:
assert False and "Unexpected value type"
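# Sketch of the flattened output for the hypothetical input {"a": [1, true]}:
#   .={}
#   .a=[]
#   .a[0]=1
#   .a[1]=true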
def parseAndSaveValueTree( input, actual_path ):
root = json.loads( input )
fout = file( actual_path, 'wt' )
valueTreeToString( fout, root )
fout.close()
return root
def rewriteValueTree( value, rewrite_path ):
rewrite = json.dumps( value )
#rewrite = rewrite[1:-1] # Somehow the string is quoted ! jsonpy bug ?
file( rewrite_path, 'wt').write( rewrite + '\n' )
return rewrite
input = file( input_path, 'rt' ).read()
root = parseAndSaveValueTree( input, actual_path )
rewrite = rewriteValueTree( root, rewrite_path )
rewrite_root = parseAndSaveValueTree( rewrite, rewrite_actual_path )
sys.exit( 0 )
| apache-2.0 |
storm-computers/odoo | addons/hw_escpos/escpos/escpos.py | 48 | 31717 | # -*- coding: utf-8 -*-
import time
import copy
import io
import base64
import math
import md5
import re
import traceback
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
from PIL import Image
try:
import jcconv
except ImportError:
jcconv = None
try:
import qrcode
except ImportError:
qrcode = None
from constants import *
from exceptions import *
def utfstr(stuff):
""" converts stuff to string and does without failing if stuff is a utf8 string """
if isinstance(stuff,basestring):
return stuff
else:
return str(stuff)
class StyleStack:
"""
The stylestack is used by the xml receipt serializer to compute the active styles along the xml
    document. Styles are just xml attributes, there is no css mechanism. But the styles applied by
    the attributes are inherited by deeper nodes.
"""
def __init__(self):
self.stack = []
self.defaults = { # default style values
'align': 'left',
'underline': 'off',
'bold': 'off',
'size': 'normal',
'font' : 'a',
'width': 48,
'indent': 0,
'tabwidth': 2,
'bullet': ' - ',
'line-ratio':0.5,
'color': 'black',
'value-decimals': 2,
'value-symbol': '',
'value-symbol-position': 'after',
'value-autoint': 'off',
'value-decimals-separator': '.',
'value-thousands-separator': ',',
'value-width': 0,
}
self.types = { # attribute types, default is string and can be ommitted
'width': 'int',
'indent': 'int',
'tabwidth': 'int',
'line-ratio': 'float',
'value-decimals': 'int',
'value-width': 'int',
}
self.cmds = {
# translation from styles to escpos commands
# some style do not correspond to escpos command are used by
# the serializer instead
'align': {
'left': TXT_ALIGN_LT,
'right': TXT_ALIGN_RT,
'center': TXT_ALIGN_CT,
'_order': 1,
},
'underline': {
'off': TXT_UNDERL_OFF,
'on': TXT_UNDERL_ON,
'double': TXT_UNDERL2_ON,
# must be issued after 'size' command
# because ESC ! resets ESC -
'_order': 10,
},
'bold': {
'off': TXT_BOLD_OFF,
'on': TXT_BOLD_ON,
# must be issued after 'size' command
# because ESC ! resets ESC -
'_order': 10,
},
'font': {
'a': TXT_FONT_A,
'b': TXT_FONT_B,
# must be issued after 'size' command
# because ESC ! resets ESC -
'_order': 10,
},
'size': {
'normal': TXT_NORMAL,
'double-height': TXT_2HEIGHT,
'double-width': TXT_2WIDTH,
'double': TXT_DOUBLE,
'_order': 1,
},
'color': {
'black': TXT_COLOR_BLACK,
'red': TXT_COLOR_RED,
'_order': 1,
},
}
self.push(self.defaults)
def get(self,style):
""" what's the value of a style at the current stack level"""
level = len(self.stack) -1
while level >= 0:
if style in self.stack[level]:
return self.stack[level][style]
else:
level = level - 1
return None
def enforce_type(self, attr, val):
"""converts a value to the attribute's type"""
if not attr in self.types:
return utfstr(val)
elif self.types[attr] == 'int':
return int(float(val))
elif self.types[attr] == 'float':
return float(val)
else:
return utfstr(val)
def push(self, style={}):
"""push a new level on the stack with a style dictionnary containing style:value pairs"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
_style[attr] = self.enforce_type(attr, style[attr])
self.stack.append(_style)
def set(self, style={}):
"""overrides style values at the current stack level"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print 'WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr)
else:
self.stack[-1][attr] = self.enforce_type(attr, style[attr])
def pop(self):
""" pop a style stack level """
if len(self.stack) > 1 :
self.stack = self.stack[:-1]
def to_escpos(self):
""" converts the current style to an escpos command string """
cmd = ''
ordered_cmds = self.cmds.keys()
ordered_cmds.sort(lambda x,y: cmp(self.cmds[x]['_order'], self.cmds[y]['_order']))
for style in ordered_cmds:
cmd += self.cmds[style][self.get(style)]
return cmd
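# Minimal StyleStack usage sketch (hypothetical style values; assumes the
# TXT_* escpos constants from constants.py are importable):
#   stack = StyleStack()
#   stack.push({'bold': 'on', 'size': 'double'})
#   cmd = stack.to_escpos()   # escpos command string for the active style
#   stack.pop()               # drop back to the previous style level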
class XmlSerializer:
"""
Converts the xml inline / block tree structure to a string,
keeping track of newlines and spacings.
The string is outputted asap to the provided escpos driver.
"""
def __init__(self,escpos):
self.escpos = escpos
self.stack = ['block']
self.dirty = False
def start_inline(self,stylestack=None):
""" starts an inline entity with an optional style definition """
self.stack.append('inline')
if self.dirty:
self.escpos._raw(' ')
if stylestack:
self.style(stylestack)
def start_block(self,stylestack=None):
""" starts a block entity with an optional style definition """
if self.dirty:
self.escpos._raw('\n')
self.dirty = False
self.stack.append('block')
if stylestack:
self.style(stylestack)
def end_entity(self):
""" ends the entity definition. (but does not cancel the active style!) """
if self.stack[-1] == 'block' and self.dirty:
self.escpos._raw('\n')
self.dirty = False
if len(self.stack) > 1:
self.stack = self.stack[:-1]
def pre(self,text):
""" puts a string of text in the entity keeping the whitespace intact """
if text:
self.escpos.text(text)
self.dirty = True
def text(self,text):
""" puts text in the entity. Whitespace and newlines are stripped to single spaces. """
if text:
text = utfstr(text)
text = text.strip()
text = re.sub('\s+',' ',text)
if text:
self.dirty = True
self.escpos.text(text)
def linebreak(self):
""" inserts a linebreak in the entity """
self.dirty = False
self.escpos._raw('\n')
def style(self,stylestack):
""" apply a style to the entity (only applies to content added after the definition) """
self.raw(stylestack.to_escpos())
def raw(self,raw):
""" puts raw text or escpos command in the entity without affecting the state of the serializer """
self.escpos._raw(raw)
class XmlLineSerializer:
"""
    This is used to convert an xml tree into a single line, with a left and a right part.
    The content is not output to escpos directly, and is intended to be fed back to the
XmlSerializer as the content of a block entity.
"""
def __init__(self, indent=0, tabwidth=2, width=48, ratio=0.5):
self.tabwidth = tabwidth
self.indent = indent
self.width = max(0, width - int(tabwidth*indent))
self.lwidth = int(self.width*ratio)
self.rwidth = max(0, self.width - self.lwidth)
self.clwidth = 0
self.crwidth = 0
self.lbuffer = ''
self.rbuffer = ''
self.left = True
def _txt(self,txt):
if self.left:
if self.clwidth < self.lwidth:
txt = txt[:max(0, self.lwidth - self.clwidth)]
self.lbuffer += txt
self.clwidth += len(txt)
else:
if self.crwidth < self.rwidth:
txt = txt[:max(0, self.rwidth - self.crwidth)]
self.rbuffer += txt
self.crwidth += len(txt)
def start_inline(self,stylestack=None):
if (self.left and self.clwidth) or (not self.left and self.crwidth):
self._txt(' ')
def start_block(self,stylestack=None):
self.start_inline(stylestack)
def end_entity(self):
pass
def pre(self,text):
if text:
self._txt(text)
def text(self,text):
if text:
text = utfstr(text)
text = text.strip()
text = re.sub('\s+',' ',text)
if text:
self._txt(text)
def linebreak(self):
pass
def style(self,stylestack):
pass
def raw(self,raw):
pass
def start_right(self):
self.left = False
def get_line(self):
return ' ' * self.indent * self.tabwidth + self.lbuffer + ' ' * (self.width - self.clwidth - self.crwidth) + self.rbuffer
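# Sketch: building a left/right receipt line (hypothetical content):
#   line = XmlLineSerializer(indent=1, tabwidth=2, width=48, ratio=0.6)
#   line.text('Product A')
#   line.start_right()
#   line.text('5.00')
#   line.get_line()   # '  Product A' padded out to 48 chars, '5.00' at the right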
class Escpos:
""" ESC/POS Printer object """
device = None
encoding = None
img_cache = {}
def _check_image_size(self, size):
""" Check and fix the size of the image to 32 bits """
if size % 32 == 0:
return (0, 0)
else:
image_border = 32 - (size % 32)
if (image_border % 2) == 0:
return (image_border / 2, image_border / 2)
else:
return (image_border / 2, (image_border / 2) + 1)
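    # Worked example: a 100 px wide image gives 100 % 32 == 4, so
    # image_border == 28 and the padding returned is (14, 14); a 95 px wide
    # image gives a border of 1, split as (0, 1) by the odd branch. The `/`
    # is Python 2 integer division, which the bit packing below relies on.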
def _print_image(self, line, size):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
self._raw(S_RASTER_N)
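        # Raster header (assuming S_RASTER_N selects the GS v 0 raster mode):
        # four bytes xL, xH, yL, yH, i.e. width in bytes and height in dots.
        # _convert_image accumulates the padded width once per row into
        # size[0], so size[0] / size[1] recovers the width in pixels and the
        # / 8 converts it to bytes.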
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
self._raw(buffer.decode('hex'))
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
self._raw(buffer.decode("hex"))
buffer = ""
cont = 0
def _raw_print_image(self, line, size, output=None ):
""" Print formatted image """
i = 0
cont = 0
buffer = ""
raw = ""
def __raw(string):
if output:
output(string)
else:
self._raw(string)
raw += S_RASTER_N
buffer = "%02X%02X%02X%02X" % (((size[0]/size[1])/8), 0, size[1], 0)
raw += buffer.decode('hex')
buffer = ""
while i < len(line):
hex_string = int(line[i:i+8],2)
buffer += "%02X" % hex_string
i += 8
cont += 1
if cont % 4 == 0:
raw += buffer.decode("hex")
buffer = ""
cont = 0
return raw
def _convert_image(self, im):
""" Parse image and prepare it to a printable format """
pixels = []
pix_line = ""
im_left = ""
im_right = ""
switch = 0
img_size = [ 0, 0 ]
if im.size[0] > 512:
print "WARNING: Image is wider than 512 and could be truncated at print time "
if im.size[1] > 255:
raise ImageSizeError()
im_border = self._check_image_size(im.size[0])
for i in range(im_border[0]):
im_left += "0"
for i in range(im_border[1]):
im_right += "0"
for y in range(im.size[1]):
img_size[1] += 1
pix_line += im_left
img_size[0] += im_border[0]
for x in range(im.size[0]):
img_size[0] += 1
RGB = im.getpixel((x, y))
im_color = (RGB[0] + RGB[1] + RGB[2])
im_pattern = "1X0"
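                # Three-band threshold pattern: '1' prints the darkest band,
                # 'X' dithers the middle band by alternating the `switch`
                # toggle per pixel, and '0' leaves the lightest band blank.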
pattern_len = len(im_pattern)
switch = (switch - 1 ) * (-1)
for x in range(pattern_len):
if im_color <= (255 * 3 / pattern_len * (x+1)):
if im_pattern[x] == "X":
pix_line += "%d" % switch
else:
pix_line += im_pattern[x]
break
elif im_color > (255 * 3 / pattern_len * pattern_len) and im_color <= (255 * 3):
pix_line += im_pattern[-1]
break
pix_line += im_right
img_size[0] += im_border[1]
return (pix_line, img_size)
def image(self,path_img):
""" Open image file """
im_open = Image.open(path_img)
im = im_open.convert("RGB")
        # Convert the RGB image into a printable image
pix_line, img_size = self._convert_image(im)
self._print_image(pix_line, img_size)
def print_base64_image(self,img):
print 'print_b64_img'
id = md5.new(img).digest()
if id not in self.img_cache:
print 'not in cache'
img = img[img.find(',')+1:]
            f = io.BytesIO()
f.write(base64.decodestring(img))
f.seek(0)
img_rgba = Image.open(f)
img = Image.new('RGB', img_rgba.size, (255,255,255))
channels = img_rgba.split()
if len(channels) > 3:
# use alpha channel as mask
img.paste(img_rgba, mask=channels[3])
else:
img.paste(img_rgba)
print 'convert image'
pix_line, img_size = self._convert_image(img)
print 'print image'
buffer = self._raw_print_image(pix_line, img_size)
self.img_cache[id] = buffer
print 'raw image'
self._raw(self.img_cache[id])
def qr(self,text):
""" Print QR Code for the provided string """
qr_code = qrcode.QRCode(version=4, box_size=4, border=1)
qr_code.add_data(text)
qr_code.make(fit=True)
qr_img = qr_code.make_image()
im = qr_img._img.convert("RGB")
        # Convert the RGB image into a printable image and send it
        pix_line, img_size = self._convert_image(im)
        self._print_image(pix_line, img_size)
def barcode(self, code, bc, width=255, height=2, pos='below', font='a'):
""" Print Barcode """
# Align Bar Code()
self._raw(TXT_ALIGN_CT)
# Height
        if 2 <= height <= 6:
self._raw(BARCODE_HEIGHT)
else:
raise BarcodeSizeError()
# Width
        if 1 <= width <= 255:
self._raw(BARCODE_WIDTH)
else:
raise BarcodeSizeError()
# Font
if font.upper() == "B":
self._raw(BARCODE_FONT_B)
else: # DEFAULT FONT: A
self._raw(BARCODE_FONT_A)
# Position
if pos.upper() == "OFF":
self._raw(BARCODE_TXT_OFF)
elif pos.upper() == "BOTH":
self._raw(BARCODE_TXT_BTH)
elif pos.upper() == "ABOVE":
self._raw(BARCODE_TXT_ABV)
else: # DEFAULT POSITION: BELOW
self._raw(BARCODE_TXT_BLW)
# Type
if bc.upper() == "UPC-A":
self._raw(BARCODE_UPC_A)
elif bc.upper() == "UPC-E":
self._raw(BARCODE_UPC_E)
elif bc.upper() == "EAN13":
self._raw(BARCODE_EAN13)
elif bc.upper() == "EAN8":
self._raw(BARCODE_EAN8)
elif bc.upper() == "CODE39":
self._raw(BARCODE_CODE39)
elif bc.upper() == "ITF":
self._raw(BARCODE_ITF)
elif bc.upper() == "NW7":
self._raw(BARCODE_NW7)
else:
raise BarcodeTypeError()
# Print Code
if code:
self._raw(code)
else:
            raise BarcodeCodeError()
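    # Illustrative call, assuming a connected printer instance `p`:
    # p.barcode('8412345678905', 'EAN13', pos='below') selects the EAN13
    # command with the human readable text printed below the bars. Note the
    # height and width arguments only gate the preset BARCODE_HEIGHT and
    # BARCODE_WIDTH commands; their values are not themselves transmitted.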
def receipt(self,xml):
"""
Prints an xml based receipt definition
"""
def strclean(string):
if not string:
string = ''
string = string.strip()
            string = re.sub(r'\s+', ' ', string)
return string
def format_value(value, decimals=3, width=0, decimals_separator='.', thousands_separator=',', autoint=False, symbol='', position='after'):
decimals = max(0,int(decimals))
width = max(0,int(width))
value = float(value)
if autoint and math.floor(value) == value:
decimals = 0
if width == 0:
width = ''
if thousands_separator:
formatstr = "{:"+str(width)+",."+str(decimals)+"f}"
else:
formatstr = "{:"+str(width)+"."+str(decimals)+"f}"
ret = formatstr.format(value)
ret = ret.replace(',','COMMA')
ret = ret.replace('.','DOT')
ret = ret.replace('COMMA',thousands_separator)
ret = ret.replace('DOT',decimals_separator)
if symbol:
if position == 'after':
ret = ret + symbol
else:
ret = symbol + ret
return ret
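        # Worked examples for the helper above (values chosen for
        # illustration): format_value(1234.5, decimals=2) returns '1,234.50';
        # format_value(1234.5, decimals=2, decimals_separator=',',
        # thousands_separator='.') returns '1.234,50' thanks to the
        # COMMA/DOT placeholder swap; format_value(3.0, autoint=True,
        # symbol='$', position='before') returns '$3'.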
def print_elem(stylestack, serializer, elem, indent=0):
elem_styles = {
'h1': {'bold': 'on', 'size':'double'},
'h2': {'size':'double'},
'h3': {'bold': 'on', 'size':'double-height'},
'h4': {'size': 'double-height'},
'h5': {'bold': 'on'},
'em': {'font': 'b'},
'b': {'bold': 'on'},
}
stylestack.push()
if elem.tag in elem_styles:
stylestack.set(elem_styles[elem.tag])
stylestack.set(elem.attrib)
if elem.tag in ('p','div','section','article','receipt','header','footer','li','h1','h2','h3','h4','h5'):
serializer.start_block(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag in ('span','em','b','left','right'):
serializer.start_inline(stylestack)
serializer.text(elem.text)
for child in elem:
print_elem(stylestack,serializer,child)
serializer.start_inline(stylestack)
serializer.text(child.tail)
serializer.end_entity()
serializer.end_entity()
elif elem.tag == 'value':
serializer.start_inline(stylestack)
serializer.pre(format_value(
elem.text,
decimals=stylestack.get('value-decimals'),
width=stylestack.get('value-width'),
decimals_separator=stylestack.get('value-decimals-separator'),
thousands_separator=stylestack.get('value-thousands-separator'),
autoint=(stylestack.get('value-autoint') == 'on'),
symbol=stylestack.get('value-symbol'),
position=stylestack.get('value-symbol-position')
))
serializer.end_entity()
elif elem.tag == 'line':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
lineserializer = XmlLineSerializer(stylestack.get('indent')+indent,stylestack.get('tabwidth'),width,stylestack.get('line-ratio'))
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'left':
print_elem(stylestack,lineserializer,child,indent=indent)
elif child.tag == 'right':
lineserializer.start_right()
print_elem(stylestack,lineserializer,child,indent=indent)
serializer.pre(lineserializer.get_line())
serializer.end_entity()
elif elem.tag == 'ul':
serializer.start_block(stylestack)
bullet = stylestack.get('bullet')
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + bullet)
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'ol':
cwidth = len(str(len(elem))) + 2
i = 1
serializer.start_block(stylestack)
for child in elem:
if child.tag == 'li':
serializer.style(stylestack)
serializer.raw(' ' * indent * stylestack.get('tabwidth') + ' ' + (str(i)+')').ljust(cwidth))
i = i + 1
print_elem(stylestack,serializer,child,indent=indent+1)
serializer.end_entity()
elif elem.tag == 'pre':
serializer.start_block(stylestack)
serializer.pre(elem.text)
serializer.end_entity()
elif elem.tag == 'hr':
width = stylestack.get('width')
if stylestack.get('size') in ('double', 'double-width'):
width = width / 2
serializer.start_block(stylestack)
serializer.text('-'*width)
serializer.end_entity()
elif elem.tag == 'br':
serializer.linebreak()
elif elem.tag == 'img':
if 'src' in elem.attrib and 'data:' in elem.attrib['src']:
self.print_base64_image(elem.attrib['src'])
elif elem.tag == 'barcode' and 'encoding' in elem.attrib:
serializer.start_block(stylestack)
self.barcode(strclean(elem.text),elem.attrib['encoding'])
serializer.end_entity()
elif elem.tag == 'cut':
self.cut()
elif elem.tag == 'partialcut':
self.cut(mode='part')
elif elem.tag == 'cashdraw':
self.cashdraw(2)
self.cashdraw(5)
stylestack.pop()
try:
stylestack = StyleStack()
serializer = XmlSerializer(self)
root = ET.fromstring(xml.encode('utf-8'))
self._raw(stylestack.to_escpos())
print_elem(stylestack,serializer,root)
if 'open-cashdrawer' in root.attrib and root.attrib['open-cashdrawer'] == 'true':
self.cashdraw(2)
self.cashdraw(5)
            if 'cut' not in root.attrib or root.attrib['cut'] == 'true':
self.cut()
except Exception as e:
errmsg = str(e)+'\n'+'-'*48+'\n'+traceback.format_exc() + '-'*48+'\n'
self.text(errmsg)
self.cut()
raise e
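    # Minimal usage sketch, assuming a connected printer instance `p` and the
    # default StyleStack values (content is hypothetical):
    #
    #     p.receipt(u"""
    #     <receipt>
    #         <h1>My Shop</h1>
    #         <line><left>Total</left><right><value>12.5</value></right></line>
    #     </receipt>
    #     """)
    #
    # With no 'cut' attribute on the root element, the paper is cut at the end.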
def text(self,txt):
""" Print Utf8 encoded alpha-numeric text """
if not txt:
return
try:
txt = txt.decode('utf-8')
except:
try:
txt = txt.decode('utf-16')
except:
pass
self.extra_chars = 0
def encode_char(char):
"""
Encodes a single utf-8 character into a sequence of
esc-pos code page change instructions and character declarations
"""
char_utf8 = char.encode('utf-8')
encoded = ''
encoding = self.encoding # we reuse the last encoding to prevent code page switches at every character
encodings = {
# TODO use ordering to prevent useless switches
# TODO Support other encodings not natively supported by python ( Thai, Khazakh, Kanjis )
'cp437': TXT_ENC_PC437,
'cp850': TXT_ENC_PC850,
'cp852': TXT_ENC_PC852,
'cp857': TXT_ENC_PC857,
'cp858': TXT_ENC_PC858,
'cp860': TXT_ENC_PC860,
'cp863': TXT_ENC_PC863,
'cp865': TXT_ENC_PC865,
'cp866': TXT_ENC_PC866,
'cp862': TXT_ENC_PC862,
'cp720': TXT_ENC_PC720,
'cp936': TXT_ENC_PC936,
'iso8859_2': TXT_ENC_8859_2,
'iso8859_7': TXT_ENC_8859_7,
'iso8859_9': TXT_ENC_8859_9,
'cp1254' : TXT_ENC_WPC1254,
'cp1255' : TXT_ENC_WPC1255,
'cp1256' : TXT_ENC_WPC1256,
'cp1257' : TXT_ENC_WPC1257,
'cp1258' : TXT_ENC_WPC1258,
'katakana' : TXT_ENC_KATAKANA,
}
remaining = copy.copy(encodings)
if not encoding :
encoding = 'cp437'
            while True: # try all candidate encodings until one succeeds
try:
if encoding == 'katakana': # Japanese characters
if jcconv:
# try to convert japanese text to a half-katakanas
kata = jcconv.kata2half(jcconv.hira2kata(char_utf8))
if kata != char_utf8:
self.extra_chars += len(kata.decode('utf-8')) - 1
# the conversion may result in multiple characters
return encode_str(kata.decode('utf-8'))
else:
kata = char_utf8
if kata in TXT_ENC_KATAKANA_MAP:
encoded = TXT_ENC_KATAKANA_MAP[kata]
break
else:
raise ValueError()
else:
encoded = char.encode(encoding)
break
except ValueError: #the encoding failed, select another one and retry
if encoding in remaining:
del remaining[encoding]
if len(remaining) >= 1:
encoding = remaining.items()[0][0]
else:
encoding = 'cp437'
encoded = '\xb1' # could not encode, output error character
                        break
if encoding != self.encoding:
# if the encoding changed, remember it and prefix the character with
# the esc-pos encoding change sequence
self.encoding = encoding
encoded = encodings[encoding] + encoded
return encoded
def encode_str(txt):
buffer = ''
for c in txt:
buffer += encode_char(c)
return buffer
txt = encode_str(txt)
# if the utf-8 -> codepage conversion inserted extra characters,
# remove double spaces to try to restore the original string length
# and prevent printing alignment issues
while self.extra_chars > 0:
            dspace = txt.find('  ')
if dspace > 0:
txt = txt[:dspace] + txt[dspace+1:]
self.extra_chars -= 1
else:
break
self._raw(txt)
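    # Example of the fallback behaviour above: plain ASCII is emitted with
    # the current code page, while a character such as u'\u0151' (available
    # in e.g. cp852) makes the loop retry candidate encodings, prefix the
    # byte with the matching TXT_ENC_* switch sequence and remember the new
    # code page; a character no candidate can encode degrades to the '\xb1'
    # error character.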
def set(self, align='left', font='a', type='normal', width=1, height=1):
""" Set text properties """
# Align
if align.upper() == "CENTER":
self._raw(TXT_ALIGN_CT)
elif align.upper() == "RIGHT":
self._raw(TXT_ALIGN_RT)
elif align.upper() == "LEFT":
self._raw(TXT_ALIGN_LT)
# Font
if font.upper() == "B":
self._raw(TXT_FONT_B)
else: # DEFAULT FONT: A
self._raw(TXT_FONT_A)
# Type
if type.upper() == "B":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_OFF)
elif type.upper() == "U":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "U2":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL2_ON)
elif type.upper() == "BU":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL_ON)
elif type.upper() == "BU2":
self._raw(TXT_BOLD_ON)
self._raw(TXT_UNDERL2_ON)
        elif type.upper() == "NORMAL":
self._raw(TXT_BOLD_OFF)
self._raw(TXT_UNDERL_OFF)
# Width
if width == 2 and height != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2WIDTH)
elif height == 2 and width != 2:
self._raw(TXT_NORMAL)
self._raw(TXT_2HEIGHT)
elif height == 2 and width == 2:
self._raw(TXT_2WIDTH)
self._raw(TXT_2HEIGHT)
else: # DEFAULT SIZE: NORMAL
self._raw(TXT_NORMAL)
def cut(self, mode=''):
""" Cut paper """
# Fix the size between last line and cut
# TODO: handle this with a line feed
self._raw("\n\n\n\n\n\n")
if mode.upper() == "PART":
self._raw(PAPER_PART_CUT)
else: # DEFAULT MODE: FULL CUT
self._raw(PAPER_FULL_CUT)
def cashdraw(self, pin):
""" Send pulse to kick the cash drawer """
if pin == 2:
self._raw(CD_KICK_2)
elif pin == 5:
self._raw(CD_KICK_5)
else:
raise CashDrawerError()
def hw(self, hw):
""" Hardware operations """
if hw.upper() == "INIT":
self._raw(HW_INIT)
elif hw.upper() == "SELECT":
self._raw(HW_SELECT)
elif hw.upper() == "RESET":
self._raw(HW_RESET)
else: # DEFAULT: DOES NOTHING
pass
def control(self, ctl):
""" Feed control sequences """
if ctl.upper() == "LF":
self._raw(CTL_LF)
elif ctl.upper() == "FF":
self._raw(CTL_FF)
elif ctl.upper() == "CR":
self._raw(CTL_CR)
elif ctl.upper() == "HT":
self._raw(CTL_HT)
elif ctl.upper() == "VT":
self._raw(CTL_VT)
| agpl-3.0 |
uwevil/namebench | nb_third_party/dns/opcode.py | 248 | 2615 | # Copyright (C) 2001-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Opcodes."""
import dns.exception
QUERY = 0
IQUERY = 1
STATUS = 2
NOTIFY = 4
UPDATE = 5
_by_text = {
'QUERY' : QUERY,
'IQUERY' : IQUERY,
'STATUS' : STATUS,
'NOTIFY' : NOTIFY,
'UPDATE' : UPDATE
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
class UnknownOpcode(dns.exception.DNSException):
"""Raised if an opcode is unknown."""
pass
def from_text(text):
"""Convert text into an opcode.
@param text: the textual opcode
@type text: string
@raises UnknownOpcode: the opcode is unknown
@rtype: int
"""
if text.isdigit():
value = int(text)
if value >= 0 and value <= 15:
return value
value = _by_text.get(text.upper())
if value is None:
raise UnknownOpcode
return value
def from_flags(flags):
"""Extract an opcode from DNS message flags.
@param flags: int
@rtype: int
"""
return (flags & 0x7800) >> 11
def to_flags(value):
"""Convert an opcode to a value suitable for ORing into DNS message
flags.
@rtype: int
"""
return (value << 11) & 0x7800
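# Worked example: the opcode lives in bits 11-14 of the DNS header flags, so
# to_flags(UPDATE) == (5 << 11) & 0x7800 == 0x2800 and
# from_flags(0x2800) == (0x2800 & 0x7800) >> 11 == 5 == UPDATE.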
def to_text(value):
"""Convert an opcode to text.
    @param value: the opcode
@type value: int
@raises UnknownOpcode: the opcode is unknown
@rtype: string
"""
text = _by_value.get(value)
if text is None:
text = str(value)
return text
def is_update(flags):
"""True if the opcode in flags is UPDATE.
@param flags: DNS flags
@type flags: int
@rtype: bool
"""
if (from_flags(flags) == UPDATE):
return True
return False
| apache-2.0 |
EdDev/vdsm | lib/vdsm/network/errors.py | 1 | 2525 | #
# Copyright 2011-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import six
ERR_OK = 0
ERR_BAD_PARAMS = 21
ERR_BAD_ADDR = 22
ERR_BAD_NIC = 23
ERR_USED_NIC = 24
ERR_BAD_BONDING = 25
ERR_BAD_VLAN = 26
ERR_BAD_BRIDGE = 27
ERR_USED_BRIDGE = 28
ERR_FAILED_IFUP = 29
ERR_FAILED_IFDOWN = 30
ERR_USED_BOND = 31
ERR_LOST_CONNECTION = 10 # noConPeer
ERR_OVS_CONNECTION = 32
class ConfigNetworkError(Exception):
def __init__(self, errCode, message):
self.errCode = errCode
self.message = message
super(ConfigNetworkError, self).__init__(errCode, message)
class OvsDBConnectionError(ConfigNetworkError):
def __init__(self, *args):
message = _get_message(args)
super(OvsDBConnectionError, self).__init__(errCode=ERR_OVS_CONNECTION,
message=message)
@staticmethod
def is_ovs_db_conn_error(err_msg):
return 'database connection failed' in err_msg[0]
class RollbackIncomplete(Exception):
"""
This exception is raised in order to signal API.Global that a call to
setupNetworks has failed and there are leftovers that need to be cleaned
up.
Note that it is never raised by the default ifcfg configurator.
"""
pass
def _get_message(args):
"""
Due to multiprocessing limitation in the way it processes an exception
serialization and deserialization, a derived exception needs to accept
all super classes arguments as input, even if it ignores them.
Given the list of arguments and assuming the message is a string type,
this helper function fetches the message argument.
"""
for arg in args:
if isinstance(arg, six.string_types):
return arg
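# Usage sketch of the constraint described above:
#
#     try:
#         raise OvsDBConnectionError('database connection failed')
#     except OvsDBConnectionError as e:
#         assert e.errCode == ERR_OVS_CONNECTION
#         assert e.message == 'database connection failed'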
| gpl-2.0 |
seize-the-dave/XlsxWriter | xlsxwriter/test/comparison/test_rich_string07.py | 8 | 1570 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'rich_string07.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
italic = workbook.add_format({'italic': 1})
worksheet.write('A1', 'Foo', bold)
worksheet.write('A2', 'Bar', italic)
worksheet.write_rich_string('A3', 'a', bold, 'bc', 'defg')
worksheet.write_rich_string('B4', 'abc', italic, 'de', 'fg')
worksheet.write_rich_string('C5', 'a', bold, 'bc', 'defg')
worksheet.write_rich_string('D6', 'abc', italic, 'de', 'fg')
worksheet.write_rich_string('E7', 'a', bold, 'bcdef', 'g')
worksheet.write_rich_string('F8', italic, 'abcd', 'efg')
workbook.close()
self.assertExcelEqual()
| bsd-2-clause |
dreamapplehappy/myblog | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/_scilab_builtins.py | 364 | 31261 | # -*- coding: utf-8 -*-
"""
pygments.lexers._scilab_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the ScilabLexer.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# These lists are generated automatically.
# Run the following in a Scilab script:
#
# varType=["functions", "commands", "macros", "variables" ];
# fd = mopen('list.txt','wt');
#
# for j=1:size(varType,"*")
# myStr="";
# a=completion("",varType(j));
# myStr=varType(j)+"_kw = [";
# for i=1:size(a,"*")
# myStr = myStr + """" + a(i) + """";
# if size(a,"*") <> i then
# myStr = myStr + ","; end
# end
# myStr = myStr + "]";
# mputl(myStr,fd);
# end
# mclose(fd);
#
# Then replace "$" by "\\$" manually.
functions_kw = ["%XMLAttr_6","%XMLAttr_e","%XMLAttr_i_XMLElem","%XMLAttr_length","%XMLAttr_p","%XMLAttr_size","%XMLDoc_6","%XMLDoc_e","%XMLDoc_i_XMLList","%XMLDoc_p","%XMLElem_6","%XMLElem_e","%XMLElem_i_XMLDoc","%XMLElem_i_XMLElem","%XMLElem_i_XMLList","%XMLElem_p","%XMLList_6","%XMLList_e","%XMLList_i_XMLElem","%XMLList_i_XMLList","%XMLList_length","%XMLList_p","%XMLList_size","%XMLNs_6","%XMLNs_e","%XMLNs_i_XMLElem","%XMLNs_p","%XMLSet_6","%XMLSet_e","%XMLSet_length","%XMLSet_p","%XMLSet_size","%XMLValid_p","%b_i_XMLList","%c_i_XMLAttr","%c_i_XMLDoc","%c_i_XMLElem","%c_i_XMLList","%ce_i_XMLList","%fptr_i_XMLList","%h_i_XMLList","%hm_i_XMLList","%i_abs","%i_cumprod","%i_cumsum","%i_diag","%i_i_XMLList","%i_matrix","%i_max","%i_maxi","%i_min","%i_mini","%i_mput","%i_p","%i_prod","%i_sum","%i_tril","%i_triu","%ip_i_XMLList","%l_i_XMLList","%lss_i_XMLList","%mc_i_XMLList","%msp_full","%msp_i_XMLList","%msp_spget","%p_i_XMLList","%ptr_i_XMLList","%r_i_XMLList","%s_i_XMLList","%sp_i_XMLList","%spb_i_XMLList","%st_i_XMLList","Calendar","ClipBoard","Matplot","Matplot1","PlaySound","TCL_DeleteInterp","TCL_DoOneEvent","TCL_EvalFile","TCL_EvalStr","TCL_ExistArray","TCL_ExistInterp","TCL_ExistVar","TCL_GetVar","TCL_GetVersion","TCL_SetVar","TCL_UnsetVar","TCL_UpVar","_","_code2str","_str2code","about","abs","acos","addcb","addf","addhistory","addinter","amell","and","argn","arl2_ius","ascii","asin","atan","backslash","balanc","banner","base2dec","basename","bdiag","beep","besselh","besseli","besselj","besselk","bessely","beta","bezout","bfinit","blkfc1i","blkslvi","bool2s","browsehistory","browsevar","bsplin3val","buildDocv2","buildouttb","bvode","c_link","calerf","call","callblk","captions","cd","cdfbet","cdfbin","cdfchi","cdfchn","cdff","cdffnc","cdfgam","cdfnbn","cdfnor","cdfpoi","cdft","ceil","champ","champ1","chdir","chol","clc","clean","clear","clear_pixmap","clearfun","clearglobal","closeEditor","closeXcos","code2str","coeff","comp","completion","conj","contour2di","contr","conv2","convstr","copy","copyfile","corr","cos","coserror","createdir","cshep2d","ctree2","ctree3","ctree4","cumprod","cumsum","curblock","curblockc","dasrt","dassl","data2sig","debug","dec2base","deff","definedfields","degree","delbpt","delete","deletefile","delip","delmenu","det","dgettext","dhinf","diag","diary","diffobjs","disp","dispbpt","displayhistory","disposefftwlibrary","dlgamma","dnaupd","dneupd","double","draw","drawaxis","drawlater","drawnow","dsaupd","dsearch","dseupd","duplicate","editor","editvar","emptystr","end_scicosim","ereduc","errcatch","errclear","error","eval_cshep2d","exec","execstr","exists","exit","exp","expm","exportUI","export_to_hdf5","eye","fadj2sp","fec","feval","fft","fftw","fftw_flags","fftw_forget_wisdom","fftwlibraryisloaded","file","filebrowser","fileext","fileinfo","fileparts","filesep","find","findBD","findfiles","floor","format","fort","fprintfMat","freq","frexp","fromc","fromjava","fscanfMat","fsolve","fstair","full","fullpath","funcprot","funptr","gamma","gammaln","geom3d","get","get_absolute_file_path","get_fftw_wisdom","getblocklabel","getcallbackobject","getdate","getdebuginfo","getdefaultlanguage","getdrives","getdynlibext","getenv","getfield","gethistory","gethistoryfile","getinstalledlookandfeels","getio","getlanguage","getlongpathname","getlookandfeel","getmd5","getmemory","getmodules","getos","getpid","getrelativefilename","getscicosvars","getscilabmode","getshortpathname","gettext","getvariablesonstack","getversion","glist","global","glue","grand","grayplot","grep","gsort"
,"gstacksize","havewindow","helpbrowser","hess","hinf","historymanager","historysize","host","iconvert","iconvert","ieee","ilib_verbose","imag","impl","import_from_hdf5","imult","inpnvi","int","int16","int2d","int32","int3d","int8","interp","interp2d","interp3d","intg","intppty","inttype","inv","is_handle_valid","isalphanum","isascii","isdef","isdigit","isdir","isequal","isequalbitwise","iserror","isfile","isglobal","isletter","isreal","iswaitingforinput","javaclasspath","javalibrarypath","kron","lasterror","ldiv","ldivf","legendre","length","lib","librarieslist","libraryinfo","linear_interpn","lines","link","linmeq","list","load","loadScicos","loadfftwlibrary","loadhistory","log","log1p","lsq","lsq_splin","lsqrsolve","lsslist","lstcat","lstsize","ltitr","lu","ludel","lufact","luget","lusolve","macr2lst","macr2tree","matfile_close","matfile_listvar","matfile_open","matfile_varreadnext","matfile_varwrite","matrix","max","maxfiles","mclearerr","mclose","meof","merror","messagebox","mfprintf","mfscanf","mget","mgeti","mgetl","mgetstr","min","mlist","mode","model2blk","mopen","move","movefile","mprintf","mput","mputl","mputstr","mscanf","mseek","msprintf","msscanf","mtell","mtlb_mode","mtlb_sparse","mucomp","mulf","nearfloat","newaxes","newest","newfun","nnz","notify","number_properties","ode","odedc","ones","opentk","optim","or","ordmmd","parallel_concurrency","parallel_run","param3d","param3d1","part","pathconvert","pathsep","phase_simulation","plot2d","plot2d1","plot2d2","plot2d3","plot2d4","plot3d","plot3d1","pointer_xproperty","poly","ppol","pppdiv","predef","print","printf","printfigure","printsetupbox","prod","progressionbar","prompt","pwd","qld","qp_solve","qr","raise_window","rand","rankqr","rat","rcond","rdivf","read","read4b","readb","readgateway","readmps","real","realtime","realtimeinit","regexp","relocate_handle","remez","removedir","removelinehistory","res_with_prec","resethistory","residu","resume","return","ricc","ricc_old","rlist","roots","rotate_axes","round","rpem","rtitr","rubberbox","save","saveafterncommands","saveconsecutivecommands","savehistory","schur","sci_haltscicos","sci_tree2","sci_tree3","sci_tree4","sciargs","scicos_debug","scicos_debug_count","scicos_time","scicosim","scinotes","sctree","semidef","set","set_blockerror","set_fftw_wisdom","set_xproperty","setbpt","setdefaultlanguage","setenv","setfield","sethistoryfile","setlanguage","setlookandfeel","setmenu","sfact","sfinit","show_pixmap","show_window","showalluimenushandles","sident","sig2data","sign","simp","simp_mode","sin","size","slash","sleep","sorder","sparse","spchol","spcompack","spec","spget","splin","splin2d","splin3d","spones","sprintf","sqrt","stacksize","str2code","strcat","strchr","strcmp","strcspn","strindex","string","stringbox","stripblanks","strncpy","strrchr","strrev","strsplit","strspn","strstr","strsubst","strtod","strtok","subf","sum","svd","swap_handles","symfcti","syredi","system_getproperty","system_setproperty","ta2lpd","tan","taucs_chdel","taucs_chfact","taucs_chget","taucs_chinfo","taucs_chsolve","tempname","testmatrix","timer","tlist","tohome","tokens","toolbar","toprint","tr_zer","tril","triu","type","typename","uiDisplayTree","uicontextmenu","uicontrol","uigetcolor","uigetdir","uigetfile","uigetfont","uimenu","uint16","uint32","uint8","uipopup","uiputfile","uiwait","ulink","umf_ludel","umf_lufact","umf_luget","umf_luinfo","umf_lusolve","umfpack","unglue","unix","unsetmenu","unzoom","updatebrowsevar","usecanvas","user","var2vec","varn","vec2var","waitbar","warnBlockByUID","warning
","what","where","whereis","who","winsid","with_embedded_jre","with_module","writb","write","write4b","x_choose","x_choose_modeless","x_dialog","x_mdialog","xarc","xarcs","xarrows","xchange","xchoicesi","xclick","xcos","xcosAddToolsMenu","xcosConfigureXmlFile","xcosDiagramToScilab","xcosPalCategoryAdd","xcosPalDelete","xcosPalDisable","xcosPalEnable","xcosPalGenerateIcon","xcosPalLoad","xcosPalMove","xcosUpdateBlock","xdel","xfarc","xfarcs","xfpoly","xfpolys","xfrect","xget","xgetech","xgetmouse","xgraduate","xgrid","xlfont","xls_open","xls_read","xmlAddNs","xmlAsNumber","xmlAsText","xmlDTD","xmlDelete","xmlDocument","xmlDump","xmlElement","xmlFormat","xmlGetNsByHref","xmlGetNsByPrefix","xmlGetOpenDocs","xmlIsValidObject","xmlNs","xmlRead","xmlReadStr","xmlRelaxNG","xmlRemove","xmlSchema","xmlSetAttributes","xmlValidate","xmlWrite","xmlXPath","xname","xpause","xpoly","xpolys","xrect","xrects","xs2bmp","xs2eps","xs2gif","xs2jpg","xs2pdf","xs2png","xs2ppm","xs2ps","xs2svg","xsegs","xset","xsetech","xstring","xstringb","xtitle","zeros","znaupd","zneupd","zoom_rect"]
commands_kw = ["abort","apropos","break","case","catch","clc","clear","continue","do","else","elseif","end","endfunction","exit","for","function","help","if","pause","pwd","quit","resume","return","select","then","try","what","while","who"]
macros_kw = ["%0_i_st","%3d_i_h","%Block_xcosUpdateBlock","%TNELDER_p","%TNELDER_string","%TNMPLOT_p","%TNMPLOT_string","%TOPTIM_p","%TOPTIM_string","%TSIMPLEX_p","%TSIMPLEX_string","%_gsort","%_strsplit","%ar_p","%asn","%b_a_b","%b_a_s","%b_c_s","%b_c_spb","%b_cumprod","%b_cumsum","%b_d_s","%b_diag","%b_e","%b_f_s","%b_f_spb","%b_g_s","%b_g_spb","%b_h_s","%b_h_spb","%b_i_b","%b_i_ce","%b_i_h","%b_i_hm","%b_i_s","%b_i_sp","%b_i_spb","%b_i_st","%b_iconvert","%b_l_b","%b_l_s","%b_m_b","%b_m_s","%b_matrix","%b_n_hm","%b_o_hm","%b_p_s","%b_prod","%b_r_b","%b_r_s","%b_s_b","%b_s_s","%b_string","%b_sum","%b_tril","%b_triu","%b_x_b","%b_x_s","%c_a_c","%c_b_c","%c_b_s","%c_diag","%c_e","%c_eye","%c_f_s","%c_i_c","%c_i_ce","%c_i_h","%c_i_hm","%c_i_lss","%c_i_r","%c_i_s","%c_i_st","%c_matrix","%c_n_l","%c_n_st","%c_o_l","%c_o_st","%c_ones","%c_rand","%c_tril","%c_triu","%cblock_c_cblock","%cblock_c_s","%cblock_e","%cblock_f_cblock","%cblock_p","%cblock_size","%ce_6","%ce_c_ce","%ce_e","%ce_f_ce","%ce_i_ce","%ce_i_s","%ce_i_st","%ce_matrix","%ce_p","%ce_size","%ce_string","%ce_t","%champdat_i_h","%choose","%diagram_xcos","%dir_p","%fptr_i_st","%grayplot_i_h","%h_i_st","%hm_1_hm","%hm_1_s","%hm_2_hm","%hm_2_s","%hm_3_hm","%hm_3_s","%hm_4_hm","%hm_4_s","%hm_5","%hm_a_hm","%hm_a_r","%hm_a_s","%hm_abs","%hm_and","%hm_bool2s","%hm_c_hm","%hm_ceil","%hm_conj","%hm_cos","%hm_cumprod","%hm_cumsum","%hm_d_hm","%hm_d_s","%hm_degree","%hm_e","%hm_exp","%hm_f_hm","%hm_fft","%hm_find","%hm_floor","%hm_g_hm","%hm_h_hm","%hm_i_b","%hm_i_ce","%hm_i_hm","%hm_i_i","%hm_i_p","%hm_i_r","%hm_i_s","%hm_i_st","%hm_iconvert","%hm_imag","%hm_int","%hm_isnan","%hm_isreal","%hm_j_hm","%hm_j_s","%hm_k_hm","%hm_k_s","%hm_log","%hm_m_p","%hm_m_r","%hm_m_s","%hm_matrix","%hm_maxi","%hm_mean","%hm_median","%hm_mini","%hm_n_b","%hm_n_c","%hm_n_hm","%hm_n_i","%hm_n_p","%hm_n_s","%hm_o_b","%hm_o_c","%hm_o_hm","%hm_o_i","%hm_o_p","%hm_o_s","%hm_ones","%hm_or","%hm_p","%hm_prod","%hm_q_hm","%hm_r_s","%hm_rand","%hm_real","%hm_round","%hm_s","%hm_s_hm","%hm_s_r","%hm_s_s","%hm_sign","%hm_sin","%hm_size","%hm_sqrt","%hm_st_deviation","%hm_string","%hm_sum","%hm_x_hm","%hm_x_p","%hm_x_s","%hm_zeros","%i_1_s","%i_2_s","%i_3_s","%i_4_s","%i_Matplot","%i_a_i","%i_a_s","%i_and","%i_ascii","%i_b_s","%i_bezout","%i_champ","%i_champ1","%i_contour","%i_contour2d","%i_d_i","%i_d_s","%i_e","%i_fft","%i_g_i","%i_gcd","%i_h_i","%i_i_ce","%i_i_h","%i_i_hm","%i_i_i","%i_i_s","%i_i_st","%i_j_i","%i_j_s","%i_l_s","%i_lcm","%i_length","%i_m_i","%i_m_s","%i_mfprintf","%i_mprintf","%i_msprintf","%i_n_s","%i_o_s","%i_or","%i_p_i","%i_p_s","%i_plot2d","%i_plot2d1","%i_plot2d2","%i_q_s","%i_r_i","%i_r_s","%i_round","%i_s_i","%i_s_s","%i_sign","%i_string","%i_x_i","%i_x_s","%ip_a_s","%ip_i_st","%ip_m_s","%ip_n_ip","%ip_o_ip","%ip_p","%ip_s_s","%ip_string","%k","%l_i_h","%l_i_s","%l_i_st","%l_isequal","%l_n_c","%l_n_l","%l_n_m","%l_n_p","%l_n_s","%l_n_st","%l_o_c","%l_o_l","%l_o_m","%l_o_p","%l_o_s","%l_o_st","%lss_a_lss","%lss_a_p","%lss_a_r","%lss_a_s","%lss_c_lss","%lss_c_p","%lss_c_r","%lss_c_s","%lss_e","%lss_eye","%lss_f_lss","%lss_f_p","%lss_f_r","%lss_f_s","%lss_i_ce","%lss_i_lss","%lss_i_p","%lss_i_r","%lss_i_s","%lss_i_st","%lss_inv","%lss_l_lss","%lss_l_p","%lss_l_r","%lss_l_s","%lss_m_lss","%lss_m_p","%lss_m_r","%lss_m_s","%lss_n_lss","%lss_n_p","%lss_n_r","%lss_n_s","%lss_norm","%lss_o_lss","%lss_o_p","%lss_o_r","%lss_o_s","%lss_ones","%lss_r_lss","%lss_r_p","%lss_r_r","%lss_r_s","%lss_rand","%lss_s","%lss_s_lss","%lss_s_p","%lss_s_r","%lss_s_s","%lss_size","%lss_t","%lss_v_lss","%lss_v_p","%lss_v_r","%lss_v_s","%lt_i_s","%m_n_l","%m_o_l","%mc_i_h","%mc_i_s","%mc_i_st","%mc_n_st","%mc_o_st","%mc_string","%mps_p","%mps_string","%msp_a_s","%msp_abs","%msp_e","%msp_find","%msp_i_s","%msp_i_st","%msp_length","%msp_m_s","%msp_maxi","%msp_n_msp","%msp_nnz","%msp_o_msp","%msp_p","%msp_sparse","%msp_spones","%msp_t","%p_a_lss","%p_a_r","%p_c_lss","%p_c_r","%p_cumprod","%p_cumsum","%p_d_p","%p_d_r","%p_d_s","%p_det","%p_e","%p_f_lss","%p_f_r","%p_i_ce","%p_i_h","%p_i_hm","%p_i_lss","%p_i_p","%p_i_r","%p_i_s","%p_i_st","%p_inv","%p_j_s","%p_k_p","%p_k_r","%p_k_s","%p_l_lss","%p_l_p","%p_l_r","%p_l_s","%p_m_hm","%p_m_lss","%p_m_r","%p_matrix","%p_n_l","%p_n_lss","%p_n_r","%p_o_l","%p_o_lss","%p_o_r","%p_o_sp","%p_p_s","%p_prod","%p_q_p","%p_q_r","%p_q_s","%p_r_lss","%p_r_p","%p_r_r","%p_r_s","%p_s_lss","%p_s_r","%p_simp","%p_string","%p_sum","%p_v_lss","%p_v_p","%p_v_r","%p_v_s","%p_x_hm","%p_x_r","%p_y_p","%p_y_r","%p_y_s","%p_z_p","%p_z_r","%p_z_s","%r_a_hm","%r_a_lss","%r_a_p","%r_a_r","%r_a_s","%r_c_lss","%r_c_p","%r_c_r","%r_c_s","%r_clean","%r_cumprod","%r_d_p","%r_d_r","%r_d_s","%r_det","%r_diag","%r_e","%r_eye","%r_f_lss","%r_f_p","%r_f_r","%r_f_s","%r_i_ce","%r_i_hm","%r_i_lss","%r_i_p","%r_i_r","%r_i_s","%r_i_st","%r_inv","%r_j_s","%r_k_p","%r_k_r","%r_k_s","%r_l_lss","%r_l_p","%r_l_r","%r_l_s","%r_m_hm","%r_m_lss","%r_m_p","%r_m_r","%r_m_s","%r_matrix","%r_n_lss","%r_n_p","%r_n_r","%r_n_s","%r_norm","%r_o_lss","%r_o_p","%r_o_r","%r_o_s","%r_ones","%r_p","%r_p_s","%r_prod","%r_q_p","%r_q_r","%r_q_s","%r_r_lss","%r_r_p","%r_r_r","%r_r_s","%r_rand","%r_s","%r_s_hm","%r_s_lss","%r_s_p","%r_s_r","%r_s_s","%r_simp","%r_size","%r_string","%r_sum","%r_t","%r_tril","%r_triu","%r_v_lss","%r_v_p","%r_v_r","%r_v_s","%r_x_p","%r_x_r","%r_x_s","%r_y_p","%r_y_r","%r_y_s","%r_z_p","%r_z_r","%r_z_s","%s_1_hm","%s_1_i","%s_2_hm","%s_2_i","%s_3_hm","%s_3_i","%s_4_hm","%s_4_i","%s_5","%s_a_b","%s_a_hm","%s_a_i","%s_a_ip","%s_a_lss","%s_a_msp","%s_a_r","%s_a_sp","%s_and","%s_b_i","%s_b_s","%s_c_b","%s_c_cblock","%s_c_lss","%s_c_r","%s_c_sp","%s_d_b","%s_d_i","%s_d_p","%s_d_r","%s_d_sp","%s_e","%s_f_b","%s_f_cblock","%s_f_lss","%s_f_r","%s_f_sp","%s_g_b","%s_g_s","%s_h_b","%s_h_s","%s_i_b","%s_i_c","%s_i_ce","%s_i_h","%s_i_hm","%s_i_i","%s_i_lss","%s_i_p","%s_i_r","%s_i_s","%s_i_sp","%s_i_spb","%s_i_st","%s_j_i","%s_k_hm","%s_k_p","%s_k_r","%s_k_sp","%s_l_b","%s_l_hm","%s_l_i","%s_l_lss","%s_l_p","%s_l_r","%s_l_s","%s_l_sp","%s_m_b","%s_m_hm","%s_m_i","%s_m_ip","%s_m_lss","%s_m_msp","%s_m_r","%s_matrix","%s_n_hm","%s_n_i","%s_n_l","%s_n_lss","%s_n_r","%s_n_st","%s_o_hm","%s_o_i","%s_o_l","%s_o_lss","%s_o_r","%s_o_st","%s_or","%s_p_b","%s_p_i","%s_pow","%s_q_hm","%s_q_i","%s_q_p","%s_q_r","%s_q_sp","%s_r_b","%s_r_i","%s_r_lss","%s_r_p","%s_r_r","%s_r_s","%s_r_sp","%s_s_b","%s_s_hm","%s_s_i","%s_s_ip","%s_s_lss","%s_s_r","%s_s_sp","%s_simp","%s_v_lss","%s_v_p","%s_v_r","%s_v_s","%s_x_b","%s_x_hm","%s_x_i","%s_x_r","%s_y_p","%s_y_r","%s_y_sp","%s_z_p","%s_z_r","%s_z_sp","%sn","%sp_a_s","%sp_a_sp","%sp_and","%sp_c_s","%sp_ceil","%sp_cos","%sp_cumprod","%sp_cumsum","%sp_d_s","%sp_d_sp","%sp_diag","%sp_e","%sp_exp","%sp_f_s","%sp_floor","%sp_gsort","%sp_i_ce","%sp_i_h","%sp_i_s","%sp_i_sp","%sp_i_st","%sp_int","%sp_inv","%sp_k_s","%sp_k_sp","%sp_l_s","%sp_l_sp","%sp_length","%sp_norm","%sp_or","%sp_p_s","%sp_prod","%sp_q_s","%sp_q_sp","%sp_r_s","%sp_r_sp","%sp_round","%sp_s_s","%sp_s_sp","%sp_sin","%sp_sqrt","%sp_string","%sp_sum","%sp_tril","%sp_triu","%sp_y_s","%sp_y_sp","%sp_z_s","%sp_z_sp","%spb_and","%spb_c_b","%spb_cumprod","%spb_cumsum","%spb_diag","%spb_e","%spb_f_b","%spb_g_b","%spb_g_spb","%spb_h_b","%spb_h_spb","%spb_i_b","%spb_i_ce","%spb_i_h","%spb_i_st","%spb_or","%spb_prod","%spb_sum","%spb_tril","%spb_triu","%st_6","%st_c_st","%st_e","%st_f_st","%st_i_b","%st_i_c","%st_i_fptr","%st_i_h","%st_i_i","%st_i_ip","%st_i_lss","%st_i_msp","%st_i_p","%st_i_r","%st_i_s","%st_i_sp","%st_i_spb","%st_i_st","%st_matrix","%st_n_c","%st_n_l","%st_n_mc","%st_n_p","%st_n_s","%st_o_c","%st_o_l","%st_o_mc","%st_o_p","%st_o_s","%st_o_tl","%st_p","%st_size","%st_string","%st_t","%ticks_i_h","%xls_e","%xls_p","%xlssheet_e","%xlssheet_p","%xlssheet_size","%xlssheet_string","DominationRank","G_make","IsAScalar","NDcost","OS_Version","PlotSparse","ReadHBSparse","ReadmiMatrix","TCL_CreateSlave","WritemiMatrix","abcd","abinv","accept_func_default","accept_func_vfsa","acf","acosd","acosh","acoshm","acosm","acot","acotd","acoth","acsc","acscd","acsch","add_demo","add_help_chapter","add_module_help_chapter","add_param","add_profiling","adj2sp","aff2ab","ana_style","analpf","analyze","aplat","apropos","arhnk","arl2","arma2p","armac","armax","armax1","arobasestring2strings","arsimul","ascii2string","asciimat","asec","asecd","asech","asind","asinh","asinhm","asinm","assert_checkalmostequal","assert_checkequal","assert_checkerror","assert_checkfalse","assert_checkfilesequal","assert_checktrue","assert_comparecomplex","assert_computedigits","assert_cond2reltol","assert_cond2reqdigits","assert_generror","atand","atanh","atanhm","atanm","atomsAutoload","atomsAutoloadAdd","atomsAutoloadDel","atomsAutoloadList","atomsCategoryList","atomsCheckModule","atomsDepTreeShow","atomsGetConfig","atomsGetInstalled","atomsGetLoaded","atomsGetLoadedPath","atomsInstall","atomsIsInstalled","atomsIsLoaded","atomsList","atomsLoad","atomsRemove","atomsRepositoryAdd","atomsRepositoryDel","atomsRepositoryList","atomsRestoreConfig","atomsSaveConfig","atomsSearch","atomsSetConfig","atomsShow","atomsSystemInit","atomsSystemUpdate","atomsTest","atomsUpdate","atomsVersion","augment","auread","auwrite","balreal","bench_run","bilin","bilt","bin2dec","binomial","bitand","bitcmp","bitget","bitor","bitset","bitxor","black","blanks","bloc2exp","bloc2ss","block_parameter_error","bode","bstap","buttmag","bvodeS","bytecode","bytecodewalk","cainv","calendar","calfrq","canon","casc","cat","cat_code","cb_m2sci_gui","ccontrg","cell","cell2mat","cellstr","center","cepstrum","cfspec","char","chart","cheb1mag","cheb2mag","check_gateways","check_help","check_modules_xml","check_versions","chepol","chfact","chsolve","classmarkov","clean_help","clock","cls2dls","cmb_lin","cmndred","cmoment","coding_ga_binary","coding_ga_identity","coff","coffg","colcomp","colcompr","colinout","colregul","companion","complex","compute_initial_temp","cond","cond2sp","condestsp","config","configure_msifort","configure_msvc","cont_frm","cont_mat","contrss","conv","convert_to_float","convertindex","convol","convol2d","copfac","correl","cosd","cosh","coshm","cosm","cotd","cotg","coth","cothm","covar","createfun","createstruct","crossover_ga_binary","crossover_ga_default","csc","cscd","csch","csgn","csim","cspect","ctr_gram","czt","dae","daeoptions","damp","datafit","date","datenum","datevec","dbphi","dcf","ddp","dec2bin","dec2hex","dec2oct","del_help_chapter","del_module_help_chapter","demo_begin","demo_choose","demo_compiler","demo_end","demo_file_choice","demo_folder_choice","demo_function_choice","demo_gui","demo_mdialog","demo_message","demo_run","demo_viewCode","denom","derivat","derivative","des2ss","des2tf","detectmsifort64tools","detectmsvc64tools","determ","detr","detrend","devtools_run_builder","dft","dhnorm","diff","diophant","dir","dirname","dispfiles","dllinfo","dscr","dsimul","dt_ility","dtsi","edit","edit_error","eigenmarkov","ell1mag","enlarge_shape","entropy","eomday","epred","eqfir","eqiir","equil","equil1","erf","erfc","erfcx","erfinv","etime","eval","evans","evstr","expression2code","extract_help_examples","factor","factorial","factors","faurre","ffilt","fft2","fftshift","fieldnames","filt_sinc","filter","findABCD","findAC","findBDK","findR","find_freq","find_links","find_scicos_version","findm","findmsifortcompiler","findmsvccompiler","findx0BD","firstnonsingleton","fit_dat","fix","fixedpointgcd","flipdim","flts","fminsearch","format_txt","fourplan","fprintf","frep2tf","freson","frfit","frmag","fscanf","fseek_origin","fsfirlin","fspec","fspecg","fstabst","ftest","ftuneq","fullfile","fullrf","fullrfk","fun2string","g_margin","gainplot","gamitg","gcare","gcd","gencompilationflags_unix","generateBlockImage","generateBlockImages","generic_i_ce","generic_i_h","generic_i_hm","generic_i_s","generic_i_st","genlib","genlib_old","genmarkov","geomean","getDiagramVersion","getModelicaPath","get_file_path","get_function_path","get_param","get_profile","get_scicos_version","getd","getscilabkeywords","getshell","gettklib","gfare","gfrancis","givens","glever","gmres","group","gschur","gspec","gtild","h2norm","h_cl","h_inf","h_inf_st","h_norm","hallchart","halt","hank","hankelsv","harmean","haveacompiler","head_comments","help","help_from_sci","help_skeleton","hermit","hex2dec","hilb","hilbert","horner","householder","hrmt","htrianr","hypermat","ifft","iir","iirgroup","iirlp","iirmod","ilib_build","ilib_compile","ilib_for_link","ilib_gen_Make","ilib_gen_Make_unix","ilib_gen_cleaner","ilib_gen_gateway","ilib_gen_loader","ilib_include_flag","ilib_mex_build","im_inv","importScicosDiagram","importScicosPal","importXcosDiagram","imrep2ss","ind2sub","inistate","init_ga_default","init_param","initial_scicos_tables","input","instruction2code","intc","intdec","integrate","interp1","interpln","intersect","intl","intsplin","inttrap","inv_coeff","invr","invrs","invsyslin","iqr","isLeapYear","is_absolute_path","is_param","iscell","iscellstr","isempty","isfield","isinf","isnan","isnum","issparse","isstruct","isvector","jmat","justify","kalm","karmarkar","kernel","kpure","krac2","kroneck","lattn","launchtest","lcf","lcm","lcmdiag","leastsq","leqe","leqr","lev","levin","lex_sort","lft","lin","lin2mu","lincos","lindquist","linf","linfn","linsolve","linspace","list2vec","list_param","listfiles","listfunctions","listvarinfile","lmisolver","lmitool","loadXcosLibs","loadmatfile","loadwave","log10","log2","logm","logspace","lqe","lqg","lqg2stan","lqg_ltr","lqr","ls","lyap","m2sci_gui","m_circle","macglov","macrovar","mad","makecell","manedit","mapsound","markp2ss","matfile2sci","mdelete","mean","meanf","median","mese","meshgrid","mfft","mfile2sci","minreal","minss","mkdir","modulo","moment","mrfit","msd","mstr2sci","mtlb","mtlb_0","mtlb_a","mtlb_all","mtlb_any","mtlb_axes","mtlb_axis","mtlb_beta","mtlb_box","mtlb_choices","mtlb_close","mtlb_colordef","mtlb_cond","mtlb_conv","mtlb_cov","mtlb_cumprod","mtlb_cumsum","mtlb_dec2hex","mtlb_delete","mtlb_diag","mtlb_diff","mtlb_dir","mtlb_double","mtlb_e","mtlb_echo","mtlb_error","mtlb_eval","mtlb_exist","mtlb_eye","mtlb_false","mtlb_fft","mtlb_fftshift","mtlb_filter","mtlb_find","mtlb_findstr","mtlb_fliplr","mtlb_fopen","mtlb_format","mtlb_fprintf","mtlb_fread","mtlb_fscanf","mtlb_full","mtlb_fwrite","mtlb_get","mtlb_grid","mtlb_hold","mtlb_i","mtlb_ifft","mtlb_image","mtlb_imp","mtlb_int16","mtlb_int32","mtlb_int8","mtlb_is","mtlb_isa","mtlb_isfield","mtlb_isletter","mtlb_isspace","mtlb_l","mtlb_legendre","mtlb_linspace","mtlb_logic","mtlb_logical","mtlb_loglog","mtlb_lower","mtlb_max","mtlb_mean","mtlb_median","mtlb_mesh","mtlb_meshdom","mtlb_min","mtlb_more","mtlb_num2str","mtlb_ones","mtlb_pcolor","mtlb_plot","mtlb_prod","mtlb_qr","mtlb_qz","mtlb_rand","mtlb_randn","mtlb_rcond","mtlb_realmax","mtlb_realmin","mtlb_repmat","mtlb_s","mtlb_semilogx","mtlb_semilogy","mtlb_setstr","mtlb_size","mtlb_sort","mtlb_sortrows","mtlb_sprintf","mtlb_sscanf","mtlb_std","mtlb_strcmp","mtlb_strcmpi","mtlb_strfind","mtlb_strrep","mtlb_subplot","mtlb_sum","mtlb_t","mtlb_toeplitz","mtlb_tril","mtlb_triu","mtlb_true","mtlb_type","mtlb_uint16","mtlb_uint32","mtlb_uint8","mtlb_upper","mtlb_var","mtlb_zeros","mu2lin","mutation_ga_binary","mutation_ga_default","mvcorrel","mvvacov","nancumsum","nand2mean","nanmax","nanmean","nanmeanf","nanmedian","nanmin","nanstdev","nansum","narsimul","ndgrid","ndims","nehari","neigh_func_csa","neigh_func_default","neigh_func_fsa","neigh_func_vfsa","neldermead_cget","neldermead_configure","neldermead_costf","neldermead_defaultoutput","neldermead_destroy","neldermead_display","neldermead_function","neldermead_get","neldermead_log","neldermead_new","neldermead_restart","neldermead_search","neldermead_updatesimp","nextpow2","nfreq","nicholschart","nlev","nmplot_cget","nmplot_configure","nmplot_contour","nmplot_destroy","nmplot_display","nmplot_function","nmplot_get","nmplot_historyplot","nmplot_log","nmplot_new","nmplot_outputcmd","nmplot_restart","nmplot_search","nmplot_simplexhistory","noisegen","nonreg_test_run","norm","now","null","num2cell","numdiff","numer","nyquist","nyquistfrequencybounds","obs_gram","obscont","observer","obsv_mat","obsvss","oct2dec","odeoptions","optim_ga","optim_moga","optim_nsga","optim_nsga2","optim_sa","optimbase_cget","optimbase_checkbounds","optimbase_checkcostfun","optimbase_checkx0","optimbase_configure","optimbase_destroy","optimbase_display","optimbase_function","optimbase_get","optimbase_hasbounds","optimbase_hasconstraints","optimbase_hasnlcons","optimbase_histget","optimbase_histset","optimbase_incriter","optimbase_isfeasible","optimbase_isinbounds","optimbase_isinnonlincons","optimbase_log","optimbase_logshutdown","optimbase_logstartup","optimbase_new","optimbase_outputcmd","optimbase_outstruct","optimbase_proj2bnds","optimbase_set","optimbase_stoplog","optimbase_terminate","optimget","optimplotfunccount","optimplotfval","optimplotx","optimset","optimsimplex_center","optimsimplex_check","optimsimplex_compsomefv","optimsimplex_computefv","optimsimplex_deltafv","optimsimplex_deltafvmax","optimsimplex_destroy","optimsimplex_dirmat","optimsimplex_fvmean","optimsimplex_fvstdev","optimsimplex_fvvariance","optimsimplex_getall","optimsimplex_getallfv","optimsimplex_getallx","optimsimplex_getfv","optimsimplex_getn","optimsimplex_getnbve","optimsimplex_getve","optimsimplex_getx","optimsimplex_gradientfv","optimsimplex_log","optimsimplex_new","optimsimplex_print","optimsimplex_reflect","optimsimplex_setall","optimsimplex_setallfv","optimsimplex_setallx","optimsimplex_setfv","optimsimplex_setn","optimsimplex_setnbve","optimsimplex_setve","optimsimplex_setx","optimsimplex_shrink","optimsimplex_size","optimsimplex_sort","optimsimplex_tostring","optimsimplex_xbar","orth","p_margin","pack","pareto_filter","parrot","pbig","pca","pcg","pdiv","pen2ea","pencan","pencost","penlaur","perctl","perl","perms","permute","pertrans","pfactors","pfss","phasemag","phaseplot","phc","pinv","playsnd","plotprofile","plzr","pmodulo","pol2des","pol2str","polar","polfact","prbs_a","prettyprint","primes","princomp","profile","proj","projsl","projspec","psmall","pspect","qmr","qpsolve","quart","quaskro","rafiter","randpencil","range","rank","read_csv","readxls","recompilefunction","recons","reglin","regress","remezb","remove_param","remove_profiling","repfreq","replace_Ix_by_Fx","repmat","reset_profiling","resize_matrix","returntoscilab","rhs2code","ric_desc","riccati","rmdir","routh_t","rowcomp","rowcompr","rowinout","rowregul","rowshuff","rref","sample","samplef","samwr","savematfile","savewave","scanf","sci2exp","sciGUI_init","sci_sparse","scicos_getvalue","scicos_simulate","scicos_workspace_init","scisptdemo","scitest","sdiff","sec","secd","sech","selection_ga_elitist","selection_ga_random","sensi","set_param","setdiff","sgrid","show_margins","show_pca","showprofile","signm","sinc","sincd","sind","sinh","sinhm","sinm","sm2des","sm2ss","smga","smooth","solve","sound","soundsec","sp2adj","spaninter","spanplus","spantwo","specfact","speye","sprand","spzeros","sqroot","sqrtm","squarewave","squeeze","srfaur","srkf","ss2des","ss2ss","ss2tf","sscanf","sskf","ssprint","ssrand","st_deviation","st_i_generic","st_ility","stabil","statgain","stdev","stdevf","steadycos","strange","strcmpi","struct","sub2ind","sva","svplot","sylm","sylv","sysconv","sysdiag","sysfact","syslin","syssize","system","systmat","tabul","tand","tanh","tanhm","tanm","tbx_build_blocks","tbx_build_cleaner","tbx_build_gateway","tbx_build_gateway_clean","tbx_build_gateway_loader","tbx_build_help","tbx_build_help_loader","tbx_build_loader","tbx_build_macros","tbx_build_src","tbx_builder","tbx_builder_gateway","tbx_builder_gateway_lang","tbx_builder_help","tbx_builder_help_lang","tbx_builder_macros","tbx_builder_src","tbx_builder_src_lang","temp_law_csa","temp_law_default","temp_law_fsa","temp_law_huang","temp_law_vfsa","test_clean","test_on_columns","test_run","test_run_level","testexamples","tf2des","tf2ss","thrownan","tic","time_id","toc","toeplitz","tokenpos","toolboxes","trace","trans","translatepaths","tree2code","trfmod","trianfml","trimmean","trisolve","trzeros","typeof","ui_observer","union","unique","unit_test_run","unix_g","unix_s","unix_w","unix_x","unobs","unpack","variance","variancef","vec2list","vectorfind","ver","warnobsolete","wavread","wavwrite","wcenter","weekday","wfir","wfir_gui","whereami","who_user","whos","wiener","wigner","winclose","window","winlist","with_javasci","with_macros_source","with_modelica_compiler","with_pvm","with_texmacs","with_tk","write_csv","xcosBlockEval","xcosBlockInterface","xcosCodeGeneration","xcosConfigureModelica","xcosPal","xcosPalAdd","xcosPalAddBlock","xcosPalExport","xcosShowBlockWarning","xcosValidateBlockSet","xcosValidateCompareBlock","xcos_compile","xcos_run","xcos_simulate","xcos_workspace_init","xmltochm","xmltoformat","xmltohtml","xmltojar","xmltopdf","xmltops","xmltoweb","yulewalk","zeropen","zgrid","zpbutt","zpch1","zpch2","zpell"]
builtin_consts = ["\\$","%F","%T","%e","%eps","%f","%fftw","%gui","%i","%inf","%io","%modalWarning","%nan","%pi","%s","%t","%tk","%toolboxes","%toolboxes_dir","%z","PWD","SCI","SCIHOME","TMPDIR","a","ans","assertlib","atomslib","cacsdlib","compatibility_functilib","corelib","data_structureslib","demo_toolslib","development_toolslib","differential_equationlib","dynamic_linklib","elementary_functionslib","fd","fileiolib","functionslib","genetic_algorithmslib","helptoolslib","home","i","integerlib","interpolationlib","iolib","j","linear_algebralib","m2scilib","matiolib","modules_managerlib","myStr","neldermeadlib","optimbaselib","optimizationlib","optimsimplexlib","output_streamlib","overloadinglib","parameterslib","polynomialslib","scicos_autolib","scicos_utilslib","scinoteslib","signal_processinglib","simulated_annealinglib","soundlib","sparselib","special_functionslib","spreadsheetlib","statisticslib","stringlib","tclscilib","timelib","umfpacklib","varType","xcoslib"]
| mit |
catchmrbharath/servo | tests/wpt/harness/wptrunner/update/update.py | 118 | 5053 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
from metadata import MetadataUpdateRunner
from sync import SyncFromUpstreamRunner
from tree import GitTree, HgTree, NoVCSTree
from .. import environment as env
from base import Step, StepRunner, exit_clean, exit_unclean
from state import State
def setup_paths(sync_path):
sys.path.insert(0, os.path.abspath(sync_path))
from tools import localpaths
class LoadConfig(Step):
"""Step for loading configuration from the ini file and kwargs."""
provides = ["sync", "paths", "metadata_path", "tests_path"]
def create(self, state):
state.sync = {"remote_url": state.kwargs["remote_url"],
"branch": state.kwargs["branch"],
"path": state.kwargs["sync_path"]}
state.paths = state.kwargs["test_paths"]
state.tests_path = state.paths["/"]["tests_path"]
state.metadata_path = state.paths["/"]["metadata_path"]
assert state.tests_path.startswith("/")
class LoadTrees(Step):
"""Step for creating a Tree for the local copy and a GitTree for the
upstream sync."""
provides = ["local_tree", "sync_tree"]
def create(self, state):
if os.path.exists(state.sync["path"]):
sync_tree = GitTree(root=state.sync["path"])
else:
sync_tree = None
if GitTree.is_type():
local_tree = GitTree()
elif HgTree.is_type():
local_tree = HgTree()
else:
local_tree = NoVCSTree()
state.update({"local_tree": local_tree,
"sync_tree": sync_tree})
class SyncFromUpstream(Step):
"""Step that synchronises a local copy of the code with upstream."""
def create(self, state):
if not state.kwargs["sync"]:
return
if not state.sync_tree:
os.mkdir(state.sync["path"])
state.sync_tree = GitTree(root=state.sync["path"])
kwargs = state.kwargs
with state.push(["sync", "paths", "metadata_path", "tests_path", "local_tree",
"sync_tree"]):
state.target_rev = kwargs["rev"]
state.no_patch = kwargs["no_patch"]
state.suite_name = kwargs["suite_name"]
runner = SyncFromUpstreamRunner(self.logger, state)
runner.run()
class UpdateMetadata(Step):
"""Update the expectation metadata from a set of run logs"""
def create(self, state):
if not state.kwargs["run_log"]:
return
kwargs = state.kwargs
with state.push(["local_tree", "sync_tree", "paths", "serve_root"]):
state.run_log = kwargs["run_log"]
state.ignore_existing = kwargs["ignore_existing"]
state.no_patch = kwargs["no_patch"]
state.suite_name = kwargs["suite_name"]
state.product = kwargs["product"]
state.config = kwargs["config"]
runner = MetadataUpdateRunner(self.logger, state)
runner.run()
class UpdateRunner(StepRunner):
"""Runner for doing an overall update."""
steps = [LoadConfig,
LoadTrees,
SyncFromUpstream,
UpdateMetadata]
class WPTUpdate(object):
def __init__(self, logger, runner_cls=UpdateRunner, **kwargs):
"""Object that controls the running of a whole wptupdate.
:param runner_cls: Runner subclass holding the overall list of
steps to run.
:param kwargs: Command line arguments
"""
self.runner_cls = runner_cls
self.serve_root = kwargs["test_paths"]["/"]["tests_path"]
if not kwargs["sync"]:
setup_paths(self.serve_root)
else:
if os.path.exists(kwargs["sync_path"]):
# If the sync path doesn't exist we defer this until it does
setup_paths(kwargs["sync_path"])
self.state = State(logger)
self.kwargs = kwargs
self.logger = logger
def run(self, **kwargs):
if self.kwargs["abort"]:
self.abort()
return exit_clean
if not self.kwargs["continue"] and not self.state.is_empty():
self.logger.error("Found existing state. Run with --continue to resume or --abort to clear state")
return exit_unclean
if self.kwargs["continue"]:
if self.state.is_empty():
self.logger.error("No sync in progress?")
return exit_clean
self.kwargs = self.state.kwargs
else:
self.state.kwargs = self.kwargs
self.state.serve_root = self.serve_root
update_runner = self.runner_cls(self.logger, self.state)
rv = update_runner.run()
if rv in (exit_clean, None):
self.state.clear()
return rv
def abort(self):
self.state.clear()
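# Illustrative construction (hypothetical logger and paths; the real values
# are assembled by the wptrunner command line front-end). Because "continue"
# is a Python keyword, the arguments must be passed as an unpacked dict:
#
#     kwargs = {"test_paths": {"/": {"tests_path": "/wpt/tests",
#                                    "metadata_path": "/wpt/meta"}},
#               "sync": False, "abort": False, "continue": False, ...}
#     WPTUpdate(logger, **kwargs).run()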
| mpl-2.0 |
odooindia/odoo | openerp/tools/func.py | 49 | 3346 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010, 2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
__all__ = ['synchronized', 'lazy_property']
from functools import wraps
from inspect import getsourcefile
class lazy_property(object):
""" Decorator for a lazy property of an object, i.e., an object attribute
that is determined by the result of a method call evaluated once. To
reevaluate the property, simply delete the attribute on the object, and
get it again.
"""
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, cls):
if obj is None:
return self
value = self.fget(obj)
setattr(obj, self.fget.__name__, value)
return value
@staticmethod
def reset_all(obj):
""" Reset all lazy properties on the instance `obj`. """
cls = type(obj)
obj_dict = vars(obj)
for name in obj_dict.keys():
if isinstance(getattr(cls, name, None), lazy_property):
obj_dict.pop(name)
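# Illustrative usage sketch (not part of the original module): the decorated
# method runs once per instance, after which its result shadows the descriptor.
#
#     class Report(object):          # hypothetical class
#         @lazy_property
#         def totals(self):
#             return expensive_aggregate()   # made-up helper, runs once
#
#     r = Report()
#     r.totals       # computes and stores the value in r.__dict__
#     r.totals       # plain attribute lookup, no recomputation
#     del r.totals   # forces re-evaluation on the next access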
def synchronized(lock_attr='_lock'):
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
lock = getattr(self, lock_attr)
try:
lock.acquire()
return func(self, *args, **kwargs)
finally:
lock.release()
return wrapper
return decorator
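# Minimal sketch of applying `synchronized` (assumed usage, not from the
# original source): the instance must carry the named lock attribute.
#
#     import threading
#
#     class Registry(object):        # hypothetical class
#         def __init__(self):
#             self._lock = threading.Lock()   # matches the default lock_attr
#
#         @synchronized()
#         def register(self, key, value):
#             pass                   # body executes with self._lock held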
def frame_codeinfo(fframe, back=0):
""" Return a (filename, line) pair for a previous frame .
@return (filename, lineno) where lineno is either int or string==''
"""
try:
if not fframe:
return "<unknown>", ''
for i in range(back):
fframe = fframe.f_back
try:
fname = getsourcefile(fframe)
except TypeError:
fname = '<builtin>'
lineno = fframe.f_lineno or ''
return fname, lineno
except Exception:
return "<unknown>", ''
def compose(a, b):
""" Composes the callables ``a`` and ``b``. ``compose(a, b)(*args)`` is
equivalent to ``a(b(*args))``.
Can be used as a decorator by partially applying ``a``::
@partial(compose, a)
def b():
...
"""
@wraps(b)
def wrapper(*args, **kwargs):
return a(b(*args, **kwargs))
return wrapper
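# Small worked example (illustrative, not in the original file):
#
#     inc = lambda x: x + 1
#     dbl = lambda x: x * 2
#     compose(inc, dbl)(3)   # == inc(dbl(3)) == 7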
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
FlorentChamault/My_sickbeard | lib/requests/packages/oauthlib/oauth1/rfc5849/utils.py | 74 | 3015 | # -*- coding: utf-8 -*-
"""
oauthlib.utils
~~~~~~~~~~~~~~
This module contains utility methods used by various parts of the OAuth
spec.
"""
import string
import urllib2
from oauthlib.common import quote, unquote
UNICODE_ASCII_CHARACTER_SET = (string.ascii_letters.decode('ascii') +
string.digits.decode('ascii'))
def filter_params(target):
"""Decorator which filters params to remove non-oauth_* parameters
Assumes the decorated method takes a params dict or list of tuples as its
first argument.
"""
def wrapper(params, *args, **kwargs):
params = filter_oauth_params(params)
return target(params, *args, **kwargs)
wrapper.__doc__ = target.__doc__
return wrapper
def filter_oauth_params(params):
"""Removes all non oauth parameters from a dict or a list of params."""
is_oauth = lambda kv: kv[0].startswith(u"oauth_")
if isinstance(params, dict):
return filter(is_oauth, params.items())
else:
return filter(is_oauth, params)
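# Example of the filtering behaviour (made-up values):
#
#     params = [(u'oauth_token', u'abc'), (u'foo', u'bar')]
#     filter_oauth_params(params)   # -> [(u'oauth_token', u'abc')]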
def escape(u):
"""Escape a unicode string in an OAuth-compatible fashion.
Per `section 3.6`_ of the spec.
.. _`section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
"""
if not isinstance(u, unicode):
raise ValueError('Only unicode objects are escapable.')
# Letters, digits, and the characters '_.-' are already treated as safe
# by urllib.quote(). We need to add '~' to fully support rfc5849.
return quote(u, safe='~')
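# Illustrative escaping results (Python 2 unicode literals):
#
#     escape(u'ab c&d~')   # -> u'ab%20c%26d~' ('~' stays unescaped per RFC 5849)
#     escape('bytes')      # raises ValueError: only unicode is accepted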
def unescape(u):
if not isinstance(u, unicode):
raise ValueError('Only unicode objects are unescapable.')
return unquote(u)
def urlencode(query):
"""Encode a sequence of two-element tuples or dictionary into a URL query string.
Operates using an OAuth-safe escape() method, in contrast to urllib.urlencode.
"""
# Convert dictionaries to list of tuples
if isinstance(query, dict):
query = query.items()
return u"&".join([u'='.join([escape(k), escape(v)]) for k, v in query])
def parse_keqv_list(l):
"""A unicode-safe version of urllib2.parse_keqv_list"""
encoded_list = [u.encode('utf-8') for u in l]
encoded_parsed = urllib2.parse_keqv_list(encoded_list)
return dict((k.decode('utf-8'),
v.decode('utf-8')) for k, v in encoded_parsed.items())
def parse_http_list(u):
"""A unicode-safe version of urllib2.parse_http_list"""
encoded_str = u.encode('utf-8')
encoded_list = urllib2.parse_http_list(encoded_str)
return [s.decode('utf-8') for s in encoded_list]
def parse_authorization_header(authorization_header):
"""Parse an OAuth authorization header into a list of 2-tuples"""
auth_scheme = u'OAuth '
if authorization_header.startswith(auth_scheme):
authorization_header = authorization_header.replace(auth_scheme, u'', 1)
items = parse_http_list(authorization_header)
try:
return parse_keqv_list(items).items()
except ValueError:
raise ValueError('Malformed authorization header')
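# Hedged example (header value is made up; pair order follows Python 2 dict
# iteration and is not guaranteed):
#
#     hdr = u'OAuth oauth_token="abc", oauth_version="1.0"'
#     parse_authorization_header(hdr)
#     # -> [(u'oauth_token', u'abc'), (u'oauth_version', u'1.0')]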
| gpl-3.0 |
ThePlayground/android_kernel_htc_shooter | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
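# Illustrative test-file fragment (made-up values; lines follow the
# "cmd: opcode: threadid: data" syntax parsed below, and comments must be
# full lines starting with '#'):
#
#   # thread 0 takes lock 0, we wait for it, then release
#   C: schedfifo:  0: 80
#   C: locknowait: 0: 0
#   W: locked:     0: 0
#   C: unlock:     0: 0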
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
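# Worked example of the "M" (mutex state) branch (illustrative): for status
# value 432, analyse(432, ["M", "eq", 3], "1") isolates decimal digit 1
# (432 / 10**1 % 10 == 3), compares it with top[2] == 3, and returns 1.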
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
2014c2g8/c2g8 | wsgi/static/Brython2.1.0-20140419-113919/Lib/collections/abc.py | 739 | 16026 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
# Private list of types that we want to register with the various ABCs
# so that they will pass tests like:
# it = iter(somebytearray)
# assert isinstance(it, Iterable)
# Note: in other implementations, these types may not be distinct
# and they may have their own implementation-specific types that
# are not included in this list.
bytes_iterator = type(iter(b''))
bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
mappingproxy = type(type.__dict__)
### ONE-TRICK PONIES ###
class Hashable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __hash__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Hashable:
for B in C.__mro__:
if "__hash__" in B.__dict__:
if B.__dict__["__hash__"]:
return True
break
return NotImplemented
class Iterable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __iter__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Iterable:
if any("__iter__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Iterator(Iterable):
__slots__ = ()
@abstractmethod
def __next__(self):
raise StopIteration
def __iter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is Iterator:
if (any("__next__" in B.__dict__ for B in C.__mro__) and
any("__iter__" in B.__dict__ for B in C.__mro__)):
return True
return NotImplemented
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
Iterator.register(dict_keyiterator)
Iterator.register(dict_valueiterator)
Iterator.register(dict_itemiterator)
Iterator.register(list_iterator)
Iterator.register(list_reverseiterator)
Iterator.register(range_iterator)
Iterator.register(set_iterator)
Iterator.register(str_iterator)
Iterator.register(tuple_iterator)
Iterator.register(zip_iterator)
class Sized(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __len__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Sized:
if any("__len__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Container(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __contains__(self, x):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Container:
if any("__contains__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Callable(metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def __call__(self, *args, **kwds):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Callable:
if any("__call__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
__slots__ = ()
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other < self
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other <= self
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
def __ne__(self, other):
return not (self == other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
def isdisjoint(self, other):
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
This must be compatible with __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxsize
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h
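# A minimal concrete Set built on this ABC (sketch, not part of the module):
# only __contains__, __iter__ and __len__ are supplied; comparisons and the
# set operators come from the mixins above, and a frozenset-compatible hash
# can be opted into with `__hash__ = Set._hash`.
#
#     class ListBasedSet(Set):
#         def __init__(self, iterable):
#             self.elements = lst = []
#             for value in iterable:
#                 if value not in lst:
#                     lst.append(value)
#         def __iter__(self):
#             return iter(self.elements)
#         def __contains__(self, value):
#             return value in self.elements
#         def __len__(self):
#             return len(self.elements)
#
#     overlap = ListBasedSet('abcdef') & ListBasedSet('defghi')   # uses __and__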
Set.register(frozenset)
class MutableSet(Set):
__slots__ = ()
@abstractmethod
def add(self, value):
"""Add an element."""
raise NotImplementedError
@abstractmethod
def discard(self, value):
"""Remove an element. Do not raise an exception if absent."""
raise NotImplementedError
def remove(self, value):
"""Remove an element. If not a member, raise a KeyError."""
if value not in self:
raise KeyError(value)
self.discard(value)
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError
self.discard(value)
return value
def clear(self):
"""This is slow (creates N new iterators!) but effective."""
try:
while True:
self.pop()
except KeyError:
pass
def __ior__(self, it):
for value in it:
self.add(value)
return self
def __iand__(self, it):
for value in (self - it):
self.discard(value)
return self
def __ixor__(self, it):
if it is self:
self.clear()
else:
if not isinstance(it, Set):
it = self._from_iterable(it)
for value in it:
if value in self:
self.discard(value)
else:
self.add(value)
return self
def __isub__(self, it):
if it is self:
self.clear()
else:
for value in it:
self.discard(value)
return self
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
__slots__ = ()
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def keys(self):
return KeysView(self)
def items(self):
return ItemsView(self)
def values(self):
return ValuesView(self)
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return dict(self.items()) == dict(other.items())
def __ne__(self, other):
return not (self == other)
Mapping.register(mappingproxy)
class MappingView(Sized):
def __init__(self, mapping):
self._mapping = mapping
def __len__(self):
return len(self._mapping)
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, key):
return key in self._mapping
def __iter__(self):
for key in self._mapping:
yield key
KeysView.register(dict_keys)
class ItemsView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, item):
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v == value
def __iter__(self):
for key in self._mapping:
yield (key, self._mapping[key])
ItemsView.register(dict_items)
class ValuesView(MappingView):
def __contains__(self, value):
for key in self._mapping:
if value == self._mapping[key]:
return True
return False
def __iter__(self):
for key in self._mapping:
yield self._mapping[key]
ValuesView.register(dict_values)
class MutableMapping(Mapping):
__slots__ = ()
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@abstractmethod
def __delitem__(self, key):
raise KeyError
__marker = object()
def pop(self, key, default=__marker):
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def popitem(self):
try:
key = next(iter(self))
except StopIteration:
raise KeyError
value = self[key]
del self[key]
return key, value
def clear(self):
try:
while True:
self.popitem()
except KeyError:
pass
def update(*args, **kwds):
if len(args) > 2:
raise TypeError("update() takes at most 2 positional "
"arguments ({} given)".format(len(args)))
elif not args:
raise TypeError("update() takes at least 1 argument (0 given)")
self = args[0]
other = args[1] if len(args) >= 2 else ()
if isinstance(other, Mapping):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
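# Minimal MutableMapping sketch (illustrative): providing the five abstract
# methods yields get/pop/popitem/clear/update/setdefault for free.
#
#     class DictBacked(MutableMapping):      # hypothetical example
#         def __init__(self):
#             self._store = {}
#         def __getitem__(self, key):
#             return self._store[key]
#         def __setitem__(self, key, value):
#             self._store[key] = value
#         def __delitem__(self, key):
#             del self._store[key]
#         def __iter__(self):
#             return iter(self._store)
#         def __len__(self):
#             return len(self._store)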
MutableMapping.register(dict)
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
"""All the operations on a read-only sequence.
Concrete subclasses must override __new__ or __init__,
__getitem__, and __len__.
"""
__slots__ = ()
@abstractmethod
def __getitem__(self, index):
raise IndexError
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v == value:
return True
return False
def __reversed__(self):
for i in reversed(range(len(self))):
yield self[i]
def index(self, value):
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
return sum(1 for v in self if v == value)
Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
class ByteString(Sequence):
"""This unifies bytes and bytearray.
XXX Should add all their methods.
"""
__slots__ = ()
ByteString.register(bytes)
ByteString.register(bytearray)
class MutableSequence(Sequence):
__slots__ = ()
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@abstractmethod
def __delitem__(self, index):
raise IndexError
@abstractmethod
def insert(self, index, value):
raise IndexError
def append(self, value):
self.insert(len(self), value)
def clear(self):
try:
while True:
self.pop()
except IndexError:
pass
def reverse(self):
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
for v in values:
self.append(v)
def pop(self, index=-1):
v = self[index]
del self[index]
return v
def remove(self, value):
del self[self.index(value)]
def __iadd__(self, values):
self.extend(values)
return self
MutableSequence.register(list)
MutableSequence.register(bytearray) # Multiply inheriting, see ByteString
| gpl-2.0 |
simartin/servo | components/script/dom/bindings/codegen/parser/WebIDL.py | 4 | 303096 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
""" A WebIDL parser. """
from ply import lex, yacc
import re
import os
import traceback
import math
import string
from collections import defaultdict, OrderedDict
from itertools import chain
# Machinery
def parseInt(literal):
string = literal
sign = 0
base = 0
if string[0] == '-':
sign = -1
string = string[1:]
else:
sign = 1
if string[0] == '0' and len(string) > 1:
if string[1] == 'x' or string[1] == 'X':
base = 16
string = string[2:]
else:
base = 8
string = string[1:]
else:
base = 10
value = int(string, base)
return value * sign
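# Examples of the literal forms handled above (illustrative):
#
#     parseInt("42")     # -> 42   (decimal)
#     parseInt("0x1F")   # -> 31   (hexadecimal)
#     parseInt("010")    # -> 8    (octal: leading zero)
#     parseInt("-0x10")  # -> -16  (sign handled separately)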
def enum(*names, **kw):
class Foo(object):
attrs = OrderedDict()
def __init__(self, names):
for v, k in enumerate(names):
self.attrs[k] = v
def __getattr__(self, attr):
if attr in self.attrs:
return self.attrs[attr]
raise AttributeError
def __setattr__(self, name, value): # this makes it read-only
raise NotImplementedError
if "base" not in kw:
return Foo(names)
return Foo(chain(list(kw["base"].attrs.keys()), names))
class WebIDLError(Exception):
def __init__(self, message, locations, warning=False):
self.message = message
self.locations = [str(loc) for loc in locations]
self.warning = warning
def __str__(self):
return "%s: %s%s%s" % (self.warning and 'warning' or 'error',
self.message,
", " if len(self.locations) != 0 else "",
"\n".join(self.locations))
class Location(object):
def __init__(self, lexer, lineno, lexpos, filename):
self._line = None
self._lineno = lineno
self._lexpos = lexpos
self._lexdata = lexer.lexdata
self._file = filename if filename else "<unknown>"
def __eq__(self, other):
return (self._lexpos == other._lexpos and
self._file == other._file)
def filename(self):
return self._file
def resolve(self):
if self._line:
return
startofline = self._lexdata.rfind('\n', 0, self._lexpos) + 1
endofline = self._lexdata.find('\n', self._lexpos, self._lexpos + 80)
if endofline != -1:
self._line = self._lexdata[startofline:endofline]
else:
self._line = self._lexdata[startofline:]
self._colno = self._lexpos - startofline
# Our line number seems to point to the start of self._lexdata
self._lineno += self._lexdata.count('\n', 0, startofline)
def get(self):
self.resolve()
return "%s line %s:%s" % (self._file, self._lineno, self._colno)
def _pointerline(self):
return " " * self._colno + "^"
def __str__(self):
self.resolve()
return "%s line %s:%s\n%s\n%s" % (self._file, self._lineno, self._colno,
self._line, self._pointerline())
class BuiltinLocation(object):
def __init__(self, text):
self.msg = text + "\n"
def __eq__(self, other):
return (isinstance(other, BuiltinLocation) and
self.msg == other.msg)
def __hash__(self):
return hash(self.msg)
def filename(self):
return '<builtin>'
def resolve(self):
pass
def get(self):
return self.msg
def __str__(self):
return self.get()
# Data Model
class IDLObject(object):
def __init__(self, location):
self.location = location
self.userData = dict()
def filename(self):
return self.location.filename()
def isInterface(self):
return False
def isNamespace(self):
return False
def isInterfaceMixin(self):
return False
def isEnum(self):
return False
def isCallback(self):
return False
def isType(self):
return False
def isDictionary(self):
return False
def isUnion(self):
return False
def isTypedef(self):
return False
def getUserData(self, key, default):
return self.userData.get(key, default)
def setUserData(self, key, value):
self.userData[key] = value
def addExtendedAttributes(self, attrs):
assert False # Override me!
def handleExtendedAttribute(self, attr):
assert False # Override me!
def _getDependentObjects(self):
assert False # Override me!
def getDeps(self, visited=None):
""" Return a set of files that this object depends on. If any of
these files are changed the parser needs to be rerun to regenerate
a new IDLObject.
The visited argument is a set of all the objects already visited.
We must test to see if we are in it, and if so, do nothing. This
prevents infinite recursion."""
# NB: We can't use visited=set() above because the default value is
# evaluated when the def statement is evaluated, not when the function
# is executed, so there would be one set for all invocations.
if visited is None:
visited = set()
if self in visited:
return set()
visited.add(self)
deps = set()
if self.filename() != "<builtin>":
deps.add(self.filename())
for d in self._getDependentObjects():
deps.update(d.getDeps(visited))
return deps
class IDLScope(IDLObject):
def __init__(self, location, parentScope, identifier):
IDLObject.__init__(self, location)
self.parentScope = parentScope
if identifier:
assert isinstance(identifier, IDLIdentifier)
self._name = identifier
else:
self._name = None
self._dict = {}
self.globalNames = set()
# A mapping from global name to the set of global interfaces
# that have that global name.
self.globalNameMapping = defaultdict(set)
def __str__(self):
return self.QName()
def QName(self):
# It's possible for us to be called before __init__ has been called, for
# the IDLObjectWithScope case. In that case, self._name won't be set yet.
if hasattr(self, "_name"):
name = self._name
else:
name = None
if name:
return name.QName() + "::"
return "::"
def ensureUnique(self, identifier, object):
"""
Ensure that there is at most one 'identifier' in scope ('self').
Note that object can be None. This occurs if we end up here for an
interface type we haven't seen yet.
"""
assert isinstance(identifier, IDLUnresolvedIdentifier)
assert not object or isinstance(object, IDLObjectWithIdentifier)
assert not object or object.identifier == identifier
if identifier.name in self._dict:
if not object:
return
# ensureUnique twice with the same object is not allowed
assert id(object) != id(self._dict[identifier.name])
replacement = self.resolveIdentifierConflict(self, identifier,
self._dict[identifier.name],
object)
self._dict[identifier.name] = replacement
return
assert object
self._dict[identifier.name] = object
def resolveIdentifierConflict(self, scope, identifier, originalObject, newObject):
if (isinstance(originalObject, IDLExternalInterface) and
isinstance(newObject, IDLExternalInterface) and
originalObject.identifier.name == newObject.identifier.name):
return originalObject
if (isinstance(originalObject, IDLExternalInterface) or
isinstance(newObject, IDLExternalInterface)):
raise WebIDLError(
"Name collision between "
"interface declarations for identifier '%s' at '%s' and '%s'"
% (identifier.name,
originalObject.location, newObject.location), [])
if (isinstance(originalObject, IDLDictionary) or
isinstance(newObject, IDLDictionary)):
raise WebIDLError(
"Name collision between dictionary declarations for "
"identifier '%s'.\n%s\n%s"
% (identifier.name,
originalObject.location, newObject.location), [])
# We do the merging of overloads here as opposed to in IDLInterface
# because we need to merge overloads of NamedConstructors and we need to
# detect conflicts in those across interfaces. See also the comment in
# IDLInterface.addExtendedAttributes for "NamedConstructor".
if (isinstance(originalObject, IDLMethod) and
isinstance(newObject, IDLMethod)):
return originalObject.addOverload(newObject)
# Default to throwing, derived classes can override.
conflictdesc = "\n\t%s at %s\n\t%s at %s" % (originalObject,
originalObject.location,
newObject,
newObject.location)
raise WebIDLError(
"Multiple unresolvable definitions of identifier '%s' in scope '%s'%s"
% (identifier.name, str(self), conflictdesc), [])
def _lookupIdentifier(self, identifier):
return self._dict[identifier.name]
def lookupIdentifier(self, identifier):
assert isinstance(identifier, IDLIdentifier)
assert identifier.scope == self
return self._lookupIdentifier(identifier)
def addIfaceGlobalNames(self, interfaceName, globalNames):
"""Record the global names (from |globalNames|) that can be used in
[Exposed] to expose things in a global named |interfaceName|"""
self.globalNames.update(globalNames)
for name in globalNames:
self.globalNameMapping[name].add(interfaceName)
class IDLIdentifier(IDLObject):
def __init__(self, location, scope, name):
IDLObject.__init__(self, location)
self.name = name
assert isinstance(scope, IDLScope)
self.scope = scope
def __str__(self):
return self.QName()
def QName(self):
return self.scope.QName() + self.name
def __hash__(self):
return self.QName().__hash__()
def __eq__(self, other):
return self.QName() == other.QName()
def object(self):
return self.scope.lookupIdentifier(self)
class IDLUnresolvedIdentifier(IDLObject):
def __init__(self, location, name, allowDoubleUnderscore=False,
allowForbidden=False):
IDLObject.__init__(self, location)
assert len(name) > 0
if name == "__noSuchMethod__":
raise WebIDLError("__noSuchMethod__ is deprecated", [location])
if name[:2] == "__" and name != "__content" and not allowDoubleUnderscore:
raise WebIDLError("Identifiers beginning with __ are reserved",
[location])
if name[0] == '_' and not allowDoubleUnderscore:
name = name[1:]
if (name in ["constructor", "toString"] and
not allowForbidden):
raise WebIDLError("Cannot use reserved identifier '%s'" % (name),
[location])
self.name = name
def __str__(self):
return self.QName()
def QName(self):
return "<unresolved scope>::" + self.name
def resolve(self, scope, object):
assert isinstance(scope, IDLScope)
assert not object or isinstance(object, IDLObjectWithIdentifier)
assert not object or object.identifier == self
scope.ensureUnique(self, object)
identifier = IDLIdentifier(self.location, scope, self.name)
if object:
object.identifier = identifier
return identifier
def finish(self):
assert False # Should replace with a resolved identifier first.
class IDLObjectWithIdentifier(IDLObject):
def __init__(self, location, parentScope, identifier):
IDLObject.__init__(self, location)
assert isinstance(identifier, IDLUnresolvedIdentifier)
self.identifier = identifier
if parentScope:
self.resolve(parentScope)
def resolve(self, parentScope):
assert isinstance(parentScope, IDLScope)
assert isinstance(self.identifier, IDLUnresolvedIdentifier)
self.identifier.resolve(parentScope, self)
class IDLObjectWithScope(IDLObjectWithIdentifier, IDLScope):
def __init__(self, location, parentScope, identifier):
assert isinstance(identifier, IDLUnresolvedIdentifier)
IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
IDLScope.__init__(self, location, parentScope, self.identifier)
class IDLIdentifierPlaceholder(IDLObjectWithIdentifier):
def __init__(self, location, identifier):
assert isinstance(identifier, IDLUnresolvedIdentifier)
IDLObjectWithIdentifier.__init__(self, location, None, identifier)
def finish(self, scope):
try:
scope._lookupIdentifier(self.identifier)
except:
raise WebIDLError("Unresolved type '%s'." % self.identifier,
[self.location])
obj = self.identifier.resolve(scope, None)
return scope.lookupIdentifier(obj)
class IDLExposureMixins():
def __init__(self, location):
# _exposureGlobalNames are the global names listed in our [Exposed]
# extended attribute. exposureSet is the exposure set as defined in the
# Web IDL spec: it contains interface names.
self._exposureGlobalNames = set()
self.exposureSet = set()
self._location = location
self._globalScope = None
def finish(self, scope):
assert scope.parentScope is None
self._globalScope = scope
# Verify that our [Exposed] value, if any, makes sense.
for globalName in self._exposureGlobalNames:
if globalName not in scope.globalNames:
raise WebIDLError("Unknown [Exposed] value %s" % globalName,
[self._location])
# Verify that we are exposed _somewhere_ if we have some place to be
# exposed. We don't want to assert that we're definitely exposed
# because a lot of our parser tests have small-enough IDL snippets that
# they don't include any globals, and we don't really want to go through
# and add global interfaces and [Exposed] annotations to all those
# tests.
if len(scope.globalNames) != 0:
if (len(self._exposureGlobalNames) == 0):
raise WebIDLError(("'%s' is not exposed anywhere even though we have "
"globals to be exposed to") % self,
[self.location])
globalNameSetToExposureSet(scope, self._exposureGlobalNames,
self.exposureSet)
def isExposedInWindow(self):
return 'Window' in self.exposureSet
def isExposedInAnyWorker(self):
return len(self.getWorkerExposureSet()) > 0
def isExposedInWorkerDebugger(self):
return len(self.getWorkerDebuggerExposureSet()) > 0
def isExposedInAnyWorklet(self):
return len(self.getWorkletExposureSet()) > 0
def isExposedInSomeButNotAllWorkers(self):
"""
Returns true if the Exposed extended attribute for this interface
exposes it in some worker globals but not others. The return value does
not depend on whether the interface is exposed in Window or System
globals.
"""
if not self.isExposedInAnyWorker():
return False
workerScopes = self.parentScope.globalNameMapping["Worker"]
return len(workerScopes.difference(self.exposureSet)) > 0
def getWorkerExposureSet(self):
workerScopes = self._globalScope.globalNameMapping["Worker"]
return workerScopes.intersection(self.exposureSet)
def getWorkletExposureSet(self):
workletScopes = self._globalScope.globalNameMapping["Worklet"]
return workletScopes.intersection(self.exposureSet)
def getWorkerDebuggerExposureSet(self):
workerDebuggerScopes = self._globalScope.globalNameMapping["WorkerDebugger"]
return workerDebuggerScopes.intersection(self.exposureSet)
class IDLExternalInterface(IDLObjectWithIdentifier):
def __init__(self, location, parentScope, identifier):
assert isinstance(identifier, IDLUnresolvedIdentifier)
assert isinstance(parentScope, IDLScope)
self.parent = None
IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
IDLObjectWithIdentifier.resolve(self, parentScope)
def finish(self, scope):
pass
def validate(self):
pass
def isIteratorInterface(self):
return False
def isExternal(self):
return True
def isInterface(self):
return True
def addExtendedAttributes(self, attrs):
if len(attrs) != 0:
raise WebIDLError("There are no extended attributes that are "
"allowed on external interfaces",
[attrs[0].location, self.location])
def resolve(self, parentScope):
pass
def getJSImplementation(self):
return None
def isJSImplemented(self):
return False
def hasProbablyShortLivingWrapper(self):
return False
def _getDependentObjects(self):
return set()
class IDLPartialDictionary(IDLObject):
def __init__(self, location, name, members, nonPartialDictionary):
assert isinstance(name, IDLUnresolvedIdentifier)
IDLObject.__init__(self, location)
self.identifier = name
self.members = members
self._nonPartialDictionary = nonPartialDictionary
self._finished = False
nonPartialDictionary.addPartialDictionary(self)
def addExtendedAttributes(self, attrs):
pass
def finish(self, scope):
if self._finished:
return
self._finished = True
# Need to make sure our non-partial dictionary gets
# finished so it can report cases when we only have partial
# dictionaries.
self._nonPartialDictionary.finish(scope)
def validate(self):
pass
class IDLPartialInterfaceOrNamespace(IDLObject):
def __init__(self, location, name, members, nonPartialInterfaceOrNamespace):
assert isinstance(name, IDLUnresolvedIdentifier)
IDLObject.__init__(self, location)
self.identifier = name
self.members = members
# propagatedExtendedAttrs are the ones that should get
# propagated to our non-partial interface.
self.propagatedExtendedAttrs = []
self._haveSecureContextExtendedAttribute = False
self._nonPartialInterfaceOrNamespace = nonPartialInterfaceOrNamespace
self._finished = False
nonPartialInterfaceOrNamespace.addPartial(self)
def addExtendedAttributes(self, attrs):
for attr in attrs:
identifier = attr.identifier()
if identifier == "NamedConstructor":
self.propagatedExtendedAttrs.append(attr)
elif identifier == "SecureContext":
self._haveSecureContextExtendedAttribute = True
# This gets propagated to all our members.
for member in self.members:
if member.getExtendedAttribute("SecureContext"):
raise WebIDLError("[SecureContext] specified on both a "
"partial interface member and on the "
"partial interface itself",
[member.location, attr.location])
member.addExtendedAttributes([attr])
elif identifier == "Exposed":
# This just gets propagated to all our members.
for member in self.members:
if len(member._exposureGlobalNames) != 0:
raise WebIDLError("[Exposed] specified on both a "
"partial interface member and on the "
"partial interface itself",
[member.location, attr.location])
member.addExtendedAttributes([attr])
else:
raise WebIDLError("Unknown extended attribute %s on partial "
"interface" % identifier,
[attr.location])
def finish(self, scope):
if self._finished:
return
self._finished = True
if (not self._haveSecureContextExtendedAttribute and
self._nonPartialInterfaceOrNamespace.getExtendedAttribute("SecureContext")):
# This gets propagated to all our members.
for member in self.members:
if member.getExtendedAttribute("SecureContext"):
raise WebIDLError("[SecureContext] specified on both a "
"partial interface member and on the "
"non-partial interface",
[member.location,
self._nonPartialInterfaceOrNamespace.location])
member.addExtendedAttributes(
[IDLExtendedAttribute(self._nonPartialInterfaceOrNamespace.location,
("SecureContext",))])
# Need to make sure our non-partial interface or namespace gets
# finished so it can report cases when we only have partial
# interfaces/namespaces.
self._nonPartialInterfaceOrNamespace.finish(scope)
def validate(self):
pass
def convertExposedAttrToGlobalNameSet(exposedAttr, targetSet):
assert len(targetSet) == 0
if exposedAttr.hasValue():
targetSet.add(exposedAttr.value())
else:
assert exposedAttr.hasArgs()
targetSet.update(exposedAttr.args())
def globalNameSetToExposureSet(globalScope, nameSet, exposureSet):
for name in nameSet:
exposureSet.update(globalScope.globalNameMapping[name])
class IDLInterfaceOrInterfaceMixinOrNamespace(IDLObjectWithScope, IDLExposureMixins):
def __init__(self, location, parentScope, name):
assert isinstance(parentScope, IDLScope)
assert isinstance(name, IDLUnresolvedIdentifier)
self._finished = False
self.members = []
self._partials = []
self._extendedAttrDict = {}
self._isKnownNonPartial = False
IDLObjectWithScope.__init__(self, location, parentScope, name)
IDLExposureMixins.__init__(self, location)
def finish(self, scope):
if not self._isKnownNonPartial:
raise WebIDLError("%s does not have a non-partial declaration" %
str(self), [self.location])
IDLExposureMixins.finish(self, scope)
# Now go ahead and merge in our partials.
for partial in self._partials:
partial.finish(scope)
self.addExtendedAttributes(partial.propagatedExtendedAttrs)
self.members.extend(partial.members)
def resolveIdentifierConflict(self, scope, identifier, originalObject, newObject):
assert isinstance(scope, IDLScope)
assert isinstance(originalObject, IDLInterfaceMember)
assert isinstance(newObject, IDLInterfaceMember)
retval = IDLScope.resolveIdentifierConflict(self, scope, identifier,
originalObject, newObject)
# Might be a ctor, which isn't in self.members
if newObject in self.members:
self.members.remove(newObject)
return retval
def typeName(self):
if self.isInterface():
return "interface"
if self.isNamespace():
return "namespace"
assert self.isInterfaceMixin()
return "interface mixin"
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
def setNonPartial(self, location, members):
if self._isKnownNonPartial:
raise WebIDLError("Two non-partial definitions for the "
"same %s" % self.typeName(),
[location, self.location])
self._isKnownNonPartial = True
# Now make it look like we were parsed at this new location, since
# that's the place where the interface is "really" defined
self.location = location
# Put the new members at the beginning
self.members = members + self.members
def addPartial(self, partial):
assert self.identifier.name == partial.identifier.name
self._partials.append(partial)
def getPartials(self):
# Don't let people mutate our guts.
return list(self._partials)
def finishMembers(self, scope):
# Assuming we've merged in our partials, set the _exposureGlobalNames on
# any members that don't have it set yet. Note that any partial
# interfaces that had [Exposed] set have already set up
# _exposureGlobalNames on all the members coming from them, so this is
# just implementing the "members default to interface or interface mixin
# that defined them" and "partial interfaces or interface mixins default
# to interface or interface mixin they're a partial for" rules from the
# spec.
for m in self.members:
# If m, or the partial m came from, had [Exposed]
# specified, it already has a nonempty exposure global names set.
if len(m._exposureGlobalNames) == 0:
m._exposureGlobalNames.update(self._exposureGlobalNames)
if m.isAttr() and m.stringifier:
m.expand(self.members)
# resolve() will modify self.members, so we need to iterate
# over a copy of the member list here.
for member in list(self.members):
member.resolve(self)
for member in self.members:
member.finish(scope)
# Now that we've finished our members, which has updated their exposure
# sets, make sure they aren't exposed in places where we are not.
for member in self.members:
if not member.exposureSet.issubset(self.exposureSet):
raise WebIDLError("Interface or interface mixin member has "
"larger exposure set than its container",
[member.location, self.location])
def isExternal(self):
return False
class IDLInterfaceMixin(IDLInterfaceOrInterfaceMixinOrNamespace):
def __init__(self, location, parentScope, name, members, isKnownNonPartial):
self.actualExposureGlobalNames = set()
assert isKnownNonPartial or len(members) == 0
IDLInterfaceOrInterfaceMixinOrNamespace.__init__(self, location, parentScope, name)
if isKnownNonPartial:
self.setNonPartial(location, members)
def __str__(self):
return "Interface mixin '%s'" % self.identifier.name
def isInterfaceMixin(self):
return True
def finish(self, scope):
if self._finished:
return
self._finished = True
# Expose to the globals of interfaces that includes this mixin if this
# mixin has no explicit [Exposed] so that its members can be exposed
# based on the base interface exposure set.
#
# Make sure this is done before IDLExposureMixins.finish call, since
# that converts our set of exposure global names to an actual exposure
# set.
hasImplicitExposure = len(self._exposureGlobalNames) == 0
if hasImplicitExposure:
self._exposureGlobalNames.update(self.actualExposureGlobalNames)
IDLInterfaceOrInterfaceMixinOrNamespace.finish(self, scope)
self.finishMembers(scope)
def validate(self):
for member in self.members:
if member.isAttr():
if member.inherit:
raise WebIDLError("Interface mixin member cannot include "
"an inherited attribute",
[member.location, self.location])
if member.isStatic():
raise WebIDLError("Interface mixin member cannot include "
"a static member",
[member.location, self.location])
if member.isMethod():
if member.isStatic():
raise WebIDLError("Interface mixin member cannot include "
"a static operation",
[member.location, self.location])
if (member.isGetter() or
member.isSetter() or
member.isDeleter() or
member.isLegacycaller()):
raise WebIDLError("Interface mixin member cannot include a "
"special operation",
[member.location, self.location])
def addExtendedAttributes(self, attrs):
for attr in attrs:
identifier = attr.identifier()
if identifier == "SecureContext":
if not attr.noArguments():
raise WebIDLError("[%s] must take no arguments" % identifier,
[attr.location])
# This gets propagated to all our members.
for member in self.members:
if member.getExtendedAttribute("SecureContext"):
raise WebIDLError("[SecureContext] specified on both "
"an interface mixin member and on"
"the interface mixin itself",
[member.location, attr.location])
member.addExtendedAttributes([attr])
elif identifier == "Exposed":
convertExposedAttrToGlobalNameSet(attr,
self._exposureGlobalNames)
else:
raise WebIDLError("Unknown extended attribute %s on interface" % identifier,
[attr.location])
attrlist = attr.listValue()
self._extendedAttrDict[identifier] = attrlist if len(attrlist) else True
def _getDependentObjects(self):
return set(self.members)
class IDLInterfaceOrNamespace(IDLInterfaceOrInterfaceMixinOrNamespace):
def __init__(self, location, parentScope, name, parent, members,
isKnownNonPartial, toStringTag):
assert isKnownNonPartial or not parent
assert isKnownNonPartial or len(members) == 0
self.parent = None
self._callback = False
self.maplikeOrSetlikeOrIterable = None
# namedConstructors needs deterministic ordering because bindings code
# outputs the constructs in the order that namedConstructors enumerates
# them.
self.namedConstructors = list()
self.legacyWindowAliases = []
self.includedMixins = set()
# self.interfacesBasedOnSelf is the set of interfaces that inherit from
# self, including self itself.
# Used for distinguishability checking.
self.interfacesBasedOnSelf = set([self])
self._hasChildInterfaces = False
self._isOnGlobalProtoChain = False
# Tracking of the number of reserved slots we need for our
# members and those of ancestor interfaces.
self.totalMembersInSlots = 0
# Tracking of the number of our own members we have in slots
self._ownMembersInSlots = 0
# If this is an iterator interface, we need to know what iterable
# interface we're iterating for in order to get its nativeType.
self.iterableInterface = None
# True if we have cross-origin members.
self.hasCrossOriginMembers = False
# True if some descendant (including ourselves) has cross-origin members
self.hasDescendantWithCrossOriginMembers = False
self.toStringTag = toStringTag
IDLInterfaceOrInterfaceMixinOrNamespace.__init__(self, location, parentScope, name)
if isKnownNonPartial:
self.setNonPartial(location, parent, members)
def ctor(self):
identifier = IDLUnresolvedIdentifier(self.location, "constructor",
allowForbidden=True)
try:
return self._lookupIdentifier(identifier)
except:
return None
def isIterable(self):
return (self.maplikeOrSetlikeOrIterable and
self.maplikeOrSetlikeOrIterable.isIterable())
def isIteratorInterface(self):
return self.iterableInterface is not None
def finish(self, scope):
if self._finished:
return
self._finished = True
IDLInterfaceOrInterfaceMixinOrNamespace.finish(self, scope)
if len(self.legacyWindowAliases) > 0:
if not self.hasInterfaceObject():
raise WebIDLError("Interface %s unexpectedly has [LegacyWindowAlias] "
"and [NoInterfaceObject] together" % self.identifier.name,
[self.location])
if not self.isExposedInWindow():
raise WebIDLError("Interface %s has [LegacyWindowAlias] "
"but not exposed in Window" % self.identifier.name,
[self.location])
# Generate maplike/setlike interface members. Since generated members
# need to be treated like regular interface members, do this before
# things like exposure setting.
for member in self.members:
if member.isMaplikeOrSetlikeOrIterable():
# Check that we only have one interface declaration (currently
# there can only be one maplike/setlike declaration per
# interface)
if self.maplikeOrSetlikeOrIterable:
raise WebIDLError("%s declaration used on "
"interface that already has %s "
"declaration" %
(member.maplikeOrSetlikeOrIterableType,
self.maplikeOrSetlikeOrIterable.maplikeOrSetlikeOrIterableType),
[self.maplikeOrSetlikeOrIterable.location,
member.location])
self.maplikeOrSetlikeOrIterable = member
# If we've got a maplike or setlike declaration, we'll be building all of
# our required methods in Codegen. Generate members now.
self.maplikeOrSetlikeOrIterable.expand(self.members, self.isJSImplemented())
assert not self.parent or isinstance(self.parent, IDLIdentifierPlaceholder)
parent = self.parent.finish(scope) if self.parent else None
if parent and isinstance(parent, IDLExternalInterface):
raise WebIDLError("%s inherits from %s which does not have "
"a definition" %
(self.identifier.name,
self.parent.identifier.name),
[self.location])
if parent and not isinstance(parent, IDLInterface):
raise WebIDLError("%s inherits from %s which is not an interface " %
(self.identifier.name,
self.parent.identifier.name),
[self.location, parent.location])
self.parent = parent
assert iter(self.members)
if self.isNamespace():
assert not self.parent
for m in self.members:
if m.isAttr() or m.isMethod():
if m.isStatic():
raise WebIDLError("Don't mark things explicitly static "
"in namespaces",
[self.location, m.location])
# Just mark all our methods/attributes as static. The other
# option is to duplicate the relevant InterfaceMembers
# production bits but modified to produce static stuff to
# start with, but that sounds annoying.
m.forceStatic()
if self.parent:
self.parent.finish(scope)
self.parent._hasChildInterfaces = True
self.totalMembersInSlots = self.parent.totalMembersInSlots
# Interfaces with [Global] must not have anything inherit from them
if self.parent.getExtendedAttribute("Global"):
# Note: This is not a self.parent.isOnGlobalProtoChain() check
# because ancestors of a [Global] interface can have other
# descendants.
raise WebIDLError("[Global] interface has another interface "
"inheriting from it",
[self.location, self.parent.location])
# Make sure that we're not exposed in places where our parent is not
if not self.exposureSet.issubset(self.parent.exposureSet):
raise WebIDLError("Interface %s is exposed in globals where its "
"parent interface %s is not exposed." %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
# Callbacks must not inherit from non-callbacks.
# XXXbz Can non-callbacks inherit from callbacks? Spec issue pending.
if self.isCallback():
if not self.parent.isCallback():
raise WebIDLError("Callback interface %s inheriting from "
"non-callback interface %s" %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
elif self.parent.isCallback():
raise WebIDLError("Non-callback interface %s inheriting from "
"callback interface %s" %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
# Interfaces which have interface objects can't inherit
# from [NoInterfaceObject] interfaces.
if (self.parent.getExtendedAttribute("NoInterfaceObject") and
not self.getExtendedAttribute("NoInterfaceObject")):
raise WebIDLError("Interface %s does not have "
"[NoInterfaceObject] but inherits from "
"interface %s which does" %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
# Interfaces that are not [SecureContext] can't inherit
# from [SecureContext] interfaces.
if (self.parent.getExtendedAttribute("SecureContext") and
not self.getExtendedAttribute("SecureContext")):
raise WebIDLError("Interface %s does not have "
"[SecureContext] but inherits from "
"interface %s which does" %
(self.identifier.name,
self.parent.identifier.name),
[self.location, self.parent.location])
for mixin in self.includedMixins:
mixin.finish(scope)
cycleInGraph = self.findInterfaceLoopPoint(self)
if cycleInGraph:
raise WebIDLError(
"Interface %s has itself as ancestor" % self.identifier.name,
[self.location, cycleInGraph.location])
self.finishMembers(scope)
ctor = self.ctor()
if ctor is not None:
if not self.hasInterfaceObject():
raise WebIDLError(
"Can't have both a constructor and [NoInterfaceObject]",
[self.location, ctor.location])
if self.globalNames:
raise WebIDLError(
"Can't have both a constructor and [Global]",
[self.location, ctor.location])
assert(ctor._exposureGlobalNames == self._exposureGlobalNames)
ctor._exposureGlobalNames.update(self._exposureGlobalNames)
# Remove the constructor operation from our member list so
# it doesn't get in the way later.
self.members.remove(ctor)
for ctor in self.namedConstructors:
if self.globalNames:
raise WebIDLError(
"Can't have both a named constructor and [Global]",
[self.location, ctor.location])
assert len(ctor._exposureGlobalNames) == 0
ctor._exposureGlobalNames.update(self._exposureGlobalNames)
ctor.finish(scope)
# Make a copy of our member list, so things that implement us
# can get those without all the stuff we implement ourselves
# admixed.
self.originalMembers = list(self.members)
for mixin in sorted(self.includedMixins,
key=lambda x: x.identifier.name):
for mixinMember in mixin.members:
for member in self.members:
if mixinMember.identifier.name == member.identifier.name:
raise WebIDLError(
"Multiple definitions of %s on %s coming from 'includes' statements" %
(member.identifier.name, self),
[mixinMember.location, member.location])
self.members.extend(mixin.members)
for ancestor in self.getInheritedInterfaces():
ancestor.interfacesBasedOnSelf.add(self)
if (ancestor.maplikeOrSetlikeOrIterable is not None and
self.maplikeOrSetlikeOrIterable is not None):
raise WebIDLError("Cannot have maplike/setlike on %s that "
"inherits %s, which is already "
"maplike/setlike" %
(self.identifier.name,
ancestor.identifier.name),
[self.maplikeOrSetlikeOrIterable.location,
ancestor.maplikeOrSetlikeOrIterable.location])
# Deal with interfaces marked [Unforgeable], now that we have our full
# member list, except unforgeables pulled in from parents. We want to
# do this before we set "originatingInterface" on our unforgeable
# members.
if self.getExtendedAttribute("Unforgeable"):
# Check that the interface already has all the things the
# spec would otherwise require us to synthesize and is
# missing the ones we plan to synthesize.
if not any(m.isMethod() and m.isStringifier() for m in self.members):
raise WebIDLError("Unforgeable interface %s does not have a "
"stringifier" % self.identifier.name,
[self.location])
for m in self.members:
if m.identifier.name == "toJSON":
raise WebIDLError("Unforgeable interface %s has a "
"toJSON so we won't be able to add "
"one ourselves" % self.identifier.name,
[self.location, m.location])
if m.identifier.name == "valueOf" and not m.isStatic():
raise WebIDLError("Unforgeable interface %s has a valueOf "
"member so we won't be able to add one "
"ourselves" % self.identifier.name,
[self.location, m.location])
for member in self.members:
if ((member.isAttr() or member.isMethod()) and
member.isUnforgeable() and
not hasattr(member, "originatingInterface")):
member.originatingInterface = self
for member in self.members:
if ((member.isMethod() and
member.getExtendedAttribute("CrossOriginCallable")) or
(member.isAttr() and
(member.getExtendedAttribute("CrossOriginReadable") or
member.getExtendedAttribute("CrossOriginWritable")))):
self.hasCrossOriginMembers = True
break
if self.hasCrossOriginMembers:
parent = self
while parent:
parent.hasDescendantWithCrossOriginMembers = True
parent = parent.parent
# Compute slot indices for our members before we pull in unforgeable
# members from our parent. Also, maplike/setlike declarations get a
# slot to hold their backing object.
for member in self.members:
if ((member.isAttr() and
(member.getExtendedAttribute("StoreInSlot") or
member.getExtendedAttribute("Cached"))) or
member.isMaplikeOrSetlike()):
if self.isJSImplemented() and not member.isMaplikeOrSetlike():
raise WebIDLError("Interface %s is JS-implemented and we "
"don't support [Cached] or [StoreInSlot] "
"on JS-implemented interfaces" %
self.identifier.name,
[self.location, member.location])
if member.slotIndices is None:
member.slotIndices = dict()
member.slotIndices[self.identifier.name] = self.totalMembersInSlots
self.totalMembersInSlots += 1
if member.getExtendedAttribute("StoreInSlot"):
self._ownMembersInSlots += 1
if self.parent:
# Make sure we don't shadow any of the [Unforgeable] attributes on our
# ancestor interfaces. We don't have to worry about mixins here, because
# those have already been imported into the relevant .members lists. And
# we don't have to worry about anything other than our parent, because it
# has already imported its ancestors' unforgeable attributes into its
# member list.
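            # E.g. if the parent declares a hypothetical "[Unforgeable]
            # readonly attribute DOMString name;", redeclaring "name" on this
            # interface is rejected below.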
for unforgeableMember in (member for member in self.parent.members if
(member.isAttr() or member.isMethod()) and
member.isUnforgeable()):
shadows = [m for m in self.members if
(m.isAttr() or m.isMethod()) and
not m.isStatic() and
m.identifier.name == unforgeableMember.identifier.name]
if len(shadows) != 0:
locs = [unforgeableMember.location] + [s.location for s
in shadows]
raise WebIDLError("Interface %s shadows [Unforgeable] "
"members of %s" %
(self.identifier.name,
ancestor.identifier.name),
locs)
# And now just stick it in our members, since we won't be
# inheriting this down the proto chain. If we really cared we
# could try to do something where we set up the unforgeable
# attributes/methods of ancestor interfaces, with their
# corresponding getters, on our interface, but that gets pretty
# complicated and seems unnecessary.
self.members.append(unforgeableMember)
# At this point, we have all of our members. If the current interface
# uses maplike/setlike, check for collisions anywhere in the current
# interface or higher in the inheritance chain.
if self.maplikeOrSetlikeOrIterable:
testInterface = self
isAncestor = False
while testInterface:
self.maplikeOrSetlikeOrIterable.checkCollisions(testInterface.members,
isAncestor)
isAncestor = True
testInterface = testInterface.parent
# Ensure that there's at most one of each {named,indexed}
# {getter,setter,deleter}, at most one stringifier,
# and at most one legacycaller. Note that this last is not
# quite per spec, but in practice no one overloads
# legacycallers. Also note that in practice we disallow
# indexed deleters, but it simplifies some other code to
# treat deleter analogously to getter/setter by
# prefixing it with "named".
specialMembersSeen = {}
for member in self.members:
if not member.isMethod():
continue
if member.isGetter():
memberType = "getters"
elif member.isSetter():
memberType = "setters"
elif member.isDeleter():
memberType = "deleters"
elif member.isStringifier():
memberType = "stringifiers"
elif member.isLegacycaller():
memberType = "legacycallers"
else:
continue
if (memberType != "stringifiers" and memberType != "legacycallers"):
if member.isNamed():
memberType = "named " + memberType
else:
assert member.isIndexed()
memberType = "indexed " + memberType
if memberType in specialMembersSeen:
raise WebIDLError("Multiple " + memberType + " on %s" % (self),
[self.location,
specialMembersSeen[memberType].location,
member.location])
specialMembersSeen[memberType] = member
if self.getExtendedAttribute("LegacyUnenumerableNamedProperties"):
# Check that we have a named getter.
if "named getters" not in specialMembersSeen:
raise WebIDLError(
"Interface with [LegacyUnenumerableNamedProperties] does "
"not have a named getter",
[self.location])
ancestor = self.parent
while ancestor:
if ancestor.getExtendedAttribute("LegacyUnenumerableNamedProperties"):
raise WebIDLError(
"Interface with [LegacyUnenumerableNamedProperties] "
"inherits from another interface with "
"[LegacyUnenumerableNamedProperties]",
[self.location, ancestor.location])
ancestor = ancestor.parent
if self._isOnGlobalProtoChain:
# Make sure we have no named setters or deleters
for memberType in ["setter", "deleter"]:
memberId = "named " + memberType + "s"
if memberId in specialMembersSeen:
raise WebIDLError("Interface with [Global] has a named %s" %
memberType,
[self.location,
specialMembersSeen[memberId].location])
# Make sure we're not [OverrideBuiltins]
if self.getExtendedAttribute("OverrideBuiltins"):
raise WebIDLError("Interface with [Global] also has "
"[OverrideBuiltins]",
[self.location])
# Mark all of our ancestors as being on the global's proto chain too
parent = self.parent
while parent:
# Must not inherit from an interface with [OverrideBuiltins]
if parent.getExtendedAttribute("OverrideBuiltins"):
raise WebIDLError("Interface with [Global] inherits from "
"interface with [OverrideBuiltins]",
[self.location, parent.location])
parent._isOnGlobalProtoChain = True
parent = parent.parent
def validate(self):
def checkDuplicateNames(member, name, attributeName):
for m in self.members:
if m.identifier.name == name:
raise WebIDLError("[%s=%s] has same name as interface member" %
(attributeName, name),
[member.location, m.location])
if m.isMethod() and m != member and name in m.aliases:
raise WebIDLError("conflicting [%s=%s] definitions" %
(attributeName, name),
[member.location, m.location])
if m.isAttr() and m != member and name in m.bindingAliases:
raise WebIDLError("conflicting [%s=%s] definitions" %
(attributeName, name),
[member.location, m.location])
# We also don't support inheriting from unforgeable interfaces.
if self.getExtendedAttribute("Unforgeable") and self.hasChildInterfaces():
locations = ([self.location] +
list(i.location for i in
self.interfacesBasedOnSelf if i.parent == self))
raise WebIDLError("%s is an unforgeable ancestor interface" %
self.identifier.name,
locations)
ctor = self.ctor()
if ctor is not None:
ctor.validate()
for namedCtor in self.namedConstructors:
namedCtor.validate()
indexedGetter = None
hasLengthAttribute = False
for member in self.members:
member.validate()
if self.isCallback() and member.getExtendedAttribute("Replaceable"):
raise WebIDLError("[Replaceable] used on an attribute on "
"interface %s which is a callback interface" %
self.identifier.name,
[self.location, member.location])
# Check that PutForwards refers to another attribute and that no
            # cycles exist in forwarded assignments. Also check for an
# integer-typed "length" attribute.
if member.isAttr():
if (member.identifier.name == "length" and
member.type.isInteger()):
hasLengthAttribute = True
iface = self
attr = member
putForwards = attr.getExtendedAttribute("PutForwards")
if putForwards and self.isCallback():
raise WebIDLError("[PutForwards] used on an attribute "
"on interface %s which is a callback "
"interface" % self.identifier.name,
[self.location, member.location])
while putForwards is not None:
forwardIface = attr.type.unroll().inner
                    forwardAttr = None
for forwardedMember in forwardIface.members:
if (not forwardedMember.isAttr() or
forwardedMember.identifier.name != putForwards[0]):
continue
if forwardedMember == member:
raise WebIDLError("Cycle detected in forwarded "
"assignments for attribute %s on "
"%s" %
(member.identifier.name, self),
[member.location])
                        forwardAttr = forwardedMember
break
                    if forwardAttr is None:
raise WebIDLError("Attribute %s on %s forwards to "
"missing attribute %s" %
(attr.identifier.name, iface, putForwards),
[attr.location])
iface = forwardIface
                    attr = forwardAttr
putForwards = attr.getExtendedAttribute("PutForwards")
# Check that the name of an [Alias] doesn't conflict with an
# interface member and whether we support indexed properties.
if member.isMethod():
if member.isGetter() and member.isIndexed():
indexedGetter = member
for alias in member.aliases:
if self.isOnGlobalProtoChain():
raise WebIDLError("[Alias] must not be used on a "
"[Global] interface operation",
[member.location])
if (member.getExtendedAttribute("Exposed") or
member.getExtendedAttribute("ChromeOnly") or
member.getExtendedAttribute("Pref") or
member.getExtendedAttribute("Func") or
member.getExtendedAttribute("SecureContext")):
raise WebIDLError("[Alias] must not be used on a "
"conditionally exposed operation",
[member.location])
if member.isStatic():
raise WebIDLError("[Alias] must not be used on a "
"static operation",
[member.location])
if member.isIdentifierLess():
raise WebIDLError("[Alias] must not be used on an "
"identifierless operation",
[member.location])
if member.isUnforgeable():
raise WebIDLError("[Alias] must not be used on an "
"[Unforgeable] operation",
[member.location])
checkDuplicateNames(member, alias, "Alias")
# Check that the name of a [BindingAlias] doesn't conflict with an
# interface member.
if member.isAttr():
for bindingAlias in member.bindingAliases:
checkDuplicateNames(member, bindingAlias, "BindingAlias")
# Conditional exposure makes no sense for interfaces with no
# interface object.
# And SecureContext makes sense for interfaces with no interface object,
# since it is also propagated to interface members.
if (self.isExposedConditionally(exclusions=["SecureContext"]) and
not self.hasInterfaceObject()):
raise WebIDLError("Interface with no interface object is "
"exposed conditionally",
[self.location])
# Value iterators are only allowed on interfaces with indexed getters,
# and pair iterators are only allowed on interfaces without indexed
# getters.
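        # E.g. "iterable<long>" (a value iterator) requires an indexed getter
        # and an integer-typed "length" attribute, while a pair iterator such
        # as "iterable<DOMString, long>" forbids an indexed getter.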
if self.isIterable():
iterableDecl = self.maplikeOrSetlikeOrIterable
if iterableDecl.isValueIterator():
if not indexedGetter:
raise WebIDLError("Interface with value iterator does not "
"support indexed properties",
[self.location, iterableDecl.location])
if iterableDecl.valueType != indexedGetter.signatures()[0][0]:
raise WebIDLError("Iterable type does not match indexed "
"getter type",
[iterableDecl.location,
indexedGetter.location])
if not hasLengthAttribute:
raise WebIDLError('Interface with value iterator does not '
'have an integer-typed "length" attribute',
[self.location, iterableDecl.location])
else:
assert iterableDecl.isPairIterator()
if indexedGetter:
raise WebIDLError("Interface with pair iterator supports "
"indexed properties",
[self.location, iterableDecl.location,
indexedGetter.location])
if indexedGetter and not hasLengthAttribute:
raise WebIDLError('Interface with an indexed getter does not have '
'an integer-typed "length" attribute',
[self.location, indexedGetter.location])
def setCallback(self, value):
self._callback = value
def isCallback(self):
return self._callback
def isSingleOperationInterface(self):
assert self.isCallback() or self.isJSImplemented()
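        # E.g. a hypothetical "callback interface Listener { void run(); };"
        # is a single-operation interface: callers may pass a bare function.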
return (
# JS-implemented things should never need the
# this-handling weirdness of single-operation interfaces.
not self.isJSImplemented() and
# Not inheriting from another interface
not self.parent and
# No attributes of any kinds
not any(m.isAttr() for m in self.members) and
# There is at least one regular operation, and all regular
# operations have the same identifier
len(set(m.identifier.name for m in self.members if
m.isMethod() and not m.isStatic())) == 1)
def inheritanceDepth(self):
depth = 0
parent = self.parent
while parent:
depth = depth + 1
parent = parent.parent
return depth
def hasConstants(self):
return any(m.isConst() for m in self.members)
def hasInterfaceObject(self):
if self.isCallback():
return self.hasConstants()
return not hasattr(self, "_noInterfaceObject")
def hasInterfacePrototypeObject(self):
return (not self.isCallback() and not self.isNamespace()
and self.getUserData('hasConcreteDescendant', False))
def addIncludedMixin(self, includedMixin):
assert(isinstance(includedMixin, IDLInterfaceMixin))
self.includedMixins.add(includedMixin)
def getInheritedInterfaces(self):
"""
Returns a list of the interfaces this interface inherits from
(not including this interface itself). The list is in order
from most derived to least derived.
"""
assert(self._finished)
if not self.parent:
return []
parentInterfaces = self.parent.getInheritedInterfaces()
parentInterfaces.insert(0, self.parent)
return parentInterfaces
def findInterfaceLoopPoint(self, otherInterface):
"""
Finds an interface amongst our ancestors that inherits from otherInterface.
If there is no such interface, returns None.
"""
if self.parent:
if self.parent == otherInterface:
return self
loopPoint = self.parent.findInterfaceLoopPoint(otherInterface)
if loopPoint:
return loopPoint
return None
def setNonPartial(self, location, parent, members):
assert not parent or isinstance(parent, IDLIdentifierPlaceholder)
IDLInterfaceOrInterfaceMixinOrNamespace.setNonPartial(self, location, members)
assert not self.parent
self.parent = parent
def getJSImplementation(self):
classId = self.getExtendedAttribute("JSImplementation")
if not classId:
return classId
assert isinstance(classId, list)
assert len(classId) == 1
return classId[0]
def isJSImplemented(self):
return bool(self.getJSImplementation())
def hasProbablyShortLivingWrapper(self):
current = self
while current:
if current.getExtendedAttribute("ProbablyShortLivingWrapper"):
return True
current = current.parent
return False
def hasChildInterfaces(self):
return self._hasChildInterfaces
def isOnGlobalProtoChain(self):
return self._isOnGlobalProtoChain
def _getDependentObjects(self):
deps = set(self.members)
deps.update(self.includedMixins)
if self.parent:
deps.add(self.parent)
return deps
def hasMembersInSlots(self):
return self._ownMembersInSlots != 0
conditionExtendedAttributes = [ "Pref", "ChromeOnly", "Func",
"SecureContext" ]
def isExposedConditionally(self, exclusions=[]):
return any(((not a in exclusions) and self.getExtendedAttribute(a)) for a in self.conditionExtendedAttributes)
class IDLInterface(IDLInterfaceOrNamespace):
def __init__(self, location, parentScope, name, parent, members,
isKnownNonPartial, classNameOverride=None,
toStringTag=None):
IDLInterfaceOrNamespace.__init__(self, location, parentScope, name,
parent, members, isKnownNonPartial,
toStringTag)
self.classNameOverride = classNameOverride
def __str__(self):
return "Interface '%s'" % self.identifier.name
def isInterface(self):
return True
def getClassName(self):
if self.classNameOverride:
return self.classNameOverride
return self.identifier.name
def addExtendedAttributes(self, attrs):
for attr in attrs:
identifier = attr.identifier()
# Special cased attrs
if identifier == "TreatNonCallableAsNull":
raise WebIDLError("TreatNonCallableAsNull cannot be specified on interfaces",
[attr.location, self.location])
if identifier == "TreatNonObjectAsNull":
raise WebIDLError("TreatNonObjectAsNull cannot be specified on interfaces",
[attr.location, self.location])
elif identifier == "NoInterfaceObject":
if not attr.noArguments():
raise WebIDLError("[NoInterfaceObject] must take no arguments",
[attr.location])
self._noInterfaceObject = True
elif identifier == "NamedConstructor":
if not attr.hasValue():
raise WebIDLError("NamedConstructor must either take an identifier or take a named argument list",
[attr.location])
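                # E.g. the HTML spec's "[NamedConstructor=Image(optional
                # unsigned long width, optional unsigned long height)]" on
                # HTMLImageElement exposes a global "Image" constructor.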
args = attr.args() if attr.hasArgs() else []
retType = IDLWrapperType(self.location, self)
method = IDLConstructor(attr.location, args, attr.value())
method.reallyInit(self)
# Named constructors are always assumed to be able to
# throw (since there's no way to indicate otherwise).
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("Throws",))])
# We need to detect conflicts for NamedConstructors across
# interfaces. We first call resolve on the parentScope,
# which will merge all NamedConstructors with the same
                # identifier across interfaces as overloads.
method.resolve(self.parentScope)
# Then we look up the identifier on the parentScope. If the
# result is the same as the method we're adding then it
# hasn't been added as an overload and it's the first time
# we've encountered a NamedConstructor with that identifier.
# If the result is not the same as the method we're adding
# then it has been added as an overload and we need to check
# whether the result is actually one of our existing
# NamedConstructors.
newMethod = self.parentScope.lookupIdentifier(method.identifier)
if newMethod == method:
self.namedConstructors.append(method)
elif newMethod not in self.namedConstructors:
raise WebIDLError("NamedConstructor conflicts with a "
"NamedConstructor of a different interface",
[method.location, newMethod.location])
elif (identifier == "ExceptionClass"):
if not attr.noArguments():
raise WebIDLError("[ExceptionClass] must take no arguments",
[attr.location])
if self.parent:
raise WebIDLError("[ExceptionClass] must not be specified on "
"an interface with inherited interfaces",
[attr.location, self.location])
elif identifier == "Global":
if attr.hasValue():
self.globalNames = [attr.value()]
elif attr.hasArgs():
self.globalNames = attr.args()
else:
self.globalNames = [self.identifier.name]
self.parentScope.addIfaceGlobalNames(self.identifier.name,
self.globalNames)
self._isOnGlobalProtoChain = True
elif identifier == "LegacyWindowAlias":
if attr.hasValue():
self.legacyWindowAliases = [attr.value()]
elif attr.hasArgs():
self.legacyWindowAliases = attr.args()
else:
raise WebIDLError("[%s] must either take an identifier "
"or take an identifier list" % identifier,
[attr.location])
for alias in self.legacyWindowAliases:
unresolved = IDLUnresolvedIdentifier(attr.location, alias)
IDLObjectWithIdentifier(attr.location, self.parentScope, unresolved)
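                # E.g. "[LegacyWindowAlias=webkitURL]" on the URL interface
                # makes "webkitURL" resolve to URL's interface object on
                # Window.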
elif identifier == "SecureContext":
if not attr.noArguments():
raise WebIDLError("[%s] must take no arguments" % identifier,
[attr.location])
# This gets propagated to all our members.
for member in self.members:
if member.getExtendedAttribute("SecureContext"):
raise WebIDLError("[SecureContext] specified on both "
"an interface member and on the "
"interface itself",
[member.location, attr.location])
member.addExtendedAttributes([attr])
elif (identifier == "NeedResolve" or
identifier == "OverrideBuiltins" or
identifier == "ChromeOnly" or
identifier == "Unforgeable" or
identifier == "LegacyEventInit" or
identifier == "ProbablyShortLivingWrapper" or
identifier == "LegacyUnenumerableNamedProperties" or
identifier == "RunConstructorInCallerCompartment" or
identifier == "WantsEventListenerHooks" or
identifier == "Serializable" or
identifier == "Abstract" or
identifier == "Inline"):
# Known extended attributes that do not take values
if not attr.noArguments():
raise WebIDLError("[%s] must take no arguments" % identifier,
[attr.location])
elif identifier == "Exposed":
convertExposedAttrToGlobalNameSet(attr,
self._exposureGlobalNames)
elif (identifier == "Pref" or
identifier == "JSImplementation" or
identifier == "HeaderFile" or
identifier == "Func" or
identifier == "Deprecated"):
# Known extended attributes that take a string value
if not attr.hasValue():
raise WebIDLError("[%s] must have a value" % identifier,
[attr.location])
elif identifier == "InstrumentedProps":
# Known extended attributes that take a list
if not attr.hasArgs():
raise WebIDLError("[%s] must have arguments" % identifier,
[attr.location])
else:
raise WebIDLError("Unknown extended attribute %s on interface" % identifier,
[attr.location])
attrlist = attr.listValue()
self._extendedAttrDict[identifier] = attrlist if len(attrlist) else True
def validate(self):
IDLInterfaceOrNamespace.validate(self)
if self.parent and self.isSerializable() and not self.parent.isSerializable():
raise WebIDLError(
"Serializable interface inherits from non-serializable "
"interface. Per spec, that means the object should not be "
"serializable, so chances are someone made a mistake here "
"somewhere.",
[self.location, self.parent.location])
def isSerializable(self):
return self.getExtendedAttribute("Serializable")
def setNonPartial(self, location, parent, members):
# Before we do anything else, finish initializing any constructors that
# might be in "members", so we don't have partially-initialized objects
        # hanging around. We couldn't do it before now because we needed
        # to have the IDLInterface on hand to properly set the return type.
for member in members:
if isinstance(member, IDLConstructor):
member.reallyInit(self)
IDLInterfaceOrNamespace.setNonPartial(self, location, parent, members)
class IDLNamespace(IDLInterfaceOrNamespace):
def __init__(self, location, parentScope, name, members, isKnownNonPartial):
IDLInterfaceOrNamespace.__init__(self, location, parentScope, name,
None, members, isKnownNonPartial,
toStringTag=None)
def __str__(self):
return "Namespace '%s'" % self.identifier.name
def isNamespace(self):
return True
def addExtendedAttributes(self, attrs):
# The set of things namespaces support is small enough it's simpler
# to factor out into a separate method than it is to sprinkle
# isNamespace() checks all through
# IDLInterfaceOrNamespace.addExtendedAttributes.
for attr in attrs:
identifier = attr.identifier()
if identifier == "Exposed":
convertExposedAttrToGlobalNameSet(attr,
self._exposureGlobalNames)
elif identifier == "ClassString":
# Takes a string value to override the default "Object" if
# desired.
if not attr.hasValue():
raise WebIDLError("[%s] must have a value" % identifier,
[attr.location])
elif (identifier == "ProtoObjectHack" or
identifier == "ChromeOnly"):
if not attr.noArguments():
raise WebIDLError("[%s] must not have arguments" % identifier,
[attr.location])
elif (identifier == "Pref" or
identifier == "HeaderFile" or
identifier == "Func"):
# Known extended attributes that take a string value
if not attr.hasValue():
raise WebIDLError("[%s] must have a value" % identifier,
[attr.location])
else:
raise WebIDLError("Unknown extended attribute %s on namespace" %
identifier,
[attr.location])
attrlist = attr.listValue()
self._extendedAttrDict[identifier] = attrlist if len(attrlist) else True
def isSerializable(self):
return False
class IDLDictionary(IDLObjectWithScope):
def __init__(self, location, parentScope, name, parent, members):
assert isinstance(parentScope, IDLScope)
assert isinstance(name, IDLUnresolvedIdentifier)
assert not parent or isinstance(parent, IDLIdentifierPlaceholder)
self.parent = parent
self._finished = False
self.members = list(members)
self._partialDictionaries = []
self._extendedAttrDict = {}
self.needsConversionToJS = False
self.needsConversionFromJS = False
IDLObjectWithScope.__init__(self, location, parentScope, name)
def __str__(self):
return "Dictionary '%s'" % self.identifier.name
def isDictionary(self):
return True
def canBeEmpty(self):
"""
Returns true if this dictionary can be empty (that is, it has no
required members and neither do any of its ancestors).
"""
return (all(member.optional for member in self.members) and
(not self.parent or self.parent.canBeEmpty()))
def finish(self, scope):
if self._finished:
return
self._finished = True
if self.parent:
assert isinstance(self.parent, IDLIdentifierPlaceholder)
oldParent = self.parent
self.parent = self.parent.finish(scope)
if not isinstance(self.parent, IDLDictionary):
raise WebIDLError("Dictionary %s has parent that is not a dictionary" %
self.identifier.name,
[oldParent.location, self.parent.location])
# Make sure the parent resolves all its members before we start
# looking at them.
self.parent.finish(scope)
# Now go ahead and merge in our partial dictionaries.
for partial in self._partialDictionaries:
partial.finish(scope)
self.members.extend(partial.members)
for member in self.members:
member.resolve(self)
if not member.isComplete():
member.complete(scope)
assert member.type.isComplete()
# Members of a dictionary are sorted in lexicographic order
self.members.sort(key=lambda x: x.identifier.name)
inheritedMembers = []
ancestor = self.parent
while ancestor:
if ancestor == self:
raise WebIDLError("Dictionary %s has itself as an ancestor" %
self.identifier.name,
[self.identifier.location])
inheritedMembers.extend(ancestor.members)
ancestor = ancestor.parent
# Catch name duplication
for inheritedMember in inheritedMembers:
for member in self.members:
if member.identifier.name == inheritedMember.identifier.name:
raise WebIDLError("Dictionary %s has two members with name %s" %
(self.identifier.name, member.identifier.name),
[member.location, inheritedMember.location])
def validate(self):
def typeContainsDictionary(memberType, dictionary):
"""
Returns a tuple whose:
- First element is a Boolean value indicating whether
memberType contains dictionary.
- Second element is:
A list of locations that leads from the type that was passed in
the memberType argument, to the dictionary being validated,
if the boolean value in the first element is True.
None, if the boolean value in the first element is False.
"""
if (memberType.nullable() or
memberType.isSequence() or
memberType.isRecord()):
return typeContainsDictionary(memberType.inner, dictionary)
if memberType.isDictionary():
if memberType.inner == dictionary:
return (True, [memberType.location])
(contains, locations) = dictionaryContainsDictionary(memberType.inner,
dictionary)
if contains:
return (True, [memberType.location] + locations)
if memberType.isUnion():
for member in memberType.flatMemberTypes:
(contains, locations) = typeContainsDictionary(member, dictionary)
if contains:
return (True, locations)
return (False, None)
def dictionaryContainsDictionary(dictMember, dictionary):
for member in dictMember.members:
(contains, locations) = typeContainsDictionary(member.type, dictionary)
if contains:
return (True, [member.location] + locations)
if dictMember.parent:
if dictMember.parent == dictionary:
return (True, [dictMember.location])
else:
(contains, locations) = dictionaryContainsDictionary(dictMember.parent, dictionary)
if contains:
return (True, [dictMember.location] + locations)
return (False, None)
for member in self.members:
if member.type.isDictionary() and member.type.nullable():
raise WebIDLError("Dictionary %s has member with nullable "
"dictionary type" % self.identifier.name,
[member.location])
(contains, locations) = typeContainsDictionary(member.type, self)
if contains:
raise WebIDLError("Dictionary %s has member with itself as type." %
self.identifier.name,
[member.location] + locations)
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
def addExtendedAttributes(self, attrs):
for attr in attrs:
identifier = attr.identifier()
if (identifier == "GenerateInitFromJSON" or
identifier == "GenerateInit"):
if not attr.noArguments():
raise WebIDLError("[%s] must not have arguments" % identifier,
[attr.location])
self.needsConversionFromJS = True
elif (identifier == "GenerateConversionToJS" or
identifier == "GenerateToJSON"):
if not attr.noArguments():
raise WebIDLError("[%s] must not have arguments" % identifier,
[attr.location])
# ToJSON methods require to-JS conversion, because we
# implement ToJSON by converting to a JS object and
# then using JSON.stringify.
self.needsConversionToJS = True
else:
raise WebIDLError("[%s] extended attribute not allowed on "
"dictionaries" % identifier,
[attr.location])
self._extendedAttrDict[identifier] = True
def _getDependentObjects(self):
deps = set(self.members)
if (self.parent):
deps.add(self.parent)
return deps
def addPartialDictionary(self, partial):
assert self.identifier.name == partial.identifier.name
self._partialDictionaries.append(partial)
class IDLEnum(IDLObjectWithIdentifier):
def __init__(self, location, parentScope, name, values):
assert isinstance(parentScope, IDLScope)
assert isinstance(name, IDLUnresolvedIdentifier)
if len(values) != len(set(values)):
raise WebIDLError("Enum %s has multiple identical strings" % name.name,
[location])
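        # E.g. a hypothetical 'enum Mode { "open", "closed", "open" };' is
        # rejected here because "open" appears twice.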
IDLObjectWithIdentifier.__init__(self, location, parentScope, name)
self._values = values
def values(self):
return self._values
def finish(self, scope):
pass
def validate(self):
pass
def isEnum(self):
return True
def addExtendedAttributes(self, attrs):
if len(attrs) != 0:
raise WebIDLError("There are no extended attributes that are "
"allowed on enums",
[attrs[0].location, self.location])
def _getDependentObjects(self):
return set()
class IDLType(IDLObject):
Tags = enum(
# The integer types
'int8',
'uint8',
'int16',
'uint16',
'int32',
'uint32',
'int64',
'uint64',
# Additional primitive types
'bool',
'unrestricted_float',
'float',
'unrestricted_double',
# "double" last primitive type to match IDLBuiltinType
'double',
# Other types
'any',
'domstring',
'bytestring',
'usvstring',
'utf8string',
'jsstring',
'object',
'void',
# Funny stuff
'interface',
'dictionary',
'enum',
'callback',
'union',
'sequence',
'record',
'promise',
)
def __init__(self, location, name):
IDLObject.__init__(self, location)
self.name = name
self.builtin = False
self.treatNullAsEmpty = False
self._clamp = False
self._enforceRange = False
self._allowShared = False
self._extendedAttrDict = {}
def __eq__(self, other):
return (other and self.builtin == other.builtin and self.name == other.name and
self._clamp == other.hasClamp() and self._enforceRange == other.hasEnforceRange() and
self.treatNullAsEmpty == other.treatNullAsEmpty and
self._allowShared == other.hasAllowShared())
def __ne__(self, other):
return not self == other
def __str__(self):
return str(self.name)
def prettyName(self):
"""
A name that looks like what this type is named in the IDL spec. By default
this is just our .name, but types that have more interesting spec
representations should override this.
"""
return str(self.name)
def isType(self):
return True
def nullable(self):
return False
def isPrimitive(self):
return False
def isBoolean(self):
return False
def isNumeric(self):
return False
def isString(self):
return False
def isByteString(self):
return False
def isDOMString(self):
return False
def isUSVString(self):
return False
def isUTF8String(self):
return False
def isJSString(self):
return False
def isVoid(self):
return self.name == "Void"
def isSequence(self):
return False
def isRecord(self):
return False
def isReadableStream(self):
return False
def isArrayBuffer(self):
return False
def isArrayBufferView(self):
return False
def isTypedArray(self):
return False
def isBufferSource(self):
return self.isArrayBuffer() or self.isArrayBufferView() or self.isTypedArray()
def isCallbackInterface(self):
return False
def isNonCallbackInterface(self):
return False
def isGeckoInterface(self):
""" Returns a boolean indicating whether this type is an 'interface'
type that is implemented in Gecko. At the moment, this returns
true for all interface types that are not types from the TypedArray
spec."""
return self.isInterface() and not self.isSpiderMonkeyInterface()
def isSpiderMonkeyInterface(self):
""" Returns a boolean indicating whether this type is an 'interface'
type that is implemented in SpiderMonkey. """
return self.isInterface() and (self.isBufferSource() or
self.isReadableStream())
def isDictionary(self):
return False
def isInterface(self):
return False
def isAny(self):
return self.tag() == IDLType.Tags.any
def isObject(self):
return self.tag() == IDLType.Tags.object
def isPromise(self):
return False
def isComplete(self):
return True
def includesRestrictedFloat(self):
return False
def isFloat(self):
return False
def isUnrestricted(self):
# Should only call this on float types
assert self.isFloat()
def isJSONType(self):
return False
def hasClamp(self):
return self._clamp
def hasEnforceRange(self):
return self._enforceRange
def hasAllowShared(self):
return self._allowShared
def tag(self):
assert False # Override me!
def treatNonCallableAsNull(self):
assert self.tag() == IDLType.Tags.callback
return self.nullable() and self.inner.callback._treatNonCallableAsNull
def treatNonObjectAsNull(self):
assert self.tag() == IDLType.Tags.callback
return self.nullable() and self.inner.callback._treatNonObjectAsNull
def withExtendedAttributes(self, attrs):
if len(attrs) > 0:
raise WebIDLError("Extended attributes on types only supported for builtins",
[attrs[0].location, self.location])
return self
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
def resolveType(self, parentScope):
pass
def unroll(self):
return self
def isDistinguishableFrom(self, other):
raise TypeError("Can't tell whether a generic type is or is not "
"distinguishable from other things")
def isExposedInAllOf(self, exposureSet):
return True
class IDLUnresolvedType(IDLType):
"""
    Unresolved types are types whose name has not yet been resolved to an
    actual IDL type (e.g. a typedef, callback, or interface type).
"""
def __init__(self, location, name, attrs=[]):
IDLType.__init__(self, location, name)
self.extraTypeAttributes = attrs
def isComplete(self):
return False
def complete(self, scope):
obj = None
try:
obj = scope._lookupIdentifier(self.name)
        except Exception:
raise WebIDLError("Unresolved type '%s'." % self.name,
[self.location])
assert obj
        assert not obj.isType(), "Unexpected type %s" % obj
if obj.isTypedef():
assert self.name.name == obj.identifier.name
typedefType = IDLTypedefType(self.location, obj.innerType,
obj.identifier)
assert not typedefType.isComplete()
return typedefType.complete(scope).withExtendedAttributes(self.extraTypeAttributes)
elif obj.isCallback() and not obj.isInterface():
assert self.name.name == obj.identifier.name
return IDLCallbackType(obj.location, obj)
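        # resolve() is called here purely for its side effect of resolving
        # our identifier in the given scope; its result is not needed.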
        self.name.resolve(scope, None)
return IDLWrapperType(self.location, obj)
def withExtendedAttributes(self, attrs):
return IDLUnresolvedType(self.location, self.name, attrs)
def isDistinguishableFrom(self, other):
raise TypeError("Can't tell whether an unresolved type is or is not "
"distinguishable from other things")
class IDLParametrizedType(IDLType):
def __init__(self, location, name, innerType):
IDLType.__init__(self, location, name)
self.builtin = False
self.inner = innerType
def includesRestrictedFloat(self):
return self.inner.includesRestrictedFloat()
def resolveType(self, parentScope):
assert isinstance(parentScope, IDLScope)
self.inner.resolveType(parentScope)
def isComplete(self):
return self.inner.isComplete()
def unroll(self):
return self.inner.unroll()
def _getDependentObjects(self):
return self.inner._getDependentObjects()
class IDLNullableType(IDLParametrizedType):
def __init__(self, location, innerType):
assert not innerType.isVoid()
        assert innerType != BuiltinTypes[IDLBuiltinType.Types.any]
IDLParametrizedType.__init__(self, location, None, innerType)
def __eq__(self, other):
return isinstance(other, IDLNullableType) and self.inner == other.inner
def __hash__(self):
return hash(self.inner)
def __str__(self):
return self.inner.__str__() + "OrNull"
def prettyName(self):
return self.inner.prettyName() + "?"
def nullable(self):
return True
def isCallback(self):
return self.inner.isCallback()
def isPrimitive(self):
return self.inner.isPrimitive()
def isBoolean(self):
return self.inner.isBoolean()
def isNumeric(self):
return self.inner.isNumeric()
def isString(self):
return self.inner.isString()
def isByteString(self):
return self.inner.isByteString()
def isDOMString(self):
return self.inner.isDOMString()
def isUSVString(self):
return self.inner.isUSVString()
def isUTF8String(self):
return self.inner.isUTF8String()
def isJSString(self):
return self.inner.isJSString()
def isFloat(self):
return self.inner.isFloat()
def isUnrestricted(self):
return self.inner.isUnrestricted()
def isInteger(self):
return self.inner.isInteger()
def isVoid(self):
return False
def isSequence(self):
return self.inner.isSequence()
def isRecord(self):
return self.inner.isRecord()
def isReadableStream(self):
return self.inner.isReadableStream()
def isArrayBuffer(self):
return self.inner.isArrayBuffer()
def isArrayBufferView(self):
return self.inner.isArrayBufferView()
def isTypedArray(self):
return self.inner.isTypedArray()
def isDictionary(self):
return self.inner.isDictionary()
def isInterface(self):
return self.inner.isInterface()
def isPromise(self):
# There is no such thing as a nullable Promise.
assert not self.inner.isPromise()
return False
def isCallbackInterface(self):
return self.inner.isCallbackInterface()
def isNonCallbackInterface(self):
return self.inner.isNonCallbackInterface()
def isEnum(self):
return self.inner.isEnum()
def isUnion(self):
return self.inner.isUnion()
def isJSONType(self):
return self.inner.isJSONType()
def hasClamp(self):
return self.inner.hasClamp()
def hasEnforceRange(self):
return self.inner.hasEnforceRange()
def hasAllowShared(self):
return self.inner.hasAllowShared()
def isComplete(self):
return self.name is not None
def tag(self):
return self.inner.tag()
def complete(self, scope):
if not self.inner.isComplete():
self.inner = self.inner.complete(scope)
assert self.inner.isComplete()
if self.inner.nullable():
raise WebIDLError("The inner type of a nullable type must not be "
"a nullable type",
[self.location, self.inner.location])
if self.inner.isUnion():
if self.inner.hasNullableType:
raise WebIDLError("The inner type of a nullable type must not "
"be a union type that itself has a nullable "
"type as a member type", [self.location])
if self.inner.isDOMString():
if self.inner.treatNullAsEmpty:
raise WebIDLError("[TreatNullAs] not allowed on a nullable DOMString",
[self.location, self.inner.location])
self.name = self.inner.name + "OrNull"
return self
def isDistinguishableFrom(self, other):
if (other.nullable() or
other.isDictionary() or
(other.isUnion() and
(other.hasNullableType or other.hasDictionaryType()))):
# Can't tell which type null should become
return False
return self.inner.isDistinguishableFrom(other)
def withExtendedAttributes(self, attrs):
# See https://github.com/heycam/webidl/issues/827#issuecomment-565131350
# Allowing extended attributes to apply to a nullable type is an intermediate solution.
# A potential longer term solution is to introduce a null type and get rid of nullables.
# For example, we could do `([Clamp] long or null) foo` in the future.
return IDLNullableType(self.location, self.inner.withExtendedAttributes(attrs))
class IDLSequenceType(IDLParametrizedType):
def __init__(self, location, parameterType):
assert not parameterType.isVoid()
IDLParametrizedType.__init__(self, location, parameterType.name, parameterType)
# Need to set self.name up front if our inner type is already complete,
# since in that case our .complete() won't be called.
if self.inner.isComplete():
self.name = self.inner.name + "Sequence"
def __eq__(self, other):
return isinstance(other, IDLSequenceType) and self.inner == other.inner
def __hash__(self):
return hash(self.inner)
def __str__(self):
return self.inner.__str__() + "Sequence"
def prettyName(self):
return "sequence<%s>" % self.inner.prettyName()
def nullable(self):
return False
def isPrimitive(self):
return False
def isString(self):
return False
def isByteString(self):
return False
def isDOMString(self):
return False
def isUSVString(self):
return False
def isUTF8String(self):
return False
def isJSString(self):
return False
def isVoid(self):
return False
def isSequence(self):
return True
def isDictionary(self):
return False
def isInterface(self):
return False
def isEnum(self):
return False
def isJSONType(self):
return self.inner.isJSONType()
def tag(self):
return IDLType.Tags.sequence
def complete(self, scope):
self.inner = self.inner.complete(scope)
self.name = self.inner.name + "Sequence"
return self
def isDistinguishableFrom(self, other):
if other.isPromise():
return False
if other.isUnion():
# Just forward to the union; it'll deal
return other.isDistinguishableFrom(self)
return (other.isPrimitive() or other.isString() or other.isEnum() or
other.isInterface() or other.isDictionary() or
other.isCallback() or other.isRecord())
class IDLRecordType(IDLParametrizedType):
def __init__(self, location, keyType, valueType):
assert keyType.isString()
assert keyType.isComplete()
assert not valueType.isVoid()
IDLParametrizedType.__init__(self, location, valueType.name, valueType)
self.keyType = keyType
# Need to set self.name up front if our inner type is already complete,
# since in that case our .complete() won't be called.
if self.inner.isComplete():
self.name = self.keyType.name + self.inner.name + "Record"
    def __eq__(self, other):
        return (isinstance(other, IDLRecordType) and
                self.keyType == other.keyType and self.inner == other.inner)
def __str__(self):
return self.keyType.__str__() + self.inner.__str__() + "Record"
def prettyName(self):
return "record<%s, %s>" % (self.keyType.prettyName(), self.inner.prettyName())
def isRecord(self):
return True
def isJSONType(self):
return self.inner.isJSONType()
def tag(self):
return IDLType.Tags.record
def complete(self, scope):
self.inner = self.inner.complete(scope)
self.name = self.keyType.name + self.inner.name + "Record"
return self
def unroll(self):
# We do not unroll our inner. Just stop at ourselves. That
# lets us add headers for both ourselves and our inner as
# needed.
return self
def isDistinguishableFrom(self, other):
if other.isPromise():
return False
if other.isUnion():
# Just forward to the union; it'll deal
return other.isDistinguishableFrom(self)
return (other.isPrimitive() or other.isString() or other.isEnum() or
other.isNonCallbackInterface() or other.isSequence())
def isExposedInAllOf(self, exposureSet):
return self.inner.unroll().isExposedInAllOf(exposureSet)
class IDLUnionType(IDLType):
def __init__(self, location, memberTypes):
IDLType.__init__(self, location, "")
self.memberTypes = memberTypes
self.hasNullableType = False
self._dictionaryType = None
self.flatMemberTypes = None
self.builtin = False
def __eq__(self, other):
return isinstance(other, IDLUnionType) and self.memberTypes == other.memberTypes
def __hash__(self):
assert self.isComplete()
return self.name.__hash__()
def prettyName(self):
return "(" + " or ".join(m.prettyName() for m in self.memberTypes) + ")"
def isVoid(self):
return False
def isUnion(self):
return True
def isJSONType(self):
return all(m.isJSONType() for m in self.memberTypes)
def includesRestrictedFloat(self):
return any(t.includesRestrictedFloat() for t in self.memberTypes)
def tag(self):
return IDLType.Tags.union
def resolveType(self, parentScope):
assert isinstance(parentScope, IDLScope)
for t in self.memberTypes:
t.resolveType(parentScope)
def isComplete(self):
return self.flatMemberTypes is not None
def complete(self, scope):
def typeName(type):
if isinstance(type, IDLNullableType):
return typeName(type.inner) + "OrNull"
if isinstance(type, IDLWrapperType):
return typeName(type._identifier.object())
if isinstance(type, IDLObjectWithIdentifier):
return typeName(type.identifier)
if isinstance(type, IDLBuiltinType) and type.hasAllowShared():
assert type.isBufferSource()
return "MaybeShared" + type.name
return type.name
for (i, type) in enumerate(self.memberTypes):
            # Exclude typedefs because if given "typedef (B or C) Test",
            # we want a union (A or Test) to be named "AOrTest", not "AOrBOrC".
if not type.isComplete() and not isinstance(type, IDLTypedefType):
self.memberTypes[i] = type.complete(scope)
self.name = "Or".join(typeName(type) for type in self.memberTypes)
# We do this again to complete the typedef types
for (i, type) in enumerate(self.memberTypes):
if not type.isComplete():
self.memberTypes[i] = type.complete(scope)
self.flatMemberTypes = list(self.memberTypes)
i = 0
while i < len(self.flatMemberTypes):
if self.flatMemberTypes[i].nullable():
if self.hasNullableType:
raise WebIDLError("Can't have more than one nullable types in a union",
[nullableType.location, self.flatMemberTypes[i].location])
if self.hasDictionaryType():
raise WebIDLError("Can't have a nullable type and a "
"dictionary type in a union",
[self._dictionaryType.location,
self.flatMemberTypes[i].location])
self.hasNullableType = True
nullableType = self.flatMemberTypes[i]
self.flatMemberTypes[i] = self.flatMemberTypes[i].inner
continue
if self.flatMemberTypes[i].isDictionary():
if self.hasNullableType:
raise WebIDLError("Can't have a nullable type and a "
"dictionary type in a union",
[nullableType.location,
self.flatMemberTypes[i].location])
self._dictionaryType = self.flatMemberTypes[i]
elif self.flatMemberTypes[i].isUnion():
self.flatMemberTypes[i:i + 1] = self.flatMemberTypes[i].memberTypes
continue
i += 1
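        # At this point a union such as "((long or DOMString)? or Node)" has
        # been flattened to the member types [long, DOMString, Node], with
        # hasNullableType set (hypothetical member types, for illustration).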
for (i, t) in enumerate(self.flatMemberTypes[:-1]):
for u in self.flatMemberTypes[i + 1:]:
if not t.isDistinguishableFrom(u):
raise WebIDLError("Flat member types of a union should be "
"distinguishable, " + str(t) + " is not "
"distinguishable from " + str(u),
[self.location, t.location, u.location])
return self
def isDistinguishableFrom(self, other):
if self.hasNullableType and other.nullable():
# Can't tell which type null should become
return False
if other.isUnion():
otherTypes = other.unroll().memberTypes
else:
otherTypes = [other]
# For every type in otherTypes, check that it's distinguishable from
# every type in our types
for u in otherTypes:
if any(not t.isDistinguishableFrom(u) for t in self.memberTypes):
return False
return True
def isExposedInAllOf(self, exposureSet):
        # We could have different member types in different globals.
        # Just make sure that each thing in exposureSet has one of our
        # member types exposed in it.
for globalName in exposureSet:
if not any(t.unroll().isExposedInAllOf(set([globalName])) for t
in self.flatMemberTypes):
return False
return True
def hasDictionaryType(self):
return self._dictionaryType is not None
def hasPossiblyEmptyDictionaryType(self):
return (self._dictionaryType is not None and
self._dictionaryType.inner.canBeEmpty())
def _getDependentObjects(self):
return set(self.memberTypes)
class IDLTypedefType(IDLType):
def __init__(self, location, innerType, name):
IDLType.__init__(self, location, name)
self.inner = innerType
self.builtin = False
def __eq__(self, other):
return isinstance(other, IDLTypedefType) and self.inner == other.inner
def __str__(self):
return self.name
def nullable(self):
return self.inner.nullable()
def isPrimitive(self):
return self.inner.isPrimitive()
def isBoolean(self):
return self.inner.isBoolean()
def isNumeric(self):
return self.inner.isNumeric()
def isString(self):
return self.inner.isString()
def isByteString(self):
return self.inner.isByteString()
def isDOMString(self):
return self.inner.isDOMString()
def isUSVString(self):
return self.inner.isUSVString()
def isUTF8String(self):
return self.inner.isUTF8String()
def isJSString(self):
return self.inner.isJSString()
def isVoid(self):
return self.inner.isVoid()
def isJSONType(self):
return self.inner.isJSONType()
def isSequence(self):
return self.inner.isSequence()
def isRecord(self):
return self.inner.isRecord()
def isReadableStream(self):
return self.inner.isReadableStream()
def isDictionary(self):
return self.inner.isDictionary()
def isArrayBuffer(self):
return self.inner.isArrayBuffer()
def isArrayBufferView(self):
return self.inner.isArrayBufferView()
def isTypedArray(self):
return self.inner.isTypedArray()
def isInterface(self):
return self.inner.isInterface()
def isCallbackInterface(self):
return self.inner.isCallbackInterface()
def isNonCallbackInterface(self):
return self.inner.isNonCallbackInterface()
def isComplete(self):
return False
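    # Completing a typedef type replaces it with its completed inner type;
    # e.g. given "typedef unsigned long GLuint;" (as in WebGL), a use of
    # GLuint completes to the "unsigned long" builtin.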
def complete(self, parentScope):
if not self.inner.isComplete():
self.inner = self.inner.complete(parentScope)
assert self.inner.isComplete()
return self.inner
# Do we need a resolveType impl? I don't think it's particularly useful....
def tag(self):
return self.inner.tag()
def unroll(self):
return self.inner.unroll()
def isDistinguishableFrom(self, other):
return self.inner.isDistinguishableFrom(other)
def _getDependentObjects(self):
return self.inner._getDependentObjects()
def withExtendedAttributes(self, attrs):
return IDLTypedefType(self.location, self.inner.withExtendedAttributes(attrs), self.name)
class IDLTypedef(IDLObjectWithIdentifier):
def __init__(self, location, parentScope, innerType, name):
# Set self.innerType first, because IDLObjectWithIdentifier.__init__
# will call our __str__, which wants to use it.
self.innerType = innerType
identifier = IDLUnresolvedIdentifier(location, name)
IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
def __str__(self):
return "Typedef %s %s" % (self.identifier.name, self.innerType)
def finish(self, parentScope):
if not self.innerType.isComplete():
self.innerType = self.innerType.complete(parentScope)
def validate(self):
pass
def isTypedef(self):
return True
def addExtendedAttributes(self, attrs):
if len(attrs) != 0:
raise WebIDLError("There are no extended attributes that are "
"allowed on typedefs",
[attrs[0].location, self.location])
def _getDependentObjects(self):
return self.innerType._getDependentObjects()
class IDLWrapperType(IDLType):
def __init__(self, location, inner):
IDLType.__init__(self, location, inner.identifier.name)
self.inner = inner
self._identifier = inner.identifier
self.builtin = False
def __eq__(self, other):
return (isinstance(other, IDLWrapperType) and
self._identifier == other._identifier and
self.builtin == other.builtin)
def __hash__(self):
return hash((self._identifier, self.builtin))
def __str__(self):
return str(self.name) + " (Wrapper)"
def nullable(self):
return False
def isPrimitive(self):
return False
def isString(self):
return False
def isByteString(self):
return False
def isDOMString(self):
return False
def isUSVString(self):
return False
def isUTF8String(self):
return False
def isJSString(self):
return False
def isVoid(self):
return False
def isSequence(self):
return False
def isDictionary(self):
return isinstance(self.inner, IDLDictionary)
def isInterface(self):
return (isinstance(self.inner, IDLInterface) or
isinstance(self.inner, IDLExternalInterface))
def isCallbackInterface(self):
return self.isInterface() and self.inner.isCallback()
def isNonCallbackInterface(self):
return self.isInterface() and not self.inner.isCallback()
def isEnum(self):
return isinstance(self.inner, IDLEnum)
def isJSONType(self):
if self.isInterface():
if self.inner.isExternal():
return False
iface = self.inner
while iface:
if any(m.isMethod() and m.isToJSON() for m in iface.members):
return True
iface = iface.parent
return False
elif self.isEnum():
return True
elif self.isDictionary():
dictionary = self.inner
while dictionary:
if not all(m.type.isJSONType() for m in dictionary.members):
return False
dictionary = dictionary.parent
return True
else:
raise WebIDLError("IDLWrapperType wraps type %s that we don't know if "
"is serializable" % type(self.inner), [self.location])
def resolveType(self, parentScope):
assert isinstance(parentScope, IDLScope)
self.inner.resolve(parentScope)
def isComplete(self):
return True
def tag(self):
if self.isInterface():
return IDLType.Tags.interface
elif self.isEnum():
return IDLType.Tags.enum
elif self.isDictionary():
return IDLType.Tags.dictionary
else:
assert False
def isDistinguishableFrom(self, other):
if other.isPromise():
return False
if other.isUnion():
# Just forward to the union; it'll deal
return other.isDistinguishableFrom(self)
assert self.isInterface() or self.isEnum() or self.isDictionary()
if self.isEnum():
return (other.isPrimitive() or other.isInterface() or other.isObject() or
other.isCallback() or other.isDictionary() or
other.isSequence() or other.isRecord())
if self.isDictionary() and other.nullable():
return False
if (other.isPrimitive() or other.isString() or other.isEnum() or
other.isSequence()):
return True
if self.isDictionary():
return other.isNonCallbackInterface()
assert self.isInterface()
if other.isInterface():
if other.isSpiderMonkeyInterface():
# Just let |other| handle things
return other.isDistinguishableFrom(self)
assert self.isGeckoInterface() and other.isGeckoInterface()
if self.inner.isExternal() or other.unroll().inner.isExternal():
return self != other
return (len(self.inner.interfacesBasedOnSelf &
other.unroll().inner.interfacesBasedOnSelf) == 0 and
(self.isNonCallbackInterface() or
other.isNonCallbackInterface()))
if (other.isDictionary() or other.isCallback() or
other.isRecord()):
return self.isNonCallbackInterface()
# Not much else |other| can be
assert other.isObject()
return False
def isExposedInAllOf(self, exposureSet):
if not self.isInterface():
return True
iface = self.inner
if iface.isExternal():
# Let's say true, so we don't have to implement exposure mixins on
# external interfaces and sprinkle [Exposed=Window] on every single
# external interface declaration.
return True
return iface.exposureSet.issuperset(exposureSet)
def _getDependentObjects(self):
# NB: The codegen for an interface type depends on
# a) That the identifier is in fact an interface (as opposed to
# a dictionary or something else).
# b) The native type of the interface.
# If we depend on the interface object we will also depend on
# anything the interface depends on which is undesirable. We
# considered implementing a dependency just on the interface type
# file, but then every modification to an interface would cause this
# to be regenerated which is still undesirable. We decided not to
# depend on anything, reasoning that:
# 1) Changing the concrete type of the interface requires modifying
# Bindings.conf, which is still a global dependency.
# 2) Changing an interface to a dictionary (or vice versa) with the
# same identifier should be incredibly rare.
#
# On the other hand, if our type is a dictionary, we should
# depend on it, because the member types of a dictionary
# affect whether a method taking the dictionary as an argument
# takes a JSContext* argument or not.
if self.isDictionary():
return set([self.inner])
return set()
class IDLPromiseType(IDLParametrizedType):
def __init__(self, location, innerType):
IDLParametrizedType.__init__(self, location, "Promise", innerType)
def __eq__(self, other):
return (isinstance(other, IDLPromiseType) and
self.promiseInnerType() == other.promiseInnerType())
def __str__(self):
return self.inner.__str__() + "Promise"
def prettyName(self):
return "Promise<%s>" % self.inner.prettyName()
def isPromise(self):
return True
def promiseInnerType(self):
return self.inner
def tag(self):
return IDLType.Tags.promise
def complete(self, scope):
self.inner = self.promiseInnerType().complete(scope)
return self
def unroll(self):
# We do not unroll our inner. Just stop at ourselves. That
# lets us add headers for both ourselves and our inner as
# needed.
return self
def isDistinguishableFrom(self, other):
# Promises are not distinguishable from anything.
return False
def isExposedInAllOf(self, exposureSet):
# Check the internal type
return self.promiseInnerType().unroll().isExposedInAllOf(exposureSet)
class IDLBuiltinType(IDLType):
Types = enum(
# The integer types
'byte',
'octet',
'short',
'unsigned_short',
'long',
'unsigned_long',
'long_long',
'unsigned_long_long',
# Additional primitive types
'boolean',
'unrestricted_float',
'float',
'unrestricted_double',
# IMPORTANT: "double" must be the last primitive type listed
'double',
# Other types
'any',
'domstring',
'bytestring',
'usvstring',
'utf8string',
'jsstring',
'object',
'void',
# Funny stuff
'ArrayBuffer',
'ArrayBufferView',
'Int8Array',
'Uint8Array',
'Uint8ClampedArray',
'Int16Array',
'Uint16Array',
'Int32Array',
'Uint32Array',
'Float32Array',
'Float64Array',
'ReadableStream',
)
TagLookup = {
Types.byte: IDLType.Tags.int8,
Types.octet: IDLType.Tags.uint8,
Types.short: IDLType.Tags.int16,
Types.unsigned_short: IDLType.Tags.uint16,
Types.long: IDLType.Tags.int32,
Types.unsigned_long: IDLType.Tags.uint32,
Types.long_long: IDLType.Tags.int64,
Types.unsigned_long_long: IDLType.Tags.uint64,
Types.boolean: IDLType.Tags.bool,
Types.unrestricted_float: IDLType.Tags.unrestricted_float,
Types.float: IDLType.Tags.float,
Types.unrestricted_double: IDLType.Tags.unrestricted_double,
Types.double: IDLType.Tags.double,
Types.any: IDLType.Tags.any,
Types.domstring: IDLType.Tags.domstring,
Types.bytestring: IDLType.Tags.bytestring,
Types.usvstring: IDLType.Tags.usvstring,
Types.utf8string: IDLType.Tags.utf8string,
Types.jsstring: IDLType.Tags.jsstring,
Types.object: IDLType.Tags.object,
Types.void: IDLType.Tags.void,
Types.ArrayBuffer: IDLType.Tags.interface,
Types.ArrayBufferView: IDLType.Tags.interface,
Types.Int8Array: IDLType.Tags.interface,
Types.Uint8Array: IDLType.Tags.interface,
Types.Uint8ClampedArray: IDLType.Tags.interface,
Types.Int16Array: IDLType.Tags.interface,
Types.Uint16Array: IDLType.Tags.interface,
Types.Int32Array: IDLType.Tags.interface,
Types.Uint32Array: IDLType.Tags.interface,
Types.Float32Array: IDLType.Tags.interface,
Types.Float64Array: IDLType.Tags.interface,
Types.ReadableStream: IDLType.Tags.interface,
}
PrettyNames = {
Types.byte: "byte",
Types.octet: "octet",
Types.short: "short",
Types.unsigned_short: "unsigned short",
Types.long: "long",
Types.unsigned_long: "unsigned long",
Types.long_long: "long long",
Types.unsigned_long_long: "unsigned long long",
Types.boolean: "boolean",
Types.unrestricted_float: "unrestricted float",
Types.float: "float",
Types.unrestricted_double: "unrestricted double",
Types.double: "double",
Types.any: "any",
Types.domstring: "DOMString",
Types.bytestring: "ByteString",
Types.usvstring: "USVString",
Types.utf8string: "USVString", # That's what it is in spec terms
Types.jsstring: "USVString", # Again, that's what it is in spec terms
Types.object: "object",
Types.void: "void",
Types.ArrayBuffer: "ArrayBuffer",
Types.ArrayBufferView: "ArrayBufferView",
Types.Int8Array: "Int8Array",
Types.Uint8Array: "Uint8Array",
Types.Uint8ClampedArray: "Uint8ClampedArray",
Types.Int16Array: "Int16Array",
Types.Uint16Array: "Uint16Array",
Types.Int32Array: "Int32Array",
Types.Uint32Array: "Uint32Array",
Types.Float32Array: "Float32Array",
Types.Float64Array: "Float64Array",
Types.ReadableStream: "ReadableStream",
}
    def __init__(self, location, name, type, clamp=False, enforceRange=False,
                 treatNullAsEmpty=False, allowShared=False, attrLocation=None):
"""
The mutually exclusive clamp/enforceRange/treatNullAsEmpty/allowShared arguments are used
to create instances of this type with the appropriate attributes attached. Use .clamped(),
.rangeEnforced(), .withTreatNullAs() and .withAllowShared().
attrLocation is an array of source locations of these attributes for error reporting.
"""
        # attrLocation defaults to None rather than a mutable [] so a single
        # list is never shared across calls; normalize it here.
        attrLocation = attrLocation if attrLocation is not None else []
        IDLType.__init__(self, location, name)
self.builtin = True
self._typeTag = type
self._clamped = None
self._rangeEnforced = None
self._withTreatNullAs = None
        self._withAllowShared = None
if self.isInteger():
if clamp:
self._clamp = True
self.name = "Clamped" + self.name
self._extendedAttrDict["Clamp"] = True
elif enforceRange:
self._enforceRange = True
self.name = "RangeEnforced" + self.name
self._extendedAttrDict["EnforceRange"] = True
elif clamp or enforceRange:
raise WebIDLError("Non-integer types cannot be [Clamp] or [EnforceRange]", attrLocation)
if self.isDOMString() or self.isUTF8String():
if treatNullAsEmpty:
self.treatNullAsEmpty = True
self.name = "NullIsEmpty" + self.name
self._extendedAttrDict["TreatNullAs"] = ["EmptyString"]
elif treatNullAsEmpty:
raise WebIDLError("Non-string types cannot be [TreatNullAs]", attrLocation)
if self.isBufferSource():
if allowShared:
self._allowShared = True
self._extendedAttrDict["AllowShared"] = True
elif allowShared:
raise WebIDLError("Types that are not buffer source types cannot be [AllowShared]", attrLocation)
def __str__(self):
if self._allowShared:
assert self.isBufferSource()
return "MaybeShared" + str(self.name)
return str(self.name)
def __eq__(self, other):
        return (other is not None and
                self.location == other.location and
                self.name == other.name and
                self._typeTag == other._typeTag)
def __hash__(self):
return hash((self.location, self.name, self._typeTag))
def prettyName(self):
return IDLBuiltinType.PrettyNames[self._typeTag]
def clamped(self, attrLocation):
if not self._clamped:
self._clamped = IDLBuiltinType(self.location, self.name,
self._typeTag, clamp=True,
attrLocation=attrLocation)
return self._clamped
def rangeEnforced(self, attrLocation):
if not self._rangeEnforced:
self._rangeEnforced = IDLBuiltinType(self.location, self.name,
self._typeTag, enforceRange=True,
attrLocation=attrLocation)
return self._rangeEnforced
def withTreatNullAs(self, attrLocation):
if not self._withTreatNullAs:
self._withTreatNullAs = IDLBuiltinType(self.location, self.name,
self._typeTag, treatNullAsEmpty=True,
attrLocation=attrLocation)
return self._withTreatNullAs
def withAllowShared(self, attrLocation):
if not self._withAllowShared:
self._withAllowShared = IDLBuiltinType(self.location, self.name,
self._typeTag, allowShared=True,
attrLocation=attrLocation)
return self._withAllowShared
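    # The four factory methods above lazily build and cache one annotated
    # variant per base type, so every occurrence of, say, a [Clamp]-annotated
    # long shares a single "ClampedLong" instance instead of allocating a
    # fresh IDLBuiltinType at each use site.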
def isPrimitive(self):
return self._typeTag <= IDLBuiltinType.Types.double
def isBoolean(self):
return self._typeTag == IDLBuiltinType.Types.boolean
def isNumeric(self):
return self.isPrimitive() and not self.isBoolean()
def isString(self):
return (self._typeTag == IDLBuiltinType.Types.domstring or
self._typeTag == IDLBuiltinType.Types.bytestring or
self._typeTag == IDLBuiltinType.Types.usvstring or
self._typeTag == IDLBuiltinType.Types.utf8string or
self._typeTag == IDLBuiltinType.Types.jsstring)
def isByteString(self):
return self._typeTag == IDLBuiltinType.Types.bytestring
def isDOMString(self):
return self._typeTag == IDLBuiltinType.Types.domstring
def isUSVString(self):
return self._typeTag == IDLBuiltinType.Types.usvstring
def isUTF8String(self):
return self._typeTag == IDLBuiltinType.Types.utf8string
def isJSString(self):
return self._typeTag == IDLBuiltinType.Types.jsstring
def isInteger(self):
return self._typeTag <= IDLBuiltinType.Types.unsigned_long_long
def isArrayBuffer(self):
return self._typeTag == IDLBuiltinType.Types.ArrayBuffer
def isArrayBufferView(self):
return self._typeTag == IDLBuiltinType.Types.ArrayBufferView
def isTypedArray(self):
return (self._typeTag >= IDLBuiltinType.Types.Int8Array and
self._typeTag <= IDLBuiltinType.Types.Float64Array)
def isReadableStream(self):
return self._typeTag == IDLBuiltinType.Types.ReadableStream
def isInterface(self):
# TypedArray things are interface types per the TypedArray spec,
# but we handle them as builtins because SpiderMonkey implements
# all of it internally.
return (self.isArrayBuffer() or
self.isArrayBufferView() or
self.isTypedArray() or
self.isReadableStream())
def isNonCallbackInterface(self):
# All the interfaces we can be are non-callback
return self.isInterface()
def isFloat(self):
return (self._typeTag == IDLBuiltinType.Types.float or
self._typeTag == IDLBuiltinType.Types.double or
self._typeTag == IDLBuiltinType.Types.unrestricted_float or
self._typeTag == IDLBuiltinType.Types.unrestricted_double)
def isUnrestricted(self):
assert self.isFloat()
return (self._typeTag == IDLBuiltinType.Types.unrestricted_float or
self._typeTag == IDLBuiltinType.Types.unrestricted_double)
def isJSONType(self):
return self.isPrimitive() or self.isString() or self.isObject()
def includesRestrictedFloat(self):
return self.isFloat() and not self.isUnrestricted()
def tag(self):
return IDLBuiltinType.TagLookup[self._typeTag]
def isDistinguishableFrom(self, other):
if other.isPromise():
return False
if other.isUnion():
# Just forward to the union; it'll deal
return other.isDistinguishableFrom(self)
if self.isBoolean():
return (other.isNumeric() or other.isString() or other.isEnum() or
other.isInterface() or other.isObject() or
other.isCallback() or other.isDictionary() or
other.isSequence() or other.isRecord())
if self.isNumeric():
return (other.isBoolean() or other.isString() or other.isEnum() or
other.isInterface() or other.isObject() or
other.isCallback() or other.isDictionary() or
other.isSequence() or other.isRecord())
if self.isString():
return (other.isPrimitive() or other.isInterface() or
other.isObject() or
other.isCallback() or other.isDictionary() or
other.isSequence() or other.isRecord())
if self.isAny():
# Can't tell "any" apart from anything
return False
if self.isObject():
return other.isPrimitive() or other.isString() or other.isEnum()
if self.isVoid():
return not other.isVoid()
# Not much else we could be!
assert self.isSpiderMonkeyInterface()
# Like interfaces, but we know we're not a callback
return (other.isPrimitive() or other.isString() or other.isEnum() or
other.isCallback() or other.isDictionary() or
other.isSequence() or other.isRecord() or
(other.isInterface() and (
# ArrayBuffer is distinguishable from everything
# that's not an ArrayBuffer or a callback interface
(self.isArrayBuffer() and not other.isArrayBuffer()) or
(self.isReadableStream() and not other.isReadableStream()) or
# ArrayBufferView is distinguishable from everything
# that's not an ArrayBufferView or typed array.
(self.isArrayBufferView() and not other.isArrayBufferView() and
not other.isTypedArray()) or
# Typed arrays are distinguishable from everything
# except ArrayBufferView and the same type of typed
# array
(self.isTypedArray() and not other.isArrayBufferView() and not
(other.isTypedArray() and other.name == self.name)))))
def _getDependentObjects(self):
return set()
def withExtendedAttributes(self, attrs):
ret = self
for attribute in attrs:
identifier = attribute.identifier()
if identifier == "Clamp":
if not attribute.noArguments():
raise WebIDLError("[Clamp] must take no arguments",
[attribute.location])
if ret.hasEnforceRange() or self._enforceRange:
raise WebIDLError("[EnforceRange] and [Clamp] are mutually exclusive",
[self.location, attribute.location])
ret = self.clamped([self.location, attribute.location])
elif identifier == "EnforceRange":
if not attribute.noArguments():
raise WebIDLError("[EnforceRange] must take no arguments",
[attribute.location])
if ret.hasClamp() or self._clamp:
raise WebIDLError("[EnforceRange] and [Clamp] are mutually exclusive",
[self.location, attribute.location])
ret = self.rangeEnforced([self.location, attribute.location])
elif identifier == "TreatNullAs":
if not (self.isDOMString() or self.isUTF8String()):
raise WebIDLError("[TreatNullAs] only allowed on DOMStrings and UTF8Strings",
[self.location, attribute.location])
assert not self.nullable()
if not attribute.hasValue():
raise WebIDLError("[TreatNullAs] must take an identifier argument",
[attribute.location])
value = attribute.value()
if value != 'EmptyString':
raise WebIDLError("[TreatNullAs] must take the identifier "
"'EmptyString', not '%s'" % value,
[attribute.location])
ret = self.withTreatNullAs([self.location, attribute.location])
elif identifier == "AllowShared":
if not attribute.noArguments():
raise WebIDLError("[AllowShared] must take no arguments",
[attribute.location])
if not self.isBufferSource():
raise WebIDLError("[AllowShared] only allowed on buffer source types",
[self.location, attribute.location])
ret = self.withAllowShared([self.location, attribute.location])
else:
raise WebIDLError("Unhandled extended attribute on type",
[self.location, attribute.location])
return ret
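# Illustrative (hypothetical IDL, not from any particular spec): an argument
# declared as "[Clamp] long" reaches withExtendedAttributes() above and comes
# back as the cached "ClampedLong" variant with {"Clamp": True} recorded in
# its extended-attribute dict; asking for both [Clamp] and [EnforceRange] on
# the same type raises a WebIDLError instead.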
BuiltinTypes = {
IDLBuiltinType.Types.byte:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Byte",
IDLBuiltinType.Types.byte),
IDLBuiltinType.Types.octet:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Octet",
IDLBuiltinType.Types.octet),
IDLBuiltinType.Types.short:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Short",
IDLBuiltinType.Types.short),
IDLBuiltinType.Types.unsigned_short:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedShort",
IDLBuiltinType.Types.unsigned_short),
IDLBuiltinType.Types.long:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Long",
IDLBuiltinType.Types.long),
IDLBuiltinType.Types.unsigned_long:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedLong",
IDLBuiltinType.Types.unsigned_long),
IDLBuiltinType.Types.long_long:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "LongLong",
IDLBuiltinType.Types.long_long),
IDLBuiltinType.Types.unsigned_long_long:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedLongLong",
IDLBuiltinType.Types.unsigned_long_long),
IDLBuiltinType.Types.boolean:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Boolean",
IDLBuiltinType.Types.boolean),
IDLBuiltinType.Types.float:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float",
IDLBuiltinType.Types.float),
IDLBuiltinType.Types.unrestricted_float:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnrestrictedFloat",
IDLBuiltinType.Types.unrestricted_float),
IDLBuiltinType.Types.double:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Double",
IDLBuiltinType.Types.double),
IDLBuiltinType.Types.unrestricted_double:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnrestrictedDouble",
IDLBuiltinType.Types.unrestricted_double),
IDLBuiltinType.Types.any:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Any",
IDLBuiltinType.Types.any),
IDLBuiltinType.Types.domstring:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "String",
IDLBuiltinType.Types.domstring),
IDLBuiltinType.Types.bytestring:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "ByteString",
IDLBuiltinType.Types.bytestring),
IDLBuiltinType.Types.usvstring:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "USVString",
IDLBuiltinType.Types.usvstring),
IDLBuiltinType.Types.utf8string:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UTF8String",
IDLBuiltinType.Types.utf8string),
IDLBuiltinType.Types.jsstring:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "JSString",
IDLBuiltinType.Types.jsstring),
IDLBuiltinType.Types.object:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Object",
IDLBuiltinType.Types.object),
IDLBuiltinType.Types.void:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Void",
IDLBuiltinType.Types.void),
IDLBuiltinType.Types.ArrayBuffer:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "ArrayBuffer",
IDLBuiltinType.Types.ArrayBuffer),
IDLBuiltinType.Types.ArrayBufferView:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "ArrayBufferView",
IDLBuiltinType.Types.ArrayBufferView),
IDLBuiltinType.Types.Int8Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int8Array",
IDLBuiltinType.Types.Int8Array),
IDLBuiltinType.Types.Uint8Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint8Array",
IDLBuiltinType.Types.Uint8Array),
IDLBuiltinType.Types.Uint8ClampedArray:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint8ClampedArray",
IDLBuiltinType.Types.Uint8ClampedArray),
IDLBuiltinType.Types.Int16Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int16Array",
IDLBuiltinType.Types.Int16Array),
IDLBuiltinType.Types.Uint16Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint16Array",
IDLBuiltinType.Types.Uint16Array),
IDLBuiltinType.Types.Int32Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int32Array",
IDLBuiltinType.Types.Int32Array),
IDLBuiltinType.Types.Uint32Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint32Array",
IDLBuiltinType.Types.Uint32Array),
IDLBuiltinType.Types.Float32Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float32Array",
IDLBuiltinType.Types.Float32Array),
IDLBuiltinType.Types.Float64Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float64Array",
IDLBuiltinType.Types.Float64Array),
IDLBuiltinType.Types.ReadableStream:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "ReadableStream",
IDLBuiltinType.Types.ReadableStream),
}
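# BuiltinTypes maps each tag to one canonical instance; the rest of the parser
# hands out these shared singletons (e.g. BuiltinTypes[IDLBuiltinType.Types.void]
# as the return type of synthesized methods) rather than constructing new ones.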
integerTypeSizes = {
IDLBuiltinType.Types.byte: (-128, 127),
IDLBuiltinType.Types.octet: (0, 255),
IDLBuiltinType.Types.short: (-32768, 32767),
IDLBuiltinType.Types.unsigned_short: (0, 65535),
IDLBuiltinType.Types.long: (-2147483648, 2147483647),
IDLBuiltinType.Types.unsigned_long: (0, 4294967295),
IDLBuiltinType.Types.long_long: (-9223372036854775808, 9223372036854775807),
IDLBuiltinType.Types.unsigned_long_long: (0, 18446744073709551615)
}
def matchIntegerValueToType(value):
    for integerType, extremes in integerTypeSizes.items():
        (minValue, maxValue) = extremes
        if minValue <= value <= maxValue:
            return BuiltinTypes[integerType]
    return None
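# dict iteration preserves declaration order (Python 3.7+), so the smallest
# integer type whose range contains the value wins. Illustrative results:
#   matchIntegerValueToType(100)   -> the byte builtin
#   matchIntegerValueToType(200)   -> the octet builtin (outside byte's range)
#   matchIntegerValueToType(2**64) -> None (too large for any integer type)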
class NoCoercionFoundError(WebIDLError):
"""
A class we use to indicate generic coercion failures because none of the
types worked out in IDLValue.coerceToType.
"""
class IDLValue(IDLObject):
def __init__(self, location, type, value):
IDLObject.__init__(self, location)
self.type = type
assert isinstance(type, IDLType)
self.value = value
def coerceToType(self, type, location):
if type == self.type:
return self # Nothing to do
# We first check for unions to ensure that even if the union is nullable
# we end up with the right flat member type, not the union's type.
if type.isUnion():
# We use the flat member types here, because if we have a nullable
# member type, or a nested union, we want the type the value
# actually coerces to, not the nullable or nested union type.
for subtype in type.unroll().flatMemberTypes:
try:
coercedValue = self.coerceToType(subtype, location)
# Create a new IDLValue to make sure that we have the
# correct float/double type. This is necessary because we
# use the value's type when it is a default value of a
# union, and the union cares about the exact float type.
return IDLValue(self.location, subtype, coercedValue.value)
except Exception as e:
# Make sure to propagate out WebIDLErrors that are not the
# generic "hey, we could not coerce to this type at all"
# exception, because those are specific "coercion failed for
# reason X" exceptions. Note that we want to swallow
# non-WebIDLErrors here, because those can just happen if
# "type" is not something that can have a default value at
# all.
if (isinstance(e, WebIDLError) and
not isinstance(e, NoCoercionFoundError)):
raise e
# If the type allows null, rerun this matching on the inner type, except
# nullable enums. We handle those specially, because we want our
# default string values to stay strings even when assigned to a nullable
# enum.
elif type.nullable() and not type.isEnum():
innerValue = self.coerceToType(type.inner, location)
return IDLValue(self.location, type, innerValue.value)
elif self.type.isInteger() and type.isInteger():
# We're both integer types. See if we fit.
            (minValue, maxValue) = integerTypeSizes[type._typeTag]
            if minValue <= self.value <= maxValue:
# Promote
return IDLValue(self.location, type, self.value)
else:
raise WebIDLError("Value %s is out of range for type %s." %
(self.value, type), [location])
elif self.type.isInteger() and type.isFloat():
# Convert an integer literal into float
if -2**24 <= self.value <= 2**24:
return IDLValue(self.location, type, float(self.value))
else:
raise WebIDLError("Converting value %s to %s will lose precision." %
(self.value, type), [location])
elif self.type.isString() and type.isEnum():
# Just keep our string, but make sure it's a valid value for this enum
enum = type.unroll().inner
if self.value not in list(enum.values()):
raise WebIDLError("'%s' is not a valid default value for enum %s"
% (self.value, enum.identifier.name),
[location, enum.location])
return self
elif self.type.isFloat() and type.isFloat():
if (not type.isUnrestricted() and
(self.value == float("inf") or self.value == float("-inf") or
math.isnan(self.value))):
raise WebIDLError("Trying to convert unrestricted value %s to non-unrestricted"
% self.value, [location])
return IDLValue(self.location, type, self.value)
elif self.type.isString() and type.isUSVString():
            # Allow USVStrings to use a default value just like
# DOMString. No coercion is required in this case as Codegen.py
# treats USVString just like DOMString, but with an
# extra normalization step.
assert self.type.isDOMString()
return self
elif self.type.isDOMString() and type.treatNullAsEmpty:
# TreatNullAsEmpty is a different type for resolution reasons,
# however once you have a value it doesn't matter
return self
elif self.type.isString() and (type.isByteString() or type.isJSString() or type.isUTF8String()):
            # Allow ByteStrings, UTF8Strings, and JSStrings to use a default
# value like DOMString.
# No coercion is required as Codegen.py will handle the
# extra steps. We want to make sure that our string contains
# only valid characters, so we check that here.
valid_ascii_lit = " " + string.ascii_letters + string.digits + string.punctuation
for idx, c in enumerate(self.value):
if c not in valid_ascii_lit:
raise WebIDLError("Coercing this string literal %s to a ByteString is not supported yet. "
"Coercion failed due to an unsupported byte %d at index %d."
% (self.value.__repr__(), ord(c), idx), [location])
return IDLValue(self.location, type, self.value)
raise NoCoercionFoundError("Cannot coerce type %s to type %s." %
(self.type, type), [location])
def _getDependentObjects(self):
return set()
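# Illustrative coercions performed by IDLValue.coerceToType (hypothetical
# literals): an integer default of 5 declared against a "double" type becomes
# an IDLValue holding 5.0; a string default against an enum type is returned
# unchanged after being validated against the enum's values; anything with no
# matching branch falls through to NoCoercionFoundError.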
class IDLNullValue(IDLObject):
def __init__(self, location):
IDLObject.__init__(self, location)
self.type = None
self.value = None
def coerceToType(self, type, location):
if (not isinstance(type, IDLNullableType) and
not (type.isUnion() and type.hasNullableType) and
not type.isAny()):
raise WebIDLError("Cannot coerce null value to type %s." % type,
[location])
nullValue = IDLNullValue(self.location)
if type.isUnion() and not type.nullable() and type.hasDictionaryType():
# We're actually a default value for the union's dictionary member.
# Use its type.
for t in type.flatMemberTypes:
if t.isDictionary():
nullValue.type = t
return nullValue
nullValue.type = type
return nullValue
def _getDependentObjects(self):
return set()
class IDLEmptySequenceValue(IDLObject):
def __init__(self, location):
IDLObject.__init__(self, location)
self.type = None
self.value = None
def coerceToType(self, type, location):
if type.isUnion():
# We use the flat member types here, because if we have a nullable
# member type, or a nested union, we want the type the value
# actually coerces to, not the nullable or nested union type.
for subtype in type.unroll().flatMemberTypes:
try:
return self.coerceToType(subtype, location)
                except Exception:
pass
if not type.isSequence():
raise WebIDLError("Cannot coerce empty sequence value to type %s." % type,
[location])
emptySequenceValue = IDLEmptySequenceValue(self.location)
emptySequenceValue.type = type
return emptySequenceValue
def _getDependentObjects(self):
return set()
class IDLDefaultDictionaryValue(IDLObject):
def __init__(self, location):
IDLObject.__init__(self, location)
self.type = None
self.value = None
def coerceToType(self, type, location):
if type.isUnion():
# We use the flat member types here, because if we have a nullable
# member type, or a nested union, we want the type the value
# actually coerces to, not the nullable or nested union type.
for subtype in type.unroll().flatMemberTypes:
try:
return self.coerceToType(subtype, location)
                except Exception:
pass
if not type.isDictionary():
raise WebIDLError("Cannot coerce default dictionary value to type %s." % type,
[location])
defaultDictionaryValue = IDLDefaultDictionaryValue(self.location)
defaultDictionaryValue.type = type
return defaultDictionaryValue
def _getDependentObjects(self):
return set()
class IDLUndefinedValue(IDLObject):
def __init__(self, location):
IDLObject.__init__(self, location)
self.type = None
self.value = None
def coerceToType(self, type, location):
if not type.isAny():
raise WebIDLError("Cannot coerce undefined value to type %s." % type,
[location])
undefinedValue = IDLUndefinedValue(self.location)
undefinedValue.type = type
return undefinedValue
def _getDependentObjects(self):
return set()
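# The four sentinel classes above (IDLNullValue, IDLEmptySequenceValue,
# IDLDefaultDictionaryValue and IDLUndefinedValue) stand in for default values
# that have no ordinary IDLValue representation; each coerceToType() merely
# verifies that the target type can legally carry that kind of default.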
class IDLInterfaceMember(IDLObjectWithIdentifier, IDLExposureMixins):
Tags = enum(
'Const',
'Attr',
'Method',
'MaplikeOrSetlike',
'Iterable'
)
Special = enum(
'Static',
'Stringifier'
)
AffectsValues = ("Nothing", "Everything")
DependsOnValues = ("Nothing", "DOMState", "DeviceState", "Everything")
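    # These value lattices back the purity annotations used for JIT
    # optimization hints: [Pure] maps to DependsOn="DOMState" plus
    # Affects="Nothing", [Constant] and [SameObject] map to
    # DependsOn="Nothing" plus Affects="Nothing", and validate() rejects the
    # implausible combination of affecting everything while not depending on
    # everything.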
def __init__(self, location, identifier, tag, extendedAttrDict=None):
IDLObjectWithIdentifier.__init__(self, location, None, identifier)
IDLExposureMixins.__init__(self, location)
self.tag = tag
if extendedAttrDict is None:
self._extendedAttrDict = {}
else:
self._extendedAttrDict = extendedAttrDict
def isMethod(self):
return self.tag == IDLInterfaceMember.Tags.Method
def isAttr(self):
return self.tag == IDLInterfaceMember.Tags.Attr
def isConst(self):
return self.tag == IDLInterfaceMember.Tags.Const
def isMaplikeOrSetlikeOrIterable(self):
return (self.tag == IDLInterfaceMember.Tags.MaplikeOrSetlike or
self.tag == IDLInterfaceMember.Tags.Iterable)
def isMaplikeOrSetlike(self):
return self.tag == IDLInterfaceMember.Tags.MaplikeOrSetlike
def addExtendedAttributes(self, attrs):
for attr in attrs:
self.handleExtendedAttribute(attr)
attrlist = attr.listValue()
self._extendedAttrDict[attr.identifier()] = attrlist if len(attrlist) else True
def handleExtendedAttribute(self, attr):
pass
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
def finish(self, scope):
IDLExposureMixins.finish(self, scope)
def validate(self):
if self.isAttr() or self.isMethod():
if self.affects == "Everything" and self.dependsOn != "Everything":
raise WebIDLError("Interface member is flagged as affecting "
"everything but not depending on everything. "
"That seems rather unlikely.",
[self.location])
if self.getExtendedAttribute("NewObject"):
if self.dependsOn == "Nothing" or self.dependsOn == "DOMState":
raise WebIDLError("A [NewObject] method is not idempotent, "
"so it has to depend on something other than DOM state.",
[self.location])
if (self.getExtendedAttribute("Cached") or
self.getExtendedAttribute("StoreInSlot")):
raise WebIDLError("A [NewObject] attribute shouldnt be "
"[Cached] or [StoreInSlot], since the point "
"of those is to keep returning the same "
"thing across multiple calls, which is not "
"what [NewObject] does.",
[self.location])
def _setDependsOn(self, dependsOn):
if self.dependsOn != "Everything":
raise WebIDLError("Trying to specify multiple different DependsOn, "
"Pure, or Constant extended attributes for "
"attribute", [self.location])
if dependsOn not in IDLInterfaceMember.DependsOnValues:
raise WebIDLError("Invalid [DependsOn=%s] on attribute" % dependsOn,
[self.location])
self.dependsOn = dependsOn
def _setAffects(self, affects):
if self.affects != "Everything":
raise WebIDLError("Trying to specify multiple different Affects, "
"Pure, or Constant extended attributes for "
"attribute", [self.location])
if affects not in IDLInterfaceMember.AffectsValues:
raise WebIDLError("Invalid [Affects=%s] on attribute" % dependsOn,
[self.location])
self.affects = affects
def _addAlias(self, alias):
if alias in self.aliases:
raise WebIDLError("Duplicate [Alias=%s] on attribute" % alias,
[self.location])
self.aliases.append(alias)
def _addBindingAlias(self, bindingAlias):
if bindingAlias in self.bindingAliases:
raise WebIDLError("Duplicate [BindingAlias=%s] on attribute" % bindingAlias,
[self.location])
self.bindingAliases.append(bindingAlias)
class IDLMaplikeOrSetlikeOrIterableBase(IDLInterfaceMember):
def __init__(self, location, identifier, ifaceType, keyType, valueType, ifaceKind):
IDLInterfaceMember.__init__(self, location, identifier, ifaceKind)
if keyType is not None:
assert isinstance(keyType, IDLType)
else:
assert valueType is not None
assert ifaceType in ['maplike', 'setlike', 'iterable']
if valueType is not None:
assert isinstance(valueType, IDLType)
self.keyType = keyType
self.valueType = valueType
self.maplikeOrSetlikeOrIterableType = ifaceType
self.disallowedMemberNames = []
self.disallowedNonMethodNames = []
def isMaplike(self):
return self.maplikeOrSetlikeOrIterableType == "maplike"
def isSetlike(self):
return self.maplikeOrSetlikeOrIterableType == "setlike"
def isIterable(self):
return self.maplikeOrSetlikeOrIterableType == "iterable"
def hasKeyType(self):
return self.keyType is not None
def hasValueType(self):
return self.valueType is not None
def checkCollisions(self, members, isAncestor):
for member in members:
# Check that there are no disallowed members
if (member.identifier.name in self.disallowedMemberNames and
not ((member.isMethod() and member.isMaplikeOrSetlikeOrIterableMethod()) or
(member.isAttr() and member.isMaplikeOrSetlikeAttr()))):
raise WebIDLError("Member '%s' conflicts "
"with reserved %s name." %
(member.identifier.name,
self.maplikeOrSetlikeOrIterableType),
[self.location, member.location])
# Check that there are no disallowed non-method members.
# Ancestor members are always disallowed here; own members
# are disallowed only if they're non-methods.
if ((isAncestor or member.isAttr() or member.isConst()) and
member.identifier.name in self.disallowedNonMethodNames):
raise WebIDLError("Member '%s' conflicts "
"with reserved %s method." %
(member.identifier.name,
self.maplikeOrSetlikeOrIterableType),
[self.location, member.location])
    def addMethod(self, name, members, allowExistingOperations, returnType,
                  args=None, chromeOnly=False, isPure=False,
                  affectsNothing=False, newObject=False, isIteratorAlias=False):
"""
Create an IDLMethod based on the parameters passed in.
- members is the member list to add this function to, since this is
called during the member expansion portion of interface object
building.
- chromeOnly is only True for read-only js implemented classes, to
implement underscore prefixed convenience functions which would
otherwise not be available, unlike the case of C++ bindings.
- isPure is only True for idempotent functions, so it is not valid for
things like keys, values, etc. that return a new object every time.
- affectsNothing means that nothing changes due to this method, which
affects JIT optimization behavior
- newObject means the method creates and returns a new object.
"""
        # args defaults to None rather than a shared mutable list; normalize
        # it here before use.
        if args is None:
            args = []
        # Only add name to lists for collision checks if it's not chrome
# only.
if chromeOnly:
name = "__" + name
else:
if not allowExistingOperations:
self.disallowedMemberNames.append(name)
else:
self.disallowedNonMethodNames.append(name)
# If allowExistingOperations is True, and another operation exists
# with the same name as the one we're trying to add, don't add the
# maplike/setlike operation. However, if the operation is static,
# then fail by way of creating the function, which will cause a
# naming conflict, per the spec.
if allowExistingOperations:
for m in members:
if m.identifier.name == name and m.isMethod() and not m.isStatic():
return
method = IDLMethod(self.location,
IDLUnresolvedIdentifier(self.location, name, allowDoubleUnderscore=chromeOnly),
returnType, args, maplikeOrSetlikeOrIterable=self)
# We need to be able to throw from declaration methods
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("Throws",))])
if chromeOnly:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("ChromeOnly",))])
if isPure:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("Pure",))])
        # The following attributes are used for keys/values/entries. Can't mark
# them pure, since they return a new object each time they are run.
if affectsNothing:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("DependsOn", "Everything")),
IDLExtendedAttribute(self.location, ("Affects", "Nothing"))])
if newObject:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("NewObject",))])
if isIteratorAlias:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("Alias", "@@iterator"))])
# Methods generated for iterables should be enumerable, but the ones for
# maplike/setlike should not be.
if not self.isIterable():
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("NonEnumerable",))])
members.append(method)
def resolve(self, parentScope):
if self.keyType:
self.keyType.resolveType(parentScope)
if self.valueType:
self.valueType.resolveType(parentScope)
def finish(self, scope):
IDLInterfaceMember.finish(self, scope)
if self.keyType and not self.keyType.isComplete():
t = self.keyType.complete(scope)
assert not isinstance(t, IDLUnresolvedType)
assert not isinstance(t, IDLTypedefType)
assert not isinstance(t.name, IDLUnresolvedIdentifier)
self.keyType = t
if self.valueType and not self.valueType.isComplete():
t = self.valueType.complete(scope)
assert not isinstance(t, IDLUnresolvedType)
assert not isinstance(t, IDLTypedefType)
assert not isinstance(t.name, IDLUnresolvedIdentifier)
self.valueType = t
def validate(self):
IDLInterfaceMember.validate(self)
def handleExtendedAttribute(self, attr):
IDLInterfaceMember.handleExtendedAttribute(self, attr)
def _getDependentObjects(self):
deps = set()
if self.keyType:
deps.add(self.keyType)
if self.valueType:
deps.add(self.valueType)
return deps
def getForEachArguments(self):
return [IDLArgument(self.location,
IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
"callback"),
BuiltinTypes[IDLBuiltinType.Types.object]),
IDLArgument(self.location,
IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
"thisArg"),
BuiltinTypes[IDLBuiltinType.Types.any],
optional=True)]
# Iterable adds ES6 iterator style functions and traits
# (keys/values/entries/@@iterator) to an interface.
class IDLIterable(IDLMaplikeOrSetlikeOrIterableBase):
def __init__(self, location, identifier, keyType, valueType=None, scope=None):
IDLMaplikeOrSetlikeOrIterableBase.__init__(self, location, identifier,
"iterable", keyType, valueType,
IDLInterfaceMember.Tags.Iterable)
self.iteratorType = None
def __str__(self):
return "declared iterable with key '%s' and value '%s'" % (self.keyType, self.valueType)
def expand(self, members, isJSImplemented):
"""
In order to take advantage of all of the method machinery in Codegen,
we generate our functions as if they were part of the interface
specification during parsing.
"""
# We only need to add entries/keys/values here if we're a pair iterator.
# Value iterators just copy these from %ArrayPrototype% instead.
if not self.isPairIterator():
return
# object entries()
self.addMethod("entries", members, False, self.iteratorType,
affectsNothing=True, newObject=True,
isIteratorAlias=True)
# object keys()
self.addMethod("keys", members, False, self.iteratorType,
affectsNothing=True, newObject=True)
# object values()
self.addMethod("values", members, False, self.iteratorType,
affectsNothing=True, newObject=True)
# void forEach(callback(valueType, keyType), optional any thisArg)
self.addMethod("forEach", members, False,
BuiltinTypes[IDLBuiltinType.Types.void],
self.getForEachArguments())
def isValueIterator(self):
return not self.isPairIterator()
def isPairIterator(self):
return self.hasKeyType()
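# Illustrative expansions (assuming ordinary WebIDL iterable declarations):
#   iterable<long>;            value iterator - expand() adds nothing, since
#                              value iterators reuse %ArrayPrototype% methods.
#   iterable<DOMString, long>; pair iterator - entries(), keys(), values() and
#                              forEach() are synthesized by expand() above.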
# MaplikeOrSetlike adds ES6 map-or-set-like traits to an interface.
class IDLMaplikeOrSetlike(IDLMaplikeOrSetlikeOrIterableBase):
def __init__(self, location, identifier, maplikeOrSetlikeType,
readonly, keyType, valueType):
IDLMaplikeOrSetlikeOrIterableBase.__init__(self, location, identifier, maplikeOrSetlikeType,
keyType, valueType, IDLInterfaceMember.Tags.MaplikeOrSetlike)
self.readonly = readonly
self.slotIndices = None
# When generating JSAPI access code, we need to know the backing object
# type prefix to create the correct function. Generate here for reuse.
if self.isMaplike():
self.prefix = 'Map'
elif self.isSetlike():
self.prefix = 'Set'
def __str__(self):
return "declared '%s' with key '%s'" % (self.maplikeOrSetlikeOrIterableType, self.keyType)
def expand(self, members, isJSImplemented):
"""
In order to take advantage of all of the method machinery in Codegen,
we generate our functions as if they were part of the interface
specification during parsing.
"""
# Both maplike and setlike have a size attribute
sizeAttr = IDLAttribute(self.location,
IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"), "size"),
BuiltinTypes[IDLBuiltinType.Types.unsigned_long],
True,
maplikeOrSetlike=self)
# This should be non-enumerable.
sizeAttr.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("NonEnumerable",))])
members.append(sizeAttr)
self.reserved_ro_names = ["size"]
self.disallowedMemberNames.append("size")
# object entries()
self.addMethod("entries", members, False, BuiltinTypes[IDLBuiltinType.Types.object],
affectsNothing=True, isIteratorAlias=self.isMaplike())
# object keys()
self.addMethod("keys", members, False, BuiltinTypes[IDLBuiltinType.Types.object],
affectsNothing=True)
# object values()
self.addMethod("values", members, False, BuiltinTypes[IDLBuiltinType.Types.object],
affectsNothing=True, isIteratorAlias=self.isSetlike())
        # void forEach(callback(valueType, keyType), optional any thisArg)
self.addMethod("forEach", members, False, BuiltinTypes[IDLBuiltinType.Types.void],
self.getForEachArguments())
def getKeyArg():
return IDLArgument(self.location,
IDLUnresolvedIdentifier(self.location, "key"),
self.keyType)
# boolean has(keyType key)
self.addMethod("has", members, False, BuiltinTypes[IDLBuiltinType.Types.boolean],
[getKeyArg()], isPure=True)
if not self.readonly:
# void clear()
self.addMethod("clear", members, True, BuiltinTypes[IDLBuiltinType.Types.void],
[])
# boolean delete(keyType key)
self.addMethod("delete", members, True,
BuiltinTypes[IDLBuiltinType.Types.boolean], [getKeyArg()])
# Always generate underscored functions (e.g. __add, __clear) for js
# implemented interfaces as convenience functions.
if isJSImplemented:
# void clear()
self.addMethod("clear", members, True, BuiltinTypes[IDLBuiltinType.Types.void],
[], chromeOnly=True)
# boolean delete(keyType key)
self.addMethod("delete", members, True,
BuiltinTypes[IDLBuiltinType.Types.boolean], [getKeyArg()],
chromeOnly=True)
if self.isSetlike():
if not self.readonly:
# Add returns the set object it just added to.
# object add(keyType key)
self.addMethod("add", members, True,
BuiltinTypes[IDLBuiltinType.Types.object], [getKeyArg()])
if isJSImplemented:
self.addMethod("add", members, True,
BuiltinTypes[IDLBuiltinType.Types.object], [getKeyArg()],
chromeOnly=True)
return
# If we get this far, we're a maplike declaration.
# valueType get(keyType key)
#
# Note that instead of the value type, we're using any here. The
# validity checks should happen as things are inserted into the map,
# and using any as the return type makes code generation much simpler.
#
# TODO: Bug 1155340 may change this to use specific type to provide
# more info to JIT.
self.addMethod("get", members, False, BuiltinTypes[IDLBuiltinType.Types.any],
[getKeyArg()], isPure=True)
def getValueArg():
return IDLArgument(self.location,
IDLUnresolvedIdentifier(self.location, "value"),
self.valueType)
if not self.readonly:
self.addMethod("set", members, True, BuiltinTypes[IDLBuiltinType.Types.object],
[getKeyArg(), getValueArg()])
if isJSImplemented:
self.addMethod("set", members, True, BuiltinTypes[IDLBuiltinType.Types.object],
[getKeyArg(), getValueArg()], chromeOnly=True)
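# Illustrative: "maplike<DOMString, long>" expands to a size attribute plus
# entries()/keys()/values()/forEach()/has()/get() and, when not read-only,
# clear()/delete()/set(); a setlike gets add() in place of get() and set().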
class IDLConst(IDLInterfaceMember):
def __init__(self, location, identifier, type, value):
IDLInterfaceMember.__init__(self, location, identifier,
IDLInterfaceMember.Tags.Const)
assert isinstance(type, IDLType)
if type.isDictionary():
raise WebIDLError("A constant cannot be of a dictionary type",
[self.location])
if type.isRecord():
raise WebIDLError("A constant cannot be of a record type",
[self.location])
self.type = type
self.value = value
if identifier.name == "prototype":
raise WebIDLError("The identifier of a constant must not be 'prototype'",
[location])
def __str__(self):
return "'%s' const '%s'" % (self.type, self.identifier)
def finish(self, scope):
IDLInterfaceMember.finish(self, scope)
if not self.type.isComplete():
type = self.type.complete(scope)
if not type.isPrimitive() and not type.isString():
locations = [self.type.location, type.location]
try:
locations.append(type.inner.location)
                except Exception:
pass
raise WebIDLError("Incorrect type for constant", locations)
self.type = type
# The value might not match the type
coercedValue = self.value.coerceToType(self.type, self.location)
assert coercedValue
self.value = coercedValue
def validate(self):
IDLInterfaceMember.validate(self)
def handleExtendedAttribute(self, attr):
identifier = attr.identifier()
if identifier == "Exposed":
convertExposedAttrToGlobalNameSet(attr, self._exposureGlobalNames)
elif (identifier == "Pref" or
identifier == "ChromeOnly" or
identifier == "Func" or
identifier == "SecureContext" or
identifier == "NonEnumerable" or
identifier == "NeedsWindowsUndef"):
# Known attributes that we don't need to do anything with here
pass
else:
raise WebIDLError("Unknown extended attribute %s on constant" % identifier,
[attr.location])
IDLInterfaceMember.handleExtendedAttribute(self, attr)
def _getDependentObjects(self):
return set([self.type, self.value])
class IDLAttribute(IDLInterfaceMember):
def __init__(self, location, identifier, type, readonly, inherit=False,
static=False, stringifier=False, maplikeOrSetlike=None,
extendedAttrDict=None):
IDLInterfaceMember.__init__(self, location, identifier,
IDLInterfaceMember.Tags.Attr,
extendedAttrDict=extendedAttrDict)
assert isinstance(type, IDLType)
self.type = type
self.readonly = readonly
self.inherit = inherit
self._static = static
self.lenientThis = False
self._unforgeable = False
self.stringifier = stringifier
self.slotIndices = None
assert maplikeOrSetlike is None or isinstance(maplikeOrSetlike, IDLMaplikeOrSetlike)
self.maplikeOrSetlike = maplikeOrSetlike
self.dependsOn = "Everything"
self.affects = "Everything"
self.bindingAliases = []
if static and identifier.name == "prototype":
raise WebIDLError("The identifier of a static attribute must not be 'prototype'",
[location])
if readonly and inherit:
raise WebIDLError("An attribute cannot be both 'readonly' and 'inherit'",
[self.location])
def isStatic(self):
return self._static
def forceStatic(self):
self._static = True
def __str__(self):
return "'%s' attribute '%s'" % (self.type, self.identifier)
def finish(self, scope):
IDLInterfaceMember.finish(self, scope)
if not self.type.isComplete():
t = self.type.complete(scope)
assert not isinstance(t, IDLUnresolvedType)
assert not isinstance(t, IDLTypedefType)
assert not isinstance(t.name, IDLUnresolvedIdentifier)
self.type = t
if self.readonly and (self.type.hasClamp() or self.type.hasEnforceRange() or
self.type.hasAllowShared() or self.type.treatNullAsEmpty):
raise WebIDLError("A readonly attribute cannot be [Clamp] or [EnforceRange] or [AllowShared]",
[self.location])
if self.type.isDictionary() and not self.getExtendedAttribute("Cached"):
raise WebIDLError("An attribute cannot be of a dictionary type",
[self.location])
if self.type.isSequence() and not self.getExtendedAttribute("Cached"):
raise WebIDLError("A non-cached attribute cannot be of a sequence "
"type", [self.location])
if self.type.isRecord() and not self.getExtendedAttribute("Cached"):
raise WebIDLError("A non-cached attribute cannot be of a record "
"type", [self.location])
if self.type.isUnion():
for f in self.type.unroll().flatMemberTypes:
if f.isDictionary():
raise WebIDLError("An attribute cannot be of a union "
"type if one of its member types (or "
"one of its member types's member "
"types, and so on) is a dictionary "
"type", [self.location, f.location])
if f.isSequence():
raise WebIDLError("An attribute cannot be of a union "
"type if one of its member types (or "
"one of its member types's member "
"types, and so on) is a sequence "
"type", [self.location, f.location])
if f.isRecord():
raise WebIDLError("An attribute cannot be of a union "
"type if one of its member types (or "
"one of its member types's member "
"types, and so on) is a record "
"type", [self.location, f.location])
if not self.type.isInterface() and self.getExtendedAttribute("PutForwards"):
raise WebIDLError("An attribute with [PutForwards] must have an "
"interface type as its type", [self.location])
if (not self.type.isInterface() and
self.getExtendedAttribute("SameObject")):
raise WebIDLError("An attribute with [SameObject] must have an "
"interface type as its type", [self.location])
if self.type.isPromise() and not self.readonly:
raise WebIDLError("Promise-returning attributes must be readonly",
[self.location])
def validate(self):
def typeContainsChromeOnlyDictionaryMember(type):
if (type.nullable() or
type.isSequence() or
type.isRecord()):
return typeContainsChromeOnlyDictionaryMember(type.inner)
if type.isUnion():
for memberType in type.flatMemberTypes:
(contains, location) = typeContainsChromeOnlyDictionaryMember(memberType)
if contains:
return (True, location)
if type.isDictionary():
dictionary = type.inner
while dictionary:
(contains, location) = dictionaryContainsChromeOnlyMember(dictionary)
if contains:
return (True, location)
dictionary = dictionary.parent
return (False, None)
def dictionaryContainsChromeOnlyMember(dictionary):
for member in dictionary.members:
if member.getExtendedAttribute("ChromeOnly"):
return (True, member.location)
(contains, location) = typeContainsChromeOnlyDictionaryMember(member.type)
if contains:
return (True, location)
return (False, None)
IDLInterfaceMember.validate(self)
if (self.getExtendedAttribute("Cached") or
self.getExtendedAttribute("StoreInSlot")):
if not self.affects == "Nothing":
raise WebIDLError("Cached attributes and attributes stored in "
"slots must be Constant or Pure or "
"Affects=Nothing, since the getter won't always "
"be called.",
[self.location])
(contains, location) = typeContainsChromeOnlyDictionaryMember(self.type)
if contains:
raise WebIDLError("[Cached] and [StoreInSlot] must not be used "
"on an attribute whose type contains a "
"[ChromeOnly] dictionary member",
[self.location, location])
if self.getExtendedAttribute("Frozen"):
if (not self.type.isSequence() and not self.type.isDictionary() and
not self.type.isRecord()):
raise WebIDLError("[Frozen] is only allowed on "
"sequence-valued, dictionary-valued, and "
"record-valued attributes",
[self.location])
if not self.type.unroll().isExposedInAllOf(self.exposureSet):
raise WebIDLError("Attribute returns a type that is not exposed "
"everywhere where the attribute is exposed",
[self.location])
if self.getExtendedAttribute("CEReactions"):
if self.readonly:
raise WebIDLError("[CEReactions] is not allowed on "
"readonly attributes",
[self.location])
def handleExtendedAttribute(self, attr):
identifier = attr.identifier()
if ((identifier == "SetterThrows" or identifier == "SetterCanOOM" or
identifier == "SetterNeedsSubjectPrincipal")
and self.readonly):
raise WebIDLError("Readonly attributes must not be flagged as "
"[%s]" % identifier,
[self.location])
elif identifier == "BindingAlias":
if not attr.hasValue():
raise WebIDLError("[BindingAlias] takes an identifier or string",
[attr.location])
self._addBindingAlias(attr.value())
elif (((identifier == "Throws" or identifier == "GetterThrows" or
identifier == "CanOOM" or identifier == "GetterCanOOM") and
self.getExtendedAttribute("StoreInSlot")) or
(identifier == "StoreInSlot" and
(self.getExtendedAttribute("Throws") or
self.getExtendedAttribute("GetterThrows") or
self.getExtendedAttribute("CanOOM") or
self.getExtendedAttribute("GetterCanOOM")))):
raise WebIDLError("Throwing things can't be [StoreInSlot]",
[attr.location])
elif identifier == "LenientThis":
if not attr.noArguments():
raise WebIDLError("[LenientThis] must take no arguments",
[attr.location])
if self.isStatic():
raise WebIDLError("[LenientThis] is only allowed on non-static "
"attributes", [attr.location, self.location])
if self.getExtendedAttribute("CrossOriginReadable"):
raise WebIDLError("[LenientThis] is not allowed in combination "
"with [CrossOriginReadable]",
[attr.location, self.location])
if self.getExtendedAttribute("CrossOriginWritable"):
raise WebIDLError("[LenientThis] is not allowed in combination "
"with [CrossOriginWritable]",
[attr.location, self.location])
self.lenientThis = True
elif identifier == "Unforgeable":
if self.isStatic():
raise WebIDLError("[Unforgeable] is only allowed on non-static "
"attributes", [attr.location, self.location])
self._unforgeable = True
elif identifier == "SameObject" and not self.readonly:
raise WebIDLError("[SameObject] only allowed on readonly attributes",
[attr.location, self.location])
elif identifier == "Constant" and not self.readonly:
raise WebIDLError("[Constant] only allowed on readonly attributes",
[attr.location, self.location])
elif identifier == "PutForwards":
if not self.readonly:
raise WebIDLError("[PutForwards] is only allowed on readonly "
"attributes", [attr.location, self.location])
if self.type.isPromise():
raise WebIDLError("[PutForwards] is not allowed on "
"Promise-typed attributes",
[attr.location, self.location])
if self.isStatic():
raise WebIDLError("[PutForwards] is only allowed on non-static "
"attributes", [attr.location, self.location])
if self.getExtendedAttribute("Replaceable") is not None:
raise WebIDLError("[PutForwards] and [Replaceable] can't both "
"appear on the same attribute",
[attr.location, self.location])
if not attr.hasValue():
raise WebIDLError("[PutForwards] takes an identifier",
[attr.location, self.location])
elif identifier == "Replaceable":
if not attr.noArguments():
raise WebIDLError("[Replaceable] must take no arguments",
[attr.location])
if not self.readonly:
raise WebIDLError("[Replaceable] is only allowed on readonly "
"attributes", [attr.location, self.location])
if self.type.isPromise():
raise WebIDLError("[Replaceable] is not allowed on "
"Promise-typed attributes",
[attr.location, self.location])
if self.isStatic():
raise WebIDLError("[Replaceable] is only allowed on non-static "
"attributes", [attr.location, self.location])
if self.getExtendedAttribute("PutForwards") is not None:
raise WebIDLError("[PutForwards] and [Replaceable] can't both "
"appear on the same attribute",
[attr.location, self.location])
elif identifier == "LenientSetter":
if not attr.noArguments():
raise WebIDLError("[LenientSetter] must take no arguments",
[attr.location])
if not self.readonly:
raise WebIDLError("[LenientSetter] is only allowed on readonly "
"attributes", [attr.location, self.location])
if self.type.isPromise():
raise WebIDLError("[LenientSetter] is not allowed on "
"Promise-typed attributes",
[attr.location, self.location])
if self.isStatic():
raise WebIDLError("[LenientSetter] is only allowed on non-static "
"attributes", [attr.location, self.location])
if self.getExtendedAttribute("PutForwards") is not None:
raise WebIDLError("[LenientSetter] and [PutForwards] can't both "
"appear on the same attribute",
[attr.location, self.location])
if self.getExtendedAttribute("Replaceable") is not None:
raise WebIDLError("[LenientSetter] and [Replaceable] can't both "
"appear on the same attribute",
[attr.location, self.location])
elif identifier == "LenientFloat":
if self.readonly:
raise WebIDLError("[LenientFloat] used on a readonly attribute",
[attr.location, self.location])
if not self.type.includesRestrictedFloat():
raise WebIDLError("[LenientFloat] used on an attribute with a "
"non-restricted-float type",
[attr.location, self.location])
elif identifier == "StoreInSlot":
if self.getExtendedAttribute("Cached"):
raise WebIDLError("[StoreInSlot] and [Cached] must not be "
"specified on the same attribute",
[attr.location, self.location])
elif identifier == "Cached":
if self.getExtendedAttribute("StoreInSlot"):
raise WebIDLError("[Cached] and [StoreInSlot] must not be "
"specified on the same attribute",
[attr.location, self.location])
elif (identifier == "CrossOriginReadable" or
identifier == "CrossOriginWritable"):
if not attr.noArguments():
raise WebIDLError("[%s] must take no arguments" % identifier,
[attr.location])
if self.isStatic():
raise WebIDLError("[%s] is only allowed on non-static "
"attributes" % identifier,
[attr.location, self.location])
if self.getExtendedAttribute("LenientThis"):
raise WebIDLError("[LenientThis] is not allowed in combination "
"with [%s]" % identifier,
[attr.location, self.location])
elif identifier == "Exposed":
convertExposedAttrToGlobalNameSet(attr, self._exposureGlobalNames)
elif identifier == "Pure":
if not attr.noArguments():
raise WebIDLError("[Pure] must take no arguments",
[attr.location])
self._setDependsOn("DOMState")
self._setAffects("Nothing")
elif identifier == "Constant" or identifier == "SameObject":
if not attr.noArguments():
raise WebIDLError("[%s] must take no arguments" % identifier,
[attr.location])
self._setDependsOn("Nothing")
self._setAffects("Nothing")
elif identifier == "Affects":
if not attr.hasValue():
raise WebIDLError("[Affects] takes an identifier",
[attr.location])
self._setAffects(attr.value())
elif identifier == "DependsOn":
if not attr.hasValue():
raise WebIDLError("[DependsOn] takes an identifier",
[attr.location])
if (attr.value() != "Everything" and attr.value() != "DOMState" and
not self.readonly):
raise WebIDLError("[DependsOn=%s] only allowed on "
"readonly attributes" % attr.value(),
[attr.location, self.location])
self._setDependsOn(attr.value())
elif identifier == "UseCounter":
if self.stringifier:
raise WebIDLError("[UseCounter] must not be used on a "
"stringifier attribute",
[attr.location, self.location])
elif identifier == "Unscopable":
if not attr.noArguments():
raise WebIDLError("[Unscopable] must take no arguments",
[attr.location])
if self.isStatic():
raise WebIDLError("[Unscopable] is only allowed on non-static "
"attributes and operations",
[attr.location, self.location])
elif identifier == "CEReactions":
if not attr.noArguments():
raise WebIDLError("[CEReactions] must take no arguments",
[attr.location])
elif (identifier == "Pref" or
identifier == "Deprecated" or
identifier == "SetterThrows" or
identifier == "Throws" or
identifier == "GetterThrows" or
identifier == "SetterCanOOM" or
identifier == "CanOOM" or
identifier == "GetterCanOOM" or
identifier == "ChromeOnly" or
identifier == "Func" or
identifier == "SecureContext" or
identifier == "Frozen" or
identifier == "NewObject" or
identifier == "NeedsSubjectPrincipal" or
identifier == "SetterNeedsSubjectPrincipal" or
identifier == "GetterNeedsSubjectPrincipal" or
identifier == "NeedsCallerType" or
identifier == "ReturnValueNeedsContainsHack" or
identifier == "BinaryName" or
identifier == "NonEnumerable"):
# Known attributes that we don't need to do anything with here
pass
else:
raise WebIDLError("Unknown extended attribute %s on attribute" % identifier,
[attr.location])
IDLInterfaceMember.handleExtendedAttribute(self, attr)
def resolve(self, parentScope):
assert isinstance(parentScope, IDLScope)
self.type.resolveType(parentScope)
IDLObjectWithIdentifier.resolve(self, parentScope)
def hasLenientThis(self):
return self.lenientThis
def isMaplikeOrSetlikeAttr(self):
"""
True if this attribute was generated from an interface with
maplike/setlike (e.g. this is the size attribute for
maplike/setlike)
"""
return self.maplikeOrSetlike is not None
def isUnforgeable(self):
return self._unforgeable
def _getDependentObjects(self):
return set([self.type])
def expand(self, members):
assert self.stringifier
if not self.type.isDOMString() and not self.type.isUSVString():
raise WebIDLError("The type of a stringifer attribute must be "
"either DOMString or USVString",
[self.location])
identifier = IDLUnresolvedIdentifier(self.location, "__stringifier",
allowDoubleUnderscore=True)
method = IDLMethod(self.location,
identifier,
returnType=self.type, arguments=[],
stringifier=True, underlyingAttr=self)
allowedExtAttrs = ["Throws", "NeedsSubjectPrincipal", "Pure"]
# Safe to ignore these as they are only meaningful for attributes
attributeOnlyExtAttrs = [
"CEReactions",
"CrossOriginWritable",
"SetterThrows",
]
for (key, value) in list(self._extendedAttrDict.items()):
if key in allowedExtAttrs:
if value is not True:
raise WebIDLError("[%s] with a value is currently "
"unsupported in stringifier attributes, "
"please file a bug to add support" % key,
[self.location])
method.addExtendedAttributes([IDLExtendedAttribute(self.location, (key,))])
            elif key not in attributeOnlyExtAttrs:
raise WebIDLError("[%s] is currently unsupported in "
"stringifier attributes, please file a bug "
"to add support" % key,
[self.location])
members.append(method)
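# Note on IDLAttribute.expand() above: a declaration such as the hypothetical
# "stringifier attribute DOMString label;" is rewritten into a hidden
# "__stringifier" method returning the attribute's type, carrying over only
# the whitelisted extended attributes.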
class IDLArgument(IDLObjectWithIdentifier):
    def __init__(self, location, identifier, type, optional=False,
                 defaultValue=None, variadic=False, dictionaryMember=False,
                 allowTypeAttributes=False):
IDLObjectWithIdentifier.__init__(self, location, None, identifier)
assert isinstance(type, IDLType)
self.type = type
self.optional = optional
self.defaultValue = defaultValue
self.variadic = variadic
self.dictionaryMember = dictionaryMember
self._isComplete = False
self._allowTreatNonCallableAsNull = False
self._extendedAttrDict = {}
self.allowTypeAttributes = allowTypeAttributes
assert not variadic or optional
assert not variadic or not defaultValue
def addExtendedAttributes(self, attrs):
for attribute in attrs:
identifier = attribute.identifier()
if self.allowTypeAttributes and (identifier == "EnforceRange" or identifier == "Clamp" or
identifier == "TreatNullAs" or identifier == "AllowShared"):
self.type = self.type.withExtendedAttributes([attribute])
elif identifier == "TreatNonCallableAsNull":
self._allowTreatNonCallableAsNull = True
elif (self.dictionaryMember and
(identifier == "ChromeOnly" or
identifier == "Func" or
identifier == "Pref")):
if not self.optional:
raise WebIDLError("[%s] must not be used on a required "
"dictionary member" % identifier,
[attribute.location])
else:
raise WebIDLError("Unhandled extended attribute on %s" %
("a dictionary member" if self.dictionaryMember else
"an argument"),
[attribute.location])
attrlist = attribute.listValue()
self._extendedAttrDict[identifier] = attrlist if len(attrlist) else True
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
def isComplete(self):
return self._isComplete
def complete(self, scope):
if self._isComplete:
return
self._isComplete = True
if not self.type.isComplete():
type = self.type.complete(scope)
assert not isinstance(type, IDLUnresolvedType)
assert not isinstance(type, IDLTypedefType)
assert not isinstance(type.name, IDLUnresolvedIdentifier)
self.type = type
if self.type.isAny():
assert (self.defaultValue is None or
isinstance(self.defaultValue, IDLNullValue))
# optional 'any' values always have a default value
if self.optional and not self.defaultValue and not self.variadic:
# Set the default value to undefined, for simplicity, so the
# codegen doesn't have to special-case this.
self.defaultValue = IDLUndefinedValue(self.location)
if self.dictionaryMember and self.type.treatNullAsEmpty:
raise WebIDLError("Dictionary members cannot be [TreatNullAs]", [self.location])
        # Now coerce the default value; this needs to happen after the
# above creation of a default value.
if self.defaultValue:
self.defaultValue = self.defaultValue.coerceToType(self.type,
self.location)
assert self.defaultValue
def allowTreatNonCallableAsNull(self):
return self._allowTreatNonCallableAsNull
def _getDependentObjects(self):
deps = set([self.type])
if self.defaultValue:
deps.add(self.defaultValue)
return deps
def canHaveMissingValue(self):
return self.optional and not self.defaultValue
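# Note: complete() above gives optional, non-variadic "any" arguments an
# implicit undefined default, so canHaveMissingValue() effectively identifies
# the remaining optional arguments declared without any default at all.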
class IDLCallback(IDLObjectWithScope):
def __init__(self, location, parentScope, identifier, returnType, arguments, isConstructor):
assert isinstance(returnType, IDLType)
self._returnType = returnType
# Clone the list
self._arguments = list(arguments)
IDLObjectWithScope.__init__(self, location, parentScope, identifier)
for (returnType, arguments) in self.signatures():
for argument in arguments:
argument.resolve(self)
self._treatNonCallableAsNull = False
self._treatNonObjectAsNull = False
self._isRunScriptBoundary = False
self._isConstructor = isConstructor
def isCallback(self):
return True
def isConstructor(self):
return self._isConstructor
def signatures(self):
return [(self._returnType, self._arguments)]
def finish(self, scope):
if not self._returnType.isComplete():
type = self._returnType.complete(scope)
assert not isinstance(type, IDLUnresolvedType)
assert not isinstance(type, IDLTypedefType)
assert not isinstance(type.name, IDLUnresolvedIdentifier)
self._returnType = type
for argument in self._arguments:
if argument.type.isComplete():
continue
type = argument.type.complete(scope)
assert not isinstance(type, IDLUnresolvedType)
assert not isinstance(type, IDLTypedefType)
assert not isinstance(type.name, IDLUnresolvedIdentifier)
argument.type = type
def validate(self):
pass
def addExtendedAttributes(self, attrs):
unhandledAttrs = []
for attr in attrs:
if attr.identifier() == "TreatNonCallableAsNull":
self._treatNonCallableAsNull = True
elif attr.identifier() == "TreatNonObjectAsNull":
if self._isConstructor:
raise WebIDLError("[TreatNonObjectAsNull] is not supported "
"on constructors", [self.location])
self._treatNonObjectAsNull = True
elif attr.identifier() == "MOZ_CAN_RUN_SCRIPT_BOUNDARY":
if self._isConstructor:
raise WebIDLError("[MOZ_CAN_RUN_SCRIPT_BOUNDARY] is not "
"permitted on constructors",
[self.location])
self._isRunScriptBoundary = True
else:
unhandledAttrs.append(attr)
if self._treatNonCallableAsNull and self._treatNonObjectAsNull:
raise WebIDLError("Cannot specify both [TreatNonCallableAsNull] "
"and [TreatNonObjectAsNull]", [self.location])
if len(unhandledAttrs) != 0:
IDLType.addExtendedAttributes(self, unhandledAttrs)
def _getDependentObjects(self):
return set([self._returnType] + self._arguments)
def isRunScriptBoundary(self):
        return self._isRunScriptBoundary
class IDLCallbackType(IDLType):
def __init__(self, location, callback):
IDLType.__init__(self, location, callback.identifier.name)
self.callback = callback
def isCallback(self):
return True
def tag(self):
return IDLType.Tags.callback
def isDistinguishableFrom(self, other):
if other.isPromise():
return False
if other.isUnion():
# Just forward to the union; it'll deal
return other.isDistinguishableFrom(self)
return (other.isPrimitive() or other.isString() or other.isEnum() or
other.isNonCallbackInterface() or other.isSequence())
def _getDependentObjects(self):
return self.callback._getDependentObjects()
class IDLMethodOverload:
"""
A class that represents a single overload of a WebIDL method. This is not
quite the same as an element of the "effective overload set" in the spec,
because separate IDLMethodOverloads are not created based on arguments being
optional. Rather, when multiple methods have the same name, there is an
IDLMethodOverload for each one, all hanging off an IDLMethod representing
the full set of overloads.
"""
def __init__(self, returnType, arguments, location):
self.returnType = returnType
# Clone the list of arguments, just in case
self.arguments = list(arguments)
self.location = location
def _getDependentObjects(self):
deps = set(self.arguments)
deps.add(self.returnType)
return deps
def includesRestrictedFloatArgument(self):
return any(arg.type.includesRestrictedFloat() for arg in self.arguments)
class IDLMethod(IDLInterfaceMember, IDLScope):
Special = enum(
'Getter',
'Setter',
'Deleter',
'LegacyCaller',
base=IDLInterfaceMember.Special
)
NamedOrIndexed = enum(
'Neither',
'Named',
'Indexed'
)
def __init__(self, location, identifier, returnType, arguments,
static=False, getter=False, setter=False,
deleter=False, specialType=NamedOrIndexed.Neither,
legacycaller=False, stringifier=False,
maplikeOrSetlikeOrIterable=None,
underlyingAttr=None):
# REVIEW: specialType is NamedOrIndexed -- wow, this is messed up.
IDLInterfaceMember.__init__(self, location, identifier,
IDLInterfaceMember.Tags.Method)
self._hasOverloads = False
assert isinstance(returnType, IDLType)
# self._overloads is a list of IDLMethodOverloads
self._overloads = [IDLMethodOverload(returnType, arguments, location)]
assert isinstance(static, bool)
self._static = static
assert isinstance(getter, bool)
self._getter = getter
assert isinstance(setter, bool)
self._setter = setter
assert isinstance(deleter, bool)
self._deleter = deleter
assert isinstance(legacycaller, bool)
self._legacycaller = legacycaller
assert isinstance(stringifier, bool)
self._stringifier = stringifier
        assert (maplikeOrSetlikeOrIterable is None or
                isinstance(maplikeOrSetlikeOrIterable,
                           IDLMaplikeOrSetlikeOrIterableBase))
self.maplikeOrSetlikeOrIterable = maplikeOrSetlikeOrIterable
self._htmlConstructor = False
self.underlyingAttr = underlyingAttr
self._specialType = specialType
self._unforgeable = False
self.dependsOn = "Everything"
self.affects = "Everything"
self.aliases = []
if static and identifier.name == "prototype":
raise WebIDLError("The identifier of a static operation must not be 'prototype'",
[location])
self.assertSignatureConstraints()
def __str__(self):
return "Method '%s'" % self.identifier
def assertSignatureConstraints(self):
if self._getter or self._deleter:
assert len(self._overloads) == 1
overload = self._overloads[0]
arguments = overload.arguments
assert len(arguments) == 1
assert (arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.domstring] or
arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.unsigned_long])
assert not arguments[0].optional and not arguments[0].variadic
assert not self._getter or not overload.returnType.isVoid()
if self._setter:
assert len(self._overloads) == 1
arguments = self._overloads[0].arguments
assert len(arguments) == 2
assert (arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.domstring] or
arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.unsigned_long])
assert not arguments[0].optional and not arguments[0].variadic
assert not arguments[1].optional and not arguments[1].variadic
if self._stringifier:
assert len(self._overloads) == 1
overload = self._overloads[0]
assert len(overload.arguments) == 0
if not self.underlyingAttr:
assert overload.returnType == BuiltinTypes[IDLBuiltinType.Types.domstring]
def isStatic(self):
return self._static
def forceStatic(self):
self._static = True
def isGetter(self):
return self._getter
def isSetter(self):
return self._setter
def isDeleter(self):
return self._deleter
def isNamed(self):
assert (self._specialType == IDLMethod.NamedOrIndexed.Named or
self._specialType == IDLMethod.NamedOrIndexed.Indexed)
return self._specialType == IDLMethod.NamedOrIndexed.Named
def isIndexed(self):
assert (self._specialType == IDLMethod.NamedOrIndexed.Named or
self._specialType == IDLMethod.NamedOrIndexed.Indexed)
return self._specialType == IDLMethod.NamedOrIndexed.Indexed
def isLegacycaller(self):
return self._legacycaller
def isStringifier(self):
return self._stringifier
def isToJSON(self):
return self.identifier.name == "toJSON"
def isDefaultToJSON(self):
return self.isToJSON() and self.getExtendedAttribute("Default")
def isMaplikeOrSetlikeOrIterableMethod(self):
"""
True if this method was generated as part of a
maplike/setlike/etc interface (e.g. has/get methods)
"""
return self.maplikeOrSetlikeOrIterable is not None
def isSpecial(self):
return (self.isGetter() or
self.isSetter() or
self.isDeleter() or
self.isLegacycaller() or
self.isStringifier())
def isHTMLConstructor(self):
return self._htmlConstructor
def hasOverloads(self):
return self._hasOverloads
def isIdentifierLess(self):
"""
True if the method name started with __, and if the method is not a
maplike/setlike method. Interfaces with maplike/setlike will generate
methods starting with __ for chrome only backing object access in JS
implemented interfaces, so while these functions use what is considered
        a non-identifier name, they actually DO have an identifier.
"""
        return (self.identifier.name.startswith("__") and
                not self.isMaplikeOrSetlikeOrIterableMethod())
def resolve(self, parentScope):
assert isinstance(parentScope, IDLScope)
IDLObjectWithIdentifier.resolve(self, parentScope)
IDLScope.__init__(self, self.location, parentScope, self.identifier)
for (returnType, arguments) in self.signatures():
for argument in arguments:
argument.resolve(self)
def addOverload(self, method):
assert len(method._overloads) == 1
if self._extendedAttrDict != method._extendedAttrDict:
extendedAttrDiff = set(self._extendedAttrDict.keys()) ^ set(method._extendedAttrDict.keys())
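            # Overloads are allowed to differ only in [LenientFloat]; any
            # overload lacking it must then take no restricted float
            # arguments, which the checks below enforce.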
            if extendedAttrDiff == {"LenientFloat"}:
if "LenientFloat" not in self._extendedAttrDict:
for overload in self._overloads:
if overload.includesRestrictedFloatArgument():
raise WebIDLError("Restricted float behavior differs on different "
"overloads of %s" % method.identifier,
[overload.location, method.location])
self._extendedAttrDict["LenientFloat"] = method._extendedAttrDict["LenientFloat"]
elif method._overloads[0].includesRestrictedFloatArgument():
raise WebIDLError("Restricted float behavior differs on different "
"overloads of %s" % method.identifier,
[self.location, method.location])
else:
raise WebIDLError("Extended attributes differ on different "
"overloads of %s" % method.identifier,
[self.location, method.location])
self._overloads.extend(method._overloads)
self._hasOverloads = True
        if self.isStatic() != method.isStatic():
            raise WebIDLError("Overloaded identifier %s appears with different "
                              "values of the 'static' attribute" % method.identifier,
                              [method.location])
        if self.isLegacycaller() != method.isLegacycaller():
            raise WebIDLError("Overloaded identifier %s appears with different "
                              "values of the 'legacycaller' attribute" % method.identifier,
                              [method.location])
# Can't overload special things!
assert not self.isGetter()
assert not method.isGetter()
assert not self.isSetter()
assert not method.isSetter()
assert not self.isDeleter()
assert not method.isDeleter()
assert not self.isStringifier()
assert not method.isStringifier()
assert not self.isHTMLConstructor()
assert not method.isHTMLConstructor()
return self
def signatures(self):
return [(overload.returnType, overload.arguments) for overload in
self._overloads]
def finish(self, scope):
IDLInterfaceMember.finish(self, scope)
for overload in self._overloads:
returnType = overload.returnType
if not returnType.isComplete():
returnType = returnType.complete(scope)
assert not isinstance(returnType, IDLUnresolvedType)
assert not isinstance(returnType, IDLTypedefType)
assert not isinstance(returnType.name, IDLUnresolvedIdentifier)
overload.returnType = returnType
for argument in overload.arguments:
if not argument.isComplete():
argument.complete(scope)
assert argument.type.isComplete()
# Now compute various information that will be used by the
# WebIDL overload resolution algorithm.
self.maxArgCount = max(len(s[1]) for s in self.signatures())
self.allowedArgCounts = [i for i in range(self.maxArgCount+1)
if len(self.signaturesForArgCount(i)) != 0]
def validate(self):
IDLInterfaceMember.validate(self)
# Make sure our overloads are properly distinguishable and don't have
# different argument types before the distinguishing args.
for argCount in self.allowedArgCounts:
possibleOverloads = self.overloadsForArgCount(argCount)
if len(possibleOverloads) == 1:
continue
distinguishingIndex = self.distinguishingIndexForArgCount(argCount)
for idx in range(distinguishingIndex):
firstSigType = possibleOverloads[0].arguments[idx].type
for overload in possibleOverloads[1:]:
if overload.arguments[idx].type != firstSigType:
raise WebIDLError(
"Signatures for method '%s' with %d arguments have "
"different types of arguments at index %d, which "
"is before distinguishing index %d" %
(self.identifier.name, argCount, idx,
distinguishingIndex),
[self.location, overload.location])
overloadWithPromiseReturnType = None
overloadWithoutPromiseReturnType = None
for overload in self._overloads:
returnType = overload.returnType
if not returnType.unroll().isExposedInAllOf(self.exposureSet):
raise WebIDLError("Overload returns a type that is not exposed "
"everywhere where the method is exposed",
[overload.location])
variadicArgument = None
arguments = overload.arguments
for (idx, argument) in enumerate(arguments):
assert argument.type.isComplete()
if ((argument.type.isDictionary() and
argument.type.unroll().inner.canBeEmpty()) or
(argument.type.isUnion() and
argument.type.unroll().hasPossiblyEmptyDictionaryType())):
                # A dictionary argument whose type can be empty (or a union
                # containing such a dictionary) must, when every later
                # argument is optional, itself be optional and have a
                # default value.
if (not argument.optional and
all(arg.optional for arg in arguments[idx+1:])):
raise WebIDLError("Dictionary argument without any "
"required fields or union argument "
"containing such dictionary not "
"followed by a required argument "
"must be optional",
[argument.location])
if (not argument.defaultValue and
all(arg.optional for arg in arguments[idx+1:])):
raise WebIDLError("Dictionary argument without any "
"required fields or union argument "
"containing such dictionary not "
"followed by a required argument "
"must have a default value",
[argument.location])
# An argument cannot be a nullable dictionary or a
# nullable union containing a dictionary.
if (argument.type.nullable() and
(argument.type.isDictionary() or
(argument.type.isUnion() and
argument.type.unroll().hasDictionaryType()))):
raise WebIDLError("An argument cannot be a nullable "
"dictionary or nullable union "
"containing a dictionary",
[argument.location])
# Only the last argument can be variadic
if variadicArgument:
raise WebIDLError("Variadic argument is not last argument",
[variadicArgument.location])
if argument.variadic:
variadicArgument = argument
if returnType.isPromise():
overloadWithPromiseReturnType = overload
else:
overloadWithoutPromiseReturnType = overload
# Make sure either all our overloads return Promises or none do
if overloadWithPromiseReturnType and overloadWithoutPromiseReturnType:
raise WebIDLError("We have overloads with both Promise and "
"non-Promise return types",
[overloadWithPromiseReturnType.location,
overloadWithoutPromiseReturnType.location])
if overloadWithPromiseReturnType and self._legacycaller:
raise WebIDLError("May not have a Promise return type for a "
"legacycaller.",
[overloadWithPromiseReturnType.location])
if self.getExtendedAttribute("StaticClassOverride") and not \
(self.identifier.scope.isJSImplemented() and self.isStatic()):
raise WebIDLError("StaticClassOverride can be applied to static"
" methods on JS-implemented classes only.",
[self.location])
# Ensure that toJSON methods satisfy the spec constraints on them.
if self.identifier.name == "toJSON":
if len(self.signatures()) != 1:
raise WebIDLError("toJSON method has multiple overloads",
[self._overloads[0].location,
self._overloads[1].location])
if len(self.signatures()[0][1]) != 0:
raise WebIDLError("toJSON method has arguments",
[self.location])
if not self.signatures()[0][0].isJSONType():
raise WebIDLError("toJSON method has non-JSON return type",
[self.location])
def overloadsForArgCount(self, argc):
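        # Illustrative example: given overloads f(), f(long, optional long)
        # and f(DOMString...), argc == 1 matches the second (trailing
        # arguments optional) and third (variadic) overloads only.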
return [overload for overload in self._overloads if
len(overload.arguments) == argc or
(len(overload.arguments) > argc and
all(arg.optional for arg in overload.arguments[argc:])) or
(len(overload.arguments) < argc and
len(overload.arguments) > 0 and
overload.arguments[-1].variadic)]
def signaturesForArgCount(self, argc):
return [(overload.returnType, overload.arguments) for overload
in self.overloadsForArgCount(argc)]
def locationsForArgCount(self, argc):
return [overload.location for overload in self.overloadsForArgCount(argc)]
def distinguishingIndexForArgCount(self, argc):
def isValidDistinguishingIndex(idx, signatures):
for (firstSigIndex, (firstRetval, firstArgs)) in enumerate(signatures[:-1]):
for (secondRetval, secondArgs) in signatures[firstSigIndex+1:]:
if idx < len(firstArgs):
firstType = firstArgs[idx].type
else:
assert(firstArgs[-1].variadic)
firstType = firstArgs[-1].type
if idx < len(secondArgs):
secondType = secondArgs[idx].type
else:
assert(secondArgs[-1].variadic)
secondType = secondArgs[-1].type
if not firstType.isDistinguishableFrom(secondType):
return False
return True
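        # Illustrative example: for f(long, long) and f(long, DOMString),
        # index 0 is not distinguishing (long vs. long) but index 1 is, so
        # the distinguishing index is 1.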
signatures = self.signaturesForArgCount(argc)
for idx in range(argc):
if isValidDistinguishingIndex(idx, signatures):
return idx
# No valid distinguishing index. Time to throw
locations = self.locationsForArgCount(argc)
raise WebIDLError("Signatures with %d arguments for method '%s' are not "
"distinguishable" % (argc, self.identifier.name),
locations)
def handleExtendedAttribute(self, attr):
identifier = attr.identifier()
if (identifier == "GetterThrows" or
identifier == "SetterThrows" or
identifier == "GetterCanOOM" or
identifier == "SetterCanOOM" or
identifier == "SetterNeedsSubjectPrincipal" or
identifier == "GetterNeedsSubjectPrincipal"):
raise WebIDLError("Methods must not be flagged as "
"[%s]" % identifier,
[attr.location, self.location])
elif identifier == "Unforgeable":
if self.isStatic():
raise WebIDLError("[Unforgeable] is only allowed on non-static "
"methods", [attr.location, self.location])
self._unforgeable = True
elif identifier == "SameObject":
raise WebIDLError("Methods must not be flagged as [SameObject]",
[attr.location, self.location])
elif identifier == "Constant":
raise WebIDLError("Methods must not be flagged as [Constant]",
[attr.location, self.location])
elif identifier == "PutForwards":
raise WebIDLError("Only attributes support [PutForwards]",
[attr.location, self.location])
elif identifier == "LenientSetter":
raise WebIDLError("Only attributes support [LenientSetter]",
[attr.location, self.location])
elif identifier == "LenientFloat":
# This is called before we've done overload resolution
overloads = self._overloads
assert len(overloads) == 1
if not overloads[0].returnType.isVoid():
raise WebIDLError("[LenientFloat] used on a non-void method",
[attr.location, self.location])
if not overloads[0].includesRestrictedFloatArgument():
raise WebIDLError("[LenientFloat] used on an operation with no "
"restricted float type arguments",
[attr.location, self.location])
elif identifier == "Exposed":
convertExposedAttrToGlobalNameSet(attr, self._exposureGlobalNames)
elif (identifier == "CrossOriginCallable" or
identifier == "WebGLHandlesContextLoss"):
# Known no-argument attributes.
if not attr.noArguments():
raise WebIDLError("[%s] must take no arguments" % identifier,
[attr.location])
if identifier == "CrossOriginCallable" and self.isStatic():
raise WebIDLError("[CrossOriginCallable] is only allowed on non-static "
"attributes",
[attr.location, self.location])
elif identifier == "Pure":
if not attr.noArguments():
raise WebIDLError("[Pure] must take no arguments",
[attr.location])
self._setDependsOn("DOMState")
self._setAffects("Nothing")
elif identifier == "Affects":
if not attr.hasValue():
raise WebIDLError("[Affects] takes an identifier",
[attr.location])
self._setAffects(attr.value())
elif identifier == "DependsOn":
if not attr.hasValue():
raise WebIDLError("[DependsOn] takes an identifier",
[attr.location])
self._setDependsOn(attr.value())
elif identifier == "Alias":
if not attr.hasValue():
raise WebIDLError("[Alias] takes an identifier or string",
[attr.location])
self._addAlias(attr.value())
elif identifier == "UseCounter":
if self.isSpecial():
raise WebIDLError("[UseCounter] must not be used on a special "
"operation",
[attr.location, self.location])
elif identifier == "Unscopable":
if not attr.noArguments():
raise WebIDLError("[Unscopable] must take no arguments",
[attr.location])
if self.isStatic():
raise WebIDLError("[Unscopable] is only allowed on non-static "
"attributes and operations",
[attr.location, self.location])
elif identifier == "CEReactions":
if not attr.noArguments():
raise WebIDLError("[CEReactions] must take no arguments",
[attr.location])
if self.isSpecial() and not self.isSetter() and not self.isDeleter():
raise WebIDLError("[CEReactions] is only allowed on operation, "
"attribute, setter, and deleter",
[attr.location, self.location])
elif identifier == "Default":
if not attr.noArguments():
raise WebIDLError("[Default] must take no arguments",
[attr.location])
if not self.isToJSON():
raise WebIDLError("[Default] is only allowed on toJSON operations",
[attr.location, self.location])
if self.signatures()[0][0] != BuiltinTypes[IDLBuiltinType.Types.object]:
raise WebIDLError("The return type of the default toJSON "
"operation must be 'object'",
[attr.location, self.location])
elif (identifier == "Throws" or
identifier == "CanOOM" or
identifier == "NewObject" or
identifier == "ChromeOnly" or
identifier == "Pref" or
identifier == "Deprecated" or
identifier == "Func" or
identifier == "SecureContext" or
identifier == "BinaryName" or
identifier == "NeedsSubjectPrincipal" or
identifier == "NeedsCallerType" or
identifier == "StaticClassOverride" or
identifier == "NonEnumerable" or
identifier == "Unexposed"):
# Known attributes that we don't need to do anything with here
pass
else:
raise WebIDLError("Unknown extended attribute %s on method" % identifier,
[attr.location])
IDLInterfaceMember.handleExtendedAttribute(self, attr)
def returnsPromise(self):
return self._overloads[0].returnType.isPromise()
def isUnforgeable(self):
return self._unforgeable
def _getDependentObjects(self):
deps = set()
for overload in self._overloads:
deps.update(overload._getDependentObjects())
return deps
class IDLConstructor(IDLMethod):
def __init__(self, location, args, name):
# We can't actually init our IDLMethod yet, because we do not know the
# return type yet. Just save the info we have for now and we will init
# it later.
self._initLocation = location
self._initArgs = args
self._initName = name
self._inited = False
self._initExtendedAttrs = []
def addExtendedAttributes(self, attrs):
if self._inited:
return IDLMethod.addExtendedAttributes(self, attrs)
self._initExtendedAttrs.extend(attrs)
def handleExtendedAttribute(self, attr):
identifier = attr.identifier()
if (identifier == "BinaryName" or
identifier == "ChromeOnly" or
identifier == "NewObject" or
identifier == "SecureContext" or
identifier == "Throws" or
identifier == "Func" or
identifier == "Pref"):
IDLMethod.handleExtendedAttribute(self, attr)
elif identifier == "HTMLConstructor":
if not attr.noArguments():
raise WebIDLError("[HTMLConstructor] must take no arguments",
[attr.location])
# We shouldn't end up here for named constructors.
assert(self.identifier.name == "constructor")
if any(len(sig[1]) != 0 for sig in self.signatures()):
raise WebIDLError("[HTMLConstructor] must not be applied to a "
"constructor operation that has arguments.",
[attr.location])
self._htmlConstructor = True
else:
raise WebIDLError("Unknown extended attribute %s on method" % identifier,
[attr.location])
def reallyInit(self, parentInterface):
name = self._initName
location = self._initLocation
identifier = IDLUnresolvedIdentifier(location, name, allowForbidden=True)
retType = IDLWrapperType(parentInterface.location, parentInterface)
IDLMethod.__init__(self, location, identifier, retType, self._initArgs,
static=True)
        self._inited = True
# Propagate through whatever extended attributes we already had
self.addExtendedAttributes(self._initExtendedAttrs)
self._initExtendedAttrs = []
# Constructors are always NewObject. Whether they throw or not is
# indicated by [Throws] annotations in the usual way.
self.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("NewObject",))])
class IDLIncludesStatement(IDLObject):
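    # Represents a statement such as "Foo includes Bar;" (illustrative IDL),
    # linking interface Foo to interface mixin Bar during finish().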
def __init__(self, location, interface, mixin):
IDLObject.__init__(self, location)
self.interface = interface
self.mixin = mixin
self._finished = False
def finish(self, scope):
if self._finished:
return
self._finished = True
assert(isinstance(self.interface, IDLIdentifierPlaceholder))
assert(isinstance(self.mixin, IDLIdentifierPlaceholder))
interface = self.interface.finish(scope)
mixin = self.mixin.finish(scope)
# NOTE: we depend on not setting self.interface and
# self.mixin here to keep track of the original
# locations.
if not isinstance(interface, IDLInterface):
raise WebIDLError("Left-hand side of 'includes' is not an "
"interface",
[self.interface.location, interface.location])
if interface.isCallback():
raise WebIDLError("Left-hand side of 'includes' is a callback "
"interface",
[self.interface.location, interface.location])
if not isinstance(mixin, IDLInterfaceMixin):
raise WebIDLError("Right-hand side of 'includes' is not an "
"interface mixin",
[self.mixin.location, mixin.location])
mixin.actualExposureGlobalNames.update(interface._exposureGlobalNames)
interface.addIncludedMixin(mixin)
self.interface = interface
self.mixin = mixin
def validate(self):
pass
def addExtendedAttributes(self, attrs):
if len(attrs) != 0:
raise WebIDLError("There are no extended attributes that are "
"allowed on includes statements",
[attrs[0].location, self.location])
class IDLExtendedAttribute(IDLObject):
"""
A class to represent IDL extended attributes so we can give them locations
"""
def __init__(self, location, tuple):
IDLObject.__init__(self, location)
self._tuple = tuple
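        # The tuple takes one of the shapes produced by the parser:
        # (name,), (name, string), (name, list), or (name, string, list),
        # which the accessors below distinguish.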
def identifier(self):
return self._tuple[0]
def noArguments(self):
return len(self._tuple) == 1
def hasValue(self):
return len(self._tuple) >= 2 and isinstance(self._tuple[1], str)
def value(self):
assert(self.hasValue())
return self._tuple[1]
def hasArgs(self):
return (len(self._tuple) == 2 and isinstance(self._tuple[1], list) or
len(self._tuple) == 3)
def args(self):
assert(self.hasArgs())
# Our args are our last element
return self._tuple[-1]
def listValue(self):
"""
Backdoor for storing random data in _extendedAttrDict
"""
return list(self._tuple)[1:]
# Parser
class Tokenizer(object):
tokens = [
"INTEGER",
"FLOATLITERAL",
"IDENTIFIER",
"STRING",
"COMMENTS",
"WHITESPACE",
"OTHER"
]
def t_FLOATLITERAL(self, t):
r'(-?(([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)([Ee][+-]?[0-9]+)?|[0-9]+[Ee][+-]?[0-9]+|Infinity))|NaN'
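        # Matches e.g. "3.14", "-.5", "1e10", "-Infinity", and "NaN".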
t.value = float(t.value)
return t
def t_INTEGER(self, t):
r'-?(0([0-7]+|[Xx][0-9A-Fa-f]+)?|[1-9][0-9]*)'
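        # Matches decimal ("42"), octal ("017"), and hex ("0x1F") literals,
        # each with an optional leading "-".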
try:
# Can't use int(), because that doesn't handle octal properly.
t.value = parseInt(t.value)
        except Exception:
raise WebIDLError("Invalid integer literal",
[Location(lexer=self.lexer,
lineno=self.lexer.lineno,
lexpos=self.lexer.lexpos,
filename=self._filename)])
return t
def t_IDENTIFIER(self, t):
r'[_-]?[A-Za-z][0-9A-Z_a-z-]*'
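        # Keywords are re-typed below: e.g. "interface" becomes an INTERFACE
        # token, while "foo" or "_foo" stays IDENTIFIER.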
t.type = self.keywords.get(t.value, 'IDENTIFIER')
return t
def t_STRING(self, t):
r'"[^"]*"'
t.value = t.value[1:-1]
return t
def t_COMMENTS(self, t):
r'(\/\*(.|\n)*?\*\/)|(\/\/.*)'
pass
def t_WHITESPACE(self, t):
r'[\t\n\r ]+'
pass
def t_ELLIPSIS(self, t):
r'\.\.\.'
t.type = self.keywords.get(t.value)
return t
def t_OTHER(self, t):
r'[^\t\n\r 0-9A-Z_a-z]'
t.type = self.keywords.get(t.value, 'OTHER')
return t
keywords = {
"interface": "INTERFACE",
"partial": "PARTIAL",
"mixin": "MIXIN",
"dictionary": "DICTIONARY",
"exception": "EXCEPTION",
"enum": "ENUM",
"callback": "CALLBACK",
"typedef": "TYPEDEF",
"includes": "INCLUDES",
"const": "CONST",
"null": "NULL",
"true": "TRUE",
"false": "FALSE",
"serializer": "SERIALIZER",
"stringifier": "STRINGIFIER",
"unrestricted": "UNRESTRICTED",
"attribute": "ATTRIBUTE",
"readonly": "READONLY",
"inherit": "INHERIT",
"static": "STATIC",
"getter": "GETTER",
"setter": "SETTER",
"deleter": "DELETER",
"legacycaller": "LEGACYCALLER",
"optional": "OPTIONAL",
"...": "ELLIPSIS",
"::": "SCOPE",
"DOMString": "DOMSTRING",
"ByteString": "BYTESTRING",
"USVString": "USVSTRING",
"JSString": "JSSTRING",
"UTF8String": "UTF8STRING",
"any": "ANY",
"boolean": "BOOLEAN",
"byte": "BYTE",
"double": "DOUBLE",
"float": "FLOAT",
"long": "LONG",
"object": "OBJECT",
"octet": "OCTET",
"Promise": "PROMISE",
"required": "REQUIRED",
"sequence": "SEQUENCE",
"record": "RECORD",
"short": "SHORT",
"unsigned": "UNSIGNED",
"void": "VOID",
":": "COLON",
";": "SEMICOLON",
"{": "LBRACE",
"}": "RBRACE",
"(": "LPAREN",
")": "RPAREN",
"[": "LBRACKET",
"]": "RBRACKET",
"?": "QUESTIONMARK",
",": "COMMA",
"=": "EQUALS",
"<": "LT",
">": "GT",
"ArrayBuffer": "ARRAYBUFFER",
"or": "OR",
"maplike": "MAPLIKE",
"setlike": "SETLIKE",
"iterable": "ITERABLE",
"namespace": "NAMESPACE",
"ReadableStream": "READABLESTREAM",
"constructor": "CONSTRUCTOR",
"symbol": "SYMBOL",
"async": "ASYNC",
}
tokens.extend(list(keywords.values()))
def t_error(self, t):
raise WebIDLError("Unrecognized Input",
[Location(lexer=self.lexer,
lineno=self.lexer.lineno,
lexpos=self.lexer.lexpos,
filename=self.filename)])
def __init__(self, lexer=None):
if lexer:
self.lexer = lexer
else:
self.lexer = lex.lex(object=self)
class SqueakyCleanLogger(object):
errorWhitelist = [
        # Web IDL defines the WHITESPACE and COMMENTS tokens, but doesn't
        # actually use them ... so far.
"Token 'WHITESPACE' defined, but not used",
"Token 'COMMENTS' defined, but not used",
# And that means we have unused tokens
"There are 2 unused tokens",
# Web IDL defines a OtherOrComma rule that's only used in
# ExtendedAttributeInner, which we don't use yet.
"Rule 'OtherOrComma' defined, but not used",
# And an unused rule
"There is 1 unused rule",
# And the OtherOrComma grammar symbol is unreachable.
"Symbol 'OtherOrComma' is unreachable",
# Which means the Other symbol is unreachable.
"Symbol 'Other' is unreachable",
]
def __init__(self):
self.errors = []
def debug(self, msg, *args, **kwargs):
pass
info = debug
def warning(self, msg, *args, **kwargs):
        if msg in ("%s:%d: Rule %r defined, but not used",
                   "%s:%d: Rule '%s' defined, but not used"):
# Munge things so we don't have to hardcode filenames and
# line numbers in our whitelist.
whitelistmsg = "Rule %r defined, but not used"
whitelistargs = args[2:]
else:
whitelistmsg = msg
whitelistargs = args
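        # E.g. PLY's "%s:%d: Rule 'Foo' defined, but not used" is reduced to
        # "Rule 'Foo' defined, but not used" before the whitelist check.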
if (whitelistmsg % whitelistargs) not in SqueakyCleanLogger.errorWhitelist:
self.errors.append(msg % args)
error = warning
def reportGrammarErrors(self):
if self.errors:
raise WebIDLError("\n".join(self.errors), [])
class Parser(Tokenizer):
def getLocation(self, p, i):
return Location(self.lexer, p.lineno(i), p.lexpos(i), self._filename)
def globalScope(self):
return self._globalScope
# The p_Foo functions here must match the WebIDL spec's grammar.
# It's acceptable to split things at '|' boundaries.
def p_Definitions(self, p):
"""
Definitions : ExtendedAttributeList Definition Definitions
"""
if p[2]:
p[0] = [p[2]]
p[2].addExtendedAttributes(p[1])
else:
assert not p[1]
p[0] = []
p[0].extend(p[3])
def p_DefinitionsEmpty(self, p):
"""
Definitions :
"""
p[0] = []
def p_Definition(self, p):
"""
Definition : CallbackOrInterfaceOrMixin
| Namespace
| Partial
| Dictionary
| Exception
| Enum
| Typedef
| IncludesStatement
"""
p[0] = p[1]
assert p[1] # We might not have implemented something ...
def p_CallbackOrInterfaceOrMixinCallback(self, p):
"""
CallbackOrInterfaceOrMixin : CALLBACK CallbackRestOrInterface
"""
if p[2].isInterface():
assert isinstance(p[2], IDLInterface)
p[2].setCallback(True)
p[0] = p[2]
def p_CallbackOrInterfaceOrMixinInterfaceOrMixin(self, p):
"""
CallbackOrInterfaceOrMixin : INTERFACE InterfaceOrMixin
"""
p[0] = p[2]
def p_CallbackRestOrInterface(self, p):
"""
CallbackRestOrInterface : CallbackRest
| CallbackConstructorRest
| CallbackInterface
"""
assert p[1]
p[0] = p[1]
def handleNonPartialObject(self, location, identifier, constructor,
constructorArgs, nonPartialArgs):
"""
This handles non-partial objects (interfaces, namespaces and
dictionaries) by checking for an existing partial object, and promoting
it to non-partial as needed. The return value is the non-partial
object.
constructorArgs are all the args for the constructor except the last
one: isKnownNonPartial.
nonPartialArgs are the args for the setNonPartial call.
"""
# The name of the class starts with "IDL", so strip that off.
# Also, starts with a capital letter after that, so nix that
# as well.
prettyname = constructor.__name__[3:].lower()
try:
existingObj = self.globalScope()._lookupIdentifier(identifier)
if existingObj:
if not isinstance(existingObj, constructor):
raise WebIDLError("%s has the same name as "
"non-%s object" %
(prettyname.capitalize(), prettyname),
[location, existingObj.location])
existingObj.setNonPartial(*nonPartialArgs)
return existingObj
        except WebIDLError:
            raise
        except Exception:
            pass
# True for isKnownNonPartial
return constructor(*(constructorArgs + [True]))
def p_InterfaceOrMixin(self, p):
"""
InterfaceOrMixin : InterfaceRest
| MixinRest
"""
p[0] = p[1]
def p_CallbackInterface(self, p):
"""
CallbackInterface : INTERFACE InterfaceRest
"""
p[0] = p[2]
def p_InterfaceRest(self, p):
"""
InterfaceRest : IDENTIFIER Inheritance LBRACE InterfaceMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(location, p[1])
members = p[4]
parent = p[2]
p[0] = self.handleNonPartialObject(
location, identifier, IDLInterface,
[location, self.globalScope(), identifier, parent, members],
[location, parent, members])
def p_InterfaceForwardDecl(self, p):
"""
InterfaceRest : IDENTIFIER SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(location, p[1])
try:
if self.globalScope()._lookupIdentifier(identifier):
p[0] = self.globalScope()._lookupIdentifier(identifier)
if not isinstance(p[0], IDLExternalInterface):
raise WebIDLError("Name collision between external "
"interface declaration for identifier "
"%s and %s" % (identifier.name, p[0]),
[location, p[0].location])
return
        except WebIDLError:
            raise
        except Exception:
            pass
p[0] = IDLExternalInterface(location, self.globalScope(), identifier)
def p_MixinRest(self, p):
"""
MixinRest : MIXIN IDENTIFIER LBRACE MixinMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[4]
p[0] = self.handleNonPartialObject(
location, identifier, IDLInterfaceMixin,
[location, self.globalScope(), identifier, members],
[location, members])
def p_Namespace(self, p):
"""
Namespace : NAMESPACE IDENTIFIER LBRACE InterfaceMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[4]
p[0] = self.handleNonPartialObject(
location, identifier, IDLNamespace,
[location, self.globalScope(), identifier, members],
[location, None, members])
def p_Partial(self, p):
"""
Partial : PARTIAL PartialDefinition
"""
p[0] = p[2]
def p_PartialDefinitionInterface(self, p):
"""
PartialDefinition : INTERFACE PartialInterfaceOrPartialMixin
"""
p[0] = p[2]
def p_PartialDefinition(self, p):
"""
PartialDefinition : PartialNamespace
| PartialDictionary
"""
p[0] = p[1]
def handlePartialObject(self, location, identifier, nonPartialConstructor,
nonPartialConstructorArgs,
partialConstructorArgs):
"""
This handles partial objects (interfaces, namespaces and dictionaries)
by checking for an existing non-partial object, and adding ourselves to
it as needed. The return value is our partial object. We use
IDLPartialInterfaceOrNamespace for partial interfaces or namespaces,
and IDLPartialDictionary for partial dictionaries.
nonPartialConstructorArgs are all the args for the non-partial
constructor except the last two: members and isKnownNonPartial.
partialConstructorArgs are the arguments for the partial object
constructor, except the last one (the non-partial object).
"""
# The name of the class starts with "IDL", so strip that off.
# Also, starts with a capital letter after that, so nix that
# as well.
prettyname = nonPartialConstructor.__name__[3:].lower()
nonPartialObject = None
try:
nonPartialObject = self.globalScope()._lookupIdentifier(identifier)
if nonPartialObject:
if not isinstance(nonPartialObject, nonPartialConstructor):
raise WebIDLError("Partial %s has the same name as "
"non-%s object" %
(prettyname, prettyname),
[location, nonPartialObject.location])
        except WebIDLError:
            raise
        except Exception:
            pass
if not nonPartialObject:
nonPartialObject = nonPartialConstructor(
# No members, False for isKnownNonPartial
*(nonPartialConstructorArgs), members=[], isKnownNonPartial=False)
partialObject = None
if isinstance(nonPartialObject, IDLDictionary):
partialObject = IDLPartialDictionary(
*(partialConstructorArgs + [nonPartialObject]))
elif isinstance(nonPartialObject, (IDLInterface, IDLInterfaceMixin, IDLNamespace)):
partialObject = IDLPartialInterfaceOrNamespace(
*(partialConstructorArgs + [nonPartialObject]))
else:
raise WebIDLError("Unknown partial object type %s" %
type(partialObject),
[location])
return partialObject
def p_PartialInterfaceOrPartialMixin(self, p):
"""
PartialInterfaceOrPartialMixin : PartialInterfaceRest
| PartialMixinRest
"""
p[0] = p[1]
def p_PartialInterfaceRest(self, p):
"""
PartialInterfaceRest : IDENTIFIER LBRACE PartialInterfaceMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(location, p[1])
members = p[3]
p[0] = self.handlePartialObject(
location, identifier, IDLInterface,
[location, self.globalScope(), identifier, None],
[location, identifier, members])
def p_PartialMixinRest(self, p):
"""
PartialMixinRest : MIXIN IDENTIFIER LBRACE MixinMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[4]
p[0] = self.handlePartialObject(
location, identifier, IDLInterfaceMixin,
[location, self.globalScope(), identifier],
[location, identifier, members])
def p_PartialNamespace(self, p):
"""
PartialNamespace : NAMESPACE IDENTIFIER LBRACE InterfaceMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[4]
p[0] = self.handlePartialObject(
location, identifier, IDLNamespace,
[location, self.globalScope(), identifier],
[location, identifier, members])
def p_PartialDictionary(self, p):
"""
PartialDictionary : DICTIONARY IDENTIFIER LBRACE DictionaryMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[4]
p[0] = self.handlePartialObject(
location, identifier, IDLDictionary,
[location, self.globalScope(), identifier],
[location, identifier, members])
def p_Inheritance(self, p):
"""
Inheritance : COLON ScopedName
"""
p[0] = IDLIdentifierPlaceholder(self.getLocation(p, 2), p[2])
def p_InheritanceEmpty(self, p):
"""
Inheritance :
"""
pass
def p_InterfaceMembers(self, p):
"""
InterfaceMembers : ExtendedAttributeList InterfaceMember InterfaceMembers
"""
p[0] = [p[2]]
assert not p[1] or p[2]
p[2].addExtendedAttributes(p[1])
p[0].extend(p[3])
def p_InterfaceMembersEmpty(self, p):
"""
InterfaceMembers :
"""
p[0] = []
def p_InterfaceMember(self, p):
"""
InterfaceMember : PartialInterfaceMember
| Constructor
"""
p[0] = p[1]
def p_Constructor(self, p):
"""
Constructor : CONSTRUCTOR LPAREN ArgumentList RPAREN SEMICOLON
"""
p[0] = IDLConstructor(self.getLocation(p, 1), p[3], "constructor")
def p_PartialInterfaceMembers(self, p):
"""
PartialInterfaceMembers : ExtendedAttributeList PartialInterfaceMember PartialInterfaceMembers
"""
p[0] = [p[2]]
assert not p[1] or p[2]
p[2].addExtendedAttributes(p[1])
p[0].extend(p[3])
def p_PartialInterfaceMembersEmpty(self, p):
"""
PartialInterfaceMembers :
"""
p[0] = []
def p_PartialInterfaceMember(self, p):
"""
PartialInterfaceMember : Const
| AttributeOrOperationOrMaplikeOrSetlikeOrIterable
"""
p[0] = p[1]
def p_MixinMembersEmpty(self, p):
"""
MixinMembers :
"""
p[0] = []
def p_MixinMembers(self, p):
"""
MixinMembers : ExtendedAttributeList MixinMember MixinMembers
"""
p[0] = [p[2]]
assert not p[1] or p[2]
p[2].addExtendedAttributes(p[1])
p[0].extend(p[3])
def p_MixinMember(self, p):
"""
MixinMember : Const
| Attribute
| Operation
"""
p[0] = p[1]
def p_Dictionary(self, p):
"""
Dictionary : DICTIONARY IDENTIFIER Inheritance LBRACE DictionaryMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[5]
p[0] = IDLDictionary(location, self.globalScope(), identifier, p[3], members)
def p_DictionaryMembers(self, p):
"""
DictionaryMembers : ExtendedAttributeList DictionaryMember DictionaryMembers
|
"""
if len(p) == 1:
# We're at the end of the list
p[0] = []
return
p[2].addExtendedAttributes(p[1])
p[0] = [p[2]]
p[0].extend(p[3])
def p_DictionaryMemberRequired(self, p):
"""
DictionaryMember : REQUIRED TypeWithExtendedAttributes IDENTIFIER SEMICOLON
"""
# These quack a lot like required arguments, so just treat them that way.
t = p[2]
assert isinstance(t, IDLType)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
p[0] = IDLArgument(self.getLocation(p, 3), identifier, t,
optional=False,
defaultValue=None, variadic=False,
dictionaryMember=True)
def p_DictionaryMember(self, p):
"""
DictionaryMember : Type IDENTIFIER Default SEMICOLON
"""
# These quack a lot like optional arguments, so just treat them that way.
t = p[1]
assert isinstance(t, IDLType)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
defaultValue = p[3]
# Any attributes that precede this may apply to the type, so
# we configure the argument to forward type attributes down instead of producing
# a parse error
p[0] = IDLArgument(self.getLocation(p, 2), identifier, t,
optional=True,
defaultValue=defaultValue, variadic=False,
dictionaryMember=True, allowTypeAttributes=True)
def p_Default(self, p):
"""
Default : EQUALS DefaultValue
|
"""
if len(p) > 1:
p[0] = p[2]
else:
p[0] = None
def p_DefaultValue(self, p):
"""
DefaultValue : ConstValue
| LBRACKET RBRACKET
| LBRACE RBRACE
"""
if len(p) == 2:
p[0] = p[1]
else:
assert len(p) == 3 # Must be [] or {}
if p[1] == "[":
p[0] = IDLEmptySequenceValue(self.getLocation(p, 1))
else:
assert p[1] == "{"
p[0] = IDLDefaultDictionaryValue(self.getLocation(p, 1))
def p_DefaultValueNull(self, p):
"""
DefaultValue : NULL
"""
p[0] = IDLNullValue(self.getLocation(p, 1))
def p_Exception(self, p):
"""
Exception : EXCEPTION IDENTIFIER Inheritance LBRACE ExceptionMembers RBRACE SEMICOLON
"""
pass
def p_Enum(self, p):
"""
Enum : ENUM IDENTIFIER LBRACE EnumValueList RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
values = p[4]
assert values
p[0] = IDLEnum(location, self.globalScope(), identifier, values)
def p_EnumValueList(self, p):
"""
EnumValueList : STRING EnumValueListComma
"""
p[0] = [p[1]]
p[0].extend(p[2])
def p_EnumValueListComma(self, p):
"""
EnumValueListComma : COMMA EnumValueListString
"""
p[0] = p[2]
def p_EnumValueListCommaEmpty(self, p):
"""
EnumValueListComma :
"""
p[0] = []
def p_EnumValueListString(self, p):
"""
EnumValueListString : STRING EnumValueListComma
"""
p[0] = [p[1]]
p[0].extend(p[2])
def p_EnumValueListStringEmpty(self, p):
"""
EnumValueListString :
"""
p[0] = []
def p_CallbackRest(self, p):
"""
CallbackRest : IDENTIFIER EQUALS ReturnType LPAREN ArgumentList RPAREN SEMICOLON
"""
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
p[0] = IDLCallback(self.getLocation(p, 1), self.globalScope(),
identifier, p[3], p[5], isConstructor=False)
def p_CallbackConstructorRest(self, p):
"""
CallbackConstructorRest : CONSTRUCTOR IDENTIFIER EQUALS ReturnType LPAREN ArgumentList RPAREN SEMICOLON
"""
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
p[0] = IDLCallback(self.getLocation(p, 2), self.globalScope(),
identifier, p[4], p[6], isConstructor=True)
def p_ExceptionMembers(self, p):
"""
ExceptionMembers : ExtendedAttributeList ExceptionMember ExceptionMembers
|
"""
pass
def p_Typedef(self, p):
"""
Typedef : TYPEDEF TypeWithExtendedAttributes IDENTIFIER SEMICOLON
"""
typedef = IDLTypedef(self.getLocation(p, 1), self.globalScope(),
p[2], p[3])
p[0] = typedef
def p_IncludesStatement(self, p):
"""
IncludesStatement : ScopedName INCLUDES ScopedName SEMICOLON
"""
assert(p[2] == "includes")
interface = IDLIdentifierPlaceholder(self.getLocation(p, 1), p[1])
mixin = IDLIdentifierPlaceholder(self.getLocation(p, 3), p[3])
p[0] = IDLIncludesStatement(self.getLocation(p, 1), interface, mixin)
def p_Const(self, p):
"""
Const : CONST ConstType IDENTIFIER EQUALS ConstValue SEMICOLON
"""
location = self.getLocation(p, 1)
type = p[2]
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
value = p[5]
p[0] = IDLConst(location, identifier, type, value)
def p_ConstValueBoolean(self, p):
"""
ConstValue : BooleanLiteral
"""
location = self.getLocation(p, 1)
booleanType = BuiltinTypes[IDLBuiltinType.Types.boolean]
p[0] = IDLValue(location, booleanType, p[1])
def p_ConstValueInteger(self, p):
"""
ConstValue : INTEGER
"""
location = self.getLocation(p, 1)
# We don't know ahead of time what type the integer literal is.
# Determine the smallest type it could possibly fit in and use that.
integerType = matchIntegerValueToType(p[1])
if integerType is None:
raise WebIDLError("Integer literal out of range", [location])
p[0] = IDLValue(location, integerType, p[1])
def p_ConstValueFloat(self, p):
"""
ConstValue : FLOATLITERAL
"""
location = self.getLocation(p, 1)
p[0] = IDLValue(location, BuiltinTypes[IDLBuiltinType.Types.unrestricted_float], p[1])
def p_ConstValueString(self, p):
"""
ConstValue : STRING
"""
location = self.getLocation(p, 1)
stringType = BuiltinTypes[IDLBuiltinType.Types.domstring]
p[0] = IDLValue(location, stringType, p[1])
def p_BooleanLiteralTrue(self, p):
"""
BooleanLiteral : TRUE
"""
p[0] = True
def p_BooleanLiteralFalse(self, p):
"""
BooleanLiteral : FALSE
"""
p[0] = False
def p_AttributeOrOperationOrMaplikeOrSetlikeOrIterable(self, p):
"""
AttributeOrOperationOrMaplikeOrSetlikeOrIterable : Attribute
| Maplike
| Setlike
| Iterable
| Operation
"""
p[0] = p[1]
def p_Iterable(self, p):
"""
Iterable : ITERABLE LT TypeWithExtendedAttributes GT SEMICOLON
| ITERABLE LT TypeWithExtendedAttributes COMMA TypeWithExtendedAttributes GT SEMICOLON
"""
location = self.getLocation(p, 2)
identifier = IDLUnresolvedIdentifier(location, "__iterable",
allowDoubleUnderscore=True)
        if len(p) > 6:
keyType = p[3]
valueType = p[5]
else:
keyType = None
valueType = p[3]
p[0] = IDLIterable(location, identifier, keyType, valueType, self.globalScope())
def p_Setlike(self, p):
"""
Setlike : ReadOnly SETLIKE LT TypeWithExtendedAttributes GT SEMICOLON
"""
readonly = p[1]
maplikeOrSetlikeType = p[2]
location = self.getLocation(p, 2)
identifier = IDLUnresolvedIdentifier(location, "__setlike",
allowDoubleUnderscore=True)
keyType = p[4]
valueType = keyType
p[0] = IDLMaplikeOrSetlike(location, identifier, maplikeOrSetlikeType,
readonly, keyType, valueType)
def p_Maplike(self, p):
"""
Maplike : ReadOnly MAPLIKE LT TypeWithExtendedAttributes COMMA TypeWithExtendedAttributes GT SEMICOLON
"""
readonly = p[1]
maplikeOrSetlikeType = p[2]
location = self.getLocation(p, 2)
identifier = IDLUnresolvedIdentifier(location, "__maplike",
allowDoubleUnderscore=True)
keyType = p[4]
valueType = p[6]
p[0] = IDLMaplikeOrSetlike(location, identifier, maplikeOrSetlikeType,
readonly, keyType, valueType)
def p_AttributeWithQualifier(self, p):
"""
Attribute : Qualifier AttributeRest
"""
static = IDLInterfaceMember.Special.Static in p[1]
stringifier = IDLInterfaceMember.Special.Stringifier in p[1]
(location, identifier, type, readonly) = p[2]
p[0] = IDLAttribute(location, identifier, type, readonly,
static=static, stringifier=stringifier)
def p_AttributeInherited(self, p):
"""
Attribute : INHERIT AttributeRest
"""
(location, identifier, type, readonly) = p[2]
p[0] = IDLAttribute(location, identifier, type, readonly, inherit=True)
def p_Attribute(self, p):
"""
Attribute : AttributeRest
"""
(location, identifier, type, readonly) = p[1]
p[0] = IDLAttribute(location, identifier, type, readonly, inherit=False)
def p_AttributeRest(self, p):
"""
AttributeRest : ReadOnly ATTRIBUTE TypeWithExtendedAttributes AttributeName SEMICOLON
"""
location = self.getLocation(p, 2)
readonly = p[1]
t = p[3]
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 4), p[4])
p[0] = (location, identifier, t, readonly)
def p_ReadOnly(self, p):
"""
ReadOnly : READONLY
"""
p[0] = True
def p_ReadOnlyEmpty(self, p):
"""
ReadOnly :
"""
p[0] = False
def p_Operation(self, p):
"""
Operation : Qualifiers OperationRest
"""
qualifiers = p[1]
# Disallow duplicates in the qualifier set
if not len(set(qualifiers)) == len(qualifiers):
raise WebIDLError("Duplicate qualifiers are not allowed",
[self.getLocation(p, 1)])
static = IDLInterfaceMember.Special.Static in p[1]
        # If "static" is present, the grammar guarantees it is the only
        # qualifier, so we can assert that here.
assert not static or len(qualifiers) == 1
stringifier = IDLInterfaceMember.Special.Stringifier in p[1]
        # Likewise, if "stringifier" is present it must be the only
        # qualifier, so we can assert that here too.
assert not stringifier or len(qualifiers) == 1
        getter = IDLMethod.Special.Getter in p[1]
        setter = IDLMethod.Special.Setter in p[1]
        deleter = IDLMethod.Special.Deleter in p[1]
        legacycaller = IDLMethod.Special.LegacyCaller in p[1]
if getter or deleter:
if setter:
raise WebIDLError("getter and deleter are incompatible with setter",
[self.getLocation(p, 1)])
(returnType, identifier, arguments) = p[2]
assert isinstance(returnType, IDLType)
specialType = IDLMethod.NamedOrIndexed.Neither
if getter or deleter:
if len(arguments) != 1:
raise WebIDLError("%s has wrong number of arguments" %
("getter" if getter else "deleter"),
[self.getLocation(p, 2)])
argType = arguments[0].type
if argType == BuiltinTypes[IDLBuiltinType.Types.domstring]:
specialType = IDLMethod.NamedOrIndexed.Named
elif argType == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]:
specialType = IDLMethod.NamedOrIndexed.Indexed
if deleter:
raise WebIDLError("There is no such thing as an indexed deleter.",
[self.getLocation(p, 1)])
else:
raise WebIDLError("%s has wrong argument type (must be DOMString or UnsignedLong)" %
("getter" if getter else "deleter"),
[arguments[0].location])
if arguments[0].optional or arguments[0].variadic:
raise WebIDLError("%s cannot have %s argument" %
("getter" if getter else "deleter",
"optional" if arguments[0].optional else "variadic"),
[arguments[0].location])
if getter:
if returnType.isVoid():
raise WebIDLError("getter cannot have void return type",
[self.getLocation(p, 2)])
if setter:
if len(arguments) != 2:
raise WebIDLError("setter has wrong number of arguments",
[self.getLocation(p, 2)])
argType = arguments[0].type
if argType == BuiltinTypes[IDLBuiltinType.Types.domstring]:
specialType = IDLMethod.NamedOrIndexed.Named
elif argType == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]:
specialType = IDLMethod.NamedOrIndexed.Indexed
else:
raise WebIDLError("settter has wrong argument type (must be DOMString or UnsignedLong)",
[arguments[0].location])
if arguments[0].optional or arguments[0].variadic:
raise WebIDLError("setter cannot have %s argument" %
("optional" if arguments[0].optional else "variadic"),
[arguments[0].location])
if arguments[1].optional or arguments[1].variadic:
raise WebIDLError("setter cannot have %s argument" %
("optional" if arguments[1].optional else "variadic"),
[arguments[1].location])
if stringifier:
if len(arguments) != 0:
raise WebIDLError("stringifier has wrong number of arguments",
[self.getLocation(p, 2)])
if not returnType.isDOMString():
raise WebIDLError("stringifier must have DOMString return type",
[self.getLocation(p, 2)])
# identifier might be None. This is only permitted for special methods.
if not identifier:
if (not getter and not setter and
not deleter and not legacycaller and not stringifier):
raise WebIDLError("Identifier required for non-special methods",
[self.getLocation(p, 2)])
location = BuiltinLocation("<auto-generated-identifier>")
identifier = IDLUnresolvedIdentifier(
location,
"__%s%s%s%s%s%s" %
("named" if specialType == IDLMethod.NamedOrIndexed.Named else
"indexed" if specialType == IDLMethod.NamedOrIndexed.Indexed else "",
"getter" if getter else "",
"setter" if setter else "",
"deleter" if deleter else "",
"legacycaller" if legacycaller else "",
"stringifier" if stringifier else ""),
allowDoubleUnderscore=True)
method = IDLMethod(self.getLocation(p, 2), identifier, returnType, arguments,
static=static, getter=getter, setter=setter,
deleter=deleter, specialType=specialType,
legacycaller=legacycaller, stringifier=stringifier)
p[0] = method
def p_Stringifier(self, p):
"""
Operation : STRINGIFIER SEMICOLON
"""
identifier = IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
"__stringifier",
allowDoubleUnderscore=True)
method = IDLMethod(self.getLocation(p, 1),
identifier,
returnType=BuiltinTypes[IDLBuiltinType.Types.domstring],
arguments=[],
stringifier=True)
p[0] = method
def p_QualifierStatic(self, p):
"""
Qualifier : STATIC
"""
p[0] = [IDLInterfaceMember.Special.Static]
def p_QualifierStringifier(self, p):
"""
Qualifier : STRINGIFIER
"""
p[0] = [IDLInterfaceMember.Special.Stringifier]
def p_Qualifiers(self, p):
"""
Qualifiers : Qualifier
| Specials
"""
p[0] = p[1]
def p_Specials(self, p):
"""
Specials : Special Specials
"""
p[0] = [p[1]]
p[0].extend(p[2])
def p_SpecialsEmpty(self, p):
"""
Specials :
"""
p[0] = []
def p_SpecialGetter(self, p):
"""
Special : GETTER
"""
p[0] = IDLMethod.Special.Getter
def p_SpecialSetter(self, p):
"""
Special : SETTER
"""
p[0] = IDLMethod.Special.Setter
def p_SpecialDeleter(self, p):
"""
Special : DELETER
"""
p[0] = IDLMethod.Special.Deleter
def p_SpecialLegacyCaller(self, p):
"""
Special : LEGACYCALLER
"""
p[0] = IDLMethod.Special.LegacyCaller
def p_OperationRest(self, p):
"""
OperationRest : ReturnType OptionalIdentifier LPAREN ArgumentList RPAREN SEMICOLON
"""
p[0] = (p[1], p[2], p[4])
def p_OptionalIdentifier(self, p):
"""
OptionalIdentifier : IDENTIFIER
"""
p[0] = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
def p_OptionalIdentifierEmpty(self, p):
"""
OptionalIdentifier :
"""
pass
def p_ArgumentList(self, p):
"""
ArgumentList : Argument Arguments
"""
p[0] = [p[1]] if p[1] else []
p[0].extend(p[2])
def p_ArgumentListEmpty(self, p):
"""
ArgumentList :
"""
p[0] = []
def p_Arguments(self, p):
"""
Arguments : COMMA Argument Arguments
"""
p[0] = [p[2]] if p[2] else []
p[0].extend(p[3])
def p_ArgumentsEmpty(self, p):
"""
Arguments :
"""
p[0] = []
def p_Argument(self, p):
"""
Argument : ExtendedAttributeList ArgumentRest
"""
p[0] = p[2]
p[0].addExtendedAttributes(p[1])
def p_ArgumentRestOptional(self, p):
"""
ArgumentRest : OPTIONAL TypeWithExtendedAttributes ArgumentName Default
"""
t = p[2]
assert isinstance(t, IDLType)
# Arg names can be reserved identifiers
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3],
allowForbidden=True)
defaultValue = p[4]
# We can't test t.isAny() here and give it a default value as needed,
# since at this point t is not a fully resolved type yet (e.g. it might
# be a typedef). We'll handle the 'any' case in IDLArgument.complete.
        p[0] = IDLArgument(self.getLocation(p, 3), identifier, t,
                           optional=True, defaultValue=defaultValue,
                           variadic=False)
def p_ArgumentRest(self, p):
"""
ArgumentRest : Type Ellipsis ArgumentName
"""
t = p[1]
assert isinstance(t, IDLType)
# Arg names can be reserved identifiers
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3],
allowForbidden=True)
variadic = p[2]
# We can't test t.isAny() here and give it a default value as needed,
# since at this point t is not a fully resolved type yet (e.g. it might
# be a typedef). We'll handle the 'any' case in IDLArgument.complete.
# variadic implies optional
# Any attributes that precede this may apply to the type, so
# we configure the argument to forward type attributes down instead of producing
# a parse error
        p[0] = IDLArgument(self.getLocation(p, 3), identifier, t,
                           optional=variadic, defaultValue=None,
                           variadic=variadic, allowTypeAttributes=True)
def p_ArgumentName(self, p):
"""
ArgumentName : IDENTIFIER
| ArgumentNameKeyword
"""
p[0] = p[1]
def p_ArgumentNameKeyword(self, p):
"""
ArgumentNameKeyword : ASYNC
| ATTRIBUTE
| CALLBACK
| CONST
| CONSTRUCTOR
| DELETER
| DICTIONARY
| ENUM
| EXCEPTION
| GETTER
| INCLUDES
| INHERIT
| INTERFACE
| ITERABLE
| LEGACYCALLER
| MAPLIKE
| MIXIN
| NAMESPACE
| PARTIAL
| READONLY
| REQUIRED
| SERIALIZER
| SETLIKE
| SETTER
| STATIC
| STRINGIFIER
| TYPEDEF
| UNRESTRICTED
"""
p[0] = p[1]
def p_AttributeName(self, p):
"""
AttributeName : IDENTIFIER
| AttributeNameKeyword
"""
p[0] = p[1]
def p_AttributeNameKeyword(self, p):
"""
AttributeNameKeyword : ASYNC
| REQUIRED
"""
p[0] = p[1]
def p_Ellipsis(self, p):
"""
Ellipsis : ELLIPSIS
"""
p[0] = True
def p_EllipsisEmpty(self, p):
"""
Ellipsis :
"""
p[0] = False
def p_ExceptionMember(self, p):
"""
ExceptionMember : Const
| ExceptionField
"""
pass
def p_ExceptionField(self, p):
"""
ExceptionField : Type IDENTIFIER SEMICOLON
"""
pass
def p_ExtendedAttributeList(self, p):
"""
ExtendedAttributeList : LBRACKET ExtendedAttribute ExtendedAttributes RBRACKET
"""
p[0] = [p[2]]
if p[3]:
p[0].extend(p[3])
def p_ExtendedAttributeListEmpty(self, p):
"""
ExtendedAttributeList :
"""
p[0] = []
def p_ExtendedAttribute(self, p):
"""
ExtendedAttribute : ExtendedAttributeNoArgs
| ExtendedAttributeArgList
| ExtendedAttributeIdent
| ExtendedAttributeNamedArgList
| ExtendedAttributeIdentList
"""
p[0] = IDLExtendedAttribute(self.getLocation(p, 1), p[1])
def p_ExtendedAttributeEmpty(self, p):
"""
ExtendedAttribute :
"""
pass
def p_ExtendedAttributes(self, p):
"""
ExtendedAttributes : COMMA ExtendedAttribute ExtendedAttributes
"""
p[0] = [p[2]] if p[2] else []
p[0].extend(p[3])
def p_ExtendedAttributesEmpty(self, p):
"""
ExtendedAttributes :
"""
p[0] = []
def p_Other(self, p):
"""
Other : INTEGER
| FLOATLITERAL
| IDENTIFIER
| STRING
| OTHER
| ELLIPSIS
| COLON
| SCOPE
| SEMICOLON
| LT
| EQUALS
| GT
| QUESTIONMARK
| DOMSTRING
| BYTESTRING
| USVSTRING
| UTF8STRING
| JSSTRING
| PROMISE
| ANY
| BOOLEAN
| BYTE
| DOUBLE
| FALSE
| FLOAT
| LONG
| NULL
| OBJECT
| OCTET
| OR
| OPTIONAL
| RECORD
| SEQUENCE
| SHORT
| SYMBOL
| TRUE
| UNSIGNED
| VOID
| ArgumentNameKeyword
"""
pass
def p_OtherOrComma(self, p):
"""
OtherOrComma : Other
| COMMA
"""
pass
def p_TypeSingleType(self, p):
"""
Type : SingleType
"""
p[0] = p[1]
def p_TypeUnionType(self, p):
"""
Type : UnionType Null
"""
p[0] = self.handleNullable(p[1], p[2])
def p_TypeWithExtendedAttributes(self, p):
"""
TypeWithExtendedAttributes : ExtendedAttributeList Type
"""
p[0] = p[2].withExtendedAttributes(p[1])
def p_SingleTypeDistinguishableType(self, p):
"""
SingleType : DistinguishableType
"""
p[0] = p[1]
def p_SingleTypeAnyType(self, p):
"""
SingleType : ANY
"""
p[0] = BuiltinTypes[IDLBuiltinType.Types.any]
# Note: Promise<void> is allowed, so we want to parametrize on ReturnType,
# not Type. Promise types can't be null, hence no "Null" in there.
def p_SingleTypePromiseType(self, p):
"""
SingleType : PROMISE LT ReturnType GT
"""
p[0] = IDLPromiseType(self.getLocation(p, 1), p[3])
def p_UnionType(self, p):
"""
UnionType : LPAREN UnionMemberType OR UnionMemberType UnionMemberTypes RPAREN
"""
types = [p[2], p[4]]
types.extend(p[5])
p[0] = IDLUnionType(self.getLocation(p, 1), types)
def p_UnionMemberTypeDistinguishableType(self, p):
"""
UnionMemberType : ExtendedAttributeList DistinguishableType
"""
p[0] = p[2].withExtendedAttributes(p[1])
def p_UnionMemberType(self, p):
"""
UnionMemberType : UnionType Null
"""
p[0] = self.handleNullable(p[1], p[2])
def p_UnionMemberTypes(self, p):
"""
UnionMemberTypes : OR UnionMemberType UnionMemberTypes
"""
p[0] = [p[2]]
p[0].extend(p[3])
def p_UnionMemberTypesEmpty(self, p):
"""
UnionMemberTypes :
"""
p[0] = []
def p_DistinguishableType(self, p):
"""
DistinguishableType : PrimitiveType Null
| ARRAYBUFFER Null
| READABLESTREAM Null
| OBJECT Null
"""
if p[1] == "object":
type = BuiltinTypes[IDLBuiltinType.Types.object]
elif p[1] == "ArrayBuffer":
type = BuiltinTypes[IDLBuiltinType.Types.ArrayBuffer]
elif p[1] == "ReadableStream":
type = BuiltinTypes[IDLBuiltinType.Types.ReadableStream]
else:
type = BuiltinTypes[p[1]]
p[0] = self.handleNullable(type, p[2])
def p_DistinguishableTypeStringType(self, p):
"""
DistinguishableType : StringType Null
"""
p[0] = self.handleNullable(p[1], p[2])
def p_DistinguishableTypeSequenceType(self, p):
"""
DistinguishableType : SEQUENCE LT TypeWithExtendedAttributes GT Null
"""
innerType = p[3]
type = IDLSequenceType(self.getLocation(p, 1), innerType)
p[0] = self.handleNullable(type, p[5])
def p_DistinguishableTypeRecordType(self, p):
"""
DistinguishableType : RECORD LT StringType COMMA TypeWithExtendedAttributes GT Null
"""
keyType = p[3]
valueType = p[5]
type = IDLRecordType(self.getLocation(p, 1), keyType, valueType)
p[0] = self.handleNullable(type, p[7])
def p_DistinguishableTypeScopedName(self, p):
"""
DistinguishableType : ScopedName Null
"""
assert isinstance(p[1], IDLUnresolvedIdentifier)
if p[1].name == "Promise":
raise WebIDLError("Promise used without saying what it's "
"parametrized over",
[self.getLocation(p, 1)])
type = None
try:
if self.globalScope()._lookupIdentifier(p[1]):
obj = self.globalScope()._lookupIdentifier(p[1])
assert not obj.isType()
if obj.isTypedef():
type = IDLTypedefType(self.getLocation(p, 1), obj.innerType,
obj.identifier.name)
elif obj.isCallback() and not obj.isInterface():
type = IDLCallbackType(obj.location, obj)
else:
type = IDLWrapperType(self.getLocation(p, 1), p[1])
p[0] = self.handleNullable(type, p[2])
return
except:
pass
type = IDLUnresolvedType(self.getLocation(p, 1), p[1])
p[0] = self.handleNullable(type, p[2])
def p_ConstType(self, p):
"""
ConstType : PrimitiveType
"""
p[0] = BuiltinTypes[p[1]]
def p_ConstTypeIdentifier(self, p):
"""
ConstType : IDENTIFIER
"""
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
p[0] = IDLUnresolvedType(self.getLocation(p, 1), identifier)
def p_PrimitiveTypeUint(self, p):
"""
PrimitiveType : UnsignedIntegerType
"""
p[0] = p[1]
def p_PrimitiveTypeBoolean(self, p):
"""
PrimitiveType : BOOLEAN
"""
p[0] = IDLBuiltinType.Types.boolean
def p_PrimitiveTypeByte(self, p):
"""
PrimitiveType : BYTE
"""
p[0] = IDLBuiltinType.Types.byte
def p_PrimitiveTypeOctet(self, p):
"""
PrimitiveType : OCTET
"""
p[0] = IDLBuiltinType.Types.octet
def p_PrimitiveTypeFloat(self, p):
"""
PrimitiveType : FLOAT
"""
p[0] = IDLBuiltinType.Types.float
    def p_PrimitiveTypeUnrestrictedFloat(self, p):
"""
PrimitiveType : UNRESTRICTED FLOAT
"""
p[0] = IDLBuiltinType.Types.unrestricted_float
def p_PrimitiveTypeDouble(self, p):
"""
PrimitiveType : DOUBLE
"""
p[0] = IDLBuiltinType.Types.double
    def p_PrimitiveTypeUnrestrictedDouble(self, p):
"""
PrimitiveType : UNRESTRICTED DOUBLE
"""
p[0] = IDLBuiltinType.Types.unrestricted_double
def p_StringType(self, p):
"""
StringType : BuiltinStringType
"""
p[0] = BuiltinTypes[p[1]]
def p_BuiltinStringTypeDOMString(self, p):
"""
BuiltinStringType : DOMSTRING
"""
p[0] = IDLBuiltinType.Types.domstring
def p_BuiltinStringTypeBytestring(self, p):
"""
BuiltinStringType : BYTESTRING
"""
p[0] = IDLBuiltinType.Types.bytestring
def p_BuiltinStringTypeUSVString(self, p):
"""
BuiltinStringType : USVSTRING
"""
p[0] = IDLBuiltinType.Types.usvstring
def p_BuiltinStringTypeUTF8String(self, p):
"""
BuiltinStringType : UTF8STRING
"""
p[0] = IDLBuiltinType.Types.utf8string
def p_BuiltinStringTypeJSString(self, p):
"""
BuiltinStringType : JSSTRING
"""
p[0] = IDLBuiltinType.Types.jsstring
def p_UnsignedIntegerTypeUnsigned(self, p):
"""
UnsignedIntegerType : UNSIGNED IntegerType
"""
# Adding one to a given signed integer type gets you the unsigned type:
p[0] = p[2] + 1
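    # Illustration (an assumption about the enum layout, not stated in this
    # file): the integer members of IDLBuiltinType.Types are expected to come
    # in signed/unsigned pairs, so the arithmetic above means e.g.
    #     IDLBuiltinType.Types.short + 1 == IDLBuiltinType.Types.unsigned_short
    #     IDLBuiltinType.Types.long + 1 == IDLBuiltinType.Types.unsigned_long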
def p_UnsignedIntegerType(self, p):
"""
UnsignedIntegerType : IntegerType
"""
p[0] = p[1]
def p_IntegerTypeShort(self, p):
"""
IntegerType : SHORT
"""
p[0] = IDLBuiltinType.Types.short
def p_IntegerTypeLong(self, p):
"""
IntegerType : LONG OptionalLong
"""
if p[2]:
p[0] = IDLBuiltinType.Types.long_long
else:
p[0] = IDLBuiltinType.Types.long
def p_OptionalLong(self, p):
"""
OptionalLong : LONG
"""
p[0] = True
def p_OptionalLongEmpty(self, p):
"""
OptionalLong :
"""
p[0] = False
def p_Null(self, p):
"""
Null : QUESTIONMARK
|
"""
if len(p) > 1:
p[0] = self.getLocation(p, 1)
else:
p[0] = None
def p_ReturnTypeType(self, p):
"""
ReturnType : Type
"""
p[0] = p[1]
def p_ReturnTypeVoid(self, p):
"""
ReturnType : VOID
"""
p[0] = BuiltinTypes[IDLBuiltinType.Types.void]
def p_ScopedName(self, p):
"""
ScopedName : AbsoluteScopedName
| RelativeScopedName
"""
p[0] = p[1]
def p_AbsoluteScopedName(self, p):
"""
AbsoluteScopedName : SCOPE IDENTIFIER ScopedNameParts
"""
assert False
pass
def p_RelativeScopedName(self, p):
"""
RelativeScopedName : IDENTIFIER ScopedNameParts
"""
assert not p[2] # Not implemented!
p[0] = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
def p_ScopedNameParts(self, p):
"""
ScopedNameParts : SCOPE IDENTIFIER ScopedNameParts
"""
assert False
pass
def p_ScopedNamePartsEmpty(self, p):
"""
ScopedNameParts :
"""
p[0] = None
def p_ExtendedAttributeNoArgs(self, p):
"""
ExtendedAttributeNoArgs : IDENTIFIER
"""
p[0] = (p[1],)
def p_ExtendedAttributeArgList(self, p):
"""
ExtendedAttributeArgList : IDENTIFIER LPAREN ArgumentList RPAREN
"""
p[0] = (p[1], p[3])
def p_ExtendedAttributeIdent(self, p):
"""
ExtendedAttributeIdent : IDENTIFIER EQUALS STRING
| IDENTIFIER EQUALS IDENTIFIER
"""
p[0] = (p[1], p[3])
def p_ExtendedAttributeNamedArgList(self, p):
"""
ExtendedAttributeNamedArgList : IDENTIFIER EQUALS IDENTIFIER LPAREN ArgumentList RPAREN
"""
p[0] = (p[1], p[3], p[5])
def p_ExtendedAttributeIdentList(self, p):
"""
ExtendedAttributeIdentList : IDENTIFIER EQUALS LPAREN IdentifierList RPAREN
"""
p[0] = (p[1], p[4])
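    # Hedged examples of the tuple shapes the five extended-attribute
    # productions above yield (identifiers illustrative; string tokens are
    # assumed to arrive unquoted from the lexer):
    #     [Throws]                   -> ('Throws',)
    #     [Pref="dom.foo.enabled"]   -> ('Pref', 'dom.foo.enabled')
    #     [PutForwards=name]         -> ('PutForwards', 'name')
    #     [Exposed=(Window,Worker)]  -> ('Exposed', ['Window', 'Worker'])
    #     [Func(long x)]             -> ('Func', <ArgumentList>)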
def p_IdentifierList(self, p):
"""
IdentifierList : IDENTIFIER Identifiers
"""
idents = list(p[2])
# This is only used for identifier-list-valued extended attributes, and if
# we're going to restrict to IDENTIFIER here we should at least allow
# escaping with leading '_' as usual for identifiers.
ident = p[1]
if ident[0] == '_':
ident = ident[1:]
idents.insert(0, ident)
p[0] = idents
def p_IdentifiersList(self, p):
"""
Identifiers : COMMA IDENTIFIER Identifiers
"""
idents = list(p[3])
# This is only used for identifier-list-valued extended attributes, and if
# we're going to restrict to IDENTIFIER here we should at least allow
# escaping with leading '_' as usual for identifiers.
ident = p[2]
if ident[0] == '_':
ident = ident[1:]
idents.insert(0, ident)
p[0] = idents
def p_IdentifiersEmpty(self, p):
"""
Identifiers :
"""
p[0] = []
def p_error(self, p):
if not p:
raise WebIDLError("Syntax Error at end of file. Possibly due to missing semicolon(;), braces(}) or both",
[self._filename])
else:
raise WebIDLError("invalid syntax", [Location(self.lexer, p.lineno, p.lexpos, self._filename)])
def __init__(self, outputdir='', lexer=None):
Tokenizer.__init__(self, lexer)
logger = SqueakyCleanLogger()
try:
self.parser = yacc.yacc(module=self, errorlog=logger, debug=False)
finally:
logger.reportGrammarErrors()
self._globalScope = IDLScope(BuiltinLocation("<Global Scope>"), None, None)
self._installBuiltins(self._globalScope)
self._productions = []
self._filename = "<builtin>"
self.lexer.input(Parser._builtins)
self._filename = None
self.parser.parse(lexer=self.lexer, tracking=True)
def _installBuiltins(self, scope):
assert isinstance(scope, IDLScope)
# range omits the last value.
for x in range(IDLBuiltinType.Types.ArrayBuffer, IDLBuiltinType.Types.Float64Array + 1):
builtin = BuiltinTypes[x]
name = builtin.name
typedef = IDLTypedef(BuiltinLocation("<builtin type>"), scope, builtin, name)
    @staticmethod
def handleNullable(type, questionMarkLocation):
if questionMarkLocation is not None:
type = IDLNullableType(questionMarkLocation, type)
return type
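    # Hedged example: for the IDL fragment "long?", p_Null returns the
    # location of the QUESTIONMARK token, so this wraps the builtin type as
    #     IDLNullableType(<location of '?'>, long)
    # while for plain "long" the location is None and the type is returned
    # unchanged.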
def parse(self, t, filename=None):
self._filename = filename
        self.lexer.input(t.decode(encoding='utf-8'))
# for tok in iter(self.lexer.token, None):
# print tok
self._productions.extend(self.parser.parse(lexer=self.lexer, tracking=True))
self._filename = None
def finish(self):
# If we have interfaces that are iterable, create their
# iterator interfaces and add them to the productions array.
interfaceStatements = []
for p in self._productions:
if isinstance(p, IDLInterface):
interfaceStatements.append(p)
iterableIteratorIface = None
for iface in interfaceStatements:
iterable = None
# We haven't run finish() on the interface yet, so we don't know
# whether our interface is maplike/setlike/iterable or not. This
# means we have to loop through the members to see if we have an
# iterable member.
for m in iface.members:
if isinstance(m, IDLIterable):
iterable = m
break
if iterable and iterable.isPairIterator():
def simpleExtendedAttr(str):
return IDLExtendedAttribute(iface.location, (str, ))
nextMethod = IDLMethod(
iface.location,
IDLUnresolvedIdentifier(iface.location, "next"),
BuiltinTypes[IDLBuiltinType.Types.object], [])
nextMethod.addExtendedAttributes([simpleExtendedAttr("Throws")])
itr_ident = IDLUnresolvedIdentifier(iface.location,
iface.identifier.name + "Iterator")
toStringTag = iface.identifier.name + " Iterator"
itr_iface = IDLInterface(iface.location, self.globalScope(),
itr_ident, None, [nextMethod],
isKnownNonPartial=True,
classNameOverride=toStringTag,
toStringTag=toStringTag)
itr_iface.addExtendedAttributes([simpleExtendedAttr("NoInterfaceObject")])
# Make sure the exposure set for the iterator interface is the
# same as the exposure set for the iterable interface, because
# we're going to generate methods on the iterable that return
# instances of the iterator.
itr_iface._exposureGlobalNames = set(iface._exposureGlobalNames)
# Always append generated iterable interfaces after the
# interface they're a member of, otherwise nativeType generation
# won't work correctly.
itr_iface.iterableInterface = iface
self._productions.append(itr_iface)
iterable.iteratorType = IDLWrapperType(iface.location, itr_iface)
# Make sure we finish IDLIncludesStatements before we finish the
# IDLInterfaces.
# XXX khuey hates this bit and wants to nuke it from orbit.
includesStatements = [p for p in self._productions if
isinstance(p, IDLIncludesStatement)]
otherStatements = [p for p in self._productions if
not isinstance(p, IDLIncludesStatement)]
for production in includesStatements:
production.finish(self.globalScope())
for production in otherStatements:
production.finish(self.globalScope())
# Do any post-finish validation we need to do
for production in self._productions:
production.validate()
# De-duplicate self._productions, without modifying its order.
seen = set()
result = []
for p in self._productions:
if p not in seen:
seen.add(p)
result.append(p)
return result
def reset(self):
return Parser(lexer=self.lexer)
# Builtin IDL defined by WebIDL
_builtins = """
typedef unsigned long long DOMTimeStamp;
typedef (ArrayBufferView or ArrayBuffer) BufferSource;
"""
def main():
# Parse arguments.
from optparse import OptionParser
usageString = "usage: %prog [options] files"
o = OptionParser(usage=usageString)
o.add_option("--cachedir", dest='cachedir', default=None,
help="Directory in which to cache lex/parse tables.")
o.add_option("--verbose-errors", action='store_true', default=False,
help="When an error happens, display the Python traceback.")
(options, args) = o.parse_args()
if len(args) < 1:
o.error(usageString)
fileList = args
baseDir = os.getcwd()
# Parse the WebIDL.
parser = Parser(options.cachedir)
try:
for filename in fileList:
fullPath = os.path.normpath(os.path.join(baseDir, filename))
f = open(fullPath, 'rb')
lines = f.readlines()
f.close()
print(fullPath)
parser.parse(''.join(lines), fullPath)
parser.finish()
except WebIDLError as e:
if options.verbose_errors:
traceback.print_exc()
else:
print(e)
if __name__ == '__main__':
main()
| mpl-2.0 |
Alwnikrotikz/visvis.dev | core/orientation.py | 5 | 6451 | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
""" Module baseWobjects
Defines the mix class for orientable wobjects.
"""
import numpy as np
from visvis.core import misc
from visvis.pypoints import Point, is_Point
# todo: is this the best way to allow users to orient their objects,
# or might there be other ways?
class OrientationForWobjects_mixClass(object):
""" OrientationForWobjects_mixClass()
This class can be mixed with a wobject class to enable easy
orientation of the objects in space. It makes use of the
    transformation list that each wobject has.
The functionality provided by this class is not made part of the
Wobject class because it does not make sense for all kind of wobjects
(for example lines and images). The OrientableMesh is a class that
inherits from this class.
"""
def __init__(self):
# Set current and reference direction (default up)
self._refDirection = Point(0,0,1)
self._direction = Point(0,0,1)
# Create transformations
self._scaleTransform = misc.Transform_Scale()
self._translateTransform = misc.Transform_Translate()
self._rotateTransform = misc.Transform_Rotate()
self._directionTransform = misc.Transform_Rotate()
# Append transformations to THE list
self.transformations.append(self._translateTransform)
self.transformations.append(self._directionTransform)
self.transformations.append(self._rotateTransform)
self.transformations.append(self._scaleTransform)
@misc.PropWithDraw
def scaling():
""" Get/Set the scaling of the object. Can be set using
a 3-element tuple, a 3D point, or a scalar. The getter always
returns a Point.
"""
def fget(self):
s = self._scaleTransform
return Point(s.sx, s.sy, s.sz)
def fset(self, value):
if isinstance(value, (float, int)):
self._scaleTransform.sx = float(value)
self._scaleTransform.sy = float(value)
self._scaleTransform.sz = float(value)
elif isinstance(value, (list, tuple)) and len(value) == 3:
self._scaleTransform.sx = float(value[0])
self._scaleTransform.sy = float(value[1])
self._scaleTransform.sz = float(value[2])
elif is_Point(value) and value.ndim == 3:
self._scaleTransform.sx = value.x
self._scaleTransform.sy = value.y
self._scaleTransform.sz = value.z
else:
raise ValueError('Scaling should be a scalar, 3D Point, or 3-element tuple.')
return locals()
@misc.PropWithDraw
def translation():
""" Get/Set the transaltion of the object. Can be set using
a 3-element tuple or a 3D point. The getter always returns
a Point.
"""
def fget(self):
d = self._translateTransform
return Point(d.dx, d.dy, d.dz)
def fset(self, value):
if isinstance(value, (list, tuple)) and len(value) == 3:
self._translateTransform.dx = value[0]
self._translateTransform.dy = value[1]
self._translateTransform.dz = value[2]
elif is_Point(value) and value.ndim == 3:
self._translateTransform.dx = value.x
self._translateTransform.dy = value.y
self._translateTransform.dz = value.z
else:
raise ValueError('Translation should be a 3D Point or 3-element tuple.')
return locals()
@misc.PropWithDraw
def direction():
""" Get/Set the direction (i.e. orientation) of the object. Can
be set using a 3-element tuple or a 3D point. The getter always
returns a Point.
"""
def fget(self):
return self._direction.copy()
def fset(self, value):
# Store direction
if isinstance(value, (list, tuple)) and len(value) == 3:
self._direction = Point(*tuple(value))
elif is_Point(value) and value.ndim == 3:
self._direction = value
else:
raise ValueError('Direction should be a 3D Point or 3-element tuple.')
# Normalize
if self._direction.norm()==0:
raise ValueError('Direction vector must have a non-zero length.')
self._direction = self._direction.normalize()
# Create ref point
refPoint = self._refDirection
# Convert to rotation. The cross product of two vectors results
# in a vector normal to both vectors. This is the axis of rotation
# over which the minimal rotation is achieved.
axis = self._direction.cross(refPoint)
if axis.norm() < 0.1:
if self._direction.z > 0:
# No rotation
self._directionTransform.ax = 0.0
self._directionTransform.ay = 0.0
self._directionTransform.az = 1.0
self._directionTransform.angle = 0.0
else:
# Flipped
self._directionTransform.ax = 1.0
self._directionTransform.ay = 0.0
self._directionTransform.az = 0.0
self._directionTransform.angle = np.pi
else:
axis = axis.normalize()
angle = -refPoint.angle(self._direction)
self._directionTransform.ax = axis.x
self._directionTransform.ay = axis.y
self._directionTransform.az = axis.z
self._directionTransform.angle = angle * 180 / np.pi
return locals()
@misc.PropWithDraw
def rotation():
""" Get/Set the rotation of the object (in degrees, around its
direction vector).
"""
def fget(self):
return self._rotateTransform.angle
def fset(self, value):
self._rotateTransform.angle = float(value)
return locals()
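# Hedged usage sketch (not in the original module): a wobject that mixes this
# class in, such as visvis' OrientableMesh, can be posed via the properties
# defined above. The mesh construction below is illustrative only.
#
#     mesh = vv.OrientableMesh(axes, vertices, faces)
#     mesh.scaling = 2.0            # uniform scale
#     mesh.translation = (1, 0, 0)  # move one unit along x
#     mesh.direction = (0, 1, 0)    # orient along +y
#     mesh.rotation = 45            # degrees around the direction vector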
| bsd-3-clause |
juanc27/myfavteam | news/nba_news.py | 2 | 9360 | from newspaper import Article, Config
from bs4 import BeautifulSoup
from urllib2 import urlopen
import datetime
import re
from collections import OrderedDict
import pytz
from pytz import timezone
from dateutil.parser import parse
nba_url = "http://www.nba.com"
espn_url = "http://espn.go.com"
#lxml didn't work for espn
my_config = Config()
my_config.parser_class = "soup"
def normalize_url(url, link):
if link.startswith("http://"):
return link
elif link.startswith("/"):
return url + link
else:
return url + "/" + link
def get_soup_from_url(url):
response = urlopen(url)
try:
data = response.read()
except:
print "**error visiting {}".format(url)
raise
return BeautifulSoup(data)
def print_article_dict(dict):
print ""
for field, value in dict.items():
if field == "keywords":
print "keywords: {}".format(value)
continue
if value != None:
print "{}: {} ". format(field, value.encode('utf-8', 'replace'))
else:
print "{}: {} ". format(field, value)
""" search string for timezone keywords lind PT and return a the date formated with timezone
"""
def format_date (date_string, date):
if date_string.find("PT") > -1:
tzone = "US/Pacific"
elif date_string.find("CT") > -1:
tzone = "US/Central"
elif date_string.find("ET") > -1:
tzone = "US/Eastern"
    else:
        # No timezone keyword was found; return the date unchanged rather
        # than hit a NameError on the undefined tzone below.
        print "timezone from date_string not found"
        return date
    return (pytz.timezone(tzone).localize(date, is_dst=True))
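# Hedged example (output illustrative): combining dateutil's parse with
# format_date localizes a naive ESPN-style timestamp, e.g.
#
#     dt = parse("2015-03-03 7:30 PM")
#     format_date("7:30 PM ET", dt)
#     # -> datetime(2015, 3, 3, 19, 30, tzinfo=<US/Eastern>)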
def getESPN_date_from_article(soup):
mday = soup.find("div", class_ = "monthday")
if mday != None:
date = mday.find_previous("span").text
time = mday.find_next("div", class_ = "time").text
timeofday = mday.find_next("div", class_ = "timeofday").text
date = parse(date + " " + time + " " + timeofday)
date = format_date (timeofday, date)
resp_date = date.isoformat()
else:
dates = soup.find_all("div", class_ = "date")
resp_date = None
for date in dates:
txt = date.text
dt = None
if txt.startswith("Publish Date: Today, "):
dt = datetime.date.today()
#print "{} {}".format(dt.isoformat(), txt[21:32])
dt = parse("{} {}".format(dt.isoformat(), txt[21:32]))
elif txt.startswith("Publish Date: Yesterday, "):
dt = datetime.date.today()
dt = dt - datetime.timedelta(days=1)
#print "{} {}".format(dt.isoformat(), txt[25:36])
dt = parse("{} {}".format(dt.isoformat(), txt[25:36]))
elif txt.startswith("Publish Date: "):
#print "{}".format(txt[14:38])
dt = parse(txt[14:38])
if dt != None:
date = format_date(txt, dt)
resp_date = date.isoformat()
break
else:
continue
if resp_date == None:
gtl = soup.find("div", class_ = "game-time-location")
if gtl != None:
dt = gtl.find_next("p")
if dt != None:
date = parse(dt.text)
date = format_date (dt.text, date)
resp_date = date.isoformat()
return resp_date
def getESPN_dot_com_team_news(team_short_name = None, visited_links = []):
news = list()
if team_short_name == None:
return
team_short_name = team_short_name.lower()
try:
espn_team_blog = short_name_to_espn_blog[team_short_name]
except:
return None
url = espn_url + "/" + espn_team_blog + "/"
soup = get_soup_from_url(url)
if espn_team_blog.find('blog') > -1:
headers = soup.find_all("div", class_ = "mod-header")
else:
headers = soup.find_all("li", class_ = "result")
for header in headers:
resp = OrderedDict()
h3 = header.find("h3")
if h3 == None:
continue
link = h3.find("a").get("href")
if link == None:
continue
url = normalize_url(espn_url, link)
if url in visited_links:
continue
#avoid blog post from other teams
#if url.find('blog') > -1:
# if url.find(team_short_name) == -1:
# continue
article = Article(url, my_config)
article.download()
article.parse()
resp['title'] = article.title
resp['link'] = url
resp['description'] = article.meta_description
resp['text'] = article.text
resp['image'] = article.top_image
resp['keywords'] = article.meta_keywords
#extra fields not provided by newspaper (author and date)
article_soup = get_soup_from_url(url)
        if len(article.authors) < 1:
author = article_soup.find("cite", class_ = "byline")
if author != None:
author_txt = author.find("a")
if author_txt != None:
author = author_txt.text
else:
#print "found this as an author {}".format(author.text
author = author.text
else:
author = article_soup.find("cite", class_ = "source")
if author != None:
author = author.text
resp['author'] = author
else:
resp['author'] = article.authors[0]
resp['date'] = getESPN_date_from_article(article_soup)
print_article_dict(resp)
news.append(resp)
return news
"""
Converts nba team short names to espn style names used in blogs.
example: warriors -> "blog/golden-state-warriors"
"""
short_name_to_espn_blog = {
'celtics' : 'blog/boston/celtics',
'nets' : 'blog/brooklyn-nets',
    'knicks' : 'blog/new-york-knicks',
'76ers' : 'nba/team/_/name/phi/philadelphia-76ers',
'raptors' : 'nba/team/_/name/tor/toronto-raptors',
'bulls' : 'blog/chicago-bulls',
'cavaliers' : 'blog/cleveland-cavaliers',
'pistons' : 'nba/team/_/name/det/detroit-pistons',
'pacers' : 'nba/team/_/name/ind/indiana-pacers',
'bucks' : 'nba/team/_/name/mil/milwaukee-bucks',
'hawks' : 'nba/team/_/name/atl/atlanta-hawks',
'hornets' : 'nba/team/_/name/cha/charlotte-hornets',
'heat' : 'blog/truehoopmiamiheat',
'magic' : 'nba/team/_/name/orl/orlando-magic',
'wizards' : 'nba/team/_/name/wsh/washington-wizards',
'warriors' : 'blog/golden-state-warriors',
'clippers' : 'blog/los-angeles-clippers',
'lakers' : 'blog/los-angeles-lakers',
'suns' : 'nba/team/_/name/phx/phoenix-suns',
'kings' : 'nba/team/_/name/sac/sacramento-kings',
'mavericks' : 'blog/dallas-mavericks',
'rockets' : 'nba/team/_/name/hou/houston-rockets',
'grizzlies' : 'nba/team/_/name/mem/memphis-grizzlies',
'pelicans' : 'nba/team/_/name/no/new-orleans-pelicans',
'spurs' : 'nba/team/_/name/sa/san-antonio-spurs',
'nuggets' : 'nba/team/_/name/den/denver-nuggets',
'timberwolves' : 'nba/team/_/name/min/minnesota-timberwolves',
'thunder' : 'nba/team/_/name/okc/oklahoma-city-thunder',
'blazers' : 'nba/team/_/name/por/portland-trail-blazers',
'jazz' : 'nba/team/_/name/utah/utah-jazz',
}
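# Hedged example: the table above maps an nba.com-style short name to the path
# component ESPN uses for that team, e.g.
#
#     short_name_to_espn_blog['warriors']  # -> 'blog/golden-state-warriors'
#     espn_url + '/' + short_name_to_espn_blog['spurs'] + '/'
#     # -> 'http://espn.go.com/nba/team/_/name/sa/san-antonio-spurs/'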
def getNBA_dot_com_team_news(team_short_name = None, visited_links = []):
news = list()
if team_short_name == None:
return
team_short_name = team_short_name.lower()
url = nba_url + "/" + team_short_name + "/news"
soup = get_soup_from_url(url)
headers = soup.find_all("div", class_ = re.compile("post__information"))
for header in headers:
resp = OrderedDict()
title = header.find("div", class_ = "post__title")
if title == None:
continue
link = title.find("a").get("href")
if link == None:
continue
url = normalize_url(nba_url, link)
if url in visited_links:
continue
#avoiding articles in other languages for nba.com/china/
if url.find("/china/") > -1:
continue
article = Article(url, my_config)
article.download()
article.parse()
resp['title'] = article.title
resp['link'] = url
resp['description'] = article.meta_description
resp['text'] = article.text
resp['image'] = article.top_image
resp['keywords'] = article.meta_keywords
#nba.com doesn't show a clear author for articles
resp['author'] = "nba.com"
article_soup = get_soup_from_url(url)
date = article_soup.find("div", class_ = "author-block__post-date")
resp['date'] = None
if date != None:
txt = date.text
if txt.startswith("Posted: "):
#print "{}".format(txt[8:])
dt = parse(txt[8:])
#nba.com doesn't provide the time, we will use now
now = datetime.datetime.now(pytz.timezone("US/Eastern"))
dt = dt.replace(hour=now.hour, minute=now.minute,
second=now.second, tzinfo=now.tzinfo)
resp['date'] = dt.isoformat()
print_article_dict(resp)
news.append(resp)
return news
if __name__ == "__main__":
# getESPN_dot_com_team_news("warriors")
getESPN_dot_com_team_news("heat")
getNBA_dot_com_team_news("heat")
| mit |
rollokb/django-mailgun | django_mailgun.py | 1 | 5894 | from __future__ import unicode_literals
import six
import requests
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.message import sanitize_address
from requests.packages.urllib3.filepost import encode_multipart_formdata
__version__ = '0.7.1'
version = '0.7.1'
# A mapping of smtp headers to API key names, along
# with a callable to transform them somehow (if nec.)
#
# https://documentation.mailgun.com/user_manual.html#sending-via-smtp
# https://documentation.mailgun.com/api-sending.html#sending
#
# structure is SMTP_HEADER: (api_name, data_transform_function)
HEADERS_MAP = {
'X-Mailgun-Tag': ('o:tag', lambda x: x),
'X-Mailgun-Campaign-Id': ('o:campaign', lambda x: x),
'X-Mailgun-Dkim': ('o:dkim', lambda x: x),
'X-Mailgun-Deliver-By': ('o:deliverytime', lambda x: x),
'X-Mailgun-Drop-Message': ('o:testmode', lambda x: x),
'X-Mailgun-Track': ('o:tracking', lambda x: x),
'X-Mailgun-Track-Clicks': ('o:tracking-clicks', lambda x: x),
'X-Mailgun-Track-Opens': ('o:tracking-opens', lambda x: x),
'X-Mailgun-Variables': ('v:my-var', lambda x: x),
}
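# Hedged example (header values illustrative): SMTP-style headers set on a
# Django EmailMessage are what _map_smtp_headers_to_api_parameters() below
# turns into Mailgun API tuples:
#
#     msg = EmailMessage(subject='Hi', body='...', to=['a@example.com'],
#                        headers={'X-Mailgun-Tag': ['signup', 'welcome']})
#     # -> [('o:tag', 'signup'), ('o:tag', 'welcome')]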
class MailgunAPIError(Exception):
pass
class MailgunBackend(BaseEmailBackend):
"""A Django Email backend that uses mailgun.
"""
def __init__(self, fail_silently=False, *args, **kwargs):
access_key, server_name = (kwargs.pop('access_key', None),
kwargs.pop('server_name', None))
super(MailgunBackend, self).__init__(
fail_silently=fail_silently,
*args, **kwargs)
try:
self._access_key = access_key or getattr(settings, 'MAILGUN_ACCESS_KEY')
self._server_name = server_name or getattr(settings, 'MAILGUN_SERVER_NAME')
except AttributeError:
if fail_silently:
                self._access_key = self._server_name = None
else:
raise
self._api_url = "https://api.mailgun.net/v3/%s/" % self._server_name
self._headers_map = HEADERS_MAP
def open(self):
"""Stub for open connection, all sends are done over HTTP POSTs
"""
pass
def close(self):
"""Close any open HTTP connections to the API server.
"""
pass
def _map_smtp_headers_to_api_parameters(self, email_message):
"""
Map the values passed in SMTP headers to API-ready
2-item tuples present in HEADERS_MAP
header values must be a single string or list or tuple of strings
:return: 2-item tuples of the form (api_name, api_values)
"""
api_data = []
for smtp_key, api_transformer in six.iteritems(self._headers_map):
data_to_transform = email_message.extra_headers.pop(smtp_key, None)
if data_to_transform is not None:
if type(data_to_transform) in (list, tuple):
# map each value in the tuple/list
for data in data_to_transform:
api_data.append((api_transformer[0], api_transformer[1](data)))
else:
# we only have one value
api_data.append((api_transformer[0], api_transformer[1](data_to_transform)))
return api_data
def _send(self, email_message):
"""A helper method that does the actual sending."""
if not email_message.recipients():
return False
from_email = sanitize_address(email_message.from_email, email_message.encoding)
recipients = [sanitize_address(addr, email_message.encoding)
for addr in email_message.recipients()]
try:
post_data = []
post_data.append(('to', (",".join(recipients)),))
post_data.append(('text', email_message.body,))
post_data.append(('subject', email_message.subject,))
post_data.append(('from', from_email,))
# get our recipient variables if they were passed in
recipient_variables = email_message.extra_headers.pop('recipient_variables', None)
if recipient_variables is not None:
post_data.append(('recipient-variables', recipient_variables, ))
for name, value in self._map_smtp_headers_to_api_parameters(email_message):
post_data.append((name, value, ))
if hasattr(email_message, 'alternatives') and email_message.alternatives:
for alt in email_message.alternatives:
if alt[1] == 'text/html':
post_data.append(('html', alt[0],))
break
if email_message.attachments:
for attachment in email_message.attachments:
post_data.append(('attachment', (attachment[0], attachment[1],)))
content, header = encode_multipart_formdata(post_data)
headers = {'Content-Type': header}
else:
content = post_data
headers = None
response = requests.post(self._api_url + "messages",
auth=("api", self._access_key),
data=content, headers=headers)
except:
if not self.fail_silently:
raise
return False
if response.status_code != 200:
if not self.fail_silently:
raise MailgunAPIError(response)
return False
return True
def send_messages(self, email_messages):
"""Sends one or more EmailMessage objects and returns the number of
email messages sent.
"""
if not email_messages:
return
num_sent = 0
for message in email_messages:
if self._send(message):
num_sent += 1
return num_sent
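# Hedged usage sketch (setting names per this module; addresses hypothetical):
#
#     # settings.py
#     EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
#     MAILGUN_ACCESS_KEY = 'key-...'
#     MAILGUN_SERVER_NAME = 'mg.example.com'
#
#     # anywhere in the project
#     from django.core.mail import send_mail
#     send_mail('Subject', 'Body', 'from@example.com', ['to@example.com'])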
| mit |
opendatateam/udata | udata/frontend/__init__.py | 1 | 1614 | import inspect
import logging
from importlib import import_module
from jinja2 import Markup
from udata import assets, entrypoints
from .markdown import UdataCleaner, init_app as init_markdown
log = logging.getLogger(__name__)
class SafeMarkup(Markup):
    '''Markup objects bypass Jinja's escaping. This override sanitizes the resulting HTML.'''
def __new__(cls, base, *args, **kwargs):
cleaner = UdataCleaner()
return super().__new__(cls, cleaner.clean(base), *args, **kwargs)
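# Hedged example: unlike a plain Markup, SafeMarkup runs its input through
# UdataCleaner before Jinja treats it as safe, so (exact output depends on the
# cleaner's whitelist):
#
#     SafeMarkup('<b>ok</b><script>alert(1)</script>')
#     # -> Markup('<b>ok</b>') with the script tag stripped or escaped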
def _load_views(app, module):
views = module if inspect.ismodule(module) else import_module(module)
blueprint = getattr(views, 'blueprint', None)
if blueprint:
app.register_blueprint(blueprint)
VIEWS = ['core.storages', 'core.tags', 'admin']
def init_app(app, views=None):
views = views or VIEWS
init_markdown(app)
for view in views:
_load_views(app, 'udata.{}.views'.format(view))
# Load all plugins views and blueprints
for module in entrypoints.get_enabled('udata.views', app).values():
_load_views(app, module)
# Load all plugins views and blueprints
for module in entrypoints.get_enabled('udata.front', app).values():
front_module = module if inspect.ismodule(module) else import_module(module)
front_module.init_app(app)
# Load core manifest
with app.app_context():
assets.register_manifest('udata')
for dist in entrypoints.get_plugins_dists(app, 'udata.views'):
if assets.has_manifest(dist.project_name):
assets.register_manifest(dist.project_name)
| agpl-3.0 |
CoDEmanX/ArangoDB | 3rdParty/V8-4.3.61/third_party/python_26/Lib/encodings/iso2022_jp_2.py | 816 | 1061 | #
# iso2022_jp_2.py: Python Unicode Codec for ISO2022_JP_2
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_2')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='iso2022_jp_2',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
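# Hedged usage sketch: codecs in this package are reached through Python's
# codec registry rather than imported directly, e.g. (bytes illustrative):
#
#     u'\u3042'.encode('iso2022_jp_2')          # -> '\x1b$B$"\x1b(B'
#     '\x1b$B$"\x1b(B'.decode('iso2022_jp_2')   # -> u'\u3042'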
| apache-2.0 |
Zen-CODE/kivybits | Examples/DynamicUI/main.py | 1 | 3087 | """
Small app demonstrating a dynamic UI
Author: ZenCODE
Date: 22/10/2013
"""
from kivy.lang import Builder
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.factory import Factory
class DataSource(object):
"""
This class would be an abstraction of your data source:
the MySQLdb in this case.
"""
def get_data(self):
"""Return the data. We use a list of dicts representing a list of rows
"""
return [{"image": "kivy1.png",
"row_id": 1,
"header": "Question 1",
"type": "Label",
"value_name": "text",
"value": "My Text"},
{"image": "kivy2.png",
"row_id": 2,
"header": "Question 2",
"type": "Button",
"value_name": "text",
"value": "Button"},
{"image": "kivy1.png",
"row_id": 3,
"header": "Question 3",
"type": "CheckBox",
"value_name": "active",
"value": "True"}]
Builder.load_string('''
<QuestionWidget>:
ques_image: ques_image # These do your mapping to the ObjectProperty
header_label: header_label
box_container: box_container
orientation: "vertical"
Image:
id: ques_image
Label:
id: header_label
BoxLayout:
id: box_container
''')
class QuestionWidget(BoxLayout):
"""
This widget would represent each Question
"""
def build(self, data_dict):
"""Build the widget based on the dictionary from the data source"""
# The widgets are part of every instance
self.ques_image.source = data_dict["image"]
self.header_label.text = data_dict["header"]
# But this content is generated dynamically
self.box_container.add_widget(self.get_content(data_dict))
@staticmethod
def get_content(data_dict):
"""Returns the instance specific widgets for the box_layout"""
        # We get the class based on its name as registered in the Factory and instantiate it
        content = Factory.get(data_dict["type"])()
        # We now set any of its properties and return it
setattr(content, data_dict["value_name"], data_dict["value"])
return content
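    # Hedged note: Factory.get('CheckBox')() above is equivalent to importing
    # kivy.uix.checkbox.CheckBox and instantiating it; the registrations in
    # TestApp.__init__ below are what make these names resolvable, e.g.
    #     Factory.get('Label')()  # -> a kivy.uix.label.Label instance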
class TestApp(App):
def __init__(self, **kwargs):
"""
On initialization, register the classes we want to create dynamically
via the Factory object
"""
super(TestApp, self).__init__(**kwargs)
Factory.register('Label', module='kivy.uix.label')
Factory.register('Button', module='kivy.uix.button')
Factory.register('CheckBox', module='kivy.uix.checkbox')
def build(self):
container = BoxLayout() # or screen, carousel etc.
for item in DataSource().get_data():
ques_widget = QuestionWidget()
ques_widget.build(item)
container.add_widget(ques_widget)
return container
if __name__ == "__main__":
TestApp().run()
| mit |
a-parhom/edx-platform | openedx/core/djangoapps/credit/signals.py | 18 | 3705 | """
This file contains receivers of course publication signals.
"""
import logging
from django.dispatch import receiver
from django.utils import timezone
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.signals.signals import COURSE_GRADE_CHANGED
log = logging.getLogger(__name__)
def on_course_publish(course_key):
"""
Will receive a delegated 'course_published' signal from cms/djangoapps/contentstore/signals.py
and kick off a celery task to update the credit course requirements.
IMPORTANT: It is assumed that the edx-proctoring subsystem has been appropriate refreshed
with any on_publish event workflow *BEFORE* this method is called.
"""
# Import here, because signal is registered at startup, but items in tasks
# are not yet able to be loaded
from openedx.core.djangoapps.credit import api, tasks
if api.is_credit_course(course_key):
tasks.update_credit_course_requirements.delay(unicode(course_key))
log.info(u'Added task to update credit requirements for course "%s" to the task queue', course_key)
@receiver(COURSE_GRADE_CHANGED)
def listen_for_grade_calculation(sender, user, course_grade, course_key, deadline, **kwargs): # pylint: disable=unused-argument
"""Receive 'MIN_GRADE_REQUIREMENT_STATUS' signal and update minimum grade requirement status.
Args:
sender: None
user(User): User Model object
course_grade(CourseGrade): CourseGrade object
course_key(CourseKey): The key for the course
deadline(datetime): Course end date or None
Kwargs:
kwargs : None
"""
# This needs to be imported here to avoid a circular dependency
# that can cause migrations to fail.
from openedx.core.djangoapps.credit import api
course_id = CourseKey.from_string(unicode(course_key))
is_credit = api.is_credit_course(course_id)
if is_credit:
requirements = api.get_credit_requirements(course_id, namespace='grade')
if requirements:
criteria = requirements[0].get('criteria')
if criteria:
min_grade = criteria.get('min_grade')
passing_grade = course_grade.percent >= min_grade
now = timezone.now()
status = None
reason = None
if (deadline and now < deadline) or not deadline:
# Student completed coursework on-time
if passing_grade:
# Student received a passing grade
status = 'satisfied'
reason = {'final_grade': course_grade.percent}
else:
# Submission after deadline
if passing_grade:
# Grade was good, but submission arrived too late
status = 'failed'
reason = {
'current_date': now,
'deadline': deadline
}
else:
# Student failed to receive minimum grade
status = 'failed'
reason = {
'final_grade': course_grade.percent,
'minimum_grade': min_grade
}
# We do not record a status if the user has not yet earned the minimum grade, but still has
# time to do so.
if status and reason:
api.set_credit_requirement_status(
user, course_id, 'grade', 'grade', status=status, reason=reason
)
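# Hedged worked example of the decision table above: with criteria
# {'min_grade': 0.8}, a grade of 0.85 before the deadline records status
# 'satisfied' with {'final_grade': 0.85}; the same grade after the deadline
# records 'failed' with the current date and the deadline; a grade of 0.75
# before the deadline records nothing, since the learner can still reach the
# minimum.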
| agpl-3.0 |
mitchellcash/ion | qa/rpc-tests/txn_doublespend.py | 1 | 5059 | #!/usr/bin/env python2
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Copyright (c) 2015-2018 The PIVX developers
# Copyright (c) 2018 The Ion developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with malleable transactions
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from decimal import Decimal
from util import *
import os
import shutil
class TxnMallTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 1,250 BTC:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].move("", "foo", 1220)
self.nodes[0].move("", "bar", 30)
assert_equal(self.nodes[0].getbalance(""), 0)
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# First: use raw transaction API to send 1210 BTC to node1_address,
# but don't broadcast:
(total_in, inputs) = gather_inputs(self.nodes[0], 1210)
change_address = self.nodes[0].getnewaddress("foo")
outputs = {}
outputs[change_address] = 40
outputs[node1_address] = 1210
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransaction(rawtx)
assert_equal(doublespend["complete"], True)
# Create two transaction from node[0] to node[1]; the
# second must spend change from the first because the first
# spends all mature inputs:
txid1 = self.nodes[0].sendfrom("foo", node1_address, 1210, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].setgenerate(True, 1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50BTC for another
# matured block, minus 1210, minus 20, and minus transaction fees:
expected = starting_balance
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo"), 1220+tx1["amount"]+tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar"), 30+tx2["amount"]+tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend to miner:
mutated_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].setgenerate(True, 1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].setgenerate(True, 1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -1)
assert_equal(tx2["confirmations"], -1)
# Node0's total balance should be starting balance, plus 100BTC for
# two more matured blocks, minus 1210 for the double-spend:
expected = starting_balance + 100 - 1210
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
# foo account should be debited, but bar account should not:
assert_equal(self.nodes[0].getbalance("foo"), 1220-1210)
assert_equal(self.nodes[0].getbalance("bar"), 30)
# Node1's "from" account balance should be just the mutated send:
assert_equal(self.nodes[1].getbalance("from0"), 1210)
if __name__ == '__main__':
TxnMallTest().main()
| mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/tools/test_numeric.py | 6 | 14437 | import pytest
import decimal
import numpy as np
import pandas as pd
from pandas import to_numeric, _np_version_under1p9
from pandas.util import testing as tm
from numpy import iinfo
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assert_raises_regex(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with tm.assert_raises_regex(ValueError, msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assert_raises_regex(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
def test_type_check(self):
# GH 11776
df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']})
with tm.assert_raises_regex(TypeError, "1-d array"):
to_numeric(df)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assert_raises_regex(TypeError, "1-d array"):
to_numeric(df, errors=errors)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert to_numeric('XX', errors='ignore') == 'XX'
assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetimelike(self):
for tz in [None, 'US/Eastern', 'Asia/Tokyo']:
idx = pd.date_range('20130101', periods=3, tz=tz, name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# ToDo: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = pd.Series([[10.0, 2], 1.0, 'apple'])
res = pd.to_numeric(s, errors='coerce')
tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan]))
res = pd.to_numeric(s, errors='ignore')
tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple']))
with tm.assert_raises_regex(TypeError, "Invalid object type"):
pd.to_numeric(s)
def test_downcast(self):
# see gh-13352
mixed_data = ['1', 2, 3]
int_data = [1, 2, 3]
date_data = np.array(['1970-01-02', '1970-01-03',
'1970-01-04'], dtype='datetime64[D]')
invalid_downcast = 'unsigned-integer'
msg = 'invalid downcasting method provided'
smallest_int_dtype = np.dtype(np.typecodes['Integer'][0])
smallest_uint_dtype = np.dtype(np.typecodes['UnsignedInteger'][0])
        # support for dtypes below np.float32 is few and far between
float_32_char = np.dtype(np.float32).char
smallest_float_dtype = float_32_char
for data in (mixed_data, int_data, date_data):
with tm.assert_raises_regex(ValueError, msg):
pd.to_numeric(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data)
tm.assert_numpy_array_equal(res, expected)
res = pd.to_numeric(data, downcast=None)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
for signed_downcast in ('integer', 'signed'):
res = pd.to_numeric(data, downcast=signed_downcast)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_uint_dtype)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_float_dtype)
res = pd.to_numeric(data, downcast='float')
tm.assert_numpy_array_equal(res, expected)
# if we can't successfully cast the given
# data to a numeric dtype, do not bother
# with the downcast parameter
data = ['foo', 2, 3]
expected = np.array(data, dtype=object)
res = pd.to_numeric(data, errors='ignore',
downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an unsigned integer because
# we have a negative number
data = ['-1', 2, 3]
expected = np.array([-1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an integer (signed or unsigned)
# because we have a float number
data = (['1.1', 2, 3],
[10000.0, 20000, 3000, 40000.36, 50000, 50000.00])
expected = (np.array([1.1, 2, 3], dtype=np.float64),
np.array([10000.0, 20000, 3000,
40000.36, 50000, 50000.00], dtype=np.float64))
for _data, _expected in zip(data, expected):
for downcast in ('integer', 'signed', 'unsigned'):
res = pd.to_numeric(_data, downcast=downcast)
tm.assert_numpy_array_equal(res, _expected)
# the smallest integer dtype need not be np.(u)int8
data = ['256', 257, 258]
for downcast, expected_dtype in zip(
['integer', 'signed', 'unsigned'],
[np.int16, np.int16, np.uint16]):
expected = np.array([256, 257, 258], dtype=expected_dtype)
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
def test_downcast_limits(self):
# Test the limits of each downcast. Bug: #14401.
# Check to make sure numpy is new enough to run this test.
if _np_version_under1p9:
pytest.skip("Numpy version is under 1.9")
i = 'integer'
u = 'unsigned'
dtype_downcast_min_max = [
('int8', i, [iinfo(np.int8).min, iinfo(np.int8).max]),
('int16', i, [iinfo(np.int16).min, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int32).min, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int64).min, iinfo(np.int64).max]),
('uint8', u, [iinfo(np.uint8).min, iinfo(np.uint8).max]),
('uint16', u, [iinfo(np.uint16).min, iinfo(np.uint16).max]),
('uint32', u, [iinfo(np.uint32).min, iinfo(np.uint32).max]),
('uint64', u, [iinfo(np.uint64).min, iinfo(np.uint64).max]),
('int16', i, [iinfo(np.int8).min, iinfo(np.int8).max + 1]),
('int32', i, [iinfo(np.int16).min, iinfo(np.int16).max + 1]),
('int64', i, [iinfo(np.int32).min, iinfo(np.int32).max + 1]),
('int16', i, [iinfo(np.int8).min - 1, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int16).min - 1, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int32).min - 1, iinfo(np.int64).max]),
('uint16', u, [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]),
('uint32', u, [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]),
('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1])
]
for dtype, downcast, min_max in dtype_downcast_min_max:
series = pd.to_numeric(pd.Series(min_max), downcast=downcast)
assert series.dtype == dtype
| mit |
Hassan93/openTima | node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py | 2485 | 5536 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
return word.replace('$ ','$$ ').replace(' ','$ ').replace(':', '$:')
class Writer(object):
def __init__(self, output, width=78):
self.output = output
self.width = width
def newline(self):
self.output.write('\n')
def comment(self, text):
for line in textwrap.wrap(text, self.width - 2):
self.output.write('# ' + line + '\n')
def variable(self, key, value, indent=0):
if value is None:
return
if isinstance(value, list):
value = ' '.join(filter(None, value)) # Filter out empty strings.
self._line('%s = %s' % (key, value), indent)
def pool(self, name, depth):
self._line('pool %s' % name)
self.variable('depth', depth, indent=1)
def rule(self, name, command, description=None, depfile=None,
generator=False, pool=None, restat=False, rspfile=None,
rspfile_content=None, deps=None):
self._line('rule %s' % name)
self.variable('command', command, indent=1)
if description:
self.variable('description', description, indent=1)
if depfile:
self.variable('depfile', depfile, indent=1)
if generator:
self.variable('generator', '1', indent=1)
if pool:
self.variable('pool', pool, indent=1)
if restat:
self.variable('restat', '1', indent=1)
if rspfile:
self.variable('rspfile', rspfile, indent=1)
if rspfile_content:
self.variable('rspfile_content', rspfile_content, indent=1)
if deps:
self.variable('deps', deps, indent=1)
def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
variables=None):
outputs = self._as_list(outputs)
all_inputs = self._as_list(inputs)[:]
out_outputs = list(map(escape_path, outputs))
all_inputs = list(map(escape_path, all_inputs))
if implicit:
implicit = map(escape_path, self._as_list(implicit))
all_inputs.append('|')
all_inputs.extend(implicit)
if order_only:
order_only = map(escape_path, self._as_list(order_only))
all_inputs.append('||')
all_inputs.extend(order_only)
self._line('build %s: %s' % (' '.join(out_outputs),
' '.join([rule] + all_inputs)))
if variables:
if isinstance(variables, dict):
iterator = iter(variables.items())
else:
iterator = iter(variables)
for key, val in iterator:
self.variable(key, val, indent=1)
return outputs
def include(self, path):
self._line('include %s' % path)
def subninja(self, path):
self._line('subninja %s' % path)
def default(self, paths):
self._line('default %s' % ' '.join(self._as_list(paths)))
def _count_dollars_before_index(self, s, i):
"""Returns the number of '$' characters right in front of s[i]."""
dollar_count = 0
dollar_index = i - 1
while dollar_index > 0 and s[dollar_index] == '$':
dollar_count += 1
dollar_index -= 1
return dollar_count
def _line(self, text, indent=0):
"""Write 'text' word-wrapped at self.width characters."""
leading_space = ' ' * indent
while len(leading_space) + len(text) > self.width:
# The text is too wide; wrap if possible.
# Find the rightmost space that would obey our width constraint and
# that's not an escaped space.
available_space = self.width - len(leading_space) - len(' $')
space = available_space
while True:
space = text.rfind(' ', 0, space)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# No such space; just use the first unescaped space we can find.
space = available_space - 1
while True:
space = text.find(' ', space + 1)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# Give up on breaking.
break
self.output.write(leading_space + text[0:space] + ' $\n')
text = text[space+1:]
# Subsequent lines are continuations, so indent them.
leading_space = ' ' * (indent+2)
self.output.write(leading_space + text + '\n')
def _as_list(self, input):
if input is None:
return []
if isinstance(input, list):
return input
return [input]
def escape(string):
"""Escape a string such that it can be embedded into a Ninja file without
further interpretation."""
assert '\n' not in string, 'Ninja syntax does not allow newlines'
# We only have one special metacharacter: '$'.
return string.replace('$', '$$')
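# Illustrative usage sketch (not part of the upstream file): a generator can
# drive Writer to emit a small build.ninja. The rule, flags and file names
# below are hypothetical.
#
#   import sys
#   w = Writer(sys.stdout)
#   w.variable('cflags', '-O2 -Wall')
#   w.newline()
#   w.rule('cc', command='gcc $cflags -c $in -o $out', description='CC $out')
#   w.newline()
#   w.build('foo.o', 'cc', inputs='foo.c')
#   w.default('foo.o')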
| gpl-2.0 |
alxgu/ansible | lib/ansible/modules/network/netvisor/_pn_trunk.py | 47 | 14298 | #!/usr/bin/python
""" PN CLI trunk-create/trunk-delete/trunk-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_trunk
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: CLI command to create/delete/modify a trunk.
deprecated:
removed_in: '2.12'
why: Doesn't support latest Pluribus Networks netvisor
alternative: Latest modules will be pushed in Ansible future versions.
description:
  - Execute trunk-create, trunk-delete or trunk-modify command.
- Trunks can be used to aggregate network links at Layer 2 on the local
switch. Use this command to create a new trunk.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
default: 'local'
state:
description:
- State the action to perform. Use 'present' to create trunk,
'absent' to delete trunk and 'update' to modify trunk.
required: True
choices: ['present', 'absent', 'update']
pn_name:
description:
- Specify the name for the trunk configuration.
required: true
pn_ports:
description:
- Specify the port number(s) for the link(s) to aggregate into the trunk.
- Required for trunk-create.
pn_speed:
description:
- Specify the port speed or disable the port.
choices: ['disable', '10m', '100m', '1g', '2.5g', '10g', '40g']
pn_egress_rate_limit:
description:
- Specify an egress port data rate limit for the configuration.
pn_jumbo:
description:
- Specify if the port can receive jumbo frames.
type: bool
pn_lacp_mode:
description:
- Specify the LACP mode for the configuration.
choices: ['off', 'passive', 'active']
pn_lacp_priority:
description:
- Specify the LACP priority. This is a number between 1 and 65535 with a
default value of 32768.
pn_lacp_timeout:
description:
      - Specify the LACP timeout as slow (30 seconds) or fast (4 seconds).
The default value is slow.
choices: ['slow', 'fast']
pn_lacp_fallback:
description:
      - Specify the LACP fallback mode as bundle or individual.
choices: ['bundle', 'individual']
pn_lacp_fallback_timeout:
description:
- Specify the LACP fallback timeout in seconds. The range is between 30
and 60 seconds with a default value of 50 seconds.
pn_edge_switch:
description:
- Specify if the switch is an edge switch.
type: bool
pn_pause:
description:
- Specify if pause frames are sent.
type: bool
pn_description:
description:
- Specify a description for the trunk configuration.
pn_loopback:
description:
- Specify loopback if you want to use loopback.
type: bool
pn_mirror_receive:
description:
- Specify if the configuration receives mirrored traffic.
type: bool
pn_unknown_ucast_level:
description:
- Specify an unknown unicast level in percent. The default value is 100%.
pn_unknown_mcast_level:
description:
- Specify an unknown multicast level in percent. The default value is 100%.
pn_broadcast_level:
description:
- Specify a broadcast level in percent. The default value is 100%.
pn_port_macaddr:
description:
- Specify the MAC address of the port.
pn_loopvlans:
description:
- Specify a list of looping vlans.
pn_routing:
description:
- Specify if the port participates in routing on the network.
type: bool
pn_host:
description:
- Host facing port control setting.
type: bool
"""
EXAMPLES = """
- name: create trunk
pn_trunk:
state: 'present'
pn_name: 'spine-to-leaf'
pn_ports: '11,12,13,14'
- name: delete trunk
pn_trunk:
state: 'absent'
pn_name: 'spine-to-leaf'
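# Illustrative only: the module also supports state 'update' (trunk-modify);
# the LACP value below is hypothetical.
- name: modify trunk
  pn_trunk:
    state: 'update'
    pn_name: 'spine-to-leaf'
    pn_lacp_mode: 'active'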
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the trunk command.
returned: always
type: list
stderr:
description: The set of error responses from the trunk command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
TRUNK_EXISTS = None
def pn_cli(module):
"""
    This method generates the cli prefix used to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
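# For example (hypothetical credentials), the generated prefix looks roughly
# like: '/usr/bin/cli --quiet --user admin:secret  switch-local '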
def check_cli(module, cli):
"""
This method checks for idempotency using the trunk-show command.
    If a trunk with the given name exists, set TRUNK_EXISTS to True, else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Global Booleans: TRUNK_EXISTS
"""
name = module.params['pn_name']
show = cli + ' trunk-show format switch,name no-show-headers'
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
# Global flags
global TRUNK_EXISTS
if name in out:
TRUNK_EXISTS = True
else:
TRUNK_EXISTS = False
def run_cli(module, cli):
"""
    This method executes the cli command on the target node(s) and exits the
    module with a JSON response built from the command output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
"""
    This method maps the specified state to the corresponding CLI command
    name and returns it.
:param state: The state for which the respective command name is required.
"""
command = None
if state == 'present':
command = 'trunk-create'
if state == 'absent':
command = 'trunk-delete'
if state == 'update':
command = 'trunk-modify'
return command
def main():
""" This portion is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_name=dict(required=True, type='str'),
pn_ports=dict(type='str'),
pn_speed=dict(type='str',
choices=['disable', '10m', '100m', '1g', '2.5g',
'10g', '40g']),
pn_egress_rate_limit=dict(type='str'),
pn_jumbo=dict(type='bool'),
pn_lacp_mode=dict(type='str', choices=[
'off', 'passive', 'active']),
pn_lacp_priority=dict(type='int'),
pn_lacp_timeout=dict(type='str', choices=['slow', 'fast']),
pn_lacp_fallback=dict(type='str', choices=[
'bundle', 'individual']),
pn_lacp_fallback_timeout=dict(type='str'),
pn_edge_switch=dict(type='bool'),
pn_pause=dict(type='bool'),
pn_description=dict(type='str'),
pn_loopback=dict(type='bool'),
pn_mirror_receive=dict(type='bool'),
pn_unknown_ucast_level=dict(type='str'),
pn_unknown_mcast_level=dict(type='str'),
pn_broadcast_level=dict(type='str'),
pn_port_macaddr=dict(type='str'),
pn_loopvlans=dict(type='str'),
pn_routing=dict(type='bool'),
pn_host=dict(type='bool')
),
required_if=(
["state", "present", ["pn_name", "pn_ports"]],
["state", "absent", ["pn_name"]],
["state", "update", ["pn_name"]]
)
)
# Accessing the arguments
state = module.params['state']
name = module.params['pn_name']
ports = module.params['pn_ports']
speed = module.params['pn_speed']
egress_rate_limit = module.params['pn_egress_rate_limit']
jumbo = module.params['pn_jumbo']
lacp_mode = module.params['pn_lacp_mode']
lacp_priority = module.params['pn_lacp_priority']
lacp_timeout = module.params['pn_lacp_timeout']
lacp_fallback = module.params['pn_lacp_fallback']
lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout']
edge_switch = module.params['pn_edge_switch']
pause = module.params['pn_pause']
description = module.params['pn_description']
loopback = module.params['pn_loopback']
mirror_receive = module.params['pn_mirror_receive']
unknown_ucast_level = module.params['pn_unknown_ucast_level']
unknown_mcast_level = module.params['pn_unknown_mcast_level']
broadcast_level = module.params['pn_broadcast_level']
port_macaddr = module.params['pn_port_macaddr']
loopvlans = module.params['pn_loopvlans']
routing = module.params['pn_routing']
host = module.params['pn_host']
command = get_command_from_state(state)
# Building the CLI command string
cli = pn_cli(module)
if command == 'trunk-delete':
check_cli(module, cli)
if TRUNK_EXISTS is False:
module.exit_json(
skipped=True,
msg='Trunk with name %s does not exist' % name
)
cli += ' %s name %s ' % (command, name)
else:
if command == 'trunk-create':
check_cli(module, cli)
if TRUNK_EXISTS is True:
module.exit_json(
skipped=True,
msg='Trunk with name %s already exists' % name
)
cli += ' %s name %s ' % (command, name)
# Appending options
if ports:
cli += ' ports ' + ports
if speed:
cli += ' speed ' + speed
if egress_rate_limit:
cli += ' egress-rate-limit ' + egress_rate_limit
if jumbo is True:
cli += ' jumbo '
if jumbo is False:
cli += ' no-jumbo '
if lacp_mode:
cli += ' lacp-mode ' + lacp_mode
if lacp_priority:
        # lacp_priority is parsed as an int, so cast it before concatenating.
        cli += ' lacp-priority ' + str(lacp_priority)
if lacp_timeout:
cli += ' lacp-timeout ' + lacp_timeout
if lacp_fallback:
cli += ' lacp-fallback ' + lacp_fallback
if lacp_fallback_timeout:
cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout
if edge_switch is True:
cli += ' edge-switch '
if edge_switch is False:
cli += ' no-edge-switch '
if pause is True:
cli += ' pause '
if pause is False:
cli += ' no-pause '
if description:
cli += ' description ' + description
if loopback is True:
cli += ' loopback '
if loopback is False:
cli += ' no-loopback '
if mirror_receive is True:
cli += ' mirror-receive-only '
if mirror_receive is False:
cli += ' no-mirror-receive-only '
if unknown_ucast_level:
cli += ' unknown-ucast-level ' + unknown_ucast_level
if unknown_mcast_level:
cli += ' unknown-mcast-level ' + unknown_mcast_level
if broadcast_level:
cli += ' broadcast-level ' + broadcast_level
if port_macaddr:
cli += ' port-mac-address ' + port_macaddr
if loopvlans:
cli += ' loopvlans ' + loopvlans
if routing is True:
cli += ' routing '
if routing is False:
cli += ' no-routing '
if host is True:
cli += ' host-enable '
if host is False:
cli += ' host-disable '
run_cli(module, cli)
if __name__ == '__main__':
main()
| gpl-3.0 |
DoubleNegativeVisualEffects/gaffer | python/Gaffer/InfoPathFilter.py | 5 | 2873 | ##########################################################################
#
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from PathFilter import PathFilter
## A PathFilter which filters based on an item from Path.info() and an
# arbitrary match function.
class InfoPathFilter( PathFilter ) :
def __init__( self, infoKey, matcher, leafOnly=True, userData={} ) :
PathFilter.__init__( self, userData )
self.__infoKey = infoKey
self.__matcher = matcher
self.__leafOnly = leafOnly
	## The matcher and infoKey are set with a single call, because setting
	# them separately could leave an intermediate state that pairs a new
	# matcher with the old key.
def setMatcher( self, infoKey, matcher ) :
self.__infoKey = infoKey
self.__matcher = matcher
self.changedSignal()( self )
def getMatcher( self ) :
return self.__infoKey, self.__matcher
def _filter( self, paths ) :
if self.__matcher is None :
return paths
result = []
for p in paths :
if self.__leafOnly and not p.isLeaf() :
result.append( p )
else :
i = p.info()
if self.__infoKey in i :
if self.__matcher( i[self.__infoKey] ) :
result.append( p )
return result
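# Minimal usage sketch (illustrative; "fileSystem:size" is assumed to be a
# valid Path.info() key, and PathFilter is assumed to expose filter()):
#
#   f = InfoPathFilter( "fileSystem:size", lambda s : s > 1024 * 1024 )
#   bigLeaves = f.filter( paths )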
| bsd-3-clause |
prestonso/intro-graphql | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/input_test.py | 1841 | 3207 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the input.py file."""
import gyp.input
import unittest
import sys
class TestFindCycles(unittest.TestCase):
def setUp(self):
self.nodes = {}
for x in ('a', 'b', 'c', 'd', 'e'):
self.nodes[x] = gyp.input.DependencyGraphNode(x)
def _create_dependency(self, dependent, dependency):
dependent.dependencies.append(dependency)
dependency.dependents.append(dependent)
def test_no_cycle_empty_graph(self):
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_line(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_dag(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['a'], self.nodes['c'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_cycle_self_reference(self):
self._create_dependency(self.nodes['a'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['a']]],
self.nodes['a'].FindCycles())
def test_cycle_two_nodes(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['b'], self.nodes['a']]],
self.nodes['a'].FindCycles())
self.assertEquals([[self.nodes['b'], self.nodes['a'], self.nodes['b']]],
self.nodes['b'].FindCycles())
def test_two_cycles(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['b'])
cycles = self.nodes['a'].FindCycles()
self.assertTrue(
[self.nodes['a'], self.nodes['b'], self.nodes['a']] in cycles)
self.assertTrue(
[self.nodes['b'], self.nodes['c'], self.nodes['b']] in cycles)
self.assertEquals(2, len(cycles))
def test_big_cycle(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
self._create_dependency(self.nodes['d'], self.nodes['e'])
self._create_dependency(self.nodes['e'], self.nodes['a'])
self.assertEquals([[self.nodes['a'],
self.nodes['b'],
self.nodes['c'],
self.nodes['d'],
self.nodes['e'],
self.nodes['a']]],
self.nodes['a'].FindCycles())
if __name__ == '__main__':
unittest.main()
| mit |
kefo/moto | moto/dynamodb2/comparisons.py | 4 | 1528 | from __future__ import unicode_literals
# TODO add tests for all of these
EQ_FUNCTION = lambda item_value, test_value: item_value == test_value # flake8: noqa
NE_FUNCTION = lambda item_value, test_value: item_value != test_value # flake8: noqa
LE_FUNCTION = lambda item_value, test_value: item_value <= test_value # flake8: noqa
LT_FUNCTION = lambda item_value, test_value: item_value < test_value # flake8: noqa
GE_FUNCTION = lambda item_value, test_value: item_value >= test_value # flake8: noqa
GT_FUNCTION = lambda item_value, test_value: item_value > test_value # flake8: noqa
COMPARISON_FUNCS = {
'EQ': EQ_FUNCTION,
'=': EQ_FUNCTION,
'NE': NE_FUNCTION,
'!=': NE_FUNCTION,
'LE': LE_FUNCTION,
'<=': LE_FUNCTION,
'LT': LT_FUNCTION,
'<': LT_FUNCTION,
'GE': GE_FUNCTION,
'>=': GE_FUNCTION,
'GT': GT_FUNCTION,
'>': GT_FUNCTION,
'NULL': lambda item_value: item_value is None,
'NOT_NULL': lambda item_value: item_value is not None,
'CONTAINS': lambda item_value, test_value: test_value in item_value,
'NOT_CONTAINS': lambda item_value, test_value: test_value not in item_value,
'BEGINS_WITH': lambda item_value, test_value: item_value.startswith(test_value),
'IN': lambda item_value, *test_values: item_value in test_values,
'BETWEEN': lambda item_value, lower_test_value, upper_test_value: lower_test_value <= item_value <= upper_test_value,
}
def get_comparison_func(range_comparison):
return COMPARISON_FUNCS.get(range_comparison)
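# Minimal sketch of how these comparators are used (illustrative values):
#
#   between = get_comparison_func('BETWEEN')
#   between(5, 1, 10) # True, since 1 <= 5 <= 10
#   get_comparison_func('BEGINS_WITH')('foobar', 'foo') # True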
| apache-2.0 |
zstackorg/zstack-woodpecker | integrationtest/vm/multihosts/bs/test_iso_vm_del_ops_all_expg_hot_migrate.py | 1 | 4687 | '''
Test deleting a VM's ISO, checking all VM operations, then expunging the ISO and verifying hot migration.
@author: SyZhao
'''
import os
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_image as test_image
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.image_operations as img_ops
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
image = None
def test():
global image
global test_obj_dict
allow_bs_list = [inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE, inventory.CEPH_BACKUP_STORAGE_TYPE, inventory.SFTP_BACKUP_STORAGE_TYPE]
test_lib.skip_test_when_bs_type_not_in_list(allow_bs_list)
allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, inventory.NFS_PRIMARY_STORAGE_TYPE, 'SharedMountPoint']
test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
#run condition
hosts = res_ops.query_resource(res_ops.HOST)
if len(hosts) <= 1:
test_util.test_skip("skip for host_num is not satisfy condition host_num>1")
bs_cond = res_ops.gen_query_conditions("status", '=', "Connected")
bss = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, bs_cond, None, fields=['uuid'])
#create disk offering
data_volume_size = 10737418240
disk_offering_option = test_util.DiskOfferingOption()
disk_offering_option.set_name('root-disk-iso')
disk_offering_option.set_diskSize(data_volume_size)
data_volume_offering = vol_ops.create_volume_offering(disk_offering_option)
test_obj_dict.add_disk_offering(data_volume_offering)
#create instance offering
cpuNum = 2
memorySize = 1024*1024*1024
name = 'iso-vm-offering'
new_offering_option = test_util.InstanceOfferingOption()
new_offering_option.set_cpuNum(cpuNum)
new_offering_option.set_memorySize(memorySize)
new_offering_option.set_name(name)
new_offering = vm_ops.create_instance_offering(new_offering_option)
test_obj_dict.add_instance_offering(new_offering)
#add iso
img_option = test_util.ImageOption()
img_option.set_name('iso1')
bs_uuid = res_ops.query_resource_fields(res_ops.BACKUP_STORAGE, [], None)[0].uuid
img_option.set_backup_storage_uuid_list([bs_uuid])
img_option.set_url(os.environ.get('isoForVmUrl'))
image_inv = img_ops.add_iso_template(img_option)
image_uuid = image_inv.uuid
image = test_image.ZstackTestImage()
image.set_image(image_inv)
image.set_creation_option(img_option)
test_obj_dict.add_image(image)
#create vm by iso
l3_name = os.environ.get('l3VlanNetworkName1')
l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
root_disk_uuid = data_volume_offering.uuid
vm = test_stub.create_vm_with_iso([l3_net_uuid], image_uuid, 'iso-vm', root_disk_uuid, new_offering.uuid)
host_ip = test_lib.lib_find_host_by_vm(vm.get_vm()).managementIp
test_obj_dict.add_vm(vm)
#check vm
vm_inv = vm.get_vm()
vm_ip = vm_inv.vmNics[0].ip
#cmd ='[ -e /root ]'
#ssh_timeout = test_lib.SSH_TIMEOUT
#test_lib.SSH_TIMEOUT = 3600
test_lib.lib_set_vm_host_l2_ip(vm_inv)
test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, 22, 1800)
#if not test_lib.lib_ssh_vm_cmd_by_agent_with_retry(host_ip, vm_ip, 'root', 'password', cmd):
# test_lib.SSH_TIMEOUT = ssh_timeout
# test_util.test_fail("iso has been failed to installed.")
#test_lib.SSH_TIMEOUT = ssh_timeout
#delete iso
image.delete()
test_obj_dict.rm_image(image)
#vm ops test
test_stub.vm_ops_test(vm, "VM_TEST_ALL")
#expunge iso
image.expunge()
#detach iso
img_ops.detach_iso(vm.vm.uuid)
#vm ops test
test_stub.vm_ops_test(vm, "VM_TEST_MIGRATE")
vm.destroy()
vol_ops.delete_disk_offering(root_disk_uuid)
vm_ops.delete_instance_offering(new_offering.uuid)
test_obj_dict.rm_vm(vm)
test_obj_dict.rm_disk_offering(data_volume_offering)
test_obj_dict.rm_instance_offering(new_offering)
test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('ISO VM Delete/Expunge Operations and Hot Migrate Success')
#Will be called only if exception happens in test().
def error_cleanup():
global image
global test_obj_dict
test_lib.lib_error_cleanup(test_obj_dict)
try:
image.delete()
except:
pass
| apache-2.0 |
kakunbsc/enigma2.1 | lib/python/Screens/VirtualKeyBoard.py | 2 | 10905 | # -*- coding: iso-8859-1 -*-
from Components.Language import language
from Components.ActionMap import ActionMap
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.MenuList import MenuList
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from enigma import eListboxPythonMultiContent, gFont, RT_HALIGN_CENTER, RT_VALIGN_CENTER
from Screen import Screen
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from Tools.LoadPixmap import LoadPixmap
class VirtualKeyBoardList(MenuList):
def __init__(self, list, enableWrapAround=False):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
self.l.setFont(0, gFont("Regular", 28))
self.l.setItemHeight(45)
def VirtualKeyBoardEntryComponent(keys, selectedKey, shiftMode=False):
key_backspace = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_backspace.png"))
key_bg = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_bg.png"))
key_clr = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_clr.png"))
key_esc = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_esc.png"))
key_ok = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_ok.png"))
key_sel = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_sel.png"))
key_shift = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_shift.png"))
key_shift_sel = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_shift_sel.png"))
key_space = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/vkey_space.png"))
res = [ (keys) ]
x = 0
count = 0
if shiftMode:
shiftkey_png = key_shift_sel
else:
shiftkey_png = key_shift
for key in keys:
width = None
if key == "EXIT":
width = key_esc.size().width()
res.append(MultiContentEntryPixmapAlphaTest(pos=(x, 0), size=(width, 45), png=key_esc))
elif key == "BACKSPACE":
width = key_backspace.size().width()
res.append(MultiContentEntryPixmapAlphaTest(pos=(x, 0), size=(width, 45), png=key_backspace))
elif key == "CLEAR":
width = key_clr.size().width()
res.append(MultiContentEntryPixmapAlphaTest(pos=(x, 0), size=(width, 45), png=key_clr))
elif key == "SHIFT":
width = shiftkey_png.size().width()
res.append(MultiContentEntryPixmapAlphaTest(pos=(x, 0), size=(width, 45), png=shiftkey_png))
elif key == "SPACE":
width = key_space.size().width()
res.append(MultiContentEntryPixmapAlphaTest(pos=(x, 0), size=(width, 45), png=key_space))
elif key == "OK":
width = key_ok.size().width()
res.append(MultiContentEntryPixmapAlphaTest(pos=(x, 0), size=(width, 45), png=key_ok))
#elif key == "<-":
# res.append(MultiContentEntryPixmapAlphaTest(pos=(x, 0), size=(45, 45), png=key_left))
#elif key == "->":
# res.append(MultiContentEntryPixmapAlphaTest(pos=(x, 0), size=(45, 45), png=key_right))
else:
width = key_bg.size().width()
res.extend((
MultiContentEntryPixmapAlphaTest(pos=(x, 0), size=(width, 45), png=key_bg),
MultiContentEntryText(pos=(x, 0), size=(width, 45), font=0, text=key.encode("utf-8"), flags=RT_HALIGN_CENTER | RT_VALIGN_CENTER)
))
if selectedKey == count:
width = key_sel.size().width()
res.append(MultiContentEntryPixmapAlphaTest(pos=(x, 0), size=(width, 45), png=key_sel))
if width is not None:
x += width
else:
x += 45
count += 1
return res
class VirtualKeyBoard(Screen):
def __init__(self, session, title="", text=""):
Screen.__init__(self, session)
self.keys_list = []
self.shiftkeys_list = []
self.lang = language.getLanguage()
if self.lang == 'de_DE':
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"q", u"w", u"e", u"r", u"t", u"z", u"u", u"i", u"o", u"p", u"ü", u"+"],
[u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"ö", u"ä", u"#"],
[u"<", u"y", u"x", u"c", u"v", u"b", u"n", u"m", u",", ".", u"-", u"CLEAR"],
[u"SHIFT", u"SPACE", u"@", u"ß", u"OK"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u'"', u"§", u"$", u"%", u"&", u"/", u"(", u")", u"=", u"BACKSPACE"],
[u"Q", u"W", u"E", u"R", u"T", u"Z", u"U", u"I", u"O", u"P", u"Ü", u"*"],
[u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"Ö", u"Ä", u"'"],
[u">", u"Y", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CLEAR"],
[u"SHIFT", u"SPACE", u"?", u"\\", u"OK"]]
elif self.lang == 'es_ES':
#still missing keys (u"ùÙ")
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"q", u"w", u"e", u"r", u"t", u"z", u"u", u"i", u"o", u"p", u"ú", u"+"],
[u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"ó", u"á", u"#"],
[u"<", u"y", u"x", u"c", u"v", u"b", u"n", u"m", u",", ".", u"-", u"CLEAR"],
[u"SHIFT", u"SPACE", u"@", u"£", u"à", u"é", u"è", u"í", u"ì", u"ñ", u"ò", u"OK"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u'"', u"§", u"$", u"%", u"&", u"/", u"(", u")", u"=", u"BACKSPACE"],
[u"Q", u"W", u"E", u"R", u"T", u"Z", u"U", u"I", u"O", u"P", u"Ú", u"*"],
[u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"Ó", u"Á", u"'"],
[u">", u"Y", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CLEAR"],
[u"SHIFT", u"SPACE", u"?", u"\\", u"À", u"É", u"È", u"Í", u"Ì", u"Ñ", u"Ò", u"OK"]]
elif self.lang in ('sv_SE', 'fi_FI'):
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"q", u"w", u"e", u"r", u"t", u"z", u"u", u"i", u"o", u"p", u"é", u"+"],
[u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"ö", u"ä", u"#"],
[u"<", u"y", u"x", u"c", u"v", u"b", u"n", u"m", u",", ".", u"-", u"CLEAR"],
[u"SHIFT", u"SPACE", u"@", u"ß", u"å", u"OK"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u'"', u"§", u"$", u"%", u"&", u"/", u"(", u")", u"=", u"BACKSPACE"],
[u"Q", u"W", u"E", u"R", u"T", u"Z", u"U", u"I", u"O", u"P", u"É", u"*"],
[u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"Ö", u"Ä", u"'"],
[u">", u"Y", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CLEAR"],
[u"SHIFT", u"SPACE", u"?", u"\\", u"Å", u"OK"]]
else:
self.keys_list = [
[u"EXIT", u"1", u"2", u"3", u"4", u"5", u"6", u"7", u"8", u"9", u"0", u"BACKSPACE"],
[u"q", u"w", u"e", u"r", u"t", u"z", u"u", u"i", u"o", u"p", u"+", u"@"],
[u"a", u"s", u"d", u"f", u"g", u"h", u"j", u"k", u"l", u"#", u"\\"],
[u"<", u"y", u"x", u"c", u"v", u"b", u"n", u"m", u",", ".", u"-", u"CLEAR"],
[u"SHIFT", u"SPACE", u"OK"]]
self.shiftkeys_list = [
[u"EXIT", u"!", u'"', u"§", u"$", u"%", u"&", u"/", u"(", u")", u"=", u"BACKSPACE"],
[u"Q", u"W", u"E", u"R", u"T", u"Z", u"U", u"I", u"O", u"P", u"*"],
[u"A", u"S", u"D", u"F", u"G", u"H", u"J", u"K", u"L", u"'", u"?"],
[u">", u"Y", u"X", u"C", u"V", u"B", u"N", u"M", u";", u":", u"_", u"CLEAR"],
[u"SHIFT", u"SPACE", u"OK"]]
self.shiftMode = False
self.text = text
self.selectedKey = 0
self["header"] = Label(title)
self["text"] = Label(self.text)
self["list"] = VirtualKeyBoardList([])
self["actions"] = ActionMap(["OkCancelActions", "WizardActions", "ColorActions"],
{
"ok": self.okClicked,
"cancel": self.exit,
"left": self.left,
"right": self.right,
"up": self.up,
"down": self.down,
"red": self.backClicked,
"green": self.ok
}, -2)
self.onLayoutFinish.append(self.buildVirtualKeyBoard)
		self.max_key = 47 + len(self.keys_list[4])
def buildVirtualKeyBoard(self, selectedKey=0):
list = []
if self.shiftMode:
self.k_list = self.shiftkeys_list
for keys in self.k_list:
if selectedKey < 12 and selectedKey > -1:
list.append(VirtualKeyBoardEntryComponent(keys, selectedKey,True))
else:
list.append(VirtualKeyBoardEntryComponent(keys, -1,True))
selectedKey -= 12
else:
self.k_list = self.keys_list
for keys in self.k_list:
if selectedKey < 12 and selectedKey > -1:
list.append(VirtualKeyBoardEntryComponent(keys, selectedKey))
else:
list.append(VirtualKeyBoardEntryComponent(keys, -1))
selectedKey -= 12
self["list"].setList(list)
def backClicked(self):
self.text = self["text"].getText()[:-1]
self["text"].setText(self.text)
def okClicked(self):
if self.shiftMode:
list = self.shiftkeys_list
else:
list = self.keys_list
selectedKey = self.selectedKey
text = None
for x in list:
if selectedKey < 12:
if selectedKey < len(x):
text = x[selectedKey]
break
else:
selectedKey -= 12
if text is None:
return
text = text.encode("utf-8")
if text == "EXIT":
self.close(None)
elif text == "BACKSPACE":
self.text = self["text"].getText()[:-1]
self["text"].setText(self.text)
elif text == "CLEAR":
self.text = ""
self["text"].setText(self.text)
elif text == "SHIFT":
if self.shiftMode:
self.shiftMode = False
else:
self.shiftMode = True
self.buildVirtualKeyBoard(self.selectedKey)
elif text == "SPACE":
self.text += " "
self["text"].setText(self.text)
elif text == "OK":
self.close(self["text"].getText())
else:
self.text = self["text"].getText()
self.text += text
self["text"].setText(self.text)
def ok(self):
self.close(self["text"].getText())
def exit(self):
self.close(None)
def left(self):
self.selectedKey -= 1
if self.selectedKey == -1:
self.selectedKey = 11
elif self.selectedKey == 11:
self.selectedKey = 23
elif self.selectedKey == 23:
self.selectedKey = 35
elif self.selectedKey == 35:
self.selectedKey = 47
elif self.selectedKey == 47:
self.selectedKey = self.max_key
self.showActiveKey()
def right(self):
self.selectedKey += 1
if self.selectedKey == 12:
self.selectedKey = 0
elif self.selectedKey == 24:
self.selectedKey = 12
elif self.selectedKey == 36:
self.selectedKey = 24
elif self.selectedKey == 48:
self.selectedKey = 36
elif self.selectedKey > self.max_key:
self.selectedKey = 48
self.showActiveKey()
def up(self):
self.selectedKey -= 12
if (self.selectedKey < 0) and (self.selectedKey > (self.max_key-60)):
self.selectedKey += 48
elif self.selectedKey < 0:
self.selectedKey += 60
self.showActiveKey()
def down(self):
self.selectedKey += 12
if (self.selectedKey > self.max_key) and (self.selectedKey > 59):
self.selectedKey -= 60
elif self.selectedKey > self.max_key:
self.selectedKey -= 48
self.showActiveKey()
def showActiveKey(self):
self.buildVirtualKeyBoard(self.selectedKey)
| gpl-2.0 |
rhjdjong/Slip | tests/integration/test_slip_file_unbuffered.py | 2 | 1139 | # Copyright (c) 2020. Ruud de Jong
# This file is part of the SlipLib project which is released under the MIT license.
# See https://github.com/rhjdjong/SlipLib for details.
# pylint: disable=relative-beyond-top-level
"""Test using SlipStream with an unbuffered file"""
from sliplib import encode, SlipStream
from .test_data import data, BaseFileTest
class TestUnbufferedFileAccess(BaseFileTest):
"""Test unbuffered SLIP file access."""
def test_reading_slip_file(self):
"""Test reading SLIP-encoded message"""
self.filepath.write_bytes(b''.join(encode(msg) for msg in data))
with self.filepath.open(mode='rb', buffering=0) as f:
slipstream = SlipStream(f)
for exp, act in zip(data, slipstream):
assert exp == act
def test_writing_slip_file(self):
"""Test writing SLIP-encoded messages"""
with self.filepath.open(mode='wb', buffering=0) as f:
slipstream = SlipStream(f)
for msg in data:
slipstream.send_msg(msg)
assert self.filepath.read_bytes() == b''.join(encode(msg) for msg in data)
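# For reference (illustrative): sliplib.encode frames a message with SLIP END
# bytes, e.g. encode(b'hello') == b'\xc0hello\xc0', which is what the tests
# above write out and read back.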
| mit |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/abc.py | 488 | 7145 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
import types
from _weakrefset import WeakSet
# Instance of old-style class
class _C: pass
_InstanceType = type(_C())
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C:
__metaclass__ = ABCMeta
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractproperty(property):
"""A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C:
__metaclass__ = ABCMeta
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C:
__metaclass__ = ABCMeta
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = set(name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False))
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC."""
if not isinstance(subclass, (type, types.ClassType)):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print >> file, "%s: %r" % (name, value)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking when it's simple.
subclass = getattr(instance, '__class__', None)
if subclass is not None and subclass in cls._abc_cache:
return True
subtype = type(instance)
# Old-style instances
if subtype is _InstanceType:
subtype = subclass
if subtype is subclass or subclass is None:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subtype in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subtype)
return (cls.__subclasscheck__(subclass) or
cls.__subclasscheck__(subtype))
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
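# Illustrative example (not part of the module): registering a built-in type
# as a virtual subclass of an ABC.
#
#   class Sized:
#       __metaclass__ = ABCMeta
#       @abstractmethod
#       def __len__(self): pass
#
#   Sized.register(tuple)
#   assert issubclass(tuple, Sized) and isinstance((), Sized)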
| gpl-2.0 |
baylee/django | tests/staticfiles_tests/cases.py | 38 | 4503 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import os
import shutil
import tempfile
from django.conf import settings
from django.core.management import call_command
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from django.utils import six
from django.utils.encoding import force_text
from .settings import TEST_SETTINGS
class BaseStaticFilesMixin(object):
"""
Test case with a couple utility assertions.
"""
def assertFileContains(self, filepath, text):
self.assertIn(
text,
self._get_file(force_text(filepath)),
"'%s' not in '%s'" % (text, filepath),
)
def assertFileNotFound(self, filepath):
with self.assertRaises(IOError):
self._get_file(filepath)
def render_template(self, template, **kwargs):
if isinstance(template, six.string_types):
template = Template(template)
return template.render(Context(kwargs)).strip()
def static_template_snippet(self, path, asvar=False):
if asvar:
return "{%% load static from staticfiles %%}{%% static '%s' as var %%}{{ var }}" % path
return "{%% load static from staticfiles %%}{%% static '%s' %%}" % path
def assertStaticRenders(self, path, result, asvar=False, **kwargs):
template = self.static_template_snippet(path, asvar)
self.assertEqual(self.render_template(template, **kwargs), result)
def assertStaticRaises(self, exc, path, result, asvar=False, **kwargs):
with self.assertRaises(exc):
self.assertStaticRenders(path, result, **kwargs)
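# Illustrative use in a test (the asset name and URL are hypothetical and
# depend on STATIC_URL in the active settings):
#
#   self.assertStaticRenders('test/file.txt', '/static/test/file.txt')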
@override_settings(**TEST_SETTINGS)
class StaticFilesTestCase(BaseStaticFilesMixin, SimpleTestCase):
pass
@override_settings(**TEST_SETTINGS)
class CollectionTestCase(BaseStaticFilesMixin, SimpleTestCase):
"""
Tests shared by all file finding features (collectstatic,
findstatic, and static serve view).
    This relies on the asserts defined in BaseStaticFilesMixin, but
is separated because some test cases need those asserts without
all these tests.
"""
def setUp(self):
super(CollectionTestCase, self).setUp()
temp_dir = tempfile.mkdtemp()
# Override the STATIC_ROOT for all tests from setUp to tearDown
# rather than as a context manager
self.patched_settings = self.settings(STATIC_ROOT=temp_dir)
self.patched_settings.enable()
self.run_collectstatic()
# Same comment as in runtests.teardown.
self.addCleanup(shutil.rmtree, six.text_type(temp_dir))
def tearDown(self):
self.patched_settings.disable()
super(CollectionTestCase, self).tearDown()
def run_collectstatic(self, **kwargs):
call_command('collectstatic', interactive=False, verbosity=0,
ignore_patterns=['*.ignoreme'], **kwargs)
def _get_file(self, filepath):
assert filepath, 'filepath is empty.'
filepath = os.path.join(settings.STATIC_ROOT, filepath)
with codecs.open(filepath, "r", "utf-8") as f:
return f.read()
class TestDefaults(object):
"""
A few standard test cases.
"""
def test_staticfiles_dirs(self):
"""
Can find a file in a STATICFILES_DIRS directory.
"""
self.assertFileContains('test.txt', 'Can we find')
self.assertFileContains(os.path.join('prefix', 'test.txt'), 'Prefix')
def test_staticfiles_dirs_subdir(self):
"""
Can find a file in a subdirectory of a STATICFILES_DIRS
directory.
"""
self.assertFileContains('subdir/test.txt', 'Can we find')
def test_staticfiles_dirs_priority(self):
"""
File in STATICFILES_DIRS has priority over file in app.
"""
self.assertFileContains('test/file.txt', 'STATICFILES_DIRS')
def test_app_files(self):
"""
Can find a file in an app static/ directory.
"""
self.assertFileContains('test/file1.txt', 'file1 in the app dir')
def test_nonascii_filenames(self):
"""
Can find a file with non-ASCII character in an app static/ directory.
"""
self.assertFileContains('test/⊗.txt', '⊗ in the app dir')
def test_camelcase_filenames(self):
"""
Can find a file with capital letters.
"""
self.assertFileContains('test/camelCase.txt', 'camelCase')
| bsd-3-clause |
calroc/Tkinter3D | neat_demo.py | 1 | 1755 | #!/usr/bin/env python
#
# Copyright 2011, 2012, 2013, 2014 Simon Forman
#
# This file is part of Tkinter3D.
#
# Tkinter3D is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tkinter3D is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tkinter3D. If not, see <http://www.gnu.org/licenses/>.
#
from tkinter import Tk
from canvas3d import Canvas3D, Frame3D, roty
from scene import Thing3D as dot
root = Tk()
root.title("Rotating Cube Demo")
c = Canvas3D(root)
c.pack(expand=1, fill='both')
# Zoom out a little.
c.frame.T.z += -300.0
# Create a dot at world origin (0, 0, 0).
origin = dot(c, width=4)
c.frame.things.append(origin)
# Make a frame for some objects to be in.
cube_frame = Frame3D()
c.frame.subframes.append(cube_frame)
# Add a Z component to the cube frame's translation to offset the cube
# from the world frame.
cube_frame.T.z += 300.0
# Make a cube.
F = -100.0, 100.0
for x, y, z in ((x, y, z) for x in F for y in F for z in F):
cube_frame.things.append(dot(c, x, y, z, width=6))
# Apply a rotation repeatedly to our cube.
rotation = roty(360/30/3) # 4 degrees.
def delta():
cube_frame.RM *= rotation
c.after(60, delta) # Retrigger every sixty milliseconds.
# Start everything running.
delta()
c.start_updating()
root.mainloop()
| gpl-3.0 |
nanchenchen/emoticon-analysis | emoticonvis/apps/enhance/management/commands/build_tweet_dictionary.py | 1 | 1427 | from django.core.management.base import BaseCommand, make_option, CommandError
from time import time
import path
from django.db import transaction
class Command(BaseCommand):
help = "From Tweet Parser results, extract words and connect with messages for a dataset."
args = '<dataset_id> <parsed_filename> [...]'
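    # Illustrative invocation (the dataset id and file name are hypothetical):
    #   python manage.py build_tweet_dictionary 3 parsed_tweets_part1.txt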
def handle(self, dataset_id, *filenames, **options):
if not dataset_id:
raise CommandError("Dataset id is required.")
try:
dataset_id = int(dataset_id)
except ValueError:
raise CommandError("Dataset id must be a number.")
if len(filenames) == 0:
raise CommandError('At least one filename must be provided.')
for f in filenames:
if not path.path(f).exists():
raise CommandError("Filename %s does not exist" % f)
from emoticonvis.apps.enhance.tasks import import_from_tweet_parser_results
start = time()
for i, parsed_tweet_filename in enumerate(filenames):
if len(filenames) > 1:
print "Reading file %d of %d %s" % (i + 1, len(filenames), parsed_tweet_filename)
else:
print "Reading file %s" % parsed_tweet_filename
with transaction.atomic(savepoint=False):
import_from_tweet_parser_results(dataset_id, parsed_tweet_filename)
print "Time: %.2fs" % (time() - start) | mit |
hamiltont/CouchPotatoServer | libs/oauthlib/oauth1/rfc5849/signature.py | 112 | 19020 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
"""
oauthlib.oauth1.rfc5849.signature
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module represents a direct implementation of `section 3.4`_ of the spec.
Terminology:
* Client: software interfacing with an OAuth API
* Server: the API provider
* Resource Owner: the user who is granting authorization to the client
Steps for signing a request:
1. Collect parameters from the uri query, auth header, & body
2. Normalize those parameters
3. Normalize the uri
4. Pass the normalized uri, normalized parameters, and http method to
construct the base string
5. Pass the base string and any keys needed to a signing function
.. _`section 3.4`: http://tools.ietf.org/html/rfc5849#section-3.4
"""
import binascii
import hashlib
import hmac
import urlparse
from . import utils
from oauthlib.common import extract_params
def construct_base_string(http_method, base_string_uri,
normalized_encoded_request_parameters):
"""**String Construction**
Per `section 3.4.1.1`_ of the spec.
For example, the HTTP request::
POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
Host: example.com
Content-Type: application/x-www-form-urlencoded
Authorization: OAuth realm="Example",
oauth_consumer_key="9djdj82h48djs9d2",
oauth_token="kkk9d7dh3k39sjv7",
oauth_signature_method="HMAC-SHA1",
oauth_timestamp="137131201",
oauth_nonce="7d8f3e4a",
oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"
c2&a3=2+q
is represented by the following signature base string (line breaks
are for display purposes only)::
POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3D2%2520q
%26a3%3Da%26b5%3D%253D%25253D%26c%2540%3D%26c2%3D%26oauth_consumer_
key%3D9djdj82h48djs9d2%26oauth_nonce%3D7d8f3e4a%26oauth_signature_m
ethod%3DHMAC-SHA1%26oauth_timestamp%3D137131201%26oauth_token%3Dkkk
9d7dh3k39sjv7
.. _`section 3.4.1.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.1
"""
# The signature base string is constructed by concatenating together,
# in order, the following HTTP request elements:
# 1. The HTTP request method in uppercase. For example: "HEAD",
# "GET", "POST", etc. If the request uses a custom HTTP method, it
# MUST be encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
base_string = utils.escape(http_method.upper())
# 2. An "&" character (ASCII code 38).
base_string += u'&'
# 3. The base string URI from `Section 3.4.1.2`_, after being encoded
# (`Section 3.6`_).
#
# .. _`Section 3.4.1.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.2
# .. _`Section 3.4.6`: http://tools.ietf.org/html/rfc5849#section-3.4.6
base_string += utils.escape(base_string_uri)
# 4. An "&" character (ASCII code 38).
base_string += u'&'
# 5. The request parameters as normalized in `Section 3.4.1.3.2`_, after
# being encoded (`Section 3.6`).
#
# .. _`Section 3.4.1.3.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
# .. _`Section 3.4.6`: http://tools.ietf.org/html/rfc5849#section-3.4.6
base_string += utils.escape(normalized_encoded_request_parameters)
return base_string
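# For example (values abbreviated and illustrative):
#
#   construct_base_string(
#       u'POST',
#       u'http://example.com/request',
#       u'a2=r%20b&a3=a')
#   # -> u'POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3Da'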
def normalize_base_string_uri(uri):
"""**Base String URI**
Per `section 3.4.1.2`_ of the spec.
For example, the HTTP request::
GET /r%20v/X?id=123 HTTP/1.1
Host: EXAMPLE.COM:80
is represented by the base string URI: "http://example.com/r%20v/X".
In another example, the HTTPS request::
GET /?q=1 HTTP/1.1
Host: www.example.net:8080
is represented by the base string URI: "https://www.example.net:8080/".
.. _`section 3.4.1.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.2
"""
if not isinstance(uri, unicode):
raise ValueError('uri must be a unicode object.')
# FIXME: urlparse does not support unicode
scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
# The scheme, authority, and path of the request resource URI `RFC3986`
# are included by constructing an "http" or "https" URI representing
# the request resource (without the query or fragment) as follows:
#
# .. _`RFC2616`: http://tools.ietf.org/html/rfc3986
# 1. The scheme and host MUST be in lowercase.
scheme = scheme.lower()
netloc = netloc.lower()
# 2. The host and port values MUST match the content of the HTTP
# request "Host" header field.
# TODO: enforce this constraint
# 3. The port MUST be included if it is not the default port for the
# scheme, and MUST be excluded if it is the default. Specifically,
# the port MUST be excluded when making an HTTP request `RFC2616`_
# to port 80 or when making an HTTPS request `RFC2818`_ to port 443.
# All other non-default port numbers MUST be included.
#
# .. _`RFC2616`: http://tools.ietf.org/html/rfc2616
# .. _`RFC2818`: http://tools.ietf.org/html/rfc2818
default_ports = (
(u'http', u'80'),
(u'https', u'443'),
)
if u':' in netloc:
host, port = netloc.split(u':', 1)
if (scheme, port) in default_ports:
netloc = host
return urlparse.urlunparse((scheme, netloc, path, u'', u'', u''))
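# For example (per the docstring above):
#
#   normalize_base_string_uri(u'http://EXAMPLE.COM:80/r%20v/X?id=123')
#   # -> u'http://example.com/r%20v/X'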
# ** Request Parameters **
#
# Per `section 3.4.1.3`_ of the spec.
#
# In order to guarantee a consistent and reproducible representation of
# the request parameters, the parameters are collected and decoded to
# their original decoded form. They are then sorted and encoded in a
# particular manner that is often different from their original
# encoding scheme, and concatenated into a single string.
#
# .. _`section 3.4.1.3`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3
def collect_parameters(uri_query='', body=[], headers=None,
exclude_oauth_signature=True):
"""**Parameter Sources**
Parameters starting with `oauth_` will be unescaped.
Body parameters must be supplied as a dict, a list of 2-tuples, or a
formencoded query string.
Headers must be supplied as a dict.
Per `section 3.4.1.3.1`_ of the spec.
For example, the HTTP request::
POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
Host: example.com
Content-Type: application/x-www-form-urlencoded
Authorization: OAuth realm="Example",
oauth_consumer_key="9djdj82h48djs9d2",
oauth_token="kkk9d7dh3k39sjv7",
oauth_signature_method="HMAC-SHA1",
oauth_timestamp="137131201",
oauth_nonce="7d8f3e4a",
oauth_signature="djosJKDKJSD8743243%2Fjdk33klY%3D"
c2&a3=2+q
contains the following (fully decoded) parameters used in the
signature base string::
+------------------------+------------------+
| Name | Value |
+------------------------+------------------+
| b5 | =%3D |
| a3 | a |
| c@ | |
| a2 | r b |
| oauth_consumer_key | 9djdj82h48djs9d2 |
| oauth_token | kkk9d7dh3k39sjv7 |
| oauth_signature_method | HMAC-SHA1 |
| oauth_timestamp | 137131201 |
| oauth_nonce | 7d8f3e4a |
| c2 | |
| a3 | 2 q |
+------------------------+------------------+
Note that the value of "b5" is "=%3D" and not "==". Both "c@" and
"c2" have empty values. While the encoding rules specified in this
specification for the purpose of constructing the signature base
string exclude the use of a "+" character (ASCII code 43) to
represent an encoded space character (ASCII code 32), this practice
is widely used in "application/x-www-form-urlencoded" encoded values,
and MUST be properly decoded, as demonstrated by one of the "a3"
parameter instances (the "a3" parameter is used twice in this
request).
.. _`section 3.4.1.3.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
"""
headers = headers or {}
params = []
# The parameters from the following sources are collected into a single
# list of name/value pairs:
# * The query component of the HTTP request URI as defined by
# `RFC3986, Section 3.4`_. The query component is parsed into a list
# of name/value pairs by treating it as an
# "application/x-www-form-urlencoded" string, separating the names
# and values and decoding them as defined by
# `W3C.REC-html40-19980424`_, Section 17.13.4.
#
# .. _`RFC3986, Section 3.4`: http://tools.ietf.org/html/rfc3986#section-3.4
# .. _`W3C.REC-html40-19980424`: http://tools.ietf.org/html/rfc5849#ref-W3C.REC-html40-19980424
if uri_query:
params.extend(urlparse.parse_qsl(uri_query, keep_blank_values=True))
# * The OAuth HTTP "Authorization" header field (`Section 3.5.1`_) if
# present. The header's content is parsed into a list of name/value
# pairs excluding the "realm" parameter if present. The parameter
# values are decoded as defined by `Section 3.5.1`_.
#
# .. _`Section 3.5.1`: http://tools.ietf.org/html/rfc5849#section-3.5.1
if headers:
headers_lower = dict((k.lower(), v) for k, v in headers.items())
authorization_header = headers_lower.get(u'authorization')
if authorization_header is not None:
params.extend([i for i in utils.parse_authorization_header(
authorization_header) if i[0] != u'realm'])
# * The HTTP request entity-body, but only if all of the following
# conditions are met:
# * The entity-body is single-part.
#
# * The entity-body follows the encoding requirements of the
# "application/x-www-form-urlencoded" content-type as defined by
# `W3C.REC-html40-19980424`_.
# * The HTTP request entity-header includes the "Content-Type"
# header field set to "application/x-www-form-urlencoded".
#
# .. _`W3C.REC-html40-19980424`: http://tools.ietf.org/html/rfc5849#ref-W3C.REC-html40-19980424
# TODO: enforce header param inclusion conditions
bodyparams = extract_params(body) or []
params.extend(bodyparams)
# ensure all oauth params are unescaped
unescaped_params = []
for k, v in params:
if k.startswith(u'oauth_'):
v = utils.unescape(v)
unescaped_params.append((k, v))
# The "oauth_signature" parameter MUST be excluded from the signature
# base string if present.
if exclude_oauth_signature:
unescaped_params = filter(lambda i: i[0] != u'oauth_signature',
unescaped_params)
return unescaped_params
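# Illustrative usage sketch (not part of the original module): collecting
# parameters from a query string and an abbreviated, hypothetical
# Authorization header, following the docstring example above.
def _demo_collect_parameters():
    query = u'b5=%3D%253D&a3=a&c%40=&a2=r%20b'
    headers = {u'Authorization': u'OAuth realm="Example", '
               u'oauth_consumer_key="9djdj82h48djs9d2", '
               u'oauth_nonce="7d8f3e4a"'}
    # Returns decoded (name, value) pairs, e.g. (u'a2', u'r b'), with the
    # "realm" parameter excluded.
    return collect_parameters(uri_query=query, headers=headers)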
def normalize_parameters(params):
"""**Parameters Normalization**
Per `section 3.4.1.3.2`_ of the spec.
For example, the list of parameters from the previous section would
be normalized as follows:
Encoded::
+------------------------+------------------+
| Name | Value |
+------------------------+------------------+
| b5 | %3D%253D |
| a3 | a |
| c%40 | |
| a2 | r%20b |
| oauth_consumer_key | 9djdj82h48djs9d2 |
| oauth_token | kkk9d7dh3k39sjv7 |
| oauth_signature_method | HMAC-SHA1 |
| oauth_timestamp | 137131201 |
| oauth_nonce | 7d8f3e4a |
| c2 | |
| a3 | 2%20q |
+------------------------+------------------+
Sorted::
+------------------------+------------------+
| Name | Value |
+------------------------+------------------+
| a2 | r%20b |
| a3 | 2%20q |
| a3 | a |
| b5 | %3D%253D |
| c%40 | |
| c2 | |
| oauth_consumer_key | 9djdj82h48djs9d2 |
| oauth_nonce | 7d8f3e4a |
| oauth_signature_method | HMAC-SHA1 |
| oauth_timestamp | 137131201 |
| oauth_token | kkk9d7dh3k39sjv7 |
+------------------------+------------------+
Concatenated Pairs::
+-------------------------------------+
| Name=Value |
+-------------------------------------+
| a2=r%20b |
| a3=2%20q |
| a3=a |
| b5=%3D%253D |
| c%40= |
| c2= |
| oauth_consumer_key=9djdj82h48djs9d2 |
| oauth_nonce=7d8f3e4a |
| oauth_signature_method=HMAC-SHA1 |
| oauth_timestamp=137131201 |
| oauth_token=kkk9d7dh3k39sjv7 |
+-------------------------------------+
and concatenated together into a single string (line breaks are for
display purposes only)::
a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=9dj
dj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-SHA1
&oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7
.. _`section 3.4.1.3.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
"""
# The parameters collected in `Section 3.4.1.3`_ are normalized into a
# single string as follows:
#
# .. _`Section 3.4.1.3`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3
# 1. First, the name and value of each parameter are encoded
# (`Section 3.6`_).
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
key_values = [(utils.escape(k), utils.escape(v)) for k, v in params]
# 2. The parameters are sorted by name, using ascending byte value
# ordering. If two or more parameters share the same name, they
# are sorted by their value.
key_values.sort()
# 3. The name of each parameter is concatenated to its corresponding
# value using an "=" character (ASCII code 61) as a separator, even
# if the value is empty.
parameter_parts = [u'{0}={1}'.format(k, v) for k, v in key_values]
# 4. The sorted name/value pairs are concatenated together into a
# single string by using an "&" character (ASCII code 38) as
# separator.
return u'&'.join(parameter_parts)
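# Illustrative usage sketch (not part of the original module): normalizing
# the decoded non-oauth parameters from the docstring tables above; the
# expected string is taken verbatim from those tables.
def _demo_normalize_parameters():
    params = [(u'b5', u'=%3D'), (u'a3', u'a'), (u'c@', u''),
              (u'a2', u'r b'), (u'c2', u''), (u'a3', u'2 q')]
    assert normalize_parameters(params) == (
        u'a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=')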
def sign_hmac_sha1(base_string, client_secret, resource_owner_secret):
"""**HMAC-SHA1**
The "HMAC-SHA1" signature method uses the HMAC-SHA1 signature
algorithm as defined in `RFC2104`_::
digest = HMAC-SHA1 (key, text)
Per `section 3.4.2`_ of the spec.
.. _`RFC2104`: http://tools.ietf.org/html/rfc2104
.. _`section 3.4.2`: http://tools.ietf.org/html/rfc5849#section-3.4.2
"""
# The HMAC-SHA1 function variables are used in following way:
# text is set to the value of the signature base string from
# `Section 3.4.1.1`_.
#
# .. _`Section 3.4.1.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.1
text = base_string
# key is set to the concatenated values of:
# 1. The client shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
key = utils.escape(client_secret or u'')
# 2. An "&" character (ASCII code 38), which MUST be included
# even when either secret is empty.
key += u'&'
# 3. The token shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
key += utils.escape(resource_owner_secret or u'')
# FIXME: HMAC does not support unicode!
key_utf8 = key.encode('utf-8')
text_utf8 = text.encode('utf-8')
signature = hmac.new(key_utf8, text_utf8, hashlib.sha1)
# digest is used to set the value of the "oauth_signature" protocol
# parameter, after the result octet string is base64-encoded
# per `RFC2045, Section 6.8`_.
#
# .. _`RFC2045, Section 6.8`: http://tools.ietf.org/html/rfc2045#section-6.8
return binascii.b2a_base64(signature.digest())[:-1].decode('utf-8')
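# Illustrative usage sketch (not part of the original module): the secrets
# and base string below are hypothetical, so no fixed digest is asserted.
def _demo_sign_hmac_sha1():
    base_string = u'GET&http%3A%2F%2Fexample.com%2F&a2%3Dr%2520b'
    # Returns a base64-encoded HMAC-SHA1 digest as a unicode string.
    return sign_hmac_sha1(base_string, u'client-secret', u'token-secret')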
def sign_rsa_sha1(base_string, rsa_private_key):
"""**RSA-SHA1**
Per `section 3.4.3`_ of the spec.
The "RSA-SHA1" signature method uses the RSASSA-PKCS1-v1_5 signature
algorithm as defined in `RFC3447, Section 8.2`_ (also known as
PKCS#1), using SHA-1 as the hash function for EMSA-PKCS1-v1_5. To
use this method, the client MUST have established client credentials
with the server that included its RSA public key (in a manner that is
beyond the scope of this specification).
NOTE: this method requires the python-rsa library.
.. _`section 3.4.3`: http://tools.ietf.org/html/rfc5849#section-3.4.3
.. _`RFC3447, Section 8.2`: http://tools.ietf.org/html/rfc3447#section-8.2
"""
# TODO: finish RSA documentation
import rsa
key = rsa.PrivateKey.load_pkcs1(rsa_private_key)
sig = rsa.sign(base_string, key, 'SHA-1')
return binascii.b2a_base64(sig)[:-1]
def sign_plaintext(client_secret, resource_owner_secret):
"""Sign a request using plaintext.
Per `section 3.4.4`_ of the spec.
The "PLAINTEXT" method does not employ a signature algorithm. It
MUST be used with a transport-layer mechanism such as TLS or SSL (or
sent over a secure channel with equivalent protections). It does not
utilize the signature base string or the "oauth_timestamp" and
"oauth_nonce" parameters.
.. _`section 3.4.4`: http://tools.ietf.org/html/rfc5849#section-3.4.4
"""
# The "oauth_signature" protocol parameter is set to the concatenated
# value of:
# 1. The client shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
signature = utils.escape(client_secret or u'')
# 2. An "&" character (ASCII code 38), which MUST be included even
# when either secret is empty.
signature += u'&'
# 3. The token shared-secret, after being encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
signature += utils.escape(resource_owner_secret or u'')
return signature
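# Illustrative usage sketch (not part of the original module): a PLAINTEXT
# signature is just the two escaped secrets joined by "&"; either secret may
# be empty. The secrets below are hypothetical.
def _demo_sign_plaintext():
    assert sign_plaintext(u'abc', u'xyz') == u'abc&xyz'
    assert sign_plaintext(u'abc', None) == u'abc&'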
| gpl-3.0 |
indashnet/InDashNet.Open.UN2000 | android/external/antlr/antlr-3.4/runtime/Python/tests/t047treeparser.py | 20 | 4342 | import unittest
import textwrap
import antlr3
import antlr3.tree
import testbase
class T(testbase.ANTLRTest):
def walkerClass(self, base):
class TWalker(base):
def __init__(self, *args, **kwargs):
base.__init__(self, *args, **kwargs)
self.traces = []
def traceIn(self, ruleName, ruleIndex):
self.traces.append('>'+ruleName)
def traceOut(self, ruleName, ruleIndex):
self.traces.append('<'+ruleName)
def recover(self, input, re):
# no error recovery yet, just crash!
raise
return TWalker
def setUp(self):
self.compileGrammar()
self.compileGrammar('t047treeparserWalker.g', options='-trace')
def testWalker(self):
input = textwrap.dedent(
'''\
char c;
int x;
void bar(int x);
int foo(int y, char d) {
int i;
for (i=0; i<3; i=i+1) {
x=3;
y=5;
}
}
''')
cStream = antlr3.StringStream(input)
lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = self.getParser(tStream)
r = parser.program()
self.failUnlessEqual(
r.tree.toStringTree(),
"(VAR_DEF char c) (VAR_DEF int x) (FUNC_DECL (FUNC_HDR void bar (ARG_DEF int x))) (FUNC_DEF (FUNC_HDR int foo (ARG_DEF int y) (ARG_DEF char d)) (BLOCK (VAR_DEF int i) (for (= i 0) (< i 3) (= i (+ i 1)) (BLOCK (= x 3) (= y 5)))))"
)
nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
nodes.setTokenStream(tStream)
walker = self.getWalker(nodes)
walker.program()
# FIXME: need to crosscheck with Java target (compile walker with
# -trace option), if this is the real list. For now I'm happy that
# it does not crash ;)
self.failUnlessEqual(
walker.traces,
[ '>program', '>declaration', '>variable', '>type', '<type',
'>declarator', '<declarator', '<variable', '<declaration',
'>declaration', '>variable', '>type', '<type', '>declarator',
'<declarator', '<variable', '<declaration', '>declaration',
'>functionHeader', '>type', '<type', '>formalParameter',
'>type', '<type', '>declarator', '<declarator',
'<formalParameter', '<functionHeader', '<declaration',
'>declaration', '>functionHeader', '>type', '<type',
'>formalParameter', '>type', '<type', '>declarator',
'<declarator', '<formalParameter', '>formalParameter', '>type',
'<type', '>declarator', '<declarator', '<formalParameter',
'<functionHeader', '>block', '>variable', '>type', '<type',
'>declarator', '<declarator', '<variable', '>stat', '>forStat',
'>expr', '>expr', '>atom', '<atom', '<expr', '<expr', '>expr',
'>expr', '>atom', '<atom', '<expr', '>expr', '>atom', '<atom',
'<expr', '<expr', '>expr', '>expr', '>expr', '>atom', '<atom',
'<expr', '>expr', '>atom', '<atom', '<expr', '<expr', '<expr',
'>block', '>stat', '>expr', '>expr', '>atom', '<atom', '<expr',
'<expr', '<stat', '>stat', '>expr', '>expr', '>atom', '<atom',
'<expr', '<expr', '<stat', '<block', '<forStat', '<stat',
'<block', '<declaration', '<program'
]
)
def testRuleLabelPropertyRefText(self):
self.compileGrammar()
self.compileGrammar('t047treeparserWalker.g', options='-trace')
input = textwrap.dedent(
'''\
char c;
''')
cStream = antlr3.StringStream(input)
lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = self.getParser(tStream)
r = parser.variable()
nodes = antlr3.tree.CommonTreeNodeStream(r.tree)
nodes.setTokenStream(tStream)
walker = self.getWalker(nodes)
r = walker.variable()
self.failUnlessEqual(r, 'c')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
hillwoodroc/deepin-music-player | src/common.py | 1 | 3015 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Hou Shaohui
#
# Author: Hou Shaohui <[email protected]>
# Maintainer: Hou Shaohui <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gio
from mutagen import File as MutagenFile
from mutagen.asf import ASF
from mutagen.apev2 import APEv2File
from mutagen.flac import FLAC
from mutagen.id3 import ID3FileType
from mutagen.oggflac import OggFLAC
from mutagen.oggspeex import OggSpeex
from mutagen.oggtheora import OggTheora
from mutagen.oggvorbis import OggVorbis
from mutagen.trueaudio import TrueAudio
from mutagen.wavpack import WavPack
try: from mutagen.mp4 import MP4 #@UnusedImport
except: from mutagen.m4a import M4A as MP4 #@Reimport
from mutagen.musepack import Musepack
from mutagen.monkeysaudio import MonkeysAudio
from mutagen.optimfrog import OptimFROG
from easymp3 import EasyMP3
FORMATS = [EasyMP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC,
FLAC, APEv2File, MP4, ID3FileType, WavPack, Musepack,
MonkeysAudio, OptimFROG, ASF]
UNTRUST_AUDIO_EXT = [
"669", "ac3", "aif", "aiff", "ape", "amf", "au",
"dsm", "far", "it", "med", "mka", "mpc", "mid",
"mod", "mtm", "midi", "oga", "ogx", "okt", "ra",
"ram", "s3m", "sid", "shn", "snd", "spc", "spx",
"stm", "tta", "ult", "wv", "xm"
]
TRUST_AUDIO_EXT = [
"wav", "wma", "mp2", "mp3", "mp4", "m4a", "flac", "ogg"
]
def file_is_supported(filename, strict=False):
''' Whether the file is a supported audio format. '''
results = gio.File(filename).get_basename().split(".")
if len(results) < 2:
return False
else:
extension = results[-1].lower()
if extension in TRUST_AUDIO_EXT:
return True
elif extension in UNTRUST_AUDIO_EXT:
try:
fileobj = file(filename, "rb")
except:
return False
try:
header = fileobj.read(128)
results = [Kind.score(filename, fileobj, header) for Kind in FORMATS]
except:
return False
finally:
fileobj.close()
results = zip(results, FORMATS)
results.sort()
score, Kind = results[-1]
if score > 0: return True
else: return False
else:
return False
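# Illustrative usage sketch (not part of the original module): trusted
# extensions are accepted without probing file contents, while a file with
# no extension is rejected outright. The paths below are hypothetical.
def _demo_file_is_supported():
    assert file_is_supported("/tmp/song.mp3")
    assert not file_is_supported("/tmp/noext")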
| gpl-3.0 |
Stavitsky/neutron | neutron/common/topics.py | 45 | 1863 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NETWORK = 'network'
SUBNET = 'subnet'
PORT = 'port'
SECURITY_GROUP = 'security_group'
L2POPULATION = 'l2population'
DVR = 'dvr'
CREATE = 'create'
DELETE = 'delete'
UPDATE = 'update'
AGENT = 'q-agent-notifier'
PLUGIN = 'q-plugin'
L3PLUGIN = 'q-l3-plugin'
DHCP = 'q-dhcp-notifer'
FIREWALL_PLUGIN = 'q-firewall-plugin'
METERING_PLUGIN = 'q-metering-plugin'
LOADBALANCER_PLUGIN = 'n-lbaas-plugin'
L3_AGENT = 'l3_agent'
DHCP_AGENT = 'dhcp_agent'
METERING_AGENT = 'metering_agent'
LOADBALANCER_AGENT = 'n-lbaas_agent'
def get_topic_name(prefix, table, operation, host=None):
"""Create a topic name.
The topic name needs to be synced between the agent and the
plugin. The plugin will send a fanout message to all of the
listening agents so that the agents in turn can perform their
updates accordingly.
:param prefix: Common prefix for the plugin/agent message queues.
:param table: The table in question (NETWORK, SUBNET, PORT).
:param operation: The operation that invokes notification (CREATE,
DELETE, UPDATE)
:param host: Add host to the topic
:returns: The topic name.
"""
if host:
return '%s-%s-%s.%s' % (prefix, table, operation, host)
return '%s-%s-%s' % (prefix, table, operation)
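# Illustrative usage sketch (not part of the original module): topic names
# combine prefix, table and operation, plus an optional host suffix.
def _demo_get_topic_name():
    assert get_topic_name(PLUGIN, PORT, UPDATE) == 'q-plugin-port-update'
    assert (get_topic_name(AGENT, NETWORK, DELETE, host='node1')
            == 'q-agent-notifier-network-delete.node1')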
| apache-2.0 |
hellozt/gnome15 | src/gamewrap/gw/__init__.py | 8 | 4907 | # Gnome15 - Suite of tools for the Logitech G series keyboards and headsets
# Copyright (C) 2011 Brett Smith <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gobject
import subprocess
import signal
import sys
import dbus.service
import threading
import re
# Logging
import logging
logger = logging.getLogger(__name__)
NAME = "GameWrap"
VERSION = "0.1"
BUS_NAME = "org.gnome15.GameWrap"
OBJECT_PATH = "/org/gnome15/GameWrap"
IF_NAME = "org.gnome15.GameWrap"
class RunThread(threading.Thread):
def __init__(self, controller):
threading.Thread.__init__(self, name = "ExecCommand")
self.controller = controller
def run(self):
logger.info("Running '%s'", str(self.controller.args))
self.process = subprocess.Popen(self.controller.args, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
logger.info("Process started OK")
while True:
line = self.process.stdout.readline(1024)
if line:
logger.info(">%s<", line)
for pattern_id in self.controller.patterns:
pattern = self.controller.patterns[pattern_id]
match = re.search(pattern, line)
if match:
logger.info("Match! %s", str(match))
# Emit the D-Bus signal from the main loop, not this worker thread.
gobject.idle_add(self.controller.PatternMatch, pattern_id, line)
else:
break
logger.info("Waiting for process to complete")
self.controller.status = self.process.wait()
logger.info("Process complete with %s", self.controller.status)
self.controller.Stop()
class G15GameWrapperServiceController(dbus.service.Object):
def __init__(self, args, bus, no_trap=False):
bus_name = dbus.service.BusName(BUS_NAME, bus=bus, replace_existing=False, allow_replacement=False, do_not_queue=True)
dbus.service.Object.__init__(self, None, OBJECT_PATH, bus_name)
self._page_sequence_number = 1
self._bus = bus
self.args = args
self.status = 0
self.patterns = {}
logger.info("Exposing service for '%s'. Wait for signal to wait", str(args))
if not no_trap:
signal.signal(signal.SIGINT, self.sigint_handler)
signal.signal(signal.SIGTERM, self.sigterm_handler)
self._loop = gobject.MainLoop()
def start_loop(self):
logger.info("Starting GLib loop")
self._loop.run()
logger.debug("Exited GLib loop")
def sigint_handler(self, signum, frame):
logger.info("Got SIGINT signal, shutting down")
self._shutdown()
def sigterm_handler(self, signum, frame):
logger.info("Got SIGTERM signal, shutting down")
self._shutdown()
"""
DBUS API
"""
@dbus.service.method(IF_NAME)
def Start(self):
RunThread(self).start()
@dbus.service.method(IF_NAME)
def Stop(self):
gobject.idle_add(self._shutdown)
@dbus.service.method(IF_NAME, in_signature='ss')
def AddPattern(self, pattern_id, pattern):
logger.info("Adding pattern '%s' with id '%s'", pattern, pattern_id)
if pattern_id in self.patterns:
raise Exception("Pattern with ID %s already registered." % pattern_id)
self.patterns[pattern_id] = pattern
@dbus.service.method(IF_NAME, in_signature='s')
def RemovePattern(self, pattern_id):
logger.info("Removing pattern with id '%s'", pattern_id)
if not pattern_id in self.patterns:
raise Exception("Pattern with ID %s not registered." % pattern_id)
del self.patterns[pattern_id]
@dbus.service.method(IF_NAME, in_signature='', out_signature='ssssas')
def GetInformation(self):
return ("GameWrapper Service", "Gnome15 Project", VERSION, "1.0", self.args)
"""
Signals
"""
"""
DBUS Signals
"""
@dbus.service.signal(IF_NAME, signature='ss')
def PatternMatch(self, pattern_id, line):
pass
"""
Private
"""
def _shutdown(self):
logger.info("Shutting down")
self._loop.quit()
sys.exit(self.status) | gpl-3.0 |