import os
import shutil
import tempfile
from amara import tree
from amara.writers import outputparameters
from amara.xpath import datatypes
from amara.xslt.tree import xslt_element, content_model, attribute_types
EXSL_COMMON_NS = "http://exslt.org/common"
def nodeset_function(context, arg0):
"""
The purpose of the exsl:node-set function is to return a node-set from a
result tree fragment. If the argument is a node-set already, it is simply
returned as is. If the argument to exsl:node-set is not a node-set or a
result tree fragment, then it is converted to a string as by the string()
function, and the function returns a node-set consisting of a single text
node with that string value.
The exsl:node-set function does not have side-effects: the result tree
fragment used as an argument is still available as a result tree fragment
after it is passed as an argument to exsl:node-set.
"""
obj = arg0.evaluate(context)
    if not isinstance(obj, datatypes.nodeset):
        if not isinstance(obj, tree.entity):
            obj = tree.text(datatypes.string(obj))
        obj = datatypes.nodeset([obj])
return obj
def object_type_function(context, arg0):
"""
The exsl:object-type function returns a string giving the type of the
object passed as the argument. The possible object types are: 'string',
'number', 'boolean', 'node-set', 'RTF' or 'external'.
"""
obj = arg0.evaluate(context)
if isinstance(obj, datatypes.nodeset):
tp_name = 'node-set'
elif isinstance(obj, datatypes.string):
tp_name = 'string'
elif isinstance(obj, datatypes.number):
tp_name = 'number'
elif isinstance(obj, datatypes.boolean):
tp_name = 'boolean'
elif isinstance(obj, tree.entity):
tp_name = 'RTF'
else:
tp_name = 'external'
return datatypes.string(tp_name)
class document_element(xslt_element):
"""
For the basic specification, see:
http://www.exslt.org/exsl/elements/document/index.html
    The only URI scheme currently supported is 'file:'.
    Security note:
    As a precaution, if you try to overwrite an existing file, it can first
    be saved to a temporary file (there will be a warning with the file
    name). If this precaution fails, the instruction will abort. The
    safeguard is off by default; enable it with the f:overwrite-safeguard
    extension attribute.
"""
content_model = content_model.template
attribute_types = {
'href' : attribute_types.uri_reference_avt(required=True),
'method' : attribute_types.qname_avt(),
'version' : attribute_types.nmtoken_avt(),
'encoding' : attribute_types.string_avt(),
'omit-xml-declaration' : attribute_types.yesno_avt(),
'standalone' : attribute_types.yesno_avt(),
'doctype-public' : attribute_types.string_avt(),
'doctype-system' : attribute_types.string_avt(),
'cdata-section-elements' : attribute_types.qnames_avt(),
'indent' : attribute_types.yesno_avt(),
'media-type' : attribute_types.string_avt(),
'f:byte-order-mark' : attribute_types.yesno_avt(
default='no',
description=("Whether to force output of a byte order mark (BOM). "
"Usually used to generate a UTF-8 BOM. Do not use "
"this unless you're sure you know what you're doing")),
'f:overwrite-safeguard' : attribute_types.yesno_avt(
default='no',
description=("Whether or not to make backup copies of any file "
"before it's overwritten.")),
}
def setup(self):
self._output_parameters = outputparameters.outputparameters()
return
def instantiate(self, context):
context.instruction, context.namespaces = self, self.namespaces
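        # NOTE: `Uri`, `processor`, `XsltRuntimeException`, `ExsltError` and
        # `FT_EXT_NAMESPACE` below are unported 4Suite-era references that
        # this alpha does not yet resolve to Amara 2 equivalents.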
# this uses attributes directly from self
self._output_parameters.avtParse(self, context)
href = self._href.evaluate(context)
if Uri.IsAbsolute(href):
uri = href
else:
try:
uri = Uri.Absolutize(href,
Uri.OsPathToUri(processor.writer.getStream().name))
except Exception, e:
raise XsltRuntimeException(
ExsltError.NO_EXSLTDOCUMENT_BASE_URI,
context.currentInstruction, href)
path = Uri.UriToOsPath(uri)
        if (self.attributes.get((FT_EXT_NAMESPACE, 'overwrite-safeguard')) == u'yes'
            and os.access(path, os.F_OK)):
            # Kick in the safety measures
            # FIXME: Security hole. Switch to mkstemp as soon as we
            # mandate a Python 2.3 minimum
            savefile = tempfile.mktemp('', os.path.split(path)[-1] + '-')
processor.warn("The file you are trying to create with"
" exsl:document already exists. As a safety"
" measure it will be copied to a temporary file"
" '%s'." % savefile) #FIXME: l10n
try:
shutil.copyfile(path, savefile)
except:
raise XsltRuntimeException(
ExsltError.ABORTED_EXSLDOCUMENT_OVERWRITE,
context.currentInstruction, path, savefile)
try:
stream = open(path, 'w')
except IOError:
dirname = os.path.dirname(path)
# Should we also check base path writability with os.W_OK?
if not os.access(dirname, os.F_OK):
os.makedirs(dirname)
stream = open(path, 'w')
else:
raise
processor.addHandler(self._output_parameters, stream)
try:
self.processChildren(context, processor)
finally:
processor.removeHandler()
stream.close()
return
## XSLT Extension Module Interface ####################################
extension_namespaces = {
EXSL_COMMON_NS : 'exsl',
}
extension_functions = {
(EXSL_COMMON_NS, 'node-set'): nodeset_function,
(EXSL_COMMON_NS, 'object-type'): object_type_function,
}
extension_elements = {
(EXSL_COMMON_NS, 'document'): document_element,
}
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/exslt/common.py | common.py
import re
import codecs
import binascii
import operator
import itertools
from amara import tree
from amara.xpath import datatypes
EXSL_STRINGS_NS = "http://exslt.org/strings"
def align_function(context, target, padding, alignment=None):
"""
The str:align function aligns a string within another string.
See http://exslt.org/str/functions/align/str.align.html for further
explanation.
"""
target = target.evaluate_as_string(context)
padding = padding.evaluate_as_string(context)
alignment = alignment and alignment.evaluate_as_string(context)
# If the target string is longer than the padding string, then it is
# truncated to be the same length as the padding string and returned.
if len(target) > len(padding):
result = target[:len(padding)]
# If no third argument is given or if it is not one of 'left', 'right'
# or 'center', then it defaults to left alignment.
elif alignment == 'right':
result = padding[:-len(target)] + target
elif alignment == 'center':
# With center alignment, the range of characters replaced by the target
# string is in the middle of the padding string, such that either the
# number of unreplaced characters on either side of the range is the
# same or there is one less on the left than there is on the right.
left = (len(padding) - len(target)) / 2
right = left + len(target)
result = padding[:left] + target + padding[right:]
else:
result = target + padding[len(target):]
return datatypes.string(result)
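# Editor's illustration (hypothetical helper, not part of the module): the
# alignment arithmetic of str:align above, restated over plain Python strings
# without the XPath context machinery.
def _align_demo(target, padding, alignment=None):
    if len(target) > len(padding):
        return target[:len(padding)]
    if alignment == 'right':
        return padding[:-len(target)] + target
    if alignment == 'center':
        left = (len(padding) - len(target)) // 2
        return padding[:left] + target + padding[left + len(target):]
    return target + padding[len(target):]
# _align_demo('abc', '----------', 'center') -> '---abc----'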
def concat_function(context, nodeset):
"""
The str:concat function takes a node set and returns the concatenation of
the string values of the nodes in that node set. If the node set is empty,
it returns an empty string.
"""
nodeset = nodeset.evaluate_as_nodeset(context)
strings = map(datatypes.string, nodeset)
return datatypes.string(u''.join(strings))
def decode_uri_function(context, uri, encoding=None):
"""
The str:decode-uri function decodes a percent-encoded string, such as
one would find in a URI.
"""
uri = uri.evaluate_as_string(context)
encoding = encoding.evaluate_as_string(context) if encoding else 'UTF-8'
try:
decoder = codecs.getdecoder(encoding)
except LookupError:
# Unsupported encoding
return datatypes.EMPTY_STRING
def repl(match, decoder=decoder):
# Note, there may be multiple encoded characters that are required
# to produce a single Unicode character.
hexlified = match.group().replace('%', '')
bytes = binascii.unhexlify(hexlified)
# Ignore any invalid sequences in this encoding
string, consumed = decoder(bytes, 'ignore')
return string
return datatypes.string(re.sub('(?:%[0-9a-fA-F]{2})+', repl, uri))
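# Editor's illustration (standalone sketch): the multi-byte case that repl()
# above handles. '%C3%A9' is the UTF-8 percent-encoding of u'\xe9';
# unhexlifying both bytes before decoding is what yields a single character
# instead of two replacement characters.
def _decode_uri_demo():
    import binascii, codecs
    decoder = codecs.getdecoder('UTF-8')
    string, consumed = decoder(binascii.unhexlify('C3A9'), 'ignore')
    return string  # u'\xe9'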
_unreserved = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz'
'0123456789'
"-_.!~*'()"
'%') # not really unreserved, but handled specially before these
_reserved = ';/?:@&=+$,[]'
_reserved = re.compile(r"[^" + re.escape(_unreserved + _reserved) + "]")
_unreserved = re.compile(r"[^" + re.escape(_unreserved) + "]")
def encode_uri_function(context, uri, escapeReserved, encoding=None):
"""
The str:encode-uri function percent-encodes a string for embedding in a URI.
The second argument is a boolean indicating whether to escape reserved characters;
if true, the given string can be a URI already, with just some of its characters
needing to be escaped (not recommended, but users who don't understand the nuances
of the URI syntax tend to prefer it over assembling a URI piece-by-piece).
"""
uri = uri.evaluate_as_string(context)
escape_reserved = escapeReserved.evaluate_as_boolean(context)
encoding = encoding.evaluate_as_string(context) if encoding else 'UTF-8'
try:
encoder = codecs.getencoder(encoding)
except LookupError:
return datatypes.EMPTY_STRING
# The "%" is escaped only if it is not followed by two hexadecimal digits.
uri = re.sub('%(?![0-9A-Fa-f]{2})', u'%25', uri)
def repl(match, encoder=encoder):
ch = match.group()
ordinal = ord(ch)
if ordinal > 127:
try:
encoded, consumed = encoder(ch, 'strict')
except UnicodeError:
# Not valid in this encoding
result = '%3F'
else:
# The Unicode character could map to multiple bytes
result = u''.join([ '%%%02X' % ord(ch) for ch in encoded ])
else:
result = '%%%02X' % ordinal
return result
if escape_reserved:
result = _reserved.sub(repl, uri)
else:
result = _unreserved.sub(repl, uri)
return datatypes.string(result)
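# Editor's illustration (standalone sketch): percent-encoding a non-ASCII
# character the way repl() above does -- encode to bytes first, then emit one
# %XX escape per byte.
def _encode_uri_demo():
    import codecs
    encoder = codecs.getencoder('UTF-8')
    encoded, consumed = encoder(u'\xe9', 'strict')
    return u''.join(u'%%%02X' % ord(ch) for ch in encoded)  # u'%C3%A9'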
def padding_function(context, length, chars=None):
"""
The str:padding function creates a padding string of a certain length.
The second argument gives a string to be used to create the padding.
This string is repeated as many times as is necessary to create a string
of the length specified by the first argument; if the string is more than
a character long, it may have to be truncated to produce the required
length. If no second argument is specified, it defaults to a space (' ').
"""
length = int(length.evaluate_as_number(context))
chars = chars.evaluate_as_string(context) if chars else u' '
return datatypes.string((chars*length)[:length])
def _replace(context, string, replacement=None, *replacements):
"""
    Supports str:replace(). `string` is the text to process; `replacement`
    (if given) is a (search, replace-node, key) tuple, and `replacements`
    holds the remaining such tuples, ordered longest search string first.
This recursive function will cause the original string to have
occurrences of the search strings replaced with the corresponding
node or deleted. When a replacement is made, that portion of the
original string is no longer available for further replacements.
All replacements are made for each search string before moving on
to the next. Empty search strings match in between every character
of the original string.
"""
if replacement:
search, replace, key = replacement
if search:
segments = string.split(search)
else:
segments = list(string)
last_i = len(segments) - 1
for i, segment in enumerate(segments):
if segment:
_replace(context, segment, *replacements)
if replace and i < last_i:
context.copy_node(replace)
else:
context.text(string)
return
def replace_function(context, string, search, replace):
"""
The str:replace function converts a string to a node-set, with
each instance of a substring from a given list (obtained from the
string-values of nodes in the second argument) replaced by the
node at the corresponding position of the node-set given as the
third argument. Unreplaced substrings become text nodes.
The second and third arguments can be any type of object; if
either is not a node-set, it is treated as if it were a node-set
of just one text node, formed from the object's string-value.
Attribute and namespace nodes in the replacement set are
erroneous but are treated as empty text nodes.
All occurrences of the longest substrings are replaced first,
and once a replacement is made, that span of the original string
is no longer eligible for future replacements.
An empty search string matches between every character of the
original string.
See http://exslt.org/str/functions/replace/str.replace.html for details.
"""
#FIXME: http://www.exslt.org/str/functions/replace/ doesn't say we have
#to convert the first arg to a string, but should we, anyway?
#If not, we should at least check and flag non-strings with a clear error?
# prepare a list of strings to search for (based on searchNodeSet)
string = string.evaluate_as_string(context)
search = search.evaluate(context)
replace = replace.evaluate(context)
if isinstance(search, datatypes.nodeset):
search = map(datatypes.string, search)
else:
search = [datatypes.string(search)]
if isinstance(replace, datatypes.nodeset):
# use `replace` but replace attr, ns nodes with empty text nodes
for index, node in enumerate(replace):
if isinstance(node, (tree.attribute, tree.namespace)):
replace[index] = tree.text(u'')
else:
replace = [tree.text(datatypes.string(replace))]
# Unpaired search patterns are to be deleted (replacement is None)
replace = itertools.chain(replace, itertools.repeat(None))
# Sort the tuples in ascending order by length of string.
# So that the longest search strings will be replaced first,
replacements = zip(search, replace, itertools.imap(len, search))
replacements.sort(key=operator.itemgetter(2), reverse=True)
# generate a result tree fragment
context.push_tree_writer(context.instruction.baseUri)
_replace(context, string, *replacements)
writer = context.pop_writer()
rtf = writer.get_result()
return datatypes.nodeset(rtf.xml_children)
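# Editor's illustration (hypothetical, flat approximation): the
# longest-substring-first ordering that replace_function() relies on. The
# real algorithm recurses only into the still-unreplaced segments; this
# sketch just shows why 'aaa' must be consumed before 'aa' can match.
def _replace_order_demo():
    pairs = sorted([(u'aa', u'X'), (u'aaa', u'Y')],
                   key=lambda pair: len(pair[0]), reverse=True)
    s = u'aaaaa'
    for search, replace in pairs:
        s = replace.join(s.split(search))
    return s  # u'YX', not u'XXa'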
def split_function(context, string, pattern=None):
"""
The str:split function splits up a string and returns a node set of
token elements, each containing one token from the string.
The first argument is the string to be split. The second argument is a
pattern string (default=' '). The string given by the first argument is
split at any occurrence of this pattern. An empty string pattern will
result in a split on every character in the string.
"""
string = string.evaluate_as_string(context)
pattern = pattern.evaluate_as_string(context) if pattern else u' '
context.push_tree_writer(context.instruction.baseUri)
if pattern:
tokens = string.split(pattern)
else:
tokens = string
for token in tokens:
context.start_element(u'token')
context.text(token)
context.end_element(u'token')
writer = context.pop_writer()
rtf = writer.get_result()
return datatypes.nodeset(rtf.xml_children)
def tokenize_function(context, string, delimiters=None):
"""
The str:tokenize function splits up a string and returns a node set of
'token' elements, each containing one token from the string.
The first argument is the string to be tokenized. The second argument
is a string consisting of a number of characters. Each character in
this string is taken as a delimiting character. The string given by the
first argument is split at any occurrence of any of these characters.
"""
string = string.evaluate_as_string(context)
if delimiters:
delimiters = delimiters.evaluate_as_string(context)
else:
delimiters = '\t\n\r '
if delimiters:
tokens = re.split('[%s]' % re.escape(delimiters), string)
else:
tokens = string
context.push_tree_writer(context.instruction.baseUri)
for token in tokens:
context.start_element(u'token')
context.text(token)
context.end_element(u'token')
writer = context.pop_writer()
rtf = writer.get_result()
return datatypes.nodeset(rtf.xml_children)
## XSLT Extension Module Interface ####################################
extension_namespaces = {
EXSL_STRINGS_NS : 'str',
}
extension_functions = {
(EXSL_STRINGS_NS, 'align'): align_function,
(EXSL_STRINGS_NS, 'concat'): concat_function,
(EXSL_STRINGS_NS, 'decode-uri'): decode_uri_function,
(EXSL_STRINGS_NS, 'encode-uri'): encode_uri_function,
(EXSL_STRINGS_NS, 'padding'): padding_function,
(EXSL_STRINGS_NS, 'replace'): replace_function,
(EXSL_STRINGS_NS, 'split'): split_function,
(EXSL_STRINGS_NS, 'tokenize'): tokenize_function,
}
extension_elements = {
}
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/exslt/strings.py | strings.py
from __future__ import absolute_import
import sys
import traceback
from amara import tree
from amara.xpath import XPathError, datatypes
from amara.xpath.parser import parse as parse_xpath
from .common import EXSL_COMMON_NS
EXSL_DYNAMIC_NS = "http://exslt.org/dynamic"
def closure_function(context, nodeset, string):
"""
The dyn:closure function creates a node set resulting from transitive
closure of evaluating the expression passed as the second argument on
each of the nodes passed as the first argument, then on the node set
resulting from that and so on until no more nodes are found.
http://www.exslt.org/dyn/functions/closure/index.html
"""
nodeset = nodeset.evaluate_as_nodeset(context)
string = string.evaluate_as_string(context)
try:
expr = parse_xpath(string)
except XPathError:
lines = traceback.format_exception(*sys.exc_info())
lines[:1] = [("Syntax error in XPath expression '%(expr)s', "
"lower-level traceback:\n") % {'expr': string}]
context.processor.warning(''.join(lines))
return datatypes.nodeset()
result = datatypes.nodeset()
while nodeset:
nodeset = _map(context, nodeset, expr)
result.extend(nodeset)
return result
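# Editor's illustration (hypothetical): the fixed-point loop behind
# dyn:closure above, with a plain successor mapping standing in for XPath
# evaluation over a node set. Like the real function, it assumes the
# relation eventually stops producing new nodes.
def _closure_demo():
    successors = {1: [2, 3], 2: [4], 3: [], 4: []}
    frontier, result = [1], []
    while frontier:
        frontier = [n for node in frontier for n in successors[node]]
        result.extend(frontier)
    return result  # [2, 3, 4]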
def evaluate_function(context, string):
"""
The dyn:evaluate function evaluates a string as an XPath expression and
returns the resulting value, which might be a boolean, number, string,
node set, result tree fragment or external object. The sole argument is
the string to be evaluated. If the string is an invalid XPath expression,
an empty node-set is returned.
http://www.exslt.org/dyn/functions/evaluate/index.html
"""
string = string.evaluate_as_string(context)
try:
expr = parse_xpath(string)
except XPathError:
lines = traceback.format_exception(*sys.exc_info())
lines[:1] = [("Syntax error in XPath expression '%(expr)s', "
"lower-level traceback:\n") % {'expr': string}]
context.processor.warning(''.join(lines))
return datatypes.nodeset()
try:
result = expr.evaluate(context)
except:
lines = traceback.format_exception(*sys.exc_info())
lines[:1] = [("Runtime error in XPath expression '%(expr)s', "
"lower-level traceback:\n") % {'expr': string}]
context.processor.warning(''.join(lines))
return datatypes.nodeset()
return result
def _map(context, nodeset, expr):
focus = context.node, context.position, context.size
context.size = len(nodeset)
position = 1
inputs = iter(nodeset)
return_type = None
result = set()
for node in inputs:
context.node = node
context.position = position
position += 1
try:
obj = expr.evaluate(context)
except:
            lines = traceback.format_exception(*sys.exc_info())
            lines[:1] = ["Runtime error in XPath expression, "
                         "lower-level traceback:\n"]
context.processor.warning(''.join(lines))
else:
if not return_type:
if isinstance(obj, datatypes.nodeset):
tag_name = None
elif isinstance(obj, datatypes.number):
tag_name = 'exsl:number'
converter = datatypes.string
elif isinstance(obj, datatypes.boolean):
tag_name = 'exsl:boolean'
converter = lambda obj: u'true' if obj else u''
else:
tag_name = 'exsl:string'
converter = datatypes.string
return_type = True
if tag_name:
E = tree.element(EXSL_COMMON_NS, tag_name)
E.xml_append(tree.text(converter(obj)))
result.add(E)
else:
result.update(obj)
context.node, context.position, context.size = focus
return datatypes.nodeset(result)
def map_function(context, nodeset, string):
"""
The dyn:map function evaluates the expression passed as the second
argument for each of the nodes passed as the first argument, and returns
a node set of those values.
http://www.exslt.org/dyn/functions/map/index.html
"""
nodeset = nodeset.evaluate_as_nodeset(context)
string = string.evaluate_as_string(context)
try:
expr = parse_xpath(string)
except XPathError:
lines = traceback.format_exception(*sys.exc_info())
lines[:1] = [("Syntax error in XPath expression '%(expr)s', "
"lower-level traceback:\n") % {'expr': string}]
context.processor.warning(''.join(lines))
return datatypes.nodeset()
return _map(context, nodeset, expr)
def max_function(context, nodeset, string):
"""
The dyn:max function calculates the maximum value for the nodes passed as
the first argument, where the value of each node is calculated dynamically
using an XPath expression passed as a string as the second argument.
http://www.exslt.org/dyn/functions/max/index.html
"""
nodeset = nodeset.evaluate_as_nodeset(context)
string = string.evaluate_as_string(context)
try:
expr = parse_xpath(string)
except XPathError:
lines = traceback.format_exception(*sys.exc_info())
lines[:1] = [("Syntax error in XPath expression '%(expr)s', "
"lower-level traceback:\n") % {'expr': string}]
context.processor.warning(''.join(lines))
return datatypes.nodeset()
return max(map(datatypes.number, _map(context, nodeset, expr)))
def min_function(context, nodeset, string):
"""
The dyn:min function calculates the minimum value for the nodes passed as
the first argument, where the value of each node is calculated dynamically
using an XPath expression passed as a string as the second argument.
http://www.exslt.org/dyn/functions/min/index.html
"""
nodeset = nodeset.evaluate_as_nodeset(context)
string = string.evaluate_as_string(context)
try:
expr = parse_xpath(string)
except XPathError:
lines = traceback.format_exception(*sys.exc_info())
lines[:1] = [("Syntax error in XPath expression '%(expr)s', "
"lower-level traceback:\n") % {'expr': string}]
context.processor.warning(''.join(lines))
return datatypes.nodeset()
return min(map(datatypes.number, _map(context, nodeset, expr)))
def sum_function(context, nodeset, string):
"""
The dyn:sum function calculates the sum for the nodes passed as the first
argument, where the value of each node is calculated dynamically using an
XPath expression passed as a string as the second argument.
http://www.exslt.org/dyn/functions/sum/index.html
"""
nodeset = nodeset.evaluate_as_nodeset(context)
string = string.evaluate_as_string(context)
try:
expr = parse_xpath(string)
except XPathError:
lines = traceback.format_exception(*sys.exc_info())
lines[:1] = [("Syntax error in XPath expression '%(expr)s', "
"lower-level traceback:\n") % {'expr': string}]
context.processor.warning(''.join(lines))
return datatypes.nodeset()
return sum(map(datatypes.number, _map(context, nodeset, expr)))
## XSLT Extension Module Interface ####################################
extension_namespaces = {
EXSL_DYNAMIC_NS : 'dyn',
}
extension_functions = {
(EXSL_DYNAMIC_NS, 'closure') : closure_function,
(EXSL_DYNAMIC_NS, 'evaluate') : evaluate_function,
(EXSL_DYNAMIC_NS, 'map') : map_function,
(EXSL_DYNAMIC_NS, 'max') : max_function,
(EXSL_DYNAMIC_NS, 'min') : min_function,
(EXSL_DYNAMIC_NS, 'sum') : sum_function,
}
extension_elements = {
}
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/exslt/dynamic.py | dynamic.py
from __future__ import absolute_import
import math
import random
import itertools
from amara.xpath import datatypes
EXSL_MATH_NS = "http://exslt.org/math"
def abs_function(context, number):
"""
The math:abs function returns the absolute value of a number.
"""
result = abs(number.evaluate_as_number(context))
return datatypes.number(result)
def acos_function(context, number):
"""
The math:acos function returns the arccosine value of a number.
"""
try:
result = math.acos(number.evaluate_as_number(context))
except ValueError:
return datatypes.NOT_A_NUMBER
return datatypes.number(result)
def asin_function(context, number):
"""
The math:asin function returns the arcsine value of a number.
"""
try:
result = math.asin(number.evaluate_as_number(context))
except ValueError:
return datatypes.NOT_A_NUMBER
return datatypes.number(result)
def atan_function(context, number):
"""
The math:atan function returns the arctangent value of a number.
"""
try:
result = math.atan(number.evaluate_as_number(context))
except ValueError:
return datatypes.NOT_A_NUMBER
return datatypes.number(result)
def atan2_function(context, y, x):
"""
The math:atan2 function returns the angle ( in radians ) from the X axis
to a point (y,x).
"""
x = x.evaluate_as_number(context)
y = y.evaluate_as_number(context)
try:
result = math.atan2(y, x)
except ValueError:
return datatypes.NOT_A_NUMBER
return datatypes.number(result)
_named_constants = {
'PI' : math.pi,
'E' : math.e,
    'SQRT2' : math.sqrt(2),
'LN2' : math.log(2),
'LN10' : math.log(10),
'LOG2E' : 1 / math.log(2),
'SQRT1_2' : math.sqrt(0.5),
}
def constant_function(context, name, precision):
"""
The math:constant function returns the specified constant to a set precision.
"""
    name = name.evaluate_as_string(context)
precision = precision.evaluate_as_number(context)
try:
result = _named_constants[name]
except KeyError:
return datatypes.NOT_A_NUMBER
return datatypes.number('%0.*f' % (int(precision), result))
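# Editor's illustration: the '%0.*f' idiom used by math:constant above --
# the * consumes the precision argument, so the constant is formatted to the
# requested number of decimal places.
def _constant_demo():
    import math
    return '%0.*f' % (3, math.pi)  # '3.142'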
def cos_function(context, number):
"""
The math:cos function returns cosine of the passed argument.
"""
result = math.cos(number.evaluate_as_number(context))
return datatypes.number(result)
def exp_function(context, number):
"""
The math:exp function returns e (the base of natural logarithms) raised to
a power.
"""
result = math.exp(number.evaluate_as_number(context))
return datatypes.number(result)
def highest_function(context, nodeset):
"""
The math:highest function returns the nodes in the node set whose value is
the maximum value for the node set. The maximum value for the node set is
the same as the value as calculated by math:max. A node has this maximum
value if the result of converting its string value to a number as if by the
number function is equal to the maximum value, where the equality
comparison is defined as a numerical comparison using the = operator.
"""
nodeset = nodeset.evaluate_as_nodeset(context)
    highest = max(itertools.imap(datatypes.number, nodeset))
numbers = itertools.imap(datatypes.number, nodeset)
result = datatypes.nodeset()
for number, node in itertools.izip(numbers, nodeset):
if number == highest:
result.append(node)
return result
def log_function(context, number):
"""
The math:log function returns the natural logarithm of a number.
"""
result = math.log(number.evaluate_as_number(context))
return datatypes.number(result)
def lowest_function(context, nodeset):
"""
The math:lowest function returns the nodes in the node set whose value is
the minimum value for the node set. The minimum value for the node set is
the same as the value as calculated by math:min. A node has this minimum
value if the result of converting its string value to a number as if by the
number function is equal to the minimum value, where the equality
comparison is defined as a numerical comparison using the = operator.
"""
nodeset = nodeset.evaluate_as_nodeset(context)
    lowest = min(itertools.imap(datatypes.number, nodeset))
numbers = itertools.imap(datatypes.number, nodeset)
result = datatypes.nodeset()
for number, node in itertools.izip(numbers, nodeset):
if number == lowest:
result.append(node)
return result
def max_function(context, nodeset):
"""
The math:max function returns the maximum value of the nodes passed as
the argument.
"""
nodeset = nodeset.evaluate_as_nodeset(context)
numbers = itertools.imap(datatypes.number, nodeset)
try:
maximum = numbers.next()
except StopIteration:
return datatypes.NOT_A_NUMBER
for number in numbers:
if number > maximum:
maximum = number
elif number != number:
# Not-A-Number
return number
return maximum
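# Editor's note: NaN is the only value that compares unequal to itself,
# which is exactly the `number != number` test used by math:max and math:min
# above.
def _nan_demo():
    nan = float('nan')
    return nan != nan  # True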
def min_function(context, nodeset):
"""
The math:min function returns the minimum value of the nodes passed as
the argument.
"""
nodeset = nodeset.evaluate_as_nodeset(context)
numbers = itertools.imap(datatypes.number, nodeset)
try:
minimum = numbers.next()
except StopIteration:
return datatypes.NOT_A_NUMBER
for number in numbers:
if number < minimum:
minimum = number
elif number != number:
# Not-A-Number
return number
return minimum
def power_function(context, base, exponent):
"""
The math:power function returns the value of a base expression taken to
a specified power.
"""
base = base.evaluate_as_number(context)
exponent = exponent.evaluate_as_number(context)
    return datatypes.number(base**exponent)
def random_function(context):
"""
The math:random function returns a random number from 0 to 1.
"""
return datatypes.number(random.random())
def sin_function(context, number):
"""
The math:sin function returns the sine of the number.
"""
result = math.sin(number.evaluate_as_number(context))
return datatypes.number(result)
def sqrt_function(context, number):
"""
The math:sqrt function returns the square root of a number.
"""
# The platform C library determines what math.sqrt() returns.
# On some platforms, especially prior to Python 2.4,
# nan may be returned for a negative or nan argument.
# On other platforms, and especially since Python 2.4,
# a ValueError is raised.
#
# EXSLT requires that we return zero for negative arg.
# The result for a nan arg is undefined, but we'll return nan.
number = number.evaluate_as_number(context)
if number.isnan():
return number
    if number < 0.0:
result = 0.0
else:
try:
result = math.sqrt(number)
except ValueError:
result = 0.0
return datatypes.number(result)
def tan_function(context, number):
"""
The math:tan function returns the tangent of the number passed as
an argument.
"""
result = math.tan(number.evaluate_as_number(context))
return datatypes.number(result)
## XSLT Extension Module Interface ####################################
extension_namespaces = {
EXSL_MATH_NS : 'math',
}
extension_functions = {
(EXSL_MATH_NS, 'abs'): abs_function,
(EXSL_MATH_NS, 'acos'): acos_function,
(EXSL_MATH_NS, 'asin'): asin_function,
(EXSL_MATH_NS, 'atan'): atan_function,
(EXSL_MATH_NS, 'atan2'): atan2_function,
(EXSL_MATH_NS, 'constant'): constant_function,
(EXSL_MATH_NS, 'cos'): cos_function,
(EXSL_MATH_NS, 'exp'): exp_function,
(EXSL_MATH_NS, 'highest'): highest_function,
(EXSL_MATH_NS, 'log'): log_function,
(EXSL_MATH_NS, 'lowest'): lowest_function,
(EXSL_MATH_NS, 'max'): max_function,
(EXSL_MATH_NS, 'min'): min_function,
(EXSL_MATH_NS, 'power'): power_function,
(EXSL_MATH_NS, 'random'): random_function,
(EXSL_MATH_NS, 'sin'): sin_function,
(EXSL_MATH_NS, 'sqrt'): sqrt_function,
(EXSL_MATH_NS, 'tan'): tan_function,
}
extension_elements = {
}
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/exslt/math.py | math.py
import re
from amara.xpath import datatypes
EXSL_REGEXP_NS = "http://exslt.org/regular-expressions"
def match_function(context, source, pattern, flags=None):
"""
The regexp:match function lets you get hold of the substrings of the
string passed as the first argument that match the captured parts of
the regular expression passed as the second argument.
The second argument is a regular expression that follows the Javascript
regular expression syntax.
The third argument is a string consisting of character flags to be used
by the match. If a character is present then that flag is true. The
flags are:
g: global match - the submatches from all the matches in the string
are returned. If this character is not present, then
only the submatches from the first match in the
string are returned.
i: case insensitive - the regular expression is treated as case
insensitive. If this character is not present,
then the regular expression is case sensitive.
The regexp:match function returns a node set of 'match' elements, each of
whose string value is equal to a portion of the first argument string
that was captured by the regular expression. If the match is not global,
the first match element has a value equal to the portion of the string
matched by the entire regular expression.
"""
source = source.evaluate_as_string(context)
pattern = pattern.evaluate_as_string(context)
flags = flags.evaluate_as_string(context) if flags else ''
regexp = re.compile(pattern, re.IGNORECASE if 'i' in flags else 0)
match = regexp.search(source)
if match is None:
return datatypes.nodeset()
context.push_tree_writer(context.instruction.baseUri)
if 'g' in flags:
# find all matches in the source
while match:
context.start_element(u'match')
# return everything that matched the pattern
context.text(match.group())
context.end_element(u'match')
match = regexp.search(source, match.end())
else:
        # the first 'match' element contains the entire matched text,
        # followed by one 'match' element per capture group
        matches = [match.group()]
        matches.extend(match.groups())
        for matched in matches:
            context.start_element(u'match')
            if matched:
                context.text(matched)
            context.end_element(u'match')
writer = context.pop_writer()
rtf = writer.get_result()
return datatypes.nodeset(rtf.xml_children)
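# Editor's illustration (standalone, stdlib only): the two modes of
# regexp:match above. Global mode walks the string with successive search()
# calls; non-global mode reports the whole match followed by its groups.
def _match_demo():
    import re
    regexp = re.compile(r'(\d+)-(\d+)')
    source = '1-2 3-4'
    matches, match = [], regexp.search(source)
    while match:  # the 'g' flag branch above
        matches.append(match.group())
        match = regexp.search(source, match.end())
    return matches  # ['1-2', '3-4']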
def replace_function(context, source, pattern, flags, repl):
"""
The regexp:replace function replaces the parts of a string that match
a regular expression with another string.
The first argument is the string to be matched and replaced. The second
argument is a regular expression that follows the Javascript regular
expression syntax. The fourth argument is the string to replace the
matched parts of the string.
The third argument is a string consisting of character flags to be used
by the match. If a character is present then that flag is true. The flags
are:
g: global replace - all occurrences of the regular expression in the
string are replaced. If this character is not
present, then only the first occurrence of the
regular expression is replaced.
i: case insensitive - the regular expression is treated as case
insensitive. If this character is not present,
then the regular expression is case sensitive.
"""
source = source.evaluate_as_string(context)
pattern = pattern.evaluate_as_string(context)
flags = flags.evaluate_as_string(context)
repl = repl.evaluate_as_string(context)
regexp = re.compile(pattern, re.IGNORECASE if 'i' in flags else 0)
    # a count of zero means "replace all" for re.sub()
    result = regexp.sub(repl, source, 0 if 'g' in flags else 1)
return datatypes.string(result)
def test_function(context, source, pattern, flags=None):
"""
The regexp:test function returns true if the string given as the first
argument matches the regular expression given as the second argument.
The second argument is a regular expression that follows the Javascript
regular expression syntax.
The third argument is a string consisting of flags to be used by the test.
If a character is present then that flag is true. The flags are:
g: global test - has no effect on this function, but is retained for
consistency with regexp:match and regexp:replace.
i: case insensitive - the regular expression is treated as case
insensitive. If this character is not present,
then the regular expression is case sensitive.
"""
source = source.evaluate_as_string(context)
pattern = pattern.evaluate_as_string(context)
flags = flags.evaluate_as_string(context) if flags else ''
regexp = re.compile(pattern, re.IGNORECASE if 'i' in flags else 0)
return datatypes.TRUE if regexp.search(source) else datatypes.FALSE
## XSLT Extension Module Interface ####################################
extension_namespaces = {
EXSL_REGEXP_NS : 'regexp',
}
extension_functions = {
(EXSL_REGEXP_NS, 'match') : match_function,
(EXSL_REGEXP_NS, 'replace') : replace_function,
(EXSL_REGEXP_NS, 'test') : test_function,
}
extension_elements = {
}
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/exslt/regular_expressions.py | regular_expressions.py
import itertools
from amara.xpath import datatypes
EXSL_SETS_NS = "http://exslt.org/sets"
def difference_function(context, nodeset1, nodeset2):
"""
The set:difference function returns the difference between two node
sets - those nodes that are in the node set passed as the first argument
that are not in the node set passed as the second argument.
"""
nodeset1 = set(nodeset1.evaluate_as_nodeset(context))
nodeset2 = set(nodeset2.evaluate_as_nodeset(context))
return datatypes.nodeset(nodeset1 - nodeset2)
def distinct_function(context, nodeset):
"""
The set:distinct function returns a subset of the nodes contained in the
node-set NS passed as the first argument. Specifically, it selects a node
N if there is no node in NS that has the same string value as N, and that
precedes N in document order.
"""
nodeset = nodeset.evaluate_as_nodeset(context)
# Process the nodes in reverse document-order so that same-value keys
# will be mapped to the first (in document order) node.
nodeset.reverse()
strings = itertools.imap(datatypes.string, nodeset)
result = dict(itertools.izip(strings, nodeset))
return datatypes.nodeset(result.values())
def has_same_node_function(context, nodeset1, nodeset2):
"""
The set:has-same-node function returns true if the node set passed as the
first argument shares any nodes with the node set passed as the second
argument. If there are no nodes that are in both node sets, then it
returns false.
"""
nodeset1 = nodeset1.evaluate_as_nodeset(context)
nodeset2 = nodeset2.evaluate_as_nodeset(context)
nodeset2 = set(nodeset2)
for node in nodeset1:
if node in nodeset2:
return datatypes.TRUE
return datatypes.FALSE
def intersection_function(context, nodeset1, nodeset2):
"""
The set:intersection function returns a node set comprising the nodes that
are within both the node sets passed as arguments to it.
"""
nodeset1 = set(nodeset1.evaluate_as_nodeset(context))
nodeset2 = set(nodeset2.evaluate_as_nodeset(context))
return datatypes.nodeset(nodeset1 & nodeset2)
def leading_function(context, nodeset1, nodeset2):
"""
The set:leading function returns the nodes in the node set passed as the
first argument that precede, in document order, the first node in the node
set passed as the second argument. If the first node in the second node
set is not contained in the first node set, then an empty node set is
returned. If the second node set is empty, then the first node set is
returned.
"""
nodeset1 = nodeset1.evaluate_as_nodeset(context)
nodeset2 = nodeset2.evaluate_as_nodeset(context)
try:
        index = nodeset1.index(nodeset2[0])
except IndexError:
# `nodeset2` is empty
return nodeset1
except ValueError:
# `nodeset2[0]` not in `nodeset1`
index = 0
return nodeset1[:index]
def trailing_function(context, nodeset1, nodeset2):
"""
The set:trailing function returns the nodes in the node set passed as the
first argument that follow, in document order, the first node in the node
set passed as the second argument. If the first node in the second node
set is not contained in the first node set, then an empty node set is
returned. If the second node set is empty, then the first node set is
returned.
"""
nodeset1 = nodeset1.evaluate_as_nodeset(context)
nodeset2 = nodeset2.evaluate_as_nodeset(context)
try:
        index = nodeset1.index(nodeset2[0])
except IndexError:
# `nodeset2` is empty
return nodeset1
except ValueError:
# `nodeset2[0]` not in `nodeset1`
index = len(nodeset1)
else:
index += 1
return nodeset1[index:]
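# Editor's illustration (hypothetical, lists standing in for node sets):
# set:leading and set:trailing are simply slices around the index of the
# second set's first node within the first set.
def _leading_trailing_demo():
    nodeset1, nodeset2 = [10, 20, 30, 40], [30]
    index = nodeset1.index(nodeset2[0])
    return nodeset1[:index], nodeset1[index + 1:]  # ([10, 20], [40])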
## XSLT Extension Module Interface ####################################
extension_namespaces = {
EXSL_SETS_NS : 'set',
}
extension_functions = {
(EXSL_SETS_NS, 'difference'): difference_function,
(EXSL_SETS_NS, 'distinct'): distinct_function,
(EXSL_SETS_NS, 'has-same-node'): has_same_node_function,
(EXSL_SETS_NS, 'intersection'): intersection_function,
(EXSL_SETS_NS, 'leading'): leading_function,
(EXSL_SETS_NS, 'trailing'): trailing_function,
}
extension_elements = {
}
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/exslt/sets.py | sets.py
import re
import math
import time
import calendar
import warnings
from amara.xpath import datatypes
EXSL_DATE_TIME_NS = 'http://exslt.org/dates-and-times'
## EXSLT Core Functions ##
def date_time_function(context):
"""
The `date:date-time` function returns the current local date/time as an
ISO 8601 formatted date/time string, with a time zone.
Implements version 1.
"""
return datatypes.string(_datetime.now())
def date_function(context, date=None):
"""
The date:date function returns the date portion of the dateTime
argument if present, or of the current local date/time. The
argument can be given in xs:dateTime or xs:date format.
Implements version 2.
"""
if date is None:
datetime = _datetime.now()
else:
try:
datetime = _datetime.parse(date.evaluate_as_string(context),
('dateTime', 'date'))
except ValueError:
return datatypes.EMPTY_STRING
return datatypes.string(u'%-.4d-%02d-%02d%s' % (datetime.year,
datetime.month,
datetime.day,
datetime.timezone or ''))
def time_function(context, time=None):
"""
The date:time function returns the time portion of the dateTime
argument if present, or of the current local date/time. The
argument can be given in xs:dateTime or xs:time format.
Implements version 2.
"""
try:
datetime = _coerce(context, time, ('dateTime', 'time'))
except ValueError:
return datatypes.EMPTY_STRING
return datatypes.string(u'%02d:%02d:%02.12g%s' % (datetime.hour,
datetime.minute,
datetime.second,
datetime.timezone or ''))
def year_function(context, date=None):
"""
The date:year function returns the year portion of the
dateTime supplied, or of the current year, as an integer.
Implements version 2.
"""
try:
datetime = _coerce(context, date, ('dateTime', 'date', 'gYearMonth',
'gYear'))
except ValueError:
return datatypes.NOT_A_NUMBER
return datatypes.number(datetime.year)
def leap_year_function(context, date=None):
"""
The date:leap-year function returns true if the year argument
(defaults to current year) is a leap year, false otherwise.
Implements version 1.
"""
try:
datetime = _coerce(context, date, ('dateTime', 'date', 'gYearMonth',
'gYear'))
except ValueError:
return datatypes.NOT_A_NUMBER
return datatypes.TRUE if _is_leap(datetime.year) else datatypes.FALSE
def month_in_year_function(context, date=None):
"""
The date:month-in-year function returns the month portion of
the dateTime argument (defaults to current month) as an integer.
Implements version 2.
"""
try:
datetime = _coerce(context, date, ('dateTime', 'date', 'gYearMonth',
'gMonthDay', 'gMonth'))
except ValueError:
return datatypes.NOT_A_NUMBER
return datatypes.number(datetime.month)
def month_name_function(context, date=None):
"""
The date:month-name function returns the full English name
of the month portion of a date.
Implements version 2.
"""
try:
datetime = _coerce(context, date, ('dateTime', 'date', 'gYearMonth',
'gMonthDay', 'gMonth'))
except ValueError:
return datatypes.EMPTY_STRING
return datatypes.string(
(u'', u'January', u'February', u'March', u'April', u'May', u'June',
u'July', u'August', u'September', u'October', u'November',
u'December')[datetime.month])
def month_abbreviation_function(context, date=None):
"""
The date:month-abbreviation function returns the abbreviation
of the month of a date.
Implements version 2.
"""
try:
datetime = _coerce(context, date, ('dateTime', 'date', 'gYearMonth',
'gMonthDay', 'gMonth'))
except ValueError:
return datatypes.EMPTY_STRING
return datatypes.string(
(u'', u'Jan', u'Feb', u'Mar', u'Apr', u'May', u'Jun', u'Jul', u'Aug',
u'Sep', u'Oct', u'Nov', u'Dec')[datetime.month])
def week_in_year_function(context, date=None):
"""
The date:week-in-year function returns a number representing
the week of the year a date is in.
Implements version 3.
"""
# Notes:
# - ISO 8601 specifies that Week 01 of the year is the week containing
# the first Thursday;
try:
datetime = _coerce(context, date, ('dateTime', 'date'))
except ValueError:
return datatypes.NOT_A_NUMBER
year, month, day = datetime.year, datetime.month, datetime.day
# Find Jan 1 weekday for Y
# _dayOfWeek returns 0=Sun, we need Mon=0
day_of_week_0101 = (_day_of_week(year, 1, 1) + 6) % 7
# Find weekday for Y M D
day_number = _day_in_year(year, month, day)
day_of_week = (day_number + day_of_week_0101 - 1) % 7
# Find if Y M D falls in year Y-1, week 52 or 53
# (i.e., the first 3 days of the year and DOW is Fri, Sat or Sun)
if day_of_week_0101 > 3 and day_number <= (7 - day_of_week_0101):
week = 52 + (day_of_week_0101 == (4 + _is_leap(year - 1)))
# Find if Y M D falls in Y+1, week 1
# (i.e., the last 3 days of the year and DOW is Mon, Tue, or Wed)
elif (365 + _is_leap(year) - day_number) < (3 - day_of_week):
week = 1
else:
week = (day_number + (6 - day_of_week) + day_of_week_0101) / 7
if day_of_week_0101 > 3:
week -= 1
return datatypes.number(week)
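# Editor's note: Python's standard library implements the same ISO 8601 week
# numbering, which makes a handy cross-check for the arithmetic above.
def _week_in_year_demo():
    import datetime
    # 2004-01-01 was a Thursday, so it belongs to week 1 of 2004.
    return datetime.date(2004, 1, 1).isocalendar()[1]  # 1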
def day_in_year_function(context, date=None):
"""
The date:day-in-year function returns a number representing
the position of a date in the year.
Implements version 2.
"""
try:
datetime = _coerce(context, date, ('dateTime', 'date'))
except ValueError:
return datatypes.NOT_A_NUMBER
return datatypes.number(_day_in_year(datetime.year,
datetime.month,
datetime.day))
def day_in_month_function(context, date=None):
"""
The date:day-in-month function returns the numerical date, i.e.
27 for the 27th of March.
Implements version 2.
"""
try:
datetime = _coerce(context, date, ('dateTime', 'date', 'gMonthDay',
'gDay'))
except ValueError:
return datatypes.NOT_A_NUMBER
return datatypes.number(datetime.day)
def day_of_week_in_month_function(context, date=None):
"""
The date:day-of-week-in-month function returns the day-of-the-week
in a month of a date as a number (e.g. 3 for the 3rd Tuesday in May).
Implements version 2.
"""
try:
datetime = _coerce(context, date, ('dateTime', 'date'))
except ValueError:
return datatypes.NOT_A_NUMBER
# Note, using floor divison (//) to aid with `2to3` conversion
result = ((datetime.day - 1) // 7) + 1
return datatypes.number(result)
def day_in_week_function(context, date=None):
"""
The date:day-in-week function returns a number representing the
weekday of a given date. Sunday is 1, Saturday is 7.
Implements version 2.
"""
try:
datetime = _coerce(context, date, ('dateTime', 'date'))
except ValueError:
return datatypes.NOT_A_NUMBER
# `_day_of_week()` is zero-based Sunday, EXSLT needs 1-based
result = _day_of_week(datetime.year, datetime.month, datetime.day) + 1
return datatypes.number(result)
def day_name_function(context, date=None):
"""
The date:day-name function returns the full English day name of
a given date.
Implements version 2.
"""
try:
datetime = _coerce(context, date, ('dateTime', 'date'))
except ValueError:
return datatypes.EMPTY_STRING
weekday = _day_of_week(datetime.year, datetime.month, datetime.day)
weekday = (u'Sunday', u'Monday', u'Tuesday', u'Wednesday', u'Thursday',
u'Friday', u'Saturday')[weekday]
return datatypes.string(weekday)
def day_abbreviation_function(context, date=None):
"""
The date:day-abbreviation function returns the English abbreviation
for the day name of a given date.
Implements version 2.
"""
try:
datetime = _coerce(context, date, ('dateTime', 'date'))
except ValueError:
return datatypes.EMPTY_STRING
weekday = _day_of_week(datetime.year, datetime.month, datetime.day)
weekday = (u'Sun', u'Mon', u'Tue', u'Wed', u'Thu', u'Fri', u'Sat')[weekday]
return datatypes.string(weekday)
def hour_in_day_function(context, time=None):
"""
    The date:hour-in-day function returns the hour portion of a date-
time string as an integer.
Implements version 2.
"""
try:
datetime = _coerce(context, time, ('dateTime', 'time'))
except ValueError:
return datatypes.NOT_A_NUMBER
return datatypes.number(datetime.hour)
def minute_in_hour_function(context, time=None):
"""
The date:minute-in-hour function returns the minute portion of a
date-time string as an integer.
Implements version 2.
"""
try:
datetime = _coerce(context, time, ('dateTime', 'time'))
except ValueError:
return datatypes.NOT_A_NUMBER
return datatypes.number(datetime.minute)
def second_in_minute_function(context, time=None):
"""
The date:second-in-minute function returns the seconds portion
of a date-time string as an integer.
Implements version 2.
"""
try:
datetime = _coerce(context, time, ('dateTime', 'time'))
except ValueError:
return datatypes.NOT_A_NUMBER
return datatypes.number(datetime.second)
## EXSLT Other Functions (unstable) ##
_re_SimpleDateFormat = re.compile(r"(?P<symbol>([GyMdhHmsSEDFwWakKz])\2*)"
r"|'(?P<escape>(?:[^']|'')*)'")
def format_date_function(context, datetime, pattern):
"""
The date:format-date function formats a date/time according to a pattern.
The first argument to date:format-date specifies the date/time to be
formatted. It must be right or left-truncated date/time strings in one of
the formats defined in XML Schema Part 2: Datatypes. The permitted
formats are as follows: xs:dateTime, xs:date, xs:time, xs:gYearMonth,
xs:gYear, xs:gMonthDay, xs:gMonth and xs:gDay.
The second argument is a string that gives the format pattern used to
format the date. The format pattern must be in the syntax specified by
the JDK 1.1 SimpleDateFormat class. The format pattern string is
interpreted as described for the JDK 1.1 SimpleDateFormat class.
Implements version 2.
"""
try:
datetime = _coerce(context, datetime, ('dateTime', 'date', 'time',
'gYearMonth', 'gYear',
'gMonthDay', 'gMonth', 'gDay'))
except ValueError:
return datatypes.EMPTY_STRING
pattern = pattern.evaluate_as_string(context)
# Fill in missing components for right-truncated formats
if datetime.year is not None:
if datetime.month is None:
datetime.month = 1
if datetime.day is None:
datetime.day = 1
if datetime.hour is None:
datetime.hour = 0
if datetime.minute is None:
datetime.minute = 0
if datetime.second is None:
datetime.second = 0.0
    def repl(match):
        # NOTE: uses the inherited `context` and `datetime` variables.
        # NOTE: MonthName, MonthAbbreviation, DayName, DayAbbreviation,
        # DayInYear, DayOfWeekInMonth, WeekInYear and WeekInMonth are
        # unported 4Suite-era helper names in this alpha release.
        groups = match.groupdict()
        if groups['symbol'] is not None:
            symbol = groups['symbol']
            width = len(symbol)
            symbol = symbol[:1]
            if symbol == 'G': # era designator
                if datetime.year is None:
                    rt = u''
                elif datetime.year > 0:
                    rt = u'AD'
                else:
                    rt = u'BC'
            elif symbol == 'y': # year
                if datetime.year is None:
                    rt = u''
                elif width > 2:
                    rt = u'%0.*d' % (width, datetime.year)
                else:
                    rt = u'%0.2d' % (datetime.year % 100)
            elif symbol == 'M': # month in year
                if datetime.month is None:
                    rt = u''
                elif width >= 4:
                    rt = MonthName(context, datetime)
                elif width == 3:
                    rt = MonthAbbreviation(context, datetime)
                else:
                    rt = u'%0.*d' % (width, datetime.month)
            elif symbol == 'd': # day in month
                if datetime.day is None:
                    rt = u''
                else:
                    rt = u'%0.*d' % (width, datetime.day)
            elif symbol == 'h': # hour in am/pm (1-12)
                hours = datetime.hour
                if hours > 12:
                    hours -= 12
                elif hours == 0:
                    hours = 12
                rt = u'%0.*d' % (width, hours)
            elif symbol == 'H': # hour in day (0-23)
                rt = u'%0.*d' % (width, datetime.hour)
            elif symbol == 'm': # minute in hour
                rt = u'%0.*d' % (width, datetime.minute)
            elif symbol == 's': # second in minute
                rt = u'%0.*d' % (width, datetime.second)
            elif symbol == 'S': # millisecond
                fraction, second = math.modf(datetime.second)
                fraction, millisecond = math.modf(fraction * 10**width)
                rt = u'%0.*d' % (width, millisecond + round(fraction))
            elif symbol == 'E': # day in week
                if (datetime.year is None or
                    datetime.month is None or
                    datetime.day is None):
                    rt = u''
                elif width >= 4:
                    rt = DayName(context, datetime)
                else:
                    rt = DayAbbreviation(context, datetime)
            elif symbol == 'D': # day in year
                if (datetime.year is None or
                    datetime.month is None or
                    datetime.day is None):
                    rt = u''
                else:
                    rt = u'%0.*d' % (width, DayInYear(context, datetime))
            elif symbol == 'F': # day of week in month
                if datetime.day is None:
                    rt = u''
                else:
                    day_of_week = DayOfWeekInMonth(context, datetime)
                    rt = u'%0.*d' % (width, day_of_week)
            elif symbol == 'w': # week in year
                if (datetime.year is None or
                    datetime.month is None or
                    datetime.day is None):
                    rt = u''
                else:
                    rt = u'%0.*d' % (width, WeekInYear(context, datetime))
            elif symbol == 'W': # week in month
                if (datetime.year is None or
                    datetime.month is None or
                    datetime.day is None):
                    rt = u''
                else:
                    rt = u'%0.*d' % (width, WeekInMonth(context, datetime))
            elif symbol == 'a':
                if datetime.hour < 12:
                    rt = u'AM'
                else:
                    rt = u'PM'
            elif symbol == 'k': # hour in day (1-24)
                rt = u'%0.*d' % (width, datetime.hour + 1)
            elif symbol == 'K': # hour in am/pm (0-11)
                hours = datetime.hour
                if hours >= 12:
                    hours -= 12
                rt = u'%0.*d' % (width, hours)
            elif symbol == 'z':
                rt = datetime.timezone or u''
            else:
                # not reached due to regular expression (supposedly)
                raise RuntimeError("bad format symbol '%s'" % symbol)
        elif groups['escape']:
            rt = groups['escape'].replace(u"''", u"'")
        else:
            # 'escape' group was empty, just matched '' (escaped single quote)
            rt = u"'"
        return rt
return datatypes.string(_re_SimpleDateFormat.sub(repl, pattern))
def week_in_month_function(context, date=None):
"""
The date:week-in-month function returns the week in a month of a date as
a number. If no argument is given, then the current local date/time, as
returned by date:date-time is used the default argument. For the purposes
of numbering, the first day of the month is in week 1 and new weeks begin
on a Monday (so the first and last weeks in a month will often have less
than 7 days in them).
Implements version 3.
"""
    try:
        datetime = _coerce(context, date, ('dateTime', 'date'))
    except ValueError:
        return datatypes.NOT_A_NUMBER
    day_of_week = _day_of_week(datetime.year, datetime.month, datetime.day)
    # _day_of_week returns 0=Sun, we need Sun=7
    day_of_week = ((day_of_week + 6) % 7) + 1
    week_offset = datetime.day - day_of_week
    return datatypes.number((week_offset / 7) + (week_offset % 7 and 2 or 1))
def difference_function(context, start, end):
"""
The date:difference function returns the difference between the first date
and the second date as a duration in string form.
Implements version 1.
"""
    try:
        start = _coerce(context, start, ('dateTime', 'date', 'gYearMonth',
                                         'gYear'))
        end = _coerce(context, end, ('dateTime', 'date', 'gYearMonth',
                                     'gYear'))
    except ValueError:
        return datatypes.EMPTY_STRING
    return datatypes.string(unicode(_difference(start, end)))
def add_function(context, date, duration):
"""
The date:add function returns the result of adding a duration to a dateTime.
Implements version 2.
"""
    try:
        dateTime = _coerce(context, date, ('dateTime', 'date', 'gYearMonth',
                                           'gYear'))
        duration = _Duration.parse(duration.evaluate_as_string(context))
    except ValueError:
        return datatypes.EMPTY_STRING
result = _datetime()
# Get the "adjusted" duration values
if duration.negative:
years, months, days, hours, minutes, seconds = (-duration.years,
-duration.months,
-duration.days,
-duration.hours,
-duration.minutes,
-duration.seconds)
else:
years, months, days, hours, minutes, seconds = (duration.years,
duration.months,
duration.days,
duration.hours,
duration.minutes,
duration.seconds)
# Months (may be modified below)
months += (dateTime.month or 1)
carry, result.month = divmod(months - 1, 12)
result.month += 1
# Years (may be modified below)
result.year = dateTime.year + years + carry
# Timezone
result.timezone = dateTime.timezone
# Seconds
seconds += (dateTime.second or 0)
carry, result.second = divmod(seconds, 60)
# Minutes
minutes += (dateTime.minute or 0) + carry
carry, result.minute = divmod(minutes, 60)
# Hours
hours += (dateTime.hour or 0) + carry
carry, result.hour = divmod(hours, 24)
# Days
max_day = _daysInMonth(result.year, result.month)
    if dateTime.day > max_day:
        day = max_day
    elif dateTime.day < 1:
        day = 1
    else:
        day = dateTime.day
result.day = day + days + carry
while True:
max_day = _daysInMonth(result.year, result.month)
if result.day > max_day:
result.day -= max_day
carry = 1
elif result.day < 1:
if result.month == 1:
max_day = _daysInMonth(result.year - 1, 12)
else:
max_day = _daysInMonth(result.year, result.month - 1)
result.day += max_day
carry = -1
else:
break
carry, result.month = divmod(result.month + carry - 1, 12)
result.month += 1
result.year += carry
# Create output representation based in dateTime input
# xs:gYear
if dateTime.month is None:
result = u'%0.4d%s' % (result.year, result.timezone or '')
# xs:gYearMonth
elif dateTime.day is None:
result = u'%0.4d-%02d%s' % (result.year, result.month,
result.timezone or '')
# xs:date
elif dateTime.hour is None:
result = u'%0.4d-%02d-%02d%s' % (result.year, result.month, result.day,
result.timezone or '')
# xs:dateTime
else:
        result = unicode(result)
    return datatypes.string(result)
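# Editor's illustration: the month normalization used by date:add above. The
# divmod over a zero-based month value folds an overflowing month back into
# range while producing the carry that is added to the year.
def _month_carry_demo():
    months = 12 + 2  # e.g. December plus a two-month duration
    carry, month = divmod(months - 1, 12)
    return carry, month + 1  # (1, 2): one year carried, landing in February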
def add_duration_function(context, duration1, duration2):
"""
The date:add-duration function returns the duration resulting from adding
two durations together.
Implements version 2.
"""
    duration1 = duration1.evaluate_as_string(context)
    duration2 = duration2.evaluate_as_string(context)
    try:
        duration1 = _Duration.parse(duration1)
        duration2 = _Duration.parse(duration2)
        duration = _addDurations(duration1, duration2)
    except ValueError:
        return datatypes.EMPTY_STRING
    return datatypes.string(unicode(duration))
def sum_function(context, nodeset):
"""
The date:sum function adds a set of durations together. The string values
of the nodes in the node set passed as an argument are interpreted as
durations and added together as if using the date:add-duration function.
Implements version 1.
"""
    nodeset = nodeset.evaluate_as_nodeset(context)
    try:
        strings = map(datatypes.string, nodeset)
        durations = map(_Duration.parse, strings)
        duration = _addDurations(*durations)
    except ValueError:
        return datatypes.EMPTY_STRING
    return datatypes.string(unicode(duration))
def seconds_function(context, string=None):
"""
The date:seconds function returns the number of seconds specified by the
argument string. If no argument is given, then the current local
date/time, as returned by date:date-time is used as a default argument.
Implements version 1.
"""
if string is None:
string = str(_datetime.now())
else:
string = Conversions.StringValue(string)
try:
if 'P' in string:
# its a duration
duration = _Duration.parse(string)
else:
# its a dateTime
dateTime = _datetime.parse(string, ('dateTime', 'date',
'gYearMonth', 'gYear'))
duration = _difference(_EPOCH, dateTime)
except ValueError:
return number.nan
# The number of years and months must both be equal to zero
if duration.years or duration.months:
return number.nan
# Convert the duration to just seconds
seconds = (duration.days * 86400 + duration.hours * 3600 +
duration.minutes * 60 + duration.seconds )
if duration.negative:
seconds *= -1
return seconds
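# Illustrative sketch (added commentary, not part of the original module):
# a duration argument is reduced to seconds directly, while a dateTime is
# measured from the 1970-01-01T00:00:00Z epoch, e.g.
#
#   date:seconds('PT1H30M')              returns 5400
#   date:seconds('1970-01-02T00:00:00Z') returns 86400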
def duration_function(context, seconds=None):
"""
The date:duration function returns a duration string representing the
number of seconds specified by the argument string. If no argument is
given, then the result of calling date:seconds without any arguments is
used as a default argument.
Implements version 1.
"""
if seconds is None:
# The epoch for EXSLT is 1970-01-01T00:00:00Z
# FIXME: we could code around this, but most (all?) platforms we
# support have a time() epoch of 1970-01-01, so why bother.
if time.mktime((1970, 1, 1, 0, 0, 0, 0, 0, 0)) != time.timezone:
warnings.warn("platform epoch != 1970-01-01", RuntimeWarning)
# Don't use fractional seconds to keep with constructed dateTimes
seconds = int(time.time())
else:
seconds = Conversions.NumberValue(seconds)
if not number.finite(seconds):
# +/-Inf or NaN
return u''
duration = _Duration(negative=(seconds < 0), seconds=abs(seconds))
return unicode(duration)
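# Illustrative sketch (added commentary, not part of the original module):
# the seconds count is normalized into days/hours/minutes/seconds by the
# _Duration constructor, e.g. date:duration(90061) -> 'P1DT1H1M1S'.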
## Internals ##########################################################
class _datetime(object):
"""
INTERNAL: representation of an exact point on a timeline.
"""
__slots__ = ('year', 'month', 'day', 'hour', 'minute', 'second', 'timezone')
patterns = {
'year' : '[-]?[0-9]{4,}',
'month' : '[0-9]{2}',
'day' : '[0-9]{2}',
'hour' : '[0-9]{2}',
'minute' : '[0-9]{2}',
'second' : '[0-9]{2}(?:[.][0-9]+)?',
'timezone' : 'Z|[-+][0-9]{2}:[0-9]{2}'
}
for name, pattern in patterns.iteritems():
patterns[name] = '(?P<%s>%s)' % (name, pattern)
del name, pattern
datatypes = {
'dateTime' : '%(date)sT%(time)s',
'date' : '%(year)s-%(month)s-%(day)s',
'time' : '%(hour)s:%(minute)s:%(second)s',
'gYearMonth' : '%(year)s-%(month)s',
'gYear' : '%(year)s',
'gMonthDay' : '--%(month)s-%(day)s',
'gMonth' : '--%(month)s',
'gDay' : '---%(day)s',
}
datatypes['dateTime'] = datatypes['dateTime'] % datatypes
for name, pattern in datatypes.iteritems():
pattern = '^' + pattern + '%(timezone)s?$'
datatypes[name] = re.compile(pattern % patterns)
del name, pattern
def parse(cls, string, datatypes=None):
if not datatypes:
datatypes = cls.datatypes
for name in datatypes:
try:
regexp = cls.datatypes[name]
except KeyError:
                raise RuntimeError('unsupported datatype: %r' % name)
match = regexp.match(string)
if match:
return cls(**match.groupdict())
raise ValueError('invalid date/time literal: %r' % string)
parse = classmethod(parse)
def now(cls):
year, month, day, hour, minute, second = time.gmtime()[:6]
return cls(year=year, month=month, day=day, hour=hour, minute=minute,
second=second, timezone='Z')
now = classmethod(now)
def __init__(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, timezone=None):
self.year = year and int(year)
self.month = month and int(month)
self.day = day and int(day)
self.hour = hour and int(hour)
self.minute = minute and int(minute)
self.second = second and float(second)
self.timezone = timezone and unicode(timezone)
return
def utcoffset(self):
"""
Returns the offset from UTC in minutes.
"""
if not self.timezone:
offset = None
elif self.timezone == 'Z':
offset = 0
else:
# timezone is in +/-HH:MM format
hours, minutes = map(int, self.timezone.split(':'))
if hours < 0:
offset = hours * 60 - minutes
else:
offset = hours * 60 + minutes
return offset
def __str__(self):
if not self.second:
second_as_string = '00'
elif self.second < 10:
second_as_string = '0%.12g' % self.second
else:
second_as_string = '%.12g' % self.second
return '%-.4d-%02d-%02dT%02d:%02d:%s%s' % (self.year or 0,
self.month or 0,
self.day or 0,
self.hour or 0,
self.minute or 0,
second_as_string,
self.timezone or '')
def __repr__(self):
return '%s(%r, %r, %r, %r, %r, %r, %r)' % (
self.__class__.__name__, self.year, self.month, self.day,
self.hour, self.minute, self.second, self.timezone)
_EPOCH = _datetime.parse('1970-01-01T00:00:00Z')
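# Illustrative sketch (added commentary, not part of the original module):
# parsing accepts any of the lexical patterns above and str() reconstitutes
# the lexical form, e.g.
#
#   dt = _datetime.parse(u'2004-02-29T12:30:00-05:00')
#   dt.utcoffset() -> -300 (minutes)
#   str(dt)        -> '2004-02-29T12:30:00-05:00'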
class _Duration(object):
__slots__ = ('negative', 'years', 'months', 'days', 'hours', 'minutes',
'seconds')
regexp = re.compile('^(?P<negative>[-])?P(?:(?P<years>[0-9]+)Y)?'
'(?:(?P<months>[0-9]+)M)?(?:(?P<days>[0-9]+)D)?'
'(?P<time>T(?:(?P<hours>[0-9]+)H)?'
'(?:(?P<minutes>[0-9]+)M)?'
'(?:(?P<seconds>[0-9]+(?:[.][0-9]+)?)S)?)?$')
def parse(cls, string):
match = cls.regexp.match(string)
if match:
parts = match.groupdict()
# Verify that if the time designator is given, there is at least
# one time component as well. This cannot be done easily with
# just the RE.
            time = parts['time']
            if time is None or len(time) > 1:
                del parts['time']
                return cls(**parts)
            # A bare 'T' with no time components falls through to the
            # ValueError below
raise ValueError('invalid duration literal: %r' % string)
parse = classmethod(parse)
def __init__(self, negative=None, years=None, months=None, days=None,
hours=None, minutes=None, seconds=None):
self.negative = negative and True or False
self.years = years and int(years) or 0
self.months = months and int(months) or 0
self.days = days and int(days) or 0
self.hours = hours and int(hours) or 0
self.minutes = minutes and int(minutes) or 0
self.seconds = seconds and float(seconds) or 0
# Normalize the values to range
minutes, self.seconds = divmod(self.seconds, 60)
hours, self.minutes = divmod(self.minutes + int(minutes), 60)
days, self.hours = divmod(self.hours + hours, 24)
self.days += days
years, self.months = divmod(self.months, 12)
self.years += years
return
def __repr__(self):
return '%s(%r, %r, %r, %r, %r, %r, %r)' % (
self.__class__.__name__, self.negative, self.years, self.months,
self.days, self.hours, self.minutes, self.seconds)
def __str__(self):
have_time = (self.hours or self.minutes or self.seconds)
# Always format the duration in minimized form
if not (self.years or self.months or self.days or have_time):
# at least one designator MUST be present (arbitrary decision)
return 'PT0S'
parts = [self.negative and '-P' or 'P']
if self.years:
parts.append('%dY' % self.years)
if self.months:
parts.append('%dM' % self.months)
if self.days:
parts.append('%dD' % self.days)
if have_time:
parts.append('T')
if self.hours:
parts.append('%dH' % self.hours)
if self.minutes:
parts.append('%dM' % self.minutes)
if self.seconds:
parts.append('%0.12gS' % self.seconds)
return ''.join(parts)
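# Illustrative sketch (added commentary, not part of the original module):
# the constructor normalizes each component into range, so parsing and
# re-serializing minimizes the form, e.g.
#
#   unicode(_Duration.parse(u'PT90M')) -> u'PT1H30M'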
def _coerce(obj, datatypes):
"""
INTERNAL: converts an XPath object to a `_datetime` instance.
"""
if obj is None:
obj = _datetime.now()
elif not isinstance(obj, _datetime):
obj = _datetime.parse(Conversions.StringValue(obj), datatypes)
return obj
def _daysInMonth(year, month):
"""
INTERNAL: calculates the number of days in a month for the given date.
"""
days = (None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)[month]
if month == 2 and calendar.isleap(year):
days += 1
return days
def _dayInYear(year, month, day):
"""
INTERNAL: calculates the ordinal date for the given date.
"""
days = (None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334)[month]
if month > 2 and calendar.isleap(year):
days += 1
return days + day
def _julianDay(year, month, day):
"""
INTERNAL: calculates the Julian day (1-1-1 is day 1) for the given date.
"""
date = _dayInYear(year, month, day)
year -= 1
return year*365 + (year / 4) - (year / 100) + (year / 400) + date
def _dayOfWeek(year, month, day):
"""
INTERNAL: calculates the day of week (0=Sun, 6=Sat) for the given date.
"""
return _julianDay(year, month, day) % 7
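# Illustrative sketch (added commentary, not part of the original module):
# the proleptic Gregorian day count makes day-of-week a simple modulus,
# e.g. _dayOfWeek(2000, 1, 1) -> 6, a Saturday (0=Sun, 6=Sat).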
def _difference(start, end):
"""
INTERNAL: subtracts the end date from the start date.
"""
if type(start.timezone) is not type(end.timezone):
raise TypeError('cannot subtract dateTimes with timezones and '
'dateTimes without timezones')
years = end.year - start.year
negative = start.year > end.year
# If the least specific format is xs:gYear, just subtract the years.
if start.month is None or end.month is None:
return _Duration(negative=negative, years=abs(years))
# If the least specific format is xs:gYearMonth, just subtract the years
# and months.
if start.day is None or end.day is None:
months = abs(end.month - start.month + (years * 12))
years, months = divmod(months, 12)
negative = negative or (start.month > end.month)
return _Duration(negative=negative, years=years, months=months)
start_days = _julianDay(start.year, start.month, start.day)
end_days = _julianDay(end.year, end.month, end.day)
days = end_days - start_days
negative = start_days > end_days
# If the least specific format is xs:date, just subtract the days
if start.hour is None or end.hour is None:
return _Duration(negative=negative, days=abs(days))
# They both are in the xs:dateTime format, continue to subtract the time.
start_secs = start.hour * 3600 + start.minute * 60 + start.second
end_secs = end.hour * 3600 + end.minute * 60 + end.second
seconds = abs(end_secs - start_secs + (days * 86400))
if start.timezone:
# adjust seconds to be UTC
assert end.timezone
        # Note, utcoffset() returns minutes; a positive offset means the
        # local time is ahead of UTC, so it is subtracted when normalizing
        seconds += (start.utcoffset() - end.utcoffset()) * -60
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
negative = negative or (start_secs > end_secs)
return _Duration(negative=negative, days=days, hours=hours,
minutes=minutes, seconds=seconds)
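# Illustrative sketch (added commentary, not part of the original module):
# subtraction honours the least specific of the two formats, e.g.
#
#   unicode(_difference(_datetime.parse(u'2000-01-01'),
#                       _datetime.parse(u'2000-03-01'))) -> u'P60D'
#
# (60 days, since 2000 is a leap year).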
def _addDurations(*durations):
"""
INTERNAL: returns a new duration from the sum of the sequence of durations
"""
if not durations:
raise ValueError('no durations')
months, seconds = 0, 0
for duration in durations:
other_months = duration.years * 12 + duration.months
other_seconds = (duration.days * 86400 + duration.hours * 3600 +
duration.minutes * 60 + duration.seconds)
if duration.negative:
months -= other_months
seconds -= other_seconds
else:
months += other_months
seconds += other_seconds
if (months < 0 and seconds > 0) or (months > 0 and seconds < 0):
raise ValueError('months/seconds sign mismatch')
return _Duration(negative=(months < 0 or seconds < 0),
months=abs(months), seconds=abs(seconds))
## XSLT Extension Module Interface ####################################
extension_namespaces = {
EXSL_DATE_TIME_NS : 'date',
}
extension_functions = {
# Core Functions
(EXSL_DATE_TIME_NS, 'date-time'): date_time_function,
(EXSL_DATE_TIME_NS, 'date'): date_function,
(EXSL_DATE_TIME_NS, 'time'): time_function,
(EXSL_DATE_TIME_NS, 'year'): year_function,
(EXSL_DATE_TIME_NS, 'leap-year'): leap_year_function,
(EXSL_DATE_TIME_NS, 'month-in-year'): month_in_year_function,
(EXSL_DATE_TIME_NS, 'month-name'): month_name_function,
(EXSL_DATE_TIME_NS, 'month-abbreviation'): month_abbreviation_function,
(EXSL_DATE_TIME_NS, 'week-in-year'): week_in_year_function,
(EXSL_DATE_TIME_NS, 'day-in-year'): day_in_year_function,
(EXSL_DATE_TIME_NS, 'day-in-month'): day_in_month_function,
(EXSL_DATE_TIME_NS, 'day-of-week-in-month'): day_of_week_in_month_function,
(EXSL_DATE_TIME_NS, 'day-in-week'): day_in_week_function,
(EXSL_DATE_TIME_NS, 'day-name'): day_name_function,
(EXSL_DATE_TIME_NS, 'day-abbreviation'): day_abbreviation_function,
(EXSL_DATE_TIME_NS, 'hour-in-day'): hour_in_day_function,
(EXSL_DATE_TIME_NS, 'minute-in-hour'): minute_in_hour_function,
(EXSL_DATE_TIME_NS, 'second-in-minute'): second_in_minute_function,
# Other Functions
(EXSL_DATE_TIME_NS, 'format-date'): format_date_function,
#(EXSL_DATE_TIME_NS, 'parse-date'): parse_date_function,
(EXSL_DATE_TIME_NS, 'week-in-month'): week_in_month_function,
(EXSL_DATE_TIME_NS, 'difference'): difference_function,
(EXSL_DATE_TIME_NS, 'add'): add_function,
(EXSL_DATE_TIME_NS, 'add-duration'): add_duration_function,
(EXSL_DATE_TIME_NS, 'sum'): sum_function,
(EXSL_DATE_TIME_NS, 'seconds'): seconds_function,
(EXSL_DATE_TIME_NS, 'duration'): duration_function,
}
extension_elements = {
#(EXSL_DATE_TIME_NS, 'date-format'): date_format_element
}
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/exslt/datetime.py | datetime.py
import itertools
from amara.namespaces import XSL_NAMESPACE
from amara.xpath import datatypes
from amara.xslt import XsltError, XsltRuntimeError
from amara.xslt.expressions import rtf_expression
from amara.xslt.tree import xslt_element, content_model, attribute_types
from amara.xslt.tree.variable_elements import param_element
EXSL_FUNCTIONS_NS = "http://exslt.org/functions"
class function_element(xslt_element):
content_model = content_model.seq(
content_model.rep(content_model.qname(XSL_NAMESPACE, 'xsl:param')),
content_model.template,
)
attribute_types = {
'name': attribute_types.qname_but_not_ncname(required=True),
}
def setup(self):
params = self._params = []
for child in self.children:
if isinstance(child, param_element):
params.append(child)
elif isinstance(child, xslt_element):
break
        if self._params:
            # instructions follow the leading xsl:param elements
            self._instructions = self.children[len(self._params):]
        else:
            self._instructions = self.children
return
def prime(self, context):
context.add_function(self._name, self)
return
def __call__(self, context, *args):
# Save context state as XPath is side-effect free
focus = context.node, context.position, context.size
state = context.instruction, context.namespaces, context.variables
context.instruction, context.namespaces = self, self.namespaces
# Set the return value
self.result = datatypes.EMPTY_STRING
# Set the parameter list
if self._params:
context.variables = context.variables.copy()
params = iter(self._params)
# Handle the passed in arguments
for arg, param in itertools.izip(args, params):
context.variables[param._name] = arg.evaluate(context)
# Handle remaining parameter defaults
for param in params:
param.instantiate(context)
# Process the instruction template
for child in self._instructions:
child.instantiate(context)
# Restore context state
context.instruction, context.namespaces, context.variables = state
context.node, context.position, context.size = focus
return self.result
class result_element(xslt_element):
"""
    When a func:result element is instantiated during the instantiation
    of a func:function element, the function returns with its value.
"""
content_model = content_model.template
attribute_types = {
'select' : attribute_types.expression(),
}
_function = None
def setup(self):
if not self._select:
self._select = rtf_expression(self)
return
def prime(self, context):
current = self.parent
while current:
# this loop will stop when it hits the top of the tree
if current.expanded_name == (EXSL_FUNCTIONS_NS, 'function'):
self._function = current
break
current = current.parent
if not self._function:
raise XsltRuntimeError(XsltError.RESULT_NOT_IN_FUNCTION)
if not self.isLastChild():
siblings = iter(self.parent.children)
for node in siblings:
if node is self:
break
for node in siblings:
if node.expanded_name != (XSL_NAMESPACE, 'fallback'):
raise XsltRuntimeError(XsltError.ILLEGAL_RESULT_SIBLINGS)
return
def instantiate(self, context):
context.instruction, context.namespaces = self, self.namespaces
self._function.result = self._select.evaluate(context)
return
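# Illustrative usage (added commentary, not part of the original module):
# a stylesheet-defined extension function and a call to it might read
#
#   <func:function name="my:double">
#     <xsl:param name="n"/>
#     <func:result select="$n * 2"/>
#   </func:function>
#   ...
#   <xsl:value-of select="my:double(21)"/>
#
# assuming the my: prefix is bound to the same namespace URI in both
# places.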
## XSLT Extension Module Interface ####################################
extension_namespaces = {
EXSL_FUNCTIONS_NS: 'func',
}
extension_functions = {
}
extension_elements = {
(EXSL_FUNCTIONS_NS, 'function'): function_element,
(EXSL_FUNCTIONS_NS, 'result'): result_element,
}
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/exslt/functions.py | functions.py
from amara.xpath import datatypes
from amara.xslt.numbers import formatter
ASCII_DIGITS = '0123456789'
ASCII_LOWER = 'abcdefghijklmnopqrstuvwxyz'
ASCII_UPPER = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
class english_formatter(formatter):
language = 'en'
_roman_digits = _roman_upper, _roman_lower = [], []
for multiplier, combining in ((1, ''), (1000, u'\u0305')):
for base, one, five, ten in ((1, u'I', u'V', u'X'),
(10, u'X', u'L', u'C'),
(100, u'C', u'D', u'M')):
base *= multiplier
one += combining
five += combining
ten += combining
digits = (u'', one, one*2, one*3, one+five,
five, five+one, five+one*2, five+one*3, one+ten)
_roman_upper.append((base, digits))
_roman_lower.append((base, map(unicode.lower, digits)))
_roman_max = base * len(_roman_upper[-1][1])
    def _alpha_sequence(self, number, alphabet):
        size = len(alphabet)
        digits = ''
        while number > size:
            number, ordinal = divmod(number - 1, size)
            # prepend, so higher-order "digits" end up first
            digits = alphabet[ordinal] + digits
        digits = alphabet[number - 1] + digits
        return digits
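    # Illustrative sketch (added commentary, not part of the original
    # module): this yields spreadsheet-style column labels, e.g.
    #   _alpha_sequence(1, ASCII_UPPER)  -> 'A'
    #   _alpha_sequence(27, ASCII_UPPER) -> 'AA'
    #   _alpha_sequence(28, ASCII_UPPER) -> 'AB'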
    def _format(self, number, token, letter_value, grouping, separator):
if token in ('I', 'i') and letter_value != 'alphabetic':
# roman numerals
if 0 < number < self._roman_max:
result = []
                # consume the value from the largest base downward
                for bound, digits in reversed(self._roman_digits[token == 'i']):
                    if number >= bound:
                        index, number = divmod(number, bound)
                        result.append(digits[index])
result = u''.join(result)
else:
result = '%d' % number
elif token in ('A', 'a'):
# alphabetic numbering
alphabet = ASCII_LOWER if token == 'a' else ASCII_UPPER
result = self._alpha_sequence(number, alphabet)
else:
# arabic numerals
            if token[-1:] != '1':
                # unsupported format token; use '1' instead
                token = '1'
            result = '%0*d' % (len(token), number)
            if separator and grouping:
                start = -len(result)
step = -grouping
if start < step:
groups = []
for next in reversed(xrange(step, start, step)):
groups.append(result[start:next])
start = next
groups.append(result[start:])
result = separator.join(groups)
return datatypes.string(result)
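# Illustrative sketch (added commentary, not part of the original module):
# driven through the base class from amara.xslt.numbers, an xsl:number
# format string such as "1.a" dispatches each token to _format(), e.g.
#
#   english_formatter('en', u'1.a').formatmany([2, 3], None, 0, None)
#   -> u'2.c'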
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/numbers/en.py | en.py
import re
import itertools
__all__ = ['formatter']
DEFAULT_LANG = 'en'
DEFAULT_FORMAT = '1'
DEFAULT_SEPARATOR = '.'
def _repeat_last(seq):
for item in seq:
yield item
while 1:
yield item
return
# Bind the class name in the global scope so that the metaclass can be
# safely called for the construction of the initial class.
formatter = None
class formatter(object):
language = None
_classmap = {}
class __metaclass__(type):
def __init__(cls, name, bases, namespace):
if formatter is not None:
cls._classmap[cls.language] = cls
                # Allow sub-classes to be instantiated directly
cls.__new__ = object.__new__
def __new__(cls, language, format):
"""
Creates a `numberer` appropriate for the given language
or a default, English-based formatter.
Raises an exception if the language is unsupported. Currently, if the
language value is given, it must indicate English.
"""
# lang specifies the language whose alphabet is to be used
# for numbering when a format token is alphabetic.
#
# "if no lang value is specified, the language should be
# determined from the system environment." -- unsupported;
# we just default to English.
if not language:
language = DEFAULT_LANG
if language not in cls._classmap:
languages = [language]
if '-' in language:
primary, subpart = language.split('-', 1)
languages.append(primary)
for language in languages:
name = language.replace('-', '_')
try:
module = __name__ + '.' + name
__import__(module, globals(), globals(), ['*'])
except ImportError:
pass
else:
assert language in cls._classmap
break
else:
language = DEFAULT_LANG
try:
cls = cls._classmap[language]
except KeyError:
cls = cls._classmap[DEFAULT_LANG]
return object.__new__(cls)
_tokenize_re = re.compile('(?u)(\W*)(\w+)(\W*)')
def __init__(self, language, format):
if not format:
format = DEFAULT_FORMAT
parts = self._tokenize_re.findall(format)
if not parts:
# No alphanumeric token in the format string
self._prefix = self._suffix = format
self._toks = (DEFAULT_FORMAT,)
self._seps = (DEFAULT_SEPARATOR,)
elif len(parts) == 1:
self._prefix, token, self._suffix = parts[0]
self._toks = (token,)
self._seps = (DEFAULT_SEPARATOR,)
else:
self._prefix, token, sep = parts[0]
toks = self._toks = [token]
seps = self._seps = []
for prefix, token, suffix in parts[1:]:
seps.append(sep)
toks.append(token)
sep = suffix
self._suffix = suffix
return
def _format(self, number, token, letter_value, grouping, separator):
raise NotImplementedError
def format(self, number, letter_value, grouping, separator):
token = self._toks[0]
result = self._format(number, token, letter_value, grouping, separator)
return self._prefix + result + self._suffix
def formatmany(self, numbers, letter_value, grouping, separator):
result = [self._prefix]
for number, tok, sep in itertools.izip(numbers,
_repeat_last(self._toks),
_repeat_last(self._seps)):
result.append(self._format(number, tok, letter_value, grouping,
separator))
result.append(sep)
result[-1] = self._suffix
return u''.join(result)
# Load the default formatter
from amara.xslt.numbers import en
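# Illustrative sketch (added commentary, not part of the original package):
# unsupported languages fall back to the English formatter, and the last
# token and separator repeat for any extra numbers, e.g.
#
#   formatter('fr', u'1-1').formatmany([1, 2, 3], None, 0, None)
#   -> u'1-2-3'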
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/numbers/__init__.py | __init__.py
from amara import tree
from amara.xslt import XsltError
from amara.xslt.tree import xslt_element, content_model, attribute_types
__all__ = ['copy_element']
class copy_element(xslt_element):
content_model = content_model.template
attribute_types = {
'use-attribute-sets': attribute_types.qnames(),
}
def instantiate(self, context):
context.instruction = self
context.namespaces = self.namespaces
node = context.node
if isinstance(node, tree.element):
# Namespace nodes are automatically copied as well
# See XSLT 1.0 Sect 7.5
context.start_element(node.xml_qname, node.xml_namespace,
node.xmlns_attributes.copy())
if self._use_attribute_sets:
attribute_sets = context.transform.attribute_sets
for name in self._use_attribute_sets:
try:
attribute_set = attribute_sets[name]
except KeyError:
raise XsltError(XsltError.UNDEFINED_ATTRIBUTE_SET,
self, name)
attribute_set.instantiate(context)
self.process_children(context)
context.end_element(node.xml_qname, node.xml_namespace)
elif isinstance(node, (tree.text, tree.comment)):
context.text(node.xml_value)
elif isinstance(node, tree.entity):
self.process_children(context)
elif isinstance(node, tree.attribute):
context.attribute(node.xml_qname, node.xml_value,
node.xml_namespace)
elif isinstance(node, tree.processing_instruction):
context.processing_instruction(node.xml_target, node.xml_data)
elif isinstance(node, tree.namespace):
# Relies on XmlWriter rules, which is very close to spec:
# http://www.w3.org/1999/11/REC-xslt-19991116-errata/#E25
context.namespace(node.xml_qname, node.xml_value)
else:
raise RuntimeError("Unupported node type: %r" % type(node))
return
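# Illustrative usage (added commentary, not part of the original module):
# xsl:copy is the heart of the classic identity transform,
#
#   <xsl:template match="@*|node()">
#     <xsl:copy>
#       <xsl:apply-templates select="@*|node()"/>
#     </xsl:copy>
#   </xsl:template>
#
# which copies each node shallowly and recurses into attributes and
# children.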
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/tree/copy_element.py | copy_element.py
import sys
import operator
import itertools
import collections
from gettext import gettext as _
from amara.namespaces import XMLNS_NAMESPACE, XSL_NAMESPACE
from amara import tree, xpath
from amara.writers import outputparameters
from amara.xpath import XPathError, datatypes
from amara.xslt import XsltError, xsltcontext
from amara.xslt.tree import (xslt_element, content_model, attribute_types,
literal_element, variable_elements)
__all__ = ['match_tree', 'transform_element']
TEMPLATE_CONFLICT_LOCATION = _(
'In stylesheet %s, line %s, column %s, pattern %r')
BUILTIN_TEMPLATE_WITH_PARAMS = _(
'Built-in template invoked with parameters that will be ignored.')
_template_location = operator.attrgetter('baseUri', 'lineNumber',
'columnNumber', '_match')
def _fixup_aliases(node, aliases):
for child in node:
if isinstance(child, literal_element.literal_element):
child.fixup_aliases(aliases)
_fixup_aliases(child, aliases)
elif isinstance(child, xslt_element):
_fixup_aliases(child, aliases)
return
# The dispatch table is first keyed by mode, then keyed by node type. If an
# element type, it is further keyed by the name test.
class _type_dispatch_table(dict):
def __missing__(self, type_key):
if type_key == tree.element.xml_typecode:
value = self[type_key] = collections.defaultdict(list)
else:
value = self[type_key] = []
return value
class _key_dispatch_table(dict):
__slots__ = ('_match_table', '_matches_attribute')
_unpack_key = operator.attrgetter('_match', '_use', 'namespaces')
_unpack_pattern = operator.attrgetter('node_test', 'axis_type', 'node_type')
def __init__(self, keys):
match_table = _type_dispatch_table()
for key in keys:
match, use, namespaces = self._unpack_key(key)
for pattern in match:
node_test, axis_type, node_type = self._unpack_pattern(pattern)
info = (node_test, axis_type, namespaces, use)
# Add the template rule to the dispatch table
type_key = node_type.xml_typecode
if type_key == tree.element.xml_typecode:
# Element types are further keyed by the name test.
name_key = node_test.name_key
if name_key:
prefix, local = name_key
# Unprefixed names are in the null-namespace
try:
namespace = prefix and namespaces[prefix]
except KeyError:
raise XPathError(XPathError.UNDEFINED_PREFIX,
prefix=prefix)
else:
name_key = namespace, local
match_table[type_key][name_key].append(info)
else:
# Every other node type gets lumped into a single list
# for that node type
match_table[type_key].append(info)
        # Now expand the tables and convert to regular dictionaries to
        # prevent inadvertent growth when non-existent keys are used.
# Add those patterns that don't have a distinct type:
# node(), id() and key() patterns
any_patterns = match_table[tree.node.xml_typecode]
match_table = self._match_table = dict(match_table)
for type_key, patterns in match_table.iteritems():
if type_key == tree.element.xml_typecode:
# Add those that are wildcard tests ('*' and 'prefix:*')
wildcard_names = patterns[None]
name_table = match_table[type_key] = dict(patterns)
for name_key, patterns in name_table.iteritems():
if name_key is not None:
patterns.extend(wildcard_names)
patterns.extend(any_patterns)
name_table[name_key] = tuple(patterns)
else:
patterns.extend(any_patterns)
match_table[type_key] = tuple(patterns)
self._matches_attribute = tree.attribute.xml_typecode in match_table
def _match_nodes(self, context, nodes):
initial_focus = context.node, context.position, context.size
context.size = len(nodes)
position = 1
for node in nodes:
context.node = context.current_node = node
context.position = position
position += 1
# Get the possible matches for `node`
type_key = node.xml_typecode
type_table = self._match_table
if type_key in type_table:
if type_key == tree.element.xml_typecode:
element_table = type_table[type_key]
name_key = node.xml_name
if name_key in element_table:
matches = element_table[name_key]
else:
matches = element_table[None]
else:
matches = type_table[type_key]
else:
matches = type_table[tree.node.xml_typecode]
for pattern, axis_type, namespaces, use_expr in matches:
context.namespaces = namespaces
try:
m = pattern.match(context, node, axis_type)
except XPathError, exc:
raise XsltError(exc.code)
if m:
focus = context.node, context.position, context.size
context.node, context.position, context.size = node, 1, 1
value = use_expr.evaluate(context)
if isinstance(value, datatypes.nodeset):
for value in value:
yield datatypes.string(value), node
else:
yield datatypes.string(value), node
context.node, context.position, context.size = focus
if isinstance(node, tree.element):
for item in self._match_nodes(context, node.xml_children):
yield item
if self._matches_attribute and node.xml_attributes:
attributes = tuple(node.xml_attributes.nodes())
for item in self._match_nodes(context, attributes):
yield item
elif isinstance(node, tree.entity):
for item in self._match_nodes(context, node.xml_children):
yield item
context.node, context.position, context.size = initial_focus
return
def __missing__(self, key):
assert isinstance(key, tree.entity), key
values = collections.defaultdict(set)
context = xsltcontext.xsltcontext(key, 1, 1)
for value, node in self._match_nodes(context, [key]):
values[value].add(node)
# Now store the unique nodes as an XPath nodeset
values = self[key] = dict(values)
for value, nodes in values.iteritems():
values[value] = datatypes.nodeset(nodes)
return values
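# Illustrative sketch (added commentary, not part of the original module):
# given <xsl:key name="k" match="item" use="@id"/>, prime() stores a
# _key_dispatch_table under context.keys for the key's expanded name; the
# first lookup for a document entity walks that document once and caches a
# {string-value: nodeset} mapping, so key('k', '42') becomes a plain
# dictionary lookup afterwards.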
class transform_element(xslt_element):
content_model = content_model.seq(
content_model.rep(content_model.qname(XSL_NAMESPACE, 'xsl:import')),
content_model.top_level_elements,
)
attribute_types = {
'id': attribute_types.id(),
'extension-element-prefixes': attribute_types.prefixes(),
'exclude-result-prefixes': attribute_types.prefixes(),
'version': attribute_types.number(required=True),
}
space_rules = None
decimal_formats = None
namespace_aliases = None
attribute_sets = None
match_templates = None
named_templates = None
parameters = None
variables = None
global_variables = None
initial_functions = None
builtin_param_warning = True
def setup(self, _param_element=variable_elements.param_element):
"""
Called only once, at the first initialization
"""
self.output_parameters = outputparameters.outputparameters()
# Sort the top-level elements in decreasing import precedence to ease
# processing later.
precedence_key = operator.attrgetter('import_precedence')
elements = sorted(self.children, key=precedence_key, reverse=True)
# Merge the top-level stylesheet elements into their respective
# lists. Any element name not in the mapping is discarded.
# Note, by sharing the same list no merging is required later.
whitespace_elements, variable_elements = [], []
top_level_elements = {
'strip-space' : whitespace_elements,
'preserve-space' : whitespace_elements,
'output' : [],
'key' : [],
'decimal-format' : [],
'namespace-alias' : [],
'attribute-set': [],
'variable' : variable_elements,
'param' : variable_elements,
'template' : [],
}
# Using `groupby` takes advantage of series of same-named elements
# appearing adjacent to each other.
key = operator.attrgetter('expanded_name')
for (namespace, name), nodes in itertools.groupby(self.children, key):
if namespace == XSL_NAMESPACE and name in top_level_elements:
top_level_elements[name].extend(nodes)
# - process the `xsl:preserve-space` and `xsl:strip-space` elements
        # RECOVERY: Multiple matching patterns use the last occurrence
space_rules = {}
for element in whitespace_elements:
strip = element._strip_whitespace
for token in element._elements:
namespace, name = token
space_rules[token] = (namespace, name, strip)
self.space_rules = space_rules.values()
# sort in decreasing priority, where `*` is lowest, followed by
# `prefix:*`, then all others.
self.space_rules.sort(reverse=True)
# - process the `xsl:output` elements
# Sort in increasing import precedence, so the last one added
# will have the highest import precedence
elements = top_level_elements['output']
getter = operator.attrgetter(
'_method', '_version', '_encoding', '_omit_xml_declaration',
'_standalone', '_doctype_system', '_doctype_public',
'_cdata_section_elements', '_indent', '_media_type',
'_byte_order_mark', '_canonical_form')
for element in elements:
(method, version, encoding, omit_xmldecl, standalone,
doctype_system, doctype_public, cdata_elements, indent,
media_type, byte_order_mark, canonical_form) = getter(element)
if method is not None:
self.output_parameters.method = method
if version is not None:
self.output_parameters.version = version
if encoding is not None:
self.output_parameters.encoding = encoding
if omit_xmldecl is not None:
self.output_parameters.omit_xml_declaration = omit_xmldecl
if standalone is not None:
self.output_parameters.standalone = standalone
if doctype_system is not None:
self.output_parameters.doctype_system = doctype_system
if doctype_public is not None:
self.output_parameters.doctype_public = doctype_public
if cdata_elements:
self.output_parameters.cdata_section_elements += cdata_elements
if indent is not None:
self.output_parameters.indent = indent
if media_type is not None:
self.output_parameters.media_type = media_type
if byte_order_mark is not None:
self.output_parameters.byte_order_mark = byte_order_mark
if canonical_form is not None:
self.output_parameters.canonical_form = canonical_form
# - process the `xsl:key` elements
# Group the keys by name
elements = top_level_elements['key']
name_key = operator.attrgetter('_name')
elements.sort(key=name_key)
keys = self._keys = {}
for name, elements in itertools.groupby(elements, name_key):
keys[name] = tuple(elements)
# - process the `xsl:decimal-format` elements
formats = self.decimal_formats = {}
getter = operator.attrgetter(
'_decimal_separator', '_grouping_separator', '_infinity',
'_minus_sign', '_NaN', '_percent', '_per_mille', '_zero_digit',
'_digit', '_pattern_separator')
for element in top_level_elements['decimal-format']:
name = element._name
format = getter(element)
# It is an error to declare a decimal-format more than once
# (even with different import precedence) with different values.
if name in formats and formats[name] != format:
# Construct a useful name for the error message.
if name:
namespace, name = name
if namespace:
name = element.namespaces[namespace] + ':' + name
else:
name = '#default'
raise XsltError(XsltError.DUPLICATE_DECIMAL_FORMAT, name)
else:
formats[name] = format
# Add the default decimal format, if not declared.
if None not in formats:
formats[None] = ('.', ',', 'Infinity', '-', 'NaN', '%',
unichr(0x2030), '0', '#', ';')
# - process the `xsl:namespace-alias` elements
elements = top_level_elements['namespace-alias']
elements.reverse()
aliases = self.namespace_aliases = {}
for precedence, group in itertools.groupby(elements, precedence_key):
mapped = {}
for element in group:
namespace = element.namespaces[element._stylesheet_prefix]
if namespace not in aliases:
mapped[namespace] = True
result_prefix = element._result_prefix
result_namespace = element.namespaces[result_prefix]
aliases[namespace] = (result_namespace, result_prefix)
# It is an error for a namespace URI to be mapped to multiple
# different namespace URIs (with the same import precedence).
elif namespace in mapped:
raise XsltError(XsltError.DUPLICATE_NAMESPACE_ALIAS,
element._stylesheet_prefix)
if aliases:
# apply namespace fixup for the literal elements
_fixup_aliases(self, aliases)
# - process the `xsl:attribute-set` elements
sets = self.attribute_sets = {}
for element in top_level_elements['attribute-set']:
sets[element._name] = element
# - process the `xsl:param` and `xsl:variable` elements
index, self._variables = {}, variable_elements[:]
variable_elements.reverse()
for element in variable_elements:
name = element._name
if name not in index:
# unique (or first) variable binding
index[name] = 1
else:
# shadowed variable binding, remove from processing list
self._variables.remove(element)
self.parameters = frozenset(element._name for element in self._variables
if isinstance(element, _param_element))
# - process the `xsl:template` elements
match_templates = collections.defaultdict(_type_dispatch_table)
named_templates = self.named_templates = {}
elements = top_level_elements['template']
elements.reverse()
getter = operator.attrgetter('node_test', 'axis_type', 'node_type')
for position, element in enumerate(elements):
match, name = element._match, element._name
precedence = element.import_precedence
if match:
namespaces = element.namespaces
template_priority = element._priority
mode_table = match_templates[element._mode]
for pattern in match:
node_test, axis_type, node_type = getter(pattern)
if template_priority is None:
priority = node_test.priority
else:
priority = template_priority
sort_key = (precedence, priority, position)
info = (sort_key, node_test, axis_type, element)
# Add the template rule to the dispatch table
type_key = node_type.xml_typecode
if type_key == tree.element.xml_typecode:
# Element types are further keyed by the name test.
name_key = node_test.name_key
if name_key:
prefix, local = name_key
# Unprefixed names are in the null-namespace
try:
namespace = prefix and namespaces[prefix]
except KeyError:
raise XPathError(XPathError.UNDEFINED_PREFIX,
prefix=prefix)
else:
name_key = namespace, local
mode_table[type_key][name_key].append(info)
else:
# Every other node type gets lumped into a single list
# for that node type
mode_table[type_key].append(info)
if name:
# XSLT 1.0, Section 6, Paragraph 3:
# It is an error if a stylesheet contains more than one
# template with the same name and same import precedence.
if name not in named_templates:
named_templates[name] = element
elif named_templates[name].import_precedence == precedence:
# Construct a useful name for the error message.
namespace, name = name
if namespace:
name = element.namespaces[namespace] + ':' + name
raise XsltError(XsltError.DUPLICATE_NAMED_TEMPLATE, name)
        # Now expand the tables and convert to regular dictionaries to
        # prevent inadvertent growth when non-existent keys are used.
match_templates = self.match_templates = dict(match_templates)
for mode, type_table in match_templates.iteritems():
# Add those patterns that don't have a distinct type:
# node(), id() and key() patterns
any_patterns = type_table[tree.node.xml_typecode]
type_table = match_templates[mode] = dict(type_table)
for type_key, patterns in type_table.iteritems():
if type_key == tree.element.xml_typecode:
# Add those that are wildcard tests ('*' and 'prefix:*')
wildcard_names = patterns[None]
name_table = type_table[type_key] = dict(patterns)
for name_key, patterns in name_table.iteritems():
if name_key is not None:
patterns.extend(wildcard_names)
patterns.extend(any_patterns)
patterns.sort(reverse=True)
name_table[name_key] = tuple(patterns)
else:
patterns.extend(any_patterns)
patterns.sort(reverse=True)
type_table[type_key] = tuple(patterns)
#self._dump_match_templates(match_templates)
return
def _dump_match_templates(self, match_templates=None):
from pprint import pprint
if match_templates is None:
match_templates = self.match_templates
print "=" * 50
for mode, type_table in match_templates.iteritems():
print "mode:", mode
for node_type, patterns in type_table.iteritems():
print " node type:", node_type
print " patterns: ",
pprint(patterns)
#for patterninfo in self.match_templates[mode][nodetype]:
# pat, axistype, template = patterninfo
# print " template matching pattern %r for axis type %s" % (pat, axistype)
# templates[template] = 1
print '-'*30
return
############################# Prime Routines #############################
def prime(self, context):
processed = context.variables
elements, deferred = self._variables, []
num_writers = len(context._writers)
while 1:
for element in elements:
if element._name in processed:
continue
try:
element.instantiate(context)
except XPathError, error:
if error.code != XPathError.UNDEFINED_VARIABLE:
raise
                    # Remove any aborted and possibly unbalanced
                    # output handlers on the stack.
del context._writers[num_writers:]
deferred.append(element)
if not deferred:
break
elif deferred == elements:
# Just pick the first one as being the "bad" variable.
raise XsltError(XsltError.CIRCULAR_VARIABLE,
name=deferred[0]._name)
# Re-order stored variable elements to simplify processing for
# the next transformation.
for element in deferred:
self._variables.remove(element)
self._variables.append(element)
            # Try again, but this time process only the ones that
            # referenced as-yet-undefined variables.
elements, deferred = deferred, []
for name, keys in self._keys.iteritems():
context.keys[name] = _key_dispatch_table(keys)
return
def update_keys(self, context):
"""
Update all the keys for all documents in the context
Only used as an override for the default lazy key eval
"""
node = context.node
for doc in context.documents.itervalues():
context.node = doc
for key_name in self._keys:
self.update_key(context, key_name)
context.node = node
return
############################# Exit Routines #############################
def reset(self):
"""
Called whenever the processor is reset, i.e. after each run
Also called whenever multiple stylesheets are appended to
a processor, because the new top-level elements from the
new stylesheet need to be processed into the main one
"""
self.reset1()
self.reset2()
return
############################ Runtime Routines ############################
def apply_imports(self, context, precedence):
node, mode = context.node, context.mode
# Get the possible template rules for `node`
type_key = node.xml_typecode
if mode in self.match_templates:
type_table = self.match_templates[mode]
if type_key in type_table:
if type_key == tree.element.xml_typecode:
element_table = type_table[type_key]
name = node.xml_name
if name in element_table:
template_rules = element_table[name]
else:
template_rules = element_table[None]
else:
template_rules = type_table[type_key]
else:
template_rules = type_table[tree.node.xml_typecode]
else:
template_rules = ()
first_template = locations = None
for sort_key, pattern, axis_type, template in template_rules:
# Filter out those patterns with a higher import precedence than
# what was specified.
if sort_key[0] < precedence:
context.namespaces = template.namespaces
try:
m = pattern.match(context, node, axis_type)
                except XPathError, exc:
raise XsltError(exc.code, node, locations)
if m:
# Make sure the template starts with a clean slate
state = context.template, context.variables
context.template = template
context.variables = context.global_variables
try:
template.instantiate(context)
finally:
context.template, context.variables = state
break
else:
# Nothing matched, use builtin templates
if isinstance(node, (tree.element, tree.entity)):
self.apply_templates(context, node.xml_children, mode)
elif isinstance(node, (tree.text, tree.attribute)):
context.text(node.xml_value)
return
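    # Illustrative usage (added commentary, not part of the original
    # module): xsl:apply-imports lets an importing stylesheet decorate the
    # output of an imported rule for the same node, e.g.
    #
    #   <xsl:template match="div">
    #     <wrapper><xsl:apply-imports/></wrapper>
    #   </xsl:template>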
def apply_templates(self, context, nodes, mode=None, params=None):
"""
Intended to be used by XSLT instruction implementations only.
Implements the xsl:apply-templates instruction by attempting to
let the stylesheet apply its own template for the given context.
If the stylesheet does not have a matching template, the
built-in templates are invoked.
context is an XsltContext instance. params is a dictionary of
parameters being passed in, defaulting to None.
"""
initial_focus = context.node, context.position, context.size
initial_state = context.template, context.mode
if params is None:
params = {}
context.size, context.mode = len(nodes), mode
# Note, it is quicker to increment the `position` variable than it
# is to use enumeration: itertools.izip(nodes, itertools.count(1))
position = 1
for node in nodes:
# Set the current node for this template application
context.node = context.current_node = node
context.position = position
position += 1
# Get the possible template rules for `node`
type_key = node.xml_typecode
if mode in self.match_templates:
type_table = self.match_templates[mode]
if type_key in type_table:
if type_key == tree.element.xml_typecode:
element_table = type_table[type_key]
name = node.xml_name
if name in element_table:
template_rules = element_table[name]
else:
template_rules = element_table[None]
else:
template_rules = type_table[type_key]
else:
template_rules = type_table[tree.node.xml_typecode]
else:
template_rules = ()
first_template = locations = None
for sort_key, pattern, axis_type, template in template_rules:
context.namespaces = template.namespaces
if pattern.match(context, node, axis_type):
if 1: # recovery_method == Recovery.SILENT
# (default until recovery behaviour is selectable)
# Just use the first matching pattern since they are
# already sorted in descending order.
break
else: # recovery_method in (Recovery.WARNING, Recovery.NONE)
if not first_template:
first_template = template
else:
if not locations:
locations = [_template_location(first_template)]
locations.append(_template_location(template))
else:
# All template rules have been processed
if locations:
# Multiple template rules have matched. Report the
# template rule conflicts, sorted by position
locations.sort()
locations = '\n'.join(TEMPLATE_CONFLICT_LOCATION % location
for location in locations)
exception = XsltError(XsltError.MULTIPLE_MATCH_TEMPLATES,
node, locations)
if 1: # recovery_method == Recovery.WARNING
                        context.processor.warning(str(exception))
else:
raise exception
if first_template:
template = first_template
context.namespaces = template.namespaces
else:
template = None
if template:
context.template = template
# Make sure the template starts with a clean slate
variables = context.variables
context.variables = context.global_variables
try:
template.instantiate(context, params)
finally:
context.variables = variables
else:
# Nothing matched, use builtin templates
if params and self.builtin_param_warning:
context.processor.warning(BUILTIN_TEMPLATE_WITH_PARAMS)
self.builtin_param_warning = False
if isinstance(node, (tree.element, tree.entity)):
self.apply_templates(context, node.xml_children, mode)
elif isinstance(node, (tree.text, tree.attribute)):
context.text(node.xml_value)
# Restore context
context.node, context.position, context.size = initial_focus
context.template, context.mode = initial_state
return
#def PrintStylesheetTree(node, stream=None, indentLevel=0, showImportIndex=0,
# lastUri=None):
# """
# Function to print the nodes in the stylesheet tree, to aid in debugging.
# """
# stream = stream or sys.stdout
# if lastUri != node.xml_base:
# stream.write(indentLevel * ' ')
# stream.write('====%s====\n' % node.xml_base)
# lastUri = node.xml_base
# stream.write(indentLevel * ' ' + str(node))
# if showImportIndex:
# stream.write(' [' + str(node.import_precedence) + ']')
# stream.write('\n')
# stream.flush()
# show_ii = isinstance(node, xslt_element) and \
# node.expandedName in [(XSL_NAMESPACE, 'stylesheet'),
# (XSL_NAMESPACE, 'transform')]
# for child in node.children:
# PrintStylesheetTree(child, stream, indentLevel+1, show_ii, lastUri)
# return
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/tree/transform_element.py | transform_element.py
from amara.xslt import XsltError
from amara.xslt.tree import xslt_element
__all__ = ['literal_element']
class literal_element(xslt_element):
# usually not supplied so default it
_use_attribute_sets = None
# This will be called by the stylesheet if it contains any
# xsl:namespace-alias declarations
def fixup_aliases(self, aliases):
# handle the element itself
if self._output_namespace in aliases:
self._output_namespace, self.prefix = \
aliases[self._output_namespace]
# reprocess the attributes
pos = 0
for name, namespace, value in self._output_attrs:
# NOTE - attributes do not use the default namespace
if namespace and namespace in aliases:
prefix, name = name.split(':', 1)
namespace, prefix = aliases[namespace]
if prefix:
name = prefix + ':' + name
self._output_attrs[pos] = (name, namespace, value)
pos += 1
# handle the namespaces
for prefix, namespace in self._output_nss.items():
if namespace in aliases:
# remove the old entry
del self._output_nss[prefix]
# get the aliased namespace and set that pairing
namespace, prefix = aliases[namespace]
self._output_nss[prefix] = namespace
return
def instantiate(self, context):
context.instruction = self
context.namespaces = self.namespaces
context.start_element(self.nodeName, self._output_namespace,
self._output_nss)
for name, namespace, value in self._output_attrs:
value = value.evaluate(context)
context.attribute(name, value, namespace)
if self._use_attribute_sets:
attribute_sets = context.transform.attribute_sets
for name in self._use_attribute_sets:
try:
attribute_set = attribute_sets[name]
except KeyError:
raise XsltError(XsltError.UNDEFINED_ATTRIBUTE_SET, name=name)
attribute_set.instantiate(context)
self.process_children(context)
context.end_element(self.nodeName, self._output_namespace)
return
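# Illustrative usage (added commentary, not part of the original module):
# with a declaration such as
#
#   <xsl:namespace-alias stylesheet-prefix="axsl" result-prefix="xsl"/>
#
# literal axsl:* elements in the stylesheet are emitted as xsl:* elements;
# fixup_aliases() above applies that mapping to the element name, its
# attributes and its namespace declarations.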
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/tree/literal_element.py | literal_element.py
from amara._expat import ContentModel
from amara.namespaces import XSL_NAMESPACE
from amara.lib.xmlstring import isqname
__all__ = ['qname', 'seq', 'alt', 'rep1', 'rep', 'opt',
           'empty', 'text', 'result_elements', 'instructions', 'template',
           'top_level_elements',
           ]
RESULT_ELEMENT = (None, None)
TEXT_NODE = '#PCDATA'
EMPTY = '/empty/'
END_ELEMENT = ContentModel.FINAL_EVENT
def qname(namespace, name):
"""
Matches a fully qualified name (e.g., xsl:sort)
"""
assert isqname(name)
if ':' in name:
local = name[name.index(':')+1:]
else:
local = name
return ContentModel(ContentModel.TYPE_NAME, (namespace, local), label=name)
def seq(*args):
"""
Matches the each argument in sequential order.
"""
return ContentModel(ContentModel.TYPE_SEQ, args)
def alt(*args):
"""
Matches one of the given arguments.
"""
return ContentModel(ContentModel.TYPE_ALT, args)
def rep1(arg):
"""
Matches one or more occurrences of 'arg'.
"""
assert isinstance(arg, ContentModel)
arg.quant = ContentModel.QUANT_PLUS
return arg
def opt(arg):
"""
Matches zero or one occurrences of 'arg'
"""
assert isinstance(arg, ContentModel)
arg.quant = ContentModel.QUANT_OPT
return arg
def rep(arg):
"""
Matches zero or more occurrences of 'arg'
"""
assert isinstance(arg, ContentModel)
arg.quant = ContentModel.QUANT_REP
return arg
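# Illustrative sketch (added commentary, not part of the original module):
# the combinators compose; the XSLT content model for xsl:choose, for
# instance, could be written as
#
#   seq(rep1(qname(XSL_NAMESPACE, 'xsl:when')),
#       opt(qname(XSL_NAMESPACE, 'xsl:otherwise')))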
# special match that matches nothing
empty = ContentModel(ContentModel.TYPE_NAME, EMPTY, ContentModel.QUANT_OPT,
label='/empty/',
doc="`empty` is the content model for childless elements")
text = ContentModel(ContentModel.TYPE_NAME, TEXT_NODE,
ContentModel.QUANT_REP,
label="#PCDATA",
doc="`text` is the content model for text content")
result_elements = ContentModel(ContentModel.TYPE_NAME, RESULT_ELEMENT,
ContentModel.QUANT_REP,
label='/result-elements/',
doc=("`result_elements` is the set of elements "
" not declared in the XSL namespace"))
instructions = (qname(XSL_NAMESPACE, 'xsl:apply-templates'),
qname(XSL_NAMESPACE, 'xsl:call-template'),
qname(XSL_NAMESPACE, 'xsl:apply-imports'),
qname(XSL_NAMESPACE, 'xsl:for-each'),
qname(XSL_NAMESPACE, 'xsl:value-of'),
qname(XSL_NAMESPACE, 'xsl:copy-of'),
qname(XSL_NAMESPACE, 'xsl:number'),
qname(XSL_NAMESPACE, 'xsl:choose'),
qname(XSL_NAMESPACE, 'xsl:if'),
qname(XSL_NAMESPACE, 'xsl:text'),
qname(XSL_NAMESPACE, 'xsl:copy'),
qname(XSL_NAMESPACE, 'xsl:variable'),
qname(XSL_NAMESPACE, 'xsl:message'),
qname(XSL_NAMESPACE, 'xsl:fallback'),
qname(XSL_NAMESPACE, 'xsl:processing-instruction'),
qname(XSL_NAMESPACE, 'xsl:comment'),
qname(XSL_NAMESPACE, 'xsl:element'),
qname(XSL_NAMESPACE, 'xsl:attribute'))
instructions = ContentModel(ContentModel.TYPE_ALT, instructions,
ContentModel.QUANT_REP, label='/instructions/',
doc=("`instructions` is the set of elements which"
" have a category of instruction"))
template = ContentModel(ContentModel.TYPE_ALT,
(text, instructions, result_elements),
ContentModel.QUANT_REP, label='/template/',
doc=("`template` is the set of `text`, `instructions`"
" or `result-elements`"))
top_level_elements = (qname(XSL_NAMESPACE, 'xsl:include'),
qname(XSL_NAMESPACE, 'xsl:strip-space'),
qname(XSL_NAMESPACE, 'xsl:preserve-space'),
qname(XSL_NAMESPACE, 'xsl:output'),
qname(XSL_NAMESPACE, 'xsl:key'),
qname(XSL_NAMESPACE, 'xsl:decimal-format'),
qname(XSL_NAMESPACE, 'xsl:attribute-set'),
qname(XSL_NAMESPACE, 'xsl:variable'),
qname(XSL_NAMESPACE, 'xsl:param'),
qname(XSL_NAMESPACE, 'xsl:template'),
qname(XSL_NAMESPACE, 'xsl:namespace-alias'),
result_elements)
top_level_elements = ContentModel(ContentModel.TYPE_ALT, top_level_elements,
ContentModel.QUANT_REP,
label='/top-level-elements/',
doc=("`toplevelelements` is the set of "
"elements which have a category of "
"`top-level-element` or are a "
"`result-element`."))
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/tree/content_model.py | content_model.py
from amara.xpath import datatypes
from amara.xslt.tree import xslt_element, content_model, attribute_types
class sort_element(xslt_element):
content_model = content_model.empty
attribute_types = {
'select': attribute_types.string_expression(default='.'),
'lang': attribute_types.nmtoken_avt(),
# We don't support any additional data-types, hence no
# attribute_types.QNameButNotNCName()
'data-type': attribute_types.choice_avt(('text', 'number'),
default='text'),
'order': attribute_types.choice_avt(('ascending', 'descending'),
default='ascending'),
'case-order': attribute_types.choice_avt(('upper-first',
'lower-first')),
}
# Using `object` as a sentinel as `None` is a valid compare function
_missing = object()
_compare = _missing
_reverse = _missing
def setup(self):
# optimize for constant AVT attribute values (i.e., no {})
if self._data_type.constant and self._case_order.constant:
self._compare = self._get_compare(self._data_type.evaluate(None),
self._case_order.evaluate(None))
if self._order.constant:
self._reverse = self._order.evaluate(None) == 'descending'
return
def _get_compare(self, data_type, case_order):
if data_type == 'number':
comparer = _number_compare
else:
if case_order == 'lower-first':
comparer = _lower_first_compare
elif case_order == 'upper-first':
comparer = _upper_first_compare
else:
# use default for this locale
comparer = None
return comparer
def get_parameters(self, context, _missing=_missing):
compare, reverse = self._compare, self._reverse
if compare is _missing:
data_type = self._data_type.evaluate(context)
case_order = self._case_order and self._case_order.evaluate(context)
compare = self._get_compare(data_type, case_order)
if reverse is _missing:
reverse = self._order.evaluate(context) == 'descending'
return (compare, reverse)
def get_key(self, context):
data_type = self._data_type.evaluate(context)
if data_type == 'text':
# Use "real" strings as XPath string objects implement
# XPath semantics for relational (<,>) operators.
return unicode(self._select.evaluate_as_string(context))
elif data_type == 'number':
return self._select.evaluate_as_number(context)
return self._select.evaluate(context)
### Comparison Functions ###
def _number_compare(a, b):
    # NaN compares inconsistently under cmp(), so handle it ourselves;
    # for sorting purposes we treat NaN as smaller than any other number
if a.isnan():
return 0 if b.isnan() else -1
elif b.isnan():
return 1
return cmp(a, b)
def _lower_first_compare(a, b):
# case only matters if the strings are equal ignoring case
if a.lower() == b.lower():
for i, ch in enumerate(a):
if ch != b[i]:
return -1 if ch.islower() else 1
# they are truly equal
return 0
else:
return cmp(a, b)
def _upper_first_compare(a, b):
# case only matters if the strings are equal ignoring case
if a.lower() == b.lower():
for i, ch in enumerate(a):
if ch != b[i]:
return ch.isupper() and -1 or 1
# they are truly equal
return 0
else:
return cmp(a, b)
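# Illustrative sketch (added commentary, not part of the original module):
# case only breaks ties between otherwise-equal strings, e.g.
#
#   _lower_first_compare(u'foo', u'Foo') -> -1   (lowercase sorts first)
#   _upper_first_compare(u'foo', u'Foo') -> 1    (uppercase sorts first)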
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/tree/sort_element.py | sort_element.py
#from amara import TranslateMessage as _
from gettext import gettext as _
import cStringIO, traceback
#from amara.xpath import RuntimeException as XPathRuntimeException
from amara.xpath import datatypes, parser
from amara.xpath.parser import _parse as parse_xpath
from amara.xslt import XsltError, XsltStaticError, XsltRuntimeError
from amara.xslt.xpatterns import _parse as parse_xpattern
from amara.namespaces import XML_NAMESPACE, XMLNS_NAMESPACE
from amara.lib.xmlstring import isqname, splitqname
from amara.xslt.expressions.avt import avt_expression
class attribute_type(object):
__slots__ = ('required', 'default', 'description')
display = 'unknown'
def __init__(self, required=False, default=None, description=''):
self.required = required
self.default = default
self.description = description
return
def __str__(self):
return self.display
def prepare(self, element, value):
if value is None:
return self.default
return value
# `reprocess` is used in avt_expression
reprocess = prepare
def validate(self, validation):
return True
class _avt_constant(avt_expression):
# optimization hook
constant = True
def __init__(self, element, attribute_type, value):
self._format = attribute_type.reprocess(element, value)
self._args = None
def __str__(self):
return repr(self._format)
def __nonzero__(self):
return self._format is not None
class _avt_wrapper(avt_expression):
__slots__ = ('constant', '_element', '_attribute_type')
def __init__(self, element, attribute_type, value):
avt_expression.__init__(self, value)
self._element = element
self._attribute_type = attribute_type
self.constant = not self._args
def evaluate_as_string(self, context):
result = avt_expression.evaluate_as_string(self, context)
return self._attribute_type.reprocess(self._element, result)
evaluate = evaluate_as_string
class choice(attribute_type):
__slots__ = ('values',)
def __init__(self, values, required=False, default=None, description=''):
attribute_type.__init__(self, required, default, description)
self.values = values
return
def prepare(self, element, value):
if value is None:
return self.default
if value not in self.values:
# check for an `attribute_type` instance
for allowed in self.values:
if isinstance(allowed, self.__class__):
try:
allowed.prepare(element, value)
                    except XsltError:
pass
else:
break
else:
# if we get here it is an error
raise XsltError(XsltError.INVALID_ATTR_CHOICE, value=value)
return value
reprocess = prepare
def __str__(self):
return ' | '.join('"' + v + '"' for v in self.values)
class avt:
def __str__(self):
return '{ %s }' % self.display
def prepare(self, element, value):
if value is None:
return _avt_constant(element, self, self.default)
elif '{' not in value and '}' not in value:
return _avt_constant(element, self, value)
try:
return _avt_wrapper(element, self, value)
except XsltError, error:
# an error from the AVT parser
raise XsltError(XsltError.INVALID_AVT, value=value,
baseuri=element.baseUri, line=element.lineNumber,
col=element.columnNumber, msg=str(error))
class choice_avt(avt, choice):
def __str__(self):
return '{ %s }' % choice.__str__(self)
class any_avt(avt, attribute_type):
display = _('any avt')
class string(attribute_type):
display = _('string')
class string_avt(avt, string):
pass
class char(attribute_type):
"""
A string value with a length of one
"""
display = _('char')
def prepare(self, element, value):
if value is None:
return self.default
        if len(value) != 1:
            raise XsltError(XsltError.INVALID_CHAR_ATTR, value=value)
return value
reprocess = prepare
class char_avt(avt, char):
pass
class number(attribute_type):
display = _('number')
def prepare(self, element, value):
if value is None:
return self.default
try:
return float(value or self.default)
except:
raise XsltError(XsltError.INVALID_NUMBER_ATTR, value=value)
reprocess = prepare
class number_avt(avt, number):
reprocess = number.prepare
class uri_reference(attribute_type):
display = _('uri-reference')
def prepare(self, element, value):
if value is None:
return self.default
return value
reprocess = prepare
class uri_reference_avt(avt, uri_reference):
pass
class namespace_uri(uri_reference):
def prepare(self, element, value):
if value is None:
return self.default
if value in (XML_NAMESPACE, XMLNS_NAMESPACE):
raise XsltError(XsltError.INVALID_NS_URIREF_ATTR, value=value)
return value
reprocess = prepare
class namespace_uri_avt(avt, namespace_uri):
pass
class id(attribute_type):
display = _('id')
def prepare(self, element, value):
if value is None:
return self.default
if not value:
raise XsltError(XsltError.INVALID_ID_ATTR, value=value)
return value
reprocess = prepare
class id_avt(avt, id):
pass
class qname(attribute_type):
display = _('qname')
def prepare(self, element, value):
if value is None:
if self.default is None:
return None
value = self.default
elif not isqname(value):
raise XsltError(XsltError.INVALID_QNAME_ATTR, value=value)
prefix, local = splitqname(value)
if prefix:
try:
namespace = element.namespaces[prefix]
except KeyError:
                raise XsltRuntimeError(XsltError.UNDEFINED_PREFIX,
                                       elem=element, prefix=prefix)
else:
namespace = None
return (namespace, local)
reprocess = prepare
class qname_avt(avt, qname):
pass
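# Illustrative sketch (added for exposition): resolving a QName attribute
# against an element's in-scope namespaces, using a stub element. It
# assumes splitqname() returns a (prefix, local) pair.
def _example_qname_usage():
    class _stub(object):
        namespaces = {'ex': 'http://example.com/ns'}
    assert qname().prepare(_stub, 'ex:foo') == ('http://example.com/ns', 'foo')
    assert qname().prepare(_stub, 'bar') == (None, 'bar')   # no prefix
    # raw_qname keeps the prefix instead of resolving it
    assert raw_qname().prepare(_stub, 'ex:foo') == ('ex', 'foo')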
class raw_qname(qname):
def prepare(self, element, value):
if value is None:
if self.default is None:
return None
value = self.default
elif not isqname(value):
raise XsltError(XsltError.INVALID_QNAME_ATTR, value=value)
return splitqname(value)
reprocess = prepare
class raw_qname_avt(avt, raw_qname):
pass
class ncname(attribute_type):
display = _('ncname')
def prepare(self, element, value):
if value is None:
return self.default
if not value:
raise XsltError(XsltError.INVALID_NCNAME_ATTR, value=value)
if ':' in value:
raise XsltError(XsltError.INVALID_NCNAME_ATTR, value=value)
return value
reprocess = prepare
class ncname_avt(avt, ncname):
pass
class prefix(attribute_type):
display = _('prefix')
def prepare(self, element, value):
if value is None:
return self.default
if not value:
raise XsltError(XsltError.INVALID_PREFIX_ATTR, value=value)
if ':' in value:
raise XsltError(XsltError.INVALID_PREFIX_ATTR, value=value)
if value == '#default':
value = None
return value
reprocess = prepare
class prefix_avt(avt, prefix):
pass
class nmtoken(attribute_type):
display = _('nmtoken')
def prepare(self, element, value):
if value is None:
return self.default
if not value:
raise XsltError(XsltError.INVALID_NMTOKEN_ATTR, value=value)
return value
reprocess = prepare
class nmtoken_avt(avt, nmtoken):
pass
class qname_but_not_ncname(attribute_type):
display = _('qname-but-not-ncname')
def prepare(self, element, value):
if value is None:
if self.default is None:
return None
value = self.default
elif not value:
raise XsltError(XsltError.QNAME_BUT_NOT_NCNAME, value=value)
try:
index = value.index(':')
except ValueError:
raise XsltError(XsltError.QNAME_BUT_NOT_NCNAME, value=value)
prefix, local = value[:index], value[index+1:]
try:
namespace = element.namespaces[prefix]
except KeyError:
            raise XsltRuntimeError(XsltError.UNDEFINED_PREFIX,
                                   elem=element, prefix=prefix)
return (namespace, local)
reprocess = prepare
class token(attribute_type):
"""
An attribute whose value is used as an XPath NameTest
"""
display = _('token')
def prepare(self, element, value):
# a 'token' is really an XPath NameTest; '*' | NCName ':' '*' | QName
# From XPath 1.0 section 2.3:
# if the QName does not have a prefix, then the namespace URI is null
index = value.rfind(':')
if index == -1:
namespace = None
local = value
else:
prefix = value[:index]
local = value[index+1:]
try:
namespace = element.namespaces[prefix]
except KeyError:
                raise XsltRuntimeError(XsltError.UNDEFINED_PREFIX,
                                       elem=element, prefix=prefix)
return (namespace, local)
reprocess = prepare
class token_avt(avt, token):
pass
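# Illustrative sketch (added for exposition): `token` values are XPath
# NameTests, so '*' matches any name and 'prefix:*' any name in that
# prefix's namespace.
def _example_token_usage():
    class _stub(object):
        namespaces = {'ex': 'http://example.com/ns'}
    assert token().prepare(_stub, '*') == (None, '*')
    assert token().prepare(_stub, 'ex:*') == ('http://example.com/ns', '*')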
class expression_wrapper:
def __init__(self, expression, element, original):
self.expression = expression
self.element = element
self.original = original
return
def __nonzero__(self):
# True if self.expression is not None, which is always the case
# otherwise this instance would not exist!
return True
def __getattr__(self, attr):
"""Make this behave as if it was the expression object itself."""
return getattr(self.expression, attr)
# Provide the copy/pickle helpers so as to not get them from the
# wrapped expression.
def __getstate__(self):
return (self.expression, self.element, self.original)
def __setstate__(self, state):
self.expression, self.element, self.original = state
return
def evaluate(self,context):
try:
return self.expression.evaluate(context)
except XPathRuntimeException, e:
import MessageSource
e.message = MessageSource.EXPRESSION_POSITION_INFO % (
self.element.baseUri, self.element.lineNumber,
self.element.columnNumber, self.original, str(e))
# By modifying the exception value directly, we do not need
# to raise with that value, thus leaving the frame stack
# intact (original traceback is displayed).
raise
except XsltError, e:
import MessageSource
e.message = MessageSource.XSLT_EXPRESSION_POSITION_INFO % (
str(e), self.original)
# By modifying the exception value directly, we do not need
# to raise with that value, thus leaving the frame stack
# intact (original traceback is displayed).
raise
except (KeyboardInterrupt, SystemExit):
raise
except Exception, e:
import MessageSource
tb = cStringIO.StringIO()
tb.write("Lower-level traceback:\n")
traceback.print_exc(1000, tb)
raise RuntimeError(MessageSource.EXPRESSION_POSITION_INFO % (
self.element.baseUri, self.element.lineNumber,
self.element.columnNumber, self.original, tb.getvalue()))
class expression(attribute_type):
"""
An attribute whose value is used as an XPath expression
"""
display = _('expression')
def prepare(self, element, value):
if value is None:
if self.default is None:
return None
value = self.default
try:
return parse_xpath(value)
except SyntaxError, error:
raise XsltError(XsltError.INVALID_EXPRESSION, value=value,
baseuri=element.baseUri, line=element.lineNumber,
col=element.columnNumber, msg=str(error))
class nodeset_expression(expression):
display = _('nodeset-expression')
class string_expression(expression):
display = _('string-expression')
class number_expression(expression):
display = _('number-expression')
class boolean_expression(expression):
display = _('boolean-expression')
class pattern(attribute_type):
"""
An attribute whose value is used as an XPattern expression
"""
display = _('pattern')
def prepare(self, element, value):
if value is None:
if self.default:
value = self.default
else:
return None
try:
return parse_xpattern(value)
except XsltError, err:
if err.__class__ is XsltError:
XsltRuntimeError.update_error(err, element)
raise
class tokens(token):
"""
A whitespace separated list of tokens (see Token for description of a token)
"""
display = _('tokens')
def prepare(self, element, value):
if value is None:
return []
tokens = []
for value in value.split():
prepared = token.prepare(self, element, value)
tokens.append(prepared)
return tokens
reprocess = prepare
class tokens_avt(avt, tokens):
pass
class qnames(qname):
"""
A whitespace separated list of qnames (see QName for description of a qname)
"""
display = _('qnames')
def prepare(self, element, value):
if value is None:
return []
qnames = []
for value in value.split():
prepared = qname.prepare(self, element, value)
qnames.append(prepared)
return qnames
reprocess = prepare
class qnames_avt(avt, qnames):
pass
class prefixes(prefix):
"""
A whitespace separated list of prefixes (see Prefix for more information)
"""
display = _('prefixes')
def prepare(self, element, value):
if value is None:
return []
prefixes = []
for value in value.split():
prepared = prefix.prepare(self, element, value)
prefixes.append(prepared)
return prefixes
reprocess = prepare
class prefixes_avt(avt, prefixes):
pass
class yesno(attribute_type):
display = '"yes" | "no"'
    def prepare(self, element, value):
        if value is None:
            return self.default == 'yes'
        elif value not in ('yes', 'no'):
            raise XsltError(XsltError.INVALID_ATTR_CHOICE, value=value)
        return value == 'yes'
reprocess = prepare
class yesno_avt(avt, yesno):
pass
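# Illustrative sketch (added for exposition): `yesno` maps the attribute
# text to a boolean, falling back to the declared default when absent.
def _example_yesno_usage():
    assert yesno(default='yes').prepare(None, None) is True
    assert yesno().prepare(None, 'no') is False
    try:
        yesno().prepare(None, 'maybe')      # only "yes" / "no" are legal
    except XsltError:
        pass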
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/tree/attribute_types.py
from amara.namespaces import XSL_NAMESPACE
from amara.xslt import XsltError
from amara.xslt.tree import (xslt_element, content_model, attribute_types,
choose_elements, if_element)
class call_template_element(xslt_element):
content_model = content_model.rep(
content_model.qname(XSL_NAMESPACE, 'xsl:with-param')
)
attribute_types = {
'name': attribute_types.qname(required=True),
}
_tail_recursive = False
def setup(self):
self._params = [ (child, child._name, child._select)
for child in self.children ]
return
def prime(self, context,
_test_elements=(if_element.if_element,),
_choose_elements=(choose_elements.when_element,
choose_elements.otherwise_element,)):
transform = self.root.stylesheet
try:
template = self._template = transform.named_templates[self._name]
except KeyError:
raise XsltError(XsltError.NAMED_TEMPLATE_NOT_FOUND,
self, self._name)
# NOTE: Tail recursion is now checked for in the xsl:template setup().
return
def instantiate(self, context):
# We need to calculate the parameters before the variable context
# is changed back in the template element
params = {}
for param, name, select in self._params:
context.instruction, context.namespaces = param, param.namespaces
params[name] = select.evaluate(context)
if self._tail_recursive:
context.recursive_parameters = params
else:
#context.current_node = context.node
self._template.instantiate(context, params)
return
#def __getstate__(self):
# del self._params
# return xslt_element.__getstate__(self)
#def __setstate__(self, state):
# xslt_element.__setstate__(self, state)
# self._params = [ (child, child._name, child._select.evaluate)
# for child in self.children ]
# return
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/tree/call_template_element.py
from amara import tree
from amara.namespaces import XSL_NAMESPACE
from amara.xslt import XsltError, numbers
from amara.xslt.tree import xslt_element, content_model, attribute_types
from amara.xpath import datatypes
DEFAULT_LANG = 'en'
DEFAULT_FORMAT = '1'
SINGLE = 0
MULTIPLE = 1
ANY = 2
SIMPLE = 3 # no count or from
class number_element(xslt_element):
content_model = content_model.empty
attribute_types = {
'level': attribute_types.choice(('single', 'multiple', 'any'),
default='single'),
'count': attribute_types.pattern(),
'from': attribute_types.pattern(),
'value': attribute_types.expression(),
'format': attribute_types.string_avt(default='1'),
'lang': attribute_types.nmtoken_avt(),
'letter-value': attribute_types.choice_avt(('alphabetic',
'traditional'),
default='traditional'),
'grouping-separator': attribute_types.char_avt(),
'grouping-size': attribute_types.number_avt(),
}
def setup(self):
if self._level == 'single':
if not self._count and not self._from:
self._level = SIMPLE
else:
self._level = SINGLE
elif self._level == 'multiple':
self._level = MULTIPLE
elif self._level == 'any':
self._level = ANY
if self._format.constant and self._lang.constant:
format = self._format.evaluate_as_string(None)
lang = self._lang.evaluate_as_string(None)
self._formatter = numbers.formatter(lang, format)
else:
self._formatter = None
return
def instantiate(self, context):
context.instruction = self
context.namespaces = self.namespaces
formatter = self._formatter
if not formatter:
format = self._format
if format:
format = format.evaluate_as_string(context)
if not format:
format = DEFAULT_FORMAT
lang = self._lang
if lang:
lang = lang.evaluate_as_string(context)
if not lang:
lang = DEFAULT_LANG
formatter = numbers.formatter(lang, format)
letter_value = self._letter_value.evaluate_as_string(context)
if self._grouping_separator and self._grouping_size:
separator = self._grouping_separator.evaluate_as_string(context)
grouping = int(self._grouping_size.evaluate_as_number(context))
else:
separator = grouping = None
# get the formatted value(s)
if self._value:
value = self._value.evaluate_as_number(context)
# XSLT 1.0, Section 7.7, Paragraph 1
# ERROR: the number is NaN, infinite or less than 0.5
# RECOVERY: convert to string and insert into the result tree
if not value.isfinite() or value < 0.5:
result = datatypes.string(value)
else:
value = int(round(value))
result = formatter.format(value, letter_value, grouping,
separator)
else:
node = context.node
if self._level == SINGLE:
value = self._single_value(context, node, self._count,
self._from)
if value == 0:
value = None
                result = formatter.format(value, letter_value, grouping,
                                          separator)
elif self._level == MULTIPLE:
values = self._multiple_values(context, node)
result = formatter.formatmany(values, letter_value, grouping,
separator)
elif self._level == ANY:
value = self._any_value(context, node)
if value == 0:
value = None
result = formatter.format(value, letter_value, grouping,
separator)
else:
# 'single' without count or from attributes
value = 1
prev = node.xml_preceding_sibling
node_type = node.xml_type
node_name = node.xml_name
while prev:
if prev.xml_type == node_type and \
prev.xml_name == node_name:
value += 1
prev = prev.xml_preceding_sibling
result = formatter.format(value, letter_value, grouping,
separator)
# add the resulting formatted value(s) to the result tree
context.text(result)
return
    def _single_value(self, context, node, countPattern, fromPattern):
        if not countPattern:
            if not node.xml_local:
                # text, comment and processing instruction
                countPattern = type_pattern(node.xml_type)
            else:
                countPattern = name_pattern(node.xml_type, node.xml_name)
if fromPattern:
start = node.xml_parent
while start and not fromPattern.match(context, start):
start = start.xml_parent
else:
start = node.xml_root
while not countPattern.match(context, node):
node = node.xml_parent
if node is None or node == start:
return 0
value = 0
while node:
value += 1
node = node.xml_preceding_sibling
while node and not countPattern.match(context, node):
node = node.xml_preceding_sibling
return value
def _multiple_values(self, context, node):
count = self._count
if not count:
if isinstance(node, (tree.element, tree.attribute)):
count = name_pattern(node.xml_type, node.xml_name)
else:
count = type_pattern(node.xml_type)
values = []
while node:
if count.match(context, node):
value = self._single_value(context, node, count, None)
values.insert(0, value)
node = node.xml_parent
if node and self._from and self._from.match(context, node):
break
return values
def _any_value(self, context, node):
count = self._count
if not count:
if isinstance(node, (tree.element, tree.attribute)):
count = name_pattern(node.xml_type, node.xml_name)
else:
count = type_pattern(node.xml_type)
value = 0
while node:
if self._from and self._from.match(context, node):
break
if count.match(context, node):
value += 1
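            # step to the previous node in document order: descend to the
            # deepest last descendant of the preceding sibling, or move up
            # to the parent when there is no preceding sibling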
next = node.xml_preceding_sibling
if not next:
node = node.xml_parent
else:
node = next
next = getattr(node, 'xml_last_child', None)
while next:
node = next
next = getattr(node, 'xml_last_child', None)
return value
class type_pattern:
def __init__(self, xml_type):
self.xml_type = xml_type
return
def match(self, context, node):
return (node.xml_type == self.xml_type)
class name_pattern:
def __init__(self, xml_type, xml_name):
self.xml_type = xml_type
self.xml_name = xml_name
return
def match(self, context, node):
return (node.xml_type == self.xml_type and
node.xml_name == self.xml_name)
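# Illustrative sketch (added for exposition): the two helper patterns
# above match nodes by type alone, or by type plus expanded-name. The
# stub node stands in for a real amara.tree node.
def _example_pattern_match():
    class _stub(object):
        xml_type = 'element'
        xml_name = (None, 'item')
    node = _stub()
    assert type_pattern('element').match(None, node)
    assert name_pattern('element', (None, 'item')).match(None, node)
    assert not name_pattern('element', (None, 'other')).match(None, node)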
##Note: emacs can uncomment the ff automatically.
##To: [email protected]
##Subject: Re: number format test
##From: MURAKAMI Shinyu <[email protected]>
##Date: Thu, 3 Aug 2000 01:18:10 +0900 (Wed 10:18 MDT)
##Kay Michael <[email protected]> wrote:
##>> 5. Saxon
##>> - Fullwidth 1 (#xff11) are supported.
##>> - Hiragana/Katakana/Kanji format generates incorrect result.
##>> (Unicode codepoint order, such as #x3042, #x3043, #x3044,...)
##>> useless and trouble with Non-European style processing.
##>> fix it please!!
##>
##>If you could tell me what the correct sequence is, I'll be happy to include
##>it. Help me please!
##XSLT 1.0 spec says:
## 7.7.1 Number to String Conversion Attributes
## ...
## - Any other format token indicates a numbering sequence that starts
## with that token. If an implementation does not support a numbering
## sequence that starts with that token, it must use a format token of 1.
##The last sentence is important. ...it must use a format token of 1.
##If Saxon will support... the following are Japanese Hiragana/Katakana sequences
##-- modern(A...) and traditional(I...) -- and Kanji(CJK ideographs) numbers.
##format="あ" (Hiragana A)
##あいうえおかきくけこ
##さしすせそたちつてと
##なにぬねのはひふへほ
##まみむめもやゆよらり
##るれろわをん
##format="ア" (Katakana A)
##アイウエオカキクケコ
##サシスセソタチツテト
##ナニヌネノハヒフヘホ
##マミムメモヤユヨラリ
##ルレロワヲン
##format="い" (Hiragana I)
##いろはにほへとちりぬ
##るをわかよたれそつね
##ならむうゐのおくやま
##けふこえてあさきゆめ
##みしゑひもせす
##format="イ" (Katakana I)
##イロハニホヘトチリヌ
##ルヲワカヨタレソツネ
##ナラムウヰノオクヤマ
##ケフコエテアサキユメ
##ミシヱヒモセス
##format="一" (Kanji 1) (decimal notation)
##一(=1) 二(=2) 三(=3) 四(=4) 五(=5)
##六(=6) 七(=7) 八(=8) 九(=9) 〇(=0)
##e.g. 一〇(=10) 二五六(=256)
##There are more ideographic(kanji)-number formats, but the above will be sufficient.
##Thanks,
##MURAKAMI Shinyu
##[email protected]
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/tree/number_element.py
from amara.namespaces import XSL_NAMESPACE, EXTENSION_NAMESPACE
from amara.xslt import XsltError
from amara.xslt.tree import xslt_element, content_model, attribute_types
__all__ = (
'import_element', 'include_element', 'strip_space_element',
'preserve_space_element', 'output_element', 'key_element',
'decimal_format_element', 'namespace_alias_element',
'attribute_set_element'
)
class _combining_element(xslt_element):
content_model = content_model.empty
attribute_types = {
'href': attribute_types.uri_reference(required=True),
}
class import_element(_combining_element):
"""Implementation of the `xsl:import` element"""
pass
class include_element(_combining_element):
"""Implementation of the `xsl:include_elementelement"""
pass
class _whitespace_element(xslt_element):
content_model = content_model.empty
attribute_types = {
        'elements' : attribute_types.tokens(required=True),
}
_strip_whitespace = None
class strip_space_element(_whitespace_element):
"""Implementation of the `xsl:strip-space` element"""
_strip_whitespace = True
class preserve_space_element(_whitespace_element):
"""Implementation of the `xsl:preserve-space` element"""
_strip_whitespace = False
class output_element(xslt_element):
"""Implementation of the `xsl:output` element"""
content_model = content_model.empty
attribute_types = {
'method': attribute_types.qname(),
'version': attribute_types.nmtoken(),
'encoding': attribute_types.string(),
'omit-xml-declaration': attribute_types.yesno(),
'standalone': attribute_types.yesno(),
'doctype-public': attribute_types.string(),
'doctype-system': attribute_types.string(),
'cdata-section-elements': attribute_types.qnames(),
'indent': attribute_types.yesno(),
'media-type': attribute_types.string(),
'f:byte-order-mark': attribute_types.yesno(
description=("Whether to force output of a byte order mark (BOM). "
"Usually used to generate a UTF-8 BOM. Do not use "
"unless you're sure you know what you're doing")),
'f:canonical-form': attribute_types.yesno(),
}
def setup(self):
if (EXTENSION_NAMESPACE, 'byte-order-mark') in self.attributes:
value = self.attributes[EXTENSION_NAMESPACE, 'byte-order-mark']
self._byte_order_mark = value == 'yes'
else:
self._byte_order_mark = None
if (EXTENSION_NAMESPACE, 'canonical-form') in self.attributes:
value = self.attributes[EXTENSION_NAMESPACE, 'canonical-form']
self._canonical_form = value == 'yes'
else:
self._canonical_form = None
return
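# Illustrative sketch (added for exposition): the tri-state computed in
# output_element.setup() above, extracted as a plain function. Here
# `attributes` is assumed to be a {(namespace, localname): value} mapping.
def _example_tristate(attributes, key):
    if key in attributes:
        return attributes[key] == 'yes'   # explicitly set: True or False
    return None                           # unset: leave for later merging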
class key_element(xslt_element):
"""Implementation of the `xsl:key` element"""
content_model = content_model.empty
attribute_types = {
'name': attribute_types.qname(required=True),
'match': attribute_types.pattern(required=True),
'use': attribute_types.expression(required=True),
}
class decimal_format_element(xslt_element):
content_model = content_model.empty
attribute_types = {
'name': attribute_types.qname(),
'decimal-separator': attribute_types.char(default='.'),
'grouping-separator': attribute_types.char(default=','),
'infinity': attribute_types.string(default='Infinity'),
'minus-sign': attribute_types.char(default='-'),
'NaN': attribute_types.string(default='NaN'),
'percent': attribute_types.char(default='%'),
'per-mille': attribute_types.char(default=unichr(0x2030)),
'zero-digit': attribute_types.char(default='0'),
'digit': attribute_types.char(default='#'),
'pattern-separator': attribute_types.char(default=';'),
}
class namespace_alias_element(xslt_element):
content_model = content_model.empty
attribute_types = {
'stylesheet-prefix': attribute_types.prefix(required=True),
'result-prefix': attribute_types.prefix(required=True),
}
class attribute_set_element(xslt_element):
content_model = content_model.rep(
content_model.qname(XSL_NAMESPACE, 'xsl:attribute')
)
attribute_types = {
'name': attribute_types.qname(required=True),
'use-attribute-sets': attribute_types.qnames(),
}
def instantiate(self, context, used=None):
if used is None:
used = []
if self in used:
raise XsltError(XsltError.CIRCULAR_ATTRIBUTE_SET,
self, self._name)
else:
used.append(self)
# XSLT 1.0, Section 7.1.4, Paragraph 4:
# The available variable bindings are only the top-level ones.
variables = context.variables
context.variables = context.global_variables
attribute_sets = context.transform.attribute_sets
for name in self._use_attribute_sets:
try:
attribute_set = attribute_sets[name]
except KeyError:
                raise XsltError(XsltError.UNDEFINED_ATTRIBUTE_SET,
                                self, name)
else:
attribute_set.instantiate(context)
self.process_children(context)
context.variables = variables
used.remove(self)
return
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/tree/declaration_elements.py
from amara.namespaces import XSL_NAMESPACE
from amara.xslt import XsltError
from amara.xslt.tree import xslt_element, content_model, attribute_types
from call_template_element import call_template_element
from choose_elements import choose_element
from if_element import if_element
from variable_elements import param_element
class template_element(xslt_element):
content_model = content_model.seq(
content_model.rep(content_model.qname(XSL_NAMESPACE, 'xsl:param')),
content_model.template,
)
attribute_types = {
'match': attribute_types.pattern(),
'name': attribute_types.qname(),
'priority': attribute_types.number(),
'mode': attribute_types.qname(),
}
_tail_recursive = False
def __repr__(self):
return "<template_element match='%s', name='%s', mode='%s', priority='%s'>" % (
self._match, self._name, self._mode, self._priority)
def setup(self):
params = self._params = []
for child in self.children:
if isinstance(child, param_element):
params.append((child, child._name))
elif isinstance(child, xslt_element):
break
if self._params:
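            # The reader brackets a parameterized template's children with
            # push/pop variable pseudo-nodes, so the child layout is:
            #   [push_variables, param..., instruction..., pop_variables]
            # Skip the push node and the params; drop the trailing pop node.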
self._instructions = self.children[len(self._params)+1:-1]
else:
self._instructions = self.children
        # Check for tail-recursive invocation (i.e., call-templates of self)
if self._name and self._instructions:
endpoints = [self._instructions[-1]]
queue = endpoints.append
for last in endpoints:
if isinstance(last, call_template_element):
if last._name == self._name:
self._tail_recursive = True
last._tail_recursive = True
break
elif isinstance(last, if_element):
last = last.last_instruction
if last: queue(last)
elif isinstance(last, choose_element):
for choice in last.children:
last = choice.last_instruction
if last: queue(last)
return
def _printTemplateInfo(self):
info, tname = self.getTemplateInfo()
if tname:
print "Template named %r:" % tname
else:
print "Template matching pattern %r :" % self._match
print " location: line %d, col %d of %s" % \
(self.lineNumber, self.columnNumber, self.baseUri)
for shortcut in info:
print " shortcut:"
importidx, priority, tmode, patterninfo, quickkey = shortcut
print " ...import index:", importidx
print " .......priority:", priority
print " ...........mode:", tmode
if not tname:
print " ......quick key: node type %s, expanded-name %r" % quickkey
print " ........pattern: %r for axis type %s" % patterninfo[0:2]
return
def instantiate(self, context, params=None):
if params is None:
params = {}
if self._params:
variables = context.variables
context.variables = variables.copy()
# The optimizer converts this to, roughly, a do/while loop
while 1:
context.recursive_parameters = None
for child, param in self._params:
if param in params:
context.variables[param] = params[param]
else:
child.instantiate(context)
for child in self._instructions:
child.instantiate(context)
# Update the params from the values given in
# `recursive_parameters`.
params = context.recursive_parameters
if params is None:
break
if self._params:
context.variables = variables
return
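# Illustrative sketch (added for exposition): the instantiate() loop above,
# reduced to its core. A tail-recursive xsl:call-template stores its new
# parameters in `recursive_parameters` instead of recursing; the loop then
# re-runs the same template body with them, keeping the Python stack flat.
def _example_tail_call_loop(run_body_once, initial_params):
    params = initial_params
    while 1:
        params = run_body_once(params)   # returns None when no tail call
        if params is None:
            break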
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/tree/template_element.py
import cStringIO
from xml.dom import Node
from xml.sax import SAXParseException
from xml.sax.handler import property_dom_node
from amara import sax
from amara.lib import IriError, inputsource
from amara.lib.xmlstring import isspace
from amara.namespaces import XML_NAMESPACE, XMLNS_NAMESPACE, XSL_NAMESPACE
from amara.xslt import XsltError, XsltStaticError
from amara.xslt import extensions, exslt
from amara.xslt.tree import *
__all__ = ['stylesheet_reader']
# Whitespace stripping rules for a stylesheet:
# preserve all whitespace within xsl:text elements;
# strip whitespace from all other elements
_XSLT_WHITESPACE_STRIPPING = ((XSL_NAMESPACE, 'text', False), (None, '*', True))
# pseudo-nodes for save/restore of variable bindings
class push_variables_node(xslt_node):
pseudo_node = True
def __init__(self, root, scope):
xslt_node.__init__(self, root)
self._scope = scope
return
def instantiate(self, context):
variables = context.variables
self._scope.append(variables)
context.variables = variables.copy()
return
class pop_variables_node(xslt_node):
pseudo_node = True
def __init__(self, root, scope):
xslt_node.__init__(self, root)
self._scope = scope
return
def instantiate(self, context):
scope = self._scope
context.variables = scope[-1]
del scope[-1]
return
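# Illustrative sketch (added for exposition): the save/restore protocol the
# two pseudo-nodes above implement, reduced to plain dict operations.
def _example_scope_bracketing():
    variables = {'x': 1}
    scope = []
    scope.append(variables)               # push: remember outer bindings
    variables = variables.copy()
    variables['x'] = 2                    # locals may shadow freely
    variables = scope.pop()               # pop: outer bindings restored
    assert variables['x'] == 1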
ELEMENT_CLASSES = {
'apply-imports': apply_imports_element.apply_imports_element,
'apply-templates': apply_templates_element.apply_templates_element,
'attribute': attribute_element.attribute_element,
'call-template': call_template_element.call_template_element,
'choose': choose_elements.choose_element,
'otherwise': choose_elements.otherwise_element,
'when': choose_elements.when_element,
'comment': comment_element.comment_element,
'copy': copy_element.copy_element,
'copy-of': copy_of_element.copy_of_element,
'element': element_element.element_element,
'fallback': fallback_elements.fallback_element,
'for-each': for_each_element.for_each_element,
'if': if_element.if_element,
'message': message_element.message_element,
'number': number_element.number_element,
'processing-instruction':
processing_instruction_element.processing_instruction_element,
'stylesheet': transform_element.transform_element,
'transform': transform_element.transform_element,
'template': template_element.template_element,
'text': text_element.text_element,
'value-of': value_of_element.value_of_element,
'variable': variable_elements.variable_element,
'param': variable_elements.param_element,
'sort': sort_element.sort_element,
'with-param': with_param_element.with_param_element,
'import': declaration_elements.import_element,
'include': declaration_elements.include_element,
'strip-space': declaration_elements.strip_space_element,
'preserve-space': declaration_elements.preserve_space_element,
'output': declaration_elements.output_element,
'key': declaration_elements.key_element,
'decimal-format': declaration_elements.decimal_format_element,
'namespace-alias': declaration_elements.namespace_alias_element,
'attribute-set': declaration_elements.attribute_set_element,
}
# The XSL attributes allowed on literal elements
_RESULT_ELEMENT_XSL_ATTRS = {
'exclude-result-prefixes' : attribute_types.prefixes(),
'extension-element-prefixes' : attribute_types.prefixes(),
'use-attribute-sets' : attribute_types.qnames(),
'version' : attribute_types.number(),
}
_RESULT_ELEMENT_ATTR_INFO = attribute_types.any_avt()
_root_content_model = content_model.alt(
content_model.qname(XSL_NAMESPACE, 'xsl:stylesheet'),
content_model.qname(XSL_NAMESPACE, 'xsl:transform'),
content_model.result_elements)
_XSLT_ROOT_VALIDATION = _root_content_model.compile()
_LITERAL_ELEMENT_VALIDATION = content_model.template.compile()
class parse_state:
"""
Stores the current state of the parser.
Constructor arguments/instance variables:
validation - validation state for the current containing node.
localVariables - set of in-scope variable bindings to determine
variable shadowing.
forwardsCompatible - flag indicating whether or not forwards-compatible
processing is enabled.
currentNamespaces - set of in-scope namespaces for the current node.
extensionNamespaces - set of namespaces defining extension elements
outputNamespaces - set of in-scope namespaces for literal result elements
"""
def __init__(self, node, validation, localVariables, forwardsCompatible,
currentNamespaces, extensionNamespaces, outputNamespaces):
self.node = node
self.validation = validation
self.localVariables = localVariables
self.forwardsCompatible = forwardsCompatible
self.currentNamespaces = currentNamespaces
self.extensionNamespaces = extensionNamespaces
self.outputNamespaces = outputNamespaces
return
class stylesheet_reader(object):
"""
This class can be used to read, from a variety of sources, a
stylesheet and all its included and imported stylesheets, building
from them a single, compact representation of an XSLT stylesheet
tree (an Ft.Xml.Xslt.Stylesheet.Stylesheet object).
This is done with the most efficient parsing method available, and
avoids creating a Domlette document for each document it reads.
"""
# runtime instance variables
_input_source = None
_locator = None
_stylesheet = None
_root = None
def __init__(self):
self._import_index = 0
self._global_vars = {}
self._visited_stylesheet_uris = {}
self._document_state_stack = []
self._element_state_stack = []
self._extelements = {}
self._extelements.update(exslt.extension_elements)
self._extelements.update(extensions.extension_elements)
self._extelement_cache = {}
return
def reset(self):
self._root = None
self._import_index = 0
self._global_vars = {}
self._visited_stylesheet_uris = {}
self._document_state_stack = []
self._element_state_stack = []
return
def addExtensionElementMapping(self, elementMapping):
"""
Add a mapping of extension element names to classes to the
existing mapping of extension elements.
This should only be used for standalone uses of this class. The
only known standalone use for this class is for creating compiled
stylesheets. The benefits of compiled stylesheets are now so minor
that this use case may also disappear and then so will this function.
You have been warned.
"""
self._extelements.update(elementMapping)
for name in elementMapping:
if name in self._extelement_cache:
del self._extelement_cache[name]
return
# -- ContentHandler interface --------------------------------------
def setDocumentLocator(self, locator):
"""
Callback interface for SAX.
"""
# Save the current document state for nested parsing (inclusions)
document_state = (self._locator, self._stylesheet)
self._document_state_stack.append(document_state)
self._locator = locator
self._stylesheet = None
return
def startDocument(self):
"""
Callback interface for SAX.
"""
# Our root is always a document
# We use a document for this because of error checking and
# because we explicitly pass ownerDocument to the nodes as
# they are created
document_uri = self._locator.getSystemId()
root = xslt_root(document_uri)
if not self._root:
self._root = root
self._element_state_stack.append(
parse_state(node=root,
validation=_XSLT_ROOT_VALIDATION,
localVariables={},
forwardsCompatible=False,
currentNamespaces={'xml': XML_NAMESPACE, None: None},
extensionNamespaces={},
outputNamespaces={},
)
)
# for recursive include checks for xsl:include/xsl:import
self._visited_stylesheet_uris[document_uri] = True
# namespaces added for the next element
self._new_namespaces = {}
return
def endDocument(self):
"""
Callback interface for SAX.
"""
stack = self._element_state_stack
state = stack[-1]
del stack[-1]
root = state.node
# ----------------------------------------------------------
# remove URI from recursive inclusion checking
del self._visited_stylesheet_uris[root.baseUri]
# ----------------------------------------------------------
# finalize the children for the document
#root.children = tuple(state.nodes)
# ----------------------------------------------------------
# finalize the stylesheet AST
if stack:
# An xsl:import or xsl:include
# Merge the top-level elements into the "parent" stylesheet
# IMPLEMENTATION NOTE: stack[-1] is the import/include element,
# stack[-2] is the "parent" stylesheet
stack[-2].node._merge(self._stylesheet)
#parent_node = stack[-2].node
#for child in self._stylesheet.children:
# child.parent = parent_node
else:
# A top-most stylesheet
stylesheet = self._root.stylesheet
if stylesheet is not self._stylesheet:
# An additional stylesheet (e.g., an <?xml-stylesheet ...?>);
# treat it as an xsl:import into the "master" stylesheet.
stylesheet.reset()
# Always update the precedence from the included stylesheet
# because it may have contained imports thus increasing its
# import precedence.
self._import_index += 1
stylesheet.import_precedence = self._import_index
# Merge the top-level elements into the "master" stylesheet
stylesheet._merge(self._stylesheet)
#stylesheet.children += self._stylesheet.children
#for child in self._stylesheet.children:
# child.parent = stylesheet
else:
# Prepare for a possible subsequent parse.
self._import_index += 1
# Prepare the "master" stylesheet
stylesheet.setup()
document_state = self._document_state_stack[-1]
del self._document_state_stack[-1]
self._locator, self._stylesheet = document_state
return
def startPrefixMapping(self, prefix, uri):
"""
Callback interface for SAX.
"""
self._new_namespaces[prefix] = uri
return
def startElementNS(self, expandedName, qualifiedName, attribs,
_literal_element=literal_element.literal_element,
_element_classes=ELEMENT_CLASSES,
_element_cache={}, ):
"""
Callback interface for SAX.
"""
parent_state = self._element_state_stack[-1]
state = parse_state(**parent_state.__dict__)
self._element_state_stack.append(state)
# ----------------------------------------------------------
# update in-scope namespaces
if self._new_namespaces:
d = state.currentNamespaces = state.currentNamespaces.copy()
d.update(self._new_namespaces)
d = state.outputNamespaces = state.outputNamespaces.copy()
for prefix, uri in self._new_namespaces.iteritems():
if uri not in (XML_NAMESPACE, XSL_NAMESPACE):
d[prefix] = uri
# reset for next element
self._new_namespaces = {}
# ----------------------------------------------------------
# get the class defining this element
namespace, local = expandedName
xsl_class = ext_class = None
if namespace == XSL_NAMESPACE:
try:
xsl_class, validation, validation_token, legal_attrs = \
_element_cache[local]
except KeyError:
# We need to try to import (and cache) it
try:
xsl_class = _element_classes[local]
except KeyError:
if not state.forwardsCompatible:
raise XsltStaticError(XsltError.XSLT_ILLEGAL_ELEMENT,
parent_state.node, element=local)
xsl_class = fallback_elements.undefined_xslt_element
validation_token = content_model.RESULT_ELEMENT
else:
validation_token = expandedName
validation = xsl_class.content_model.compile()
legal_attrs = xsl_class.attribute_types.items()
_element_cache[local] = (
xsl_class, validation, validation_token, legal_attrs)
elif namespace in state.extensionNamespaces:
try:
ext_class, validation, legal_attrs = \
self._extelement_cache[expandedName]
except KeyError:
try:
ext_class = self._extelements[expandedName]
except KeyError:
ext_class = fallback_elements.undefined_extension_element
validation = ext_class.content_model.compile()
legal_attrs = ext_class.attribute_types
if legal_attrs is not None:
legal_attrs = legal_attrs.items()
self._extelement_cache[expandedName] = (
ext_class, validation, legal_attrs)
validation_token = content_model.RESULT_ELEMENT
else:
validation = _LITERAL_ELEMENT_VALIDATION
validation_token = content_model.RESULT_ELEMENT
state.validation = validation
# ----------------------------------------------------------
# verify that this element can be declared here
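        # The compiled content model acts as a transition table: indexing
        # the current validation state with this element's token yields the
        # next state, and a KeyError means this child is not allowed here.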
try:
next = parent_state.validation[validation_token]
except KeyError:
#self._debug_validation(expandedName)
# ignore whatever elements are defined within an undefined
# element as an exception will occur when/if this element
# is actually instantiated
if not isinstance(parent_state.node,
fallback_elements.undefined_extension_element):
raise XsltStaticError(XsltError.ILLEGAL_ELEMENT_CHILD,
parent_state.node, element=qualifiedName)
else:
# save this state for next go round
parent_state.validation = next
# ----------------------------------------------------------
# create the instance defining this element
klass = (xsl_class or ext_class or _literal_element)
state.node = instance = klass(self._root, expandedName, qualifiedName,
state.currentNamespaces)
instance.baseUri = self._locator.getSystemId()
instance.lineNumber = self._locator.getLineNumber()
instance.columnNumber = self._locator.getColumnNumber()
instance.import_precedence = self._import_index
if xsl_class: # -- XSLT element --------------------------------
# Handle attributes in the null-namespace
            standard_attributes = local in ('stylesheet', 'transform')
inst_dict = instance.__dict__
for attr_name, attr_info in legal_attrs:
attr_expanded = (None, attr_name)
if attr_expanded in attribs:
value = attribs[attr_expanded]
del attribs[attr_expanded]
elif attr_info.required:
raise XsltStaticError(XsltError.MISSING_REQUIRED_ATTRIBUTE,
instance, element=qualifiedName,
attribute=attr_name)
else:
value = None
try:
value = attr_info.prepare(instance, value)
except XsltError, e:
#raise self._mutate_exception(e, qualifiedName)
raise
                if standard_attributes:
self._stylesheet = instance
self._handle_standard_attr(state, instance, attr_name,
value)
else:
if '-' in attr_name:
attr_name = attr_name.replace('-', '_')
inst_dict['_' + attr_name] = value
if attribs:
# Process attributes with a namespace-uri and check for
# any illegal attributes in the null-namespace
for expanded in attribs:
attr_ns, attr_name = expanded
if attr_ns is None:
if not state.forwardsCompatible:
raise XsltStaticError(
XsltError.ILLEGAL_NULL_NAMESPACE_ATTR, instance,
attribute=attr_name, element=qualifiedName)
else:
instance.setAttribute(attr_ns, attr_name,
attribs[expanded])
# XSLT Spec 2.6 - Combining Stylesheets
if local in ('import', 'include'):
self._combine_stylesheet(instance, (local == 'import'))
elif ext_class: # -- extension element -------------------------
validate_attributes = (legal_attrs is not None)
if validate_attributes:
# Handle attributes in the null-namespace
inst_dict = instance.__dict__
for attr_name, attr_info in legal_attrs:
attr_expanded = (None, attr_name)
if attr_expanded in attribs:
value = attribs[attr_expanded]
del attribs[attr_expanded]
elif attr_info.required:
raise XsltStaticError(
XsltError.MISSING_REQUIRED_ATTRIBUTE, instance,
element=qualifiedName, attribute=attr_name)
else:
value = None
try:
value = attr_info.prepare(instance, value)
except XsltError, e:
#raise self._mutate_exception(e, qualifiedName)
raise
if '-' in attr_name:
attr_name = attr_name.replace('-', '_')
inst_dict['_' + attr_name] = value
# Process attributes with a namespace-uri and check for
# any illegal attributes in the null-namespace
if attribs:
for expanded in attribs:
attr_ns, attr_name = expanded
value = attribs[expanded]
if validate_attributes and attr_ns is None:
raise XsltStaticError(
XsltError.ILLEGAL_NULL_NAMESPACE_ATTR, instance,
attribute=attr_name, element=qualifiedName)
elif attr_ns == XSL_NAMESPACE:
self._handle_result_element_attr(state, instance,
qualifiedName,
attr_name, value)
else:
instance.setAttribute(attr_ns, attr_name, value)
else: # -- literal result element ------------------------------
output_attrs = []
for expanded in attribs:
attr_ns, attr_local = expanded
value = attribs[expanded]
if attr_ns == XSL_NAMESPACE:
self._handle_result_element_attr(state, instance,
qualifiedName,
attr_local, value)
else:
# prepare attributes for literal output
value = _RESULT_ELEMENT_ATTR_INFO.prepare(instance, value)
attr_qname = attribs.getQNameByName(expanded)
output_attrs.append((attr_qname, attr_ns, value))
# save information for literal output
instance._output_namespace = namespace
instance._output_nss = state.outputNamespaces
instance._output_attrs = output_attrs
# Check for top-level result-element in null namespace
if parent_state.node is self._stylesheet and \
not namespace and not state.forwardsCompatible:
raise XsltStaticError(XsltError.ILLEGAL_ELEMENT_CHILD,
parent_state.node, element=qualifiedName)
return
def endElementNS(self, expandedName, qualifiedName,
_literal_element=literal_element.literal_element,
_variable_element=variable_elements.variable_element):
"""
Callback interface for SAX.
"""
stack = self._element_state_stack
state = stack[-1]
del stack[-1]
parent_state = stack[-1]
element = state.node
# ----------------------------------------------------------
# verify that this element has all required content
try:
state.validation[content_model.END_ELEMENT]
except KeyError:
if expandedName == (XSL_NAMESPACE, u'choose'):
raise XsltStaticError(XsltError.MISSING_REQUIRED_ELEMENT,
element, element=element.nodeName,
child='xsl:when')
raise
# ----------------------------------------------------------
# setup variable context
if state.localVariables is not parent_state.localVariables:
# add context save/restore nodes
binding_stack = []
node = push_variables_node(self._root, binding_stack)
element.insertChild(0, node)
node = pop_variables_node(self._root, binding_stack)
element.appendChild(node)
# ----------------------------------------------------------
# finalize the children for this element
#element.children = tuple(state.nodes)
#for child in element.children:
# if child.doesSetup:
#s child.setup()
del state
# ----------------------------------------------------------
# update parent state
parent_node = parent_state.node
if self._stylesheet is None and parent_node is element.root:
# a literal result element as stylesheet
assert isinstance(element, _literal_element), element
try:
version = element._version
except AttributeError:
raise XsltStaticError(XsltError.LITERAL_RESULT_MISSING_VERSION,
element)
# Reset the root's validation as it has already seen an element.
parent_state.validation = _XSLT_ROOT_VALIDATION
# FIXME: use the prefix from the document for the XSL namespace
stylesheet = (XSL_NAMESPACE, u'stylesheet')
self.startElementNS(stylesheet, u'xsl:stylesheet',
{(None, u'version') : version})
template = (XSL_NAMESPACE, u'template')
self.startElementNS(template, u'xsl:template',
{(None, u'match') : u'/'})
# make this element the template's content
# Note, this MUST index the stack as the stack has changed
# due to the startElementNS() calls.
stack[-1].node.appendChild(element)
self.endElementNS(template, u'xsl:template')
self.endElementNS(stylesheet, u'xsl:stylesheet')
return
parent_node.appendChild(element)
if isinstance(element, _variable_element):
name = element._name
if parent_node is self._stylesheet:
# global variables
if name in self._global_vars:
existing = self._global_vars[name]
if self._import_index > existing:
self._global_vars[name] = self._import_index
elif self._import_index == existing:
raise XsltStaticError(XsltError.DUPLICATE_TOP_LEVEL_VAR,
element, variable=name)
else:
self._global_vars[name] = self._import_index
else:
# local variables
# it is safe to ignore import precedence here
local_vars = parent_state.localVariables
if name in local_vars:
raise XsltStaticError(XsltError.ILLEGAL_SHADOWING,
element, variable=name)
# Copy on use
if local_vars is stack[-2].localVariables:
local_vars = local_vars.copy()
parent_state.localVariables = local_vars
local_vars[name] = True
return
def characters(self, data):
"""
Callback interface for SAX.
"""
parent_state = self._element_state_stack[-1]
# verify that the current element can have text children
try:
next = parent_state.validation[content_model.TEXT_NODE]
except KeyError:
# If the parent can have element children, but not text nodes,
# ignore pure whitespace nodes. This clarification is from
# XSLT 2.0 [3.4] Whitespace Stripping.
# e.g. xsl:stylesheet, xsl:apply-templates, xsl:choose
            #self._debug_validation(content_model.TEXT_NODE)
            #if (content_model.EMPTY in parent_state.validation or
            #    not isspace(data)):
            # NOTE: the whitespace rules handed to the parser are expected
            # to have stripped ignorable whitespace already, so any text
            # node reaching this point is an error.
            if len(data) > 10:
                data = data[:10] + '...'
            raise XsltStaticError(XsltError.ILLEGAL_TEXT_CHILD,
                                  parent_state.node, data=data,
                                  element=parent_state.node.nodeName)
else:
# update validation
parent_state.validation = next
node = xslt_text(self._root, data)
parent_state.node.appendChild(node)
return
# -- utility functions ---------------------------------------------
def _combine_stylesheet(self, element, is_import):
href = element._href
try:
new_source = self._input_source.resolve(href,
self._input_source.uri)
except (OSError, IriError):
# FIXME: create special inputsource for 4xslt command-line
#for uri in self._alt_base_uris:
# try:
# new_href = self._input_source.getUriResolver().normalize(href, uri)
# #Do we need to figure out a way to pass the hint here?
# new_source = self._input_source.factory.fromUri(new_href)
# break
# except (OSError, IriError):
# pass
#else:
raise XsltStaticError(XsltError.INCLUDE_NOT_FOUND, element,
uri=href, base=self._locator.getSystemId())
# XSLT Spec 2.6.1, Detect circular references in stylesheets
# Note, it is NOT an error to include/import the same stylesheet
# multiple times, rather that it may lead to duplicate definitions
# which are handled regardless (variables, params, templates, ...)
if new_source.uri in self._visited_stylesheet_uris:
raise XsltStaticError(XsltError.CIRCULAR_INCLUDE, element,
uri=new_source.uri)
self.parse(new_source)
self._import_index += is_import
# Always update the precedence as the included stylesheet may have
# contained imports thus increasing the import precedence.
self._stylesheet.import_precedence = self._import_index
return
def _handle_standard_attr(self, state, instance, name, value):
if name == 'extension-element-prefixes':
# a whitespace separated list of prefixes
ext = state.extensionNamespaces = state.extensionNamespaces.copy()
out = state.outputNamespaces = state.outputNamespaces.copy()
for prefix in value:
# add the namespace URI to the set of extension namespaces
try:
uri = instance.namespaces[prefix]
except KeyError:
raise XsltStaticError(XsltError.UNDEFINED_PREFIX, instance,
prefix=prefix or '#default')
ext[uri] = True
# remove all matching namespace URIs
for output_prefix, output_uri in out.items():
if output_uri == uri:
del out[output_prefix]
elif name == 'exclude-result-prefixes':
# a whitespace separated list of prefixes
out = state.outputNamespaces = state.outputNamespaces.copy()
for prefix in value:
try:
uri = instance.namespaces[prefix]
except KeyError:
raise XsltStaticError(XsltError.UNDEFINED_PREFIX, instance,
prefix=prefix or '#default')
# remove all matching namespace URIs
for output_prefix, output_uri in out.items():
if output_uri == uri:
del out[output_prefix]
elif name == 'version':
# XSLT Spec 2.5 - Forwards-Compatible Processing
state.forwardsCompatible = (value != 1.0)
instance._version = value
else:
if '-' in name:
name = name.replace('-', '_')
instance.__dict__['_' + name] = value
return
def _handle_result_element_attr(self, state, instance, elementName,
attributeName, value):
try:
attr_info = _RESULT_ELEMENT_XSL_ATTRS[attributeName]
except KeyError:
raise XsltStaticError(XsltError.ILLEGAL_XSL_NAMESPACE_ATTR,
instance, attribute=attributeName,
element=elementName)
value = attr_info.prepare(instance, value)
self._handle_standard_attr(state, instance, attributeName, value)
return
def _mutate_exception(self, exception, elementName):
assert isinstance(exception, XsltError)
exception.message = MessageSource.EXPRESSION_POSITION_INFO % (
self._locator.getSystemId(), self._locator.getLineNumber(),
self._locator.getColumnNumber(), elementName, exception.message)
return exception
# -- debugging routines --------------------------------------------
def _debug_validation(self, token=None):
from pprint import pprint
state = self._element_state_stack[-1]
parent = state.node
print '='*60
print 'parent =',parent
print 'parent class =',parent.__class__
print 'parent content =', parent.content_model
print 'initial validation'
pprint(parent.content_model.compile())
print 'current validation'
pprint(state.validation)
if token:
print 'token', token
print '='*60
return
# -- parsing routines ----------------------------------------------
def fromDocument(self, document, baseUri='', factory=None):
"""
Read in a stylesheet source document from a Domlette and add it to
the stylesheet tree. If a document with the same URI has already been
read, the cached version will be used instead (so duplicate imports,
includes, or stylesheet appends do not result in multiple reads).
"""
if not baseUri:
if hasattr(document, 'documentURI'):
baseUri = document.documentURI
elif hasattr(document, 'baseURI'):
baseUri = document.baseURI
else:
raise TypeError('baseUri required')
if factory is None:
factory = inputsource.default_factory
# check cache
if self._root is not None:
# We prefer to use an already-parsed doc, as it has had its
# external entities and XIncludes resolved already
            if baseUri in self._root.sourceNodes:
document = self._root.sourceNodes[baseUri]
# It's OK to use cached string content, but we have no idea
# whether we're using the same InputSource class as was used to
# parse it the first time, and we don't cache external entities
# or XIncludes, so there is the possibility of those things
# being resolved differently this time around. Oh well.
            elif baseUri in self._root.sources:
content = self._root.sources[baseUri]
isrc = factory.fromString(content, baseUri)
# temporarily uncache it so `parse()` will process it;
# `parse()` will add it back to the cache when finished
del self._root.sources[baseUri]
return self.parse(isrc)
isrc = factory.fromStream(None, baseUri)
features = []
properties = [(property_dom_node, document)]
stylesheet = self._parseSrc(isrc, features, properties)
# Cache for XSLT document() function
self._root.sourceNodes[baseUri] = document
return stylesheet
def parse(self, source):
"""
Read in a stylesheet source document from an InputSource and add it to
the stylesheet tree. If a document with the same URI has already been
read, the cached version will be used instead (so duplicate imports,
includes, or stylesheet appends do not result in multiple reads).
"""
uri = source.uri
#Check cache
content = ''
if self._root is not None:
# We prefer to use an already-parsed doc, as it has had its
# external entities and XIncludes resolved already
if uri in self._root.sourceNodes:
doc = self._root.sourceNodes[uri]
# temporarily uncache it so fromDocument will process it;
# fromDocument will add it back to the cache when finished
del self._root.sourceNodes[uri]
return self.fromDocument(doc, baseUri=uri)
# It's OK to use cached string content, but we have no idea
# whether we're using the same InputSource class as was used to
# parse it the first time, and we don't cache external entities
# or XIncludes, so there is the possibility of those things
# being resolved differently this time around. Oh well.
elif uri in self._root.sources:
content = self._root.sources[uri]
source = inputsource(content, uri)
if not content:
content = source.stream.read()
source = inputsource(cStringIO.StringIO(content), source.uri)
#features = [(sax.FEATURE_PROCESS_XINCLUDES, True)]
features, properties = [], []
stylesheet = self._parseSrc(source, features, properties)
# Cache the string content for subsequent uses
# e.g., xsl:import/xsl:include and document()
self._root.sources[uri] = content
return stylesheet
def _parseSrc(self, isrc, features, properties):
parser = sax.create_parser()
parser.setContentHandler(self)
for featurename, value in features:
parser.setFeature(featurename, value)
# Always set whitespace rules property
parser.setProperty(sax.PROPERTY_WHITESPACE_RULES,
_XSLT_WHITESPACE_STRIPPING)
for propertyname, value in properties:
parser.setProperty(propertyname, value)
prev_source = self._input_source
try:
self._input_source = isrc
try:
parser.parse(isrc)
except SAXParseException, e:
e = e.getException() or e
if isinstance(e, XsltError):
raise e
raise XsltError(XsltError.STYLESHEET_PARSE_ERROR,
uri=isrc.uri, text=str(e))
finally:
self._input_source = prev_source
return self._root.stylesheet
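# Illustrative sketch (hypothetical usage, added for exposition): reading a
# stylesheet tree from a local file. The file name is made up, and the
# one-argument inputsource() call is an assumption about its signature.
def _example_reader_usage():
    reader = stylesheet_reader()
    isrc = inputsource('transform.xsl')       # hypothetical input document
    return reader.parse(isrc)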
Amara | /Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/reader/__init__.py
import itertools
from amara.xpath import parser as xpath_parser
from amara.xpath import locationpaths
from amara.xpath.locationpaths import axisspecifiers
from amara.xpath.locationpaths import nodetests
from amara.xpath.functions import nodesets
from amara.xpath.expressions import booleans
import amara.xpath.expressions.nodesets # another nodesets!
from amara.xpath.expressions import basics
counter = itertools.count(1)
# [@x]
class AttributeExistsPred(object):
def __init__(self, name):
self.name = name
# [@x="a"]
class AttributeBinOpPred(object):
def __init__(self, name, op, value):
self.name = name
self.op = op
self.value = value
class AttributeFunctionCallPred(object):
def __init__(self, func):
self.func = func
#####
# This would yield nodes, attribute, PIs, and comments
#class AnyTest(object):
# pass
class BaseNodeTest(object):
match_type = "node"
# What about *[@spam] ?
class AnyNodeTest(BaseNodeTest):
def __str__(self):
return "AnyNode (*)"
class NodeTest(BaseNodeTest):
def __init__(self, name, predicates):
# (ns, name)
self.name = name
self.predicates = predicates
def __str__(self):
return "Node ns=%r localname=%r predicates=%r" % (self.name[0], self.name[1],
self.predicates)
# predicates make no sense here because we only support downward axes
# and these have no downward axes. (XXX I think.)
class AttributeTest(object):
match_type = "attr"
def __init__(self, name, predicates):
self.name = name
assert not predicates
self.predicates = predicates
def __str__(self):
return "Attr name=%r" % (self.name,)
class ProcessingInstructionTest(object):
match_type = "processing-instruction"
def __init__(self, target):
self.target = target
def __str__(self):
return "processing-instruction(%r)" % (self.target,)
class CommentTest(object):
match_type = "comment"
class NFA(object):
def __init__(self):
self.start_edges = []
self.edges = {} # from_node_id -> [(to_node_id, test), ...]
self.terminal_nodes = set()
# The start node has no type
self.match_types = {} # node_id -> match_type
self.labeled_handlers = {} # node_id -> (label, PushtreeHandler)
def copy(self):
nfa = NFA()
nfa.start_edges[:] = self.start_edges
nfa.edges.update(self.edges)
nfa.terminal_nodes.update(self.terminal_nodes)
nfa.match_types.update(self.match_types)
nfa.labeled_handlers.update(self.labeled_handlers)
return nfa
def get_edges(self, node_id):
if node_id is None:
return self.start_edges
return self.edges[node_id]
def add_handler(self, labeled_handler):
for node_id in self.terminal_nodes:
self.labeled_handlers[node_id].append(labeled_handler)
def new_node(self, from_node_id, test):
edges = self.get_edges(from_node_id)
to_node_id = next(counter)
self.edges[to_node_id] = []
self.match_types[to_node_id] = test.match_type
self.labeled_handlers[to_node_id] = []
edges.append( (to_node_id, test) )
return to_node_id
def connect(self, from_node_id, to_node_id, test):
self.get_edges(from_node_id).append( (to_node_id, test) )
def extend(self, other):
assert not set(self.edges) & set(other.edges), "non-empty intersection"
if not self.start_edges:
self.start_edges[:] = other.start_edges
self.edges.update(other.edges)
self.match_types.update(other.match_types)
for node_id in self.terminal_nodes:
self.edges[node_id].extend(other.start_edges)
self.terminal_nodes.clear()
self.terminal_nodes.update(other.terminal_nodes)
self.labeled_handlers.update(other.labeled_handlers)
def union(self, other):
assert not set(self.edges) & set(other.edges), "non-empty intersection"
self.start_edges.extend(other.start_edges)
self.edges.update(other.edges)
self.match_types.update(other.match_types)
self.terminal_nodes.update(other.terminal_nodes)
self.labeled_handlers.update(other.labeled_handlers)
    def dump(self):
        for node_id, edges in [(None, self.start_edges)] + sorted(self.edges.items()):
            if node_id is None:
                node_name = "(start)"
                labels = ""
            else:
                node_name = str(node_id)
                # show this node's match type and its handler labels
                labels = " %s %s" % (self.match_types[node_id],
                                     [x[0] for x in self.labeled_handlers[node_id]])
            is_terminal = "(terminal)" if (node_id in self.terminal_nodes) else ""
            print node_name, is_terminal, labels
            self._dump_edges(edges)
        print "======"
def _dump_edges(self, edges):
for (to_node_id, test) in edges:
print "", test, "->", to_node_id
def _add_initial_loop(nfa):
start_edges = nfa.start_edges[:]
any_node = nfa.new_node(None, AnyNodeTest())
for (to_node_id, test) in start_edges:
nfa.connect(any_node, to_node_id, test)
nfa.connect(any_node, any_node, AnyNodeTest()) # loop
def to_nfa(expr, namespaces):
#print "Eval", expr.__class__
if (expr.__class__ is locationpaths.relative_location_path):
# This is a set of path specifiers like
# "a" "a/b", "a/b/c[0]/d[@x]" (relative location path)
# "@a", "a/@b", and even "@a/@b", which gives nothing
nfa = NFA()
for step in expr._steps:
nfa.extend(to_nfa(step, namespaces))
_add_initial_loop(nfa)
return nfa
if (expr.__class__ is locationpaths.absolute_location_path):
# This is an absolute path like
# "/a", "/a[0]/b[@x]"
nfa = NFA()
for step in expr._steps:
axis = step.axis
axis_name = axis.name
assert axis_name in ("child", "descendant"), axis_name
subnfa = to_nfa(step, namespaces)
if axis_name == "descendant":
_add_initial_loop(subnfa)
nfa.extend(subnfa)
return nfa
if (expr.__class__ is locationpaths.abbreviated_absolute_location_path):
# This is an abbreviated_absolute_location_path
# "//a", "a//b"
nfa = NFA()
for step in expr._steps:
nfa.extend(to_nfa(step, namespaces))
_add_initial_loop(nfa)
return nfa
if expr.__class__ is locationpaths.location_step:
# This is a step along some axis, such as:
# "a" - step along the child axis
# "a[@x][@y='1']" - step along the child axis, with two predicates
# "@a" - step along the attribute axis
axis = expr.axis
axis_name = axis.name
assert axis_name in ("child", "descendant", "attribute"), axis_name
if axis_name == "attribute":
klass = AttributeTest
else:
klass = NodeTest
nfa = NFA()
node_test = expr.node_test
if node_test.__class__ is nodetests.local_name_test:
# Something without a namespace, like "a"
node_id = nfa.new_node(None,
klass(node_test.name_key, expr.predicates))
elif node_test.__class__ is nodetests.namespace_test:
# Namespace but no name, like "a:*"
namespace = namespaces[node_test._prefix]
node_id = nfa.new_node(None,
klass((namespace, None), expr.predicates))
elif node_test.__class__ is nodetests.qualified_name_test:
prefix, localname = node_test.name_key
namespace = namespaces[prefix]
node_id = nfa.new_node(None,
klass((namespace, localname), expr.predicates))
elif node_test.__class__ is nodetests.processing_instruction_test:
node_id = nfa.new_node(None,
ProcessingInstructionTest(node_test._target))
elif node_test.__class__ is locationpaths.nodetests.principal_type_test:
node_id = nfa.new_node(None,
klass((None, None), None))
else:
die(node_test)
nfa.terminal_nodes.add(node_id)
#if axis_name == "descendant":
# _add_initial_loop(nfa)
#print "QWERQWER"
#nfa.dump()
return nfa
if expr.__class__ is amara.xpath.expressions.nodesets.union_expr:
# "a|b"
nfa = to_nfa(expr._paths[0], namespaces)
for path in expr._paths[1:]:
nfa.union(to_nfa(path, namespaces))
return nfa
die(expr)
def node_intersect(parent_test, child_test):
if parent_test is not None:
assert isinstance(parent_test, BaseNodeTest), parent_test
assert getattr(parent_test, "predicates", None) is None
assert isinstance(child_test, BaseNodeTest), child_test
assert getattr(child_test, "predicates", None) is None
if parent_test is None:
if isinstance(child_test, AnyNodeTest):
return True, False
if isinstance(child_test, NodeTest):
return child_test, False
if isinstance(parent_test, AnyNodeTest):
if isinstance(child_test, AnyNodeTest):
return True, False
if isinstance(child_test, NodeTest):
return child_test, False
elif isinstance(parent_test, NodeTest):
if isinstance(child_test, AnyNodeTest):
return True, True
if isinstance(child_test, NodeTest):
# XXX This is wrong. Resolved namespaces can be the same even
# if the namespace fields are different.
# XXX check for predicates!
if parent_test.name == child_test.name:
return True, False
return False, child_test
def attr_intersect(parent_test, child_test):
if parent_test is not None:
assert isinstance(parent_test, AttributeTest), parent_test
assert isinstance(child_test, AttributeTest), child_test
if parent_test is None:
return child_test, False
if parent_test.name == child_test.name:
return True, False
return False, child_test
def pi_intersect(parent_test, child_test):
if parent_test is not None:
assert isinstance(parent_test, ProcessingInstructionTest), parent_test
assert isinstance(child_test, ProcessingInstructionTest), child_test
if parent_test is None:
return child_test, False
# Is there any way to match *any* PI?
    # Looks like Amara supports XPath 1.0, where this is a string
if parent_test.target == child_test.target:
return True, False
return False, child_test
def comment_intersect(parent_test, child_test):
if parent_test is not None:
assert isinstance(parent_test, CommentTest), parent_test
assert isinstance(child_test, CommentTest), child_test
return True, False
# Used to make a decision tree. Either the test passes or it fails.
# TODO: something more sophisticated? For example, if there are a
# large number of element tag tests then sort the tags and start
# in the middle. Should give O(log(number of tags)) performance
# instead of O(n). However, for now, n is no more than 10 or so.
class Branch(object):
def __init__(self, test, if_true, if_false):
self.test = test
self.if_true = if_true
self.if_false = if_false
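# Illustrative sketch (added for exposition; assumes a Branch tree whose
# tests are the node tests defined above): how one of these decision
# trees is walked for an element name at match time. This mirrors the
# tuple-based walk compiled in build_states and executed in
# RuleMachineHandler.startElementNS; start from the root branch's
# if_true side, as StateTable.dump does.
def _walk_branch(tree, name):
    while not isinstance(tree, set):
        ns, localname = tree.test.name
        if ((ns is None or ns == name[0]) and
            (localname is None or localname == name[1])):
            tree = tree.if_true
        else:
            tree = tree.if_false
    return tree  # the set of NFA node ids reached, possibly empty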
class StateTable(object):
def __init__(self, match_type, nfa, nfa_node_ids):
self.match_type = match_type # 'None' for the start node
self.nfa = nfa
self.nfa_node_ids = nfa_node_ids
self.node_tree = Branch(None, set(), set())
self.attr_tree = Branch(None, set(), set())
self.pi_tree = Branch(None, set(), set())
self.comment_tree = Branch(None, set(), set())
def add(self, test, to_node_id):
if isinstance(test, BaseNodeTest):
self._add(self.node_tree, test, to_node_id, node_intersect)
elif isinstance(test, AttributeTest):
self._add(self.attr_tree, test, to_node_id, attr_intersect)
elif isinstance(test, ProcessingInstructionTest):
self._add(self.pi_tree, test, to_node_id, pi_intersect)
elif isinstance(test, CommentTest):
self._add(self.comment_tree, test, to_node_id, comment_intersect)
else:
raise AssertionError(test)
def _add(self, tree, test, to_node_id, intersect):
new_true_test, new_false_test = intersect(tree.test, test)
if new_true_test == True:
self._add_to_leaves(tree.if_true, to_node_id)
elif new_true_test:
if isinstance(tree.if_true, set):
new_branch = Branch(new_true_test,
tree.if_true | set([to_node_id]),
tree.if_true)
tree.if_true = new_branch
else:
self._add(tree.if_true, new_true_test, to_node_id, intersect)
if new_false_test == True:
self._add_to_leaves(tree.if_false, to_node_id)
elif new_false_test:
if isinstance(tree.if_false, set):
new_branch = Branch(new_false_test,
tree.if_false | set([to_node_id]),
tree.if_false)
tree.if_false = new_branch
else:
self._add(tree.if_false, new_false_test, to_node_id, intersect)
def _add_to_leaves(self, tree, to_node_id):
if isinstance(tree, set):
tree.add(to_node_id)
else:
self._add_to_leaves(tree.if_true, to_node_id)
self._add_to_leaves(tree.if_false, to_node_id)
def get_final_nodes(self):
result = {}
for match_type, tree in ( (BaseNodeTest.match_type, self.node_tree),
(AttributeTest.match_type, self.attr_tree),
(ProcessingInstructionTest.match_type, self.pi_tree),
(CommentTest.match_type, self.comment_tree) ):
visit = [tree]
while visit:
node = visit.pop()
if isinstance(node, set):
if node:
result[frozenset(node)] = match_type
elif node is not None:
visit.append(node.if_true)
visit.append(node.if_false)
return result.items()
def dump(self, numbering):
# Do I report anything for having reached here?
if list(self.nfa_node_ids) != [None]:
for nfa_node in self.nfa_node_ids:
labels = [x[0] for x in self.nfa.labeled_handlers[nfa_node]]
if labels:
print "Report", self.nfa.match_types[nfa_node], labels
for (name, tree) in ( ("NODE", self.node_tree),
("ATTR", self.attr_tree),
("PROCESSING-INSTRUCTION", self.pi_tree),
("COMMENT", self.comment_tree) ):
if tree is None:
print " No", name, "tree"
else:
print name, "tree:"
# The first branch is always true
self._dump(tree.if_true, 0, numbering)
def _dump(self, tree, depth, numbering):
s = "-"*depth
if isinstance(tree, set):
if tree:
k = sorted(tree)
print s, "<>", numbering[frozenset(tree)], k
else:
print s, "<> (empty)"
else:
print s, tree.test, "?"
self._dump(tree.if_true, depth+1, numbering)
self._dump(tree.if_false, depth+1, numbering)
def all_transitions(nfa, current_dfa):
transitions = []
for node_id in current_dfa:
if node_id is None:
new_transitions = nfa.start_edges
else:
# XXX I can't transition from something
# which wasn't a node or a record
match_type = nfa.match_types[node_id]
if match_type not in ("node", "record"):
continue
new_transitions = nfa.edges[node_id]
transitions.extend(new_transitions)
return transitions
def transition(nfa_state, event):
    # Yield the targets of edges whose test matches the given event;
    # assumes an edge list of (to_node_id, test) pairs as built above.
    for (to_node_id, test) in nfa_state.edges:
        if test == event:
            yield to_node_id
# Raymond's code
def nfa_to_dfa(nfa):
numbering = {} # from frozenset -> 0, 1, 2, ...
dfa_start = frozenset([None]) # nfa start node
result = {} # []
seen = set([dfa_start])
todo = [(dfa_start, None)]
while todo:
current_dfa, match_type = todo.pop()
#print "All transitions from", current_dfa
transitions = all_transitions(nfa, current_dfa)
if not transitions:
# Make sure there's always a target.
# This also stores any handler events
result[current_dfa] = StateTable(match_type, nfa, current_dfa)
numbering[current_dfa] = len(numbering)
continue
# This adds element, attribute, comment, etc. transitions
state_table = StateTable(match_type, nfa, current_dfa)
for to_node_id, test in transitions:
state_table.add(test, to_node_id)
for nfa_nodes, match_type in state_table.get_final_nodes():
some_dfa = frozenset(nfa_nodes)
if some_dfa not in seen:
seen.add(some_dfa)
todo.append( (some_dfa, match_type) )
result[current_dfa] = state_table
numbering[current_dfa] = len(numbering)
# for k, table in sorted(result.items(), key=lambda x:sorted(x[0])):
# print "State", sorted(k)
# table.dump()
return result, numbering
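# Usage sketch (illustrative only, relying solely on functions defined in
# this module): compile one XPath to the DFA tables and print each
# state's decision trees in state-number order.
def _demo_nfa_to_dfa(xpath=u"a/b", namespaces=None):
    nfa = to_nfa(xpath_parser.parse(xpath), namespaces or {})
    dfa, numbering = nfa_to_dfa(nfa)
    for node_ids in sorted(dfa, key=numbering.get):
        print "State", numbering[node_ids]
        dfa[node_ids].dump(numbering)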
def die(expr):
import inspect
print " == FAILURE =="
print type(expr)
print dir(expr)
for k, v in inspect.getmembers(expr):
if k.startswith("__") and k.endswith("__"):
continue
print repr(k), repr(v)
raise AssertionError(expr)
def build_states(nfa, dfa, numbering):
# unique node numbers
states = []
for dfa_id, node_ids in sorted( (dfa_id, node_ids)
for (node_ids, dfa_id) in numbering.items() ):
assert dfa_id == len(states)
if dfa_id == 0:
assert node_ids == set([None])
# handlers (which are in (id, class) pairs)
table = dfa[node_ids]
if dfa_id == 0:
handlers = ()
else:
handler_map = {}
for node_id in node_ids:
for (label, handler) in nfa.labeled_handlers[node_id]:
handler_map[label] = handler
            # These are PushtreeHandler instances. We can't just collect
            # the bound methods we need here, because startElementMatch
            # and endElementMatch are different methods on the same
            # handler object.
handlers = []
for (label, handler) in sorted(handler_map.items()):
handlers.append(handler)
# node tree
tree = table.node_tree.if_true
if isinstance(tree, set):
# Special case when there are no decisions to make
if not tree:
node_ops = [] # ... because there are no states
            else:
                # 5-tuple (ns, ln, test_function, if_true, if_false), as
                # unpacked in startElementNS; the wildcard test always
                # matches, so the if_false slot (0) is never taken
                node_ops = [(None, None, None,
                             -numbering[frozenset(tree)], 0)]
else:
node_ops = [tree]
todo = [0]
while todo:
i = todo.pop()
#print "Access", i, len(node_ops)
tree = node_ops[i]
if isinstance(tree.if_true, set):
if tree.if_true:
if_true = -numbering[frozenset(tree.if_true)]
else:
if_true = 0
else:
if_true = len(node_ops)
node_ops.append(tree.if_true)
if isinstance(tree.if_false, set):
if tree.if_false:
if_false = -numbering[frozenset(tree.if_false)]
else:
if_false = 0
else:
if_false = len(node_ops)
node_ops.append(tree.if_false)
namespace, localname = tree.test.name
node_ops[i] = (namespace, localname, None, if_true, if_false)
#print "Added", node_ops[i]
if if_false > 0:
todo.append(if_false)
if if_true > 0:
todo.append(if_true)
node_ops = tuple(node_ops)
# attr tree
attr_ops = []
tree = table.attr_tree.if_true
while not isinstance(tree, set):
namespace, localname = tree.test.name
attr_ops.append( (namespace, localname, numbering[frozenset(tree.if_true)]) )
tree = tree.if_false
if tree:
# Match any attribute
attr_ops.append( (None, None, numbering[frozenset(tree)]) )
attr_ops = tuple(attr_ops)
# processing instruction tree
pi_ops = []
tree = table.pi_tree.if_true
while not isinstance(tree, set):
target = tree.test.target
pi_ops.append( (target, numbering[frozenset(tree.if_true)]) )
tree = tree.if_false
if tree:
pi_ops.append( (None, numbering[frozenset(tree)]) )
pi_ops = tuple(pi_ops)
# comment tree
tree = table.comment_tree.if_true
assert isinstance(tree, set)
if tree:
comment_state = numbering[frozenset(tree)]
else:
comment_state = 0
states.append( (handlers, node_ops, attr_ops, pi_ops, comment_state) )
return tuple(states)
class Expression(object):
def __init__(self, id, xpath, nfa):
self.id = id
self.xpath = xpath
self._nfa = nfa
def nfas_to_machine_states(nfas):
union_nfa = nfas[0].copy() # XXX start with empty and union everything?
for nfa in nfas[1:]:
union_nfa.union(nfa)
dfa, numbering = nfa_to_dfa(union_nfa)
return build_states(union_nfa, dfa, numbering)
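# Sketch (illustrative only): machine states can also be built from raw
# XPaths without PushtreeManager (defined below); no handlers are
# attached here, so the handler slots come out empty.
def _demo_machine_states():
    nfas = [to_nfa(xpath_parser.parse(x), {}) for x in (u"a/b", u"//c")]
    dump_machine_states(nfas_to_machine_states(nfas))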
class PushtreeManager(object):
def __init__(self, subtree_xpath, subtree_handler = None, namespaces = None):
if namespaces is None:
namespaces = {}
self.namespaces = namespaces
self.expressions = []
self._add(subtree_xpath, subtree_handler)
def _add(self, xpath, xpath_handler):
nfa = to_nfa(xpath_parser.parse(xpath), self.namespaces)
i = len(self.expressions)
nfa.add_handler((i, xpath_handler))
exp = Expression(i, xpath, nfa)
self.expressions.append(exp)
return exp
def add(self, xpath, xpath_handler=None):
return self._add(xpath, xpath_handler)
def _build_machine_states(self):
return nfas_to_machine_states([x._nfa for x in self.expressions])
def build_pushtree_handler(self):
return RuleMachineHandler(self._build_machine_states())
# Special handler object to bridge with pushbind support in the builder
# Implemented by beazley. Note: This is not a proper SAX handler
class RuleMachineHandler(object):
def __init__(self, machine_states):
self.machine_states = machine_states
def startDocument(self,node):
self.stack = [0]
#dump_machine_states(self.machine_states)
def startElementNS(self, node, name, qname, attrs):
state = self.stack[-1]
#print "startElementNS", name, qname, attrs, "state", state
if state == -1:
#print "goto -1"
self.stack.append(-1)
return
element_ops = self.machine_states[state][1]
if not element_ops:
# This was a valid target, but there's nothing leading off from it
#print "GOTO -1"
self.stack.append(-1)
return
namespace, localname = name
i = 0
while 1:
ns, ln, test_function, if_true, if_false = element_ops[i]
assert test_function is None
if ((ns is None or ns == namespace) and
(ln is None or ln == localname)):
i = if_true
else:
i = if_false
if i == 0:
# dead-end; no longer part of the DFA and the
# 0 node is defined to have no attributes
self.stack.append(-1)
return
if i < 0:
next_state = -i
break
# otherwise, loop
#print "GoTo", next_state
self.stack.append(next_state)
handlers = self.machine_states[next_state][0]
for handler in handlers:
handler.startElementMatch(node)
# Also handle any attributes
attr_ops = self.machine_states[next_state][2]
if not attr_ops:
return
for namespace, localname in attrs.keys():
for (ns, ln, attr_state_id) in attr_ops:
#print "attr test:", (ns, ln), (namespace, localname)
if ((ns is None or namespace == ns) and
(ln is None or localname == ln)):
# Match!
handlers = self.machine_states[attr_state_id][0]
for handler in handlers:
#print "Notify attribute match:", event_ids, (namespace, localname)
# This is a hack until I can figure out how to get
# the attribute node
handler.attributeMatch( (node, (namespace, localname) ) )
def endElementNS(self, node, name, qname):
#print "endElementNS", node, name, qname
last_state = self.stack.pop()
if last_state == -1:
return
handlers = self.machine_states[last_state][0]
for handler in reversed(handlers):
handler.endElementMatch(node)
def processingInstruction(self, node, target, data):
state = self.stack[-1]
if state == -1:
return
pi_ops = self.machine_states[state][3]
for (pi_target, pi_state) in pi_ops:
if pi_target == target:
handlers = self.machine_states[pi_state][0]
for handler in handlers:
handler.processingInstruction(node)
# For Dave
class RulePatternHandler(RuleMachineHandler):
    # Sketch: the original body referenced ExpressionManager and
    # build_instructions, neither of which is defined in this module;
    # this assumes PushtreeManager provides the equivalent machine states.
    def __init__(self, pattern, end_node_handler, attr_handler, namespaces=None):
        self.end_node_handler = end_node_handler
        self.attr_handler = attr_handler
        self.xpm = xpm = PushtreeManager(pattern, namespaces=namespaces)
        RuleMachineHandler.__init__(self, xpm._build_machine_states())
def dump_machine_states(machine_states):
for i, x in enumerate(machine_states):
print "== INFO FOR", i, "=="
handlers, node_ops, attr_ops, pi_ops, comment_state = x
print " HANDLERS", handlers
print " NODE OPS"
for node_op in node_ops:
print node_op
print " ATTR OPS"
for attr_op in attr_ops:
print attr_op
print " PI OPS"
for pi_op in pi_ops:
print pi_op
print " COMMENT STATE =", comment_state
class PushtreeHandler(object):
def startSubtree(self, element):
pass
def endSubtree(self, element):
pass
def startElementMatch(self, node):
pass
def endElementMatch(self, node):
pass
def attributeMatch(self, node):
pass
def commentMatch(self, node):
pass
def processingInstructionMatch(self, node):
pass
class VerbosePushtreeHandler(PushtreeHandler):
def __init__(self, prefix=None):
if prefix is None:
prefix = ""
else:
prefix = "(%s) " % (prefix,)
self.prefix = prefix
def startSubtree(self, element):
print self.prefix+"startSubtree", element
def endSubtree(self, element):
print self.prefix+"endSubtree", element
def startElementMatch(self, node):
print self.prefix+"startElementMatch", node
def endElementMatch(self, node):
print self.prefix+"endElementMatch", node
def attributeMatch(self, node):
print self.prefix+"attributeMatch", node
def commentMatch(self, node):
print self.prefix+"commentMatch", node
def processingInstructionMatch(self, node):
print self.prefix+"processingInstructionMatch", node
if __name__ == '__main__':
testxml = """\
<body>
<li>Ignore me<b/></li>
<ul>
<li x='1'>This <i>is</i> test</li>
<li x='2'><a href='spam'>that</a> was nothing</li>
</ul>
</body>
"""
manager = PushtreeManager("body/ul/li", VerbosePushtreeHandler("main"))
manager.expressions[0]._nfa.dump()
manager.add("pre/post", VerbosePushtreeHandler("pre/post"))
manager.expressions[1]._nfa.dump()
manager.add("//a", VerbosePushtreeHandler("//a"))
manager.expressions[2]._nfa.dump()
manager.add("@x", VerbosePushtreeHandler("@x"))
manager.expressions[3]._nfa.dump()
manager.add("a", VerbosePushtreeHandler("a"))
manager.expressions[4]._nfa.dump()
#manager.add(".//*")
machine_states = manager._build_machine_states()
dump_machine_states(machine_states)
hand = RuleMachineHandler(machine_states)
import os
doc = amara.parse(testxml,rule_handler=hand)
os._exit(0)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/pushtree/pushtree_nfa.py
|
pushtree_nfa.py
|
from xml.dom import Node as stdlib_node
from xml.dom import EMPTY_NAMESPACE, XML_NAMESPACE
from xml.dom import NoModificationAllowedErr, NamespaceErr, NotFoundErr
from xml.dom import NotSupportedErr, HierarchyRequestErr, WrongDocumentErr
from xml.dom import IndexSizeErr, InvalidCharacterErr#, UnspecifiedEventTypeErr
from functools import *
from amara.lib.xmlstring import *
from amara import tree
import copy
import itertools
import warnings
class Node(object):
nodeValue = None
nodeName = None
attributes = None
readOnly = False
childNodes = []
firstChild = None
lastChild = None
previousSibling = None
nextSibling = None
_ownerDocument = None
def _set_ownerdoc(self, owner):
self._ownerDocument = owner
@property
def ownerDocument(self): return self._ownerDocument
@property
def localName(self): return self.xml_local
@property
def namespaceURI(self): return self.xml_namespace
@property
def prefix(self): return self.xml_prefix
@property
def nodeType(self): return self.xml_type
#@property
#def ownerDocument(self):
# result = self.xml_select(u'/')
# return result[0] if result else None
@property
def parentNode(self): return self.xml_parent
#def normalize(self): return self.xml_normalize()
#def isSupported(self)
def isSameNode(self, other): return self is other
#def isSupported(self)
def cloneNode(self, deep=False): return copy.deepcopy(self) if deep else copy.copy(self)
def hasChildNodes(self): return False
#Don't just assign the functions since there are subtle differences we'll need to fix up
def appendChild(self, newChild):
raise HierarchyRequestErr("%s does not support child node operations" % (repr(self)))
def replaceChild(self, newChild, oldChild):
raise HierarchyRequestErr("%s does not support child node operations" % (repr(self)))
def removeChild(self, node):
raise HierarchyRequestErr("%s does not support child node operations" % (repr(self)))
def appendChild(self, node):
raise HierarchyRequestErr("%s does not support child node operations" % (repr(self)))
def isSupported(self, feature, version):
return self.ownerDocument.implementation.hasFeature(feature, version)
def getInterface(self, feature):
if self.isSupported(feature, None):
return self
else:
return None
class _container(Node):
@property
def childNodes(self): return self.xml_children
@property
def firstChild(self): return self.xml_first_child
@property
def lastChild(self): return self.xml_last_child
@property
def previousSibling(self): return self.xml_preceding_sibling
@property
def nextSibling(self): return self.xml_following_sibling
def hasChildNodes(self): return bool(self.xml_children)
#Don't just assign the functions since there are subtle differences we'll need to fix up
def appendChild(self, node): return self.xml_append(node)
def replaceChild(self, newChild, oldChild): return self.xml_replace(oldChild, newChild)
def removeChild(self, node): return self.xml_remove(node)
    def insertBefore(self, newChild, refChild):
        index = self.xml_index(refChild)
        return self.xml_insert(index, newChild)
def getElementsByTagNameNS(self, namespaceURI, localName): return ( e for e in self.xml_select(u'//*') if (e.xml_namespace, e.xml_local) == (namespaceURI, localName))
class DocumentFragment(_container, tree.entity):
nodeType = stdlib_node.DOCUMENT_FRAGMENT_NODE
nodeName = "#document-fragment"
class ProcessingInstruction(Node, tree.processing_instruction):
@property
def nodeName(self): return self.xml_target
@property
def target(self): return self.xml_target
@property
def data(self): return self.xml_data
class Attr(Node, tree.attribute):
specified = False
@property
def nodeName(self): return self.xml_qname
class Element(_container, tree.element):
def xml_attribute_factory(self, ns, local, value=u''):
node = Attr(ns, local, value)
node._set_ownerdoc(self)
return node
@property
def nodeName(self): return self.xml_qname
tagName = nodeName
    def hasAttributeNS(self, namespaceURI, localName): return (namespaceURI, localName) in self.xml_attributes
def hasAttributes(self): return bool(self.xml_attributes)
def getAttributeNS(self, namespaceURI, localName):
return self.xml_attributes.get((namespaceURI, localName))
def setAttributeNS(self, namespaceURI, localName, value): self.xml_attributes[namespaceURI, localName] = value
def removeAttributeNS(self, namespaceURI, localName): del self.xml_attributes[namespaceURI, localName]
    def getAttributeNodeNS(self, namespaceURI, localName): return self.xml_attributes.get((namespaceURI, localName))
def setAttributeNodeNS(self, node): self.xml_attributes[node.xml_namespace, node.xml_local] = node
def removeAttributeNodeNS(self, node): del self.xml_attributes[node.xml_namespace, node.xml_local]
class CharacterData(Node):
@property
def nodeValue(self): return self.xml_value
data = nodeValue
def __repr__(self):
MAXREPR = 10
data = self.data
if len(data) > MAXREPR:
dotdotdot = "..."
else:
dotdotdot = ""
return "<Amara DOM %s node \"%s%s\">" % (
self.__class__.__name__, data[0:MAXREPR], dotdotdot)
def substringData(self, offset, count):
if offset < 0:
raise IndexSizeErr("offset cannot be negative")
if offset > len(self.xml_value):
raise IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise IndexSizeErr("count cannot be negative")
return self.xml_value[offset:offset+count]
def appendData(self, more):
self.xml_value += more
def insertData(self, offset, more):
if offset < 0:
raise IndexSizeErr("offset cannot be negative")
if offset > len(self.xml_value):
raise IndexSizeErr("offset cannot be beyond end of data")
if more:
self.xml_value = "%s%s%s" % (
self.xml_value[:offset], more, self.xml_value[offset:])
def deleteData(self, offset, count):
if offset < 0:
raise IndexSizeErr("offset cannot be negative")
if offset > len(self.data):
raise IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise IndexSizeErr("count cannot be negative")
if count:
self.xml_value = self.xml_value[:offset] + self.xml_value[offset+count:]
def replaceData(self, offset, count, repl):
if offset < 0:
raise IndexSizeErr("offset cannot be negative")
if offset > len(self.xml_value):
raise IndexSizeErr("offset cannot be beyond end of data")
if count < 0:
raise IndexSizeErr("count cannot be negative")
if count:
self.xml_value = "%s%s%s" % (
self.xml_value[:offset], repl, self.xml_value[offset+count:])
class Text(CharacterData, tree.text):
nodeName = "#text"
class Comment(CharacterData, tree.comment):
nodeName = "#comment"
class CDATASection(Text):
nodeType = stdlib_node.CDATA_SECTION_NODE
nodeName = "#cdata-section"
class DocumentType(Node, tree.node):
nodeType = stdlib_node.DOCUMENT_TYPE_NODE
name = None
publicId = None
systemId = None
internalSubset = None
    def __init__(self, qualifiedName):
        # NamedNodeMap is not defined in this module, so plain dicts stand
        # in; technically these should be read-only maps that preserve
        # order (see minidom.ReadOnlySequentialNamedNodeMap)
        self.entities = {}
        self.notations = {}
if qualifiedName:
prefix, localname = splitqname(qualifiedName)
self.name = localname
self.nodeName = self.name
class Entity(Node, tree.node):
attributes = None
nodeType = stdlib_node.ENTITY_NODE
nodeValue = None
actualEncoding = None
encoding = None
version = None
    def __init__(self, name, publicId, systemId, notation):
        self.nodeName = name
        self.publicId = publicId
        self.systemId = systemId
        self.notationName = notation
class Notation(Node, tree.node):
    nodeType = stdlib_node.NOTATION_NODE
    nodeValue = None
    def __init__(self, name, publicId, systemId):
        self.nodeName = name
        self.publicId = publicId
        self.systemId = systemId
class DOMImplementation(object):
_features = [("core", "1.0"),
("core", "2.0"),
("core", "3.0"),
("core", None),
("xml", "1.0"),
("xml", "2.0"),
("xml", "3.0"),
("xml", None),
("ls-load", "3.0"),
("ls-load", None),
]
def hasFeature(self, feature, version):
if version == "":
version = None
return (feature.lower(), version) in self._features
def createDocument(self, namespaceURI, qualifiedName, doctype):
if doctype and doctype.xml_parent is not None:
raise WrongDocumentErr(
"doctype object owned by another DOM tree")
doc = Document()
add_root_element = not (namespaceURI is None
and qualifiedName is None
and doctype is None)
if not qualifiedName and add_root_element:
# The spec is unclear what to raise here; SyntaxErr
# would be the other obvious candidate. Since Xerces raises
# InvalidCharacterErr, and since SyntaxErr is not listed
# for createDocument, that seems to be the better choice.
# XXX: need to check for illegal characters here and in
# createElement.
# DOM Level III clears this up when talking about the return value
# of this function. If namespaceURI, qName and DocType are
# Null the document is returned without a document element
# Otherwise if doctype or namespaceURI are not None
# Then we go back to the above problem
raise InvalidCharacterErr("Element with no name")
if add_root_element:
prefix, localname = splitqname(qualifiedName)
if prefix == "xml" \
and namespaceURI != "http://www.w3.org/XML/1998/namespace":
raise NamespaceErr("illegal use of 'xml' prefix")
if prefix and not namespaceURI:
raise NamespaceErr(
"illegal use of prefix without namespaces")
element = doc.createElementNS(namespaceURI, qualifiedName)
if doctype:
doc.appendChild(doctype)
doc.appendChild(element)
        if doctype:
            # appendChild above already established the parent linkage;
            # ownerDocument is a read-only property, so use the helper
            doctype._set_ownerdoc(doc)
            doc.doctype = doctype
doc.implementation = self
return doc
def createDocumentType(self, qualifiedName, publicId, systemId):
doctype = DocumentType(qualifiedName)
doctype.publicId = publicId
doctype.systemId = systemId
return doctype
def getInterface(self, feature):
if self.hasFeature(feature, None):
return self
else:
return None
class Document(_container, tree.entity):
_child_node_types = (stdlib_node.ELEMENT_NODE, stdlib_node.PROCESSING_INSTRUCTION_NODE,
stdlib_node.COMMENT_NODE, stdlib_node.DOCUMENT_TYPE_NODE)
nodeType = stdlib_node.DOCUMENT_NODE
nodeName = "#document"
doctype = None
parentNode = None
implementation = DOMImplementation()
actualEncoding = None
encoding = None
standalone = None
version = None
strictErrorChecking = False
errorHandler = None
documentURI = None
def xml_comment_factory(self, data):
node = Comment(data)
node._set_ownerdoc(self)
return node
def xml_processing_instruction_factory(self, target, data):
node = ProcessingInstruction(target, data)
node._set_ownerdoc(self)
return node
def xml_text_factory(self, data):
node = Text(data)
node._set_ownerdoc(self)
return node
def xml_element_factory(self, ns, local):
node = Element(ns, local)
node._set_ownerdoc(self)
return node
    # ownerDocument is a read-only property on Node, so the factory
    # methods below use the _set_ownerdoc helper; constructor signatures
    # follow the xml_*_factory methods above
    def createDocumentFragment(self):
        d = DocumentFragment()
        d._set_ownerdoc(self)
        return d
    def createElement(self, tagName):
        e = Element(None, tagName)
        e._set_ownerdoc(self)
        return e
    def createTextNode(self, data):
        if not isinstance(data, basestring):
            raise TypeError, "node contents must be a string"
        t = Text(data)
        t._set_ownerdoc(self)
        return t
    def createCDATASection(self, data):
        if not isinstance(data, basestring):
            raise TypeError, "node contents must be a string"
        c = CDATASection(data)
        c._set_ownerdoc(self)
        return c
    def createComment(self, data):
        c = Comment(data)
        c._set_ownerdoc(self)
        return c
    def createProcessingInstruction(self, target, data):
        p = ProcessingInstruction(target, data)
        p._set_ownerdoc(self)
        return p
    def createAttribute(self, qName):
        a = Attr(None, qName)
        a._set_ownerdoc(self)
        a.xml_value = ""
        return a
    def createElementNS(self, namespaceURI, qualifiedName):
        e = Element(namespaceURI, qualifiedName)
        e._set_ownerdoc(self)
        return e
    def createAttributeNS(self, namespaceURI, qualifiedName):
        a = Attr(namespaceURI, qualifiedName)
        a._set_ownerdoc(self)
        a.xml_value = ""
        return a
    # A couple of implementation-specific helpers to create node types
    # not supported by the W3C DOM specs:
    def _create_entity(self, name, publicId, systemId, notationName):
        e = Entity(name, publicId, systemId, notationName)
        e._set_ownerdoc(self)
        return e
    def _create_notation(self, name, publicId, systemId):
        n = Notation(name, publicId, systemId)
        n._set_ownerdoc(self)
        return n
@property
def documentElement(self):
child_elements = [ ch for ch in self.xml_children if isinstance(ch, tree.element) ]
return child_elements[0] if child_elements else None
    def importNode(self, node, deep):
        if node.nodeType == stdlib_node.DOCUMENT_NODE:
            raise NotSupportedErr("cannot import document nodes")
        elif node.nodeType == stdlib_node.DOCUMENT_TYPE_NODE:
            raise NotSupportedErr("cannot import document type nodes")
        # clone the *imported* node, then re-own the copy and its subtree
        new_tree = node.cloneNode(deep)
        def set_owner(n):
            if hasattr(n, '_set_ownerdoc'):
                n._set_ownerdoc(self)
            for child in getattr(n, 'xml_children', ()):
                set_owner(child)
            if isinstance(n, tree.element):
                for attr in n.xml_attributes:
                    set_owner(attr)
        set_owner(new_tree)
        return new_tree
def getDOMImplementation(features=None):
    if features:
        if isinstance(features, basestring):
            from xml.dom import domreg
            features = domreg._parse_feature_string(features)
        for f, v in features:
            if not Document.implementation.hasFeature(f, v):
                return None
    return Document.implementation
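# Illustrative sketch (assumes the amara tree types accept the
# constructor signatures used by the xml_*_factory methods above):
# assemble a small document through this DOM facade.
def _demo_dom():
    doc = Document()
    root = doc.createElementNS(None, u'root')
    doc.appendChild(root)
    root.appendChild(doc.createTextNode(u'hello'))
    print root.tagName, root.hasChildNodes()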
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/dom/nodes.py
|
nodes.py
|
from itertools import *
from amara import tree
def element_subtree_iter(node, include_root=False):
'''
An iterator over the subtree, in document order, elements in the given subtree
Basically equivalent to the descendant-or-self axis of XPath
'''
if isinstance(node, tree.element) or (include_root and isinstance(node, tree.entity)):
process_stack = [(node,)]
else:
process_stack = [(ch for ch in node.xml_children if isinstance(ch, tree.element))]
while process_stack:
curr_iter = process_stack.pop()
for e in curr_iter:
yield e
process_stack.append(( ch for ch in e.xml_children if isinstance(ch, tree.element) ))
return
def top_namespaces(doc):
'''
Return a namespace mapping for an entity node derived from its children
It is not safe to pass this function anything besides an entity node
'''
elems = [ dict([(prefix, ns) for (prefix, ns) in e.xml_namespaces.iteritems()])
for e in doc.xml_children if hasattr(e, 'xml_namespaces') ]
elems.reverse()
return reduce(lambda a, b: a.update(b) or a, elems, {})
#
def set_namespaces(node, prefixes):
"""
Sets namespace mapping on an entity node by updating its children, or
on element by just updating its own mappings. As such it can be used as
a batch namespace update on an element
node - the root of the tree on which namespaces are to be declared
prefixes - the any additional/overriding namespace mappings
in the form of a dictionary of prefix: namespace
the base namespace mappings are taken from in-scope
declarations on the given node. These new declarations are
superimposed on existing ones
"""
to_update = chain(node.xml_elements, (node,)) if isinstance(node, tree.entity) else (node,)
for target in to_update:
for k, v in prefixes.items():
target.xml_namespaces[k] = v
return
def replace_namespace(node, oldns, newns):
    '''
    Checks the subtree at node for elements in oldns, and changes their
    xml_namespace to newns. Updates namespace declarations on node
    accordingly.
    You should probably ensure the appropriate namespace declaration is
    ready first:
    node.xmlns_attributes[newprefix] = newns
    '''
for elem in node.xml_select(u'.//*'):
if elem.xml_namespace == oldns:
elem.xml_namespace = newns
return
def first_item(seq, default=None):
    '''
    Return the first item in a sequence, or the default result (None by
    default), or, if it can reasonably determine the input is not a
    sequence, just return the identity.
    This is a useful, blind unpacker tool, e.g. when dealing with XML
    models that sometimes provide scalars and sometimes sequences, which
    are sometimes empty.
    This is somewhat analogous to the Python get() method on dictionaries,
    and is an idiom which in functional chains is less clumsy than its
    equivalent:
    try:
        return seq.next()
    except StopIteration:
        return default
    '''
from amara import tree
if isinstance(seq, basestring) or isinstance(seq, tree.node): return seq
return chain(seq or (), (default,)).next()
identity = lambda x: x
def omit_blanks():
return "..."
def make_date():
return "..."
#See: http://docs.python.org/dev/howto/functional.html
#from functional import *
#Will work as of 2.7
#mcompose = partial(reduce, compose)
def mcompose(*funcs):
def f(val):
result = val
for func in funcs:
result = pipeline_stage(func, result)
return result
return f
# return result
# result = tuple(pipeline_stage(func, result))
# for item in result:
# yield item
def pipeline_stage(obj, arg):
if callable(obj):
fresult = obj(arg)
else:
fresult = arg
# try:
# it = (fresult,)
# if not isinstance(fresult, basestring): it = iter(fresult)
# except TypeError:
# pass
# else:
# it = (arg,)
# return iter(it)
return fresult
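# Sketch (illustrative only): mcompose applies its callables left to
# right through pipeline_stage, so this prints 8, i.e. (3 + 1) * 2.
def _demo_mcompose():
    f = mcompose(lambda x: x + 1, lambda x: x * 2)
    print f(3)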
from copy import *
def trim_word_count(node, maxcount):
'''
import amara
from amara.lib.util import trim_word_count
x = amara.parse('<a>one two <b>three four </b><c>five <d>six seven</d> eight</c> nine</a>')
trim_word_count(x, 1).xml_write()
trim_word_count(x, 2).xml_write()
trim_word_count(x, 3).xml_write()
trim_word_count(x, 4).xml_write()
trim_word_count(x, 5).xml_write()
trim_word_count(x, 6).xml_write()
trim_word_count(x, 7).xml_write()
trim_word_count(x, 8).xml_write()
trim_word_count(x, 9).xml_write()
trim_word_count(x, 10).xml_write()
'''
def trim(node, count):
newnode = copy(node)
for child in node.xml_children:
if count >= maxcount:
break
words = len(child.xml_select(u'string(.)').split())
if count + words < maxcount:
newnode.xml_append(deepcopy(child))
count += words
else:
if isinstance(child, tree.text):
words_required = maxcount - count
chunk = child.xml_value.rsplit(None,
words-words_required)[0]
newnode.xml_append(tree.text(chunk))
else:
newnode.xml_append(trim(child, count))
count = maxcount
return newnode
return trim(node, 0)
def coroutine(func):
'''
A simple tool to eliminate the need to call next() to kick-start a co-routine
From David Beazley: http://www.dabeaz.com/generators/index.html
'''
def start(*args,**kwargs):
coro = func(*args,**kwargs)
coro.next()
return coro
return start
def strip_namespaces(node, strip_decls=False):
#from amara.lib.util import element_subtree_iter
for e in element_subtree_iter(node):
e.xml_namespace = None
if strip_decls and e.xml_namespaces:
for prefix in e.xml_namespaces:
del e.xml_namespaces[prefix]
return
#class UTemplate(Template):
# '''
# Unicode-safe version of string.Template
# '''
# pattern = unicode(Template.pattern)
# delimiter = u'$'
# idpattern = ur'[_a-z][_a-z0-9]*'
#Loosely based on methods in unittest.py
def assert_(test, obj, msg=None):
    """
    Raise an error if the test expression is not true. The test can be a
    function that takes the context object.
    """
    result = test(obj) if callable(test) else test
    if not result:
        raise AssertionError(msg)
    return obj
def assert_false(test, obj, msg=None):
    """
    Raise an error if the test expression is true. The test can be a
    function that takes the context object.
    """
    result = test(obj) if callable(test) else test
    if result:
        raise AssertionError(msg)
    return obj
def assert_equal(other, obj, msg=None):
"""
Fail if the two objects are unequal as determined by the '==' operator.
from functools import *
from amara.lib.util import *
f = partial(assert_equal, u'', msg="POW!")
print (f(''),) # -> ''
"""
if not obj == other:
raise AssertionError(msg or '%r != %r' % (obj, other))
return obj
def assert_not_equal(other, obj, msg=None):
"""Fail if the two objects are equal as determined by the '=='
operator.
"""
if obj == other:
raise AssertionError(msg or '%r == %r' % (obj, other))
return obj
DEF_BUFFERSIZE = 1000
# read by specified separator, not newlines
def readbysep(f, sep, buffersize=DEF_BUFFERSIZE):
'''
from amara.lib.util import readbysep
import cStringIO
f = cStringIO.StringIO('a\fb\fc')
[ x for x in readbysep(f, '\f') ]
['a', 'b', 'c']
#OK next 2 go in test suite, not docstrings
f = cStringIO.StringIO('a\fb\fc\f')
[ x for x in readbysep(f, '\f') ]
['a', 'b', 'c']
f = cStringIO.StringIO('\fa\fb\fc')
[ x for x in readbysep(f, '\f') ]
['a', 'b', 'c']
from amara import parse
from amara.lib.util import readbysep
import cStringIO
f = cStringIO.StringIO('<a/>\f<b/>\f<c/>')
[ parse(x).xml_select(u'name(*)') for x in readbysep(f, '\f') ]
'''
leftover = ''
while 1:
next = f.read(buffersize)
if not next: #empty string at EOF
yield leftover
break
chunks = (leftover + next).split(sep)
leftover = chunks[-1]
for chunk in chunks[:-1]: yield chunk
return
#Though I've found this very hard to reproduce simply, the plain string.Template seems susceptible to Unicode error
#Rest of this file is an adaptation from Python 2.6.1 string.py
#It creates a drop-in replacement for string.Template to address the Unicode problem
import re as _re
class _multimap:
"""Helper class for combining multiple mappings.
Used by .{safe_,}substitute() to combine the mapping and keyword
arguments.
"""
def __init__(self, primary, secondary):
self._primary = primary
self._secondary = secondary
def __getitem__(self, key):
try:
return self._primary[key]
except KeyError:
return self._secondary[key]
class _TemplateMetaclass(type):
pattern = r"""
%(delim)s(?:
(?P<escaped>%(delim)s) | # Escape sequence of two delimiters
(?P<named>%(id)s) | # delimiter and a Python identifier
{(?P<braced>%(id)s)} | # delimiter and a braced identifier
(?P<invalid>) # Other ill-formed delimiter exprs
)
"""
def __init__(cls, name, bases, dct):
super(_TemplateMetaclass, cls).__init__(name, bases, dct)
if 'pattern' in dct:
pattern = cls.pattern
else:
pattern = _TemplateMetaclass.pattern % {
'delim' : _re.escape(cls.delimiter),
'id' : cls.idpattern,
}
cls.pattern = _re.compile(unicode(pattern), _re.IGNORECASE | _re.VERBOSE | _re.UNICODE)
class Template:
"""A string class for supporting $-substitutions."""
__metaclass__ = _TemplateMetaclass
delimiter = '$'
idpattern = r'[_a-z][_a-z0-9]*'
def __init__(self, template):
self.template = template
# Search for $$, $identifier, ${identifier}, and any bare $'s
def _invalid(self, mo):
i = mo.start('invalid')
lines = self.template[:i].splitlines(True)
if not lines:
colno = 1
lineno = 1
else:
colno = i - len(''.join(lines[:-1]))
lineno = len(lines)
raise ValueError('Invalid placeholder in string: line %d, col %d' %
(lineno, colno))
def substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = _multimap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
# Check the most common path first.
named = mo.group('named') or mo.group('braced')
if named is not None:
val = mapping[named]
val = val if isinstance(val, unicode) else unicode(val, 'utf-8')
# We use this idiom instead of str() because the latter will
# fail if val is a Unicode containing non-ASCII characters.
return '%s' % (val,)
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
self._invalid(mo)
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
def safe_substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = _multimap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
named = mo.group('named')
if named is not None:
try:
# We use this idiom instead of str() because the latter
# will fail if val is a Unicode containing non-ASCII
return '%s' % (mapping[named],)
except KeyError:
return self.delimiter + named
braced = mo.group('braced')
if braced is not None:
try:
return '%s' % (mapping[braced],)
except KeyError:
return self.delimiter + '{' + braced + '}'
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
return self.delimiter
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
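# Sketch of the Unicode-safe Template above in action: substitute() fills
# known names; safe_substitute() leaves unknown placeholders intact.
def _demo_template():
    t = Template(u'Hello $name')
    print t.substitute(name=u'world')   # -> Hello world
    print t.safe_substitute()           # -> Hello $name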
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/lib/util.py
|
util.py
|
import time
def parse(s):
"""Parse an ISO-8601 date/time string, returning the value in seconds
since the epoch."""
m = __datetime_rx.match(s)
if m is None or m.group() != s:
raise ValueError, "unknown or illegal ISO-8601 date format: " + `s`
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
return time.mktime(gmt) + __extract_tzd(m) - time.timezone
def parse_timezone(timezone):
"""Parse an ISO-8601 time zone designator, returning the value in seconds
relative to UTC."""
m = __tzd_rx.match(timezone)
if not m:
raise ValueError, "unknown timezone specifier: " + `timezone`
if m.group() != timezone:
raise ValueError, "unknown timezone specifier: " + `timezone`
return __extract_tzd(m)
def tostring(t, timezone=0):
"""Format a time in ISO-8601 format.
If `timezone' is specified, the time will be specified for that timezone,
otherwise for UTC.
Some effort is made to avoid adding text for the 'seconds' field, but
seconds are supported to the hundredths.
"""
if type(timezone) is type(''):
timezone = parse_timezone(timezone)
else:
timezone = int(timezone)
if timezone:
sign = (timezone < 0) and "+" or "-"
timezone = abs(timezone)
hours = timezone / (60 * 60)
minutes = (timezone % (60 * 60)) / 60
tzspecifier = "%c%02d:%02d" % (sign, hours, minutes)
else:
tzspecifier = "Z"
psecs = t - int(t)
t = time.gmtime(int(t) - timezone)
year, month, day, hours, minutes, seconds = t[:6]
if seconds or psecs:
if psecs:
psecs = int(round(psecs * 100))
f = "%4d-%02d-%02dT%02d:%02d:%02d.%02d%s"
v = (year, month, day, hours, minutes, seconds, psecs, tzspecifier)
else:
f = "%4d-%02d-%02dT%02d:%02d:%02d%s"
v = (year, month, day, hours, minutes, seconds, tzspecifier)
else:
f = "%4d-%02d-%02dT%02d:%02d%s"
v = (year, month, day, hours, minutes, tzspecifier)
return f % v
def ctime(t):
"""Similar to time.ctime(), but using ISO-8601 format."""
return tostring(t, time.timezone)
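# Round-trip sketch (illustrative only) for the functions above; exact
# seconds may shift with the local DST rules time.mktime uses in parse().
def _demo_iso8601():
    s = "1998-08-27T14:05:30Z"
    print tostring(parse(s))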
# Internal data and functions:
import re
__date_re = ("(?P<year>\d\d\d\d)"
"(?:(?P<dsep>-|)"
"(?:(?P<julian>\d\d\d)"
"|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?")
__tzd_re = "(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)"
__tzd_rx = re.compile(__tzd_re)
__time_re = ("(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)"
"(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?"
+ __tzd_re)
__datetime_re = "%s(?:T%s)?" % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
del re
def __extract_date(m):
year = int(m.group("year"))
julian = m.group("julian")
if julian:
return __find_julian(year, int(julian))
month = m.group("month")
day = 1
if month is None:
month = 1
else:
month = int(month)
if not 1 <= month <= 12:
raise ValueError, "illegal month number: " + m.group("month")
else:
day = m.group("day")
if day:
day = int(day)
if not 1 <= day <= 31:
raise ValueError, "illegal day number: " + m.group("day")
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group("hours")
if not hours:
return 0, 0, 0
hours = int(hours)
if not 0 <= hours <= 23:
raise ValueError, "illegal hour number: " + m.group("hours")
minutes = int(m.group("minutes"))
if not 0 <= minutes <= 59:
raise ValueError, "illegal minutes number: " + m.group("minutes")
seconds = m.group("seconds")
if seconds:
seconds = float(seconds)
if not 0 <= seconds <= 60:
raise ValueError, "illegal seconds number: " + m.group("seconds")
# Python 2.3 requires seconds to be an integer
seconds=int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
"""Return the Time Zone Designator as an offset in seconds from UTC."""
if not m:
return 0
tzd = m.group("tzd")
if not tzd:
return 0
if tzd == "Z":
return 0
hours = int(m.group("tzdhours"))
minutes = m.group("tzdminutes")
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == "+":
return -offset
return offset
def __find_julian(year, julian):
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/lib/iso8601.py
|
iso8601.py
|
import re
import datetime
from string import *
from amara._xmlstring import *
__all__ = [
'lstrip', 'rstrip', 'strip',
'isname', 'isncname', 'isnmtoken', 'isqname', 'isspace', 'splitqname',
'legalize',
'U',
]
def U(s, encoding='utf-8', noneok=False):
"""
Apply a set of heuristics to the object to figure out how best
to get text from it.
XML is just text. Unfortunately there's a lot that gets in the way of the
text in common usage: data types, XPath strings (very close, but not exactly
the same thing as Python Unicode objects), Python string objects, character
encodings, etc. This function does its best to cut through all the complexity
and get you back as conveniently as possible to what's important: the text
import amara
from amara.lib import U
x = amara.parse('<a x="1">spam</a>')
U(x.xml_select('a'))
Note: you can make U always just convert None to u'' as follows:
>>> from functools import partial
>>> from amara.lib import U
>>> U = partial(U, noneok=True)
>>> U(None)
u''
"""
from amara import tree
from amara.xpath import datatypes
#xpath.datatypes.string is a subclass of Unicode object, so it won't fall through
#the test below into the XPath section proper
if isinstance(s, datatypes.string): return unicode(s)
#If it's already a Unicode object, nothing to do
if isinstance(s, unicode): return s
#If it's a string, decode it to yield Unicode
if isinstance(s, str): return s.decode(encoding)
#If it's an Amara node, return its XPath-based string value
if isinstance(s, tree.node): return unicode(datatypes.string(s))
#If it's an XPath data type object, apply the equivalent of the XPath string() function
if isinstance(s, datatypes.xpathobject): return unicode(datatypes.string(s))
#Specialize datetime object treatment, because the default unicode coercion doesn't go to W3C ISO flavor
if isinstance(s, datetime.datetime): return s.isoformat()
if s is None:
#FIXME: L10N
if noneok:
return u''
else:
raise TypeError('Refusing to coerce None into Unicode')
#Otherwise just leap into default coercions
try:
return unicode(s)
except TypeError, e:
return str(s).decode(encoding)
#Basic idea is as old as the XML spec, but was good to reuse a regex at
#http://maxharp3r.wordpress.com/2008/05/15/pythons-minidom-xml-and-illegal-unicode-characters/
#from http://boodebr.org/main/python/all-about-python-and-unicode#UNI_XML
RE_XML_ILLEGAL_PAT = u'([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])' + \
u'|' + \
u'([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])' % \
(unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff))
RE_XML_ILLEGAL = re.compile(RE_XML_ILLEGAL_PAT)
def legalize(s, repl=u'?'):
'''
>>> from amara.lib.xmlstring import legalize
>>> legalize(u'A\u001aB\u0000C')
u'A?B?C'
'''
return RE_XML_ILLEGAL.subn(repl, s)[0]
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/lib/xmlstring.py
|
xmlstring.py
|
import os, sys
from cStringIO import StringIO
import urllib, urllib2
import mimetools
from email.Utils import formatdate as _formatdate
from amara.lib import IriError
#from amara.lib import inputsource
from amara.lib.iri import *
__all__ = [
'DEFAULT_URI_SCHEMES',
'DEFAULT_RESOLVER',
'scheme_registry_resolver',
'facade_resolver',
'uridict',
'resolver',
]
# URI schemes supported by resolver_base
DEFAULT_URI_SCHEMES = ('http', 'file', 'ftp', 'data', 'pkgdata')
if hasattr(urllib2, 'HTTPSHandler'):
DEFAULT_URI_SCHEMES += ('https',)
DEFAULT_HIERARCHICAL_SEP = '/' #a separator to place between path segments when creating URLs
class resolver:
"""
"""
_supported_schemes = DEFAULT_URI_SCHEMES
def __init__(self, authorizations=None, lenient=True):
"""
"""
self.authorizations = authorizations
self.lenient = lenient
    def resolve(self, uriRef, baseUri=None):
        """
        Takes a URI or a URI reference plus a base URI, produces an
        absolutized URI if a base URI was given, then attempts to obtain
        access to an entity representing the resource identified by the
        resulting URI, returning the entity as a stream (a file-like
        object).
        Raises an IriError if the URI scheme is unsupported or if a
        stream could not be obtained for any reason.
        """
if not isinstance(uriRef, urllib2.Request):
if baseUri is not None:
uri = self.absolutize(uriRef, baseUri)
scheme = get_scheme(uri)
else:
uri = uriRef
scheme = get_scheme(uriRef)
# since we didn't use absolutize(), we need to verify here
if scheme not in self._supported_schemes:
if scheme is None:
raise ValueError('When the URI to resolve is a relative '
'reference, it must be accompanied by a base URI.')
else:
raise IriError(IriError.UNSUPPORTED_SCHEME,
scheme=scheme, resolver=self.__class__.__name__)
req = urllib2.Request(uri)
        else:
            req, uri = uriRef, uriRef.get_full_url()
            scheme = get_scheme(uri)
if self.authorizations and not self.authorize(uri):
raise IriError(IriError.DENIED_BY_RULE, uri=uri)
# Bypass urllib for opening local files.
if scheme == 'file':
path = uri_to_os_path(uri, attemptAbsolute=False)
try:
stream = open(path, 'rb')
except IOError, e:
raise IriError(IriError.RESOURCE_ERROR,
loc='%s (%s)' % (uri, path),
uri=uri, msg=str(e))
# Add the extra metadata that urllib normally provides (sans
# the poorly guessed Content-Type header).
stats = os.stat(path)
size = stats.st_size
mtime = _formatdate(stats.st_mtime)
headers = mimetools.Message(StringIO(
'Content-Length: %s\nLast-Modified: %s\n' % (size, mtime)))
stream = urllib.addinfourl(stream, headers, uri)
else:
# urllib2.urlopen, wrapped by us, will suffice for http, ftp,
# data and gopher
try:
stream = urllib2.urlopen(req)
except IOError, e:
raise IriError(IriError.RESOURCE_ERROR,
uri=uri, loc=uri, msg=str(e))
return stream
def authorize(self, uri):
"""
Implement an authorization mechanism for resolvers, allowing you to create "jails" where only certain URIs are allowed
"""
for match, allow in self.authorizations:
if callable(match):
if match(uri):
return allow
elif match:
return allow
        #If authorizations are specified, and none allow the URI, deny by default
#The user can easily reverse this by adding an auth (True, True)
return False
def absolutize(self, uriRef, baseUri):
"""
For most cases iri.absolutize is good enough, and does the main work of this function.
Resolves a URI reference to absolute form, effecting the result of RFC
3986 section 5. The URI reference is considered to be relative to
the given base URI.
Also verifies that the resulting URI reference has a scheme that
resolve() supports, raising a IriError if it doesn't.
Default implementation does not perform any validation on the base
URI beyond that performed by iri.absolutize().
If leniency has been turned on (self.lenient=True), accepts a base URI
beginning with '/', in which case the argument is assumed to be an absolute
path component of 'file' URI with no authority component.
"""
# since we know how absolutize works, we can anticipate the scheme of
# its return value and verify that it's supported first
if self.lenient:
# assume file: if leading "/"
if baseUri.startswith('/'):
baseUri = 'file://' + baseUri
return absolutize(uriRef, baseUri, limit_schemes=self._supported_schemes)
DEFAULT_RESOLVER = resolver()
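# Usage sketch (illustrative only) for the default resolver instance
# above; absolutize also enforces the supported-scheme check.
def _demo_absolutize():
    print DEFAULT_RESOLVER.absolutize('spam.xml', 'http://example.org/base/')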
class scheme_registry_resolver(resolver):
"""
Resolver that handles URI resolution with a registry for handling different
URI schemes. The default action if there is nothing registered for the scheme
will be to fall back to base behavior *unless* you have in the mapping a special
scheme None. The callable object that is the value on that key will then be used
as the default for all unknown schemes.
The expected function signature for scheme call-backs matches
inputsource.resolve, without the instance argument:
resolve(uri, base=None)
Reminder: Since this does not include self, if you are registering
a method, use the method instance (i.e. myresolver().handler
rather than myresolver.handler)
You can manipulate the mapping directly using the "handlers" attribute.
handlers - a Python dictionary with scheme names as keys (e.g. "http")
and callable objects as values
"""
def __init__(self, authorizations=None, lenient=True, handlers=None):
"""
"""
self.lenient = lenient
self.handlers = handlers or {}
resolver.__init__(self, authorizations, lenient)
def resolve(self, uri, base=None):
scheme = get_scheme(uri)
if not scheme:
if base:
scheme = get_scheme(base)
if not scheme:
#Another option is to fall back to Base class behavior
                raise IriError(IriError.SCHEME_REQUIRED,
                               base=base, ref=uri)
func = self.handlers.get(scheme)
#import sys; print >> sys.stderr, (self, self.handlers)
if not func:
func = self.handlers.get(None)
if not func:
return resolver.resolve(self, uri, base)
return func(uri, base)
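# A minimal usage sketch (the 'data' handler below is hypothetical and
# only for illustration; StringIO is already imported in this module).
# A handler takes (uri, base=None) and returns a stream:
#
#   def resolve_data(uri, base=None):
#       return StringIO(uri.split(',', 1)[1])
#
#   r = scheme_registry_resolver(handlers={'data': resolve_data})
#   r.resolve('data:,hello').read()   # -> 'hello'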
#TODO: this class could be eliminated in favor of a standard memoizing wrapper around a resolver
class facade_resolver(resolver):
"""
A type of resolver that uses a cache of resources, a dictionary of URI
to result mappings (similar to memoizing the resolve method). When a
URI is provided for resolution, the mapping is first checked, and a
stream is constructed by wrapping the mapping value string.
If no match is found in the mapping, fall back to the base
resolver logic.
You can manipulate the mapping directly using the "cache" attribute.
"""
def __init__(self, cache=None, observer=None):
"""
        cache - a dictionary with mappings from URI to value (as an object
to be converted to a UTF-8 encoded string)
observer - callable object invoked on each resolution request
"""
        resolver.__init__(self)
self.cache = cache or {}
self.observer = observer
return
def resolve(self, uri, base=None):
        if self.observer:
            self.observer(uri, base)
        #Does not factor in base. Should it normalize before checking?
if uri in self.cache:
cachedval = self.cache[uri]
if isinstance(cachedval, unicode):
return StringIO(cachedval.encode('utf-8'))
else:
return StringIO(str(cachedval))
        return resolver.resolve(self, uri, base)
#
class uridict(dict):
"""
A dictionary that uses URIs as keys. It attempts to observe some degree of
URI equivalence as defined in RFC 3986 section 6. For example, if URIs
A and B are equivalent, a dictionary operation involving key B will return
the same result as one involving key A, and vice-versa.
This is useful in situations where retrieval of a new representation of a
resource is undesirable for equivalent URIs, such as "file:///x" and
"file://localhost/x" (see RFC 1738), or "http://spam/~x/",
"http://spam/%7Ex/" and "http://spam/%7ex" (see RFC 3986).
Normalization performed includes case normalization on the scheme and
percent-encoded octets, percent-encoding normalization (decoding of
octets corresponding to unreserved characters), and the reduction of
'file://localhost/' to 'file:///', in accordance with both RFC 1738 and
RFC 3986 (although RFC 3986 encourages using 'localhost' and doing
this for all schemes, not just file).
An instance of this class is used by Ft.Xml.Xslt.XsltContext for caching
documents, so that the XSLT function document() will return identical
nodes, without refetching/reparsing, for equivalent URIs.
"""
# RFC 3986 requires localhost to be the default host no matter
# what the scheme, but, being descriptive of existing practices,
# leaves it up to the implementation to decide whether to use this
# and other tests of URI equivalence in the determination of
# same-document references. So our implementation results in what
# is arguably desirable, but not strictly required, behavior.
#
#FIXME: make localhost the default for all schemes, not just file
def _normalizekey(self, key):
key = normalize_case(normalize_percent_encoding(key))
if key[:17] == 'file://localhost/':
return 'file://' + key[16:]
else:
return key
def __getitem__(self, key):
return super(uridict, self).__getitem__(self._normalizekey(key))
def __setitem__(self, key, value):
return super(uridict, self).__setitem__(self._normalizekey(key), value)
def __delitem__(self, key):
return super(uridict, self).__delitem__(self._normalizekey(key))
def has_key(self, key):
return super(uridict, self).has_key(self._normalizekey(key))
def __contains__(self, key):
return super(uridict, self).__contains__(self._normalizekey(key))
def __iter__(self):
return iter(self.keys())
iterkeys = __iter__
def iteritems(self):
for key in self.iterkeys():
yield key, self.__getitem__(key)
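# Equivalence sketch (illustrative URIs): keys that are equivalent per
# RFC 3986 section 6 reach the same entry:
#
#   d = uridict()
#   d['http://spam/%7Ex/'] = 1
#   'http://spam/~x/' in d     # -> True (percent-encoding normalized)
#   d['file://localhost/x'] = 2
#   d['file:///x']             # -> 2 ('localhost' form reduced)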
#FIXME: Port to more amara.lib.iri functions
def get_filename_from_url(url):
fullname = url.split('/')[-1].split('#')[0].split('?')[0]
return fullname
def get_filename_parts_from_url(url):
fullname = url.split('/')[-1].split('#')[0].split('?')[0]
t = list(os.path.splitext(fullname))
if t[1]:
t[1] = t[1][1:]
return t
# ===== end of Amara-2.0.0a6/lib/lib/irihelpers.py =====
from __future__ import with_statement
import os
import urllib, urllib2
from cStringIO import StringIO
from uuid import uuid4
from amara.lib import IriError
from amara._expat import InputSource
from amara.lib.xmlstring import isxml
__all__ = [
'_inputsource', 'XMLSTRING', 'XMLURI', 'XMLFILE',
]
MAX_URI_LENGTH_FOR_HEURISTIC = 1024
#Classifications of raw input sources
XMLSTRING = 1
XMLURI = 2
XMLFILE = 3
class _inputsource(InputSource):
"""
The representation of a resource. Supports further, relative resolution of
URIs, including resolution to absolute form of URI references.
Standard object attributes:
_supported_schemes is a list of URI schemes supported
for dereferencing (representation retrieval).
"""
def __new__(cls, arg, uri=None, encoding=None, resolver=None, sourcetype=0):
"""
arg - a string, Unicode object (only if you really know what you're doing),
file-like object (stream), file path or URI. You can also pass an
              InputSource object, in which case the return value is just the
              same object, returned unchanged
uri - optional override URI. The base URI for the IS will be set to this
value
Returns an input source which can be passed to Amara APIs.
"""
#do the imports within the function to avoid circular crap
#from amara._xmlstring import IsXml as isxml
        #These imports are tucked in here because amara.lib.iri is an expensive import
from amara.lib.iri import is_absolute, os_path_to_uri
from amara.lib.irihelpers import DEFAULT_RESOLVER
resolver = resolver or DEFAULT_RESOLVER
if isinstance(arg, InputSource):
return arg
#if arg == (u'', ''): -> UnicodeWarning: Unicode equal comparison failed to convert both arguments to Unicode - interpreting them as being unequal
if arg == '':
#FIXME L10N
raise ValueError("Cannot parse an empty string as XML")
if isinstance(arg, urllib2.Request):
uri = arg.get_full_url() #One of the rightly labeled "lame" helper methods in urllib2 ;)
stream = resolver.resolve(arg)
elif hasattr(arg, 'read'):
#Create dummy Uri to use as base
uri = uri or uuid4().urn
stream = arg
#XXX: Should we at this point refuse to proceed unless it's a basestring?
elif sourcetype == XMLSTRING or isxml(arg):
#See this article about XML detection heuristics
#http://www.xml.com/pub/a/2007/02/28/what-does-xml-smell-like.html
uri = uri or uuid4().urn
stream = StringIO(arg)
elif is_absolute(arg) and not os.path.isfile(arg):
uri = arg
stream = resolver.resolve(uri)
#If the arg is beyond a certain length, don't even try it as a URI
elif len(arg) < MAX_URI_LENGTH_FOR_HEURISTIC:
uri = os_path_to_uri(arg)
stream = resolver.resolve(uri)
else:
#FIXME L10N
raise ValueError("Does not appear to be well-formed XML")
#We might add the ability to load zips, gzips & bzip2s
#http://docs.python.org/lib/module-zlib.html
#http://docs.python.org/lib/module-gzip.html
#http://docs.python.org/lib/module-bz2.html
#http://docs.python.org/lib/zipfile-objects.html
#import inspect; print inspect.stack()
#InputSource.__new__ is in C: expat/input_source.c:inputsource_new
return InputSource.__new__(cls, stream, uri, encoding)
def __init__(self, arg, uri=None, encoding=None, resolver=None, sourcetype=0):
        #uri and encoding were already handled in __new__; just record the resolver
from amara.lib.irihelpers import DEFAULT_RESOLVER
self.resolver = resolver or DEFAULT_RESOLVER
@staticmethod
def text(arg, uri=None, encoding=None, resolver=None):
'''
Set up an input source from text, according to the markup convention of the term
(i.e. in Python terms a string with XML, HTML, fragments thereof, or tag soup)
Supports processing content sources that are not unambiguously XML or HTML strings
'''
return _inputsource(arg, uri, encoding, resolver, sourcetype=XMLSTRING)
def resolve(self, uriRef, baseUri=None):
"""
Takes a URI or a URI reference plus a base URI, produces an absolutized URI
if a base URI was given, then attempts to obtain access to an entity
representing the resource identified by the resulting URI,
returning the entity as a stream (a file-like object).
(this work is done in self.resolver)
Raises a IriError if the URI scheme is unsupported or if a stream
could not be obtained for any reason.
"""
if baseUri:
uriRef = self.resolver.absolutize(uriRef, baseUri)
return self.__class__(uriRef)
def absolutize(self, uriRef, baseUri):
"""
Resolves a URI reference to absolute form, effecting the result of RFC
3986 section 5. The URI reference is considered to be relative to
the given base URI.
Also verifies that the resulting URI reference has a scheme that
resolve() supports, raising a IriError if it doesn't.
The default implementation does not perform any validation on the base
URI beyond that performed by absolutize().
If leniency has been turned on (self.lenient=True), accepts a base URI
beginning with '/', in which case the argument is assumed to be an absolute
path component of 'file' URI with no authority component.
"""
return self.resolver.absolutize(uriRef, baseUri)
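# A minimal usage sketch (file and URI names are hypothetical). Strings
# of XML, streams, OS paths and URIs are all accepted:
#
#   isrc = _inputsource('<doc/>')           # literal XML, per isxml()
#   isrc = _inputsource('spam.xml')         # OS path, converted to a URI
#   isrc = _inputsource('http://example.org/spam.xml')
#   isrc = _inputsource.text('<p>not unambiguously XML')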
# ===== end of Amara-2.0.0a6/lib/lib/_inputsource.py =====
import os
import sys
import imp
import time
import types
import cStringIO
from zipimport import zipimporter
__all__ = [
# Module Utilities
'find_loader', 'find_importer', 'get_importer', 'iter_modules',
'get_last_modified', 'get_search_path', 'proxy_module',
# Resource Utilities
'os_path_to_resource', 'normalize_resource', 'get_resource_filename',
'get_resource_string', 'get_resource_stream', 'get_resource_last_modified',
]
# Indicate that the use of "special" names is handled in a "zip-safe" way.
__zipsafe__ = True
IMP_SEARCH_ORDER = [ desc[0] for desc in imp.get_suffixes() ]
# ZIP imports always search for .pyc AND .pyo, but reverse their order
# depending on the optimization flag (-O).
ZIP_SEARCH_ORDER = [ '.py', '.pyc', '.pyo']
if not __debug__:
ZIP_SEARCH_ORDER.remove('.pyc')
ZIP_SEARCH_ORDER.append('.pyc')
from pkgutil import iter_importers, get_loader, find_loader, iter_modules, get_importer
try:
from pkg_resources import get_provider, resource_filename
except ImportError:
#Then setuptools is not installed
class default_provider(object):
"""Resource provider for "classic" loaders"""
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(module.__file__)
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def get_resource_string(self, manager, resource_name):
stream = self.get_resource_stream(manager, resource_name)
try:
return stream.read()
finally:
stream.close()
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def _fn(self, base, resource_name):
return os.path.join(base, *resource_name.split('/'))
def _has(self, pathname):
return os.path.exists(pathname)
def _isdir(self, pathname):
return os.path.isdir(pathname)
def _listdir(self, pathname):
return os.listdir(pathname)
class zip_provider(default_provider):
"""Resource provider for ZIP loaders"""
_dirindex = None
def __init__(self, module):
default_provider.__init__(self, module)
self.zipinfo = self.loader._files
self.zip_pre = self.loader.archive + os.sep
def get_resource_filename(self, manager, resource_name):
raise NotImplementedError("not supported by ZIP loaders")
def get_resource_stream(self, manager, resource_name):
data = self.get_resource_string(manager, resource_name)
return cStringIO.StringIO(data)
def get_resource_string(self, manager, resource_name):
pathname = self._fn(self.module_path, resource_name)
return self.loader.get_data(pathname)
def _zipinfo_name(self, pathname):
# Convert a virtual filename (full path to file) into a zipfile
# subpath usable with the zipimport directory cache for our
# target archive.
if pathname.startswith(self.zip_pre):
return pathname[len(self.zip_pre):]
raise ValueError("%s not in %s" % (pathname, self.zip_pre))
def _build_index(self):
self._dirindex = index = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in index:
index[parent].append(parts[-1])
break
else:
index[parent] = [parts.pop()]
return index
        def _has(self, pathname):
            arcname = self._zipinfo_name(pathname)
return (arcname in self.zipinfo or
arcname in (self._dirindex or self._build_index()))
def _isdir(self, pathname):
arcname = self._zipinfo_name(pathname)
return arcname in (self._dirindex or self._build_index())
def _listdir(self, pathname):
arcname = self._zipinfo_name(pathname)
if arcname in (self._dirindex or self._build_index()):
return self._dirindex[arcname][:]
return []
def get_provider(fullname):
if fullname not in sys.modules:
__import__(fullname)
module = sys.modules[fullname]
loader = getattr(module, '__loader__', None)
if loader is None:
provider = default_provider(module)
elif isinstance(loader, zipimporter):
provider = zip_provider(module)
else:
raise NotImplementedError('unsupported loader type: %s' % loader)
return provider
_resource_manager = None
else:
    # pkg_resources (aka setuptools) is installed; the resource_filename
    # top-level name is actually the bound method of the global
    # ResourceManager (at least, that is what the pkg_resources docs say).
_resource_manager = resource_filename.im_self
del resource_filename
def find_importer(fullname):
"""Find a PEP 302 "loader" object for fullname
If fullname contains dots, path must be the containing package's
__path__. Returns None if the module cannot be found or imported.
"""
for importer in iter_importers(fullname):
if importer.find_module(fullname) is not None:
return importer
return None
def get_last_modified(fullname):
"""
Returns the last modified timestamp for the given module.
"""
loader = get_loader(fullname)
if hasattr(loader, 'get_filename'):
suffixes = IMP_SEARCH_ORDER
elif isinstance(loader, zipimporter):
suffixes = ZIP_SEARCH_ORDER
    else:
        raise NotImplementedError("unsupported loader %s" % loader)
barename = '/' + fullname.replace('.', '/')
if loader.is_package(fullname):
barename += '/__init__'
for suffix in suffixes:
resource = barename + suffix
try:
timestamp = get_resource_last_modified(fullname, resource)
except EnvironmentError:
timestamp = 0
else:
break
return timestamp
def get_search_path(fullname):
loader = get_loader(fullname)
if loader.is_package(fullname):
if fullname in sys.modules:
package = sys.modules[fullname]
else:
package = loader.load_module(fullname)
return package.__path__
return None
def proxy_module(fullname, realname):
class moduleproxy(types.ModuleType):
def __getattribute__(self, name):
if realname not in sys.modules:
# Load the module
module = __import__(realname, {}, {}, [name])
# Replace ourselves in `sys.modules`
sys.modules[fullname] = module
else:
module = sys.modules[realname]
return module.__getattribute__(name)
def __repr__(self):
return "<moduleproxy '%s' to '%s'>" % (fullname, realname)
module = sys.modules[fullname] = moduleproxy(fullname)
return module
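# Usage sketch: this is how amara.lib aliases its test support module
# (see the 'testsupport' assignment in amara/lib/__init__.py). The first
# attribute access on the proxy triggers the real import and replaces
# the proxy in sys.modules:
#
#   testsupport = proxy_module('amara.lib.testsupport', 'amara.test')
#   testsupport.some_name   # hypothetical attribute; imports amara.test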
# -- Resource Handling ------------------------------------------------
def os_path_to_resource(pathname):
components = []
for component in pathname.split(os.sep):
if component == '..':
del components[-1:]
elif component not in ('', '.'):
components.append(component)
resource = '/'.join(components)
if pathname.startswith(os.sep):
resource = '/' + resource
return resource
def normalize_resource(package, resource):
# normalize the resource pathname
# Note, posixpath is not used as it doesn't remove leading '..'s
components = []
for component in resource.split('/'):
if component == '..':
del components[-1:]
elif component not in ('', '.'):
components.append(component)
absolute = resource.startswith('/')
resource = '/'.join(components)
provider = get_provider(package)
if absolute:
# Find the provider for the distribution directory
module_path = provider.module_path
packages = package.split('.')
if not get_loader(package).is_package(package):
del packages[-1]
for module in packages:
module_path = os.path.dirname(module_path)
provider.module_path = module_path
return (provider, resource)
def get_resource_filename(package, resource):
"""Returns a true filesystem name for the specified resource.
"""
provider, resource = normalize_resource(package, resource)
return provider.get_resource_filename(_resource_manager, resource)
def get_resource_string(package, resource):
"""Return a string containing the contents of the specified resource.
If the pathname is absolute it is retrieved starting at the path of
the importer for 'fullname'. Otherwise, it is retrieved relative
to the module within the loader.
"""
    provider, resource = normalize_resource(package, resource)
return provider.get_resource_string(_resource_manager, resource)
def get_resource_stream(package, resource):
"""Return a readable stream for specified resource"""
    provider, resource = normalize_resource(package, resource)
return provider.get_resource_stream(_resource_manager, resource)
def get_resource_last_modified(package, resource):
"""Return a timestamp indicating the last-modified time of the
    specified resource. Raises IOError if the pathname cannot be found
from the loader for 'fullname'.
"""
provider, resource = normalize_resource(package, resource)
if isinstance(provider.loader, zipimporter):
if not resource:
# it is the archive itself
timestamp = os.stat(provider.module_path).st_mtime
else:
filename = provider._fn(provider.module_path, resource)
zipinfo_name = provider._zipinfo_name(filename)
try:
dostime, dosdate = provider.zipinfo[zipinfo_name][5:7]
            except (KeyError, IndexError):
import errno
errorcode = errno.ENOENT
raise IOError(errorcode, os.strerror(errorcode), zipinfo_name)
            timestamp = time.mktime((
                ((dosdate >> 9) & 0x7f) + 1980,  # tm_year (DOS epoch is 1980)
                ((dosdate >> 5) & 0x0f),         # tm_mon (1-12, as Python's
                                                 #   struct_time expects)
                ((dosdate >> 0) & 0x1f),         # tm_mday
                ((dostime >> 11) & 0x1f),        # tm_hour
                ((dostime >> 5) & 0x3f),         # tm_min
                ((dostime >> 0) & 0x1f) * 2,     # tm_sec (2-second resolution)
                0, 0, -1))
else:
filename = provider.get_resource_filename(_resource_manager, resource)
timestamp = os.stat(filename).st_mtime
return timestamp
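# Worked example of the DOS date/time bit-unpacking above, with the
# illustrative values dosdate=0x3A9B, dostime=0x6B3A:
#   year  = ((0x3A9B >> 9) & 0x7f) + 1980 = 29 + 1980 = 2009
#   month = (0x3A9B >> 5) & 0x0f          = 4           (April)
#   day   = 0x3A9B & 0x1f                 = 27
#   hour  = (0x6B3A >> 11) & 0x1f         = 13
#   min   = (0x6B3A >> 5) & 0x3f          = 25
#   sec   = (0x6B3A & 0x1f) * 2           = 52
# i.e. 2009-04-27 13:25:52, which mktime() turns into a POSIX timestamp.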
# ===== end of Amara-2.0.0a6/lib/lib/importutil.py =====
#See also: http://bitbucket.org/ianb/formencode/src/tip/formencode/doctest_xml_compare.py#cl-70
#Though that is written by people who don't really know XML (e.g. see the make_xml function that generates guaranteed broken XML given the illegal "xml" GI)
import re
import difflib
import itertools
import HTMLParser
from xml.parsers import expat
from amara.lib.xmlstring import isspace
_S = "[\x20\x09\x0D\x0A]"
_VersionNum = "[a-zA-Z0-9_.:-]+"
_Eq = "%s?=%s?" % (_S, _S)
_VersionInfo = _S + "version" + _Eq + \
"(?:(?:'" + _VersionNum + "')|" + '(?:"' + _VersionNum + '"))'
_EncName = "[A-Za-z][A-Za-z0-9._-]*"
_EncodingDecl = _S + "encoding" + _Eq + \
"(?:(?:'" + _EncName + "')|" + '(?:"' + _EncName + '"))'
_SDDecl = _S + "standalone" + _Eq + \
"(?:(?:'(?:yes|no)')|" + '(?:"(?:yes|no)"))'
_xmldecl_match = re.compile(r"<\?xml" +
r"(?P<VersionInfo>%s)" % _VersionInfo +
r"(?P<EncodingDecl>%s)?" % _EncodingDecl +
r"(?P<SDDecl>%s)?" % _SDDecl +
r"%s?\?>" % _S).match
_textdecl_match = re.compile(r"<\?xml" +
r"(?P<VersionInfo>%s)?" % _VersionInfo +
r"(?P<EncodingDecl>%s)" % _EncodingDecl +
r"%s?\?>" % _S).match
_doctype_find = re.compile("<!DOCTYPE" + _S).search
_starttag_find = re.compile("<[^!?]").search
_html_find = re.compile("(<!DOCTYPE html)|(<html)", re.IGNORECASE).search
def check_xml(result, expected):
'''
A useful XML comparison routine for test cases, error reports, etc.
'''
diff = xml_diff(result, expected)
diff = '\n'.join(diff)
assert not diff, "Expected=%r\nresult=%r\ndiff=%r" % (expected, result, diff)
def document_compare(expected, compared, whitespace=True):
for line in document_diff(expected, compared, whitespace):
# There is a difference
return False
return True
def document_diff(expected, compared, whitespace=True):
# See if we need to use XML or HTML
if not _xmldecl_match(expected) and _html_find(expected):
diff = html_diff
else:
diff = xml_diff
return diff(expected, compared, whitespace)
def html_compare(expected, compared, whitespace=True):
"""
Compare two HTML strings. Returns `True` if the two strings are
equivalent, otherwise it returns `False`.
"""
for line in html_diff(expected, compared, whitespace):
# differences found
return False
# No differences
return True
def html_diff(expected, compared, whitespace=True):
"""
Compare two HTML strings; generate the delta as a unified diff.
    `whitespace` controls whether whitespace differences in text
    events are significant (pass False to ignore them).
"""
expected = _html_sequence(expected, whitespace)
compared = _html_sequence(compared, whitespace)
return difflib.unified_diff(expected, compared, 'expected', 'compared',
n=2, lineterm='')
def xml_compare(expected, compared, whitespace=True, lexical=True):
"""
Compare two XML strings. Returns `True` if the two strings are
equivalent, otherwise it returns `False`.
"""
for line in xml_diff(expected, compared, whitespace):
# differences found
return False
# No differences
return True
def xml_diff(expected, compared, whitespace=True):
# External Parsed Entities cannot have a standalone declaration or
# DOCTYPE declaration.
# See XML 1.0 2nd, 4.3.2, Well-Formed Parsed Entities
sequencer = _xml_sequence
if _textdecl_match(expected):
# Limit the search for DOCTYPE to the content before the first element.
# If no elements exist, it *MUST* be a parsed entity.
match = _starttag_find(expected)
if not match or not _doctype_find(expected, 0, match.start()):
sequencer = _entity_sequence
expected = sequencer(expected, whitespace)
compared = sequencer(compared, whitespace)
return difflib.unified_diff(expected, compared, 'expected', 'compared',
n=2, lineterm='')
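# A minimal usage sketch (documents are illustrative):
#
#   xml_compare('<a/>', '<a></a>')     # -> True (same event stream)
#   for line in xml_diff('<a><b/></a>', '<a><b x="1"/></a>'):
#       print line                     # unified diff of the event streams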
def entity_compare(expected, compared, ignorews=False):
return
class _markup_sequence(list):
__slots__ = ('_data', '_nsdecls')
def __init__(self, data, whitespace=True):
list.__init__(self)
if not whitespace:
self._flush = self._flush_whitespace
self._data = u''
self._nsdecls = []
self.feed(data)
self.close()
self._flush()
def _flush(self):
data = self._data
if data:
self.append('#text: ' + repr(data))
self._data = u''
def _flush_whitespace(self):
data = self._data
if data:
if not isspace(data):
self.append('#text: ' + repr(data))
self._data = u''
def namespace_decl(self, prefix, uri):
self._nsdecls.append((prefix, uri))
_prepare_attrs = sorted
def start_element(self, name, attrs):
if self._data: self._flush()
self.append('start-tag: ' + name)
if self._nsdecls:
nsdecls = sorted(self._nsdecls)
nsdecls = [ '%s=%r' % pair for pair in nsdecls ]
self.append(' namespaces: ' + ', '.join(nsdecls))
del self._nsdecls[:]
if attrs:
attrs = self._prepare_attrs(attrs)
attrs = [ '%s=%r' % pair for pair in attrs ]
self.append(' attributes: ' + ', '.join(attrs))
return
def end_element(self, name):
if self._data: self._flush()
self.append('end-tag: ' + name)
def characters(self, data):
if data:
self._data += data
def processing_instruction(self, target, data):
if self._data: self._flush()
event = 'processing-instruction: target=%s, data=%r' % (target, data)
self.append(event)
def entity_ref(self, name):
if self._data: self._flush()
self.append('entity-ref: name=' + name)
def comment(self, data):
if self._data: self._flush()
self.append('#comment: ' + repr(data))
def start_cdata(self):
if self._data: self._flush()
self.append('start-cdata')
def end_cdata(self):
if self._data: self._flush()
self.append('end-cdata')
def doctype_decl(self, name, sysid, pubid, has_internal_subset):
if self._data: self._flush()
event = 'doctype-decl: name=%s, sysid=%r, pubid=%r, subset=%s' % (
name, sysid, pubid, ('yes' if has_internal_subset else 'no'))
self.append(event)
class _xml_sequence(_markup_sequence):
__slots__ = ('_parser',)
def __init__(self, data, whitespace=True, lexical=True):
self._parser = parser = self._create_parser()
parser.ordered_attributes = True
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_NEVER)
parser.StartElementHandler = self.start_element
parser.EndElementHandler = self.end_element
parser.CharacterDataHandler = self.characters
parser.ProcessingInstructionHandler = self.processing_instruction
parser.StartNamespaceDeclHandler = self.namespace_decl
parser.SkippedEntityHandler = self.entity_ref
if lexical:
parser.CommentHandler = self.comment
parser.StartCdataSectionHandler = self.start_cdata
parser.EndCdataSectionHandler = self.end_cdata
parser.StartDoctypeDeclHandler = self.doctype_decl
_markup_sequence.__init__(self, data, whitespace)
def _create_parser(self):
return expat.ParserCreate(namespace_separator='#')
def _prepare_attrs(self, attrs):
it = iter(attrs)
return sorted(itertools.izip(it, it))
def feed(self, data):
self._parser.Parse(data, 0)
def close(self):
self._parser.Parse('', 1)
# break cycle created by expat handlers pointing to our methods
self._parser = None
class _entity_sequence(_xml_sequence):
def _create_parser(self):
parser = _xml_sequence._create_parser(self)
context = 'xml=http://www.w3.org/XML/1998/namespace'
return parser.ExternalEntityParserCreate(context)
class _html_sequence(HTMLParser.HTMLParser, _markup_sequence):
_forbidden_end_elements = frozenset([
'area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
'img', 'input', 'isindex', 'link', 'meta', 'param',
])
def __init__(self, data, whitespace=True, lexical=True):
HTMLParser.HTMLParser.__init__(self)
if lexical:
self.handle_comment = self.comment
_markup_sequence.__init__(self, data, whitespace)
handle_starttag = _markup_sequence.start_element
handle_endtag = _markup_sequence.end_element
handle_charref = _markup_sequence.entity_ref
handle_entityref = _markup_sequence.entity_ref
handle_data = _markup_sequence.characters
# ===== end of Amara-2.0.0a6/lib/lib/treecompare.py =====
__all__ = ['IriError', 'inputsource']
from amara import Error
class IriError(Error):
"""
Exception related to URI/IRI processing
"""
INVALID_BASE_URI = 100
#RELATIVE_DOCUMENT_URI = 110
RELATIVE_BASE_URI = 111
OPAQUE_BASE_URI = 112
NON_FILE_URI = 120
UNIX_REMOTE_HOST_FILE_URI = 121
RESOURCE_ERROR = 130
SCHEME_REQUIRED = 200 # for SchemeRegistryResolver
UNSUPPORTED_SCHEME = 201
IDNA_UNSUPPORTED = 202
DENIED_BY_RULE = 300
INVALID_PUBLIC_ID_URN = 400
UNSUPPORTED_PLATFORM = 1000
@classmethod
def _load_messages(cls):
from gettext import gettext as _
        # %r preferred for reporting URIs because the URI refs can be empty
        # strings or, if invalid, could contain characters unsafe for the
        # error message stream.
return {
IriError.INVALID_BASE_URI: _(
"Invalid base URI: %(base)r cannot be used to resolve "
" reference %(ref)r"),
IriError.RELATIVE_BASE_URI: _(
"Invalid base URI: %(base)r cannot be used to resolve "
"reference %(ref)r; the base URI must be absolute, not "
"relative."),
IriError.NON_FILE_URI: _(
"Only a 'file' URI can be converted to an OS-specific path; "
"URI given was %(uri)r"),
IriError.UNIX_REMOTE_HOST_FILE_URI: _(
"A URI containing a remote host name cannot be converted to a "
" path on posix; URI given was %(uri)r"),
IriError.RESOURCE_ERROR: _(
"Error retrieving resource %(loc)r: %(msg)s"),
IriError.UNSUPPORTED_PLATFORM: _(
"Platform %(platform)r not supported by URI function "
"%(function)s"),
IriError.SCHEME_REQUIRED: _(
"Scheme-based resolution requires a URI with a scheme; "
"neither the base URI %(base)r nor the reference %(ref)r "
"have one."),
IriError.INVALID_PUBLIC_ID_URN: _(
"A public ID cannot be derived from URN %(urn)r "
"because it does not conform to RFC 3151."),
IriError.UNSUPPORTED_SCHEME: _(
"The URI scheme %(scheme)s is not supported by resolver "),
IriError.IDNA_UNSUPPORTED: _(
"The URI ref %(uri)r cannot be made urllib-safe on this "
"version of Python (IDNA encoding unsupported)."),
IriError.DENIED_BY_RULE: _(
"Access to IRI %(uri)r was denied by action of an IRI restriction"),
}
from amara.lib._inputsource import _inputsource as inputsource
# Alias amara.test to amara.lib.testsupport
from amara.lib import importutil
testsupport = importutil.proxy_module('amara.lib.testsupport', 'amara.test')
from amara.lib.xmlstring import *
# ===== end of Amara-2.0.0a6/lib/lib/__init__.py =====
__all__ = [
# IRI tools
"iri_to_uri",
"nfc_normalize",
"convert_ireg_name",
# RFC 3986 implementation
'matches_uri_ref_syntax', 'matches_uri_syntax',
'percent_encode', 'percent_decode',
'split_uri_ref', 'unsplit_uri_ref',
'split_authority', 'split_fragment',
'absolutize', 'relativize', 'remove_dot_segments',
'normalize_case', 'normalize_percent_encoding',
'normalize_path_segments', 'normalize_path_segments_in_uri',
# RFC 3151 implementation
'urn_to_public_id', 'public_id_to_urn',
# Miscellaneous
'is_absolute', 'get_scheme', 'strip_fragment',
'os_path_to_uri', 'uri_to_os_path', 'basejoin', 'join',
'make_urllib_safe',
'WINDOWS_SLASH_COMPAT',
'urlopen', 'path_resolve',
]
import os, sys
import urllib, urllib2
import re, cStringIO
import mimetools
from string import ascii_letters
from email.Utils import formatdate as _formatdate
from uuid import UUID, uuid1, uuid4
from amara.lib import IriError, importutil
# whether os_path_to_uri should treat "/" same as "\" in a Windows path
WINDOWS_SLASH_COMPAT = True
DEFAULT_HIERARCHICAL_SEP = '/'
def iri_to_uri(iri, convertHost=False):
r"""
Converts an IRI or IRI reference to a URI or URI reference,
implementing sec. 3.1 of draft-duerst-iri-10.
The convertHost flag indicates whether to perform conversion of
the ireg-name (host) component of the IRI to an RFC 2396-compatible
URI reg-name (IDNA encoded), e.g.
iri_to_uri(u'http://r\xe9sum\xe9.example.org/', convertHost=False)
=> u'http://r%C3%A9sum%C3%A9.example.org/'
iri_to_uri(u'http://r\xe9sum\xe9.example.org/', convertHost=True)
=> u'http://xn--rsum-bpad.example.org/'
Ordinarily, the IRI should be given as a unicode string. If the IRI
is instead given as a byte string, then it will be assumed to be
UTF-8 encoded, will be decoded accordingly, and as per the
requirements of the conversion algorithm, will NOT be normalized.
"""
    if isinstance(iri, str):
        # a byte string is assumed to be UTF-8: decode it, but (per the
        # conversion algorithm described above) do NOT normalize it
        iri = iri.decode('utf-8')
    else:
        iri = nfc_normalize(iri)
if convertHost and sys.version_info[0:2] >= (2,3):
# first we have to get the host
(scheme, auth, path, query, frag) = split_uri_ref(iri)
if auth and auth.find('@') > -1:
userinfo, hostport = auth.split('@')
else:
userinfo = None
hostport = auth
if hostport and hostport.find(':') > -1:
host, port = hostport.split(':')
else:
host = hostport
port = None
if host:
host = convert_ireg_name(host)
auth = ''
if userinfo:
auth += userinfo + '@'
auth += host
if port:
auth += ':' + port
iri = unsplit_uri_ref((scheme, auth, path, query, frag))
res = u''
pos = 0
#FIXME: use re.subn with substitution function for big speed-up
surrogate = None
for c in iri:
cp = ord(c)
        if cp > 127:
if cp < 160:
# FIXME: i18n
raise ValueError("Illegal character at position %d (0-based) of IRI %r" % (pos, iri))
            # 'for c in iri' may give us surrogate pairs (on narrow builds)
            elif 0xd7ff < cp < 0xe000:
                if cp < 0xdc00:
                    # d800-dbff: leading surrogate; hold it until its mate
                    surrogate = c
                    continue
                # dc00-dfff: trailing surrogate; join it with the leader
                if surrogate is None:
                    raise ValueError("Illegal surrogate pair in %r" % iri)
                c = surrogate + c
                surrogate = None
for octet in c.encode('utf-8'):
res += u'%%%02X' % ord(octet)
else:
res += c
pos += 1
return res
def nfc_normalize(iri):
"""
Normalizes the given unicode string according to Unicode Normalization Form C (NFC)
so that it can be used as an IRI or IRI reference.
"""
from unicodedata import normalize
return normalize('NFC', iri)
def convert_ireg_name(iregname):
"""
    Converts the given ireg-name component of an IRI to a string suitable for use
    as a URI reg-name in pre-rfc2396bis schemes and resolvers. Returns the
    ireg-name IDNA-encoded as an ASCII string.
"""
# I have not yet verified that the default IDNA encoding
# matches the algorithm required by the IRI spec, but it
# does work on the one simple example in the spec.
return iregname.encode('idna')
#=============================================================================
# Functions that implement aspects of RFC 3986
#
_validation_setup_completed = False
def _init_uri_validation_regex():
"""
Called internally to compile the regular expressions needed by
URI validation functions, just once, the first time a function
that needs them is called.
"""
global _validation_setup_completed
if _validation_setup_completed:
return
#-------------------------------------------------------------------------
# Regular expressions for determining the non-URI-ness of strings
#
# A given string's designation as a URI or URI reference comes from the
# context in which it is being used, not from its syntax; a regular
# expression can at most only determine whether a given string COULD be a
# URI or URI reference, based on its lexical structure.
#
# 1. Altova's regex (in the public domain; courtesy Altova)
#
# # based on the BNF grammar in the original RFC 2396
# ALTOVA_REGEX = r"(([a-zA-Z][0-9a-zA-Z+\-\.]*:)?/{0,2}" + \
# r"[0-9a-zA-Z;/?:@&=+$\.\-_!~*'()%]+)?" + \
# r"(#[0-9a-zA-Z;/?:@&=+$\.\-_!~*'()%]+)?"
#
# This regex matches URI references, and thus URIs as well. It is also
# lenient; some strings that are not URI references can falsely match.
#
# It is also not very useful as-is, because it essentially has the form
# (group1)?(group2)? -- this matches the empty string, and in fact any
# string or substring can be said to match this pattern. To be useful,
# this regex (and any like it) must be changed so that it only matches
# an entire string. This is accomplished in Python by using the \A and \Z
# delimiters around the pattern:
#
# BETTER_ALTOVA_REGEX = r"\A(?!\n)%s\Z" % ALTOVA_REGEX
#
# The (?!\n) takes care of an edge case where a string consisting of a
# sole linefeed character would falsely match.
#
# 2. Python regular expressions for strict validation of URIs and URI
# references (in the public domain; courtesy Fourthought, Inc.)
#
# Note that we do not use any \d or \w shortcuts, as these are
# potentially locale or Unicode sensitive.
#
# # based on the ABNF in RFC 3986,
# # "Uniform Resource Identifier (URI): Generic Syntax"
pchar = r"(?:[0-9A-Za-z\-_\.!~*'();:@&=+$,]|(?:%[0-9A-Fa-f]{2}))"
fragment = r"(?:[0-9A-Za-z\-_\.!~*'();:@&=+$,/?]|(?:%[0-9A-Fa-f]{2}))*"
query = fragment
segment_nz_nc = r"(?:[0-9A-Za-z\-_\.!~*'();@&=+$,]|(?:%[0-9A-Fa-f]{2}))+"
segment_nz = r'%s+' % pchar
segment = r'%s*' % pchar
#path_empty = r'' # zero characters
path_rootless = r'%s(?:/%s)*' % (segment_nz, segment) # begins with a segment
path_noscheme = r'%s(?:/%s)*' % (segment_nz_nc, segment) # begins with a non-colon segment
path_absolute = r'/(?:%s)?' % path_rootless # begins with "/" but not "//"
path_abempty = r'(?:/%s)*' % segment # begins with "/" or is empty
#path = r'(?:(?:%s)|(?:%s)|(?:%s)|(?:%s))?' % (path_abempty, path_absolute, path_noscheme, path_rootless)
domainlabel = r'[0-9A-Za-z](?:[0-9A-Za-z\-]{0,61}[0-9A-Za-z])?'
qualified = r'(?:\.%s)*\.?' % domainlabel
reg_name = r"(?:(?:[0-9A-Za-z\-_\.!~*'();&=+$,]|(?:%[0-9A-Fa-f]{2}))*)"
dec_octet = r'(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])'
IPv4address = r'(?:%s\.){3}(?:%s)' % (dec_octet, dec_octet)
h16 = r'[0-9A-Fa-f]{1,4}'
ls32 = r'(?:(?:%s:%s)|%s)' % (h16, h16, IPv4address)
IPv6address = r'(?:' + \
r'(?:(?:%s:){6}%s)' % (h16, ls32) + \
r'|(?:::(?:%s:){5}%s)' % (h16, ls32) + \
r'|(?:%s?::(?:%s:){4}%s)' % (h16, h16, ls32) + \
r'|(?:(?:(?:%s:)?%s)?::(?:%s:){3}%s)' % (h16, h16, h16, ls32) + \
r'|(?:(?:(?:%s:)?%s){0,2}::(?:%s:){2}%s)' % (h16, h16, h16, ls32) + \
r'|(?:(?:(?:%s:)?%s){0,3}::%s:%s)' % (h16, h16, h16, ls32) + \
r'|(?:(?:(?:%s:)?%s){0,4}::%s)' % (h16, h16, ls32) + \
r'|(?:(?:(?:%s:)?%s){0,5}::%s)' % (h16, h16, h16) + \
r'|(?:(?:(?:%s:)?%s){0,6}::)' % (h16, h16) + \
r')'
IPvFuture = r"(?:v[0-9A-Fa-f]+\.[0-9A-Za-z\-\._~!$&'()*+,;=:]+)"
IP_literal = r'\[(?:%s|%s)\]' % (IPv6address, IPvFuture)
port = r'[0-9]*'
host = r'(?:%s|%s|%s)?' % (IP_literal, IPv4address, reg_name)
userinfo = r"(?:[0-9A-Za-z\-_\.!~*'();:@&=+$,]|(?:%[0-9A-Fa-f]{2}))*"
authority = r'(?:%s@)?%s(?::%s)?' % (userinfo, host, port)
scheme = r'[A-Za-z][0-9A-Za-z+\-\.]*'
#absolute_URI = r'%s:%s(?:\?%s)?' % (scheme, hier_part, query)
relative_part = r'(?:(?://%s%s)|(?:%s)|(?:%s))?' % (authority, path_abempty,
path_absolute, path_noscheme)
relative_ref = r'%s(?:\?%s)?(?:#%s)?' % (relative_part, query, fragment)
hier_part = r'(?:(?://%s%s)|(?:%s)|(?:%s))?' % (authority, path_abempty,
path_absolute, path_rootless)
URI = r'%s:%s(?:\?%s)?(?:#%s)?' % (scheme, hier_part, query, fragment)
URI_reference = r'(?:%s|%s)' % (URI, relative_ref)
STRICT_URI_PYREGEX = r"\A%s\Z" % URI
STRICT_URIREF_PYREGEX = r"\A(?!\n)%s\Z" % URI_reference
global URI_PATTERN, URI_REF_PATTERN
URI_PATTERN = re.compile(STRICT_URI_PYREGEX) # strict checking for URIs
URI_REF_PATTERN = re.compile(STRICT_URIREF_PYREGEX) # strict checking for URI refs
_validation_setup_completed = True
return
def matches_uri_ref_syntax(s):
"""
This function returns true if the given string could be a URI reference,
as defined in RFC 3986, just based on the string's syntax.
A URI reference can be a URI or certain portions of one, including the
empty string, and it can have a fragment component.
"""
if not _validation_setup_completed:
_init_uri_validation_regex()
return URI_REF_PATTERN.match(s) is not None
def matches_uri_syntax(s):
"""
This function returns true if the given string could be a URI, as defined
in RFC 3986, just based on the string's syntax.
A URI is by definition absolute (begins with a scheme) and does not end
with a #fragment. It also must adhere to various other syntax rules.
"""
if not _validation_setup_completed:
_init_uri_validation_regex()
return URI_PATTERN.match(s) is not None
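# Examples (illustrative strings):
#
#   matches_uri_syntax('http://example.org/')   # -> True
#   matches_uri_syntax('//example.org/')        # -> False (no scheme)
#   matches_uri_ref_syntax('//example.org/')    # -> True (network-path ref)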
_split_uri_ref_setup_completed = False
def _init_split_uri_ref_pattern():
"""
Called internally to compile the regular expression used by
split_uri_ref() just once, the first time the function is called.
"""
global _split_uri_ref_setup_completed
if _split_uri_ref_setup_completed:
return
# Like the others, this regex is also in the public domain.
# It is based on this one, from RFC 3986 appendix B
# (unchanged from RFC 2396 appendix B):
# ^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?
regex = r"^(?:(?P<scheme>[^:/?#]+):)?(?://(?P<authority>[^/?#]*))?(?P<path>[^?#]*)(?:\?(?P<query>[^#]*))?(?:#(?P<fragment>.*))?$"
global SPLIT_URI_REF_PATTERN
SPLIT_URI_REF_PATTERN = re.compile(regex)
_split_uri_ref_setup_completed = True
return
def split_uri_ref(uriref):
"""
Given a valid URI reference as a string, returns a tuple representing the
generic URI components, as per RFC 3986 appendix B. The tuple's structure
is (scheme, authority, path, query, fragment).
All values will be strings (possibly empty) or None if undefined.
Note that per RFC 3986, there is no distinction between a path and
an "opaque part", as there was in RFC 2396.
"""
if not _split_uri_ref_setup_completed:
_init_split_uri_ref_pattern()
# the pattern will match every possible string, so it's safe to
# assume there's a groupdict method to call.
g = SPLIT_URI_REF_PATTERN.match(uriref).groupdict()
scheme = g['scheme']
authority = g['authority']
path = g['path']
query = g['query']
fragment = g['fragment']
return (scheme, authority, path, query, fragment)
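# Examples (following RFC 3986 appendix B):
#
#   split_uri_ref('http://a/b/c?q#f')
#   # -> ('http', 'a', '/b/c', 'q', 'f')
#   split_uri_ref('mailto:joe@example.org')
#   # -> ('mailto', None, 'joe@example.org', None, None)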
def unsplit_uri_ref(uriRefSeq):
"""
Given a sequence as would be produced by split_uri_ref(), assembles and
returns a URI reference as a string.
"""
if not isinstance(uriRefSeq, (tuple, list)):
raise TypeError("sequence expected, got %s" % type(uriRefSeq))
(scheme, authority, path, query, fragment) = uriRefSeq
uri = ''
if scheme is not None:
uri += scheme + ':'
if authority is not None:
uri += '//' + authority
uri += path
if query is not None:
uri += '?' + query
if fragment is not None:
uri += '#' + fragment
return uri
_split_authority_setup_completed = False
def _init_split_authority_pattern():
"""
Called internally to compile the regular expression used by
split_authority() just once, the first time the function is called.
"""
global _split_authority_setup_completed
if _split_authority_setup_completed:
return
global SPLIT_AUTHORITY_PATTERN
regex = r'(?:(?P<userinfo>[^@]*)@)?(?P<host>[^:]*)(?::(?P<port>.*))?'
SPLIT_AUTHORITY_PATTERN = re.compile(regex)
_split_authority_setup_completed = True
return
def split_authority(authority):
"""
Given a string representing the authority component of a URI, returns
a tuple consisting of the subcomponents (userinfo, host, port). No
percent-decoding is performed.
"""
if not _split_authority_setup_completed:
_init_split_authority_pattern()
m = SPLIT_AUTHORITY_PATTERN.match(authority)
if m:
return m.groups()
else:
return (None, authority, None)
def split_fragment(uri):
"""
Given a URI or URI reference, returns a tuple consisting of
(base, fragment), where base is the portion before the '#' that
precedes the fragment component.
"""
# The only '#' in a legit URI will be the fragment separator,
# but in the wild, people get sloppy. Assume the last '#' is it.
pos = uri.rfind('#')
if pos == -1:
return (uri, uri[:0])
else:
return (uri[:pos], uri[pos+1:])
# "unreserved" characters are allowed in a URI, and do not have special
# meaning as delimiters of URI components or subcomponents. They may
# appear raw or percent-encoded, but percent-encoding is discouraged.
# This set of characters is long enough that using a
# compiled regex is faster than using a string with the "in" operator.
#UNRESERVED_PATTERN = re.compile(r"[0-9A-Za-z\-\._~!*'()]") # RFC 2396
UNRESERVED_PATTERN = re.compile(r'[0-9A-Za-z\-\._~]') # RFC 3986
# "reserved" characters are allowed in a URI, but they may or always do
# have special meaning as delimiters of URI components or subcomponents.
# When being used as delimiters, they must be raw, and when not being
# used as delimiters, they must be percent-encoded.
# This set of characters is sufficiently short enough that using a
# string with the "in" operator is faster than using a compiled regex.
# The characters in the string are ordered according to how likely they
# are to be found (approximately), for faster operation with "in".
#RESERVED = "/&=+?;@,:$[]" # RFC 2396 + RFC 2732
RESERVED = "/=&+?#;@,:$!*[]()'" # RFC 3986
# workaround for Py 2.2 bytecode issue; see
# http://mail.python.org/pipermail/python-list/2005-March/269948.html
SURR_DC00 = unichr(0xdc00)
def _chars(s):
"""
This generator function helps iterate over the characters in a
string. When the string is unicode and a surrogate pair is
encountered, the pair is returned together, regardless of whether
Python was built with 32-bit ('wide') or 16-bit code values for
its internal representation of unicode. This function will raise a
ValueError if it detects an illegal surrogate pair.
For example, given s = u'\ud800\udc00\U00010000',
with narrow-char unicode, "for c in s" normally iterates 4 times,
    producing u'\ud800', u'\udc00', u'\ud800', u'\udc00', while
"for c in _chars(s)" will iterate 2 times: producing
u'\ud800\udc00' both times; and with wide-char unicode,
"for c in s" iterates 3 times, producing u'\ud800', u'\udc00',
and u'\U00010000', while "for c in _chars(s)" will iterate 2 times,
producing u'\U00010000' both times.
With this function, the value yielded in each iteration is thus
guaranteed to represent a single abstract character, allowing for
ideal encoding by the built-in codecs, as is necessary when
percent-encoding.
"""
if isinstance(s, str):
for i in s:
yield i
return
s = iter(s)
for i in s:
if u'\ud7ff' < i < SURR_DC00:
try:
j = s.next()
except StopIteration:
raise ValueError("Bad pair: string ends after %r" % i)
if SURR_DC00 <= j < u'\ue000':
yield i + j
else:
raise ValueError("Bad pair: %r (bad second half)" % (i+j))
elif SURR_DC00 <= i < u'\ue000':
raise ValueError("Bad pair: %r (no first half)" % i)
else:
yield i
def percent_encode(s, encoding='utf-8', encodeReserved=True, spaceToPlus=False,
nlChars=None, reservedChars=RESERVED):
"""
[*** Experimental API ***] This function applies percent-encoding, as
described in RFC 3986 sec. 2.1, to the given string, in order to prepare
the string for use in a URI. It replaces characters that are not allowed
in a URI. By default, it also replaces characters in the reserved set,
which normally includes the generic URI component delimiters ":" "/"
"?" \"#\" "[" "]" "@" and the subcomponent delimiters "!" "$" "&" "\'" "("
")" "*" "+" "," ";" "=".
Ideally, this function should be used on individual components or
subcomponents of a URI prior to assembly of the complete URI, not
afterward, because this function has no way of knowing which characters
in the reserved set are being used for their reserved purpose and which
are part of the data. By default it assumes that they are all being used
as data, thus they all become percent-encoded.
The characters in the reserved set can be overridden from the default by
setting the reservedChars argument. The percent-encoding of characters
in the reserved set can be disabled by unsetting the encodeReserved flag.
Do this if the string is an already-assembled URI or a URI component,
such as a complete path.
If the given string is Unicode, the name of the encoding given in the
encoding argument will be used to determine the percent-encoded octets
for characters that are not in the U+0000 to U+007F range. The codec
identified by the encoding argument must return a byte string.
If the given string is not Unicode, the encoding argument is ignored and
the string is interpreted to represent literal octets, rather than
characters. Octets above \\x7F will be percent-encoded as-is, e.g., \\xa0
becomes %A0, not, say, %C2%A0.
The spaceToPlus flag controls whether space characters are changed to
"+" characters in the result, rather than being percent-encoded.
Generally, this is not required, and given the status of "+" as a
reserved character, is often undesirable. But it is required in certain
situations, such as when generating application/x-www-form-urlencoded
content or RFC 3151 public identifier URNs, so it is supported here.
The nlChars argument, if given, is a sequence type in which each member
is a substring that indicates a "new line". Occurrences of this substring
will be replaced by '%0D%0A' in the result, as is required when generating
application/x-www-form-urlencoded content.
This function is similar to urllib.quote(), but is more conformant and
Unicode-friendly. Suggestions for improvements welcome.
"""
res = ''
is_unicode = isinstance(s, unicode)
    if nlChars is not None:
        for c in nlChars:
            # each new-line marker becomes CRLF (str.replace returns a new
            # string, so the result must be reassigned)
            s = s.replace(c, '\r\n')
#FIXME: use re.subn with substitution function for big speed-up
for c in _chars(s):
        # surrogate pair? -> percent-encode according to given encoding
        if is_unicode and len(c) > 1:
for octet in c.encode(encoding):
res += '%%%02X' % ord(octet)
# not unreserved?
elif UNRESERVED_PATTERN.match(c) is None:
cp = ord(c)
# ASCII range?
if cp < 128:
# space? -> plus if desired
if spaceToPlus and c == ' ':
res += '+'
# reserved? -> percent-encode if desired
elif c in reservedChars:
if encodeReserved:
res += '%%%02X' % cp
else:
res += c
# not unreserved or reserved, so percent-encode
# FIXME: should percent-encode according to given encoding;
# ASCII range is not special!
else:
res += '%%%02X' % cp
# non-ASCII-range unicode?
elif is_unicode:
# percent-encode according to given encoding
for octet in c.encode(encoding):
res += '%%%02X' % ord(octet)
# non-ASCII str; percent-encode the bytes
else:
for octet in c:
res += '%%%02X' % ord(octet)
# unreserved -> safe to use as-is
else:
res += c
return res
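# Examples (cf. urllib.quote; results shown assume the defaults):
#
#   percent_encode('a b&c')                  # -> 'a%20b%26c'
#   percent_encode('a b', spaceToPlus=True)  # -> 'a+b'
#   percent_encode('/a/x y', encodeReserved=False)  # -> '/a/x%20y'
#   percent_encode(u'r\xe9sum\xe9')          # -> u'r%C3%A9sum%C3%A9'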
def percent_decode(s, encoding='utf-8', decodable=None):
"""
[*** Experimental API ***] Reverses the percent-encoding of the given
string.
This function is similar to urllib.unquote(), but can also process a
Unicode string, not just a regular byte string.
By default, all percent-encoded sequences are decoded, but if a byte
string is given via the 'decodable' argument, only the sequences
corresponding to those octets will be decoded.
If the string is Unicode, the percent-encoded sequences are converted to
bytes, then converted back to Unicode according to the encoding given in
the encoding argument. For example, by default, u'abc%E2%80%A2' will be
converted to u'abc\u2022', because byte sequence E2 80 A2 represents
character U+2022 in UTF-8.
If the string is not Unicode, the percent-encoded octets are just
converted to bytes, and the encoding argument is ignored. For example,
'abc%E2%80%A2' will be converted to 'abc\xe2\x80\xa2'.
This function is intended for use on the portions of a URI that are
delimited by reserved characters (see percent_encode), or on a value from
data of media type application/x-www-form-urlencoded.
"""
# Most of this comes from urllib.unquote().
# urllib.unquote(), if given a unicode argument, does not decode
# percent-encoded octets above %7F.
is_unicode = isinstance(s, unicode)
if is_unicode:
mychr = unichr
else:
mychr = chr
list_ = s.split('%')
res = [list_[0]]
myappend = res.append
del list_[0]
for item in list_:
if item[1:2]:
try:
c = mychr(int(item[:2], 16))
if decodable is None:
myappend(c + item[2:])
elif c in decodable:
myappend(c + item[2:])
else:
myappend('%' + item)
except ValueError:
myappend('%' + item)
else:
myappend('%' + item)
s = ''.join(res)
# If the original input was unicode, then we assume it represented
# characters; e.g., u'%E2%80%A2' -> '\xe2\x80\xa2' -> u'\u2022'
# (assuming UTF-8 was the basis for percent-encoding). However,
# at this point in the implementation, variable s would actually be
# u'\u00e2\u0080\u00a2', so we first convert it to bytes (via an
# iso-8859-1 encode) in order to get '\xe2\x80\xa2'. Then we decode back
# to unicode according to the desired encoding (UTF-8 by default) in
# order to produce u'\u2022'.
if is_unicode:
s = s.encode('iso-8859-1').decode(encoding)
return s
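# Examples (mirroring the docstring above):
#
#   percent_decode('abc%E2%80%A2')    # -> 'abc\xe2\x80\xa2'
#   percent_decode(u'abc%E2%80%A2')   # -> u'abc\u2022'
#   percent_decode('a%2Fb%20c', decodable='/')  # -> 'a/b%20c'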
def absolutize(uriRef, baseUri, limit_schemes=None):
"""
Resolves a URI reference to absolute form, effecting the result of RFC
3986 section 5. The URI reference is considered to be relative to the
given base URI.
It is the caller's responsibility to ensure that the base URI matches
the absolute-URI syntax rule of RFC 3986, and that its path component
does not contain '.' or '..' segments if the scheme is hierarchical.
Unexpected results may occur otherwise.
This function only conducts a minimal sanity check in order to determine
    if relative resolution is possible: it raises an IriError if the base
URI does not have a scheme component. While it is true that the base URI
is irrelevant if the URI reference has a scheme, an exception is raised
in order to signal that the given string does not even come close to
meeting the criteria to be usable as a base URI.
It is the caller's responsibility to make a determination of whether the
URI reference constitutes a "same-document reference", as defined in RFC
2396 or RFC 3986. As per the spec, dereferencing a same-document
reference "should not" involve retrieval of a new representation of the
referenced resource. Note that the two specs have different definitions
of same-document reference: RFC 2396 says it is *only* the cases where the
    reference is the empty string, or "#" followed by a fragment; RFC 3986
    requires making a comparison of the base URI to the absolute form of the
    reference (as returned by this function), minus its fragment component,
    if any.
This function is similar to urlparse.urljoin() and urllib.basejoin().
Those functions, however, are (as of Python 2.3) outdated, buggy, and/or
designed to produce results acceptable for use with other core Python
libraries, rather than being earnest implementations of the relevant
specs. Their problems are most noticeable in their handling of
same-document references and 'file:' URIs, both being situations that
come up far too often to consider the functions reliable enough for
general use.
"""
# Reasons to avoid using urllib.basejoin() and urlparse.urljoin():
# - Both are partial implementations of long-obsolete specs.
# - Both accept relative URLs as the base, which no spec allows.
# - urllib.basejoin() mishandles the '' and '..' references.
# - If the base URL uses a non-hierarchical or relative path,
# or if the URL scheme is unrecognized, the result is not
# always as expected (partly due to issues in RFC 1808).
# - If the authority component of a 'file' URI is empty,
# the authority component is removed altogether. If it was
# not present, an empty authority component is in the result.
# - '.' and '..' segments are not always collapsed as well as they
# should be (partly due to issues in RFC 1808).
# - Effective Python 2.4, urllib.basejoin() *is* urlparse.urljoin(),
# but urlparse.urljoin() is still based on RFC 1808.
# This procedure is based on the pseudocode in RFC 3986 sec. 5.2.
#
# ensure base URI is absolute
if is_absolute(uriRef):
return uriRef
if not baseUri or not is_absolute(baseUri):
raise IriError(IriError.RELATIVE_BASE_URI,
base=baseUri, ref=uriRef)
if limit_schemes and get_scheme(baseUri) not in limit_schemes:
scheme = get_scheme(baseUri)
raise IriError(IriError.UNSUPPORTED_SCHEME, scheme=scheme)
# shortcut for the simplest same-document reference cases
if uriRef == '' or uriRef[0] == '#':
return baseUri.split('#')[0] + uriRef
# ensure a clean slate
tScheme = tAuth = tPath = tQuery = None
# parse the reference into its components
(rScheme, rAuth, rPath, rQuery, rFrag) = split_uri_ref(uriRef)
# if the reference is absolute, eliminate '.' and '..' path segments
# and skip to the end
if rScheme is not None:
tScheme = rScheme
tAuth = rAuth
tPath = remove_dot_segments(rPath)
tQuery = rQuery
else:
# the base URI's scheme, and possibly more, will be inherited
(bScheme, bAuth, bPath, bQuery, bFrag) = split_uri_ref(baseUri)
# if the reference is a net-path, just eliminate '.' and '..' path
# segments; no other changes needed.
if rAuth is not None:
tAuth = rAuth
tPath = remove_dot_segments(rPath)
tQuery = rQuery
# if it's not a net-path, we need to inherit pieces of the base URI
else:
# use base URI's path if the reference's path is empty
if not rPath:
tPath = bPath
                # use the reference's query if it is defined (even if
                # empty), else the base URI's (RFC 3986 sec. 5.2.2)
                tQuery = rQuery if rQuery is not None else bQuery
# the reference's path is not empty
else:
# just use the reference's path if it's absolute
if rPath[0] == '/':
tPath = remove_dot_segments(rPath)
# merge the reference's relative path with the base URI's path
else:
if bAuth is not None and not bPath:
tPath = '/' + rPath
else:
tPath = bPath[:bPath.rfind('/')+1] + rPath
tPath = remove_dot_segments(tPath)
# use the reference's query
tQuery = rQuery
# since the reference isn't a net-path,
# use the authority from the base URI
tAuth = bAuth
# inherit the scheme from the base URI
tScheme = bScheme
# always use the reference's fragment (but no need to define another var)
#tFrag = rFrag
# now compose the target URI (RFC 3986 sec. 5.3)
return unsplit_uri_ref((tScheme, tAuth, tPath, tQuery, rFrag))
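# Examples, from RFC 3986 section 5.4 (base 'http://a/b/c/d;p?q'):
#
#   absolutize('g', 'http://a/b/c/d;p?q')     # -> 'http://a/b/c/g'
#   absolutize('../g', 'http://a/b/c/d;p?q')  # -> 'http://a/b/g'
#   absolutize('#s', 'http://a/b/c/d;p?q')    # -> 'http://a/b/c/d;p?q#s'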
def relativize(targetUri, againstUri, subPathOnly=False):
"""
This method returns a relative URI that is consistent with `targetURI`
when resolved against `againstUri`. If no such relative URI exists, for
whatever reason, this method returns `None`.
To be precise, if a string called `rel` exists such that
``absolutize(rel, againstUri) == targetUri``, then `rel` is returned by
this function. In these cases, `relativize` is in a sense the inverse
of `absolutize`. In all other cases, `relativize` returns `None`.
The following idiom may be useful for obtaining compliant relative
reference strings (e.g. for `path`) for use in other methods of this
package::
path = relativize(os_path_to_uri(path), os_path_to_uri('.'))
If `subPathOnly` is `True`, then this method will only return a relative
reference if such a reference exists relative to the last hierarchical
segment of `againstUri`. In particular, this relative reference will
not start with '/' or '../'.
"""
# We might want to change the semantics slightly to allow a relative
# target URI to be a valid "relative path" (and just return it). For
# now, though, absolute URIs only.
if not is_absolute(targetUri) or not is_absolute(againstUri):
return None
targetUri = normalize_path_segments_in_uri(targetUri)
againstUri = normalize_path_segments_in_uri(againstUri)
splitTarget = list(split_uri_ref(absolutize(targetUri, targetUri)))
splitAgainst = list(split_uri_ref(absolutize(againstUri, againstUri)))
if not splitTarget[:2] == splitAgainst[:2]:
return None
subPathSplit = [None, None] + splitTarget[2:]
targetPath = splitTarget[2]
againstPath = splitAgainst[2] or '/'
leadingSlash = False
if targetPath[:1] == '/' or againstPath[:1] == '/':
if targetPath[:1] == againstPath[:1]:
targetPath = targetPath[1:]
againstPath = againstPath[1:]
leadingSlash = True
else:
return None
targetPathSegments = targetPath.split('/')
againstPathSegments = againstPath.split('/')
# Count the number of path segments in common.
i = 0
while True:
# Stop if we get to the end of either segment list.
if not(len(targetPathSegments) > i and
len(againstPathSegments) > i):
break
# Increment the count when the lists agree, unless we are at the
# last segment of either list and that segment is an empty segment.
# We bail on this case because an empty ending segment in one path
# must not match a mid-path empty segment in the other.
if (targetPathSegments[i] == againstPathSegments[i]
and not (i + 1 == len(againstPathSegments) and
'' == againstPathSegments[i])
and not (i + 1 == len(targetPathSegments) and
'' == targetPathSegments[i])):
i = i + 1
# Otherwise stop.
else:
break
# The target path has `i` segments in common with the basis path, and
# the last segment (after the final '/') doesn't matter; we'll need to
# traverse the rest.
traverse = len(againstPathSegments) - i - 1
relativePath = None
# If the two paths do not agree on any segments, we have two special
# cases.
if i == 0 and leadingSlash:
# First, if the ruling path only had one segment, then our result
# can be a relative path.
if len(againstPathSegments) == 1:
relativePath = targetPath
# Otherwise, the ruling path had a number of segments, so our result
# must be an absolute path (unless we only want a subpath result, in
# which case none exists).
elif subPathOnly:
return None
else:
relativePath = '/' + targetPath
elif traverse > 0:
if subPathOnly:
return None
relativePath = (("../" * traverse) +
'/'.join(targetPathSegments[i:]))
# If the ith segment of the target path is empty and that is not the
# final segment, then we need to precede the path with "./" to make it a
# relative path.
elif (len(targetPathSegments) > i + 1 and
'' == targetPathSegments[i]):
relativePath = "./" + '/'.join(targetPathSegments[i:])
else:
relativePath = '/'.join(targetPathSegments[i:])
return unsplit_uri_ref([None, None, relativePath] + splitTarget[3:])
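#Usage sketch for relativize(), hand-derived from the algorithm above, so
#treat the expected values as illustrative:
#
# relativize('http://example.org/a/b/c', 'http://example.org/a/x/y')
#   -> '../b/c'  (round-trips: absolutize('../b/c', againstUri) == targetUri)
# relativize('http://example.org/a/b', 'http://other.example/a/')
#   -> None      (scheme/authority differ, so no relative form exists)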
def remove_dot_segments(path):
"""
Supports absolutize() by implementing the remove_dot_segments function
described in RFC 3986 sec. 5.2. It collapses most of the '.' and '..'
segments out of a path without eliminating empty segments. It is intended
to be used during the path merging process and may not give expected
results when used independently. Use normalize_path_segments() or
normalize_path_segments_in_uri() if more general normalization is desired.
"""
# return empty string if entire path is just "." or ".."
if path == '.' or path == '..':
return path[0:0] # preserves string type
# remove all "./" or "../" segments at the beginning
while path:
if path[:2] == './':
path = path[2:]
elif path[:3] == '../':
path = path[3:]
else:
break
# We need to keep track of whether there was a leading slash,
# because we're going to drop it in order to prevent our list of
# segments from having an ambiguous empty first item when we call
# split().
leading_slash = False
if path[:1] == '/':
path = path[1:]
leading_slash = True
# replace a trailing "/." with just "/"
if path[-2:] == '/.':
path = path[:-1]
# convert the segments into a list and process each segment in
# order from left to right.
segments = path.split('/')
keepers = []
segments.reverse()
while segments:
seg = segments.pop()
# '..' means drop the previous kept segment, if any.
# If none, and if the path is relative, then keep the '..'.
# If the '..' was the last segment, ensure
# that the result ends with '/'.
if seg == '..':
if keepers:
keepers.pop()
elif not leading_slash:
keepers.append(seg)
if not segments:
keepers.append('')
# ignore '.' segments and keep all others, even empty ones
elif seg != '.':
keepers.append(seg)
# reassemble the kept segments
return leading_slash * '/' + '/'.join(keepers)
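#Usage sketch. Expected values match the remove_dot_segments examples in
#RFC 3986 sec. 5.2.4:
#
# remove_dot_segments('/a/b/c/./../../g')   -> '/a/g'
# remove_dot_segments('mid/content=5/../6') -> 'mid/6'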
def normalize_case(uriRef, doHost=False):
"""
Returns the given URI reference with the case of the scheme,
percent-encoded octets, and, optionally, the host, all normalized,
implementing section 6.2.2.1 of RFC 3986. The normal form of
scheme and host is lowercase, and the normal form of
percent-encoded octets is uppercase.
The URI reference can be given as either a string or as a sequence as
would be provided by the split_uri_ref function. The return value will
be a string or tuple.
"""
if not isinstance(uriRef, (tuple, list)):
uriRef = split_uri_ref(uriRef)
tup = None
else:
tup = True
# normalize percent-encoded octets
newRef = []
for component in uriRef:
if component:
newRef.append(re.sub('%([0-9a-f][0-9a-f])',
lambda m: m.group(0).upper(), component))
else:
newRef.append(component)
# normalize scheme
scheme = newRef[0]
if scheme:
scheme = scheme.lower()
# normalize host
authority = newRef[1]
if doHost:
if authority:
userinfo, host, port = split_authority(authority)
authority = ''
if userinfo is not None:
authority += '%s@' % userinfo
authority += host.lower()
if port is not None:
authority += ':%s' % port
res = (scheme, authority, newRef[2], newRef[3], newRef[4])
if tup:
return res
else:
return unsplit_uri_ref(res)
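#Usage sketch (illustrative, per RFC 3986 sec. 6.2.2.1):
#
# normalize_case('HTTP://Example.ORG/%7esmith')              -> 'http://Example.ORG/%7Esmith'
# normalize_case('HTTP://Example.ORG/%7esmith', doHost=True) -> 'http://example.org/%7Esmith'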
def normalize_percent_encoding(s):
"""
Given a string representing a URI reference or a component thereof,
returns the string with all percent-encoded octets that correspond to
unreserved characters decoded, implementing section 6.2.2.2 of RFC
3986.
"""
return percent_decode(s, decodable='0123456789%s-._~' % ascii_letters)
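#Usage sketch (illustrative): only octets for unreserved characters are
#decoded; reserved ones such as %2F ('/') should be left encoded:
#
# normalize_percent_encoding('http://example.org/%7Ejane/%2Fetc')
#   -> 'http://example.org/~jane/%2Fetc'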
def normalize_path_segments(path):
"""
Given a string representing the path component of a URI reference having a
hierarchical scheme, returns the string with dot segments ('.' and '..')
removed, implementing section 6.2.2.3 of RFC 3986. If the path is
relative, it is returned with no changes.
"""
if not path or path[:1] != '/':
return path
else:
return remove_dot_segments(path)
def normalize_path_segments_in_uri(uri):
"""
Given a string representing a URI or URI reference having a hierarchical
scheme, returns the string with dot segments ('.' and '..') removed from
the path component, implementing section 6.2.2.3 of RFC 3986. If the
path is relative, the URI or URI reference is returned with no changes.
"""
components = list(split_uri_ref(uri))
components[2] = normalize_path_segments(components[2])
return unsplit_uri_ref(components)
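#Usage sketch (illustrative):
#
# normalize_path_segments('/a/b/../c')                            -> '/a/c'
# normalize_path_segments_in_uri('http://example.org/a/./b/../c') -> 'http://example.org/a/c'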
_urlopener = None
class _data_handler(urllib2.BaseHandler):
"""
A class to handle 'data' URLs.
    The actual handling is done by the urllib.URLopener.open_data() method.
"""
def data_open(self, request):
global _urlopener
if _urlopener is None:
_urlopener = urllib.URLopener()
        return _urlopener.open_data(request.get_full_url())
def resource_to_uri(package, resource):
"""Return a PEP 302 pseudo-URL for the specified resource.
'package' is a Python module name (dot-separated module names) and
'resource' is a '/'-separated pathname.
"""
provider, resource_name = importutil.normalize_resource(package, resource)
if provider.loader:
# Use a 'pkgdata' (PEP 302) pseudo-URL
segments = resource_name.split('/')
if not resource.startswith('/'):
dirname = provider.module_path[len(provider.zip_pre):]
segments[0:0] = dirname.split(os.sep)
path = '/'.join(map(percent_encode, segments))
uri = 'pkgdata://%s/%s' % (package, path)
else:
# Use a 'file' URL
filename = importutil.get_resource_filename(package, resource)
uri = os_path_to_uri(filename)
return uri
class _pep302_handler(urllib2.FileHandler):
"""
    A class to handle opening of PEP 302 pseudo-URLs.
The syntax for this pseudo-URL is:
url := "pkgdata://" module "/" path
module := <Python module name>
path := <'/'-separated pathname>
The "path" portion of the URL will be passed to the get_data() method
of the loader identified by "module" with '/'s converted to the OS
native path separator.
"""
def pep302_open(self, request):
import mimetypes
# get the package and resource components
package = request.get_host()
resource = request.get_selector()
resource = percent_decode(re.sub('%2[fF]', '\\/', resource))
# get the stream associated with the resource
try:
stream = importutil.get_resource_stream(package, resource)
except EnvironmentError, error:
raise urllib2.URLError(str(error))
# compute some simple header fields
try:
stream.seek(0, 2) # go to the end of the stream
except IOError:
data = stream.read()
stream = cStringIO.StringIO(data)
length = len(data)
else:
length = stream.tell()
stream.seek(0, 0) # go to the start of the stream
mtime = importutil.get_resource_last_modified(package, resource)
mtime = _formatdate(mtime)
        mtype = mimetypes.guess_type(resource)[0] or 'text/plain'
headers = ("Content-Type: %s\n"
"Content-Length: %d\n"
"Last-Modified: %s\n" % (mtype, length, mtime))
headers = mimetools.Message(cStringIO.StringIO(headers))
return urllib.addinfourl(stream, headers, request.get_full_url())
_opener = None
def urlopen(url, *args, **kwargs):
"""
A replacement/wrapper for urllib2.urlopen().
Simply calls make_urllib_safe() on the given URL and passes the result
and all other args to urllib2.urlopen().
"""
global _opener
if _opener is None:
_opener = urllib2.build_opener(_data_handler, _pep302_handler)
# work around urllib's intolerance for proper URIs, Unicode, IDNs
stream = _opener.open(make_urllib_safe(url), *args, **kwargs)
stream.name = url
return stream
#=============================================================================
# RFC 3151 implementation
#
def urn_to_public_id(urn):
"""
Converts a URN that conforms to RFC 3151 to a public identifier.
For example, the URN
"urn:publicid:%2B:IDN+example.org:DTD+XML+Bookmarks+1.0:EN:XML"
will be converted to the public identifier
"+//IDN example.org//DTD XML Bookmarks 1.0//EN//XML"
    Raises an IriError if the given URN cannot be converted.
Query and fragment components, if present, are ignored.
"""
if urn is not None and urn:
(scheme, auth, path, query, frag) = split_uri_ref(urn)
if scheme is not None and scheme.lower() == 'urn':
pp = path.split(':', 1)
if len(pp) > 1:
urn_scheme = percent_decode(pp[0])
if urn_scheme == 'publicid':
publicid = pp[1].replace('+', ' ')
publicid = publicid.replace(':', '//')
publicid = publicid.replace(';', '::')
publicid = percent_decode(publicid)
return publicid
raise IriError(IriError.INVALID_PUBLIC_ID_URN, urn=urn)
def public_id_to_urn(publicid):
"""
Converts a public identifier to a URN that conforms to RFC 3151.
"""
# 1. condense whitespace, XSLT-style
publicid = re.sub('[ \t\r\n]+', ' ', publicid.strip())
# 2. // -> :
# :: -> ;
# space -> +
# + ; ' ? # % / : -> percent-encode
# (actually, the intent of the RFC is to not conflict with RFC 2396,
# so any character not in the unreserved set must be percent-encoded)
r = ':'.join([';'.join([percent_encode(dcpart, spaceToPlus=True)
for dcpart in dspart.split('::')])
for dspart in publicid.split('//')])
return 'urn:publicid:%s' % r
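#Usage sketch: public_id_to_urn() inverts urn_to_public_id() for the example
#in the docstring above:
#
# public_id_to_urn('+//IDN example.org//DTD XML Bookmarks 1.0//EN//XML')
#   -> 'urn:publicid:%2B:IDN+example.org:DTD+XML+Bookmarks+1.0:EN:XML'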
#=============================================================================
# Miscellaneous public functions
#
SCHEME_PATTERN = re.compile(r'([a-zA-Z][a-zA-Z0-9+\-.]*):')
def get_scheme(uriRef):
"""
Obtains, with optimum efficiency, just the scheme from a URI reference.
Returns a string, or if no scheme could be found, returns None.
"""
# Using a regex seems to be the best option. Called 50,000 times on
# different URIs, on a 1.0-GHz PIII with FreeBSD 4.7 and Python
# 2.2.1, this method completed in 0.95s, and 0.05s if there was no
# scheme to find. By comparison,
# urllib.splittype()[0] took 1.5s always;
# Ft.Lib.Uri.split_uri_ref()[0] took 2.5s always;
# urlparse.urlparse()[0] took 3.5s always.
m = SCHEME_PATTERN.match(uriRef)
if m is None:
return None
else:
return m.group(1)
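#Usage sketch (illustrative):
#
# get_scheme('http://example.org/stuff') -> 'http'
# get_scheme('relative/reference')       -> None (no scheme present)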
def strip_fragment(uriRef):
"""
Returns the given URI or URI reference with the fragment component, if
any, removed.
"""
return split_fragment(uriRef)[0]
def is_absolute(identifier):
"""
Given a string believed to be a URI or URI reference, tests that it is
absolute (as per RFC 3986), not relative -- i.e., that it has a scheme.
"""
# We do it this way to avoid compiling another massive regex.
return get_scheme(identifier) is not None
_ntPathToUriSetupCompleted = False
def _initNtPathPattern():
"""
Called internally to compile the regular expression used by
os_path_to_uri() on Windows just once, the first time the function is
called.
"""
global _ntPathToUriSetupCompleted
if _ntPathToUriSetupCompleted:
return
# path variations we try to handle:
#
# a\b\c (a relative path)
# file:a/b/c is the best we can do.
# Dot segments should not be collapsed in the final URL.
#
# \a\b\c
# file:///a/b/c is correct
#
# C:\a\b\c
# urllib.urlopen() requires file:///C|/a/b/c or ///C|/a/b/c
# because it currently relies on urllib.url2pathname().
# Windows resolver will accept the first or file:///C:/a/b/c
#
# \\host\share\x\y\z
# Windows resolver accepts file://host/share/x/y/z
# Netscape (4.x?) accepts file:////host/share/x/y/z
#
# If an entire drive is shared, the share name might be
# $drive$, like this: \\host\$c$\a\b\c
# We could recognize it as a drive letter, but it's probably
# best not to treat it specially, since it will never occur
# without a host. It's just another share name.
#
# There's also a weird C:\\host\share\x\y\z convention
# that is hard to find any information on. Presumably the C:
# is ignored, but the question is do we put it in the URI?
#
# So the format, in ABNF, is roughly:
# [ drive ":" ] ( [ "\\" host "\" share ] abs-path ) / rel-path
drive = r'(?P<drive>[A-Za-z])'
host = r'(?P<host>[^\\]*)'
share = r'(?P<share>[^\\]+)'
abs_path = r'(?P<abspath>\\(?:[^\\]+\\?)*)'
rel_path = r'(?P<relpath>(?:[^\\]+\\?)*)'
NT_PATH_REGEX = r"^(?:%s:)?(?:(?:(?:\\\\%s\\%s)?%s)|%s)$" % (
drive,
host,
share,
abs_path,
rel_path)
global NT_PATH_PATTERN
NT_PATH_PATTERN = re.compile(NT_PATH_REGEX)
# We can now use NT_PATH_PATTERN.match(path) to parse the path and use
# the returned object's .groupdict() method to get a dictionary of
# path subcomponents. For example,
# NT_PATH_PATTERN.match(r"\\h\$c$\x\y\z").groupdict()
# yields
# {'abspath': r'\x\y\z',
# 'share': '$c$',
# 'drive': None,
# 'host': 'h',
# 'relpath': None
# }
# Note that invalid paths such as r'\\foo\bar'
# (a UNC path with no trailing '\') will not match at all.
_ntPathToUriSetupCompleted = True
return
def _splitNtPath(path):
"""
Called internally to get a tuple representing components of the given
Windows path.
"""
if not _ntPathToUriSetupCompleted:
_initNtPathPattern()
m = NT_PATH_PATTERN.match(path)
if not m:
raise ValueError("Path %s is not a valid Windows path.")
components = m.groupdict()
(drive, host, share, abspath, relpath) = (
components['drive'],
components['host'],
components['share'],
components['abspath'],
components['relpath'],
)
return (drive, host, share, abspath, relpath)
def _get_drive_letter(s):
"""
Called internally to get a drive letter from a string, if the string
is a drivespec.
"""
if len(s) == 2 and s[1] in ':|' and s[0] in ascii_letters:
return s[0]
return
def os_path_to_uri(path, attemptAbsolute=True, osname=None):
r"""This function converts an OS-specific file system path to a URI of
the form 'file:///path/to/the/file'.
In addition, if the path is absolute, any dot segments ('.' or '..') will
be collapsed, so that the resulting URI can be safely used as a base URI
by functions such as absolutize().
The given path will be interpreted as being one that is appropriate for
use on the local operating system, unless a different osname argument is
given.
If the given path is relative, an attempt may be made to first convert
the path to absolute form by interpreting the path as being relative
to the current working directory. This is the case if the attemptAbsolute
flag is True (the default). If attemptAbsolute is False, a relative
path will result in a URI of the form file:relative/path/to/a/file .
attemptAbsolute has no effect if the given path is not for the
local operating system.
On Windows, the drivespec will become the first step in the path component
of the URI. If the given path contains a UNC hostname, this name will be
used for the authority component of the URI.
Warning: Some libraries, such as urllib.urlopen(), may not behave as
expected when given a URI generated by this function. On Windows you may
want to call re.sub('(/[A-Za-z]):', r'\1|', uri) on the URI to prepare it
for use by functions such as urllib.url2pathname() or urllib.urlopen().
This function is similar to urllib.pathname2url(), but is more featureful
and produces better URIs.
"""
# Problems with urllib.pathname2url() on all platforms include:
# - the generated URL has no scheme component;
# - percent-encoding is incorrect, due to urllib.quote() issues.
#
# Problems with urllib.pathname2url() on Windows include:
# - trailing backslashes are ignored;
# - all leading backslashes are considered part of the absolute
# path, so UNC paths aren't properly converted (assuming that
# a proper conversion would be to use the UNC hostname in the
# hostname component of the resulting URL);
# - non-leading, consecutive backslashes are collapsed, which may
# be desirable but is correcting what is, arguably, user error;
# - the presence of a letter followed by ":" is believed to
# indicate a drivespec, no matter where it occurs in the path,
# which may have been considered a safe assumption since the
    #     drivespec is the only place where ":" can legally appear, but
    #     there's no need to search the whole string for it;
# - the ":" in a drivespec is converted to "|", a convention that
# is becoming increasingly less common. For compatibility, most
# web browser resolvers will accept either "|" or ":" in a URL,
# but urllib.urlopen(), relying on url2pathname(), expects "|"
# only. In our opinion, the callers of those functions should
# ensure that the arguments are what are expected. Our goal
# here is to produce a quality URL, not a URL designed to play
# nice with urllib's bugs & limitations.
# - it treats "/" the same as "\", which results in being able to
# call the function with a posix-style path, a convenience
# which allows the caller to get sloppy about whether they are
    #     really passing a path that is appropriate for the desired OS.
# We do this a lot in 4Suite.
#
# There is some disagreement over whether a drivespec should be placed in
# the authority or in the path. Placing it in the authority means that
# ":", which has a reserved purpose in the authority, cannot be used --
# this, along with the fact that prior to RFC 3986, percent-encoded
# octets were disallowed in the authority, is presumably a reason why "|"
# is a popular substitute for ":". Using the authority also allows for
    # the drive letter to be retained when resolving references like this:
# reference '/a/b/c' + base 'file://C|/x/y/z' = 'file://C|/a/b/c'
# The question is, is that really the ideal result? Should the drive info
# be inherited from the base URI, if it is unspecified in a reference
# that is otherwise representing an absolute path? Using the authority
# for this purpose means that it would be overloaded if we also used it
# to represent the host part of a UNC path. The alternative is to put the
# UNC host in the path (e.g. 'file:////host/share/path'), but when such a
# URI is used as a base URI, relative reference resolution often returns
# unexpected results.
#
osname = osname or os.name
if osname == 'nt':
if WINDOWS_SLASH_COMPAT:
path = path.replace('/','\\')
(drive, host, share, abspath, relpath) = _splitNtPath(path)
if attemptAbsolute and relpath is not None and osname == os.name:
path = os.path.join(os.getcwd(), relpath)
(drive, host, share, abspath, relpath) = _splitNtPath(path)
path = abspath or relpath
path = '/'.join([percent_encode(seg) for seg in path.split('\\')])
uri = 'file:'
if host:
uri += '//%s' % percent_encode(host)
elif abspath:
uri += '//'
if drive:
uri += '/%s:' % drive.upper()
if share:
uri += '/%s' % percent_encode(share)
if abspath:
path = remove_dot_segments(path)
uri += path
elif osname == 'posix':
try:
from posixpath import isabs
except ImportError:
isabs = lambda p: p[:1] == '/'
pathisabs = isabs(path)
if pathisabs:
path = remove_dot_segments(path)
elif attemptAbsolute and osname == os.name:
path = os.path.join(os.getcwd(), path)
pathisabs = isabs(path)
path = '/'.join([percent_encode(seg) for seg in path.split('/')])
if pathisabs:
uri = 'file://%s' % path
else:
uri = 'file:%s' % path
else:
# 4Suite only supports posix and nt, so we're not going to worry about
# improving upon urllib.pathname2url() for other OSes.
if osname == os.name:
from urllib import pathname2url
if attemptAbsolute and not os.path.isabs(path):
path = os.path.join(os.getcwd(), path)
else:
try:
module = '%surl2path' % osname
exec 'from %s import pathname2url' % module
except ImportError:
                raise IriError(IriError.UNSUPPORTED_PLATFORM,
                               platform=osname, function=os_path_to_uri)
uri = 'file:' + pathname2url(path)
return uri
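#Usage sketch, hand-derived from the branches above (illustrative only):
#
# os_path_to_uri('/etc/passwd', osname='posix')   -> 'file:///etc/passwd'
# os_path_to_uri(r'C:\autoexec.bat', osname='nt') -> 'file:///C:/autoexec.bat'
# os_path_to_uri(r'\\host\share\x', osname='nt')  -> 'file://host/share/x'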
def uri_to_os_path(uri, attemptAbsolute=True, encoding='utf-8', osname=None):
r"""
This function converts a URI reference to an OS-specific file system path.
If the URI reference is given as a Unicode string, then the encoding
argument determines how percent-encoded components are interpreted, and
the result will be a Unicode string. If the URI reference is a regular
byte string, the encoding argument is ignored and the result will be a
byte string in which percent-encoded octets have been converted to the
bytes they represent. For example, the trailing path segment of
u'file:///a/b/%E2%80%A2' will by default be converted to u'\u2022',
because sequence E2 80 A2 represents character U+2022 in UTF-8. If the
string were not Unicode, the trailing segment would become the 3-byte
string '\xe2\x80\xa2'.
The osname argument determines for what operating system the resulting
path is appropriate. It defaults to os.name and is typically the value
'posix' on Unix systems (including Mac OS X and Cygwin), and 'nt' on
Windows NT/2000/XP.
This function is similar to urllib.url2pathname(), but is more featureful
and produces better paths.
If the given URI reference is not relative, its scheme component must be
'file', and an exception will be raised if it isn't.
In accordance with RFC 3986, RFC 1738 and RFC 1630, an authority
component that is the string 'localhost' will be treated the same as an
empty authority.
Dot segments ('.' or '..') in the path component are NOT collapsed.
If the path component of the URI reference is relative and the
attemptAbsolute flag is True (the default), then the resulting path
will be made absolute by considering the path to be relative to the
current working directory. There is no guarantee that such a result
will be an accurate interpretation of the URI reference.
attemptAbsolute has no effect if the
result is not being produced for the local operating system.
Fragment and query components of the URI reference are ignored.
If osname is 'posix', the authority component must be empty or just
'localhost'. An exception will be raised otherwise, because there is no
standard way of interpreting other authorities. Also, if '%2F' is in a
path segment, it will be converted to r'\/' (a backslash-escaped forward
slash). The caller may need to take additional steps to prevent this from
being interpreted as if it were a path segment separator.
If osname is 'nt', a drivespec is recognized as the first occurrence of a
single letter (A-Z, case-insensitive) followed by '|' or ':', occurring as
either the first segment of the path component, or (incorrectly) as the
entire authority component. A UNC hostname is recognized as a non-empty,
non-'localhost' authority component that has not been recognized as a
drivespec, or as the second path segment if the first path segment is
empty. If a UNC hostname is detected, the result will begin with
'\\<hostname>\'. If a drivespec was detected also, the first path segment
will be '$<driveletter>$'. If a drivespec was detected but a UNC hostname
was not, then the result will begin with '<driveletter>:'.
Windows examples:
'file:x/y/z' => r'x\y\z';
'file:/x/y/z' (not recommended) => r'\x\y\z';
'file:///x/y/z' => r'\x\y\z';
'file:///c:/x/y/z' => r'C:\x\y\z';
'file:///c|/x/y/z' => r'C:\x\y\z';
'file:///c:/x:/y/z' => r'C:\x:\y\z' (bad path, valid interpretation);
'file://c:/x/y/z' (not recommended) => r'C:\x\y\z';
'file://host/share/x/y/z' => r'\\host\share\x\y\z';
'file:////host/share/x/y/z' => r'\\host\share\x\y\z'
'file://host/x:/y/z' => r'\\host\x:\y\z' (bad path, valid interp.);
'file://localhost/x/y/z' => r'\x\y\z';
'file://localhost/c:/x/y/z' => r'C:\x\y\z';
'file:///C:%5Cx%5Cy%5Cz' (not recommended) => r'C:\x\y\z'
"""
(scheme, authority, path) = split_uri_ref(uri)[0:3]
if scheme and scheme != 'file':
raise IriError(IriError.NON_FILE_URI, uri=uri)
# enforce 'localhost' URI equivalence mandated by RFCs 1630, 1738, 3986
if authority == 'localhost':
authority = None
osname = osname or os.name
if osname == 'nt':
# Get the drive letter and UNC hostname, if any. Fragile!
unchost = None
driveletter = None
if authority:
authority = percent_decode(authority, encoding=encoding)
if _get_drive_letter(authority):
driveletter = authority[0]
else:
unchost = authority
if not (driveletter or unchost):
# Note that we have to treat %5C (backslash) as a path separator
# in order to catch cases like file:///C:%5Cx%5Cy%5Cz => C:\x\y\z
# We will also treat %2F (slash) as a path separator for
# compatibility.
if WINDOWS_SLASH_COMPAT:
regex = '%2[fF]|%5[cC]'
else:
regex = '%5[cC]'
path = re.sub(regex, '/', path)
segs = path.split('/')
if not segs[0]:
# //host/... => [ '', '', 'host', '...' ]
if len(segs) > 2 and not segs[1]:
unchost = percent_decode(segs[2], encoding=encoding)
path = len(segs) > 3 and '/' + '/'.join(segs[3:]) or ''
# /C:/... => [ '', 'C:', '...' ]
elif len(segs) > 1:
driveletter = _get_drive_letter(percent_decode(segs[1],
encoding=encoding))
if driveletter:
path = len(segs) > 2 and '/' + '/'.join(segs[2:]) or ''
else:
# C:/... => [ 'C:', '...' ]
driveletter = _get_drive_letter(percent_decode(segs[0],
encoding=encoding))
if driveletter:
path = len(segs) > 1 and path[2:] or ''
# Do the conversion of the path part
sep = '\\' # we could try to import from ntpath,
# but at this point it would just waste cycles.
path = percent_decode(path.replace('/', sep), encoding=encoding)
# Assemble and return the path
if unchost:
# It's a UNC path of the form \\host\share\path.
# driveletter is ignored.
path = r'%s%s%s' % (sep * 2, unchost, path)
elif driveletter:
# It's an ordinary Windows path of the form C:\x\y\z
path = r'%s:%s' % (driveletter.upper(), path)
# It's an ordinary Windows path of the form \x\y\z or x\y\z.
# We need to make sure it doesn't end up looking like a UNC
# path, so we discard extra leading backslashes
elif path[:1] == '\\':
path = re.sub(r'^\\+', '\\\\', path)
# It's a relative path. If the caller wants it absolute, attempt to comply
elif attemptAbsolute and osname == os.name:
path = os.path.join(os.getcwd(), path)
return path
elif osname == 'posix':
# a non-empty, non-'localhost' authority component is ambiguous on Unix
if authority:
raise IriError(IriError.UNIX_REMOTE_HOST_FILE_URI, uri=uri)
# %2F in a path segment would indicate a literal '/' in a
# filename, which is possible on posix, but there is no
# way to consistently represent it. We'll backslash-escape
# the literal slash and leave it to the caller to ensure it
# gets handled the way they want.
path = percent_decode(re.sub('%2[fF]', '\\/', path), encoding=encoding)
# If it's relative and the caller wants it absolute, attempt to comply
if attemptAbsolute and osname == os.name and not os.path.isabs(path):
path = os.path.join(os.getcwd(), path)
return path
else:
# 4Suite only supports posix and nt, so we're not going to worry about
# improving upon urllib.pathname2url() for other OSes.
if osname == os.name:
from urllib import url2pathname
else:
try:
module = '%surl2path' % osname
exec 'from %s import url2pathname' % module
except ImportError:
raise IriError(IriError.UNSUPPORTED_PLATFORM,
platform=osname, function=uri_to_os_path)
# drop the scheme before passing to url2pathname
if scheme:
uri = uri[len(scheme)+1:]
return url2pathname(uri)
REG_NAME_HOST_PATTERN = re.compile(r"^(?:(?:[0-9A-Za-z\-_\.!~*'();&=+$,]|(?:%[0-9A-Fa-f]{2}))*)$")
def make_urllib_safe(uriRef):
"""
Makes the given RFC 3986-conformant URI reference safe for passing
to legacy urllib functions. The result may not be a valid URI.
As of Python 2.3.3, urllib.urlopen() does not fully support
internationalized domain names, it does not strip fragment components,
and on Windows, it expects file URIs to use '|' instead of ':' in the
path component corresponding to the drivespec. It also relies on
urllib.unquote(), which mishandles unicode arguments. This function
produces a URI reference that will work around these issues, although
the IDN workaround is limited to Python 2.3 only. May raise a
UnicodeEncodeError if the URI reference is Unicode and erroneously
contains non-ASCII characters.
"""
# IDN support requires decoding any percent-encoded octets in the
# host part (if it's a reg-name) of the authority component, and when
# doing DNS lookups, applying IDNA encoding to that string first.
# As of Python 2.3, there is an IDNA codec, and the socket and httplib
# modules accept Unicode strings and apply IDNA encoding automatically
# where necessary. However, urllib.urlopen() has not yet been updated
# to do the same; it raises an exception if you give it a Unicode
# string, and does no conversion on non-Unicode strings, meaning you
# have to give it an IDNA string yourself. We will only support it on
# Python 2.3 and up.
#
# see if host is a reg-name, as opposed to IPv4 or IPv6 addr.
(scheme, auth, path, query, frag) = split_uri_ref(uriRef)
if auth and auth.find('@') > -1:
userinfo, hostport = auth.split('@')
else:
userinfo = None
hostport = auth
if hostport and hostport.find(':') > -1:
host, port = hostport.split(':')
else:
host = hostport
port = None
if host and REG_NAME_HOST_PATTERN.match(host):
# percent-encoded hostnames will always fail DNS lookups
host = percent_decode(host)
# IDNA-encode if possible.
# We shouldn't do this for schemes that don't need DNS lookup,
# but are there any (that you'd be calling urlopen for)?
if sys.version_info[0:2] >= (2,3):
if isinstance(host, str):
host = host.decode('utf-8')
host = host.encode('idna')
# reassemble the authority with the new hostname
# (percent-decoded, and possibly IDNA-encoded)
auth = ''
if userinfo:
auth += userinfo + '@'
auth += host
if port:
auth += ':' + port
# On Windows, ensure that '|', not ':', is used in a drivespec.
if os.name == 'nt' and scheme == 'file':
path = path.replace(':','|',1)
# Note that we drop fragment, if any. See RFC 3986 sec. 3.5.
uri = unsplit_uri_ref((scheme, auth, path, query, None))
# parts of urllib are not unicode safe
if isinstance(uri, unicode):
try:
# should work if IDNA encoding was applied (Py 2.3+)
uri = uri.encode('us-ascii')
except UnicodeError:
raise IriError(IriError.IDNA_UNSUPPORTED, uri=uriRef)
return uri
def path_resolve(paths):
"""
This function takes a list of file URIs. The first can be
absolute or relative to the URI equivalent of the current working
directory. The rest must be relative to the first.
The function converts them all to OS paths appropriate for the local
system, and then creates a single final path by resolving each path
in the list against the following one. This final path is returned
as a URI.
"""
if not paths: return paths
paths = [uri_to_os_path(p, attemptAbsolute=False) for p in paths]
if not os.path.isabs(paths[0]):
paths[0] = os.path.join(os.getcwd(), paths[0])
resolved = reduce(lambda a, b: \
basejoin(os.path.isdir(a)
and os_path_to_uri(
os.path.join(a, ''),
attemptAbsolute=False,
) or os_path_to_uri(a, attemptAbsolute=False),
os_path_to_uri(b, attemptAbsolute=False)[5:]),
paths)
return resolved
def basejoin(base, uriRef):
"""
Merges a base URI reference with another URI reference, returning a
new URI reference.
It behaves exactly the same as absolutize(), except the arguments
are reversed, and it accepts any URI reference (even a relative URI)
as the base URI. If the base has no scheme component, it is
evaluated as if it did, and then the scheme component of the result
is removed from the result, unless the uriRef had a scheme. Thus, if
neither argument has a scheme component, the result won't have one.
This function is named basejoin because it is very much like
urllib.basejoin(), but it follows the current RFC 3986 algorithms
for path merging, dot segment elimination, and inheritance of query
and fragment components.
WARNING: This function exists for 2 reasons: (1) because of a need
within the 4Suite repository to perform URI reference absolutization
using base URIs that are stored (inappropriately) as absolute paths
in the subjects of statements in the RDF model, and (2) because of
a similar need to interpret relative repo paths in a 4Suite product
setup.xml file as being relative to a path that can be set outside
the document. When these needs go away, this function probably will,
too, so it is not advisable to use it.
"""
if is_absolute(base):
return absolutize(uriRef, base)
else:
dummyscheme = 'basejoin'
res = absolutize(uriRef, '%s:%s' % (dummyscheme, base))
if is_absolute(uriRef):
# scheme will be inherited from uriRef
return res
else:
# no scheme in, no scheme out
return res[len(dummyscheme)+1:]
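#Usage sketch (illustrative):
#
# basejoin('http://example.org/a/b', 'c/d') -> 'http://example.org/a/c/d'
# basejoin('/a/b/c', 'x/y') -> '/a/b/x/y' (schemeless base, schemeless result)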
def join(*uriparts):
"""
Merges a series of URI reference parts, returning a new URI reference.
Much like iri.basejoin, but takes multiple arguments
"""
if len(uriparts) == 0:
raise TypeError("FIXME...")
elif len(uriparts) == 1:
return uriparts[0]
else:
base = uriparts[0]
for part in uriparts[1:]:
base = basejoin(base.rstrip(DEFAULT_HIERARCHICAL_SEP) + DEFAULT_HIERARCHICAL_SEP, part)
return base
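#Usage sketch, assuming DEFAULT_HIERARCHICAL_SEP (defined earlier in this
#module) is '/':
#
# join('http://example.org/a', 'b', 'c') -> 'http://example.org/a/b/c'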
#generate_iri
#Use:
#from uuid import *; newuri = uuid4().urn
#=======================================================================
#
# Further reading re: percent-encoding
#
# http://lists.w3.org/Archives/Public/ietf-http-wg/2004JulSep/0009.html
#
#=======================================================================
#
# 'file:' URI notes
#
# 'file:' URI resolution is difficult to get right, because the 'file'
# URL scheme is underspecified, and is handled by resolvers in very
# lenient and inconsistent ways.
#
# RFC 3986 provides definitive clarification on how all URIs,
# including the quirky 'file:' ones, are to be interpreted for purposes
# of resolution to absolute form, so that is what we implement to the
# best of our ability.
#
#-----------------------------------------------------------------------
#
# Notes from our previous research on 'file:' URI resolution:
#
# According to RFC 2396 (original), these are valid absolute URIs:
# file:/autoexec.bat (scheme ":" abs_path)
# file:///autoexec.bat (scheme ":" net_path)
#
# This one is valid but is not what you'd expect it to be:
#
# file://autoexec.bat (authority = autoexec.bat, no abs_path)
#
# If you have any more than 3 slashes, it's OK because each path segment
# can be an empty string.
#
# This one is valid too, although everything after 'file:' is
# considered an opaque_part (note that RFC 3986 changes this):
#
# file:etc/passwd
#
# Unescaped backslashes are NOT allowed in URIs, ever.
# It is not possible to use them as path segment separators.
# Yet... Windows Explorer will accept these:
# file:C:\WINNT\setuplog.txt
# file:/C:\WINNT\setuplog.txt
# file:///C:\WINNT\setuplog.txt
# However, it will also accept "|" in place of the colon after the drive:
# file:C|/WINNT/setuplog.txt
# file:/C|/WINNT/setuplog.txt
# file:///C|/WINNT/setuplog.txt
#
# RFC 1738 says file://localhost/ and file:/// are equivalent;
# localhost in this case is always the local machine, no matter what
# your DNS says.
#
# Basically, any file: URI is valid. Good luck resolving them, though.
#
# Jeremy's idea is to not use open() or urllib.urlopen() on Windows;
# instead, use a C function that wraps Windows' generic open function,
# which resolves any path or URI exactly as Explorer would (he thinks).
#
#-----------------------------------------------------------------------
#
# References for further research on 'file:' URI resolution:
# http://mail.python.org/pipermail/xml-sig/2001-February/004572.html
# http://mail.python.org/pipermail/xml-sig/2001-March/004791.html
# http://mail.python.org/pipermail/xml-sig/2002-August/008236.html
# http://www.perldoc.com/perl5.8.0/lib/URI/file.html
# http://lists.w3.org/Archives/Public/uri/2004Jul/0013.html
#
#=======================================================================
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/lib/iri.py
|
iri.py
|
import sys
from amara import tree
from amara.lib.util import *
from amara.xpath import context, parser
def property_str_getter(propname, node):
'''
A tool (use with functools.partial) for creating smart getters that can access an element
whether or not it appears in the XML, returning its string value
doc = "<a><b><c>spam</c></b><b/></a>"
import functools
from amara.bindery.util import property_str_getter
#Use the bound name of the XML construct
c_str = functools.partial(property_str_getter('c'))
c_str(doc.a.b) #Return u'spam'
c_str(doc.a.b[1]) #Return u''
'''
return unicode(getattr(node, propname))
def property_sequence_getter(propname, node):
'''
A tool (use with functools.partial) for creating smart getters that can access an element
whether or not it appears in the XML, returning an empty list if not
doc = "<a><b><c/></b><b/></a>"
import functools
from amara.bindery.util import property_sequence_getter
#Use the bound name of the XML construct
c_list = functools.partial(property_sequence_getter('c'))
c_list(doc.a.b) #Return the one c element in a list
c_list(doc.a.b[1]) #Return an empty list
'''
return list(getattr(node, propname, []))
#A more general-purpose converter utility
def property_getter(propname, node, converter=unicode):
return converter(getattr(node, propname))
DEFAULTY_PRIORITY = -sys.maxint-1
ALL_MODES = u'*'
class node_handler(object):
'''
A decorator
'''
def __init__(self, test, mode=None, priority=0):
self.test = test if type(test) in (list, tuple) else [test]
self.priority = priority
self.mode = mode
def __call__(self, func):
func.test = self.test
func.mode = self.mode
func.priority = self.priority
return func
#
#A simple, XSLT-like dispatch system
class dispatcher(object):
def __init__(self):
self.node_handlers = []
for obj in ( getattr(self, name) for name in dir(self) ):
test = getattr(obj, 'test', None)
if test is not None:
self.node_handlers.append((obj.priority, obj))
#Using bisect or cmp for sorted might be more efficient, but in general dispatcher handler sets
#will be small enough not to matter
self.node_handlers = [ obj for (priority, obj) in sorted(self.node_handlers, reverse=True) ]
self.cached_xpath = {}
return
def check_xpath(self, test, node):
'''
The XPath check is reminiscent of the XSLT pattern check.
If any ancestor of the node can be used as context for the test XPath,
such that the node is in the resulting node set, the test succeeds
'''
#FIXME: optimize, at least for the simple node test case. No need to climb all the way up the tree for that
        #FIXME: add support for python callable tests
        #for i, t in enumerate(obj.test):
        #    if isinstance(t, basestring):
        #        obj.test[i:i+1] = []
if test not in self.cached_xpath:
self.cached_xpath[test] = parser.parse(test)
test = self.cached_xpath[test]
#if hasattr(test, 'evaluate'):
#if isinstance(test, unicode):
cnode = node
while cnode.xml_parent is not None:
if node in test.evaluate(context(cnode.xml_parent, namespaces=cnode.xml_parent.xml_namespaces)):
return True
cnode = cnode.xml_parent
return False
def dispatch(self, node, mode=None):
for handler in self.node_handlers:
for test in handler.test:
if (callable(test) and test(self, node)) or self.check_xpath(test, node):
if handler.mode in (mode, ALL_MODES):
for chunk in handler(node): yield chunk
return
@node_handler(u'node()', ALL_MODES, DEFAULTY_PRIORITY)
def default_node(self, node):
if isinstance(node, tree.element) or isinstance(node, tree.entity):
#print 'default_element'
for child in node.xml_children:
for chunk in self.dispatch(child):
yield chunk
elif isinstance(node, tree.text):
#print 'default_node'
#yield unicode(node.xml_select(u'string(.)'))
yield node.xml_value
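#Usage sketch for the dispatcher: a minimal, hand-written subclass (the
#bindery.parse idiom follows usage shown in comments elsewhere in this
#package; unicode(element) is assumed to yield the element's string value):
#
# from amara import bindery
# from amara.bindery.util import dispatcher, node_handler
#
# class titles(dispatcher):
#     @node_handler(u'title')
#     def title(self, node):
#         yield unicode(node)
#
# doc = bindery.parse('<entry><title>Spam</title></entry>')
# print u''.join(titles().dispatch(doc.entry))  #Should print: Spam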
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/bindery/util.py
|
util.py
|
__all__ = [
'Document', 'Doctype', 'Comment', 'Element',
]
import copy
import itertools
from functools import *
from itertools import *
from amara.thirdparty import html5lib
from amara import tree
from amara.lib import inputsource
from amara.bindery import nodes
from amara.lib.xmlstring import *
from amara.namespaces import XML_NAMESPACE, XHTML_NAMESPACE
class node(html5lib.treebuilders._base.Node):
appendChild = tree.element.xml_append
removeChild = tree.element.xml_remove
parent = tree.node.xml_parent
value = tree.text.xml_value
def insertText(self, data, insertBefore=None):
"""Insert data as text in the current node, positioned before the
start of node insertBefore or to the end of the node's text.
"""
if insertBefore:
self.insertBefore(tree.text(data), insertBefore)
else:
self.xml_append(tree.text(data))
def insertBefore(self, node, refNode):
"""Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node"""
offset = self.xml_index(refNode)
self.xml_insert(offset, node)
def cloneNode(self):
"""Return a shallow copy of the current node i.e. a node with the same
name and attributes but with no parent or child nodes
"""
raise NotImplementedError
def hasContent(self):
"""Return true if the node has children or text, false otherwise
"""
return bool(list(self.xml_children))
class element(nodes.element_base, node):
'''
attributes - a dict holding name, value pairs for attributes of the node
childNodes - a list of child nodes of the current node. This must
include all elements but not necessarily other node types
_flags - A list of miscellaneous flags that can be set on the node
'''
#name = nodes.element_base.xml_qname
#namespace = nodes.element_base.xml_namespace
    xml_exclude_pnames = frozenset(('name', 'parent', 'appendChild', 'removeChild', 'value', 'attributes', 'childNodes'))
@property
def name(self):
return getattr(self, 'xml_html5lib_name', self.xml_qname)
@property
def namespace(self):
return getattr(self, 'xml_html5lib_namespace', self.xml_namespace)
@property
def nameTuple(self):
name = getattr(self, 'xml_html5lib_name', self.xml_qname)
namespace = getattr(self, 'xml_html5lib_namespace', self.xml_namespace)
return namespace, name
#return XHTML_NAMESPACE, self.xml_name
def xml_get_childNodes_(self):
return self.xml_children
def xml_set_childNodes_(self, l):
#No self.xml_clear() ...
for child in self.xml_children:
self.xml_remove(child)
for i in l: self.xml_append(i)
return
childNodes = property(xml_get_childNodes_, xml_set_childNodes_, None, "html5lib uses this property to manage HTML element children")
def __init__(self, ns, qname):
nodes.element_base.__init__(self, ns, qname)
self._flags = []
return
def xml_set_attributes_(self, attrs):
for key, val in attrs.iteritems():
#from amara.bindery import html; doc = html.parse('http://outreach.zepheira.com/public/rdfa/plos-10.1371-journal.pgen.1000219.html'); h = doc.xml_select(u'//h1')[0]; print h.property
#from amara.bindery import html; doc = html.parse('/tmp/plos-10.1371-journal.pgen.1000219.html'); h = doc.xml_select(u'//h1')[0]; print h.property
if isinstance(key, tuple):
self.xml_attributes[key] = val
elif key.startswith(u'xmlns'):
prefix, local = splitqname(key)
self.xml_namespaces[local if prefix else None] = val
else:
self.xml_attributes[None, key] = val
return
def xml_get_attributes_(self):
return self.xml_attributes
attributes = property(xml_get_attributes_, xml_set_attributes_, None, "html5lib uses this property to manage HTML element attrs")
def cloneNode(self):
"""Return a shallow copy of the current node i.e. a node with the same
name and attributes but with no parent or child nodes
"""
newelem = self.xml_root.xml_element_factory(self.xml_namespace, self.xml_local)
for k, v in self.xml_attributes.items():
newelem.xml_attributes[k] = v
return newelem
class entity(node, nodes.entity_base):
"""
Base class for entity nodes (root nodes--similar to DOM documents and document fragments)
"""
xml_element_base = element
def __init__(self, document_uri=None):
nodes.entity_base.__init__(self, document_uri)
return
__repr__ = nodes.entity_base.__repr__
class comment(tree.comment):
type = 6
value = tree.text.xml_value
def __init__(self, data):
#tree.comment.__init__(self, data)
tree.comment.__init__(self)
self.data = data
def toxml(self):
return "<!--%s-->" % self.data
    #def hilite(self):
    #    return '<code class="markup comment"><!--%s--></code>' % escape(self.data)
class doctype(tree.node, html5lib.treebuilders.simpletree.DocumentType):
def __new__(cls, name, publicId, systemId):
return tree.node.__new__(cls)
def __init__(self, name, publicId, systemId):
self.xml_public_id = publicId
self.xml_system_id = systemId
self.xml_name = name
def doctype_create(dummy, name, publicId=None, systemId=None):
c = comment(u'')
c.xml_public_id = publicId
c.xml_system_id = systemId
c.xml_name = name
return c
BOGUS_NAMESPACE = u'urn:bogus:x'
NAME_FOR_ELEMENTS_UNNAMED_BY_HTML5LIB = u'UNNAMED_BY_HTML5LIB'
class treebuilder_pre_0_90(html5lib.treebuilders._base.TreeBuilder):
documentClass = entity
#elementClass = xml_element_factory
commentClass = comment
doctypeClass = doctype_create
def __init__(self, entity_factory=None):
self.entity = entity_factory()
html5lib.treebuilders._base.TreeBuilder.__init__(self)
def eclass(name):
if not name: name = NAME_FOR_ELEMENTS_UNNAMED_BY_HTML5LIB
namespace, name = None, U(name)
#Deal with some broken HTML that uses bogus colons in tag names
if (u":" in name and not namespace):
namespace = BOGUS_NAMESPACE
return self.entity.xml_element_factory(namespace, name)
self.elementClass = eclass
MARKER = object()
class treebuilder(html5lib.treebuilders._base.TreeBuilder):
documentClass = entity
#elementClass = xml_element_factory
commentClass = comment
doctypeClass = doctype_create
def __init__(self, entity_factory=None, use_xhtml_ns=False):
self.entity = entity_factory()
#html5lib.treebuilders._base.TreeBuilder breaks if you do not pass in True for namespaceHTMLElements
#We'll take care of that ourselves with the if not use_xhtml_ns... below
html5lib.treebuilders._base.TreeBuilder.__init__(self, True)
def eclass(name, namespace):
xml_html5lib_name, xml_html5lib_namespace = MARKER, MARKER
if not use_xhtml_ns and namespace == XHTML_NAMESPACE:
                #html5lib feigns support for HTML5 elements kept in the null namespace,
                #but in reality this support is broken. We have to in effect keep
                #two namespaces for each element: the real one from an Amara perspective,
                #and another that is always XHTML for HTML5 elements, so html5lib doesn't break
xml_html5lib_namespace = namespace
namespace = None
#For some reason html5lib sometimes sends None as name
if not name:
xml_html5lib_name = name
name = NAME_FOR_ELEMENTS_UNNAMED_BY_HTML5LIB
namespace, name = U(namespace) if namespace else None, U(name)
#import sys; print >> sys.stderr, (namespace, name, use_xhtml_ns)
#Deal with some broken HTML that uses bogus colons in tag names
if (u":" in name and not namespace):
xml_html5lib_namespace = namespace
namespace = BOGUS_NAMESPACE
            #Note: Amara's (ns, name) convention is the reverse of html5lib's argument order
elem = self.entity.xml_element_factory(namespace, name)
if xml_html5lib_namespace != MARKER:
elem.xml_html5lib_namespace = xml_html5lib_namespace
if xml_html5lib_name != MARKER:
elem.xml_html5lib_name = xml_html5lib_name
return elem
self.elementClass = eclass
def parse(source, prefixes=None, model=None, encoding=None, use_xhtml_ns=False):
    '''
    Parse HTML (including tag soup) from the given source and return an
    amara bindery document
    '''
from amara.lib.util import set_namespaces
#from amara.bindery import html; doc = html.parse("http://www.hitimewine.net/istar.asp?a=6&id=161153!1247")
#parser = html5lib.HTMLParser()
if PRE_0_90:
def get_tree_instance():
entity_factory = model.clone if model else entity
return treebuilder(entity_factory)
else:
def get_tree_instance(namespaceHTMLElements, use_xhtml_ns=use_xhtml_ns):
#use_xhtml_ns is a boolean, whether or not to use http://www.w3.org/1999/xhtml
entity_factory = model.clone if model else entity
return treebuilder(entity_factory, use_xhtml_ns)
parser = html5lib.HTMLParser(tree=get_tree_instance)
doc = parser.parse(inputsource(source, None).stream, encoding=encoding)
if prefixes: set_namespaces(doc, prefixes)
return doc
#return parser.parse(inputsource(source, None).stream, model)
def markup_fragment(source, encoding=None):
'''
    Parse a fragment of markup in HTML mode, and return a bindery node
You might want to wrap source with amara.lib.inputsource.text if it's not obviously XML or HTML
from amara.lib import inputsource
from amara.bindery import html
doc = html.markup_fragment(inputsource.text('XXX<html><body onload="" color="white"><p>Spam!<p>Eggs!</body></html>YYY'))
See also: http://wiki.xml3k.org/Amara2/Tagsoup
'''
doc = parse(source, encoding=encoding)
frag = doc.html.body
return frag
try:
HTML5LIB_VERSION = html5lib.__version__
PRE_0_90 = False
except AttributeError:
#0.11.1 and earlier do not seem to have __version__
#Note later versions seem to have a broken __version__
#This logic is really there for when they fix that
HTML5LIB_VERSION = 'PRE_0.90'
PRE_0_90 = True
treebuilder = treebuilder_pre_0_90
def launch(source, **kwargs):
doc = parse(source)
    if kwargs.get('pretty'):
doc.xml_write('xml-indent')
else:
doc.xml_write()
return
#Ideas borrowed from
# http://www.artima.com/forums/flat.jsp?forum=106&thread=4829
#FIXME: A lot of this is copied boilerplate that needs to be cleaned up
import sys
def command_line_prep():
from optparse import OptionParser
usage = "Amara 2.x. Command line support for parsing HTML, even tag soup.\n"
usage += "python -m 'amara.bindery.html' [options] source cmd"
parser = OptionParser(usage=usage)
parser.add_option("-p", "--pretty",
action="store_true", dest="pretty", default=False,
help="Pretty-print the XML output")
parser.add_option("-H", "--html",
action="store_true", dest="html", default=False,
help="Output (cleaned-up) HTML rather than XML")
return parser
def main(argv=None):
#But with better integration of entry points
if argv is None:
argv = sys.argv
# By default, optparse usage errors are terminated by SystemExit
try:
optparser = command_line_prep()
options, args = optparser.parse_args(argv[1:])
# Process mandatory arguments with IndexError try...except blocks
try:
source = args[0]
except IndexError:
optparser.error("Missing source for HTML")
#try:
# xpattern = args[1]
#except IndexError:
# optparser.error("Missing main xpattern")
except SystemExit, status:
return status
# Perform additional setup work here before dispatching to run()
# Detectable errors encountered here should be handled and a status
# code of 1 should be returned. Note, this would be the default code
# for a SystemExit exception with a string message.
pretty = options.pretty
html = options.html
if source == '-':
source = sys.stdin
launch(source, pretty=pretty, html=html)
return
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/bindery/html.py
|
html.py
|
__all__ = [
'entity_base', 'element_base', 'element_base',
'ANY_NAMESPACE',
'PY_ID_ENCODING', 'RESERVED_NAMES'
]
from functools import *
from operator import itemgetter
import re
import itertools
import keyword
import warnings
from cStringIO import StringIO
from xml.dom import Node
from amara import tree
from amara.lib.xmlstring import *
from amara.xpath import datatypes
from amara.lib.util import *
from amara.writers.struct import *
#Only need to list IDs that do not start with "xml", "XML", etc.
RESERVED_NAMES = [
'__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__',
'__getitem__', '__hash__', '__init__', '__iter__', '__module__',
'__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__',
'__str__', '__unicode__', '__weakref__', 'locals', 'None'
]
RESERVED_NAMES = frozenset(itertools.chain(keyword.kwlist, RESERVED_NAMES))
ANY_NAMESPACE = 'http://purl.xml3k.org/amara/reserved/any-namespace'
PY_ID_ENCODING = 'iso-8859-1'
#Uses the following neat pattern for partial function invocation in a property
#def a(x, instance):
#    #Call it "instance", not self, since thanks to the combination of property
#    #on a partial, it's the last, not first, positional arg
#    print instance.m, x
#
#class b(object):
#    def __init__(self):
#        self.m = 0
#        setattr(self.__class__, "c", property(partial(a, 1)))
#
#t = b()
#t.c
##Prints: "0 1"
#Note the following bits of trivia:
#class b(object):
#    def __init__(self):
#        self.m = 0
#        setattr(self, "c", partial(self.a, 1))
#    def a(self, x, y, z):
#        print self.m, x, y, z
#
#t = b()
#t.c(2, 3)
##Prints: "0 1 2 3"
#def a(x, y, z):
#    print x, y, z
#
#class b(object):
#    def __init__(self):
#        setattr(self, "c", partial(a, 1))
#
#t = b()
#t.c(2, 3)
##Prints: "1 2 3"
class element_iterator:
def __init__(self, parent, ns, local):
self.children = iter(parent.xml_children)
self.parent = parent
self.ns, self.local = ns, local
return
def __iter__(self):
return self
def next(self):
child = self.parent.xml_find_named_child(self.ns, self.local, self.children)
if child is None:
raise StopIteration
return child
#def elem_getter(pname, parent):
# ns, local = parent.xml_model.element_types[pname]
# return parent.xml_find_named_child(ns, local)
#Note: one of the better explanations of descriptor magic is here: http://gnosis.cx/publish/programming/charming_python_b26.html
#Also: http://users.rcn.com/python/download/Descriptor.htm
#See also official docs here: http://docs.python.org/ref/descriptors.html
class bound_element(object):
"""
A descriptor to support bound elements
"""
#A descriptor to support elements that are not defined in the owner class's xml_model
def __init__(self, ns, local):
self.ns = ns
self.local = local
def __get__(self, obj, owner):
child = obj.xml_find_named_child(self.ns, self.local)
if child is not None:
return child
else:
#Property is defined in this element class's XML model, but does not appear on this instance
return obj.xml_model.element_types.get((self.ns, self.local), (None, None))[1]
def __set__(self, obj, value):
target = self.__get__(obj, None)
if target is not None:
for child in target.xml_children:
target.xml_remove(child)
target.xml_append(value)
else:
new_elem = obj.factory_entity.xml_element_factory(self.ns, self.local)
new_elem.xml_append(value)
obj.xml_append(new_elem)
return
def __delete__(self, obj):
target = self.__get__(obj, None)
obj.xml_remove(target)
return
class bound_attribute(object):
"""
A descriptor to support bound attributes
"""
#A descriptor to support attributes that are not defined in the owner class's xml_model
def __init__(self, ns, local):
self.ns = ns
self.local = local
def __get__(self, obj, owner):
if (self.ns, self.local) in obj.xml_attributes:
return obj.xml_attributes[self.ns, self.local]
else:
#Property is defined in this element class's XML model, but does not appear on this instance
return obj.xml_model.attribute_types.get((self.ns, self.local), (None, None))[1]
def __set__(self, obj, value):
#from amara import bindery; doc = bindery.parse('<a x="1"/>'); doc.a.x = unicode(int(doc.a.x)+1)
if isinstance(value, basestring):
attr = tree.attribute(self.ns, self.local, unicode(value))
obj.xml_attributes.setnode(attr)
else:
obj.xml_attributes[self.ns, self.local].xml_value = value
return
def __delete__(self, obj):
del obj.xml_attributes[self.ns, self.local]
return
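#Usage sketch for the bound_element/bound_attribute descriptors, based on
#the inline example in bound_attribute.__set__ above (illustrative):
#
# from amara import bindery
# doc = bindery.parse('<a x="1"><b>spam</b></a>')
# unicode(doc.a.b)                  #-> u'spam' (bound_element access)
# doc.a.x = unicode(int(doc.a.x)+1) #bound_attribute set; x is now u'2'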
ELEMENT_TYPE = tree.element.xml_type
class container_mixin(object):
xml_model_ = None
XML_PY_REPLACE_PAT = re.compile(u'[^a-zA-Z0-9_]')
xml_exclude_pnames = frozenset()
#FIXME: Remove this initializer as soon as we move xml_pname_cache to root nodes
def __init__(self):
self.__class__.xml_pname_cache = {}
def xml_get_model(self): return self.xml_model_
def xml_set_model(self, model):
self.__class__.xml_model_ = model
#FIXME: why not self.xml_root ?
model.entities.add(self.xml_select(u'/')[0])
return
xml_model = property(xml_get_model, xml_set_model, "XML model")
def xml_validate(self):
subtree = element_subtree_iter(self, include_root=True)
for e in subtree:
e.xml_model.validate(e)
return
def xml_avt(self, expr, prefixes=None):
prefixes = prefixes or self.xml_namespaces.copy()
from amara.xslt.expressions import avt
from amara.xpath import context
v = avt.avt_expression(expr)
return unicode(v.evaluate(context(self, namespaces=prefixes)))
@property
def xml_element_pnames(self):
return itertools.chain(itertools.imap(itemgetter(0), self.xml_model.element_types.itervalues()),
(self.xml_extra_children or {}).iterkeys())
@property
def xml_element_xnames(self):
return itertools.chain(self.xml_model.element_types.iterkeys(),
                               (self.xml_extra_children or {}).itervalues())
@property
def xml_child_text(self):
return u''.join([ ch for ch in self.xml_children if isinstance(ch, unicode)])
@property
def xml_elements(self):
return ( ch for ch in self.xml_children if isinstance(ch, tree.element) )
@property
def xml_pname(self):
#FIXME: account for special elems/attrs
return self.xml_child_pnames
def xml_new_pname_mapping(self, ns, local, iselement=True, update_class=True):
'''
Called to create a new name, or where disambiguation is required
First generate a Python ID (as a *string*) from an XML universal name
used to prepare an object for binding
ns - the XML namespace
local - the XML local name
iselement - a flag as to whether the object to be bound is an element or attribute
'''
root = self.xml_root
        #FIXME: We'll be looking to use root, once we can get it (i.e. low level parser API tweak)
root = self.__class__
if root and (local, ns) in root.xml_pname_cache:
pname = root.xml_pname_cache[(local, ns)]
#if pname == u'name':
# from akara import logger; logger.debug(repr(('GRIPPO CACHED', pname, self.xml_exclude_pnames, root and root.xml_pname_cache)))
else:
pname = self.XML_PY_REPLACE_PAT.sub('_', local.encode('utf-8'))
#if pname == u'name':
# from akara import logger; logger.debug(repr(('GRIPPO UNCACHED', pname, self.xml_exclude_pnames, root and root.xml_pname_cache)))
while pname in RESERVED_NAMES or pname in self.xml_exclude_pnames:
pname += '_'
# self._names may not be present when copy.deepcopy() is
# creating a copy, so only try to cache if it's present.
if root: root.xml_pname_cache[(local, ns)] = pname
        while True:
            pname_info = self.xml_pname.get(pname)
            if pname_info is None:
                break
            elif pname_info[0] == (ns, local):
                break
            else:
                #A different name is already bound to this pname: disambiguate
                pname += '_'
if update_class:
# setattr on a class has a surprisingly large overhead with descriptors.
# This check reduces parsebench.py:bindery_parse4 from 161 ms to 127 ms.
if pname not in self.__class__.__dict__:
if iselement:
setattr(self.__class__, pname, bound_element(ns, local))
else:
setattr(self.__class__, pname, bound_attribute(ns, local))
self.xml_child_pnames[pname] = (ns, local), self.__class__.__dict__
return pname
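    #Example of the mangling above (assuming RESERVED_NAMES covers Python
    #keywords): XML local name u'my-name' binds as pname u'my_name', and a
    #name such as u'class' binds as u'class_'.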
def xml_child_inserted(self, child):
"""
called after the node has been added to `self.xml_children`
"""
if isinstance(child, tree.element):
self.xml_new_pname_mapping(child.xml_namespace, child.xml_local, True)
return
def xml_child_removed(self, child):
"""
called after the node has been removed from `self.xml_children` (i.e. child.xml_parent is now None)
"""
#Nothing really to do: we don't want to remove the descriptor from the class, since other instances might be using it
return
def xml_find_named_child(self, ns, local, childiter=None):
#This function is very heavily used, and should be carefully optimized
found = False
#XXX: could use dropwhile (with negation)...
#self.children = dropwhile(lambda c, n=self.name: (c.xml_namespace, c.xml_name) != n, self.children)
childiter = iter(self.xml_children) if childiter is None else childiter
name = (ns, local)
while not found:
try:
child = childiter.next() #Will raise StopIteration when siblings are exhausted
except StopIteration:
return None
found = child.xml_type == ELEMENT_TYPE and child.xml_name == name
return child
def xml_append(self, obj):
#Can't really rely on super here
base_class = {tree.element.xml_type: tree.element, tree.entity.xml_type: tree.entity}[self.xml_type]
if isinstance(obj, str):
base_class.xml_append(self, tree.text(obj.decode(self.factory_entity.xml_encoding)))
elif isinstance(obj, unicode):
base_class.xml_append(self, tree.text(obj))
elif isinstance(obj, tree.node):
base_class.xml_append(self, obj)
elif isinstance(obj, E):
buf = StringIO()
w = structwriter(indent=u"yes", stream=buf)
w.feed(obj)
self.xml_append_fragment(buf.getvalue())
else:
raise TypeError
return
def xml_append_fragment(self, frag):
from amara.bindery import parse
doc = parse(frag)
for child in doc.xml_children:
self.xml_append(child)
return
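    #Example:
    #  from amara import bindery
    #  doc = bindery.parse('<a/>')
    #  doc.a.xml_append_fragment('<b>spam</b>')
    #  doc.xml_write()
    #  --> "<a><b>spam</b></a>"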
def __getitem__(self, key):
#Example:
#$ python -c "from amara.bindery import parse; from itertools import *; doc = parse('<x><a b=\"1\"/><a b=\"2\"/><a b=\"3\"/><a b=\"4\"/></x>'); print list(islice(doc.x.a, 2,3))[0].xml_attributes.items()"
# => [((None, u'b'), u'3')]
if isinstance(key, int):
if key >= 0:
result = list(itertools.islice(element_iterator(self.xml_parent, self.xml_namespace, self.xml_qname), key, key+1))[0]
else:
result = list(element_iterator(self.xml_parent, self.xml_namespace, self.xml_qname))[key]
else:
force_type = None
if isinstance(key, tuple):
if len(key) == 3:
force_type, key = key[0], key[1:]
elif isinstance(key, basestring):
key = (None, key)
else:
raise TypeError('Inappropriate key (%s)'%(key))
if force_type in (None, tree.attribute.xml_type) and hasattr(self, 'xml_attributes') and key in self.xml_attributes:
return self.xml_attributes[key]
if force_type in (None, tree.element.xml_type):
return self.xml_find_named_child(key[0], key[1])
else:
raise KeyError('namespace/local name combination not found (%s)'%(str(key)))
return result
def __delitem__(self, key):
'''
from amara import bindery
DOC = "<a><b>spam</b><b>eggs</b></a>"
doc = bindery.parse(DOC)
del doc.a.b[1]
doc.xml_write()
--> "<a><b>spam</b></a>"
from amara import bindery
DOC = "<a><b>spam</b><b>eggs</b></a>"
doc = bindery.parse(DOC)
del doc.a[u'b']
doc.xml_write()
--> "<a><b>eggs</b></a>"
'''
target = None
if isinstance(key, int):
target = list(itertools.islice(element_iterator(self.xml_parent, self.xml_namespace, self.xml_qname), key, key+1))[0]
parent = self.xml_parent
else:
parent = self
force_type = None
if isinstance(key, tuple):
if len(key) == 3:
force_type, key = key[0], key[1:]
elif isinstance(key, basestring):
key = (None, key)
else:
raise TypeError('Inappropriate key (%s)'%(key))
            if force_type in (None, tree.attribute.xml_type) and hasattr(self, 'xml_attributes') and key in self.xml_attributes:
                #Attributes are removed directly from the attribute map (cf. bound_attribute.__delete__)
                del self.xml_attributes[key]
                return
            elif force_type in (None, tree.element.xml_type):
                target = self.xml_find_named_child(key[0], key[1])
            else:
                raise KeyError('namespace/local name combination not found (%s)'%(str(key)))
#In docstring example, self = parent = a and target = b
if target is not None:
parent.xml_remove(target)
return
def __setitem__(self, key, value):
'''
from amara import bindery
DOC = "<a><b>spam</b></a>"
doc = bindery.parse(DOC)
doc.a.b[0] = u"eggs"
doc.xml_write()
--> "<a><b>eggs</b></a>"
from amara import bindery
DOC = "<a><b>spam</b></a>"
doc = bindery.parse(DOC)
doc.a[u'b'] = u"eggs"
doc.xml_write()
--> "<a><b>eggs</b></a>"
'''
target = None
if isinstance(key, int):
target = list(itertools.islice(element_iterator(self.xml_parent, self.xml_namespace, self.xml_qname), key, key+1))[0]
parent = self.xml_parent
else:
parent = self
force_type = None
if isinstance(key, tuple):
if len(key) == 3:
force_type, key = key[0], key[1:]
elif isinstance(key, basestring):
key = (None, key)
else:
raise TypeError('Inappropriate key (%s)'%(key))
if force_type in (None, tree.attribute.xml_type) and hasattr(self, 'xml_attributes'):
target = None
if isinstance(value, tree.attribute):
# Check that the namespaces match
ns, local = key
if (ns and value.xml_namespace and
ns != value.xml_namespace):
raise ValueError(
"Namespaces don't match: key ns==%r, attrnode ns=%r"
% (ns, value.xml_namespace))
if local != value.xml_local:
raise ValueError(
"Local names don't match: key name==%r, attrnode name=%r"
% (local, value.xml_local))
# If either namespace value is None, use the other
# as the default.
if value.xml_namespace is None:
if ns is None:
# If no namespaces specified at all,
# use the default one.
ns = self.xml_namespaces[None]
value = tree.attribute(ns, value.xml_local, value.xml_value)
elif ns is None:
ns = value.xml_namespace
# If they match, perform the assignment.
self.xml_attributes.setnode(value)
else:
self.xml_attributes[key] = value
elif force_type in (None, tree.element.xml_type):
target = self.xml_find_named_child(*key)
if target is None:
new_elem = parent.factory_entity.xml_element_factory(*key)
new_elem.xml_append(value)
parent.xml_append(new_elem)
else:
raise KeyError('namespace/local name combination not found (%s)'%(str(key)))
if target is not None:
#No target.xml_clear()...
for child in target.xml_children:
target.xml_remove(child)
target.xml_append(value)
return
#def xml_fixup(self, old_ns):
def xml_fixup(self, target=None):
"""
Should be called after any modification to `xml_namespace` on any child,
which would normally break the binding to this container.
A stop-gap measure until the best real fix is determined.
See:
File "/Users/uche/lib/python2.5/site-packages/amara/bindery/nodes.py", line 154, in __get__
return obj.xml_attributes[self.ns, self.local]
KeyError: (None, u'foo')
"""
#FIXME: Should technically use a new binding class, since those are related to ns/local
if target:
offset = self.xml_index(target)
self.xml_remove(target)
self.xml_insert(offset, target)
return
children = []
for child in self.xml_children:
self.xml_remove(child)
children.append(child)
for child in children:
self.xml_append(child)
return
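    #Sketch of the intended use (hedged; xml_fixup is itself a stop-gap):
    #  b = doc.a.b
    #  b.xml_namespace = u'urn:bogus:x' #breaks the binding on doc.a
    #  doc.a.xml_fixup(b)               #remove/re-insert rebuilds the binding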
class element_base(container_mixin, tree.element):
xml_attribute_factory = tree.attribute #factory callable for attributes
def __init__(self, ns, qname):
#These are the children that do not come from schema information
#{pname: (ns, local)}
self.xml_extra_children = None
self.xml_extra_attributes = None
#FIXME: Remove this as soon as we move xml_pname_cache to root nodes
container_mixin.__init__(self)
return
def xml_attribute_added(self, attr_node):
"""
called after the attribute has been added to `self.xml_attributes`
"""
self.xml_new_pname_mapping(attr_node.xml_namespace, attr_node.xml_local, False)
return
def xml_attribute_removed(self, attr_node):
"""
called after the attribute has been removed `self.xml_attributes`
"""
#Nothing really to do: we don't want to remove the descriptor from the class, since other instances might be using it
return
@property
def xml_attribute_pnames(self):
return itertools.chain(itertools.imap(itemgetter(0), self.xml_model.attribute_types.itervalues()),
(self.xml_extra_attributes or {}).iterkeys())
@property
def xml_pnames(self):
return itertools.chain(self.xml_attribute_pnames, self.xml_element_pnames)
@property
def xml_attribute_xnames(self):
return itertools.chain(self.xml_model.attribute_types.iterkeys(),
                               (self.xml_extra_attributes or {}).itervalues())
@property
def xml_index_on_parent(self):
try:
index = self.xml_parent.xml_children.index(self)
except ValueError: #Not found
raise
return index
def __unicode__(self):
'''
Returns a Unicode object with the text contents of this node and
its descendants, if any.
Equivalent to XPath string() conversion
'''
return unicode(datatypes.string(self))
def __str__(self):
return unicode(self).encode(self.factory_entity.xml_encoding)
def __iter__(self):
return element_iterator(self.xml_parent, self.xml_namespace, self.xml_local)
def __len__(self):
i = 0
for e in element_iterator(self.xml_parent, self.xml_namespace, self.xml_qname): i += 1
return i
#This class also serves as the factory for specializing the core Amara tree parse
TOTAL_DICT_SIZE = 0
DICT_LOOKUP_COUNT = 0
NAME_GENERATIONS = 0
class entity_base(container_mixin, tree.entity):
"""
Base class for entity nodes (root nodes--similar to DOM documents and document fragments)
"""
xml_element_base = element_base
xml_encoding = 'utf-8'
def __new__(cls, document_uri=None):
#Create a subclass of entity_base every time to avoid the
#pollution of the class namespace caused by bindery's use of descriptors
        #Cannot subclass more directly because if so we end up with infinite recursion of __new__
cls = type(cls.__name__, (cls,), {})
#FIXME: Might be better to use super() here since we do have true cooperation of base classes
return tree.entity.__new__(cls, document_uri)
def __init__(self, document_uri=None):
#These are the children that do not come from schema information
self.xml_extra_children = None
        #XXX: Should we share the following across documents, perhaps by using an auxiliary class,
#Of which one global, default instance is created/used
#Answer: probably yes
self.xml_model_ = model.content_model()
self._eclasses = {}
self._class_names = {}
self._names = {}
self.factory_entity = self
self.xml_child_pnames = {}
self.xml_pname_cache = {}
#FIXME: Remove this as soon as we move xml_pname_cache to root nodes
container_mixin.__init__(self)
return
#Defined for elements and not doc nodes in core tree. Add as convenience.
@property
def xml_namespaces(self):
xml_namespaces = {}
for e in self.xml_elements:
xml_namespaces.update(dict(e.xml_namespaces.items()))
return xml_namespaces
def xml_xname(self, python_id):
#XML NMTOKENS are a superset of Python IDs
return python_id
def xml_element_factory(self, ns, qname, pname=None):
prefix, local = splitqname(qname)
if not pname: pname = self.xml_new_pname_mapping(ns, local, update_class=False)
if (ns, local) not in self._eclasses:
class_name = pname
eclass = type(class_name, (self.xml_element_base,), dict(xml_child_pnames={}))
self._eclasses[(ns, local)] = eclass
eclass.xml_model_ = model.content_model()
eclass.xml_model_.entities.add(self)
else:
eclass = self._eclasses[(ns, local)]
e = eclass(ns, qname)
e.factory_entity = self
return e
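    #Example:
    #  from amara.bindery import nodes
    #  doc = nodes.entity_base()
    #  e = doc.xml_element_factory(None, u'spam')
    #  doc.xml_append(e)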
def eclass(self, ns, qname, pname=None):
#FIXME: Really the same as the top part of xml_element_factory. Extract common factor
prefix, local = splitqname(qname)
if not pname: pname = self.xml_new_pname_mapping(ns, local, update_class=False)
if (ns, local) not in self._eclasses:
class_name = pname
eclass = type(class_name, (self.xml_element_base,), {})
self._eclasses[(ns, local)] = eclass
eclass.xml_model_ = model.content_model()
eclass.xml_model_.entities.add(self)
else:
eclass = self._eclasses[(ns, local)]
return eclass
import model
#---- end of Amara-2.0.0a6/lib/bindery/nodes.py ----
__all__ = [
'examplotron_model',
]
import sys
#import re
import warnings
import copy
from cStringIO import StringIO
from itertools import *
from functools import *
from operator import *
from amara import tree
from amara.lib.xmlstring import U
from amara.bindery import BinderyError
from amara.bindery.model import document_model, constraint, child_element_constraint, named_node_test, NODE_ID_MARKER
from amara.xpath import datatypes
from amara.xpath.util import top_namespaces, named_node_test, node_test
from amara.xpath import context, parser
from amara.namespaces import AKARA_NAMESPACE, EG_NAMESPACE
class examplotron_model(document_model):
'''
XML model information and metadata extraction cues from an examplotron document
'''
def __init__(self, egdoc):
from amara import bindery
self.model_document = bindery.parse(egdoc)
self.model_document.xml_model.prefixes = top_namespaces(self.model_document)
self.setup_model()
return
def setup_model(self, parent=None):
'''
Process an examplotron document for constraints
'''
NSS = {u'ak': AKARA_NAMESPACE, u'eg': EG_NAMESPACE}
parent = parent if parent is not None else self.model_document
allowed_elements_test = []
if isinstance(parent, tree.element):
#for a in parent.xml_attributes:
#FIXME: Hack until this issue is fixed: http://trac.xml3k.org/ticket/8
for a in dict(parent.xml_attributes.items()):
if a[0] not in [EG_NAMESPACE, AKARA_NAMESPACE]:
parent.xml_model.attribute_types[a] = (self.model_document.xml_new_pname_mapping(a[0], a[1], iselement=False, update_class=False), None)
for e in parent.xml_elements:
#Constraint info
eg_occurs = e.xml_attributes.get((EG_NAMESPACE, 'occurs'))
if not (e.xml_namespace, e.xml_local) in parent.xml_model.element_types:
parent.xml_model.element_types[e.xml_namespace, e.xml_local] = (self.model_document.xml_new_pname_mapping(e.xml_namespace, e.xml_local, update_class=False), None)
if not eg_occurs in [u'?', u'*']:
c = child_element_constraint(e.xml_namespace, e.xml_local)
parent.xml_model.add_constraint(c)
if not eg_occurs in [u'+', u'*']:
parent.xml_model.add_constraint(
constraint(u'count(%s) = 1'%named_node_test(e.xml_namespace, e.xml_local, parent), msg=u'Only one instance of element allowed')
)
allowed_elements_test.append(named_node_test(e.xml_namespace, e.xml_local, parent))
#Metadata extraction cues
#FIXME: Compile these XPath expressions
mod = e.xml_model
rattr = e.xml_select(u'@ak:resource', NSS)
if rattr:
#ak:resource="" should default to a generated ID
mod.metadata_resource_expr = rattr[0].xml_value or NODE_ID_MARKER
#rattr = e.xml_select(u'string(@ak:resource)', NSS)
#if rattr: mod.metadata_resource_expr = rattr
relattr = e.xml_select(u'@ak:rel', NSS)
if relattr:
if relattr[0].xml_value:
mod.metadata_rel_expr = parser.parse(relattr[0].xml_value)
else:
mod.metadata_rel_expr = parser.parse(u'local-name()')
valattr = e.xml_select(u'@ak:value', NSS)
if valattr:
if valattr[0].xml_value:
mod.metadata_value_expr = parser.parse(valattr[0].xml_value)
else:
mod.metadata_value_expr = parser.parse(u'.')
context_attr = e.xml_select(u'@ak:context', NSS)
if context_attr:
mod.metadata_context_expr = parser.parse(context_attr[0].xml_value)
else:
#If it doesn't state context, don't check context
mod.metadata_context_expr = None
#mod.metadata_context_expr = node_test(parent, e, 'parent')
#Apply default relationship or value expression
#If there's ak:rel but no ak:value or ak:resource, ak:value=u'.'
#If there's ak:value but no ak:rel or ak:resource, ak:rel=u'local-name()'
if mod.metadata_resource_expr:
if (mod.metadata_value_expr
and not mod.metadata_rel_expr):
mod.metadata_rel_expr = parser.parse(u'local-name()')
else:
if (mod.metadata_rel_expr
and not mod.metadata_value_expr):
mod.metadata_value_expr = parser.parse(u'.')
elif (mod.metadata_value_expr
and not mod.metadata_rel_expr):
mod.metadata_rel_expr = parser.parse(u'local-name()')
if mod.metadata_resource_expr not in (NODE_ID_MARKER, None):
mod.metadata_resource_expr = parser.parse(mod.metadata_resource_expr)
#if mod.metadata_rel_expr is not None:
# mod.metadata_rel_expr = parser.parse(mod.metadata_rel_expr)
#if mod.metadata_value_expr is not None:
# mod.metadata_value_expr = parser.parse(mod.metadata_value_expr)
relelem = e.xml_select(u'ak:rel', NSS)
for rel in relelem:
mod.other_rel_exprs.append((unicode(rel.name),unicode(rel.value)))
#print e.xml_name, (mod.metadata_resource_expr, mod.metadata_rel_expr, mod.metadata_value_expr)
#Recurse to process children
self.setup_model(e)
if allowed_elements_test:
parent.xml_model.add_constraint(
constraint(u'count(%s) = count(*)'%u'|'.join(allowed_elements_test), msg=u'Invalid elements present')
)
else:
parent.xml_model.add_constraint(
constraint(u'not(*)', msg=u'Element should be empty')
)
#To do:
#Add <ak:product ak:name="AVT" ak:value="AVT"/>
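#Usage sketch (hypothetical model document):
#  from amara.bindery.model.examplotron import examplotron_model
#  model = examplotron_model('<labels><label>text</label></labels>')
#  doc = model.clone() #new, empty document carrying over the model information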
#---- end of Amara-2.0.0a6/lib/bindery/model/examplotron.py ----
__all__ = [
'schematron_model',
]
import sys
#import re
import warnings
import copy
from cStringIO import StringIO
from itertools import *
from functools import *
from operator import *
from amara import tree
from amara.lib.xmlstring import U
from amara.lib.util import first_item
from amara.bindery import BinderyError
from amara.bindery.model import document_model, constraint, child_element_constraint, named_node_test, NODE_ID_MARKER
from amara.xpath import datatypes
from amara.xpath.util import top_namespaces, named_node_test
from amara.namespaces import AKARA_NAMESPACE, STRON_NAMESPACE, OLD_STRON_NAMESPACE
from amara.bindery.util import dispatcher, node_handler
class schematron_model(document_model, dispatcher):
'''
    XML model information and metadata extraction cues from a schematron document
'''
def __init__(self, schdoc):
from amara import bindery
dispatcher.__init__(self)
self.model_document = bindery.parse(schdoc)
self.model_document.xml_model.prefixes = top_namespaces(self.model_document)
self.rules = []
self.setup_model()
return
def setup_model(self, parent=None):
'''
Process a schematron document for constraints
'''
NSS = {u'ak': AKARA_NAMESPACE, u'sch': STRON_NAMESPACE}
if parent is None:
parent = self.model_document
parent.xml_namespaces.update(NSS)
list(self.dispatch(parent))
return
@node_handler(u'sch:schema')
def schema(self, node):
#print >> sys.stderr, "GRIPPO", node.xml_children
for child in node.xml_children:
for chunk in self.dispatch(child):
yield None
#list(chain(*imap(self.dispatch, node.xml_children)))
#@node_handler(u'sch:schema')
#def schema(self, node):
@node_handler(u'sch:schema/sch:pattern')
def pattern(self, node):
list(chain(*imap(self.dispatch, node.xml_children)))
@node_handler(u'sch:pattern/sch:rule')
def rule(self, node):
        context_elem = first_item(node.xml_select(u'ak:resource'))
if context_elem:
resource = U(context_elem.select)
self.rules.append((U(node.context), resource, []))
list(chain(*imap(self.dispatch, node.xml_children)))
@node_handler(u'sch:rule/sch:report')
def report(self, node):
        curr_report_list = self.rules[-1][2]
        curr_report_list.append((node.test, []))
        relelems = node.xml_select(u'ak:rel')
        for relelem in relelems:
            curr_rel_list = curr_report_list[-1][1]
            curr_rel_list.append((U(relelem.name), U(relelem.value)))
def generate_metadata(self, root):
'''
Process a document to extract metadata
        Amara allows you to embed metadata extraction cues into a schematron document
These cues are worked into the model, and are used to drive the extraction from the
instance at root
'''
prefixes = root.xml_model.prefixes
        #print >> sys.stderr, "STACEY"
        def handle_node(node, resource):
            #print >> sys.stderr, "GRIPPO", node, resource
            for (context, resource, report_list) in self.rules:
if self.check_xpath(context, node):
for (test, rel_list) in report_list:
if node.xml_select(test):
for (relname, relvalue) in rel_list:
yield (resource, relname, relvalue)
#
for child in node.xml_children:
for item in handle_node(child, resource):
yield item
return
return ( item for item in handle_node(root, root.xml_base) )
#---- end of Amara-2.0.0a6/lib/bindery/model/schematron.py ----
__all__ = [
'constraint', 'child_element_constraint', 'attribute_constraint',
'content_model',
'named_node_test', 'document_model',
'ATTIBUTE_AXIS',
'metadata_dict',
'generate_metadata',
]
import sys
#import re
import warnings
import copy
from cStringIO import StringIO
from itertools import *
from functools import *
from operator import *
from amara import tree
from amara.lib.xmlstring import U
from amara.lib.util import element_subtree_iter
from amara.xpath import datatypes
from amara.xpath.util import named_node_test, abspath
from amara.bindery import BinderyError
from amara.namespaces import AKARA_NAMESPACE
from amara.xpath import context, parser
ATTIBUTE_AXIS = u'@'
NODE_ID_MARKER = u'generate-id()'
class constraint(object):
'''
Represents a constraint on an XML model
'''
def __init__(self, assertion, fixup=None, msg=None):
self.assertion = assertion
self.fixup = fixup
self.msg = msg
def validate(self, node):
'''
Check this constraint against a node.
Raise an exception if the constraint fails, possibly after attempting fixup
'''
from amara.bindery import BinderyError
assertion = self.assertion
if callable(assertion):
assertion = assertion(node)
result = datatypes.boolean(node.xml_select(assertion))
if not result:
if self.fixup:
self.fixup(node)
                result = datatypes.boolean(node.xml_select(assertion))
if result:
return
raise BinderyError(BinderyError.CONSTRAINT_VIOLATION, constraint=self.msg or assertion, node=abspath(node))
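#Example (sketch): make xml:id required on a node's element class:
#  node.xml_model.add_constraint(constraint(u'@xml:id'), validate=True)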
class attribute_constraint(constraint):
'''
Constraint representing the presence of an attribute on an element
'''
def __init__(self, ns, local, default=None):
self.ns = ns
self.local = local
self.default = default
assertion = self.assertion if self.ns else u'@' + self.local
constraint.__init__(self, assertion, fixup=(self.set_default if default else None))
def set_default(self, node):
node.xml_attributes[self.ns, self.local] = self.default
def assertion(self, node):
return named_node_test(self.ns, self.local, node, axis=ATTIBUTE_AXIS)
class child_element_constraint(constraint):
'''
Constraint representing the presence of a simple child element
'''
#XXX: We could make this a bit more flexible by allowing the user to specify an
#XML fragment as well as simple text default content
def __init__(self, ns, local, default=None):
self.ns = ns
self.local = local
self.default = default
assertion = partial(named_node_test, self.ns, self.local) if self.ns else self.local
constraint.__init__(self, assertion, fixup=(self.set_default if default else None))
def set_default(self, node):
#XXX: Should be able to reuse named_node_test function
#What prefix to use
for prefix, ns in node.xml_namespaces.items():
if ns == self.ns and prefix:
qname = prefix + u':' + self.local
break
else:
#No prefix: just hijack the default namespace
qname = self.local
ownerdoc = node.xml_select(u'/')[0]
eclass = ownerdoc.eclass(self.ns, self.local)
new_elem = eclass(self.ns, qname)
new_elem.xml_append(tree.text(self.default))
node.xml_append(new_elem)
return
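#Example (sketch): auto-create a missing child element with default content:
#  c = child_element_constraint(None, u'b', default=u'spam')
#  doc.a.xml_model.add_constraint(c, validate=True) #appends <b>spam</b> if absent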
class content_model:
def __init__(self):
        #Maps (ns, local) to (python-name, default)
self.element_types = {}
self.attribute_types = {}
self.constraints = []
self.entities = set()
self.metadata_resource_expr = None
self.metadata_rel_expr = None
self.metadata_value_expr = None
self.metadata_coercion_expr = None
self.metadata_context_expr = None
self.other_rel_exprs = []
self.prefixes = {}
return
def add_constraint(self, constraint, validate=False):
self.constraints.append(constraint)
if validate: self.validate()
return
def validate(self, node=None):
#print repr(node)
#re-validate all constraints, not just this one (interlocking constraints will likely be coming in future)
if node:
#Check all constraints
for constraint in self.constraints:
constraint.validate(node)
#Make sure all known element types have corresponding properties on the node class
for (ns, local), (pname, default) in self.element_types.iteritems():
if not hasattr(node, pname):
                    from amara.bindery.nodes import bound_element
setattr(node.__class__, pname, bound_element(ns, local))
else:
for d in self.entities:
subtree = element_subtree_iter(d, include_root=True)
for e in subtree:
if e.xml_model == self:
self.validate(e)
return
def default_value(self, ns, local):
pass
def debug(self, node=None):
for c in self.constraints:
            print >> sys.stderr, (c.assertion(node) if node and callable(c.assertion) else c.assertion)
return
def generate_metadata(self, root):
'''
Process a document to extract metadata
Amara allows you to embed metadata extraction cues into an examplotron document
These cues are worked into the model, and are used to drive the extraction from the
instance at root
'''
#FIXME: investigate the requirement for prefixes=prefixes for XPath, even for prefixes defined in source doc
def handle_element(elem, resource):
new_resource = None
prefixes = elem.xml_root.xml_model.prefixes
if elem.xml_model.metadata_context_expr:
if not elem.xml_model.metadata_context_expr.evaluate(context(elem, namespaces=prefixes)):
return
#Is there a cue that designates this element as a resource envelope?
if elem.xml_model.metadata_resource_expr:
if elem.xml_model.metadata_resource_expr == NODE_ID_MARKER:
#FIXME: Isn't going from unicode -> xpath str -> unicode wasteful?
new_resource = unicode(datatypes.string(elem.xml_nodeid))
else:
new_resource = unicode(datatypes.string(elem.xml_model.metadata_resource_expr.evaluate(context(elem, namespaces=prefixes))))
#Is there a cue that designates a relationship in this element?
if elem.xml_model.metadata_rel_expr:
#Execute the XPath to get the relationship name/title
rel = datatypes.string(elem.xml_model.metadata_rel_expr.evaluate(context(elem, namespaces=prefixes)))
if elem.xml_model.metadata_value_expr:
#Execute the XPath to get the relationship value
val = elem.xml_model.metadata_value_expr.evaluate(context(elem, namespaces=prefixes))
elif new_resource is not None:
#If the element is also a resource envelope, the default value is the new resource ID
val = new_resource
else:
#Handle the default ak:value of "."
val = datatypes.nodeset([elem])
yield (unicode(resource), unicode(rel), val)
#Basically expandqname first
#prefix, local = splitqname(rattr)
#try:
# ns = elem.xml_namespaces[prefix]
# resource = ns + local
#except KeyError:
# resource = rattr
if new_resource is not None: resource = new_resource
for rel_expr, val_expr in elem.xml_model.other_rel_exprs:
rel = datatypes.string(elem.xml_select(rel_expr, prefixes=prefixes))
val = elem.xml_select(val_expr, prefixes=prefixes)
yield (unicode(resource), unicode(rel), val)
for child in elem.xml_elements:
for item in handle_element(child, resource):
yield item
return
#Make sure we skip any entities and start with top element(s)
if isinstance(root, tree.entity):
return ( item for elem in root.xml_elements for item in handle_element(elem, root.xml_base) )
else:
return ( item for item in handle_element(root, root.xml_base) )
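    #Usage sketch: iterate (resource, relationship, value) triples driven by
    #the model's metadata cues:
    #  for rid, rel, val in doc.xml_model.generate_metadata(doc):
    #      print rid, rel, val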
# node.xml_model.constraints.append(u'@xml:id', validate=True) #Make xml:id required. Will throw a constraint violation right away if there is not one. Affects all instances of this class.
# node.xml_model.validate(recurse=True) #Recursively validate constraints on node and all children
#No constraints by default
#DEFAULT_MODEL = content_model()
class document_model(object):
'''
Represents XML model information set up for an entire document
'''
#def __init__(self):
# self.model_document = None
def clone(self, document_uri=None):
'''
Return a new, empty document incorporating the model information
'''
from amara.bindery import nodes
doc = nodes.entity_base(document_uri)
doc.xml_model_ = self.model_document.xml_model_
doc._eclasses = self.model_document._eclasses.copy()
doc._class_names = self.model_document._class_names.copy()
doc._names = self.model_document._names.copy()
for c in doc._eclasses.values():
c.xml_model_.entities.add(doc)
return doc
        #raise NotImplementedError
def generate_metadata(root):
return root.xml_model.generate_metadata(root)
#Singleton/sentinel
MARK = object()
def metadata_dict(metadata, nesteddict=True):
#FIXME: add a doctest/example
"""
Convert a list of tuples as returned by amara.bindery.model.generate_metadata into a dict of dicts of the metadata (if nesteddict==True) or a list of dicts
"""
resources = {} if nesteddict else []
first_id = MARK
#Use sorted to ensure grouping by resource IDs
for rid, row in groupby(sorted(metadata, key=itemgetter(0)), itemgetter(0)):
if first_id == MARK: first_id = rid
#entry[u'id'] = eid
resource = {}
#It's all crazy lazy, so use list() to consume the iterator
for (i, key, val) in row:
if key in resource:
if isinstance(resource[key], list):
resource[key].append(val)
else:
resource[key] = [resource[key], val]
else:
resource[key] = val
if nesteddict:
resources[rid] = resource
else:
resources.append((rid, resource))
#XXX first_id not really useful if nesteddict=False
return resources, first_id
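#Example (hypothetical triples):
#  md = [(u'r1', u'name', u'a'), (u'r1', u'name', u'b'), (u'r2', u'name', u'c')]
#  metadata_dict(md)
#  --> ({u'r1': {u'name': [u'a', u'b']}, u'r2': {u'name': u'c'}}, u'r1')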
from examplotron import examplotron_model
__all__.append('examplotron_model')
#---- end of Amara-2.0.0a6/lib/bindery/model/__init__.py ----
import os
import cStringIO
import traceback
from amara import tree
from amara.xpath import context
from amara.xpath import XPathError, datatypes
from amara.xpath.parser import xpathparser
from amara.lib.util import *
# NOTE: XPathParser and Context are imported last to avoid import errors
__all__ = [# XPath expression processing:
'Compile', 'Evaluate', 'SimpleEvaluate', 'paramvalue', 'parameterize',
'simplify', 'named_node_test', 'abspath'
]
# -- Core XPath API ---------------------------------------------------------
def simple_evaluate(expr, node, prefixes=None):
"""
Designed to be the most simple/brain-dead interface to using XPath
Usually invoked through Node objects using:
node.xml_select(expr[, prefixes])
expr - XPath expression in string or compiled form
node - the node to be used as core of the context for evaluating the XPath
prefixes - (optional) any additional or overriding namespace mappings
in the form of a dictionary of prefix: namespace
the base namespace mappings are taken from in-scope
declarations on the given node. This explicit dictionary
is superimposed on the base mappings
"""
#Note: context.__init__(self, node, position=1, size=1, variables=None, namespaces=None, extmodules=(), extfunctions=None, output_parameters=None)
try:
prefixes_out = dict([(prefix, ns) for (prefix, ns) in node.xml_namespaces.iteritems()])
except AttributeError:
prefixes_out = top_namespaces(node.xml_root)
if prefixes:
prefixes_out.update(prefixes)
ctx = context(node, 0, 0, namespaces=prefixes_out)
#extmodules=ext_modules)
return ctx.evaluate(expr)
SimpleEvaluate = simple_evaluate
def Evaluate(expr, contextNode=None, context=None):
"""
Evaluates the given XPath expression.
Two arguments are required: the expression (as a string or compiled
expression object), and a context. The context can be given as a
Domlette node via the 'contextNode' named argument, or can be given as
    an amara.xpath.context object via the 'context' named argument.
If namespace bindings or variable bindings are needed, use a
Context object. If extension functions are needed, either use a
Context object, or set the EXTMODULES environment variable to be a
':'-separated list of names of Python modules that implement
extension functions.
The return value will be one of the following:
    node-set: list of amara.tree node objects;
    string: Unicode string type;
    number: float type;
    boolean: amara.xpath.datatypes.boolean;
or a non-XPath object (i.e. as returned by an extension function).
"""
if 'EXTMODULES' in os.environ:
ext_modules = os.environ["EXTMODULES"].split(':')
else:
ext_modules = []
if contextNode and context:
con = context.clone()
con.node = contextNode
elif context:
con = context
elif contextNode:
#contextNode should be a node, not a context obj,
#but this is a common error. Be forgiving?
        #The parameter named 'context' shadows the amara.xpath.context class here
        from amara.xpath import context as _context_class
        if isinstance(contextNode, _context_class):
            con = contextNode
        else:
            con = _context_class(contextNode, 0, 0, extmodules=ext_modules)
else:
        raise XPathError(XPathError.NO_CONTEXT)
if hasattr(expr, "evaluate"):
retval = expr.evaluate(con)
else:
        retval = xpathparser.parse(expr).evaluate(con)
return retval
def Compile(expr):
"""
Given an XPath expression as a string, returns an object that allows
an evaluation engine to operate on the expression efficiently.
This "compiled" expression object can be passed to the Evaluate
function instead of a string, in order to shorten the amount of time
needed to evaluate the expression.
"""
    try:
        return xpathparser.parse(expr)
    except (XPathError, KeyboardInterrupt, SystemExit):
        raise
    except:
        stream = cStringIO.StringIO()
        traceback.print_exc(None, stream)
        raise XPathError(XPathError.INTERNAL, stream.getvalue())
def paramvalue(obj):
"""
Try to convert a Python object into an XPath data model value
returns the value if successful, else None
"""
if isinstance(obj, datatypes.xpathobject):
return obj
if isinstance(obj, unicode):
return datatypes.string(obj)
elif isinstance(obj, str):
try:
obj = obj.decode('utf-8')
except UnicodeError:
return None
else:
return datatypes.string(obj)
elif isinstance(obj, bool): # <bool> is subclasses of <int>, test first
return datatypes.TRUE if obj else datatypes.FALSE
elif isinstance(obj, (int, long, float)):
return datatypes.number(obj)
elif isinstance(obj, tree.node):
return obj
    # NOTE: At one time (WSGI.xml days) this attempted to be smart and handle
# all iterables but this would mean blindly dealing with dangerous
# creatures, such as sockets. So now it's more conservative and sticks to
# just list & tuple.
elif isinstance(obj, (list, tuple)):
# We can only use the list if the items are all nodes or all strings.
# Strings are converted to a nodeset of text nodes.
for item in obj:
if not isinstance(item, (str, unicode)):
break
else:
# We need to use an entity to preserve ordering
entity = tree.entity()
for item in obj:
if isinstance(item, str):
try:
item = unicode(item, 'utf8')
except UnicodeError:
return None
entity.xml_append(tree.text(item))
return datatypes.nodeset(entity.xml_children)
# We can only use the list if all the items are nodes.
for item in obj:
if not isinstance(item, tree.node):
return None
return datatypes.nodeset(obj)
else:
return None
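#Examples:
#  paramvalue(u'spam')      -> datatypes.string(u'spam')
#  paramvalue(1.0)          -> datatypes.number(1.0)
#  paramvalue(True)         -> datatypes.TRUE
#  paramvalue([u'a', u'b']) -> nodeset of two text nodes
#  paramvalue(object())     -> None (not convertible)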
def parameterize(inputdict, defaultns=None):
"""
Convert a dictionary of name to object mappings into a dict of parameters suitable for
passing into XPath context, or an XSLT transform
inputdict - input mapping of name (string or tuple) to values
defaultns - the namespace to use for parameter names given as string/unicode rather than tuple
return the resulting param dict if successful. If inputdict cannot completely be converted, return None
"""
resultdict = {}
for key in inputdict:
value = paramvalue(inputdict[key])
if value is None:
continue
if isinstance(key, basestring):
if isinstance(key, str): key = key.decode('utf-8')
resultdict[(defaultns, key)] = value
elif isinstance(key, (tuple, list)):
resultdict[key] = value
return resultdict
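#Example:
#  parameterize({u'foo': u'bar'})
#  --> {(None, u'foo'): datatypes.string(u'bar')}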
from amara.xpath import datatypes
XPATH_TYPES = {
datatypes.number: float,
datatypes.string: unicode,
datatypes.boolean: bool,
datatypes.nodeset: list,
}
def simplify(result):
'''
turn an XPath result into its equivalent simple types
>>> import amara
>>> from amara.xpath.util import simplify
>>> doc = amara.parse('<a><b/></a>')
    >>> repr(simplify(doc.xml_select(u'name(a)')))
    "u'a'"
>>> repr(simplify(doc.xml_select(u'count(a)')))
'1.0'
>>> simplify(doc.xml_select(u'a'))
[<amara._domlette.element at 0x6c5fb0: name u'a', 0 namespaces, 0 attributes, 1 children>]
>>> simplify(doc.xml_select(u'boolean(a)'))
True
'''
return XPATH_TYPES[result.__class__](result)
#import amara; from amara.xpath.util import simplify; doc = amara.parse('<a><b/></a>'); repr(simplify(doc.xml_select(u'name(a)')))
import amara
def xpathmap(source, expressions):
'''
[u'count(//book)', {u'//book': [u'title', u'publisher']}]
'''
doc = amara.parse(source)
    #FIXME: incomplete sketch
    def submap(node, expr):
        if isinstance(expr, dict):
            #return dict([ [ submap(subnode, subexpr) for ] for subnode in node.xml_select(expr)])
            for subexpr in expr:
                keylist = node.xml_select(subexpr)
def indexer(source, expressions, output=None):
    doc = amara.parse(source)
    if output:
        output.top()
    for expr in expressions:
        result = simplify(doc.xml_select(expr))
        if output:
            output.put(result)
    if output:
        output.bottom()
#Mapping from node type to XPath node test function name
OTHER_NODES = {
tree.text.xml_type: u'text',
tree.comment.xml_type: u'comment',
tree.processing_instruction.xml_type: u'processing-instruction',
}
FULL_NS_FORM = u'*[namespace-uri()="%s" and local-name()="%s"]'
def abspath(node, prefixes=None):
#based on code developed by Florian Bosch on XML-SIG
#http://mail.python.org/pipermail/xml-sig/2004-August/010423.html
#Significantly enhanced to use Unicode properly, support more
#node types, use safer node type tests, etc.
#See also: http://snippets.dzone.com/posts/show/4349
"""
Return an XPath expression that provides a unique path to
the given node (supports elements, attributes, root nodes,
text nodes, comments and PIs) within a document
if the document uses the default namespace, the result might use
a long form for element name tests, applying the namespace-uri()
and local-name() XPath functions. You can avoid this by passing in
a namespace hints dictionary (prefixes).
prefixes - optional hint dictionary from prefix to namespace;
used to reconcile default namespace usage
"""
if node.xml_type == tree.element.xml_type:
count = 1
#Count previous siblings with same node name
previous = node.xml_preceding_sibling
while previous:
if (isinstance(previous, tree.element) and (previous.xml_namespace, previous.xml_local)
== (node.xml_namespace, node.xml_local)):
count += 1
previous = previous.xml_preceding_sibling
nametest = node.xml_qname
if node.xml_namespace and not node.xml_prefix:
_prefixes = node.xml_namespaces.copy()
if prefixes is not None:
_prefixes.update(prefixes)
#nicer code, but maybe slower than iterating items()
#nss = dict([(n,p) for (p,n) in prefixes.items()])
#must provide a prefix for XPath
prefix = None
for prefix, ns in _prefixes.iteritems():
if node.xml_namespace == ns and prefix:
nametest = prefix + u':' + node.xml_qname
break
else:
nametest = FULL_NS_FORM%(node.xml_namespace, node.xml_local)
step = u'%s[%i]' % (nametest, count) if count > 1 else u'%s' % (nametest)
ancestor = node.xml_parent
elif node.xml_type == tree.attribute.xml_type:
step = u'@%s' % (node.xml_qname)
ancestor = node.xml_parent
elif node.xml_type in OTHER_NODES:
#Text nodes, comments and PIs
count = 1
#Count previous siblings of the same node type
previous = node.xml_preceding_sibling
while previous:
if previous.xml_type == node.xml_type: count += 1
previous = previous.xml_preceding_sibling
test_func = OTHER_NODES[node.xml_type]
step = u'%s()[%i]' % (test_func, count)
ancestor = node.xml_parent
elif not node.xml_parent:
#Root node
step = u''
ancestor = node
else:
raise TypeError('Unsupported node type for abspath')
if ancestor.xml_parent:
return abspath(ancestor, prefixes) + u'/' + step
else:
return u'/' + step
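#Example:
#  import amara
#  doc = amara.parse('<a><b/><b/></a>')
#  abspath(doc.xml_select(u'//b')[1])
#  --> u'/a/b[2]'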
def named_node_test(exemplar_ns, exemplar_local, context, axis=u''):
'''
Return an XPath node test for the given child element on the given node
'''
if axis:
if axis in (u'parent', u'ancestor', u'preceding', u'following', u'preceding-sibling', u'following-sibling'):
axis += u'::'
elif axis in (u'.', u'..', u'@'):
axis += u'/'
if not exemplar_ns:
return axis + exemplar_local
for prefix, ns in context.xml_namespaces.items():
if ns == exemplar_ns:
#Use this prefix, as long as it's not the default NS
if not prefix: break
return axis + prefix + u':' + exemplar_local
#Probably better to just pass in a temp prefix mapping here
return u'%s*[namespace-uri()="%s" and local-name()="%s"]'%(axis, exemplar_ns or u'', exemplar_local)
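#Examples (for some element elem):
#  named_node_test(None, u'b', elem)     --> u'b'
#  named_node_test(u'urn:x', u'b', elem) --> u'pfx:b' if some prefix pfx maps
#  to urn:x on elem, otherwise the long namespace-uri()/local-name() form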
def node_test(exemplar, context, axis=u''):
'''
Return an XPath node test for an element like the exemplar (same node type,
and, if applicable, universal name)
It is a special case when the exemplar is a root node. In this case, the axis is forced
as parent
'''
if axis:
if axis in (u'parent', u'ancestor', u'preceding', u'following', u'preceding-sibling', u'following-sibling'):
axis += u'::'
elif axis in (u'.', u'..', u'@'):
axis += u'/'
if isinstance(exemplar, tree.entity):
return u'parent::node()[self::node() = /]'
if isinstance(exemplar, tree.comment):
return axis + u'comment()'
if isinstance(exemplar, tree.processing_instruction):
return axis + u'processing-instruction()'
if not exemplar.xml_namespace:
return axis + exemplar.xml_local
for prefix, ns in context.xml_namespaces.items():
if ns == exemplar.xml_namespace:
#Use this prefix, as long as it's not the default NS
if not prefix: break
return axis + prefix + u':' + exemplar.xml_local
#Probably better to just pass in a temp prefix mapping here
return u'%s*[namespace-uri()="%s" and local-name()="%s"]'%(axis, exemplar.xml_namespace or u'', exemplar.xml_local)
#---- end of Amara-2.0.0a6/lib/xpath/util.py ----
__all__ = [# global constants:
# exception class:
'XPathError',
# XPath expression processing:
#'Compile', 'Evaluate', 'SimpleEvaluate',
# DOM preparation for XPath processing:
#'NormalizeNode',
'context'
]
# -- XPath exceptions --------------------------------------------------------
from amara import Error
class XPathError(Error):
"""
Base class for exceptions specific to XPath processing
"""
# internal or other unexpected errors
INTERNAL = 1
# syntax or other static errors
SYNTAX = 10
UNDEFINED_VARIABLE = 11
UNDEFINED_PREFIX = 12
UNDEFINED_FUNCTION = 13
ARGCOUNT_NONE = 14
ARGCOUNT_ATLEAST = 15
ARGCOUNT_EXACT = 16
ARGCOUNT_ATMOST = 17
TYPE_ERROR = 20
NO_CONTEXT = 30
@classmethod
def _load_messages(cls):
from gettext import gettext as _
return {
# -- internal/unexpected errors --------------------------------
XPathError.INTERNAL: _(
'There is an internal bug in 4XPath. Please make a post to '
'the 4Suite mailing list to report this error message to the '
'developers. Include platform details and info about how to '
'reproduce the error. Info about the mailing list is at '
'http://lists.4suite.org/mailman/listinfo/4suite.\n'
'The error code to report is: %s'),
# -- expression syntax errors ----------------------------------
XPathError.SYNTAX: _(
'XPath expression syntax error at line %(line)d, '
'column %(column)d: %(text)s'),
XPathError.UNDEFINED_VARIABLE: _(
"Variable '%(variable)s' not declared"),
XPathError.UNDEFINED_PREFIX: _(
'Undefined namespace prefix: "%(prefix)s".'),
XPathError.UNDEFINED_FUNCTION: _(
'Undefined function: "%(function)s".'),
XPathError.ARGCOUNT_NONE: _(
'%(function)s() takes no arguments (%(total)d given)'),
XPathError.ARGCOUNT_ATLEAST: _(
'%(function)s() takes at least %(count)d arguments '
'(%(total)d given)'),
XPathError.ARGCOUNT_EXACT: _(
'%(function)s() takes exactly %(count)d arguments '
'(%(total)d given)'),
XPathError.ARGCOUNT_ATMOST: _(
'%(function)s() takes at most %(count)d arguments '
'(%(total)d given)'),
XPathError.TYPE_ERROR: _(
"%(what)s must be '%(expected)s', not '%(actual)s'"),
# -- evaluation errors -----------------------------------------
XPathError.NO_CONTEXT: _(
'An XPath Context object is required in order to evaluate an '
'expression.'),
}
# -- Additional setup --------------------------------------------------------
# -- Core XPath API ----------------------------------------------------------
#from Util import Compile, Evaluate, SimpleEvaluate, NormalizeNode
import types
import operator
from amara import tree
from amara.namespaces import XML_NAMESPACE
from amara.writers import writer, treewriter, stringwriter
from amara.xpath import extensions, parser
_writer_methods = operator.attrgetter(
'start_document', 'end_document', 'start_element', 'end_element',
'namespace', 'attribute', 'text', 'comment', 'processing_instruction')
class context(writer):
"""
The context of an XPath expression
"""
functions = extensions.extension_functions
current_instruction = None
def __init__(self, node, position=1, size=1,
variables=None, namespaces=None,
extmodules=(), extfunctions=None,
output_parameters=None):
writer.__init__(self, output_parameters)
self.node, self.position, self.size = node, position, size
self.variables = {}
if variables:
self.variables.update(variables)
self.namespaces = {'xml': XML_NAMESPACE}
if namespaces:
self.namespaces.update(namespaces)
# This may get mutated during processing
self.functions = self.functions.copy()
# Search the extension modules for defined functions
for module in extmodules:
if module:
if not isinstance(module, types.ModuleType):
module = __import__(module, {}, {}, ['ExtFunctions'])
funcs = getattr(module, 'extension_functions', None)
if funcs:
self.functions.update(funcs)
# Add functions given directly
if extfunctions:
self.functions.update(extfunctions)
self._writers = []
return
def __repr__(self):
ptr = id(self)
if ptr < 0: ptr += 0x100000000L
return "<Context at 0x%x: Node=%s, Postion=%d, Size=%d>" % (
ptr, self.node, self.position, self.size)
def push_writer(self, writer):
self._writers.append(writer)
# copy writer methods onto `self` for performance
(self.start_document, self.end_document, self.start_element,
self.end_element, self.namespace, self.attribute, self.text,
self.comment, self.processing_instruction) = _writer_methods(writer)
# begin processing
writer.start_document()
return
def push_tree_writer(self, base_uri):
writer = treewriter.treewriter(self.output_parameters, base_uri)
self.push_writer(writer)
def push_string_writer(self, errors=True):
writer = stringwriter.stringwriter(self.output_parameters, errors)
self.push_writer(writer)
def pop_writer(self):
writer = self._writers[-1]
del self._writers[-1]
writer.end_document()
if self._writers:
# copy writer methods onto `self` for performance
(self.start_document, self.end_document, self.start_element,
self.end_element, self.namespace, self.attribute, self.text,
self.comment, self.processing_instruction
) = _writer_methods(self._writers[-1])
return writer
def copy_nodes(self, nodes):
for node in nodes:
self.copy_node(node)
return
def copy_node(self, node):
if isinstance(node, tree.element):
self.start_element(node.xml_qname, node.xml_namespace,
node.xmlns_attributes.copy())
for attr in node.xml_attributes.nodes():
self.attribute(attr.xml_qname, attr.xml_value, attr.xml_namespace)
for child in node:
self.copy_node(child)
self.end_element(node.xml_qname, node.xml_namespace)
elif isinstance(node, tree.attribute):
self.attribute(node.xml_qname, node.xml_value, node.xml_namespace)
elif isinstance(node, tree.text):
self.text(node.xml_value, not node.xsltOutputEscaping)
elif isinstance(node, tree.processing_instruction):
self.processing_instruction(node.xml_target, node.xml_data)
elif isinstance(node, tree.comment):
self.comment(node.xml_value)
elif isinstance(node, tree.entity):
for child in node:
self.copy_node(child)
elif isinstance(node, tree.namespace):
self.namespace(node.xml_name, node.xml_value)
else:
pass
return
def add_function(self, name, function):
if not callable(function):
raise TypeError("function must be a callable object")
self.functions[name] = function
return
    def clone(self):
        return self.__class__(self.node, self.position, self.size,
                              self.variables, self.namespaces)
def evaluate(self, expr):
"""
The main entry point for evaluating an XPath expression, using self as context
expr - a unicode object with the XPath expression
"""
parsed = parser.parse(expr)
return parsed.evaluate(self)
def __repr__(self):
ptr = id(self)
if ptr < 0:
ptr += 0x100000000L
return ('<%s at 0x%x: node %r, position %d, size %d>' %
(self.__class__.__name__, ptr, self.node, self.position,
self.size))
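#Usage sketch:
#  import amara
#  from amara.xpath import context
#  doc = amara.parse('<a><b/></a>')
#  context(doc).evaluate(u'count(//b)')
#  --> 1.0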
def launch(*args, **kwargs):
import pprint
from amara.xpath.util import simplify
source = args[0]
expr = args[1]
import amara
doc = amara.parse(source)
result = doc.xml_select(expr.decode('utf-8'))
pprint.pprint(simplify(result))
return
import sys, getopt
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], "hv", ["help", ])
except getopt.error, msg:
raise Usage(msg)
# option processing
kwargs = {}
for option, value in opts:
if option == "-v":
verbose = True
if option in ("-h", "--help"):
raise Usage(help_message)
except Usage, err:
print >> sys.stderr, sys.argv[0].split("/")[-1] + ": " + str(err.msg)
print >> sys.stderr, "\t for help use --help"
return 2
launch(*args, **kwargs)
if __name__ == "__main__":
sys.exit(main())
#---- end of Amara-2.0.0a6/lib/xpath/__init__.py ----
import os, re, codecs, time
from xml.dom import Node
import Ft
from Ft.Lib import boolean, number, Uri, Wrap as LineWrap
from Ft.Lib.Random import DEFAULT_RNG
from Ft.Xml import Lib
from Ft.Xml.XPath import Conversions
from Ft.Xml.XPath import XPathTypes as Types
from Ft.Xml.XPath import FT_EXT_NAMESPACE
def BaseUri(context, arg=None):
"""
Returns the base URI of the first node in the given node-set, or
of the context node if no argument is given. If the given node-set
is empty, an empty string is returned.
"""
if arg is None:
node = context.node
elif isinstance(arg, Types.NodesetType):
if not arg:
return u''
node = arg[0]
else:
raise TypeError("%r must be a node-set, not a %s" % (
arg, Types.g_xpathPrimitiveTypes.get(type(arg), type(arg).__name__)))
return node.xml_base or u''
BaseUri.arguments = (Types.NodesetType,)
BaseUri.result = Types.StringType
def Decode(context, object, encoding):
"""
f:decode mirrors the Python decode function/method. It takes a
foreign object that is a Python byte string, and an encoding,
and returns another foreign object which is a Unicode object.
"""
encode, decode, reader, writer = codecs.lookup(encoding)
return decode(object)[0]
Decode.arguments = (Types.ObjectType, Types.StringType)
Decode.result = Types.ObjectType
def Encode(context, object, encoding):
"""
f:encode mirrors the Python encode function/method. It takes a
foreign object that is a Unicode object, and an encoding,
and returns another foreign object which is a Python byte string.
"""
encode, decode, reader, writer = codecs.lookup(encoding)
return encode(object)[0]
Encode.arguments = (Types.ObjectType, Types.StringType)
Encode.result = Types.ObjectType
def EndsWith(context, outer, inner):
"""
Returns true if the string given in the first argument ends with
the substring given in the second argument.
"""
outer = Conversions.StringValue(outer)
inner = Conversions.StringValue(inner)
return outer.endswith(inner) and boolean.true or boolean.false
EndsWith.arguments = (Types.StringType, Types.StringType)
EndsWith.result = Types.BooleanType
def EscapeXml(context, text):
"""
Returns the given string with XML markup characters "&", "<" and
">" escaped as "&", "<" and ">", respectively.
"""
from xml.sax.saxutils import escape
return escape(Conversions.StringValue(text))
EscapeXml.arguments = (Types.StringType,)
EscapeXml.result = Types.StringType
def GenerateUuid(context):
"""
Returns a random UUID string.
"""
from Ft.Lib import Uuid
rt = Uuid.UuidAsString(Uuid.GenerateUuid())
rt = unicode(rt, 'us-ascii', errors='replace')
return rt
GenerateUuid.arguments = ()
GenerateUuid.result = Types.StringType
def If(context, cond, v1, v2=None):
"""
If the first argument, when converted to a boolean, is true,
returns the second argument. Otherwise, returns the third
argument, or if the third argument is not given, returns an
empty node-set.
"""
# contributed by Lars Marius Garshol;
# originally using namespace URI 'http://garshol.priv.no/symbolic/'
if Conversions.BooleanValue(cond):
return v1
elif v2 is None:
return []
else:
return v2
If.arguments = (Types.BooleanType, Types.ObjectType, Types.ObjectType)
If.result = Types.ObjectType
# why does this exist?
def ImportString(context, object):
"""
f:import-string takes a Unicode FO and returns an XPath string. It is
an error if the FO contains illegal XML chars. (although eventually
this function might be extended to recover from this error)
"""
#FIXME: Add validation of object as valid XPath string,
#and possibly mapping ops to PUA for illegal characters.
#We probably need an Export string if we add PUA shifting
return object
ImportString.arguments = (Types.ObjectType,)
ImportString.result = Types.StringType
def Indent(context, text, levels, indentstring=None):
"""
    f:indent() returns a string with each line of the text indented the
    given number of levels. For each level, the indent string (2 spaces
    by default) is prepended to each line.
"""
text = Conversions.StringValue(text)
levels = int(Conversions.NumberValue(levels))
if indentstring is None:
indentstring = u' '
else:
indentstring = Conversions.StringValue(indentstring)
if indentstring and levels > 0:
indent = indentstring * levels
return indent + ('\n' + indent).join(text.split('\n'))
else:
return text
Indent.arguments = (Types.StringType, Types.NumberType, Types.StringType)
Indent.result = Types.StringType
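#Example: with the default two-space indent string, f:indent('two\nlines', 2)
#yields u'    two\n    lines'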
def NormalizeEol(context, text):
"""
Normalizes end-of-line characters in input string, returning the
normalized string. Normalization involves replacing "\n\r", "\r\n"
or "\r" with "\n"
"""
text = text.replace("\n\r", "\n")
text = text.replace("\r\n", "\n")
text = text.replace("\r", "\n")
return text
NormalizeEol.arguments = (Types.StringType,)
NormalizeEol.result = Types.StringType
def OsPath2Uri(context, path):
"""
Returns the given OS path as a URI.
The result varies depending on the underlying operating system.
"""
return Uri.OsPathToUri(Conversions.StringValue(path))
OsPath2Uri.arguments = (Types.StringType,)
OsPath2Uri.result = Types.StringType
def ParseXml(context, src, parameters=None):
"""
f:parse-xml() parses the string-value of the given object as XML
and returns a node-set whose sole item is the resulting parsed
document's root node. The XML must be a well-formed document.
src - the string or object to be parsed as XML.
parameters - the name of a parameter set for the operation.
The parameters argument is ignored for now. In the future, it
will provide a way to specify a base URI for the resolution of
relative URIs in entity declarations and XIncludes.
Also for now, if the XML contains an encoding declaration, the
declaration must specify UTF-8.
An example:
<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:f="http://xmlns.4suite.org/ext"
version="1.0"
>
<xsl:output method="text"/>
<xsl:variable name="doc"
select="'<spam>eggs<monty>python</monty></spam>'"/>
<xsl:template match="/">
<xsl:value-of select="f:parse-xml($doc)/spam/monty"/>
</xsl:template>
</xsl:stylesheet>
...run against any XML source should yield:
python
See also: XSLT (not XPath) extension function f:serialize-xml()
"""
from Ft.Xml import Domlette
src = Conversions.StringValue(src).encode("utf-8")
# prepare a base URI for the XML
instruction = getattr(context, 'currentInstruction', None)
if instruction:
uri = instruction.baseUri
else:
uri = context.node.baseUri
if not uri:
        uri = Uri.OsPathToUri('__parse-xml-extension-function__',
                              attemptAbsolute=1)
# append "XML-string-(something_unique)" as a fragment
uri += '%sXML-string-%s' % ((uri.find('#') + 1 and ';' or '#'),
str(time.time()))
doc = Domlette.NonvalidatingReader.parseString(src, uri)
return [doc]
ParseXml.arguments = (Types.StringType, Types.ObjectType)
ParseXml.result = Types.NodesetType
def Range(context, lo, hi):
"""
Returns a node-set consisting of text nodes encapsulating integers
in the numeric range bounded by the given low and high values.
"""
# contributed by Lars Marius Garshol;
# originally using namespace URI 'http://garshol.priv.no/symbolic/'
doc = context.node.rootNode
# sanity check
for n in (lo, hi):
if number.isinf(n) or number.isnan(n):
raise ValueError("Arguments to ft:range must be neither infinite nor NaN.")
#xrange wants int, not float
lo = int(round(Conversions.NumberValue(lo)))
hi = int(round(Conversions.NumberValue(hi)))
nodeset = []
for num in xrange(lo, hi):
nodeset.append(doc.createTextNode(str(num)))
return nodeset
Range.arguments = (Types.NumberType, Types.NumberType)
Range.result = Types.NodesetType
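# Illustrative usage (not in the original source): f:range(1, 5) returns
# a node-set of four text nodes with string-values '1' through '4'; the
# high bound is exclusive, mirroring Python's xrange().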
def ResolvePath(context, base, rel):
"""
Resolves a Posix-style path, such as the path portion of a URL,
against a base. Similar to f:resolve-url, but allows the base to be
just a path, not necessarily a full URL.
"""
base = Conversions.StringValue(base)
rel = Conversions.StringValue(rel)
return Uri.BaseJoin(base, rel)
ResolvePath.arguments = (Types.StringType, Types.StringType)
ResolvePath.result = Types.StringType
def ResolveUrl(context, base, rel):
"""
Returns the relative URL ref given in the second argument
resolved against the base given in the first argument.
In case of URI processing error an empty string is returned
"""
base = Conversions.StringValue(base)
rel = Conversions.StringValue(rel)
try:
return Uri.Absolutize(rel, base)
except Uri.UriException:
return u''
ResolveUrl.arguments = (Types.StringType, Types.StringType)
ResolveUrl.result = Types.StringType
def ShaHash(context, text):
"""
    Returns a SHA-1 message digest of the given string, as a string of
    hexadecimal digits. See
http://www.itl.nist.gov/fipspubs/fip180-1.htm for info on SHA.
"""
text = Conversions.StringValue(text)
import sha
rv = sha.sha(text).hexdigest()
rv = unicode(rv, 'us-ascii', errors='replace')
return rv
ShaHash.arguments = (Types.StringType,)
ShaHash.result = Types.StringType
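# Example (standard SHA-1 test vector, shown for illustration):
#     >>> import sha
#     >>> sha.sha('abc').hexdigest()
#     'a9993e364706816aba3e25717850c26c9cd0d89d'
# f:sha-hash('abc') returns the same value as an XPath string.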
def SharePath(context):
"""
Returns the system-dependent path to modifiable data
"""
return unicode(Ft.GetConfigVar('LOCALSTATEDIR'), 'us-ascii',
errors='replace')
SharePath.arguments = ()
SharePath.result = Types.StringType
def BinPath(context):
"""
Returns the system-dependent path of Fourthought binaries
"""
return unicode(Ft.GetConfigVar('BINDIR'), 'us-ascii', errors='replace')
BinPath.arguments = ()
BinPath.result = Types.StringType
def Uri2OsPath(context, uri):
"""
Returns the given URI as an OS path.
The result varies depending on the underlying operating system.
"""
return Uri.UriToOsPath(Conversions.StringValue(uri))
Uri2OsPath.arguments = (Types.StringType,)
Uri2OsPath.result = Types.StringType
def Version(context):
"""
Returns the 4Suite version number as a string.
"""
return unicode(Ft.VERSION, 'us-ascii', errors='replace')
Version.arguments = ()
Version.result = Types.StringType
def Wrap(context, text, width):
"""
f:wrap() returns a string with the text reflowed so that each line
fits within the given width. Existing linefeeds are preserved, but
spaces are considered inter-word separators that can be collapsed.
To reflow without preserving existing linefeeds, strip them first,
    e.g. with translate(text, '&#10;', '').
http://lists.fourthought.com/pipermail/4suite-dev/2002-December/000878.html
"""
s = Conversions.StringValue(text)
width = Conversions.NumberValue(width)
return LineWrap(s, width)
Wrap.arguments = (Types.StringType, Types.NumberType)
Wrap.result = Types.StringType
def PytimeToExslt(context, t=None):
"""
Takes a Python time value as a number and returns a date/time as if
from EXSLT date-time()
t - a time stamp number, as from Python's time.time()
if omitted, use the current time
"""
from Ft.Lib import Time as FtTime
if t is not None:
t = Conversions.NumberValue(t)
return unicode(str(FtTime.FromPythonTime(t)), errors='replace')
else:
return unicode(str(FtTime.FromPythonTime()), errors='replace')
PytimeToExslt.arguments = (Types.NumberType,)
PytimeToExslt.result = Types.StringType
#---EXSLT-like functions------------------------------------------------
# (perhaps soon to be deprecated)
def Join(context, nodeset, delim=' '):
"""
Concatenates the string-values of the nodes in the given node-set,
inserting the delimiter given in the optional second argument in
between each string-value. The delimiter defaults to a space.
See also: EXSLT's str:concat()
"""
delim = Conversions.StringValue(delim)
comps = map(Conversions.StringValue, nodeset)
if delim:
return delim.join(comps)
else:
return u''.join(comps)
Join.arguments = (Types.NodesetType, Types.StringType)
Join.result = Types.StringType
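# Illustrative usage (not in the original source): given a node-set $ns
# whose nodes have string-values 'a', 'b' and 'c',
#     f:join($ns, ', ')
# returns 'a, b, c', exactly like u', '.join([u'a', u'b', u'c']).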
def Match(context, pattern, arg=None):
"""
Returns true if the string given in the optional second argument
(or the string-value of the context node if no second argument is
given) matches the regular expression given in the first argument.
See also: EXSLT's regexp:test()
    Note that this function differs from the XSLT 2.0 match() function.
"""
if not arg:
arg = context.node
arg = Conversions.StringValue(arg)
return re.match(pattern, arg) and boolean.true or boolean.false
Match.arguments = (Types.StringType, Types.StringType)
Match.result = Types.BooleanType
def ParseDate(context, date, format=None):
"""
This function is similar to EXSLT's date:parse-date()
except that it uses Python rather than Java conventions
for the date formatting.
"""
import time
date = Conversions.StringValue(date)
format = Conversions.StringValue(format)
    time_tuple = time.strptime(date, format)
#perhaps add some variants for missing time tuple values?
str_time = time.strftime("%Y-%m-%dT%H:%M:%S", time_tuple)
return unicode(str_time, 'us-ascii', errors='replace')
ParseDate.arguments = (Types.StringType, Types.StringType)
ParseDate.result = Types.StringType
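# Example (illustrative; assumes the two-argument strptime call above and
# an English locale for %b):
#     >>> import time
#     >>> t = time.strptime('25 Dec 2006', '%d %b %Y')
#     >>> time.strftime('%Y-%m-%dT%H:%M:%S', t)
#     '2006-12-25T00:00:00'
# so f:parse-date('25 Dec 2006', '%d %b %Y') yields '2006-12-25T00:00:00'.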
def Random(context, max=None, forceInt=0):
"""
Returns a random number between 0 (inclusive) and max (exclusive).
max defaults to 1. The first optional argument is a different
value for max, and the second argument is a flag that, if set,
causes the random number to be rounded to an integer.
See also: EXSLT's math:random()
"""
if max:
max = Conversions.NumberValue(max)
else:
max = 1.0
    # random() yields a float in [0.0, 1.0); scale it to [0, max)
    rt = DEFAULT_RNG.random() * max
if forceInt:
rt = round(rt)
return rt
Random.arguments = (Types.NumberType, Types.BooleanType)
Random.result = Types.NumberType
def Replace(context, old, new, arg=None):
"""
Returns the third argument string, which defaults to the
string-value of the context node, with occurrences of the substring
given in the first argument replaced by the string given in the
second argument.
See also: EXSLT's str:replace()
"""
if not arg:
arg = context.node
arg = Conversions.StringValue(arg)
old = Conversions.StringValue(old)
new = Conversions.StringValue(new)
return arg.replace(old, new)
Replace.arguments = (Types.StringType, Types.StringType, Types.StringType)
Replace.result = Types.StringType
def StrFTime(context, format, date=None):
"""
Returns the given ISO 8601 UTC date-time formatted according to
the given format string as would be used by Python's
time.strftime(). If no date-time string is given, the current
time is used.
"""
format = Conversions.StringValue(format)
if date is not None:
date = Conversions.StringValue(date)
time_str = time.strftime(format, time.strptime(date, '%Y-%m-%dT%H:%M:%SZ'))
else:
time_str = time.strftime(format)
return unicode(time_str, 'us-ascii', errors='replace')
StrFTime.arguments = (Types.StringType, Types.StringType)
StrFTime.result = Types.StringType
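# Example (sketch of the underlying calls; locale permitting):
#     >>> import time
#     >>> t = time.strptime('2006-12-25T13:30:00Z', '%Y-%m-%dT%H:%M:%SZ')
#     >>> time.strftime('%d %b %Y', t)
#     '25 Dec 2006'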
#---OS System-aware functions------------------------------------------------
# (Not loaded by default for security reasons)
def EnvVar(context, var):
"""
Looks up a variable in the OS environment. Returns a string, either
the environment variable value or an empty string if there is no
such variable. The system default encoding is assumed.
CAUTION: Using this function could be a security hazard.
    You can also use system-property() for the same purpose:
f:env-var('foo')
is equivalent to
system-property('fs:foo')
given a mapping from fs to http://xmlns.4suite.org/xslt/env-system-property
"""
var = Conversions.StringValue(var)
result = os.environ.get(var, '')
result = unicode(result, errors='replace')
return result
EnvVar.arguments = (Types.StringType,)
EnvVar.result = Types.StringType
def Spawnv(context, command, *args):
"""
Executes a command in the operating system's shell, passing in the
command line arguments separately. Returns the result of the command
(a numeric exit code, typically).
CAUTION: Using this function could be a security hazard.
See also: f:system()
"""
command = Conversions.StringValue(command)
result = os.spawnv(os.P_WAIT, command, args)
return result
Spawnv.arguments = (Types.StringType,)
Spawnv.result = Types.NumberType
def System(context, command):
"""
Executes a command in the operating system's shell and returns the
command's result (a numeric exit code, typically).
CAUTION: Using this function could be a security hazard.
See also: f:spawnv()
"""
command = Conversions.StringValue(command)
result = os.system(command)
return result
System.arguments = (Types.StringType,)
System.result = Types.NumberType
ExtNamespaces = {
FT_EXT_NAMESPACE : 'f',
}
ExtFunctions = {
(FT_EXT_NAMESPACE, 'base-uri'): BaseUri,
(FT_EXT_NAMESPACE, 'decode') : Decode,
(FT_EXT_NAMESPACE, 'encode') : Encode,
(FT_EXT_NAMESPACE, 'ends-with'): EndsWith,
(FT_EXT_NAMESPACE, 'escape-xml'): EscapeXml,
(FT_EXT_NAMESPACE, 'generate-uuid'): GenerateUuid,
(FT_EXT_NAMESPACE, 'if'): If,
(FT_EXT_NAMESPACE, 'import-string') : ImportString,
(FT_EXT_NAMESPACE, 'indent') : Indent,
(FT_EXT_NAMESPACE, 'join'): Join,
(FT_EXT_NAMESPACE, 'match'): Match,
(FT_EXT_NAMESPACE, 'normalize-eol') : NormalizeEol,
(FT_EXT_NAMESPACE, 'ospath2uri'): OsPath2Uri,
(FT_EXT_NAMESPACE, 'parse-date'): ParseDate,
(FT_EXT_NAMESPACE, 'pytime-to-exslt'): PytimeToExslt,
(FT_EXT_NAMESPACE, 'parse-xml') : ParseXml,
(FT_EXT_NAMESPACE, 'random'): Random,
(FT_EXT_NAMESPACE, 'range'): Range,
(FT_EXT_NAMESPACE, 'replace'): Replace,
(FT_EXT_NAMESPACE, 'resolve-url'): ResolveUrl,
(FT_EXT_NAMESPACE, 'resolve-path'): ResolvePath,
(FT_EXT_NAMESPACE, 'sha-hash') : ShaHash,
(FT_EXT_NAMESPACE, 'share-path'): SharePath,
(FT_EXT_NAMESPACE, 'bin-path'): BinPath,
(FT_EXT_NAMESPACE, 'uri2ospath'): Uri2OsPath,
(FT_EXT_NAMESPACE, 'version'): Version,
(FT_EXT_NAMESPACE, 'wrap') : Wrap,
(FT_EXT_NAMESPACE, 'strftime') : StrFTime,
}
InsecureExtFunctions = {
(FT_EXT_NAMESPACE, 'spawnv'): Spawnv,
(FT_EXT_NAMESPACE, 'system'): System,
(FT_EXT_NAMESPACE, 'env-var'): EnvVar,
}
import MathFunctions
ExtFunctions.update(MathFunctions.ExtFunctions)
# Deprecated functions removed for 4Suite 1.0a4:
#
# (FT_EXT_NAMESPACE, 'escape-url'): EscapeUrl,
# (FT_EXT_NAMESPACE, 'evaluate'): Evaluate,
# (FT_EXT_NAMESPACE, 'iso-time'): IsoTime,
# (FT_EXT_NAMESPACE, 'distinct'): Distinct,
# (FT_EXT_NAMESPACE, 'find'): Find,
# (FT_EXT_NAMESPACE, 'node-set'): NodeSet,
# ==== end of Amara-2.0.0a6/lib/xpath/extensions/BuiltInExtFunctions.py ====
import dis
import new
import array
import itertools
from compiler.consts import CO_OPTIMIZED, CO_NEWLOCALS
class assembler:
"""A flow graph representation for Python bytecode as a function body"""
postorder = None
def __init__(self):
self.entry = self.current = basicblock(0)
self.blocks = [self.entry]
def new_block(self):
block = basicblock(len(self.blocks))
self.blocks.append(block)
return block
def next_block(self, block=None):
if block is None:
block = basicblock(len(self.blocks))
self.blocks.append(block)
self.current.next = block
self.current = block
return block
def emit(self, *instructions):
hasarg = self.hasarg
hasjump = self.hasjump
add = self.current.append
instructions = iter(instructions)
for opname in instructions:
instr = instruction(opname)
if opname in hasarg:
oparg = instructions.next()
if opname in hasjump:
assert isinstance(oparg, basicblock)
instr.target = oparg
else:
instr.oparg = oparg
instr.hasarg = True
add(instr)
return
# -- PyFlowGraph --------------------------------------------------
def assemble(self, name, args, docstring, filename, firstlineno):
"""Get a Python code object"""
self.next_block()
self.emit('RETURN_VALUE')
stacksize = self._compute_stack_size()
blocks = self._get_blocks_in_order()
consts, names, varnames = \
self._compute_lookups(blocks, args, docstring)
bytecode = self._compute_jump_offsets(blocks)
codestring = bytecode.tostring()
return new.code(len(args), len(varnames), stacksize,
CO_OPTIMIZED | CO_NEWLOCALS, codestring,
consts, names, varnames, filename, name,
firstlineno, '', (), ())
def _compute_stack_size(self):
"""Return the blocks in reverse dfs postorder"""
stack_effect = {
'POP_TOP': -1,
'ROT_TWO': 0,
'ROT_THREE': 0,
'DUP_TOP': 1,
'ROT_FOUR': 0,
'UNARY_POSITIVE': 0,
'UNARY_NEGATIVE': 0,
'UNARY_NOT': 0,
'UNARY_CONVERT': 0,
'UNARY_INVERT': 0,
'LIST_APPEND': -2,
'BINARY_POWER': -1,
'BINARY_MULTIPLY': -1,
'BINARY_DIVIDE': -1,
'BINARY_MODULO': -1,
'BINARY_ADD': -1,
'BINARY_SUBTRACT': -1,
'BINARY_SUBSCR': -1,
'BINARY_FLOOR_DIVIDE': -1,
'BINARY_TRUE_DIVIDE': -1,
'INPLACE_FLOOR_DIVIDE': -1,
'INPLACE_TRUE_DIVIDE': -1,
'SLICE+0': 1,
'SLICE+1': 0,
'SLICE+2': 0,
'SLICE+3': -1,
'STORE_SLICE+0': -2,
'STORE_SLICE+1': -3,
'STORE_SLICE+2': -3,
'STORE_SLICE+3': -4,
'DELETE_SLICE+0': -1,
'DELETE_SLICE+1': -2,
'DELETE_SLICE+2': -2,
'DELETE_SLICE+3': -3,
'INPLACE_ADD': -1,
'INPLACE_SUBSTRACT': -1,
'INPLACE_MULTIPLY': -1,
'INPLACE_DIVIDE': -1,
'INPLACE_MODULO': -1,
'STORE_SUBSCR': -3,
'DELETE_SUBSCR': -2,
'BINARY_LSHIFT': -1,
'BINARY_RSHIFT': -1,
'BINARY_AND': -1,
'BINARY_XOR': -1,
'BINARY_OR': -1,
'INPLACE_POWER': -1,
'GET_ITER': 0,
'PRINT_EXPR': -1,
'PRINT_ITEM': -1,
'PRINT_NEWLINE': 0,
'PRINT_ITEM_TO': -2,
'PRINT_NEWLINE_TO': -1,
'INPLACE_LSHIFT': -1,
'INPLACE_RSHIFT': -1,
'INPLACE_AND': -1,
'INPLACE_XOR': -1,
'INPLACE_OR': -1,
'BREAK_LOOP': 0,
'RETURN_VALUE': -1,
'YIELD_VALUE': 0,
'STORE_NAME': -1,
'DELETE_NAME': 0,
'FOR_ITER': 1,
'STORE_ATTR': -2,
'DELETE_ATTR': -1,
'STORE_GLOBAL': -1,
'DELETE_GLOBAL': 0,
'LOAD_CONST': 1,
'LOAD_NAME': 1,
'BUILD_MAP': 1,
'LOAD_ATTR': 0,
'COMPARE_OP': -1,
'JUMP_FORWARD': 0,
'JUMP_IF_FALSE': 0,
'JUMP_IF_TRUE': 0,
'JUMP_ABSOLUTE': 0,
'LOAD_GLOBAL': 1,
'LOAD_FAST': 1,
'STORE_FAST': -1,
'DELETE_FAST': 0,
}
def walk(block, size, maxsize):
block.seen = True
instructions = iter(block)
for instr in instructions:
if instr in stack_effect:
size += stack_effect[instr]
elif instr == 'CALL_FUNCTION':
size += -((instr.oparg % 256) + (2 * (instr.oparg / 256)))
elif instr in ('BUILD_TUPLE', 'BUILD_LIST'):
size += (1 - instr.oparg)
elif instr == 'UNPACK_SEQUENCE':
size += (instr.oparg - 1)
elif instr == 'DUP_TOPX':
size += instr.oparg
else:
raise RuntimeError("unhandled instruction: %r" % instr)
if size > maxsize:
maxsize = size
if instr.target is not None and not instr.target.seen:
assert instr in self.hasjump, instr
maxsize = walk(instr.target, size, maxsize)
if instr in ('JUMP_ABSOLUTE', 'JUMP_FORWARD'):
# remaining code is dead
break
else:
if block.next is not None and not block.next.seen:
maxsize = walk(block.next, size, maxsize)
block.seen = False
return maxsize
return walk(self.entry, 0, 0)
def _get_blocks_in_order(self):
"""Return the blocks in reverse dfs postorder"""
def walk(block, postorder):
"""Depth-first search of tree rooted at `block`"""
block.seen = True
if block.next is not None and not block.next.seen:
walk(block.next, postorder)
for instr in block:
if instr.target is not None and not instr.target.seen:
assert instr in self.hasjump, instr
walk(instr.target, postorder)
postorder.append(block)
return postorder
return tuple(reversed(walk(self.entry, [])))
def _compute_lookups(self, blocks, args, docstring):
"""Convert lookup arguments from symbolic to concrete form"""
haslookup = self.haslookup
hascompare = self.hascompare
hasconst = self.hasconst
haslocal = self.haslocal
hasname = self.hasname
cmpop = self.cmpop
consts = {(docstring, type(docstring)): 0}
names = {}
varnames = {}
for i, arg in enumerate(args):
varnames[arg] = i
for block in blocks:
for instr in block:
if instr in haslookup:
if instr in hasconst:
key = (instr.oparg, type(instr.oparg))
try:
oparg = consts[key]
except KeyError:
oparg = len(consts)
consts[key] = oparg
elif instr in haslocal:
try:
oparg = varnames[instr.oparg]
except KeyError:
oparg = len(varnames)
varnames[instr.oparg] = oparg
elif instr in hasname:
try:
oparg = names[instr.oparg]
except KeyError:
oparg = len(names)
names[instr.oparg] = oparg
elif instr in hascompare:
oparg = cmpop[instr.oparg]
else:
raise RuntimeError("unhandled instruction: %r" % instr)
instr.oparg = oparg
if consts:
L = ['']*len(consts)
for key, pos in consts.iteritems():
L[pos] = key[0]
consts = tuple(L)
else:
consts = ()
if names:
L = ['']*len(names)
for name, pos in names.iteritems():
L[pos] = name
names = tuple(L)
else:
names = ()
if varnames:
L = ['']*len(varnames)
for name, pos in varnames.iteritems():
L[pos] = name
varnames = tuple(L)
else:
varnames = ()
return consts, names, varnames
def _compute_jump_offsets(self, blocks):
"""Compute the size of each block and fixup jump args"""
hasjump = self.hasjump
hasjrel = self.hasjrel
hasjabs = self.hasjabs
map = itertools.imap
opmap = dis.opmap
bytecode = array.array('B')
append = bytecode.append
extend = bytecode.extend
while not bytecode:
# Compute the size of each block
offset = 0
for block in blocks:
block.offset = offset
offset += sum(map(len, block))
for block in blocks:
for i, instr in enumerate(block):
if instr.target is not None:
assert instr in hasjump
if instr in hasjrel:
offset = len(bytecode) + len(instr)
instr.oparg = instr.target.offset - offset
elif instr in hasjabs:
instr.oparg = instr.target.offset
else:
raise RuntimeError("unhandled instruction: %r" %
instr)
opcode = opmap[instr]
if instr.hasarg:
oparg = instr.oparg
if oparg > 0xFFFF:
instr.oparg &= 0xFFFF
                        instr = instruction('EXTENDED_ARG')
instr.oparg = oparg >> 16
instr.hasarg = True
block.insert(i, instr)
break
extend((opcode, oparg & 0xFF, oparg >> 8))
else:
append(opcode)
else:
# process the next block of instructions
continue
# add an EXTENDED_ARG instruction
assert instr == 'EXTENDED_ARG', instr
# restart while loop to recompute offsets
del bytecode[:]
break
return bytecode
hasarg = set(dis.opname[dis.HAVE_ARGUMENT:])
hasjrel = set(dis.opname[op] for op in dis.hasjrel)
hasjabs = set(dis.opname[op] for op in dis.hasjabs)
hasjump = hasjrel | hasjabs
hascompare = set(dis.opname[op] for op in dis.hascompare)
hasconst = set(dis.opname[op] for op in dis.hasconst)
haslocal = set(dis.opname[op] for op in dis.haslocal)
hasname = set(dis.opname[op] for op in dis.hasname)
haslookup = hascompare | hasconst | haslocal | hasname
cmpop = dict(itertools.izip(dis.cmp_op, itertools.count()))
class instruction(str):
opname = property(str.__str__)
oparg = None
target = None
hasarg = False
def __len__(self):
if self.hasarg:
if self.oparg > 0xFFFF:
return 6
return 3
return 1
def __repr__(self):
ptr = id(self)
if ptr < 0: ptr += 0x100000000L
return '<instr at 0x%x: opname=%r, oparg=%r, target=%r>' % (
ptr, self.opname, self.oparg, self.target)
def __str__(self):
return '<instr %s, oparg=%r>' % (self.opname, self.oparg)
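# Illustrative note (not in the original source): len(instr) mirrors
# CPython 2.x bytecode encoding -- 1 byte for an argumentless opcode,
# 3 bytes for opcode plus 16-bit argument, 6 when an EXTENDED_ARG
# prefix is required:
#     >>> i = instruction('LOAD_CONST')
#     >>> i.hasarg = True; i.oparg = 5
#     >>> len(i)
#     3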
class basicblock(list):
__slots__ = ('id', 'label', 'next', 'seen', 'offset')
def __init__(self, id, label=None):
self.id = id
self.label = label
self.next = None
self.seen = False
self.offset = 0
emit = list.append
__hash__ = object.__hash__
def __repr__(self):
ptr = id(self)
if ptr < 0: ptr += 0x100000000L
if self.label:
label = ', label=%s' % self.label
else:
label = ''
return "<block at 0x%x: id=%d%s>" % (ptr, self.id, label)
def __str__(self):
if self.label:
label = ' %r ' % self.label
else:
label = ''
if self:
instructions = ':\n ' + '\n '.join(map(str, self))
else:
instructions = ''
return "<block %s%d, offset %d%s>" % (label, self.id, self.offset,
instructions)
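# Example (illustrative sketch, not part of the original source):
# assembling the equivalent of ``def identity(x): return x``:
#     >>> a = assembler()
#     >>> a.emit('LOAD_FAST', 'x')
#     >>> code = a.assemble('identity', ('x',), None, '<example>', 1)
#     >>> new.function(code, {})(42)
#     42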
# ==== end of Amara-2.0.0a6/lib/xpath/compiler/assembler.py ====
from __future__ import absolute_import
import new
from .assembler import assembler
__all__ = ['xpathcompiler']
class xpathcompiler(object):
_nasts = 0
_nlocals = 0
def __init__(self, context=None):
if context is not None:
self.namespaces = context.namespaces
self.variables = context.variables
self.functions = context.functions
else:
self.namespaces = {}
self.variables = {}
self.functions = {}
self._graph = assembler()
self.emit = self._graph.emit
self.new_block = self._graph.new_block
self.next_block = self._graph.next_block
return
def compile(self, name, args=None, docstring=None, filename=None,
firstlineno=0):
# Generate the code object
if args is None:
args = ('context',)
if filename is None:
filename = '<ast-%d>' % xpathcompiler._nasts
xpathcompiler._nasts += 1
code = self._graph.assemble(name, args, docstring, filename,
firstlineno)
        # Make the function
        if 0:  # flip to 1 to print a disassembly of the generated code
title = '%s (%s)' % (filename, name)
print '--', title, '-'*(60 - len(title))
print '>>', docstring
import dis
dis.dis(code)
return new.function(code, {'__lltrace__': 1})
return new.function(code, {})
def tmpname(self):
self._nlocals += 1
return '$%d' % self._nlocals
# For documentation purposes only; replaced in the constructor
def emit(self, *instructions):
return self._graph.emit(*instructions)
# For documentation purposes only; replaced in the constructor
def new_block(self):
return self._graph.new_block()
# For documentation purposes only; replaced in the constructor
def next_block(self):
return self._graph.next_block()
def emitRootNodeSet(self):
self.emit('LOAD_FAST', 'context',
'LOAD_ATTR', 'node',
'LOAD_ATTR', 'xml_root',
'BUILD_TUPLE', 1,
)
return
def emitContextNodeSet(self):
self.emit('LOAD_FAST', 'context',
'LOAD_ATTR', 'node',
'BUILD_TUPLE', 1,
)
return
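# Example (illustrative sketch, not part of the original source): build a
# function returning the context node as a one-item tuple; `_context` is
# a stand-in object, not the real XPath context class.
#     >>> compiler = xpathcompiler()
#     >>> compiler.emitContextNodeSet()
#     >>> func = compiler.compile('example')
#     >>> class _context: node = u'NODE'
#     >>> func(_context)
#     (u'NODE',)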
# ==== end of Amara-2.0.0a6/lib/xpath/compiler/__init__.py ====
import itertools
from amara.xpath import datatypes
from amara.xpath.functions import builtin_function
__all__ = (
'string_function', 'concat_function', 'starts_with_function',
'contains_function', 'substring_before_function',
'substring_after_function', 'substring_function',
'string_length_function', 'normalize_space_function',
'translate_function',
)
class string_function(builtin_function):
"""Function: <string> string(<object>?)"""
name = 'string'
arguments = (datatypes.xpathobject,)
defaults = (None,)
return_type = datatypes.string
def evaluate_as_string(self, context):
arg0, = self._args
if arg0 is None:
return datatypes.string(context.node)
return arg0.evaluate_as_string(context)
evaluate = evaluate_as_string
class concat_function(builtin_function):
"""Function: <string> concat(<string>, <string>, ...)"""
name = 'concat'
arguments = (datatypes.string, datatypes.string, Ellipsis)
return_type = datatypes.string
def evaluate_as_string(self, context):
args = (arg.evaluate_as_string(context) for arg in self._args)
return datatypes.string(u''.join(args))
evaluate = evaluate_as_string
class starts_with_function(builtin_function):
"""Function: <boolean> starts-with(<string>, <string>)"""
name = 'starts-with'
arguments = (datatypes.string, datatypes.string)
return_type = datatypes.boolean
def evaluate_as_boolean(self, context):
outer, inner = self._args
outer = outer.evaluate_as_string(context)
inner = inner.evaluate_as_string(context)
if not inner or outer[:len(inner)] == inner:
return datatypes.TRUE
return datatypes.FALSE
evaluate = evaluate_as_boolean
class contains_function(builtin_function):
"""Function: <boolean> contains(<string>, <string>)"""
name = 'contains'
arguments = (datatypes.string, datatypes.string)
return_type = datatypes.boolean
def evaluate_as_boolean(self, context):
outer, inner = self._args
outer = outer.evaluate_as_string(context)
inner = inner.evaluate_as_string(context)
return datatypes.TRUE if inner in outer else datatypes.FALSE
evaluate = evaluate_as_boolean
class substring_before_function(builtin_function):
"""Function: <string> substring-before(<string>, <string>)"""
name = 'substring-before'
arguments = (datatypes.string, datatypes.string)
return_type = datatypes.string
def evaluate_as_string(self, context):
outer, inner = self._args
outer = outer.evaluate_as_string(context)
inner = inner.evaluate_as_string(context)
index = outer.find(inner)
if index == -1:
return datatypes.EMPTY_STRING
return datatypes.string(outer[:index])
evaluate = evaluate_as_string
class substring_after_function(builtin_function):
"""Function: <string> substring-after(<string>, <string>)"""
name = 'substring-after'
arguments = (datatypes.string, datatypes.string)
return_type = datatypes.string
def evaluate_as_string(self, context):
outer, inner = self._args
outer = outer.evaluate_as_string(context)
inner = inner.evaluate_as_string(context)
if not inner:
return datatypes.EMPTY_STRING
index = outer.find(inner)
if index == -1:
return datatypes.EMPTY_STRING
return datatypes.string(outer[index+len(inner):])
evaluate = evaluate_as_string
class substring_function(builtin_function):
"""Function: <string> substring(<string>, <number>, <number>?)"""
name = 'substring'
arguments = (datatypes.string, datatypes.number, datatypes.number)
defaults = (None,)
return_type = datatypes.string
def evaluate_as_string(self, context):
string, start, length = self._args
string = string.evaluate_as_string(context)
start = start.evaluate_as_number(context)
# start == NaN: spec doesn't say; assume no substring to return
# start == +Inf or -Inf: no substring to return
if not start.isfinite():
return datatypes.EMPTY_STRING
# start is finite, safe for int() and round().
start = int(round(start))
# convert to 0-based index for python string slice
if start < 1:
startidx = 0
else:
startidx = start - 1
# length undefined: return chars startidx to end
if length is None:
return datatypes.string(string[startidx:])
length = length.evaluate_as_number(context)
if not length.isfinite():
# length == +Inf: return chars startidx to end
if length > 0:
assert length.isinf()
return datatypes.string(string[startidx:])
# length == NaN: spec doesn't say; assume no substring to return
# length == -Inf: no substring to return
return datatypes.EMPTY_STRING
# length is finite, safe for int() and round().
length = int(round(length))
# return value must end before position (start+length)
# which is (start+length-1) in 0-based index
endidx = start + length - 1
if endidx < startidx:
return datatypes.EMPTY_STRING
return datatypes.string(string[startidx:endidx])
evaluate = evaluate_as_string
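# Illustrative edge case from the XPath 1.0 spec: substring('12345', 1.5, 2.6)
# returns '234' -- start and length round to 2 and 3, selecting characters
# at positions 2 through 4.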
class string_length_function(builtin_function):
"""Function: <number> string-length(<string>?)"""
name = 'string-length'
arguments = (datatypes.string,)
defaults = (None,)
return_type = datatypes.number
def evaluate_as_number(self, context):
arg0, = self._args
if arg0 is None:
string = datatypes.string(context.node)
else:
string = arg0.evaluate_as_string(context)
return datatypes.number(len(string))
evaluate = evaluate_as_number
class normalize_space_function(builtin_function):
"""Function: <string> normalize-space(<string>?)"""
name = 'normalize-space'
arguments = (datatypes.string,)
defaults = (None,)
return_type = datatypes.string
def evaluate_as_string(self, context):
arg0, = self._args
if arg0 is None:
string = datatypes.string(context.node)
else:
string = arg0.evaluate_as_string(context)
return datatypes.string(u' '.join(string.split()))
evaluate = evaluate_as_string
class translate_function(builtin_function):
"""Function: <string> translate(<string>, <string>, <string>)"""
name = 'translate'
arguments = (datatypes.string, datatypes.string, datatypes.string)
return_type = datatypes.string
def evaluate_as_string(self, context):
arg0, arg1, arg2 = self._args
source = arg0.evaluate_as_string(context)
fromchars = arg1.evaluate_as_string(context)
tochars = arg2.evaluate_as_string(context)
tochars = itertools.chain(tochars, itertools.repeat(u''))
transmap = {}
for src, dst in itertools.izip(fromchars, tochars):
if src not in transmap:
transmap[src] = dst
chars = list(source)
for idx, ch in enumerate(chars):
if ch in transmap:
chars[idx] = transmap[ch]
return datatypes.string(u''.join(chars))
evaluate = evaluate_as_string
# ==== end of Amara-2.0.0a6/lib/xpath/functions/strings.py ====
from amara import XML_NAMESPACE
from amara.xpath import datatypes
from amara.xpath.functions import builtin_function
__all__ = ('boolean_function', 'not_function', 'true_function',
'false_function', 'lang_function')
class boolean_function(builtin_function):
"""Function: <boolean> boolean(<object>)"""
name = 'boolean'
arguments = (datatypes.xpathobject,)
return_type = datatypes.boolean
def evaluate_as_boolean(self, context):
arg, = self._args
return arg.evaluate_as_boolean(context)
evaluate = evaluate_as_boolean
class not_function(builtin_function):
"""Function: <boolean> not(<boolean>)"""
name = 'not'
arguments = (datatypes.boolean,)
return_type = datatypes.boolean
def evaluate_as_boolean(self, context):
arg, = self._args
if arg.evaluate_as_boolean(context):
return datatypes.FALSE
return datatypes.TRUE
evaluate = evaluate_as_boolean
class true_function(builtin_function):
"""Function: <boolean> true()"""
name = 'true'
arguments = ()
return_type = datatypes.boolean
def compile_as_boolean(self, compiler):
compiler.emit('LOAD_CONST', datatypes.TRUE)
return
compile = compile_as_boolean
def evaluate_as_boolean(self, context):
return datatypes.TRUE
evaluate = evaluate_as_boolean
class false_function(builtin_function):
"""Function: <boolean> false()"""
name = 'false'
arguments = ()
return_type = datatypes.boolean
def compile_as_boolean(self, compiler):
compiler.emit('LOAD_CONST', datatypes.FALSE)
return
compile = compile_as_boolean
def evaluate_as_boolean(self, context):
return datatypes.FALSE
evaluate = evaluate_as_boolean
class lang_function(builtin_function):
"""Function: <boolean> lang(<string>)"""
name = 'lang'
arguments = (datatypes.string,)
return_type = datatypes.boolean
def evaluate_as_boolean(self, context):
arg, = self._args
lang = arg.evaluate_as_string(context).lower()
node = context.node
while node.xml_parent:
for attr in node.xml_attributes.nodes():
# Search for xml:lang attribute
if attr.xml_name == (XML_NAMESPACE, 'lang'):
value = attr.xml_value.lower()
# Exact match (PrimaryPart and possible SubPart)
if value == lang:
return datatypes.TRUE
# Just PrimaryPart (ignore '-' SubPart)
if '-' in value:
primary, sub = value.split('-', 1)
if lang == primary:
return datatypes.TRUE
# Language doesn't match
return datatypes.FALSE
# Continue to next ancestor
node = node.xml_parent
# No xml:lang declarations found
return datatypes.FALSE
evaluate = evaluate_as_boolean
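# Illustrative behaviour (per XPath 1.0 section 4.3): with the context node
# inside <p xml:lang="en-US">, both lang('en') and lang('en-us') are true
# (the primary subtag matches), while lang('fr') is false.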
# ==== end of Amara-2.0.0a6/lib/xpath/functions/booleans.py ====
import math
from amara.xpath import datatypes
from amara.xpath.functions import builtin_function
__all__ = ('number_function', 'sum_function', 'floor_function',
'ceiling_function', 'round_function')
class number_function(builtin_function):
"""Function: <number> number(<object>?)"""
name = 'number'
arguments = (datatypes.xpathobject,)
defaults = (None,)
return_type = datatypes.number
def evaluate_as_number(self, context):
arg0, = self._args
if arg0 is None:
return datatypes.number(context.node)
return arg0.evaluate_as_number(context)
evaluate = evaluate_as_number
class sum_function(builtin_function):
"""Function: <number> sum(<node-set>)"""
name = 'sum'
arguments = (datatypes.nodeset,)
return_type = datatypes.number
def evaluate_as_number(self, context):
arg0, = self._args
arg0 = arg0.evaluate_as_nodeset(context)
return datatypes.number(sum(map(datatypes.number, arg0)))
evaluate = evaluate_as_number
class floor_function(builtin_function):
"""Function: <number> floor(<number>)"""
name = 'floor'
arguments = (datatypes.number,)
return_type = datatypes.number
def evaluate_as_number(self, context):
arg0, = self._args
arg0 = arg0.evaluate_as_number(context)
# a "normal" number is neither zero, NaN, nor infinity
if arg0.isnormal():
return datatypes.number(math.floor(arg0))
return arg0
evaluate = evaluate_as_number
class ceiling_function(builtin_function):
"""Function: <number> ceiling(<number>)"""
name = 'ceiling'
arguments = (datatypes.number,)
return_type = datatypes.number
def evaluate_as_number(self, context):
arg0, = self._args
arg0 = arg0.evaluate_as_number(context)
# a "normal" number is neither zero, NaN, nor infinity
if arg0.isnormal():
return datatypes.number(math.ceil(arg0))
return arg0
evaluate = evaluate_as_number
class round_function(builtin_function):
"""Function: <number> round(<number>)"""
name = 'round'
arguments = (datatypes.number,)
return_type = datatypes.number
def evaluate_as_number(self, context):
arg0, = self._args
arg0 = arg0.evaluate_as_number(context)
# a "normal" number is neither zero, NaN, nor infinity
if arg0.isnormal():
# Round towards positive infinity when there are two possibilities
if arg0 % 1.0 == 0.5:
round = math.ceil
else:
round = math.floor
return datatypes.number(round(arg0))
return arg0
evaluate = evaluate_as_number
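# Illustrative edge cases: round(2.5) is 3 and round(-2.5) is -2, because
# ties round towards positive infinity; zero, NaN and the infinities are
# returned unchanged since isnormal() is false for them.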
# ==== end of Amara-2.0.0a6/lib/xpath/functions/numbers.py ====
import inspect
from amara.xpath import XPathError
from amara.xpath import datatypes
from amara.xpath.expressions.functioncalls import function_call
__all__ = ['builtin_function', 'extension_function']
class builtin_function(function_call):
"""
An object representing a function call of a core function.
(XPath 1.0 grammar production 16: FunctionCall
and XPath 1.0 section 4: Core Function Library)
"""
name = None
return_type = datatypes.xpathobject
arguments = (Ellipsis,)
defaults = ()
class __metaclass__(function_call.__metaclass__):
if __debug__:
def __new__(cls, name, bases, namespace):
assert 'name' in namespace
assert 'return_type' in namespace
assert 'arguments' in namespace
assert isinstance(namespace['arguments'], tuple)
assert isinstance(namespace.get('defaults', ()), tuple)
return type.__new__(cls, name, bases, namespace)
def __init__(cls, name, bases, namespace):
function_call._builtins[cls.name] = cls
def __init__(self, name, args):
args = tuple(args)
argcount = len(args)
maxargs = len(self.arguments)
if maxargs and self.arguments[-1] is Ellipsis:
varargs = True
maxargs -= 1
else:
varargs = False
minargs = maxargs - len(self.defaults)
if argcount > maxargs and not varargs:
if maxargs == 0:
                raise XPathError(XPathError.ARGCOUNT_NONE,
                                 function=name, total=argcount)
elif self.defaults:
raise XPathError(XPathError.ARGCOUNT_ATMOST,
function=name, count=maxargs,
total=argcount)
else:
raise XPathError(XPathError.ARGCOUNT_EXACT,
function=name, count=maxargs,
total=argcount)
elif argcount < minargs:
if self.defaults or varargs:
raise XPathError(XPathError.ARGCOUNT_ATLEAST,
function=name, count=minargs,
total=argcount)
else:
raise XPathError(XPathError.ARGCOUNT_EXACT,
function=name, count=minargs,
total=argcount)
# Add default argument values, if needed
if argcount < maxargs:
args += self.defaults[:(maxargs - argcount)]
self._name = (None, self.name)
self._args = args
return
class function_callN(function_call):
_func = None
def __getstate__(self):
state = vars(self).copy()
del state['_func']
return state
def _get_function(self, context):
prefix, local = self._name
if prefix:
try:
expanded = (context.processorNss[prefix], local)
except KeyError:
raise XPathError(XPathError.UNDEFINED_PREFIX, prefix=prefix)
else:
expanded = self._name
try:
func = context.functions[expanded]
except KeyError:
func_name = prefix and u':'.join(self._name) or local
func_name = func_name.encode('unicode_escape')
raise XPathError(XPathError.UNDEFINED_FUNCTION, name=func_name)
if 'nocache' not in func.__dict__ or not func.nocache:
self._func = func
return func
def _argument_error(self):
if not inspect.isfunction(self._func):
# We can only check for argument errors with Python functions
return
func_name = self._name[0] and u':'.join(self._name) or self._name[1]
func_name = func_name.encode('unicode_escape')
argcount = len(self._args)
args, vararg, kwarg = inspect.getargs(self._func.func_code)
defaults = self._func.func_defaults or ()
# Don't count the context parameter in argument count
maxargs = len(args) - 1
minargs = maxargs - len(defaults)
        if argcount > maxargs and not vararg:
            if maxargs == 0:
                raise XPathError(XPathError.ARGCOUNT_NONE,
                                 function=func_name, total=argcount)
elif defaults:
raise XPathError(XPathError.ARGCOUNT_ATMOST,
function=func_name, count=maxargs,
total=argcount)
else:
raise XPathError(XPathError.ARGCOUNT_EXACT,
function=func_name, count=maxargs,
total=argcount)
elif argcount < minargs:
            if defaults or vararg:
raise XPathError(XPathError.ARGCOUNT_ATLEAST,
function=func_name, count=minargs,
total=argcount)
else:
raise XPathError(XPathError.ARGCOUNT_EXACT,
function=func_name, count=minargs,
total=argcount)
# Not an error with arg counts for this function, use current error
return
def evaluate(self, context):
"""Returns the result of calling the function"""
args = [ arg.evaluate(context) for arg in self._args ]
func = self._func or self._get_function(context)
try:
result = func(context, *args)
except TypeError:
self._argument_error()
# use existing exception (empty raise keeps existing backtrace)
raise
#Expensive assert contributed by Adam Souzis.
#No effect on Python running in optimized mode,
#But would mean significant slowdown for developers, so disabled by default
#assert not isinstance(result, list) or len(result) == len(Set.Unique(result))
return result
def evaluate_as_nodeset(self, context):
nodes = self.evaluate(context)
try:
nodes.sort()
except AttributeError:
raise TypeError("%r must be a nodeset, not %s" %
(self._expression, type(nodes).__name__))
return nodes
# Load the built-in functions
def __bootstrap__(namespace):
global __bootstrap__
from amara.xpath.functions import nodesets, strings, booleans, numbers
for module in (nodesets, strings, booleans, numbers):
for name in getattr(module, '__all__', ()):
namespace[name] = getattr(module, name)
# Add the functions to this module's exported objects
__all__.append(name)
del __bootstrap__
__bootstrap__(locals())
# ==== end of Amara-2.0.0a6/lib/xpath/functions/__init__.py ====
from amara import tree
from amara.xpath import datatypes
from amara.xpath.functions import builtin_function
from amara.xpath.locationpaths import relative_location_path
__all__ = ('last_function', 'position_function', 'count_function',
'id_function', 'local_name_function', 'namespace_uri_function',
'name_function')
class last_function(builtin_function):
"""Function: <number> last()"""
name = 'last'
arguments = ()
return_type = datatypes.number
def evaluate_as_number(self, context):
return datatypes.number(context.size)
evaluate = evaluate_as_number
#See http://trac.xml3k.org/ticket/62 re: idea of following flag
#requires_context_size = True
class position_function(builtin_function):
"""Function: <number> position()"""
name = 'position'
arguments = ()
return_type = datatypes.number
def evaluate_as_number(self, context):
return datatypes.number(context.position)
evaluate = evaluate_as_number
class count_function(builtin_function):
"""Function: <number> count(<node-set>)"""
name = 'count'
arguments = (datatypes.nodeset,)
return_type = datatypes.number
def evaluate_as_number(self, context):
arg0, = self._args
arg0 = arg0.evaluate_as_nodeset(context)
return datatypes.number(len(arg0))
evaluate = evaluate_as_number
class id_function(builtin_function):
"""Function: <node-set> id(<object>)"""
name = 'id'
arguments = (datatypes.xpathobject,)
return_type = datatypes.nodeset
def evaluate_as_nodeset(self, context):
arg0, = self._args
arg0 = arg0.evaluate(context)
if isinstance(arg0, datatypes.nodeset):
ids = set(datatypes.string(x) for x in arg0)
else:
arg0 = datatypes.string(arg0)
ids = set(arg0.split())
doc = context.node.xml_root
nodeset = filter(None, (doc.xml_lookup(id) for id in ids))
nodeset.sort()
return datatypes.nodeset(nodeset)
evaluate = evaluate_as_nodeset
class name_function(builtin_function):
"""Function: <string> name(<node-set>?)"""
name = 'name'
arguments = (datatypes.nodeset,)
defaults = (None,)
return_type = datatypes.string
def __init__(self, name, args):
# `name(.)` is the same as `name()`
if args:
try:
arg, = args
except ValueError:
# This will become an error anyhow.
pass
else:
if isinstance(arg, relative_location_path):
if len(arg._steps) == 1:
step, = arg._steps
if (step.axis.name == 'self' and
step.node_test.name == 'node' and
not step.predicates):
args = ()
builtin_function.__init__(self, name, args)
def evaluate_as_string(self, context):
arg0, = self._args
if arg0 is None:
node = context.node
else:
arg0 = arg0.evaluate_as_nodeset(context)
if not arg0:
return datatypes.EMPTY_STRING
node = arg0[0]
if isinstance(node, (tree.element, tree.attribute)):
return datatypes.string(node.xml_qname)
elif isinstance(node, tree.processing_instruction):
return datatypes.string(node.xml_target)
elif isinstance(node, tree.namespace):
return datatypes.string(node.xml_name)
return datatypes.EMPTY_STRING
evaluate = evaluate_as_string
class local_name_function(name_function):
"""Function: <string> local-name(<node-set>?)"""
name = 'local-name'
arguments = (datatypes.nodeset,)
defaults = (None,)
return_type = datatypes.string
def evaluate_as_string(self, context):
arg0, = self._args
if arg0 is None:
node = context.node
else:
arg0 = arg0.evaluate_as_nodeset(context)
if not arg0:
return datatypes.EMPTY_STRING
node = arg0[0]
if isinstance(node, (tree.element, tree.attribute)):
return datatypes.string(node.xml_local)
elif isinstance(node, tree.processing_instruction):
return datatypes.string(node.xml_target)
elif isinstance(node, tree.namespace):
return datatypes.string(node.xml_name)
return datatypes.EMPTY_STRING
evaluate = evaluate_as_string
class namespace_uri_function(name_function):
"""Function: <string> namespace-uri(<node-set>?)"""
name = 'namespace-uri'
arguments = (datatypes.nodeset,)
defaults = (None,)
return_type = datatypes.string
def evaluate_as_string(self, context):
arg0, = self._args
if arg0 is None:
node = context.node
else:
arg0 = arg0.evaluate_as_nodeset(context)
if not arg0:
return datatypes.EMPTY_STRING
node = arg0[0]
try:
namespace_uri = node.xml_namespace
except AttributeError:
# not a named node
return datatypes.EMPTY_STRING
# namespaceURI could be None
if namespace_uri:
return datatypes.string(namespace_uri)
return datatypes.EMPTY_STRING
evaluate = evaluate_as_string
# ==== end of Amara-2.0.0a6/lib/xpath/functions/nodesets.py ====
import inspect
from amara.xpath import XPathError
from amara.xpath import datatypes
from amara.xpath.expressions import expression
class function_call(expression):
"""
An object representing a function call expression
(XPath 1.0 grammar production 16: FunctionCall)
"""
_builtins = {}
_name = None
_args = None
def __new__(cls, name, args):
if name in cls._builtins:
cls = cls._builtins[name]
        elif 1:
            # NOTE: this branch currently routes every non-builtin call
            # through extension_function, bypassing the arity-specialized
            # function_call0/1/2/3 classes selected below.
            cls = extension_function
elif not args:
cls = function_call0
else:
nargs = len(args)
if nargs == 1:
cls = function_call1
elif nargs == 2:
cls = function_call2
elif nargs == 3:
cls = function_call3
else:
cls = function_callN
return object.__new__(cls)
def compile(self, compiler):
# Load the callable object
compiler.emit('LOAD_CONST', self)
compiler.emit('LOAD_ATTR', 'evaluate')
# Build the argument(s)
compiler.emit('LOAD_FAST', 'context')
# Call it!
compiler.emit('CALL_FUNCTION', 1)
return
def compile_as_boolean(self, compiler):
# Load the callable object
compiler.emit('LOAD_CONST', self)
compiler.emit('LOAD_ATTR', 'evaluate_as_boolean')
# Build the argument(s)
compiler.emit('LOAD_FAST', 'context')
# Call it!
compiler.emit('CALL_FUNCTION', 1)
return
def compile_as_number(self, compiler):
# Load the callable object
compiler.emit('LOAD_CONST', self)
compiler.emit('LOAD_ATTR', 'evaluate_as_number')
# Build the argument(s)
compiler.emit('LOAD_FAST', 'context')
# Call it!
compiler.emit('CALL_FUNCTION', 1)
return
def compile_as_string(self, compiler):
# Load the callable object
compiler.emit('LOAD_CONST', self)
compiler.emit('LOAD_ATTR', 'evaluate_as_string')
# Build the argument(s)
compiler.emit('LOAD_FAST', 'context')
# Call it!
compiler.emit('CALL_FUNCTION', 1)
return
def compile_as_nodeset(self, compiler):
# Load the callable object
compiler.emit('LOAD_CONST', self)
compiler.emit('LOAD_ATTR', 'evaluate_as_nodeset')
# Build the argument(s)
compiler.emit('LOAD_FAST', 'context')
# Call it!
compiler.emit('CALL_FUNCTION', 1)
return
def evaluate_as_boolean(self, context):
return datatypes.boolean(self.evaluate(context))
def evaluate_as_number(self, context):
return datatypes.number(self.evaluate(context))
def evaluate_as_string(self, context):
return datatypes.string(self.evaluate(context))
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
for arg in self._args:
arg.pprint(indent + ' ', stream)
def __unicode__(self):
func_name = self._name[0] and u':'.join(self._name) or self._name[1]
func_name = func_name.encode('unicode_escape')
arg_spec = u', '.join(map(unicode, self._args))
return u'%s(%s)' % (func_name, arg_spec)
@property
def children(self):
'Children of the parse tree of a function are its arguments'
return self._args
class extension_function(function_call):
_func = None
def __init__(self, name, args):
assert ':' in name, 'Extension functions must have a prefix'
self._name = name.split(':', 1)
self._args = tuple(args)
def compile(self, compiler):
prefix, local = self._name
try:
expanded = (compiler.namespaces[prefix], local)
except KeyError:
raise XPathError(XPathError.UNDEFINED_PREFIX, prefix=prefix)
try:
func = compiler.functions[expanded]
except KeyError:
raise XPathError(XPathError.UNDEFINED_FUNCTION,
function=u':'.join(self._name))
# If this is a Python function, we can verify the arguments. If it is
# just any callable, no verification will happen and TypeErrors will
# bubble up to the user as-is.
if inspect.isfunction(func):
args, varargs, kwarg = inspect.getargs(func.func_code)
argcount = len(self._args)
maxargs = len(args) - 1 # don't count the `context` argument
if func.func_defaults:
minargs = maxargs - len(func.func_defaults)
else:
minargs = maxargs
if argcount > maxargs and not varargs:
if maxargs == 0:
name = u':'.join(self._name).encode('unicode_escape')
raise XPathError(XPathError.ARGCOUNT_NONE,
function=name, total=argcount)
elif func.func_defaults:
name = u':'.join(self._name).encode('unicode_escape')
raise XPathError(XPathError.ARGCOUNT_ATMOST,
function=name, count=maxargs,
total=argcount)
else:
name = u':'.join(self._name).encode('unicode_escape')
raise XPathError(XPathError.ARGCOUNT_EXACT,
function=name, count=maxargs,
total=argcount)
elif argcount < minargs:
if varargs or func.func_defaults:
name = u':'.join(self._name).encode('unicode_escape')
raise XPathError(XPathError.ARGCOUNT_ATLEAST,
function=name, count=minargs,
total=argcount)
else:
name = u':'.join(self._name).encode('unicode_escape')
raise XPathError(XPathError.ARGCOUNT_EXACT,
function=name, count=minargs,
total=argcount)
# Load the function
if getattr(func, 'nocache', False):
if __debug__:
name = u':'.join(self._name).encode('unicode_escape')
def dynamic_function(context):
try:
return context.functions[expanded]
except KeyError:
raise XPathError(XPathError.UNDEFINED_FUNCTION,
name=name)
compiler.emit('LOAD_CONST', dynamic_function,
'LOAD_FAST', 'context',
'CALL_FUNCTION', 1)
else:
# Note, this assumes that the function will not be *deleted*
# from the function mapping, just replaced.
compiler.emit('LOAD_FAST', 'context',
'LOAD_ATTR', 'functions',
'LOAD_CONST', expanded,
                              'BINARY_SUBSCR')
else:
compiler.emit('LOAD_CONST', func)
# Build the argument(s)
compiler.emit('LOAD_FAST', 'context')
for arg in self._args:
compiler.emit('LOAD_CONST', arg)
# Call it!
compiler.emit('CALL_FUNCTION', 1 + len(self._args))
return
def evaluate_as_nodeset(self, context):
nodes = self.evaluate(context)
try:
nodes.sort()
except AttributeError:
raise TypeError("%r must be a nodeset, not %s" %
(self._expression, type(nodes).__name__))
return nodes
def __getstate__(self):
state = vars(self).copy()
del state['_func']
return state
# ==== end of Amara-2.0.0a6/lib/xpath/expressions/functioncalls.py ====
from amara._xmlstring import splitqname
from amara.xpath import XPathError
from amara.xpath import datatypes, expressions
class literal(expressions.expression):
"""
An object representing a string literal expression
(XPath 1.0 grammar production 29: Literal)
"""
def __init__(self, literal):
self._literal = literal
def compile_as_boolean(self, compiler):
if self._literal:
value = datatypes.boolean.TRUE
else:
value = datatypes.boolean.FALSE
compiler.emit('LOAD_CONST', value)
return
def compile_as_number(self, compiler):
try:
value = datatypes.number(self._literal)
except ValueError:
value = datatypes.number.NaN
compiler.emit('LOAD_CONST', value)
return
def compile_as_string(self, compiler):
try:
value = datatypes.string(self._literal)
except ValueError:
value = datatypes.string.EMPTY
compiler.emit('LOAD_CONST', value)
return
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
class string_literal(literal):
"""
An object representing a string literal expression
(XPath 1.0 grammar production 29: Literal)
"""
return_type = datatypes.string
def __init__(self, literal):
# FIXME - this constructor can go away once the BisonGen parser
# is updated to use the non-quoted value as the `literal` argument.
if literal[:1] in ("'", '"') and literal[:1] == literal[-1:]:
literal = literal[1:-1]
self._literal = literal
compile = literal.compile_as_string
def __unicode__(self):
return u'"%s"' % self._literal.replace(u'"', u'\\"')
class number_literal(literal):
"""
An object representing a numeric literal expression
(XPath 1.0 grammar production 30: Number)
"""
return_type = datatypes.number
compile = literal.compile_as_number
def __unicode__(self):
return unicode(self._literal)
class variable_reference(expressions.expression):
"""
An object representing a variable reference expression
(XPath 1.0 grammar production 36: VariableReference)
"""
def __init__(self, name):
# FIXME - the slice of the name can go away once the BisonGen parser
# is updated to use just the qualified name as the `name` argument.
self._name = name[1:]
return
def compile(self, compiler):
"""
Generates opcodes for the expression:
context.variables[namespaceUri, localName]
where `namespaceUri` is the URI bound to the prefix of the
qualified name for the variable reference.
"""
# Construct the expanded-name tuple
prefix, local = splitqname(self._name)
if prefix:
try:
namespace = compiler.namespaces[prefix]
except KeyError:
raise XPathError(XPathError.UNDEFINED_PREFIX, prefix=prefix)
else:
namespace = None
if (namespace, local) not in compiler.variables:
raise XPathError(XPathError.UNDEFINED_VARIABLE,
variable=self._name, key=(namespace, local))
# Add the actual opcodes
compiler.emit('LOAD_FAST', 'context',
'LOAD_ATTR', 'variables',
'LOAD_CONST', namespace,
'LOAD_CONST', local,
'BUILD_TUPLE', 2,
'BINARY_SUBSCR')
return
def compile_as_nodeset(self, compiler):
# Load the callable object
compiler.emit('LOAD_CONST', datatypes.nodeset)
# Build the argument(s)
self.compile(compiler)
compiler.emit('CALL_FUNCTION', 1)
return
def compile_iterable(self, compiler):
# Discard the context node
compiler.emit('POP_TOP')
self.compile(compiler)
compiler.emit('GET_ITER')
return
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
def __unicode__(self):
return u'$' + self._name
# ==== end of Amara-2.0.0a6/lib/xpath/expressions/basics.py ====
from amara.xpath import datatypes
from amara.xpath.expressions import expression
__all__ = ('boolean_expression',
'or_expr', 'and_expr', 'equality_expr', 'relational_expr')
class boolean_expression(expression):
return_type = datatypes.boolean
def compile_as_boolean(self, compiler):
raise NotImplementedError
class _logical_expr(boolean_expression):
def __init__(self, left, op, right):
self._left = left
self._right = right
def compile_as_boolean(self, compiler):
end = compiler.new_block()
self._left.compile_as_boolean(compiler)
compiler.emit(self._opcode, end)
compiler.next_block()
compiler.emit('POP_TOP') # discard last result
self._right.compile_as_boolean(compiler)
compiler.next_block(end)
return
compile = compile_as_boolean
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
self._left.pprint(indent + ' ', stream)
self._right.pprint(indent + ' ', stream)
def __unicode__(self):
return u'%s %s %s' % (self._left, self._op, self._right)
class or_expr(_logical_expr):
"""
An object representing an or expression
(XPath 1.0 grammar production 21: OrExpr)
"""
_op = 'or'
_opcode = 'JUMP_IF_TRUE'
class and_expr(_logical_expr):
"""
An object representing an and expression
(XPath 1.0 grammar production 22: AndExpr)
"""
_op = 'and'
_opcode = 'JUMP_IF_FALSE'
class _comparison_expr(boolean_expression):
def __init__(self, left, op, right):
self._left = left
self._op = op
self._right = right
def compile_as_boolean(self, compiler):
self._left.compile(compiler)
self._right.compile(compiler)
# Convert XPath equals (=) into Python equals (==)
op = self._op == '=' and '==' or self._op
compiler.emit('COMPARE_OP', op)
return
compile = compile_as_boolean
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
self._left.pprint(indent + ' ', stream)
self._right.pprint(indent + ' ', stream)
def __unicode__(self):
return u'%s %s %s' % (self._left, self._op, self._right)
class equality_expr(_comparison_expr):
"""
An object representing an equality expression
(XPath 1.0 grammar production 23: EqualityExpr)
"""
pass
class relational_expr(_comparison_expr):
"""
An object representing a relational expression
(XPath 1.0 grammar production 24: RelationalExpr)
"""
pass
# ==== end of Amara-2.0.0a6/lib/xpath/expressions/booleans.py ====
from amara.xpath import expressions, datatypes
__all__ = ('number_expression',
'additive_expr', 'multiplicative_expr', 'unary_expr')
class number_expression(expressions.expression):
return_type = datatypes.number
def compile_as_number(self, compiler):
raise NotImplementedError
class _binary_expr(number_expression):
_opmap = {
'+' : 'BINARY_ADD',
'-' : 'BINARY_SUBTRACT',
'*' : 'BINARY_MULTIPLY',
'div' : 'BINARY_DIVIDE',
'mod' : 'BINARY_MODULO',
}
def __init__(self, left, op, right):
self._left = left
self._op = op
self._right = right
def compile_as_number(self, compiler):
self._left.compile_as_number(compiler)
self._right.compile_as_number(compiler)
compiler.emit(self._opmap[self._op])
return
compile = compile_as_number
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
self._left.pprint(indent + ' ', stream)
self._right.pprint(indent + ' ', stream)
def __unicode__(self):
return u'%s %s %s' % (self._left, self._op, self._right)
@property
def children(self):
'Parse tree children of a binary expression are the operator, left and right hand expressions'
return (self._left, self._op, self._right)
class additive_expr(_binary_expr):
"""
An object representing an additive expression
(XPath 1.0 grammar production 25: AdditiveExpr)
"""
pass
class multiplicative_expr(_binary_expr):
"""
An object representing an multiplicative expression
(XPath 1.0 grammar production 26: MultiplicativeExpr)
"""
pass
class unary_expr(number_expression):
"""
An object representing a unary expression
(XPath 1.0 grammar production 27: UnaryExpr)
"""
def __init__(self, expr):
self._expr = expr
def compile_as_number(self, compiler):
self._expr.compile_as_number(compiler)
compiler.emit('UNARY_NEGATIVE')
return
compile = compile_as_number
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
self._expr.pprint(indent + ' ', stream)
def __unicode__(self):
return u'-%s' % self._expr
# ==== end of Amara-2.0.0a6/lib/xpath/expressions/numbers.py ====
from amara.xpath import datatypes
from amara.xpath.compiler import xpathcompiler
__all__ = ['expression']
class expression(object):
return_type = datatypes.xpathobject
class __metaclass__(type):
pass
def compile(self, compiler):
raise NotImplementedError('%s.compile' % (self.__class__.__name__,))
def compile_as_boolean(self, compiler):
"""Compiles the expression into a boolean result."""
# Load the callable object
compiler.emit('LOAD_CONST', datatypes.boolean)
# Build the argument(s)
self.compile(compiler)
compiler.emit('CALL_FUNCTION', 1)
return
def compile_as_number(self, compiler):
"""Compiles the expression into a number result."""
# Load the callable object
compiler.emit('LOAD_CONST', datatypes.number)
# Build the argument(s)
self.compile(compiler)
compiler.emit('CALL_FUNCTION', 1)
def compile_as_string(self, compiler):
"""Compiles the expression into a string result."""
# Load the callable object
compiler.emit('LOAD_CONST', datatypes.string)
# Build the argument(s)
self.compile(compiler)
compiler.emit('CALL_FUNCTION', 1)
def compile_as_nodeset(self, compiler):
"""Compiles the expression into a nodeset result.
By default, this is an error.
"""
raise TypeError('cannot convert to a nodeset')
def compile_iterable(self, compiler):
"""Compiles the expression into an iterable.
By default, this is an error.
"""
raise TypeError('cannot convert to an iterable')
def evaluate(self, context):
# Lazily generate the Python function for the expression.
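# The compiled function is assigned to the instance attribute below,
# shadowing this method so subsequent calls skip compilation entirely.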
compiler = xpathcompiler(context)
self.compile(compiler)
self.evaluate = compiler.compile('evaluate',
docstring=unicode(self))
return self.evaluate(context)
def evaluate_as_boolean(self, context):
# Lazily generate the Python function for the expression.
compiler = xpathcompiler(context)
self.compile_as_boolean(compiler)
self.evaluate_as_boolean = compiler.compile('evaluate_as_boolean',
docstring=unicode(self))
return self.evaluate_as_boolean(context)
def evaluate_as_number(self, context):
# Lazily generate the Python function for the expression.
compiler = xpathcompiler(context)
self.compile_as_number(compiler)
self.evaluate_as_number = compiler.compile('evaluate_as_number',
docstring=unicode(self))
return self.evaluate_as_number(context)
def evaluate_as_string(self, context):
# Lazily generate the Python function for the expression.
compiler = xpathcompiler(context)
self.compile_as_string(compiler)
self.evaluate_as_string = compiler.compile('evaluate_as_string',
docstring=unicode(self))
return self.evaluate_as_string(context)
def evaluate_as_nodeset(self, context):
# Lazily generate the Python function for the expression.
compiler = xpathcompiler(context)
self.compile_as_nodeset(compiler)
self.evaluate_as_nodeset = compiler.compile('evaluate_as_nodeset',
docstring=unicode(self))
return self.evaluate_as_nodeset(context)
def __str__(self):
return self.__unicode__().encode('utf-8')
def __unicode__(self):
raise NotImplementedError('subclass %r must override' %
self.__class__.__name__)
def __repr__(self):
ptr = id(self)
if ptr < 0: ptr += 0x100000000L
return '<%s at 0x%x: %s>' % (self.__class__.__name__, ptr, self)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xpath/expressions/__init__.py
|
__init__.py
|
from amara.xpath import datatypes, expressions
__all__ = ('nodeset_expression',
'union_expr', 'path_expr', 'filter_expr')
class nodeset_expression(expressions.expression):
return_type = datatypes.nodeset
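# _make_block is a single-shot generator used as a code "bracket":
# advancing it once emits the opcodes that save the context state
# (node, position, size); the caller then emits the body of the block,
# and exhausting the generator emits the opcodes that restore that state.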
def _make_block(self, compiler):
compiler.emit(
# Push the context state onto the stack
'LOAD_FAST', 'context',
'LOAD_ATTR', 'node',
'LOAD_FAST', 'context',
'LOAD_ATTR', 'position',
'LOAD_FAST', 'context',
'LOAD_ATTR', 'size',
)
# Yield control back to the caller to allow writing of the body.
yield None
compiler.emit(
# Move the result 4 places down the stack (past the context state).
'ROT_FOUR',
# Restore context values in reverse order of above!
'LOAD_FAST', 'context',
'STORE_ATTR', 'size',
'LOAD_FAST', 'context',
'STORE_ATTR', 'position',
'LOAD_FAST', 'context',
'STORE_ATTR', 'node',
)
return
def _make_loop(self, compiler, foundops, emptyops):
for block in self._make_block(compiler):
end_block = compiler.new_block()
else_block = compiler.new_block()
compiler.emit(
'LOAD_FAST', 'context',
'LOAD_ATTR', 'node',
'BUILD_TUPLE', 1,
)
self.compile_iterable(compiler)
compiler.emit(
'GET_ITER',
# Set the loop to jump to the `else` block when `iter` is empty
'FOR_ITER', else_block,
)
# Emit the instructions for a successful match
compiler.emit(*foundops)
compiler.emit(
# Remove `iter` from the stack
'ROT_TWO',
'POP_TOP',
# Jump to the end of the `else` block
'JUMP_FORWARD', end_block,
)
# Begin the `else` block
compiler.next_block(else_block)
# Emit the instructions to use for no match
compiler.emit(*emptyops)
compiler.next_block(end_block)
return
def compile_as_boolean(self, compiler):
found = ('POP_TOP', # discard context node from the stack
'LOAD_CONST', datatypes.boolean.TRUE)
empty = ('LOAD_CONST', datatypes.boolean.FALSE)
return self._make_loop(compiler, found, empty)
def compile_as_number(self, compiler):
# Call number() on the matched object
found = ('LOAD_CONST', datatypes.number,
'ROT_TWO',
'CALL_FUNCTION', 1)
# Use number.NaN as the result. We cannot use this value directly
# as the assembler needs to be able to use equality testing.
empty = ('LOAD_CONST', datatypes.number,
'LOAD_ATTR', 'NaN')
return self._make_loop(compiler, found, empty)
def compile_as_string(self, compiler):
# Call string() on the matched object
found = ('LOAD_CONST', datatypes.string,
'ROT_TWO',
'CALL_FUNCTION', 1)
empty = ('LOAD_CONST', datatypes.string.EMPTY)
return self._make_loop(compiler, found, empty)
def compile_as_nodeset(self, compiler):
for block in self._make_block(compiler):
compiler.emit('LOAD_CONST', datatypes.nodeset,
# add context node to the stack
'LOAD_FAST', 'context',
'LOAD_ATTR', 'node',
'BUILD_TUPLE', 1,
)
self.compile_iterable(compiler)
compiler.emit('CALL_FUNCTION', 1)
return
compile = compile_as_nodeset
def compile_iterable(self, compiler):
raise NotImplementedError(self.__class__.__name__)
def select(self, context, nodes=None):
raise NotImplementedError(self.__class__.__name__)
class union_expr(nodeset_expression):
"""
An object representing a union expression
(XPath 1.0 grammar production 18: UnionExpr)
"""
def __init__(self, left, right):
if isinstance(left, union_expr):
self._paths = left._paths
self._paths.append(right)
else:
self._paths = [left, right]
return
def compile_iterable(self, compiler):
from amara.xpath.locationpaths import _paths
emit = compiler.emit
tmpname = compiler.tmpname()
emit(# store the current context node
'STORE_FAST', tmpname,
# begin the UnionIter function call construction
'LOAD_CONST', _paths.unioniter,
)
# build the arguments for the function call
for path in self._paths:
# restore the context node
emit('LOAD_FAST', tmpname)
path.compile_iterable(compiler)
emit('CALL_FUNCTION', len(self._paths),
# clear stored context node
'DELETE_FAST', tmpname,
)
return
def compile_as_boolean(self, compiler):
end = compiler.new_block()
for path in self._paths[:-1]:
path.compile_as_boolean(compiler)
compiler.emit('JUMP_IF_TRUE', end)
compiler.next_block()
compiler.emit('POP_TOP')
self._paths[-1].compile_as_boolean(compiler)
compiler.next_block(end)
return
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
for path in self._paths:
path.pprint(indent + ' ', stream)
def __unicode__(self):
return u' | '.join(map(unicode, self._paths))
@property
def children(self):
'Children of the parse tree of a union expression are the left and right hand expressions'
return self._paths
class path_expr(nodeset_expression):
"""
An object representing a path expression
(XPath 1.0 grammar production 19: PathExpr)
"""
def __init__(self, expression, sep, path):
if sep == '//':
from amara.xpath.locationpaths import \
relative_location_path, location_step
from amara.xpath.locationpaths.axisspecifiers import axis_specifier
from amara.xpath.locationpaths.nodetests import node_type
assert isinstance(path, relative_location_path), repr(path)
step = location_step(axis_specifier('descendant-or-self'),
node_type('node'))
path._steps.insert(0, step)
self._expression = expression
self._path = path
return
def compile_iterable(self, compiler):
if isinstance(self._expression, nodeset_expression):
self._expression.compile_iterable(compiler)
else:
# discard context node from the stack
compiler.emit('POP_TOP')
self._expression.compile_as_nodeset(compiler)
self._path.compile_iterable(compiler)
return
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
self._expression.pprint(indent + ' ', stream)
self._path.pprint(indent + ' ', stream)
def __unicode__(self):
return u'%s/%s' % (self._expression, self._path)
class filter_expr(nodeset_expression):
"""
An object representing a filter expression
(XPath 1.0 grammar production 20: FilterExpr)
"""
def __init__(self, expression, predicates):
self._expression = expression
self._predicates = predicates
return
def compile_iterable(self, compiler):
# discard context node from the stack
from amara.xpath.locationpaths import _paths
compiler.emit('POP_TOP')
self._expression.compile_as_nodeset(compiler)
if self._predicates:
predicates = _paths.pathiter(p.select for p in self._predicates)
compiler.emit('LOAD_CONST', predicates.select,
'LOAD_FAST', 'context',
# stack is now [context, select, nodes]
'ROT_THREE',
# stack is now [select, nodes, context]
'ROT_THREE',
# stack is now [nodes, context, select]
'CALL_FUNCTION', 2,
)
return
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
self._expression.pprint(indent + ' ', stream)
self._predicates.pprint(indent + ' ', stream)
def __unicode__(self):
return u'%s%s' % (self._expression, self._predicates)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xpath/expressions/nodesets.py
|
nodesets.py
|
from amara import tree
# Bind the class name in the global scope so that the metaclass can be
# safely called for the construction of the initial class.
axis_specifier = None
class axis_specifier(object):
_classmap = {}
principal_type = tree.element
reverse = False
class __metaclass__(type):
if __debug__:
def __new__(cls, name, bases, namespace):
if axis_specifier is not None:
assert 'name' in namespace
return type.__new__(cls, name, bases, namespace)
def __init__(cls, name, bases, namespace):
if axis_specifier is not None:
cls._classmap[cls.name] = cls
# Allow axis specifier classes to be instantiated directly
cls.__new__ = object.__new__
def __new__(cls, name):
return object.__new__(cls._classmap[name])
def select(self, node):
raise NotImplementedError
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
def __repr__(self):
ptr = id(self)
if ptr < 0: ptr += 0x100000000L
return '<%s at 0x%x: %s>' % (self.__class__.__name__, ptr, self)
def __unicode__(self):
return self.name
def __str__(self):
return self.__unicode__().encode('utf-8')
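# Illustrative usage (a sketch): the metaclass registers each concrete
# axis class under its `name` attribute, so an axis can be instantiated
# by name:
#
#     axis = axis_specifier('child')      # returns a child_axis instance
#     children = list(axis.select(node))  # nodes along that axis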
class ancestor_axis(axis_specifier):
name = 'ancestor'
reverse = True
def select(self, node):
"""Select all of the ancestors including the root"""
node = node.xml_parent
while node:
yield node
node = node.xml_parent
return
try:
from _axes import ancestor_axis as select
except ImportError:
pass
class ancestor_or_self_axis(axis_specifier):
name = 'ancestor-or-self'
reverse = True
def select(self, node):
"""Select all of the ancestors including ourselves through the root"""
yield node
node = node.xml_parent
while node:
yield node
node = node.xml_parent
return
try:
from _axes import ancestor_or_self_axis as select
except ImportError:
pass
class attribute_axis(axis_specifier):
name = 'attribute'
principal_type = tree.attribute
def select(self, node):
"""Select all of the attributes from the context node"""
return node.xml_attributes
try:
from _axes import attribute_axis as select
except ImportError:
pass
class child_axis(axis_specifier):
name = 'child'
def select(self, node):
"""Select all of the children of the context node"""
return iter(node)
try:
from _axes import child_axis as select
except ImportError:
pass
class descendant_axis(axis_specifier):
name = 'descendant'
def select(self, node):
descendants = self.select
node_type = tree.element
for child in node:
yield child
if isinstance(child, node_type):
for x in descendants(child): yield x
return
try:
from _axes import descendant_axis as select
except ImportError:
pass
class descendant_or_self_axis(descendant_axis):
name = 'descendant-or-self'
_descendants = descendant_axis.select
def select(self, node):
"""Select the context node and all of its descendants"""
yield node
for x in self._descendants(node): yield x
return
try:
from _axes import descendant_or_self_axis as select
except ImportError:
pass
class following_axis(descendant_axis):
name = 'following'
_descendants = descendant_axis.select
def select(self, node):
"""
Select all of the nodes that follow the context node,
not including descendants.
"""
descendants = self._descendants
while node:
sibling = node.xml_following_sibling
while sibling:
yield sibling
for x in descendants(sibling): yield x
sibling = sibling.xml_following_sibling
node = node.xml_parent
return
class following_sibling_axis(axis_specifier):
name = 'following-sibling'
def select(self, node):
"""Select all of the siblings that follow the context node"""
sibling = node.xml_following_sibling
while sibling:
yield sibling
sibling = sibling.xml_following_sibling
return
try:
from _axes import following_sibling_axis as select
except ImportError:
pass
class namespace_axis(axis_specifier):
name = 'namespace'
principal_type = tree.namespace
def select(self, node):
"""Select all of the namespaces from the context node."""
return node.xml_namespaces.nodes()
try:
from _axes import namespace_axis as select
except ImportError:
pass
class parent_axis(axis_specifier):
name = 'parent'
reverse = True
def select(self, node):
"""Select the parent of the context node"""
parent_node = node.xml_parent
if parent_node:
yield parent_node
return
class preceding_axis(axis_specifier):
"""
The preceding axis contains all nodes in the same document as the context node that
are before the context node in document order, excluding any ancestors and
excluding attribute nodes and namespace nodes
"""
name = 'preceding'
reverse = True
def select(self, node):
"""
Select all nodes in the same document as the context node that
are before the context node in document order, excluding any ancestors and
excluding attribute nodes and namespace nodes
"""
def preceding(node):
while node:
if isinstance(node, tree.element):
child = node.xml_last_child
if child:
for x in preceding(child): yield x
yield node
node = node.xml_preceding_sibling
return
while node:
for x in preceding(node.xml_preceding_sibling): yield x
node = node.xml_parent
return
class preceding_sibling_axis(axis_specifier):
name = 'preceding-sibling'
reverse = True
def select(self, node):
"""Select all of the siblings that precede the context node"""
sibling = node.xml_preceding_sibling
while sibling:
yield sibling
sibling = sibling.xml_preceding_sibling
return
class self_axis(axis_specifier):
name = 'self'
def select(self, node):
"""Select the context node"""
yield node
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xpath/locationpaths/axisspecifiers.py
|
axisspecifiers.py
|
from __future__ import absolute_import
from itertools import count, izip
from amara.xpath import datatypes
from amara.xpath.expressions.basics import literal, variable_reference
from amara.xpath.expressions.booleans import equality_expr, relational_expr
from amara.xpath.functions import position_function
from ._nodetests import positionfilter
from ._paths import pathiter
__all__ = ['predicates', 'predicate']
class predicates(tuple):
def __init__(self, *args):
self.select = pathiter(pred.select for pred in self).select
return
def filter(self, nodes, context, reverse):
if self:
state = context.node, context.position, context.size
for predicate in self:
nodes = datatypes.nodeset(predicate.select(context, nodes))
context.node, context.position, context.size = state
else:
nodes = datatypes.nodeset(nodes)
if reverse:
nodes.reverse()
return nodes
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
for pred in self:
pred.pprint(indent + ' ', stream)
def __str__(self):
return self.__unicode__().encode('utf-8')
def __repr__(self):
ptr = id(self)
if ptr < 0: ptr += 0x100000000L
return '<%s at 0x%x: %s>' % (self.__class__.__name__, ptr, self)
def __unicode__(self):
return u''.join(map(unicode, self))
#FIXME: should this derive from boolean_expression?
class predicate:
def __init__(self, expression):
self._expr = expression
self._provide_context_size = False #See http://trac.xml3k.org/ticket/62
#FIXME: There are probably many code paths which need self._provide_context_size set
# Check for just "Number"
if isinstance(expression, literal):
const = datatypes.number(expression._literal)
index = int(const)
if index == const and index >= 1:
self.select = positionfilter(index)
else:
# FIXME: add warning that expression will not select anything
self.select = lambda context, nodes: iter(())  # selects nothing
return
# Check for "position() = Expr"
elif isinstance(expression, equality_expr) and expression._op == '=':
if isinstance(expression._left, position_function):
expression = expression._right
if isinstance(expression, literal):
const = datatypes.number(expression._literal)
index = int(const)
if index == const and index >= 1:
self.select = positionfilter(index)
else:
self.select = lambda context, nodes: iter(())  # selects nothing
else:
#FIXME: This will kick in the non-lazy behavior too broadly, e.g. in the case of [position = 1+1]
#See: http://trac.xml3k.org/ticket/62
self._provide_context_size = True
self._expr = expression
self.select = self._number
return
elif isinstance(expression._right, position_function):
expression = expression._left
if isinstance(expression, literal):
const = datatypes.number(expression._literal)
index = int(const)
if index == const and index >= 1:
self.select = positionfilter(index)
else:
self.select = lambda context, nodes: iter(())  # selects nothing
else:
self._expr = expression
self.select = self._number
return
# Check for "position() [>,>=] Expr" or "Expr [<,<=] position()"
# FIXME - do full slice-type notation
elif isinstance(expression, relational_expr):
op = expression._op
if (isinstance(expression._left, position_function) and
isinstance(expression._right, (literal, variable_reference))
and op in ('>', '>=')):
self._start = expression._right
self._position = (op == '>')
self.select = self._slice
return
elif (isinstance(expression._left, (literal, variable_reference))
and isinstance(expression._right, position_function)
and op in ('<', '<=')):
self._start = expression._left
self._position = (op == '<')
self.select = self._slice
return
if issubclass(expression.return_type, datatypes.number):
self.select = self._number
elif expression.return_type is not datatypes.xpathobject:
assert issubclass(expression.return_type, datatypes.xpathobject)
self.select = self._boolean
return
def _slice(self, context, nodes):
start = self._start.evaluate_as_number(context)
position = self._position
if position > start:
return nodes
position += 1
nodes = iter(nodes)
for node in nodes:
if position > start:
break
position += 1
return nodes
def _number(self, context, nodes):
expr = self._expr
position = 1
if self._provide_context_size:
nodes = list(nodes)
context.size = len(nodes)
context.current_node = context.node
for node in nodes:
context.node, context.position = node, position
if expr.evaluate_as_number(context) == position:
yield node
position += 1
return
def _boolean(self, context, nodes):
expr = self._expr
position = 1
context.current_node = context.node
for node in nodes:
context.node, context.position = node, position
if expr.evaluate_as_boolean(context):
yield node
position += 1
return
def select(self, context, nodes):
expr = self._expr
position = 1
context.current_node = context.node
for node in nodes:
context.node, context.position = node, position
result = expr.evaluate(context)
if isinstance(result, datatypes.number):
# This must be separate to prevent falling into
# the boolean check.
if result == position:
yield node
elif result:
yield node
position += 1
return
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
self._expr.pprint(indent + ' ', stream)
def __str__(self):
return self.__unicode__().encode('utf-8')
def __repr__(self):
ptr = id(self)
if ptr < 0: ptr += 0x100000000L
return '<%s at 0x%x: %s>' % (self.__class__.__name__, ptr, self)
def __unicode__(self):
return u'[%s]' % self._expr
@property
def children(self):
'Child of the parse tree of a predicate is its expression'
return (self._expr,)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xpath/locationpaths/predicates.py
|
predicates.py
|
from amara.xpath import XPathError
from amara.xpath.expressions import nodesets
from amara.xpath.locationpaths import axisspecifiers, nodetests
from amara.xpath.locationpaths import _paths #i.e. lib/xpath/src/paths.c
class location_path(nodesets.nodeset_expression):
"""
An object representing a location path
(XPath 1.0 grammar production 1: LocationPath)
"""
_steps = ()
def compile_iterable(self, compiler):
emit = compiler.emit
if self.absolute:
emit(
# discard existing context node on stack
'POP_TOP',
# make the root node the context node
'LOAD_FAST', 'context',
'LOAD_ATTR', 'node',
'LOAD_ATTR', 'xml_root',
'BUILD_TUPLE', 1,
)
for step in self._steps:
# spare an attribute lookup
axis, node_test = step.axis, step.node_test
# get the node filter to use for the node iterator
node_filter = node_test.get_filter(compiler, axis.principal_type)
if node_filter:
node_filter = node_filter.select
predicates = step.predicates
if predicates:
predicates = [ predicate.select for predicate in predicates ]
# create the node iterator for this step
step = _paths.stepiter(axis.select, axis.reverse, node_filter,
predicates)
# add the opcodes for calling `step.select(context, nodes)`
emit('LOAD_CONST', step.select,
'LOAD_FAST', 'context',
# stack is now [context, step.select, nodes]
'ROT_THREE',
# stack is now [step.select, nodes, context]
'ROT_THREE',
# stack is now [nodes, context, step.select]
'CALL_FUNCTION', 2,
)
return
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
for step in self._steps:
step.pprint(indent + ' ', stream)
def __unicode__(self):
path = u'/'.join(map(unicode, self._steps))
if self.absolute:
path = u'/' + path
return path
@property
def children(self):
'Parse tree children of a location path are its steps'
return tuple(self._steps)
class absolute_location_path(location_path):
"""
An object representing an absolute location path
(XPath 1.0 grammar production 2: AbsoluteLocationPath)
"""
absolute = True
def __init__(self, path=None):
if path is None:
self._steps = ()
else:
assert isinstance(path, relative_location_path)
self._steps = path._steps
class relative_location_path(location_path):
"""
An object representing a relative location path
(XPath 1.0 grammar production 3: RelativeLocationPath)
"""
absolute = False
def __init__(self, path, step=None):
if step is None:
assert isinstance(path, location_step), repr(path)
self._steps = [path]
else:
assert isinstance(path, relative_location_path), path
assert isinstance(step, location_step), step
self._steps = path._steps
self._steps.append(step)
class abbreviated_absolute_location_path(absolute_location_path):
"""
An object representing an abbreviated absolute location path
(XPath 1.0 grammar production 10: AbbreviatedAbsoluteLocationPath)
"""
def __init__(self, path):
assert isinstance(path, relative_location_path)
self._steps = path._steps
# `a//b` is the same as `a/descendant::b` if `b` uses the `child`
# axis and has no (positional) predicates
step = path._steps[0]
if not step.predicates and isinstance(step.axis,
axisspecifiers.child_axis):
axis = axisspecifiers.axis_specifier('descendant')
path._steps[0] = location_step(axis, step.node_test)
else:
axis = axisspecifiers.axis_specifier('descendant-or-self')
node_test = nodetests.node_type('node')
abbrev = location_step(axis, node_test)
self._steps.insert(0, abbrev)
class abbreviated_relative_location_path(relative_location_path):
"""
An object representing an abbreviated relative location path
(XPath 1.0 grammar production 11: AbbreviatedRelativeLocationPath)
"""
def __init__(self, path, step):
assert isinstance(path, relative_location_path)
assert isinstance(step, location_step)
self._steps = path._steps
# `a//b` is the same as `a/descendant::b` if `b` uses the `child`
# axis and has no (positional) predicates
if not step.predicates and isinstance(step.axis,
axisspecifiers.child_axis):
axis = axisspecifiers.axis_specifier('descendant')
step = location_step(axis, step.node_test)
else:
axis = axisspecifiers.axis_specifier('descendant-or-self')
node_test = nodetests.node_type('node')
abbrev = location_step(axis, node_test)
self._steps.append(abbrev)
self._steps.append(step)
class location_step(object):
"""
An object representing a location step
(XPath 1.0 grammar production 4: Step)
"""
__slots__ = ('axis', 'node_test', 'predicates')
def __init__(self, axis, node_test, predicates=None):
self.axis = axis
self.node_test = node_test
self.predicates = predicates
def pprint(self, indent='', stream=None):
print >> stream, indent + repr(self)
self.axis.pprint(indent + ' ', stream)
self.node_test.pprint(indent + ' ', stream)
if self.predicates:
self.predicates.pprint(indent + ' ', stream)
def __str__(self):
return self.__unicode__().encode('utf-8')
def __repr__(self):
ptr = id(self)
if ptr < 0: ptr += 0x100000000L
return '<%s at 0x%x: %s>' % (self.__class__.__name__, ptr, self)
def __unicode__(self):
# allows display of abbreviated syntax, if possible
if isinstance(self.node_test, nodetests.any_node_test):
if isinstance(self.axis, axisspecifiers.self_axis):
return '.'
if isinstance(self.axis, axisspecifiers.parent_axis):
return '..'
if isinstance(self.axis, axisspecifiers.descendant_or_self_axis):
return ''
return u'%s::%s%s' % (self.axis, self.node_test, self.predicates or u'')
@property
def children(self):
'Children of the parse tree of a location step are its axis, node test and predicates'
return (self.axis, self.node_test, self.predicates)
class abbreviated_step(location_step):
"""
An object representing an abbreviated location step
(XPath 1.0 grammar production 12: AbbreviatedStep)
"""
node_test = nodetests.node_type('node')
predicates = None
def __init__(self, abbrev):
if abbrev == '.':
axis = 'self'
else:
assert abbrev == '..'
axis = 'parent'
self.axis = axisspecifiers.axis_specifier(axis)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xpath/locationpaths/__init__.py
|
__init__.py
|
# Amazon DenseClus
<p align="left">
<a href="https://github.com/awslabs/amazon-denseclus/actions/workflows/tests.yml"><img alt="build" src="https://github.com/awslabs/amazon-denseclus/actions/workflows/tests.yml/badge.svg"></a>
<a><img alt="total download" src="https://static.pepy.tech/personalized-badge/amazon-denseclus?period=total&units=international_system&left_color=black&right_color=green&left_text=Total Downloads"></a>
<a><img alt="month download" src="https://static.pepy.tech/personalized-badge/amazon-denseclus?period=month&units=international_system&left_color=black&right_color=green&left_text=Monthly Downloads"></a>
<a><img alt="weekly download" src="https://static.pepy.tech/personalized-badge/amazon-denseclus?period=week&units=international_system&left_color=black&right_color=green&left_text=Weekly Downloads"></a>
<a href="https://badge.fury.io/py/Amazon-DenseClus"><img alt="PyPI version" src="https://badge.fury.io/py/Amazon-DenseClus.svg"></a>
<a><img alt="PyPI - Python Version" src="https://img.shields.io/pypi/pyversions/Amazon-DenseClus"></a>
<a><img alt="PyPI - Wheel" src="https://img.shields.io/pypi/wheel/Amazon-DenseClus"></a>
<a><img alt="PyPI - License" src="https://img.shields.io/pypi/l/Amazon-DenseClus"></a>
<a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
<a href="https://github.com/marketplace/actions/super-linter"><img alt="Github Super-Linter" src="https://github.com/awslabs/amazon-denseclus/workflows/Lint%20Code%20Base/badge.svg"></a>
</p>
DenseClus is a Python module for clustering mixed type data using [UMAP](https://github.com/lmcinnes/umap) and [HDBSCAN](https://github.com/scikit-learn-contrib/hdbscan). Allowing for both categorical and numerical data, DenseClus makes it possible to incorporate all features in clustering.
## Installation
```bash
python3 -m pip install Amazon-DenseClus
```
## Usage
DenseClus requires a pandas DataFrame as input, with both numerical and categorical columns.
All preprocessing and extraction are done under the hood; just call fit and then retrieve the clusters!
```python
from denseclus import DenseClus
clf = DenseClus(
umap_combine_method="intersection_union_mapper",
)
clf.fit(df)
print(clf.score())
```
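To attach the resulting labels back to your input rows, here is a minimal sketch (assuming `df` is the same DataFrame passed to `fit`; `score()` returns one HDBSCAN label per row, with `-1` marking noise points):
```python
labels = clf.score()
df["cluster"] = labels
print(df["cluster"].value_counts())
```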
## Examples
A hands-on example with an overview of how to use DenseClus is currently available in the form of a [Jupyter Notebook](/notebooks/DenseClus%20Example%20NB.ipynb).
## References
```bibtex
@article{mcinnes2018umap-software,
title={UMAP: Uniform Manifold Approximation and Projection},
author={McInnes, Leland and Healy, John and Saul, Nathaniel and Grossberger, Lukas},
journal={The Journal of Open Source Software},
volume={3},
number={29},
pages={861},
year={2018}
}
```
```bibtex
@article{mcinnes2017hdbscan,
title={hdbscan: Hierarchical density based clustering},
author={McInnes, Leland and Healy, John and Astels, Steve},
journal={The Journal of Open Source Software},
volume={2},
number={11},
pages={205},
year={2017}
}
```
|
Amazon-DenseClus
|
/Amazon%20DenseClus-0.0.9.tar.gz/Amazon DenseClus-0.0.9/README.md
|
README.md
|
# Contributing Guidelines
Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
documentation, we greatly value feedback and contributions from our community.
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
information to effectively respond to your bug report or contribution.
## Reporting Bugs/Feature Requests
We welcome you to use the GitHub issue tracker to report bugs or suggest features.
When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
* A reproducible test case or series of steps
* The version of our code being used
* Any modifications you've made relevant to the bug
* Anything unusual about your environment or deployment
## Contributing via Pull Requests
Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
1. You are working against the latest source on the *main* branch.
2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
To send us a pull request, please:
1. Fork the repository.
2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
3. Ensure local tests pass.
4. Commit to your fork using clear commit messages.
5. Send us a pull request, answering any default questions in the pull request interface.
6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
## Finding contributions to work on
Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
## Code of Conduct
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
opensource-codeofconduct@amazon.com with any additional questions or comments.
## Security issue notifications
If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
## Licensing
See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
|
Amazon-DenseClus
|
/Amazon%20DenseClus-0.0.9.tar.gz/Amazon DenseClus-0.0.9/CONTRIBUTING.md
|
CONTRIBUTING.md
|
import logging
import warnings
import hdbscan
import numpy as np
import pandas as pd
import umap.umap_ as umap
from hdbscan import flat
from sklearn.base import BaseEstimator, ClassifierMixin
from .utils import check_is_df, extract_categorical, extract_numerical
logger = logging.getLogger("denseclus")
logger.setLevel(logging.ERROR)
sh = logging.StreamHandler()
sh.setFormatter(
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"),
)
logger.addHandler(sh)
class DenseClus(BaseEstimator, ClassifierMixin):
"""DenseClus
Creates UMAP embeddings and HDBSCAN clusters from mixed data
Parameters
----------
random_state : int, default=None
Random State for both UMAP and numpy.random.
If set to None UMAP will run in Numba in multicore mode but
results may vary between runs.
Setting a fixed random seed may help to offset the stochastic
nature of UMAP.
n_neighbors : int, default=30
Level of neighbors for UMAP.
Setting this higher will generate higher densities at the expense
of more computation.
min_samples : int, default=15
Samples used for HDBSCAN.
The larger this is set, the more points get declared as noise and
the more restricted clusters become to only dense areas.
min_cluster_size : int, default=100
Minimum Cluster size for HDBSCAN.
The minimum number of points from which a cluster needs to be
formed.
n_components : int, default=logarithm
Number of components for UMAP.
These are dimensions to reduce the data down to.
Ideally, this needs to be a value that preserves all the information
to form meaningful clusters. Default is the logarithm of the total
number of features.
cluster_selection_method: str, default=eom
The HDBSCAN selection method for how flat clusters are selected from
the cluster hierarchy. Defaults to EOM, or Excess of Mass.
umap_combine_method : str, default=intersection
Method by which to combine embeddings spaces.
Options include: intersection, union, contrast,
intersection_union_mapper
The latter combines both the intersection and union of
the embeddings.
See:
https://umap-learn.readthedocs.io/en/latest/composing_models.html
prediction_data: bool, default=False
Whether to generate extra cached data for predicting labels or
membership vectors for new unseen points later. If you wish to
persist the clustering object for later re-use you probably want
to set this to True.
See:
https://hdbscan.readthedocs.io/en/latest/soft_clustering.html
verbose : bool, default=False
Level of verbosity to print when fitting and predicting.
Setting to False will only show Warnings that appear.
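Example
-------
A minimal usage sketch (assuming `df` is a pandas DataFrame with both
numerical and categorical columns):
>>> clf = DenseClus(umap_combine_method="union")
>>> clf.fit(df)
>>> labels = clf.score()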
"""
def __init__(
self,
random_state: int = None,
n_neighbors: int = 30,
min_samples: int = 15,
min_cluster_size: int = 100,
n_components: int = None,
cluster_selection_method: str = "eom",
umap_combine_method: str = "intersection",
prediction_data: bool = False,
verbose: bool = False,
flat_clusters: int = None,
):
self.random_state = random_state
self.n_neighbors = n_neighbors
self.min_samples = min_samples
self.min_cluster_size = min_cluster_size
self.n_components = n_components
self.cluster_selection_method = cluster_selection_method
self.umap_combine_method = umap_combine_method
self.prediction_data = prediction_data
self.flat_clusters = flat_clusters
if verbose:
logger.setLevel(logging.DEBUG)
self.verbose = True
else:
logger.setLevel(logging.ERROR)
self.verbose = False
# suppress deprecation warnings
# see: https://stackoverflow.com/questions/54379418
def noop(*args, **kargs):
pass
warnings.warn = noop
if isinstance(random_state, int):
np.random.seed(seed=random_state)
else:
logger.info("No random seed passed, running UMAP in Numba")
def __repr__(self):
return str(self.__dict__)
def fit(self, df: pd.DataFrame) -> None:
"""Fit function for call UMAP and HDBSCAN
Parameters
----------
df : pandas DataFrame
DataFrame object with named columns of categorical and numerics
Returns
-------
Fitted: None
Fitted UMAPs and HDBSCAN
"""
check_is_df(df)
if not isinstance(self.n_components, int):
self.n_components = int(round(np.log(df.shape[1])))
logger.info("Extracting categorical features")
self.categorical_ = extract_categorical(df)
logger.info("Extracting numerical features")
self.numerical_ = extract_numerical(df)
logger.info("Fitting categorical UMAP")
self._fit_categorical()
logger.info("Fitting numerical UMAP")
self._fit_numerical()
logger.info("Mapping/Combining Embeddings")
self._umap_embeddings()
logger.info("Fitting HDBSCAN...")
self._fit_hdbscan()
def _fit_numerical(self):
numerical_umap = umap.UMAP(
metric="l2",
n_neighbors=self.n_neighbors,
n_components=self.n_components,
min_dist=0.0,
random_state=self.random_state,
).fit(self.numerical_)
self.numerical_umap_ = numerical_umap
return self
def _fit_categorical(self):
categorical_umap = umap.UMAP(
metric="dice",
n_neighbors=self.n_neighbors,
n_components=self.n_components,
min_dist=0.0,
random_state=self.random_state,
).fit(self.categorical_)
self.categorical_umap_ = categorical_umap
return self
def _umap_embeddings(self):
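# umap-learn overloads the *, + and - operators on fitted models to
# compose their embeddings: * is intersection, + is union, and - is
# contrast; see the composing-models link in the class docstring.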
if self.umap_combine_method == "intersection":
self.mapper_ = self.numerical_umap_ * self.categorical_umap_
elif self.umap_combine_method == "union":
self.mapper_ = self.numerical_umap_ + self.categorical_umap_
elif self.umap_combine_method == "contrast":
self.mapper_ = self.numerical_umap_ - self.categorical_umap_
elif self.umap_combine_method == "intersection_union_mapper":
intersection_mapper = umap.UMAP(
random_state=self.random_state,
n_neighbors=self.n_neighbors,
n_components=self.n_components,
min_dist=0.0,
).fit(self.numerical_)
self.mapper_ = intersection_mapper * (
self.numerical_umap_ + self.categorical_umap_
)
else:
raise KeyError("Select valid UMAP combine method")
return self
def _fit_hdbscan(self):
if self.flat_clusters:
flat_model = flat.HDBSCAN_flat(
X=self.mapper_.embedding_,
cluster_selection_method=self.cluster_selection_method,
n_clusters=self.flat_clusters,
min_samples=self.min_samples,
metric="euclidean",
)
self.hdbscan_ = flat_model
else:
hdb = hdbscan.HDBSCAN(
min_samples=self.min_samples,
min_cluster_size=self.min_cluster_size,
cluster_selection_method=self.cluster_selection_method,
prediction_data=self.prediction_data,
gen_min_span_tree=True,
metric="euclidean",
).fit(self.mapper_.embedding_)
self.hdbscan_ = hdb
return self
def score(self):
"""Returns the cluster assigned to each row.
This is a wrapper function for HDBSCAN. It outputs the cluster labels
that HDBSCAN converged on.
Parameters
----------
None : None
Returns
-------
labels : np.array([int])
"""
return self.hdbscan_.labels_
|
Amazon-DenseClus
|
/Amazon%20DenseClus-0.0.9.tar.gz/Amazon DenseClus-0.0.9/denseclus/DenseClus.py
|
DenseClus.py
|
from warnings import filterwarnings
import numpy as np
import pandas as pd
from sklearn.preprocessing import PowerTransformer
def check_is_df(df: pd.DataFrame) -> None:
if not isinstance(df, pd.DataFrame):
raise TypeError("Requires DataFrame as input")
def extract_categorical(df: pd.DataFrame) -> pd.DataFrame:
"""Extracts categorical features into binary dummy dataframe
Parameters:
df (pd.DataFrame): DataFrame with numerical and categorical features
Returns:
pd.DataFrame: binary dummy DataFrame of categorical features
"""
check_is_df(df)
categorical = df.select_dtypes(exclude=["float", "int"])
if categorical.shape[1] == 0:
raise ValueError("No Categories found, check that objects are in dataframe")
categorical_dummies = pd.get_dummies(categorical)
return categorical_dummies
def extract_numerical(df: pd.DataFrame) -> pd.DataFrame:
"""Extracts numerical features into normailzed numeric only dataframe
Parameters:
df (pd.DataFrame): DataFrame with numerical and categorical features
Returns:
pd.DataFrame: normalized numerical DataFrame of numerical features
"""
check_is_df(df)
numerical = df.select_dtypes(include=["float", "int"])
if numerical.shape[1] == 0:
raise ValueError("No numerics found, check that numerics are in dataframe")
return transform_numerics(numerical)
def transform_numerics(numerical: pd.DataFrame) -> pd.DataFrame:
"""Power transforms numerical DataFrame
Parameters:
numerical (pd.DataFrame): Numerical features DataFrame
Returns:
pd.DataFrame: Normalized DataFrame of Numerical features
"""
check_is_df(numerical)
for names in numerical.columns.tolist():
pt = PowerTransformer(copy=False)
# TO DO: fix this warning message
filterwarnings("ignore")
numerical.loc[:, names] = pt.fit_transform(
np.array(numerical.loc[:, names]).reshape(-1, 1),
)
filterwarnings("default")
return numerical
|
Amazon-DenseClus
|
/Amazon%20DenseClus-0.0.9.tar.gz/Amazon DenseClus-0.0.9/denseclus/utils.py
|
utils.py
|
# On the Validation of UMAP
There is not a large body of _practical_ work on validating [Uniform Manifold Approximation and Projection (UMAP)](https://arxiv.org/abs/1802.03426). In this blog post, I will show you a real example, in the hope of providing an additional method for validating the algorithm's results.
In general, a common practice is to validate UMAP's convergence based on a downstream task. For example, in the case of classification, you use an objective metric such as [F1-Score](https://en.wikipedia.org/wiki/F-score) to confirm that the dimensionality reduction technique captured the underlying data structure. However, a high F1-Score **does not assure** that UMAP accurately captured the data's structure. High accuracy on the downstream task just tells you that the data is separable in the lower dimension, performing well given its inputs.
Simply put, use both a measure that evaluates how well the underlying data's structure is retained **and** a downstream task measure. Trustworthiness and Continuity do the former.
This blog post will walk you through how to run Trustworthiness and Continuity as an extra check using the DenseClus package to confirm that UMAP converged into a stable result.
## Before that, What is UMAP?
UMAP is a non-linear dimensionality reduction technique for high dimensional data. Visually similar to the t-SNE algorithm (also eclipsing it), UMAP assumes that the data is uniformly distributed on a [locally connected](https://en.wikipedia.org/wiki/Locally_connected) [Riemannian manifold](https://en.wikipedia.org/wiki/Riemannian_manifold) and that the [Riemannian metric](https://en.wikipedia.org/wiki/Riemannian_metric) is locally constant or approximately locally constant (see the [UMAP documentation](https://umap-learn.readthedocs.io/en/latest/)).
In the [UMAP paper](https://arxiv.org/abs/1802.03426) (UMAP: Uniform Manifold Approximation and Projection, McInnes et al. 2018), there are proofs that require a PhD in topology to fully comprehend.
For now, let’s define it as a **neighbor-based** dimensionality reduction method that can handle numeric and/or categorical data.
If you desire a deeper level of understanding, check out the UMAP documentation link above or one of the PyData talks by the authors.
## Fitting a UMAP
At any rate, let's grab some data to work with.
You'll grab data from the [Churn Pipeline](https://github.com/awslabs/aws-customer-churn-pipeline) repo directly to run the example.
The original churn dataset is publicly available and mentioned in the book [Discovering Knowledge in Data by Daniel T. Larose](https://www.amazon.com/dp/0470908742/). It is attributed by the author to the University of California Irvine Repository of Machine Learning Datasets.
```
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
%matplotlib inline
sns.set_style("darkgrid", {"axes.facecolor": ".9"})
sns.set(rc={"figure.figsize": (10, 8)})
SEED = 42
np.random.seed(SEED) # set the random seed as best we can
data_url = "https://raw.githubusercontent.com/awslabs/aws-customer-churn-pipeline/main/data/churn.txt"
df = pd.read_csv(data_url).sample(n=2000, random_state=SEED)
df.drop(["Phone","Area Code"], axis=1, inplace=True)
```
Now that the dataset is loaded, let's fit Amazon [DenseClus](https://github.com/awslabs/amazon-denseclus) to it.
Underneath the hood, DenseClus runs UMAP and [HDBSCAN](https://github.com/scikit-learn-contrib/hdbscan) to fit coherent groups to mixed type data. [Read more about it in this blog post here](https://aws.amazon.com/blogs/opensource/introducing-denseclus-an-open-source-clustering-package-for-mixed-type-data/).
In essence, the code below fits two UMAP layers, one for the numeric data and one for the categorical data, and then combines the two. For the most part, the preprocessing steps are taken care of under the hood.
```
from denseclus import DenseClus
# To supress deprecation warnings
import logging
logging.captureWarnings(True)
clf = DenseClus(
random_state=SEED,
cluster_selection_method='leaf',
umap_combine_method="intersection_union_mapper"
)
clf.fit(df)
logging.captureWarnings(False)
```
Please note that you are setting a seed here for reproducibility, which means UMAP is going to run on a single core. In the real world you will not run on a single core, so results will vary between runs due to the algorithm's stochastic nature.
```
clf.numerical_umap_.embedding_
```
## How Trustworthiness Works
Trustworthiness and Continuity measure how well a data’s structure is preserved after dimensionality reduction.
Here, the distance of point *i* in high-dimensional space is measured against its k closest neighbours using rank order, and the extent to which each rank changes in low-dimensional space is measured. For n samples, let r(i, j) be the rank in distance of sample j from sample i within the k-neighbourhood U^k_i in the high-dimensional space. Similarly, let r̂(i, j) be the rank of the distance between sample i and sample j within the k-neighbourhood V^k_i in the low-dimensional space. Using the k-nearest neighbours, the map is considered trustworthy if these k neighbours are also placed close to point i in the low-dimensional space.
Similarly, continuity measures the extent to which original clusters of datapoints are preserved, and can be considered the inverse to trustworthiness, finding sample points that are close to point i in low-dimensional space, but not in the high-dimensional plane.
Scores for each calculation are output between 0 and 1. The higher the score, the more the local structure of the original dataset is retained in the UMAP embeddings.
UMAP has a Numba-optimized calculation (under the validation sub-module) that will scale well to medium-sized data.
As noted [here](https://github.com/lmcinnes/umap/issues/6), another reference is in the [SKLearn library](https://github.com/scikit-learn/scikit-learn/blob/ccd3331f7eb3468ac96222dc5350e58c58ccba20/sklearn/manifold/t_sne.py#L394).
Just like the above, the pairwise distance at point K is taken between the original data and the embedding. If any unexpected neighbors are found, they are penalized in proportion to their rank in the original data.
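For reference, a common way to write the trustworthiness score (this matches the scikit-learn implementation; here n is the number of samples, N_i^k is the set of the k nearest neighbors of point i in the embedding, and r(i, j) is the rank of j by distance from i in the original space):
```
T(k) = 1 - \frac{2}{nk(2n - 3k - 1)} \sum_{i=1}^{n} \sum_{j \in N_i^k} \max(0, r(i, j) - k)
```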
Enough of that, let's check that the embedding captured its neighbors in the underlying data.
## Running Validation on the Data
For this example you will set K=30, looking at 30 neighboring points for comparison.
*Note: you are using the default Euclidean distance calculation here, but keep in mind that alternative pairwise or squared distances are also supported. Ultimately, this is a problem-specific choice.*
```
from umap import validation
from warnings import filterwarnings
K = 30
# supress numba deprecation warnings from UMAP
filterwarnings('ignore')
numerical_trustworthiness = validation.trustworthiness_vector(source=clf.numerical_umap_._raw_data
,embedding=clf.numerical_umap_.embedding_
,max_k=K)
categorical_trustworthiness = validation.trustworthiness_vector(source=clf.categorical_umap_._raw_data
,embedding=clf.categorical_umap_.embedding_
,max_k=K)
filterwarnings('default')
_=plt.plot(numerical_trustworthiness)
_=plt.plot(categorical_trustworthiness)
_=plt.ylabel("Trustworthiness score")
_= plt.xlabel("Value of K")
_=plt.title(f"Trustworthiness at {K}")
#_=plt.ylim(0,1)
_=plt.legend(["numerical T", "categorical T"], loc="upper right")
```
Generally speaking, a score above 0.80 is considered to be a **“good”** result.
A lower score indicates that you need to look at the input data and the UMAP parameters to find a better fit.
Looking at the Trustworthiness from K=1 to K=30 for the numeric embedding, we see it reach 0.96 at K=5 and then drop to 0.90 at K=30.
Likewise, the categorical embedding sits at 0.94 at K=5 and drops to 0.93 at K=30, showing better stability than the numeric data.
DenseClus defaults to K=30 neighbors when generating the UMAP embeddings, so this is a fair comparison to make.
## Conclusion
In this blog post you learned how to validate UMAP embeddings by using Trustworthiness to look at how far away neighboring points fall between the source data and the embeddings.
[DenseClus](https://aws.amazon.com/blogs/opensource/introducing-denseclus-an-open-source-clustering-package-for-mixed-type-data/) provided an easy way to demonstrate validating multiple embeddings in this way. Believe it or not, DenseClus combines the two into a third embedding space to compute clusters! However, this space has no input data to compare against, as it is created via set operations between the UMAP graphs. The best way to further validate success here is to proceed to a downstream task.
In the next blog post, I will cover just this.
If you got this far, thanks for reading :)
Keep in mind that you've only scratched the surface here. It's possible to tune the many underlying hyperparameters against the downstream task, with Trustworthiness as a constraint.
The notebook for this example is posted in DenseClus's GitHub repo [here](https://github.com/awslabs/amazon-denseclus/tree/main/notebooks/Validating_UMAP_Example.ipynb).
### References
"Uniform manifold approximation and projection for dimension reduction", McInnes, Leland; Healy, John; Melville, James 2018
“Neighborhood Preservation in Nonlinear Projection Methods: An Experimental Study”, Venna 2001
“Semantically Controlled Adaptive Equalisation in Reduced Dimensionality Parameter Space“, Stasis et al 2016
|
Amazon-DenseClus
|
/Amazon%20DenseClus-0.0.9.tar.gz/Amazon DenseClus-0.0.9/notebooks/Validation For UMAP.ipynb
|
Validation For UMAP.ipynb
|
.. image:: https://img.shields.io/badge/pypi-2.7-green.svg
:target: https://pypi.python.org/pypi/AmazonAPIWrapper
.. image:: https://img.shields.io/badge/version-0.0.11-blue.svg
This is another Amazon API wrapper. With this tool you will be able to retrieve
metadata from the products listed on Amazon. For details on how
the Amazon API works, please visit the Amazon documentation at:
- https://affiliate-program.amazon.com/gp/advertising/api/detail/main.html
Install
--------
.. code-block:: python
>>> pip install AmazonAPIWrapper
Basic Call
-----------
This is a basic call requesting a product by ASIN:
.. code-block:: python
>>> from amazon import AmazonAPI
>>> api = AmazonAPI("ACCESS_KEY", "SECRET_KEY", "ASSOCIATE_TAG")
>>> amz_resp = api.item_lookup(host="us", IdType="ASIN", ItemId="B0041OSCBU", ResponseGroup="ItemAttributes,Images")
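You can then read fields off the response; a sketch (``amz_resp`` is a BeautifulSoup XML object navigable by tag name, and the exact tags depend on the ``ResponseGroup`` you request):
.. code-block:: python
>>> title = amz_resp.find("Title")
>>> if title is not None:
...     print(title.string)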
Troubleshooting:
-----------------
1. Missing Parser?
* apt-get install python-lxml
* pip install lxml (easy_install can also be used here)
* If you are running on a mac, updating xcode helps to resolve the issue:
* xcode-select --install
|
AmazonAPIWrapper
|
/AmazonAPIWrapper-0.0.11.tar.gz/AmazonAPIWrapper-0.0.11/README.rst
|
README.rst
|
import hmac
from urllib import quote
from hashlib import sha256
from base64 import b64encode
from time import strftime, gmtime
import requests
from bs4 import BeautifulSoup
HOSTS = {
'ca': 'ecs.amazonaws.ca',
'cn': 'webservices.amazon.cn',
'de': 'ecs.amazonaws.de',
'es': 'webservices.amazon.es',
'fr': 'ecs.amazonaws.fr',
'it': 'webservices.amazon.it',
'jp': 'ecs.amazonaws.jp',
'uk': 'ecs.amazonaws.co.uk',
'us': 'ecs.amazonaws.com'}
class AmazonAPIError(Exception):
"""
Errors Generated before Amazon Server responds to a call
"""
pass
class AmazonAPIResponseError(Exception):
"""
Exception thrown after evaluating a response from Amazon Server
"""
pass
class AmazonAPI(object):
_service = "AWSECommerceService"
_api_version = "2013-09-01"
_resource = "onca/xml"
def __init__(self, aws_access_key, secret_key, associate_tag):
"""
:param aws_access_key: Amazon access key
:param secret_key: Amazon secret key, KEEP SECRET!!
:param associate_tag: associate amazon tag
"""
self.aws_access_key = aws_access_key.strip()
self.secret_key = secret_key.strip()
self.associate_tag = associate_tag.strip()
def _request_parameters(self, params):
"""
Receives a dictionary with the params required for a
spefic operation on the amazon api (i.e: ItemLookup,
ItemSearch) and adds the necessary/request-identification
parameters. This is the last step before obtaining the
signature and making the request. Here the timestamp is
added to the parameters.
:param params: dictionary, with request parameters
:rType: dictionary
"""
for key, value in params.iteritems():
if value is None:
err_msg = "Value at key:%s in params can't be None/Empty" % key
raise AmazonAPIError(err_msg)
#TODO: This logic is incorrect; the keys below should be added
# automatically. The user should never be able to pass __init__-related
# keys as parameters to an instance method/function. Thus, keys should
# be set directly here or in __init__.
if 'AWSAccessKeyId' not in params:
params['AWSAccessKeyId'] = self.aws_access_key
if 'AssociateTag' not in params:
params['AssociateTag'] = self.associate_tag
if 'Version' not in params:
params['Version'] = self._api_version
if 'Service' not in params:
params['Service'] = self._service
if 'Timestamp' not in params:
params['Timestamp'] = strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
return params
def _build_url(self, params):
"""
Receives a dictionary with the necessary parameters to make a
request to the Amazon API and returns a URL to be used to make
the request to Amazon. The params are also sorted here.
:param params: dictionary, with request parameters
:rType: String
"""
# Convert to string and sort params
string_params = ['%s=%s' % (key, quote(unicode(val).encode('utf-8'),
safe='~'))
for key, val
in params.iteritems()]
sorted_params = sorted(string_params)
params = '&'.join(sorted_params)
signature = self._sign(params)
url = 'http://%s/%s?%s&Signature=%s' % (self._host,
self._resource,
params,
signature)
return url
def _sign(self, params):
"""
Receives a String with the parameters to make a request ready
to be signed and returns a signature to be used as the last
parameter to be added to the request url.
:param params: String
:rType: String
"""
# Build string to sign
string_to_sign = 'GET'
string_to_sign += '\n%s' % self._host
string_to_sign += '\n/%s' % self._resource
string_to_sign += '\n%s' % params
# Get signature
digest = hmac.new(self.secret_key, string_to_sign, sha256).digest()
signature = quote(b64encode(digest))
return signature
def _check_response(self, xml_content):
# Error codes from Amazon that warrant raising a response error
error_codes = (
'InternalError',
'InvalidClientTokenId',
'MissingClientTokenId',
'AWS.MissingParameters',
'RequestThrottled',
'Deprecated',
'AWS.ECommerceService.NoExactMatches',
'AWS.ECommerceService.NoSimilarities',
'AWS.InvalidEnumeratedParameter',
'AWS.InvalidParameterValue',
'AWS.RestrictedParameterValueCombination',
'AccountLimitExceeded',
)
try:
error_code = xml_content.Errors.Error.Code.string
error_msg = xml_content.Errors.Error.Message.string
if error_code in error_codes:
raise AmazonAPIResponseError(error_msg)
except AttributeError:
return xml_content
def _call(self, params):
"""
Receives a dictionary with the params for the request.
Gets a url and then makes a call to the amazon advertising
API which sends back a http response with XML content to
be consumed.
:param params: dictionary, with request parameters
:rType: BeautifulSoup XML Object
"""
# Prepare params for request
request_params = self._request_parameters(params)
request_url = self._build_url(request_params)
# Make request to Amazon's API
response = requests.get(request_url)
xml_content = BeautifulSoup(response.content, "xml")
        # Raise an error for any HTTP status code other than 200
        if response.status_code == 200:
            # Check if the response has errors; if it does, raise an exception.
            xml_content = self._check_response(xml_content)
            return xml_content
        else:
            # TODO: Log the response message from the server here.
            response.raise_for_status()
def _set_host(self, host):
"""
Invoked when performing an Operation on the AmazonAPI, raises a
customized AmazonAPIError, if host isn't none it checks the correct
if the host is valid, if not it raises an exception.
"""
if not host:
raise AmazonAPIError("Host cannot be null/empty")
elif host in HOSTS:
self._host = HOSTS[host]
else:
err_msg = "Invalid host, host must be: ca, cn, de, es, fr, it, \
jp, uk, us"
raise AmazonAPIError(err_msg)
# ===============================================================
# Amazon API Allowed operations
# ===============================================================
def item_lookup(self, host=None, **kwargs):
"""
        Receives a host and a dictionary of parameters to be used for
        calling the API. The host must not be None or empty (an
        exception is raised if it is). Returns the response content
        from the call to the API.
:param host: String, amazon base URL where the call will be made.
:param kwargs: dictionary, with request parameters
:rType: BeautifulSoup XML Object
Every ItemLookup operation must have:
- ItemId
Optional Parameters to make an ItemLookup request are:
- Condition - IdType - IncludeReviewsSummary - MerchantId
- RelatedItemPage - RelationshipType - SearchIndex
- TruncateReviewsAt - VariationPage - ResponseGroup
Response Params(Default):
- ASIN
- Item
- ItemAttributes
- Items
- Manufacturer
- ProductGroup
- Title
        - To get additional params in the response, add them to the ResponseGroup
Official Documentation:
http://docs.aws.amazon.com/AWSECommerceService/latest/DG/ItemLookup.html
"""
self._set_host(host)
kwargs['Operation'] = 'ItemLookup'
return self._call(kwargs)
def item_search(self, host=None, **kwargs):
"""
        Receives a host and a dictionary of parameters to be used for
        calling the API. The host must not be None or empty (an
        exception is raised if it is). Returns the response content
        from the call to the API.
:param host: String, amazon base URL where the call will be made.
:param kwargs: dictionary, with request parameters
:rType: BeautifulSoup XML Object
========================================
======= howto: itemsearch ========
========================================
        Every ItemSearch operation must include a SearchIndex. The
        following parameters can be used to narrow the search (each
        notes which indices it works with):
        1. BrowseNode: every index except All and Blended
        2. Condition: every index except All and Blended
        3. Keywords: All
        4. MaximumPrice: every index except All and Blended
        5. MinimumPrice: every index except All and Blended
        6. Title: every index except All and Blended
Every ItemSearch operation must also include at least one of the
following parameters:
- Actor - Artist - AudienceRating - Author - Brand - BrowseNode
- Composer - Conductor - Director - Keywords - Manufacturer
- MusicLabel - Orchestra - Power - Publisher- Title
Response Params:
        These values are defined via the ResponseGroup parameter
1. ASIN: Amazon Standard Identification Numbers
2. Item: Container for the item information, includes ASIN and
ItemAttributes
3. ItemAttributes: Container for information about the item,
includes Manufacturer, productGroup and Title
4. Manufacturer: Item's manufacturer.
        5. MoreSearchResultsURL: The URL where the complete search
                                 results are displayed. It is the same
                                 URL that would be used on Amazon.com;
                                 it contains the Associate Tag so that
                                 Amazon can track the requests per
                                 hour.
6. ProductGroup: Product category; similar to search index.
        7. Title: Item's title.
        8. TotalPages: Total number of pages in the response. There are
                       up to ten items per page.
        9. TotalResults: Total number of items found.
Official Documentation:
docs.aws.amazon.com/AWSECommerceService/latest/DG/ItemSearch.html
"""
self._set_host(host)
kwargs['Operation'] = 'ItemSearch'
return self._call(kwargs)
def similarity_lookup(self, host=None, **kwargs):
"""
        Receives a host and a dictionary of parameters to be used for
        calling the API. The host must not be None or empty (an
        exception is raised if it is). Returns the response content
        from the call to the API.
:param host: String, amazon base URL where the call will be made.
:param kwargs: dictionary, with request parameters
:rType: BeautifulSoup XML Object
========================================
==== Howto: Similarity lookup ======
========================================
        Every SimilarityLookup operation must have:
- ItemId: Must be a String and a maximum of 10 can be passed at
once.
Optional Parameters:
- Condition
- MerchantId
- SimilarityType
- ResponseGroup
Response Params(Default):
- ASIN
- Item
- ItemAttributes
- ProductGroup
- Title
        - To get additional params in the response, add them to the ResponseGroup
Official Documentation:
docs.aws.amazon.com/AWSECommerceService/latest/DG/SimilarityLookup.html
"""
self._set_host(host)
kwargs['Operation'] = 'SimilarityLookup'
return self._call(kwargs)
def node_browse_lookup(self, host=None, browse_node_id=None,
response_group=None):
"""
Receives a host, browse_node_id, and response_group, from which
only response_group is optional.
:param host: String, amazon base URL where the call will be made.
:param browse_node_id: Integer, amazon node id.
        :param response_group: String, desired response data.
:rType: BeautifulSoup XML Object
========================================
======= Howto: Node Lookup ========
========================================
Every BrowseNodeLookup Operation must have:
- BrowseNodeId
Optional parameters to make a request:
- ResponseGroup (for more params in the request add them here)
Response Params defaults:
- Ancestor: Container object for a parent browse node.
- BrowseNodes: Container object for all browse node data,
including browse node ID, browse node name,
browse node children and ancestors.
- BrowseNodeId: A positive integer that uniquely identifies a
product group, such as Literature & Fiction:(17),
Medicine: (13996), and Mystery & Thrillers: (18).
- Children: Container for one or more browse nodes, which are the
children of the browse node submitted in the request.
        - Name: Name of the BrowseNode, e.g., the name of BrowseNode
                17 is Literature & Fiction.
Official Documentation:
docs.aws.amazon.com/AWSECommerceService/latest/DG/BrowseNodeLookup.html
"""
self._set_host(host)
params = dict()
params['Operation'] = 'BrowseNodeLookup'
if browse_node_id is None:
raise AmazonAPIError('browse_node_id cannot be None/Null')
else:
params['BrowseNodeId'] = browse_node_id
if response_group is not None:
params['ResponseGroup'] = response_group
return self._call(params)
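# Example usage (a sketch; assumes the wrapper class defined above is named
# AmazonAPI and is constructed with valid credentials):
#
#   api = AmazonAPI(aws_access_key, secret_key, associate_tag)
#   xml = api.item_lookup(host='us', ItemId='B074ZF7PVZ',
#                         ResponseGroup='ItemAttributes')
#   print(xml.Items.Item.ItemAttributes.Title.string)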
|
AmazonAPIWrapper
|
/AmazonAPIWrapper-0.0.11.tar.gz/AmazonAPIWrapper-0.0.11/amazon/amazon_api.py
|
amazon_api.py
|
AmazonASINMatcher is used to get the details of the product using the product links on Amazon.
It validates if the url entered by the user actually points to a product on Amazon.
It searches for all the market places like India, America, Europe, China etc
You can obtain the ASIN/ISBN of the product by just using the product link.
It is written in python2.7
Installation:
pip install AmazonASINMatcher
Usage:
import AmazonASINMatcher
AmazonASINMatcher.url_matcher(product_link) -- returns the object with all the details like validity of the link, market place, ASIN/ISBN etc
AmazonASINMatcher.is_valid_link(product_link) -- returns True if the amazon product link is valid
AmazonASINMatcher.get_market_place(product_link) -- returns the market place of the product link, if invalid url, return blank value
AmazonASINMatcher.get_id(product_link) -- returns the ASIN/ISBN of the url, if invalid url, return blank value
AmazonASINMatcher.get_id_type(product_link) -- returns the id type (e.g. "ISBN" or "ASIN") of the url, if invalid url, return blank value
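A minimal sketch of the functions above (the product link is a placeholder):
```python
import AmazonASINMatcher

link = "https://www.amazon.in/dp/B074ZF7PVZ"  # hypothetical product link
if AmazonASINMatcher.is_valid_link(link):
    print(AmazonASINMatcher.get_market_place(link))
    print(AmazonASINMatcher.get_id(link))       # ASIN/ISBN
    print(AmazonASINMatcher.get_id_type(link))  # "ASIN" or "ISBN"
```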
|
AmazonASINMatcher
|
/AmazonASINMatcher-3.2.tar.gz/AmazonASINMatcher-3.2/README.md
|
README.md
|
# Amazon Pay API SDK (Python)
Amazon Pay Integration.
Please note this is a **Non-Official** Amazon Pay Python SDK and can only be used for API calls to the
**_pay-api.amazon.com|eu|jp_** endpoint.
For more details about the api, please check
the [Official Documentation for developers](https://developer.amazon.com/docs/amazon-pay/intro.html).
## Requirements
* Python 3.x
* requests >= 2.28.1
* pycryptodome >= 3.16.0
## SDK Installation
Use PyPI to install the latest release of the SDK:
```
pip install AmazonPayClient
```
## Configuration
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='jp',
sandbox=True
)
```
If you have created environment specific keys (i.e. Public Key Starts with LIVE or SANDBOX) in Seller Central, then use
those PublicKeyId & PrivateKey. In this case, there is no need to pass the sandbox parameter to the configuration.
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='jp',
)
```
# Versioning
The pay-api.amazon.com|eu|jp endpoint uses versioning to allow future updates. The major version of this SDK will stay
aligned with the API version of the endpoint.
If you have downloaded version 2.x.y of this SDK, the API version would be "v2".
If you need to use a "v1" version of Amazon Pay API,
seek [Official Documentation](https://developer.amazon.com/docs/amazon-pay/intro.html) for help.
# Convenience Functions (Overview)
Make use of the built-in convenience functions to easily make API calls. Scroll down further to see example code
snippets.
When using the convenience functions, the request payload will be signed using the provided private key, and an HTTPS
request is made to the correct regional endpoint.
## Alexa Delivery Trackers API
[Delivery Trackers API Guide](https://developer.amazon.com/docs/amazon-pay-api-v2/delivery-tracker.html)
* Create Delivery Tracker - **create_delivery_tracker**(body: dict)
## Amazon Checkout v2 API
[API Integration Guide](https://amazonpaycheckoutintegrationguide.s3.amazonaws.com/amazon-pay-api-v2/introduction.html)
### Buyer
[Buyer API Guide](https://developer.amazon.com/docs/amazon-pay-api-v2/buyer.html)
* Get Buyer - **get_buyer**(buyer_token: str)
### Checkout Session
[Checkout Session API Guide](https://developer.amazon.com/docs/amazon-pay-api-v2/checkout-session.html)
* Create Checkout Session - **create_checkout_session**(body: dict)
* Get Checkout Session - **get_checkout_session**(checkout_session_id: str)
* Update Checkout Session - **update_checkout_session**(checkout_session_id: str, body: dict)
* Complete Checkout Session - **complete_checkout_session**(checkout_session_id: str, body: dict)
### Charge Permission
[Charge Permission API Guide](https://developer.amazon.com/docs/amazon-pay-api-v2/charge-permission.html)
* Get Charge Permission - **get_charge_permission**(charge_permission_id: str)
* Update Charge Permission - **update_charge_permission**(charge_permission_id: str, body: dict)
* Close Charge Permission - **close_charge_permission**(charge_permission_id: str, body: dict)
### Charge
* Create Charge - **create_charge**(body: dict)
* Get Charge - **get_charge**(charge_id: str)
* Capture - **capture_charge**(charge_id: str, body: dict)
* Cancel Charge - **cancel_charge**(charge_id: str, body: dict)
### Refund
* Create Refund - **create_refund**(body: dict)
* Get Refund - **get_refund**(refund_id: str)
# Convenience Functions Code Samples
## Alexa Delivery Notifications
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=False
)
body = {
'amazonOrderReferenceId': 'P00-0000000-0000000',
'deliveryDetails': [
{
'carrierCode': 'UPS',
'trackingNumber': '1Z999AA10123456784'
}
]
}
response = client.create_delivery_tracker(body)
if response.status_code == 200:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
## Amazon Checkout v2 - Create Checkout Session
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
body = {
'webCheckoutDetails': {
'checkoutReviewReturnUrl': 'https://localhost/store/checkout_review',
'checkoutResultReturnUrl': 'https://localhost/store/checkout_result'
},
'storeId': 'YOUR_STORE_ID'
}
response = client.create_checkout_session(body)
if response.status_code == 201:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
## Amazon Checkout v2 - Get Checkout Session
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
checkout_session_id = '00000000-0000-0000-0000-000000000000'
response = client.get_checkout_session(checkout_session_id)
if response.status_code == 200:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
## Amazon Checkout v2 - Update Checkout Session
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
checkout_session_id = '00000000-0000-0000-0000-000000000000'
body = {
'paymentDetails': {
'chargeAmount': {
'amount': '100',
'currencyCode': 'JPY'
}
}
}
response = client.update_checkout_session(checkout_session_id, body)
if response.status_code == 200:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
## Amazon Checkout v2 - Complete Checkout Session
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
checkout_session_id = '00000000-0000-0000-0000-000000000000'
body = {
'chargeAmount': {
'amount': '100',
'currencyCode': 'JPY'
}
}
response = client.complete_checkout_session(checkout_session_id, body)
if response.status_code == 200:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
## Amazon Checkout v2 - Get Charge Permission
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
charge_permission_id = 'S00-0000000-0000000'
response = client.get_charge_permission(charge_permission_id)
if response.status_code == 200:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
## Amazon Checkout v2 - Update Charge Permission
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
charge_permission_id = 'S00-0000000-0000000'
body = {
'merchantMetadata': {
'merchantReferenceId': '00-00-000000-00',
'merchantStoreName': 'Test Store',
'noteToBuyer': 'Some Note to buyer',
'customInformation': 'Custom Information'
}
}
response = client.update_charge_permission(charge_permission_id, body)
if response.status_code == 200:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
## Amazon Checkout v2 - Close Charge Permission
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
charge_permission_id = 'S00-0000000-0000000'
body = {
'closureReason': 'No more charges required',
'cancelPendingCharges': False
}
response = client.close_charge_permission(charge_permission_id, body)
if response.status_code == 200:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
## Amazon Checkout v2 - Create Charge
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
body = {
'chargePermissionId': 'S00-0000000-0000000',
'chargeAmount': {
'amount': '100',
'currencyCode': 'JPY'
},
'captureNow': True
}
response = client.create_charge(body)
if response.status_code == 201:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
## Amazon Checkout v2 - Get Charge
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
charge_id = 'S00-0000000-0000000-C000000'
response = client.get_charge(charge_id)
if response.status_code == 200:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
## Amazon Checkout v2 - Capture Charge
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
charge_id = 'S00-0000000-0000000-C000000'
body = {
'captureAmount': {
'amount': '100',
'currencyCode': 'JPY'
}
}
response = client.capture_charge(charge_id, body)
if response.status_code == 200:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
## Amazon Checkout v2 - Cancel Charge
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
charge_id = 'S00-0000000-0000000-C000000'
body = {
'cancellationReason': 'REASON DESCRIPTION'
}
response = client.cancel_charge(charge_id, body)
if response.status_code == 200:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
## Amazon Checkout v2 - Create Refund
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
body = {
'chargeId': 'S00-0000000-0000000-C000000',
'refundAmount': {
'amount': '100',
'currencyCode': 'JPY'
},
}
response = client.create_refund(body)
if response.status_code == 201:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
## Amazon Checkout v2 - Get Refund
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
refund_id = 'S00-0000000-0000000-R000000'
response = client.get_refund(refund_id)
if response.status_code == 200:
# success
result = response.json()
print(result)
else:
# check the error
print('Status Code: ' + str(response.status_code) + '\n' + 'Content: ' + response.content.decode(encoding='utf-8') + '\n')
```
# Generate Button Signature (helper function)
The signatures generated by this helper function are only valid for the Checkout v2 front-end buttons. Unlike API
signing, no timestamps are involved, so the result of this function can be considered a static signature that can safely
be placed in your website JS source files and used repeatedly (as long as your payload does not change).
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
payload = '{"webCheckoutDetails": {"checkoutResultReturnUrl": "https://localhost/store/checkout_result", "checkoutMode": "ProcessOrder"}, "chargePermissionType": "OneTime", "paymentDetails": {"paymentIntent": "Confirm", "chargeAmount": {"amount": "100", "currencyCode": "JPY"}}, "storeId": "YOUR_STORE_ID"}'
signature = client.generate_button_signature(payload)
```
You can also use a _dict_ as your payload. But make sure the `json.dumps(payload)` result exactly matches the payload
used in your button, including whitespace.
```python
from AmazonPay import Client
client = Client(
public_key_id='YOUR_PUBLIC_KEY_ID',
private_key='keys/private.pem',
region='us',
sandbox=True
)
payload = {
'webCheckoutDetails': {
'checkoutResultReturnUrl': 'https://localhost/store/checkout_result',
'checkoutMode': 'ProcessOrder'
},
'chargePermissionType': 'OneTime',
'paymentDetails': {
'paymentIntent': 'Confirm',
'chargeAmount': {
'amount': '100',
'currencyCode': 'JPY'
}
},
'storeId': 'YOUR_STORE_ID'
}
signature = client.generate_button_signature(payload)
```
|
AmazonPayClient
|
/AmazonPayClient-2.0.1.tar.gz/AmazonPayClient-2.0.1/README.md
|
README.md
|
import datetime
import base64
import uuid
import json
import urllib.parse
import requests
from Crypto.Signature import pss
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
AMAZON_SIGNATURE_ALGORITHM = 'AMZN-PAY-RSASSA-PSS'
class Client:
def __init__(self, public_key_id=None, private_key=None, region=None, sandbox=False):
"""
Amazon Pay Client
All parameters can be set later using `setup` function
:param str public_key_id: (optional) public key ID
        :param str private_key: (optional) path to the private key file
:param str region: (optional) region `EU / DE / UK / US / NA / JP`
:param bool sandbox: (optional) environment SANDBOX(`True`) / LIVE(`False`). Defaults to `False`.
"""
self.setup(public_key_id, private_key, region, sandbox)
def setup(self, public_key_id=None, private_key=None, region=None, sandbox=False):
"""
Setup of the client configuration
:param str public_key_id: (optional) public key ID
        :param str private_key: (optional) path to the private key file
:param str region: (optional) region `EU / DE / UK / US / NA / JP`
:param bool sandbox: (optional) environment SANDBOX(`True`) / LIVE(`False`). Defaults to `False`.
:return: self
"""
self.public_key_id = public_key_id
self.private_key = private_key
if region is not None:
self.region = region
self.__setup_endpoint()
self.sandbox = sandbox
return self
def get_buyer(self, buyer_token):
"""
Amazon Checkout v2 - Get Buyer
Get details of Buyer which include buyer ID, name, email address, postal code, and country code
when used with the Amazon.Pay.renderButton 'SignIn' productType and corresponding signInScopes
:param str buyer_token: Token used to retrieve buyer details.
This value is appended as a query parameter to signInReturnUrl.
Max length: 1000 characters/bytes
:return: response
:rtype: requests.Response
"""
return self.request('GET', f'/buyers/{buyer_token}')
def create_checkout_session(self, body):
"""
Amazon Checkout v2 - Create Checkout Session
Create a new Amazon Pay Checkout Session to customize and manage the buyer experience,
from when the buyer clicks the Amazon Pay button to when they complete checkout
:param dict body: request body. See
<https://developer.amazon.com/docs/amazon-pay-api-v2/checkout-session.html#request-parameters>
:return: response
:rtype: requests.Response
"""
return self.request('POST', '/checkoutSessions', body)
def get_checkout_session(self, checkout_session_id):
"""
Amazon Checkout v2 - Get Checkout Session
        Get Checkout Session details, which include buyer info, payment instrument details, and shipping address.
        The shipping address will only be returned if the Checkout Session has the PayAndShip product type.
Use this operation to determine if checkout was successful after the buyer returns
from the AmazonPayRedirectUrl to the specified checkoutResultReturnUrl
:param str checkout_session_id: Checkout session identifier
:return: response
:rtype: requests.Response
"""
return self.request('GET', f'/checkoutSessions/{checkout_session_id}')
def update_checkout_session(self, checkout_session_id, body):
"""
Amazon Checkout v2 - Update Checkout Session
Update the Checkout Session with transaction details. You can keep updating the Checkout Session until
the buyer is redirected to amazonPayRedirectUrl. Once all mandatory parameters have been set,
the Checkout Session object will respond with a unique amazonPayRedirectUrl that you will use to redirect
the buyer to complete checkout.
        Set chargeAmount to the value that should be processed using the paymentIntent during checkout.
If you need to split the order to capture additional payment after checkout is complete,
use the optional totalOrderAmount parameter to set the full order amount
:param str checkout_session_id: Checkout Session identifier
:param dict body: request body. See
<https://developer.amazon.com/docs/amazon-pay-api-v2/checkout-session.html#request-parameters-2>
:return: response
:rtype: requests.Response
"""
return self.request('PATCH', f'/checkoutSessions/{checkout_session_id}', body)
def complete_checkout_session(self, checkout_session_id, body):
"""
Amazon Checkout v2 - Complete Checkout Session
Complete Checkout Session after the buyer returns to checkoutResultReturnUrl to finalize the paymentIntent.
The chargeAmount in the request must match the Checkout Session object paymentDetails.chargeAmount to verify
the transaction amount
:param str checkout_session_id: Checkout Session identifier
:param dict body: request body. See
<https://developer.amazon.com/docs/amazon-pay-api-v2/checkout-session.html#request-parameters-3>
:return: response
:rtype: requests.Response
"""
return self.request('POST', f'/checkoutSessions/{checkout_session_id}/complete', body)
def get_charge_permission(self, charge_permission_id):
"""
Amazon Checkout v2 - Get Charge Permission
Get Charge Permission to determine if this Charge Permission can be used to charge the buyer.
You can also use this operation to retrieve buyer details and their shipping address after a successful checkout.
You can only retrieve details for 30 days after the time that the Charge Permission was created
:param str charge_permission_id: Charge Permission identifier
:return: response
:rtype: requests.Response
"""
return self.request('GET', f'/chargePermissions/{charge_permission_id}')
def update_charge_permission(self, charge_permission_id, body):
"""
Amazon Checkout v2 - Update Charge Permission
Update the Charge Permission with your external order metadata or the recurringMetadata if subscription details change
:param str charge_permission_id: Charge Permission identifier
:param dict body: request body. See
<https://developer.amazon.com/docs/amazon-pay-api-v2/charge-permission.html#request-parameters-1>
:return: response
:rtype: requests.Response
"""
return self.request('PATCH', f'/chargePermissions/{charge_permission_id}', body)
def close_charge_permission(self, charge_permission_id, body=None):
"""
Amazon Checkout v2 - Close Charge Permission
Moves the Charge Permission to a Closed state.
No future charges can be made and pending charges will be canceled if you set cancelPendingCharges to true
:param str charge_permission_id: Charge Permission identifier
:param dict body: request body. See
<https://developer.amazon.com/docs/amazon-pay-api-v2/charge-permission.html#request-parameters-2>
:return: response
:rtype: requests.Response
"""
if body is None:
body = {}
return self.request('DELETE', f'/chargePermissions/{charge_permission_id}/close', body)
def create_charge(self, body):
"""
Amazon Checkout v2 - Create Charge
Create a Charge to authorize payment if you have a Charge Permission in a Chargeable state.
You can optionally capture payment immediately by setting captureNow to true.
You can create up to 25 Charges per one-time Charge Permission
:param dict body: request body. See
<https://developer.amazon.com/docs/amazon-pay-api-v2/charge.html#request-parameters>
:return: response
:rtype: requests.Response
"""
return self.request('POST', '/charges', body)
def get_charge(self, charge_id):
"""
Amazon Checkout v2 - Get Charge
Get details of Charge such as charge amount and authorization state.
Use this operation to determine if authorization or capture was successful
:param str charge_id: Charge identifier
:return: response
:rtype: requests.Response
"""
return self.request('GET', f'/charges/{charge_id}')
def capture_charge(self, charge_id, body):
"""
Amazon Checkout v2 - Capture Charge
Capture payment on a Charge in the Authorized state.
A successful Capture will move the Charge from Authorized to Captured state.
The Captured state may be preceded by a temporary CaptureInitiated state
if payment was captured more than 7 days after authorization.
An unsuccessful Charge will move to a Declined state if payment was declined
:param str charge_id: Charge identifier
:param dict body: request body. See
<https://developer.amazon.com/docs/amazon-pay-api-v2/charge.html#request-parameters-2>
:return: response
:rtype: requests.Response
"""
return self.request('POST', f'/charges/{charge_id}/capture', body)
def cancel_charge(self, charge_id, body=None):
"""
Amazon Checkout v2 - Cancel Charge
Moves Charge to Canceled state and releases any authorized payments.
You can call this operation until Capture is initiated while Charge is in an AuthorizationInitiated or Authorized state
:param str charge_id: Charge identifier
:param dict body: request body. See
<https://developer.amazon.com/docs/amazon-pay-api-v2/charge.html#request-parameters-3>
:return: response
:rtype: requests.Response
"""
if body is None:
body = {}
return self.request('DELETE', f'/charges/{charge_id}/cancel', body)
def create_refund(self, body):
"""
Amazon Checkout v2 - Create Refund
Initiate a full or partial refund for a Charge.
At your discretion, you can also choose to overcompensate the buyer and refund more than
the original Charge amount by either 15% or 75 USD/GBP/EUR or 8,400 YEN (whichever is less)
:param dict body: request body. See
<https://developer.amazon.com/docs/amazon-pay-api-v2/refund.html#request-parameters>
:return: response
:rtype: requests.Response
"""
return self.request('POST', '/refunds', body)
def get_refund(self, refund_id):
"""
Amazon Checkout v2 - Get Refund
Get details of refund
:param str refund_id: Refund identifier
:return: response
:rtype: requests.Response
"""
return self.request('GET', f'/refunds/{refund_id}')
def create_delivery_tracker(self, body):
"""
Amazon Checkout v2 - Create Delivery Tracker
Create a Delivery Tracker once an order has been shipped and a tracking code has been generated.
The buyer will receive a notification on their Alexa-enabled device when the order is shipped and when the order is delivered.
Note that tracking codes can only be used once
:param body: request body. See
<https://developer.amazon.com/docs/amazon-pay-api-v2/delivery-tracker.html#request-parameters>
:return: response
:rtype: requests.Response
"""
return self.request('POST', '/deliveryTrackers', body)
def request(self, method, api, body=None, query=None):
"""
Send request to Amazon Pay API.
The request is signed following steps below.
- Step 1: Generate a canonical request
Arrange the contents of your request (host, action, headers, etc.) into a standard (canonical) format.
- Step 2: Create a String to Sign
Create a string to sign by concatenating the hashing algorithm designation (AMZN-PAY-RSASSA-PSS) and the digest (hash) of the canonical request.
- Step 3: Calculate the Signature
Sign the string to sign using RSASSA-PSS algorithm with SHA256 hashing and then Base64 encode the result.
- Step 4: Add the Signature to the HTTP Request
After the signature is calculated, add it as a request header.
For more information about signing requests, see
<https://developer.amazon.com/docs/amazon-pay-api-v2/signing-requests.html>
:param str method: request method. `GET / POST / PATCH / DELETE`
:param str api: api to call
:param dict body: request body
:param dict query: query parameters
:return: response
:rtype: requests.Response
"""
query = query if query is not None else {}
if type(body) is str:
payload = body
elif body is None:
payload = ''
else:
payload = json.dumps(body)
api = self.__build_api(api)
headers = self.__build_headers(method, api, query, payload)
url = self.__build_url(api, query)
return requests.request(method, url, data=payload, headers=headers)
def generate_button_signature(self, payload):
"""
Generate static signature for amazon.Pay.renderButton used by checkout.js
:param payload: payload that Amazon Pay will use to create a Checkout Session object. See
<https://developer.amazon.com/docs/amazon-pay-checkout/add-the-amazon-pay-button.html#2-generate-the-create-checkout-session-payload>
:return: signed signature
:rtype: str
"""
if type(payload) is dict:
payload = json.dumps(payload)
hashed_button_request = AMAZON_SIGNATURE_ALGORITHM + '\n' + self.__hash_and_hex(payload or '')
return self.__sign_signature(hashed_button_request)
def __build_headers(self, method, api, query, payload):
timestamp = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
query_string = self.__build_query_string(query)
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Amz-Pay-Region': self.region,
'X-Amz-Pay-Date': timestamp,
'X-Amz-Pay-Host': urllib.parse.urlparse(self.endpoint).netloc or '/',
}
if method.lower() == 'post':
headers['X-Amz-Pay-Idempotency-Key'] = uuid.uuid4().hex
canonical_request = method.upper() + '\n'
canonical_request += api + '\n'
canonical_request += query_string + '\n'
signed_header_list = []
for header_key in sorted(headers.keys()):
if headers[header_key] == '' or headers[header_key] is None:
continue
canonical_request += header_key.lower() + ':' + headers[header_key] + '\n'
signed_header_list.append(header_key.lower())
canonical_request += '\n'
canonical_request += ';'.join(signed_header_list) + '\n'
canonical_request += self.__hash_and_hex(payload)
string_to_sign = AMAZON_SIGNATURE_ALGORITHM + '\n' + self.__hash_and_hex(canonical_request)
signature = self.__sign_signature(string_to_sign)
headers['Authorization'] = AMAZON_SIGNATURE_ALGORITHM + \
' PublicKeyId=' + self.public_key_id + ',' \
' SignedHeaders=' + ';'.join(signed_header_list) + ',' \
' Signature=' + signature
return headers
@staticmethod
def __build_query_string(query):
query_list = []
for k in sorted(query.keys()):
if query[k] == '' or query[k] is None:
continue
query_name = urllib.parse.quote(k, safe='')
query_value = urllib.parse.quote(query[k], safe='')
query_list.append(query_name + '=' + query_value)
return '&'.join(query_list)
def __build_url(self, api, query):
url = self.endpoint + api
query_string = self.__build_query_string(query)
if query_string != '':
url = url + '?' + query_string
return url
def __build_api(self, api):
api = '/v2' + api
if self.public_key_id.startswith('LIVE') or self.public_key_id.startswith('SANDBOX'):
return api
return '/' + ('sandbox' if self.sandbox else 'live') + api
@staticmethod
def __hash_and_hex(string):
return SHA256.new(string.encode()).hexdigest()
def __sign_signature(self, string_to_sign):
        # Accept either an in-memory PEM string or a path to a key file.
        if 'BEGIN RSA PRIVATE KEY' in self.private_key or 'BEGIN PRIVATE KEY' in self.private_key:
            private = self.private_key
        else:
            with open(self.private_key, 'r') as key_file:
                private = key_file.read()
rsa = RSA.import_key(private)
signature = pss.new(rsa, salt_bytes=20).sign(SHA256.new(string_to_sign.encode()))
return base64.b64encode(signature).decode()
def __setup_endpoint(self):
region_mappings = {
'eu': 'eu',
'de': 'eu',
'uk': 'eu',
'us': 'na',
'na': 'na',
'jp': 'jp'
}
endpoint_mappings = {
'eu': 'pay-api.amazon.eu',
'na': 'pay-api.amazon.com',
'jp': 'pay-api.amazon.jp'
}
region = self.region.lower()
if region not in region_mappings:
raise Exception(self.region + ' is not a valid region.')
self.endpoint = 'https://' + endpoint_mappings[region]
return self
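# Sketch: verifying a signature produced by this client with the matching
# public key (pycryptodome; 'keys/public.pem' is a placeholder path):
#
#   from Crypto.Signature import pss
#   from Crypto.Hash import SHA256
#   from Crypto.PublicKey import RSA
#   import base64
#
#   public = RSA.import_key(open('keys/public.pem').read())
#   pss.new(public, salt_bytes=20).verify(
#       SHA256.new(string_to_sign.encode()),
#       base64.b64decode(signature))  # raises ValueError on mismatch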
|
AmazonPayClient
|
/AmazonPayClient-2.0.1.tar.gz/AmazonPayClient-2.0.1/AmazonPay/client.py
|
client.py
|
# Amazon Product Scrapper (Version 2021.1.1)
To install this python package, run this in terminal : **`pip install AmazonProductScrapper`**
Check out the code here on GitHub.
**GitHub Url** : [https://github.com/subhajit2001/AmazonProductScrapper](https://github.com/subhajit2001/AmazonProductScrapper "https://github.com/subhajit2001/AmazonProductScrapper")
Check out the published python package here on PyPi.
**PyPi Url** : [https://pypi.org/project/AmazonProductScrapper/](https://pypi.org/project/AmazonProductScrapper/ "https://pypi.org/project/AmazonProductScrapper/")
This package helps to extract product data for all the available products for a given keyword (e.g. laptops, headphones). The data is returned in the form of a pandas dataframe.
The package can be loaded as:
```py
import AmazonProductScrapper as aps
```
Extract product data for all laptops available in real time:
```py
df = aps.load_Amazon_product_data("laptops")
```
df is a Pandas Dataframe. We can view the dataframe as:
```py
print(df)
```
The maximum number of pages that can be loaded for a particular keyword is 20.
The extracted pandas Dataframe contains the data about individual products:
1. ***Product_Name** (Product Name)*
2. ***Product_Price** (Price shown for the product)*
3. ***Actual_Product_Price** (Undiscounted Price)*
4. ***Product_Rating(5)** (Product Rating)*
5. ***No._of_ratings** (Total number of ratings given by customers)*
6. ***Link** (Link of the respective product page)*
For extraction of reviews for a particular product, pass the URL of the product and the number of pages of reviews you want to fetch. Each page contains approximately 10 reviews, and a maximum of 500 pages can be fetched.
```py
df = aps.load_Amazon_product_review_data("https://www.amazon.in/Boat-BassHeads-900-Wired-Headphone/dp/B074ZF7PVZ/ref=sr_1_3?crid=UPDZLG5ER2D3&keywords=headphones&qid=1640276178&sprefix=headphone%2Caps%2C647&sr=8-3", 500)
```
Provide 2 arguments to the function above: the URL of the product, and the number of pages to be loaded, whose value must be within 1 to 500.
```py
print(df)
```
The pandas Dataframe contains the following data for each review:
1. ***Product** (Product Name)*
2. ***Title** (Title of the review given by the customer)*
3. ***Rating** (Rating given by customer)*
4. ***Review** (Review given by the customer)*
External Base packages required for making this library: Requests, bs4, Pandas
Credits for **progress bar**:
* [https://stackoverflow.com/a/34325723](https://stackoverflow.com/a/34325723 "https://stackoverflow.com/a/34325723")
Do review the project and don’t forget to give your valuable suggestions to me here at: [email protected]
Do submit a pull request if you want to.
Developer: Subhajit Saha. This project is licensed under `MIT License`.
`Copyright (c) 2021-22 Subhajit Saha`
|
AmazonProductScrapper
|
/AmazonProductScrapper-2021.1.1.tar.gz/AmazonProductScrapper-2021.1.1/README.md
|
README.md
|
[English](README.md) | [中文](README.zh.md)
# AmberMDrun
Easy to use, easy to expand, high-performance Amber simulation package
## Install
This software only supports **Linux** because some Linux system functions are called. **Mac OS X** and **Windows** are not supported.
### Necessary
Ambertools, python3, and python3-dev are required. Amber acceleration package is optional, but we strongly recommend installing high-performance pmemd module. [Amber](https://ambermd.org/)
You can refer to the [Amber installation tutorial](https://ambermd.org/Installation.php) to install Amber.
Then, you can choose several installation methods.
1. Anaconda(**recommend**)





~~~bash
conda install ambermdrun -c zjack
~~~
<br>
2. PYPI
PyPI installation requires a C++ compiler that supports the C++17 standard.
GCC 8 does not fully support the filesystem library of the C++17 standard, so GCC 9 or higher is required. Because of this, different systems need different setup steps, and we recommend using conda for installation.

Ubuntu >= 20
~~~bash
apt install g++ libpython3-dev
pip install AmberMDrun
~~~
centos7
We recommend the [Red Hat Developer Toolset](https://access.redhat.com/documentation/en-us/red_hat_developer_toolset/9) for a higher version of GCC.
~~~bash
yum update
yum install epel-release
yum install centos-release-scl-rh
yum install devtoolset-9
source /opt/rh/devtoolset-9/enable # enable gcc-9
yum install python3-devel
pip install AmberMDrun
~~~
3. You can also choose to build from source code.
~~~bash
git clone https://github.com/9527567/AmberMD --recursive
python setup.py install --user
~~~
### Optional
If you want to use AmberMDrun to calculate MMPB (GB) SA, then additional programs are necessary.
- [ACPYPE](https://github.com/alanwilter/acpype)
- [gmx_MMPBSA](https://github.com/Valdes-Tresanco-MS/gmx_MMPBSA)
## How to use parm7 and rst7 for dynamic simulation
~~~bash
usage: amberMDrun [-h] --parm7 PARM7 --rst7 RST7 [--temp TEMP] [--ns NS] [--addmask ADDMASK] [--gamd GAMD] [--MIN MIN] [--MD MD]
Tools for automated operation of AMBER MD
options:
-h, --help show this help message and exit
--parm7 PARM7, -p PARM7
amber top file
--rst7 RST7, -c RST7 amber rst file
--temp TEMP, -t TEMP Temperature
--ns NS, -n NS time for MD(ns)
  --addmask ADDMASK add restraint mask
--gamd GAMD if run gamd
--MIN MIN Engine for MIN
--MD MD Engine for MD
~~~
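For example, running a 100 ns simulation at 300 K might look like this (a sketch; file names are placeholders):
~~~bash
amberMDrun -p system.parm7 -c system.rst7 -t 300 -n 100
~~~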
## How to calculate MM-PB (GB) SA between small molecules and proteins of a single drug
~~~bash
usage: mmpbsa [-h] --protein PROTEIN [--mol2 MOL2] [--temp TEMP] [--ns NS] [--charge CHARGE] [--multiplicity MULTIPLICITY] [--MIN MIN] [--MD MD]
Tools for automating the operation of MMPBSA
options:
-h, --help show this help message and exit
--protein PROTEIN, -p PROTEIN
pdb file for protein
--mol2 MOL2, -m MOL2 mol2 file for mol
--temp TEMP, -t TEMP Temperature
--ns NS, -n NS time for MD(ns)
--charge CHARGE charge of mol
--multiplicity MULTIPLICITY
multiplicity of mol
--MIN MIN Engine for MIN
--MD MD Engine for MD
~~~
Typically, the complex structure after molecular docking is used to perform MMPBSA calculations. We therefore provide a short piece of code to handle the pdb format of the complex: when your complex structure is docked and the ligand is in the desired initial position, you can directly provide the pdb file of the complex. The following is an example. **Note that we will not actively handle the hydrogen atoms of the ligand for you; you need to ensure that the ligand's hydrogens are correct.**
~~~bash
mmpbsa -p complex.pdb
~~~
## How to extend code through inheritance classes
Will be described in the near future
## How to cite
bibtex:
~~~tex
@Article{biom13040635,
AUTHOR = {Zhang, Zhi-Wei and Lu, Wen-Cai},
TITLE = {AmberMDrun: A Scripting Tool for Running Amber MD in an Easy Way},
JOURNAL = {Biomolecules},
VOLUME = {13},
YEAR = {2023},
NUMBER = {4},
ARTICLE-NUMBER = {635},
URL = {https://www.mdpi.com/2218-273X/13/4/635},
ISSN = {2218-273X},
DOI = {10.3390/biom13040635}
}
~~~
|
AmberMDrun
|
/AmberMDrun-0.0.4.zip/AmberMDrun-0.0.4/README.md
|
README.md
|
import numpy as np
import os
from Bio.PDB import *
from sklearn.metrics.pairwise import euclidean_distances
import warnings
warnings.simplefilter('ignore')
def count_waters(pdb):
lines = open(pdb, 'r').readlines()
lines = [line for line in lines if line.startswith('ATOM')]
resnames = [line[17:20] for line in lines]
waters = int(resnames.count('WAT')/3)
return waters
def get_box_dimensions(pdb):
lines = open(pdb, 'r').readlines()
for line in lines:
if line.startswith('CRYST'):
x = line[8:15]
y = line[17:24]
z = line[26:33]
return [float(x), float(y), float(z)]
def get_protein_termini(pdb):
    lines = open(pdb, 'r').readlines()
    lines = [line for line in lines if line.startswith('ATOM')]
    # Residue sequence numbers live in columns 23-26 of a PDB ATOM record
    resids = [int(line[22:26].strip()) for line in lines if line[17:20] != 'WAT']
    # Check if resids are continuous (sort the set, since sets are unordered)
    if list(range(min(resids), max(resids)+1)) == sorted(set(resids)):
        return min(resids), max(resids)
    #TODO Handle non-continuous protein sequence
def translate(pdb, centre):
'''
Translate pdb file to centre = [x, y, z]
'''
parser = PDBParser()
structure = parser.get_structure('tmp', pdb)
coords = []
for atom in structure.get_atoms():
coords.append(atom.get_coord())
com = np.mean(coords, axis=0)
for atom in structure.get_atoms():
atom.set_coord(atom.get_coord() - com + np.array(centre))
io = PDBIO()
io.set_structure(structure)
io.save('tmp.pdb')
os.system(f'rm {pdb}')
os.rename('tmp.pdb', pdb)
def get_max_distance(pdb, residues=None):
'''
Get the maximum euclidean distance between any two points in a pdb file.
'''
parser = PDBParser()
structure = parser.get_structure('tmp', pdb)
if residues is None:
atoms = structure.get_atoms()
elif type(residues) is tuple:
atoms = []
for res in structure.get_residues():
if residues[0] <= res.get_id()[1] <= residues[1]:
for atom in res.get_atoms():
atoms.append(atom)
coords = np.array([atom.get_coord() for atom in atoms])
return np.max(euclidean_distances(coords,coords))
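# Example usage (a sketch; 'protein.pdb' is a placeholder file):
#
#   translate('protein.pdb', [0.0, 0.0, 0.0])   # centre the structure at the origin
#   print(get_max_distance('protein.pdb'))      # largest atom-atom distance
#   print(get_protein_termini('protein.pdb'))   # (first_resid, last_resid)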
|
AmberPy
|
/AmberPy-0.0.1-py3-none-any.whl/amberpy/tools.py
|
tools.py
|
import tempfile
from subprocess import PIPE, Popen
import os
from amberpy.tools import get_max_distance
from amberpy.utilities import get_name_from_input_list
import amberpy.cosolvents as cosolvents_dir
from amberpy.cosolvents import COSOLVENTS
class TleapInput:
'''
Tleap Input object
'''
def __init__(
self,
protein_forcefield: str = "ff19SB",
water_forcefield: str = "tip3p",
solvate: bool = True,
shape: str = "box",
distance: float = 12.0,
distance_from_residues: tuple = None,
ions: dict = {
"Na+": 0,
"Cl-": 0
},
save_protein: bool = True,
ions_rand: bool = True,
box_size: float = None,
no_centre: bool = False,
frcmod_list=None,
mol2_dict=None
):
self.protein_forcefield = protein_forcefield
self.water_forcefield = water_forcefield
self.solvate = solvate
self.distance = distance
self.distance_from_residues = distance_from_residues
self.ions = ions
self.save_protein = save_protein
self.shape = shape
self.ions_rand = ions_rand
self.frcmod_list = frcmod_list
self.mol2_dict = mol2_dict
if box_size is not None:
if type(box_size) is int:
box_size = float(box_size)
if type(box_size) is float:
self.box_size = [box_size, box_size, box_size]
elif type(box_size) is list:
if len(box_size) != 3:
raise Exception('Please provide either 1 number or list of 3'+
' numbers for box size')
else:
self.box_size = [float(x) for x in box_size]
else:
raise Exception('Please provide either 1 number or list of 3'+
' numbers for box size')
else:
self.box_size = None
self.no_centre = no_centre
def run(
self,
pdb,
parm7_out,
rst7_out,
pdb_out=None
):
tleap_lines = f"source leaprc.protein.{self.protein_forcefield}\n"
if self.solvate or self.box_size:
tleap_lines += f"source leaprc.water.{self.water_forcefield}\n"
        if self.frcmod_list is not None:
            for frcmod in self.frcmod_list:
                if frcmod is not None:
                    tleap_lines += f"loadamberparams {frcmod}\n"
        if self.mol2_dict is not None:
            for name, mol2 in self.mol2_dict.items():
                if mol2 is not None:
                    tleap_lines += f"{name} = loadmol2 {mol2}\n"
        if pdb is not None:
            tleap_lines += f"mol = loadpdb {pdb}\n"
if self.solvate:
distance = self.distance
if self.distance_from_residues:
start, stop, distance = self.distance_from_residues
d1 = get_max_distance(pdb, residues=(start, stop))
d2 = get_max_distance(pdb)
distance -= (d2 - d1)/2
tleap_lines += f"solvate{self.shape} mol TIP3PBOX {distance} iso\n"
if self.ions:
for ion, count in self.ions.items():
if self.ions_rand:
tleap_lines += f"addionsrand mol {ion} {count}\n"
else:
tleap_lines += f"addions mol {ion} {count}\n"
if self.box_size:
x, y, z = self.box_size
tleap_lines += 'set mol box {'+f'{x} {y} {z}'+'}\n'
if self.no_centre:
tleap_lines += 'set default nocenter on\n'
if self.save_protein:
tleap_lines += f"savepdb mol {pdb_out}\n"
tleap_lines += f"logfile {parm7_out.replace('parm7', 'tleap.log')}\n"
tleap_lines += f"saveamberparm mol {parm7_out} {rst7_out}\nquit"
print(f'Running Tleap with input:\n{tleap_lines}\n')
run_tleap(tleap_lines)
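# Example (a sketch; file names are placeholders):
#
#   tleap = TleapInput(distance=10.0, shape='oct', ions={'Na+': 0, 'Cl-': 0})
#   tleap.run('protein.pdb', 'protein.parm7', 'protein.rst7',
#             pdb_out='protein.tleap.pdb')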
class PackmolInput:
'''
Packmol Input object
'''
def __init__(
self,
n_cosolvents: int = 100,
n_waters: int = None,
seed: int = -1,
distance: float = 9.0,
box_size: float = 100.0,
tolerance: float = 2.0,
):
'''
Parameters
----------
n_cosolvents : int, optional
Number of cosolvent molecules to add. The default is 100.
n_waters : int, optional
Number of water molecules to add. The default is None.
seed : int, optional
Random seed for adding cosolvent molecules. The default is -1.
        distance : float, optional
            Maximum distance from the protein within which cosolvents
            are added (only if protein present). The default is 9.0.
box_size : float, optional
Size of box to which cosolvents should be added
(only if protein is not present). The default is 100.0.
tolerance : float, optional
Minimum distance between pairs of atoms of different molecules.
The default is 2.0.
'''
self.n_cosolvents = n_cosolvents
self.n_waters = n_waters
self.seed = seed
self.distance = distance
if type(box_size) is int:
box_size = float(box_size)
if type(box_size) is float:
self.box_size = [box_size, box_size, box_size]
elif type(box_size) is list:
if len(box_size) != 3:
raise Exception('Please provide either 1 number or list of 3'+
' numbers for box size')
else:
self.box_size = [float(x) for x in box_size]
else:
raise Exception('Please provide either 1 number or list of 3'+
' numbers for box size')
self.tolerance = tolerance
def run(self, cosolvent_pdb, pdb_out, protein_pdb=None):
packmol_lines = (f"tolerance {self.tolerance}\n"
"filetype pdb\n"
f"output {pdb_out}\n")
        # If a protein pdb file is provided, place the protein at the origin
        if protein_pdb is not None:
            packmol_lines += (f"structure {protein_pdb}\n"
                              f" seed 0\n"
                              " number 1\n"
                              " center\n"
                              " fixed 0. 0. 0. 0. 0. 0.\n"
                              " add_amber_ter\n"
                              "end structure\n")
            # Allow cosolvents up to self.distance beyond the protein's radius
            sphere_size = (get_max_distance(protein_pdb)/2) + self.distance
packmol_lines += (f"structure {cosolvent_pdb}\n"
f" seed {self.seed}\n"
f" number {self.n_cosolvents}\n"
f" inside sphere 0. 0. 0. {sphere_size}\n"
" resnumbers 2\n"
" add_amber_ter\n"
"end structure\n")
# If no protein pdb file provided, just add cosolvent molecules
else:
water = os.path.join(cosolvents_dir.__path__._path[0], 'water.pdb')
x, y, z = self.box_size
if self.n_waters is not None:
packmol_lines += (f'structure {water} \n'
f' number {self.n_waters} \n'
f' inside box 0. 0. 0. {x} {y} {z} \n'
" add_amber_ter\n"
'end structure\n')
packmol_lines += (f'structure {cosolvent_pdb}\n'
f' number {self.n_cosolvents}\n'
f' inside box 0. 0. 0. {x} {y} {z} \n'
" add_amber_ter\n"
'end structure\n')
print(f'Running Packmol with the input:\n{packmol_lines}\n')
run_packmol(packmol_lines)
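# Example (a sketch; file names are placeholders; with no protein pdb the
# cosolvents are packed into a box rather than a sphere):
#
#   packmol = PackmolInput(n_cosolvents=50, box_size=60.0)
#   packmol.run('ethanol.pdb', 'mixture.packmol.pdb')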
class Setup:
def __init__(self, name, protein_pdb=None, cosolvent=None, simulation_directory=os.getcwd()):
# Define list of valid inputs. If adding new inputs to the class, place
# them in here
input_list = [protein_pdb, cosolvent]
if all(inp is None for inp in input_list):
raise Exception('No valid inputs provided')
        # If no name given, generate the name from the input files
        if name is None:
            name = get_name_from_input_list(input_list)
        self.name = name
# Set protein_pdb attribute even if it is None so that we can let
# PackmolInput handle whether or not there is a protein
self.protein_pdb = protein_pdb
if cosolvent is not None:
self._get_cosolvent_file_names(cosolvent)
self.simulation_directory = simulation_directory
self.parm7 = os.path.join(self.simulation_directory, self.name) + '.parm7'
self.rst7 = os.path.join(self.simulation_directory, self.name) + '.rst7'
self.tleap_pdb = os.path.join(self.simulation_directory, self.name) + '.tleap.pdb'
def run_packmol(self,
n_waters = None,
n_cosolvents = 100,
box_size = [50,50,50],
packmol_input = None):
if packmol_input is None:
if box_size is None:
raise Exception('Please provide a box size')
kwargs = {}
kwargs['box_size'] = box_size
kwargs['n_waters'] = n_waters
kwargs['n_cosolvents'] = n_cosolvents
packmol = PackmolInput(**kwargs)
elif isinstance(packmol_input, PackmolInput):
packmol = packmol_input
else:
raise Exception('packmol_input must be an instance of the PackmolInput class or None')
packmol.run(self.cosolvent_pdb, self.packmol_pdb, self.protein_pdb)
def run_tleap(self,
box_distance: float = 12.0,
box_shape: str = 'box',
ions: dict = {'Na+': 0, 'Cl-':0},
tleap_input: TleapInput = None):
'''
Solvates the pdb file and creates paramater/topology and coordinate
files for simulation.
Parameters
----------
box_distance : float
Minimum distance between the protein and the edge of the water box.
box_shape : str
Shape of the simulation box. Choose from either 'box' (cuboid) or
'oct' (truncated octahedron).
ions : dict
Ions to add to the system. This should be a dictionary where the
keys are the ions and the values are the number of ions to add.
A value of 0 will attempt to neutralise the system with that ion.
tleap_input : TleapInput
Overrides all other arguments and instead uses a TleapInput
instance.
'''
if tleap_input is None:
kwargs = {}
kwargs['distance'] = box_distance
kwargs['shape'] = box_shape
kwargs['ions'] = ions
tleap = TleapInput(**kwargs)
elif isinstance(tleap_input, TleapInput):
tleap = tleap_input
else:
raise Exception('tleap_input must be an instance of the TleapInput class or None')
if hasattr(self, 'packmol_pdb'):
tleap.run(self.packmol_pdb, self.parm7, self.rst7, self.tleap_pdb)
else:
tleap.run(self.protein_pdb, self.parm7, self.rst7, self.tleap_pdb)
def run_parmed(self):
self.hmr = True
hmr_parm7 = self.parm7.replace('.parm7', '.HMR.parm7')
run_parmed(self.parm7, hmr_parm7)
self.parm7 = hmr_parm7
def _get_cosolvent_file_names(self, cosolvent):
# Check cosolvent is available
if cosolvent not in COSOLVENTS.keys():
raise Exception(f'{cosolvent} not in cosolvent directory')
self.cosolvent = cosolvent
# Get cosolvent type from COSOLVENTS dict
cosolvent_type = COSOLVENTS[self.cosolvent][0]
# Get cosolvent pdb file name
self.cosolvent_pdb = COSOLVENTS[self.cosolvent][1]
# If cosolvent is a small molecule, add an frcmod and mol2 file
if cosolvent_type == 'small_molecules':
cosolvent_mol2 = COSOLVENTS[self.cosolvent][2]
cosolvent_frcmod = COSOLVENTS[self.cosolvent][3]
self.frcmod_list = [cosolvent_frcmod]
self.mol2_dict = {os.path.basename(cosolvent_mol2).split('.')[0] : cosolvent_mol2}
self.packmol_pdb = os.path.join(self.simulation_directory, self.name) + '.packmol.pdb'
def run_parmed(parm7, HMRparm7):
if os.path.exists(f"{HMRparm7}"):
os.remove(f"{HMRparm7}")
parmed_inp = tempfile.NamedTemporaryFile(mode="w",
delete=False,
prefix="parmed-",
suffix=".inp")
parmed_inp.write(
f"parm {parm7}\nHMassRepartition\noutparm {HMRparm7}\nquit\n"
)
parmed_inp.close()
process = Popen(
["parmed < {}".format(parmed_inp.name)],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
universal_newlines=True,
shell=True,
)
out, err = process.communicate()
os.remove(parmed_inp.name)
print(out, err)
return out
def run_tleap(tleap_lines):
tleap_inp = tempfile.NamedTemporaryFile(mode="w",
delete=False,
prefix="tleap-",
suffix=".inp")
tleap_inp.write(tleap_lines)
tleap_inp.close()
p = Popen(
["tleap -s -f {}".format(tleap_inp.name)],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
universal_newlines=True,
shell=True,
)
out, err = p.communicate()
print(out, err)
os.remove(tleap_inp.name)
return out
def run_packmol(packmol_lines):
packmol_inp = tempfile.NamedTemporaryFile(mode="w",
delete=False,
prefix="packmol-",
suffix=".inp")
packmol_inp.write(packmol_lines)
packmol_inp.close()
p = Popen(
["packmol < {}".format(packmol_inp.name)],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
universal_newlines=True,
shell=True,
)
out, err = p.communicate()
print(out, err)
os.remove(packmol_inp.name)
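if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: drives the
    # run_tleap helper above with a hand-written tleap script. Assumes
    # AmberTools is on PATH; 'protein.pdb' and the force-field choices are
    # hypothetical placeholders.
    tleap_lines = '\n'.join([
        'source leaprc.protein.ff14SB',
        'source leaprc.water.tip3p',
        'mol = loadpdb protein.pdb',
        'solvatebox mol TIP3PBOX 12.0',
        'addions mol Na+ 0',
        'saveamberparm mol protein.parm7 protein.rst7',
        'quit',
    ])
    run_tleap(tleap_lines)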
|
AmberPy
|
/AmberPy-0.0.1-py3-none-any.whl/amberpy/md_setup.py
|
md_setup.py
|
import os
from amberpy.md_setup import Setup, TleapInput, PackmolInput
from amberpy.simulation import Simulation
from amberpy.tools import get_protein_termini
from amberpy.utilities import get_name_from_input_list
class Experiment(Setup, Simulation):
'''Base class for setting up and running an experiment.
This class inherits from both the Simulation and Setup classes.
This class is intended to be inherited from by other classes that perform
    specific tasks e.g. ProteinExperiment, CosolventExperiment,
ProteinCosolventExperiment. If you want to make your own type of
experiment then you should inherit from this class.
Attributes
----------
name : str
The name of the experiment.
replica_name : str
The name of the replica (if this experiment is a replica).
directory : str
The directory in which the experiment will take place.
root_directory : str
The root directory containing all of the experiment directories if this
experiment is a replica.
'''
def __init__(self, name, replica_name=None, protein_pdb=None, cosolvent=None):
'''
Parameters
----------
name : str
The name to call the experiment. The name is required as it is
assumed that name generation will be handled by objects inheriting
from this class.
replica_name : str or None, default=None
The name to call the replica (if this experiment is a replica). If
this is not a replica then leave as the default, None.
protein_pdb : str, default=None
Path to the protein_pdb file to be simulated.
cosolvent : str, default=None
Three letter name of the cosolvent to be simulated. Available
names are any pdb file names in the amberpy/cosolvent
sub-directories.
Raises
------
Exception
Raises an exception if no inputs are provided.
'''
# Define list of valid inputs. If adding new inputs to the class, place
# them in here
input_list = [protein_pdb, cosolvent]
if all(inp is None for inp in input_list):
raise Exception('No valid inputs provided')
        # If no name given, generate the name from the input files;
        # otherwise use the name provided
        if name is None:
            self.name = get_name_from_input_list(input_list)
        else:
            self.name = name
        self.replica_name = replica_name
# If not already made, create directory based on name/replica name
dirname = self.name.replace('.','')
if self.replica_name is not None:
self.directory = os.path.join(os.getcwd(), dirname, str(self.replica_name))
self.root_directory = os.path.join(os.getcwd(), dirname)
try:
os.mkdir(self.root_directory)
except FileExistsError:
pass
try:
os.mkdir(self.directory)
except FileExistsError:
pass
else:
self.directory = os.path.join(os.getcwd(), dirname)
self.root_directory = os.path.join(os.getcwd(), dirname)
if not os.path.isdir(self.directory):
os.mkdir(self.directory)
Setup.__init__(self, self.name, protein_pdb, cosolvent, self.directory)
Simulation.__init__(self, self.name, self.parm7, self.rst7, self.directory)
@property
def protein_termini(self):
'''tuple : Terminal residues in tleap_pdb file associated with the
experiment.
'''
try:
return get_protein_termini(self.tleap_pdb)
except AttributeError:
            raise Exception('This experiment object does not have a tleap_pdb'
                            ' file associated with it, which is needed to '
                            'obtain the protein termini.')
class ProteinExperiment(Experiment):
def __init__(self, protein_pdb, name = None, replica_name = None):
Experiment.__init__(self, name, replica_name, protein_pdb=protein_pdb)
def make_system(self,
box_distance: float = 12.0,
box_shape: str = 'box',
ions: dict = {'Na+': 0, 'Cl-':0},
hmr: bool = True,
tleap_input: TleapInput = None):
self.run_tleap(box_distance, box_shape, ions, tleap_input)
if hmr:
self.run_parmed()
class CosolventExperiment(Experiment):
def __init__(self, cosolvent, name = None, replica_name = None):
Experiment.__init__(self, name, replica_name, cosolvent=cosolvent)
def make_system(self,
n_waters = None,
n_cosolvents = 100,
box_size = [50,50,50],
ions: dict = {'Na+': 0, 'Cl-':0},
distance: float = 12.0,
hmr: bool = True,
packmol_input: PackmolInput = None):
self.run_packmol(n_waters, n_cosolvents, box_size, packmol_input)
if n_waters is None:
self.run_tleap(tleap_input=TleapInput(distance=0.0, ions=ions))
elif type(n_waters) is int:
self.run_tleap(tleap_input=TleapInput(ions=ions, solvate=False, box_size=box_size))
else:
raise Exception('n_waters must be an integer or None')
if hmr:
self.run_parmed()
class ProteinCosolventExperiment(CosolventExperiment, ProteinExperiment):
def __init__(self, protein_pdb, cosolvent, name = None, replica_name = None):
Experiment.__init__(self, name, replica_name, protein_pdb=protein_pdb, cosolvent=cosolvent)
|
AmberPy
|
/AmberPy-0.0.1-py3-none-any.whl/amberpy/experiments.py
|
experiments.py
|
from amberpy.crossbow import crossbow
from amberpy.utilities import get_name_from_file
import os
from typing import Union
class MDInput:
'''Base class for MD inputs.
Do not use this class directly, but instead, use one of the classes that
inherits from this class: MinimisationInput, EquilibrationInput,
ProductionInput. These subclasses determine which attributes will be
turned on/off. In theory, any valid MD flag that can be used with Amber's
pmemd.cuda_SPFP can be given to this input object and will be written into
the mdin file. The attributes listed here are the most common ones, but
please refer the Amber manual for a more detailed description
(https://ambermd.org/doc12/Amber21.pdf).
Attributes
----------
imin : int, default=0
Flag to run minimisation.
0 = run molecular dynamics without any minimisation.
1 = perform energy minimisation.
maxcyc : int, default=5000
The maximum number of minimisation cycles to use.
ncyc : int, default=2500
The number of steepest descent minimisation cycles to use.
irest : int, default=1
Flag to restart from a simulation.
0 = do not restart from a simulation, instead start a new one ignoring
velocities and setting the timestep count to 0.
1 = restart from a simulation, reading coordinates and velocities from
a previously saved restart file.
ntx : int, default=5
Flag to read velocities from coordinate file.
1 = read coordinates but not velocities.
5 = read coordinates and velocities.
ntt : int, default=3
Flag for temperature scaling.
        0 = constant total energy classical dynamics.
1 = constant temperature using the weak coupling algorithm.
2 = Anderson-like temperature coupling.
3 = use Langevin dynamics with a collision frequency given by gamma_ln.
9 = optimized Isokinetic Nose-Hoover chain ensemble.
10 = stochastic Isokinetic Nose-Hoover RESPA integrator.
11 = stochastic version of Berendsen thermostat, also known as Bussi
thermostat
gamma_ln : float, default=1.0
Friction coefficient (ps^-1) when ntt=3.
temp0 : float, default=310.0
Target temperature if ntt > 0.
tempi : float, default=0.0
Initial temperature if ntt > 0. If set to 0.0, velocities are
calculated from the forces.
cut : float, default=8.0
Non-bonded cutoff in Angstroms.
    nstlim : int, default=2500000
        Total number of MD steps to perform.
dt : float, default=0.004
Integrator time step in picoseconds.
ntc : int, default=2
Flag for SHAKE to perform bond length constraints.
1 = SHAKE is not performed.
2 = bonds containing hydrogen are constrained.
3 = all bonds are constrained.
ntf : int, default=2
Flag for force evaluation (typically set ntf=ntc).
1 = complete interaction calculated.
2 = bond interactions involving H-atoms omitted (use with ntc=2).
3 = all the bond interactions are omitted (use with ntc=3).
4 = angle involving H-atoms and all bonds are omitted.
5 = all bond and angle interactions are omitted.
6 = dihedrals involving H-atoms and all bonds and all angle
interactions are omitted.
7 = all bond, angle and dihedral interactions are omitted.
8 = all bond, angle, dihedral and non-bonded interactions are omitted.
ntpr : int, default=1000
        Write energy information to mdout and mdinfo files every 'ntpr' steps.
ntwx : int, default=25000
Write coordinates to trajectory every 'ntwx' steps.
ntwr : int, default=10000
Write coordinates to a restart file every 'ntwr' steps.
ntwv : int, default=0
        Write velocities to an mdvel file every 'ntwv' steps.
-1 = write velocities to trajectory at an interval defined by 'ntwx'.
0 = do not write velocities.
ntwf : int, default=0
Write forces to an mdfrc file every 'ntwf' steps.
-1 = write forces to trajectory at an interval defined by 'ntwx'.
0 = do not write forces.
ntxo : int, default=2
Restart file format.
1 = formatted (ASCII).
2 = netCDF (nc, recommended).
    ioutfm : int, default=1
        Trajectory/velocity file format.
        0 = formatted (ASCII).
        1 = binary netCDF (nc, recommended).
iwrap : int, default=1
Coordinate wrapping.
0 = do not wrap.
        1 = wrap coordinates into the primary unit cell when writing them.
    barostat : int, default=2
        Barostat flag (used when ntp > 0).
        1 = Berendsen.
        2 = Monte Carlo.
ntp : int, default=0
Flag for constant pressure dynamics. Set to >0 for NPT ensemble.
0 = No pressure scaling.
1 = isotropic position scaling.
pres0 : float, default=1.0
Target external pressure, in bar.
    posres : tuple or bool, default=False
        Tuple of residue numbers defining the start/end residues of the
        protein chains to be restrained, or False for no restraints.
'''
def __init__(self, **kwargs):
'''
        How this class is initialised depends on which keyword arguments are
        supplied. In theory, you could call this class directly, specifying
        all of the keyword arguments that you want in the mdin file; however,
        the preferred method is to instantiate one of the subclasses, which
        ensure that the keyword arguments are set correctly. Alternatively,
        you can make a new input class that inherits from this one and sets
        the keyword arguments within its __init__ method.
Parameters
----------
**kwargs
The parameters for this argument can be set to any of the
attributes listed in the docstring of this class.
'''
# Set required (default) attributes
self.ntpr: int = 1000
self.watnam: str = "'WAT'"
self.owtnm: str = "'O'"
self.posres: tuple = False
self.cut: float = 8.0
self.ntxo: int = 2
self.ntr = 1
# If minimisation is turned on, enable minimisation specific attributes
        if kwargs.get('imin') == 1:
self.imin: int = 1
self.maxcyc: int = 5000
self.ncyc: int = 2500
# If minimisation is turned off, enable simulation specific attributes
        elif kwargs.get('imin') == 0:
self.imin: int = 0
self.irest: int = 1
self.ntx: int = 5
self.ntt: int = 3
self.gamma_ln: float = 1.0
# If an initial temperature is given, enable the tempi attribute
if 'tempi' in kwargs:
self.tempi = kwargs['tempi']
self.temp0: float = 310.0
self.nstlim: int = 2500000
self.dt: float = 0.004
self.ntc: int = 2
self.ntf: int = 2
self.ntwx: int = 25000
self.ntwr: int = 10000
self.ntwv: int = 0
self.ntwf: int = 0
self.ioutfm: int = 1
self.iwrap: int = 1
# If the ntp argument is given, turn on pressure control attributes
            if kwargs.get('ntp') == 1:
self.ntp: int = 1
self.pres0: float = 1.0
self.barostat: int = 2
# Get a list of attributes. Only those that have been turned on will
# be in the list
attributes = list(self.__dict__.keys())
# Update attributes with any given via kwargs
for arg in kwargs:
if arg in attributes:
setattr(self, arg, kwargs.get(arg))
# Make a new dictionary of attributes that are turned on
self.arg_dict = {arg : self.__dict__[arg] for arg in attributes}
def write(self, out_dir, fname):
'''Writes an mdin file containing flags for all of the turned on
attributes. The filename is stored in the fname attribute.
Parameters
----------
out_dir : str
Directory in which the file should be written.
fname : str
The name of the file to be written.
'''
# Set fname attribute (used downstream for making HPC input files)
self.fname = fname
# Open file and write all of the turned on attributes and their values
# in mdin format
with open(f"{out_dir}/{fname}", "w+") as f:
f.write("&cntrl\n")
for var, val in self.arg_dict.items():
                # posres is not a plain mdin flag, so it is handled
                # separately below rather than written here
                if var != "posres":
f.write("\t%s=%s,\n" % (var, val))
# If positional restraints are turned on, add the ntr flag and
# write the restraint mask
if self.posres:
a, b = self.posres
f.write('\tntr=1,\n')
f.write(f'/\nProtein posres\n1.0\nRES {a} {b}\nEND\nEND')
else:
f.write("/\nEND")
class MinimisationInput(MDInput):
'''Minimisation input class.
Inherits attributes and methods from the MDInput class.
'''
def __init__(self, **kwargs):
'''
Parameters
----------
**kwargs
Any attribute of the MDInput class can be provided as a key word
argument, however, minimisation will be turned on (simulation
turned off) limiting the number of attributes which can be turned
on.
'''
# Turn minimisation on
kwargs['imin'] = 1
# No pressure control
kwargs['ntp'] = 0
# Instantiate super class with key word arguments
super().__init__(**kwargs)
def __str__(self):
'''str: The name of the input. Used for file naming.'''
return 'minimisation'
class EquilibrationInput(MDInput):
'''Equilibration (NVT) input class.
Inherits attributes and methods from the MDInput class.
'''
def __init__(self, **kwargs):
'''
Parameters
----------
**kwargs
Any attribute of the MDInput class can be provided as a key word
argument, however, minimisation will be turned off (simulation
turned on) and pressure control will be turned off limiting the
number of attributes which can be turned on.
'''
# Turn minimisation off
kwargs['imin'] = 0
# This is not a restart from a previous simulation
kwargs['irest'] = 0
# Coordinate file does not have velocities
kwargs['ntx'] = 1
# Turn off ntp
kwargs['ntp'] = 0
# Make sure an initial temperature is set
if 'tempi' not in kwargs.keys():
kwargs['tempi'] = 0.0
# Instantiate super class with key word arguments
super().__init__(**kwargs)
def __str__(self):
'''str: The name of the input. Used for file naming.'''
return 'equilibration'
class ProductionInput(MDInput):
'''
Production (NPT) input class.
    Inherits attributes and methods from the MDInput class.
'''
def __init__(self, **kwargs):
'''
Parameters
----------
**kwargs
Any attribute of the MDInput class can be provided as a key word
argument, however, minimisation will be turned off (simulation
            turned on) and pressure control will be turned on, limiting the
number of attributes which can be turned on.
'''
# Turn minimisation off
kwargs['imin'] = 0
# Continue on from restart file
kwargs['irest'] = 1
# Read velocities from coordinate file
kwargs['ntx'] = 5
# Turn on NPT ensemble
kwargs['ntp'] = 1
# Instantiate super class with key word arguments
super().__init__(**kwargs)
def __str__(self):
return 'production'
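# Usage sketch (illustrative, not part of the original module): each subclass
# pins the appropriate flags and forwards everything else to MDInput. Flag
# values below are hypothetical.
#
#     minim = MinimisationInput(ncyc=2500, maxcyc=5000, cut=9.0)
#     equil = EquilibrationInput(tempi=0.0, temp0=310.0, dt=0.001, nstlim=125000)
#     prod = ProductionInput(temp0=310.0, dt=0.004, nstlim=25000000)
#     for step in (minim, equil, prod):
#         step.write('.', f'{step}.mdin')   # e.g. writes minimisation.mdin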
class Simulation:
"""Class for running MD simulations.
Attributes
----------
md_steps : list
A list of MDInput objects.
md_inputs : list
        A list of the input file names corresponding to the input objects in
md_steps.
name : str
The name of the simulation, used for job naming.
simulation_directory : str
The name of the directory containing all of the simulation
input/output files.
parm7 : str
Path of the parm7 file made by tleap.
rst7 : str
Path of the rst7 file made by tleap.
"""
def __init__(self,
name,
parm7,
rst7,
simulation_directory=None,
):
"""
Parameters
----------
name : str, optional
The name of the simulation, used for job naming.
parm7 : str
Path to the parm7 input file.
rst7 : str
Path to the rst7 input file.
simulation_directory : str or None
Directory to perform the simulation in. Defaults to current
working directory if None
"""
# Set attributes from arguments
self.parm7 = parm7
self.rst7 = rst7
self.ref_rst7 = rst7
        # Default to the current working directory if none is given
        if simulation_directory is None:
            simulation_directory = os.getcwd()
        self.simulation_directory = simulation_directory
# If no name is given, get it from the parm7 file
if name is None:
name = get_name_from_file(parm7)
# Add an 'a' to the jobname if it starts with a digit because arc does
# not like them
if name[0].isdigit():
name = 'a'+name
# Set attributes
self.name = name
self.md_steps = []
self.md_inputs = []
self.md_job_names = []
def add_minimisation_step(
self,
steepest_descent_steps: int = 2500,
conjugate_gradient_steps: int = 2500,
nb_cutoff: float = 9.0,
restraints: Union[str, tuple] = 'protein',
md_input: MinimisationInput = None):
'''Adds a minimisation step to the simulation.
Parameters
----------
steepest_descent_steps : int, optional
Number of steepest descent minimisation steps to perform.
conjugate_gradient_steps : int, optional
Number of conjugate gradient minimisation steps to perform.
nb_cutoff : float, optional
The non-bonded interaction cutoff limit in Angstroms.
restraints : str or tuple, optional
            Add restraints to either the entire protein, e.g. restraints =
"protein", or to the residues defined by a length 2 tuple e.g.
restraints = (1, 500).
md_input : MinimisationInput, optional
Overrides all other parameters and instead uses a MinimisationInput
instance.
'''
# If no md_input provided, build one from the key word arguments
if md_input is None:
kwargs = {}
kwargs['ncyc'] = steepest_descent_steps
kwargs['maxcyc'] = steepest_descent_steps + conjugate_gradient_steps
kwargs['cut'] = nb_cutoff
# If restraints are given process them into MDInput compatible
# argument
if restraints is not None:
posres = self._restraints_from_arg(restraints)
# If 'protein' is given as the restraint argument, but this
# class has been made directly (with Simulation() rather than
# Experiment()) and therefore doesn't have a protein_termini
# attribute, posres will be None so do not set protein
# restraints
if posres is not None:
kwargs['posres'] = posres
# Add a MinimisationInput object to the simulation using the key
# word arguments
self.md_steps.append(MinimisationInput(**kwargs))
# If Minimisation object is provided just add that
elif isinstance(md_input, MinimisationInput):
self.md_steps.append(md_input)
else:
raise Exception('md_input must be an instance of the MinimisationInput class or None')
def add_equilibration_step(
self,
initial_temperature: float = 0.0,
target_temperature: float = 310.0,
nb_cutoff: float = 9.0,
simulation_time: float = 125.0,
restraints: Union[str, tuple] = 'protein',
md_input: EquilibrationInput = None):
        '''Adds an equilibration step to the simulation.
Parameters
----------
        initial_temperature : float, optional
Initial temperature to start equilibration in Kelvin.
target_temperature : float, optional
Target temperature to reach by the end of the simulation in Kelvin.
nb_cutoff : float, optional
The non-bonded interaction cutoff limit in Angstroms.
simulation_time : float, optional
Total MD simulation_time for the equilibration step in picoseconds.
restraints : str or tuple, optional
            Add restraints to either the entire protein, e.g. restraints =
"protein", or to the residues defined by a length 2 tuple e.g.
restraints = (1, 500).
md_input : EquilibrationInput, optional
Overrides all other arguments and instead uses an EquilibrationInput
instance.
'''
# If no md_input provided, build one from the key word arguments
if md_input is None:
kwargs = {}
kwargs['tempi'] = initial_temperature
kwargs['temp0'] = target_temperature
kwargs['cut'] = nb_cutoff
kwargs['dt'] = 0.001
kwargs['nstlim'] = int(simulation_time/kwargs['dt'])
# If restraints are given process them into MDInput compatible
# argument
if restraints is not None:
posres = self._restraints_from_arg(restraints)
# If 'protein' is given as the restraint argument, but this
# class has been made directly (with Simulation() rather than
# Experiment()) and therefore doesn't have a protein_termini
# attribute, posres will be None so do not set protein
# restraints
if posres is not None:
kwargs['posres'] = posres
# Add a EquilibrationInput object to the simulation using the key
# word arguments
self.md_steps.append(EquilibrationInput(**kwargs))
# If Equilibration object is provided just add that
elif isinstance(md_input, EquilibrationInput):
self.md_steps.append(md_input)
else:
raise Exception('md_input must be an instance of the EquilibrationInput class or None')
def add_production_step(
self,
timestep: float = 0.004,
target_temperature: float = 310.0,
nb_cutoff: float = 9.0,
simulation_time: float = 100.0,
            md_input: ProductionInput = None
):
'''Adds a Production step to the simulation.
Parameters
----------
timestep : float, optional
The integrator timestep to be used in the simulation. If hydrogen
mass repartitioning is used, set this to 0.004, otherwise set to
0.002 (provided that SHAKE is not turned off manually).
target_temperature : float, optional
Target temperature to be kept at in Kelvin.
nb_cutoff : float, optional
The non-bonded interaction cutoff limit in Angstroms.
simulation_time : float, optional
            Total MD simulation_time for the production step in nanoseconds.
        md_input : ProductionInput, optional
            Overrides all other arguments and instead uses a
            ProductionInput instance.
'''
# If no md_input provided, build one from the key word arguments
if md_input is None:
kwargs = {}
kwargs['dt'] = timestep
kwargs['cut'] = nb_cutoff
kwargs['nstlim'] = int((1000*simulation_time)/kwargs['dt'])
kwargs['temp0'] = target_temperature
# Add a ProductionInput object to the simulation using the key
# word arguments
self.md_steps.append(ProductionInput(**kwargs))
# If Production object is provided just add that
elif isinstance(md_input, ProductionInput):
self.md_steps.append(md_input)
else:
raise Exception('md_input must be an instance of the ProductionInput class or None')
def run(self,
arc = 3,
cores = 32
):
'''Writes the mdin files and runs the simulation using crossbow.
Parameters
----------
arc : int, optional
The Arc HPC cluster you want to perform the simulations on. Can be
3 or 4. The default is 3.
cores : int, default=32
The number of cores to use for minimisation (if minimisation is
used).
'''
# Create an empty list that will hold the inputs for each step to pass
# to crossbow
crossbow_inputs = []
# Longbow doesn't like absolute paths so get the basenames of the
# input files
parm7 = os.path.basename(self.parm7)
rst7 = os.path.basename(self.rst7)
ref_rst7 = os.path.basename(self.ref_rst7)
# Iterate through md steps and get step number (i)
for i, md_step in enumerate(self.md_steps):
i += 1
            step_name = str(md_step)
# File name will contain the step number (based on order in
# md_steps) and the name of the input object. The prefix here is
# used by longbow to automatically generate all of the output file
# names
fname = f'step-{i}-{step_name}.mdin'
md_step.write(self.simulation_directory, fname)
# Add the filename to the md_inputs list
self.md_inputs.append(fname)
# Get the name for the job from the simulation name, step name, and
# step number
            job_name = self.name + '.' + step_name[:3] + '.' + str(i)
self.md_job_names.append(job_name)
            # Get the positional arguments in a tuple. The positional
            # arguments for crossbow are (name, mdin, parm7, rst7, ref_rst7)
args = (job_name, self.md_inputs[i-1], parm7, rst7,
ref_rst7)
# Create a key word argument dictionary for crossbow and add
# kwargs
kwargs = {}
kwargs['arc'] = arc
kwargs['localworkdir'] = self.simulation_directory
            if step_name == 'minimisation':
kwargs['minimisation'] = True
kwargs['cores'] = cores
if i != 1:
kwargs['hold_jid'] = self.md_job_names[i-2]
# Add args and kwargs as tuple to list of crossbow inputs
crossbow_inputs.append((args, kwargs))
# The rst7 variable is set to the rst7 file that comes out of this
# step, so that the next md step can use these coordinates as an
# input
rst7 = f'step-{i}-{step_name}.rst7'
for args, kwargs in crossbow_inputs:
crossbow(*args, **kwargs)
def _restraints_from_arg(self, arg):
'''Converts restraints from argument to posres MDInput argument.
        If the argument is not a valid posres argument, an exception is
        raised.
Parameters
----------
arg
Restraint argument passed to method.
Returns
-------
restraints : tuple
MDInput object posres parameter.
'''
if arg == 'protein':
try:
restraints = self.protein_termini
            except Exception:
restraints = None
elif type(arg) is tuple:
if len(arg) == 2:
restraints = arg
else:
raise Exception(f'Protein restraint tuple must be length 2, not {len(arg)}')
else:
raise Exception(f'Restraint argument can either be "protein" or tuple, not {type(arg)}')
return restraints
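if __name__ == '__main__':
    # Minimal wiring sketch (illustrative, not part of the original module):
    # the parm7/rst7 paths are hypothetical, and run() writes the mdin files
    # and submits jobs to the ARC cluster via crossbow.
    sim = Simulation('demo', 'demo.parm7', 'demo.rst7',
                     simulation_directory=os.getcwd())
    sim.add_minimisation_step(restraints=None)
    sim.add_equilibration_step(simulation_time=125.0, restraints=None)
    sim.add_production_step(simulation_time=100.0)
    # sim.run(arc=4)  # uncomment to write the mdin files and submit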
|
AmberPy
|
/AmberPy-0.0.1-py3-none-any.whl/amberpy/simulation.py
|
simulation.py
|
import os
from longbow.entrypoints import longbow
import logging
# Setup logger (must be done like this so that the longbow logger works)
LOG = logging.getLogger("longbow")
logformat = logging.Formatter('%(asctime)s - %(levelname)-8s - '
'%(name)s - %(message)s',
'%Y-%m-%d %H:%M:%S')
LOG.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logformat)
LOG.addHandler(handler)
# Setup parameter dictionary
parameters = {'disconnect': False,
'job': '',
'hosts': os.path.expanduser('~/.amberpy/hosts.conf'),
'maxtime': '48:00',
'nochecks': False,
'resource': '',
'sge-peflag': 'ib',}
def crossbow(name,
mdin,
parm7,
rst7,
ref_rst7,
gpu=True,
cores=None,
hold_jid='',
arc=3,
localworkdir='',
minimisation=False):
# Get the output file names from the inputs
mdout = mdin.replace('mdin', 'mdout')
mdinfo = mdin.replace('mdin', 'mdinfo')
out_rst7 = mdin.replace('mdin', 'rst7')
nc = mdin.replace('mdin', 'nc')
# Job names on arc cannot start with a digit, so if name does, place an 'a'
# at the start
if name[0].isdigit():
name = 'a'+name
print(f"Arc job name can't start with digit, changing to {name}")
    # Ensure that only gpu OR cores has been specified; cores takes priority
    if gpu and cores is not None:
        gpu = False
    elif not gpu and cores is None:
        raise Exception("Please specify either gpu or cores")
    # Set cpu/gpu parameters
    if cores is not None:
        parameters['cores'] = str(cores)
        if arc == 3:
            parameters['resource'] = 'arc3-cpu'
        elif arc == 4:
            parameters['resource'] = 'arc4-cpu'
    elif gpu:
        parameters['cores'] = str(0)
        if arc == 3:
            parameters['resource'] = 'arc3-gpu'
        elif arc == 4:
            parameters['resource'] = 'arc4-gpu'
# Set exectutable arguments from inputs/outputs
parameters['executableargs'] = f'-O -i {mdin} -p {parm7} -c {rst7} -o {mdout} -r {out_rst7} -inf {mdinfo} -ref {ref_rst7} -x {nc}'
# If minimisation is set to true don't save the trajectory
    if minimisation:
parameters['executableargs'] = f'-O -i {mdin} -p {parm7} -c {rst7} -o {mdout} -r {out_rst7} -inf {mdinfo} -ref {ref_rst7}'
# Add some extra parameters
parameters['log'] = os.path.join(localworkdir, f'{name}.log')
parameters['hold_jid'] = hold_jid
parameters['jobname'] = name
parameters['upload-include'] = ', '.join([mdin, parm7, rst7, ref_rst7])
parameters['upload-exclude'] = '*'
parameters['download-include'] = ', '.join([mdout, mdinfo, out_rst7, nc])
parameters['download-exclude'] = '*'
parameters['localworkdir'] = localworkdir
# Run longbow with empty jobs list and parameters
jobs = {}
longbow(jobs, parameters)
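# Usage sketch (illustrative; the job name, file names and working directory
# are hypothetical placeholders):
#
#     crossbow('demo.min.1', 'step-1-minimisation.mdin',
#              'demo.parm7', 'demo.rst7', 'demo.rst7',
#              cores=32, arc=4, localworkdir='/path/to/simdir',
#              minimisation=True)
#
# With cores given, the function selects the 'arc4-cpu' resource; leaving
# cores=None with gpu=True selects 'arc4-gpu' instead.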
|
AmberPy
|
/AmberPy-0.0.1-py3-none-any.whl/amberpy/crossbow.py
|
crossbow.py
|
import asyncio
import json
import logging
import time
from urllib.parse import urlencode
import aiohttp
import async_timeout
DEFAULT_TIMEOUT = 10
API_ENDPOINT = 'https://api.ambiclimate.com/api/v1/'
_LOGGER = logging.getLogger(__name__)
class AmbiclimateConnection:
"""Class to comunicate with the Ambiclimate api."""
def __init__(self, oauth, token_info,
timeout=DEFAULT_TIMEOUT,
websession=None):
"""Initialize the Ambiclimate connection."""
if websession is None:
async def _create_session():
return aiohttp.ClientSession()
loop = asyncio.get_event_loop()
self.websession = loop.run_until_complete(_create_session())
else:
self.websession = websession
self._timeout = timeout
self.oauth = oauth
self.token_info = token_info
self._devices = []
async def request(self, command, params, retry=3, get=True):
"""Request data."""
headers = {
"Accept": "application/json",
'Authorization': 'Bearer ' + self.token_info.get('access_token')
}
url = API_ENDPOINT + command
try:
with async_timeout.timeout(self._timeout):
if get:
resp = await self.websession.get(url, headers=headers, params=params)
else:
resp = await self.websession.post(url, headers=headers, json=params)
except asyncio.TimeoutError:
if retry < 1:
_LOGGER.error("Timed out sending command to Ambiclimate: %s", command)
return None
return await self.request(command, params, retry - 1, get)
except aiohttp.ClientError:
_LOGGER.error("Error sending command to Ambiclimate: %s", command, exc_info=True)
return None
if resp.status != 200:
_LOGGER.error(await resp.text())
return None
return await resp.text()
def find_device_by_room_name(self, room_name):
"""Get device by room name."""
for device in self._devices:
if device.name == room_name:
return device
return None
async def find_devices(self):
"""Get users Ambi Climate device information."""
res = await self.request('devices', {})
if not res:
return False
res = json.loads(res)
self._devices = []
for device in res.get('data', []):
self._devices.append(AmbiclimateDevice(device.get('room_name'),
device.get('location_name'),
device.get('device_id'),
self))
return bool(self._devices)
def get_devices(self):
"""Get users Ambi Climate device information."""
return self._devices
async def refresh_access_token(self):
"""Refresh access token."""
token_info = await self.oauth.refresh_access_token(self.token_info)
if token_info is None:
return None
self.token_info = token_info
return token_info
class AmbiclimateDevice:
"""Instance of Ambiclimate device."""
# pylint: disable=too-many-public-methods
def __init__(self, room_name, location_name, device_id, ambiclimate_control):
"""Initialize the Ambiclimate device class."""
self._room_name = room_name
self._location_name = location_name
self._device_id = device_id
self.control = ambiclimate_control
self.ir_features = None
self.ac_data = None
self._mode = None
@property
def device_id(self):
"""Return a device ID."""
return self._device_id
@property
def name(self):
"""Return a device name."""
return self._room_name
async def request(self, command, params, retry=3, get=True):
"""Request data."""
if 'multiple' in params:
params['multiple'] = 'True' if params['multiple'] else 'False'
params['room_name'] = self._room_name
params['location_name'] = self._location_name
res = await self.control.request(command, params, retry, get)
try:
res = json.loads(res)
if isinstance(res, dict) and res.get('error'):
_LOGGER.error(res.get('error'))
return res
except TypeError:
if isinstance(res, dict):
status = res.get('status')
if status is not None:
if status == 'ok':
return True
return False
return None
async def set_power_off(self, multiple=False):
"""Power off your AC."""
return await self.request('device/power/off', {'multiple': multiple})
async def set_comfort_mode(self, multiple=False):
"""Enable Comfort mode on your AC."""
return await self.request('device/mode/comfort', {'multiple': multiple})
async def set_comfort_feedback(self, value):
"""Send feedback for Comfort mode."""
valid_comfort_feedback = ['too_hot', 'too_warm', 'bit_warm', 'comfortable',
'bit_cold', 'too_cold', 'freezing']
if value not in valid_comfort_feedback:
_LOGGER.error("Invalid comfort feedback")
return
return await self.request('user/feedback', {'value': value})
async def set_away_mode_temperature_lower(self, value, multiple=False):
"""Enable Away mode and set an lower bound for temperature."""
return await self.request('device/mode/away_temperature_lower',
{'multiple': multiple, 'value': value})
async def set_away_mode_temperature_upper(self, value, multiple=False):
"""Enable Away mode and set an upper bound for temperature."""
return await self.request('device/mode/away_temperature_upper',
{'multiple': multiple, 'value': value})
async def set_away_humidity_upper(self, value, multiple=False):
"""Enable Away mode and set an upper bound for humidity."""
return await self.request('device/mode/away_humidity_upper',
{'multiple': multiple, 'value': value})
async def set_temperature_mode(self, value, multiple=False):
"""Enable Temperature mode on your AC."""
return await self.request('device/mode/temperature',
{'multiple': multiple, 'value': value})
async def get_sensor_temperature(self):
"""Get latest sensor temperature data."""
res = await self.request('device/sensor/temperature', {})
if res is None:
return None
return res[0].get('value')
async def get_sensor_humidity(self):
"""Get latest sensor humidity data."""
        res = await self.request('device/sensor/humidity', {})
        if res is None:
            return None
        val = res[0].get('value')
if val is None:
return None
return round(val, 1)
async def get_mode(self):
"""Get Ambi Climate's current working mode."""
res = await self.request('device/mode', {})
if res is None:
return None
return res.get('mode', '')
async def get_ir_feature(self):
"""Get Ambi Climate's appliance IR feature."""
return await self.request('device/ir_feature', {})
async def get_appliance_states(self, limit=1, offset=0):
"""Get Ambi Climate's last N appliance states."""
return await self.request('device/appliance_states',
{'limit': limit, 'offset': offset})
async def set_target_temperature(self, temperature):
"""Set target temperature."""
if self._mode and self._mode.lower() != 'manual':
_LOGGER.error("Mode has to be sat to manual in the "
"Ambiclimate app. Current mode is %s.", self._mode)
return
data = self.ac_data[0]
params = {"mode": data['mode'].lower(),
"power": data['power'].lower(),
"feature": {
"temperature": str(int(temperature)),
"fan": data['fan'].lower(),
"louver": data.get('louver', "auto").lower(),
'swing': data.get('swing', "auto").lower(),
}}
return await self.request('device/deployments', params, get=False)
async def turn_off(self):
"""Turn off."""
return await self.set_power_off()
async def turn_on(self):
"""Turn on."""
data = self.ac_data[0]
feature = {}
feature["temperature"] = str(data.get('target_temperature', data.get('temperature', 20)))
feature['fan'] = data['fan'].lower() if data.get('fan') else 'med-high'
feature['louver'] = data['louver'].lower() if data.get('louver') else 'auto'
feature['swing'] = data['swing'].lower() if data.get('swing') else 'oscillate'
params = {"mode": data.get('mode', 'Heat').lower(),
"power": 'on',
"feature": feature}
return await self.request('device/deployments', params, get=False)
def get_min_temp(self):
"""Get min temperature."""
res = 1000
data = self.ir_features['data'][self.ac_data[0].get('mode').lower()]['temperature']['value']
for temp in data:
if float(temp) < res:
res = float(temp)
return res
def get_max_temp(self):
"""Get max temperature."""
res = -1000
data = self.ir_features['data'][self.ac_data[0].get('mode').lower()]['temperature']['value']
for temp in data:
if float(temp) > res:
res = float(temp)
return res
async def update_device_info(self):
"""Update device info."""
self.ir_features = await self.get_ir_feature()
async def update_device(self):
"""Update device."""
data = dict()
data['target_temperature'] = None
states = await self.get_appliance_states()
if states:
self.ac_data = states.get('data', [{}])
data['target_temperature'] = self.ac_data[0].get('temperature')
data['power'] = self.ac_data[0].get('power')
        temp = await self.get_sensor_temperature()
        data['temperature'] = round(temp, 1) if temp is not None else None
        humidity = await self.get_sensor_humidity()
        data['humidity'] = round(humidity, 1) if humidity is not None else None
self._mode = await self.get_mode()
return data
class AmbiclimateOauthError(Exception):
"""AmbiclimateOauthError."""
class AmbiclimateOAuth:
"""Implements Authorization Code Flow for Ambiclimate's OAuth implementation."""
OAUTH_AUTHORIZE_URL = 'https://api.ambiclimate.com/oauth2/authorize'
OAUTH_TOKEN_URL = 'https://api.ambiclimate.com/oauth2/token'
    def __init__(self, client_id, client_secret, redirect_uri, websession):
        """Create an AmbiclimateOAuth object."""
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri
        self.websession = websession
def get_authorize_url(self):
"""Get the URL to use to authorize this app."""
payload = {'client_id': self.client_id,
'response_type': 'code',
'redirect_uri': self.redirect_uri}
return self.OAUTH_AUTHORIZE_URL + '?' + urlencode(payload)
async def get_access_token(self, code):
"""Get the access token for the app given the code."""
payload = {'client_id': self.client_id,
'redirect_uri': self.redirect_uri,
'code': code,
'client_secret': self.client_secret,
'grant_type': 'authorization_code'}
try:
with async_timeout.timeout(DEFAULT_TIMEOUT):
response = await self.websession.post(self.OAUTH_TOKEN_URL,
data=payload,
allow_redirects=True)
if response.status != 200:
raise AmbiclimateOauthError(response.status)
token_info = await response.json()
token_info['expires_at'] = int(time.time()) + token_info['expires_in']
return token_info
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Timeout calling Ambiclimate to get auth token.")
return None
async def refresh_access_token(self, token_info):
"""Refresh access token."""
if token_info is None:
return token_info
if not is_token_expired(token_info):
return token_info
payload = {'client_id': self.client_id,
'redirect_uri': self.redirect_uri,
'refresh_token': token_info['refresh_token'],
'client_secret': self.client_secret,
'grant_type': 'refresh_token'}
refresh_token = token_info.get('refresh_token')
try:
with async_timeout.timeout(DEFAULT_TIMEOUT):
response = await self.websession.post(self.OAUTH_TOKEN_URL,
data=payload,
allow_redirects=True)
if response.status != 200:
_LOGGER.error("Failed to refresh access token: %s", response)
return None
token_info = await response.json()
token_info['expires_at'] = int(time.time()) + token_info['expires_in']
if 'refresh_token' not in token_info:
token_info['refresh_token'] = refresh_token
return token_info
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Timeout calling Ambiclimate to get auth token.")
return None
def is_token_expired(token_info):
"""Check if token is expired."""
return token_info['expires_at'] - int(time.time()) < 60*60
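# Usage sketch (illustrative; the client credentials, redirect URI and
# authorisation code are hypothetical placeholders):
#
#     async def demo(websession):
#         oauth = AmbiclimateOAuth('client-id', 'client-secret',
#                                  'https://example.com/callback', websession)
#         token_info = await oauth.get_access_token('auth-code')
#         conn = AmbiclimateConnection(oauth, token_info, websession=websession)
#         if await conn.find_devices():
#             device = conn.get_devices()[0]
#             print(await device.update_device())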
|
Ambiclimate
|
/Ambiclimate-0.2.1.tar.gz/Ambiclimate-0.2.1/ambiclimate/__init__.py
|
__init__.py
|
__author__ = 'pascal'
from ambilight.tv import AmbilightTV
class Direction():
CCW = 1
CW = 0
VERTICAL = 1
HORIZONTAL = 0
class BufferedAmbilightTV(AmbilightTV):
def __init__(self, ip=None, dryrun=False):
super(BufferedAmbilightTV, self).__init__(ip=ip, dryrun=dryrun)
self.register_observer(self)
# Each pixel will be a (r, g, b) tuple
self.pixels = {AmbilightTV.LEFT: [], AmbilightTV.TOP: [], AmbilightTV.RIGHT: [], AmbilightTV.BOTTOM: []}
self.nb_pixels = 0
def register_subject(self, subject):
pass
def autoconfigure(self, ip=None):
super(BufferedAmbilightTV, self).autoconfigure(ip=ip)
for i in range(0, self.sizes[AmbilightTV.LEFT]):
self.pixels[AmbilightTV.LEFT].append((0, 0, 0))
for i in range(0, self.sizes[AmbilightTV.TOP]):
self.pixels[AmbilightTV.TOP].append((0, 0, 0))
for i in range(0, self.sizes[AmbilightTV.RIGHT]):
self.pixels[AmbilightTV.RIGHT].append((0, 0, 0))
for i in range(0, self.sizes[AmbilightTV.BOTTOM]):
self.pixels[AmbilightTV.BOTTOM].append((0, 0, 0))
def load_current_pixels(self):
raise Exception('TODO Not implemented function load_current_pixels')
def on_all_pixels_changed(self, red, green, blue):
#print "notify_resetted :"+str(red)+str(green)+str(blue)
for side in self.pixels.keys():
for i in range(0, self.sizes[side]):
self.pixels[side][i] = (red, green, blue)
def on_side_changed(self, side, red, green, blue, layer):
#print "on_side_changed :"+side + " : "+str(red)+str(green)+str(blue)
for i in range(0, self.sizes[side]):
self.pixels[side][i] = (red, green, blue)
def on_single_pixel_changed(self, side, position, red, green, blue, layer):
#print "on_single_pixel_changed :"+side + " : "+str(position) + " : " +str(red)+str(green)+str(blue)
self.pixels[side][position] = (red, green, blue)
def on_pixels_by_side_changed(self, left_pixels, top_pixels, right_pixels, bottom_pixels, layer):
#print "on_pixels_by_side_changed "
self._on_pixels_by_side_changed(side=AmbilightTV.LEFT, pixels=left_pixels, layer=layer)
self._on_pixels_by_side_changed(side=AmbilightTV.TOP, pixels=top_pixels, layer=layer)
self._on_pixels_by_side_changed(side=AmbilightTV.RIGHT, pixels=right_pixels, layer=layer)
self._on_pixels_by_side_changed(side=AmbilightTV.BOTTOM, pixels=bottom_pixels, layer=layer)
def _on_pixels_by_side_changed(self, side, pixels, layer):
if pixels is None:
return
if type(pixels) is list:
for pos in range(0, len(pixels)):
self.pixels[side][pos] = pixels[pos]
else:
            for pixel_pos, pixel in pixels.items():
self.pixels[side][pixel_pos] = pixel
def _serialize_pixels(self):
all_pixels = self.pixels[AmbilightTV.LEFT] + self.pixels[AmbilightTV.TOP] + self.pixels[AmbilightTV.RIGHT] + \
self.pixels[AmbilightTV.BOTTOM]
return all_pixels
def _unserialize_pixels(self, all_pixels):
self.pixels[AmbilightTV.LEFT] = all_pixels[
0:
self.sizes[AmbilightTV.LEFT]]
self.pixels[AmbilightTV.TOP] = all_pixels[
self.sizes[AmbilightTV.LEFT]:
self.sizes[AmbilightTV.LEFT]+self.sizes[AmbilightTV.TOP]]
self.pixels[AmbilightTV.RIGHT] = all_pixels[
self.sizes[AmbilightTV.LEFT]+self.sizes[AmbilightTV.TOP]:
self.sizes[AmbilightTV.LEFT]+self.sizes[AmbilightTV.TOP]+self.sizes[AmbilightTV.RIGHT]]
self.pixels[AmbilightTV.BOTTOM] = all_pixels[
self.sizes[AmbilightTV.LEFT]+self.sizes[AmbilightTV.TOP]+self.sizes[AmbilightTV.RIGHT]:]
def _send_pixels(self):
self.set_pixels_by_side(
left_pixels=self.pixels[AmbilightTV.LEFT],
top_pixels=self.pixels[AmbilightTV.TOP],
right_pixels=self.pixels[AmbilightTV.RIGHT],
bottom_pixels=self.pixels[AmbilightTV.BOTTOM])
def patternize(self, pattern_pixels):
all_pixels = self._serialize_pixels()
nb_pixels_before = len(all_pixels)
for pos in range(0, len(all_pixels), len(pattern_pixels)):
all_pixels[pos: pos+len(pattern_pixels)] = pattern_pixels
        # truncate any surplus pattern pixels added at the end
del all_pixels[nb_pixels_before:]
self._unserialize_pixels(all_pixels)
self._send_pixels()
def push_clockwise(self, red=None, green=None, blue=None, color=None):
new_pixel = self._read_color_as_tuple(red, green, blue, color)
all_pixels = self._serialize_pixels()
for pos in range(len(all_pixels)-1, 0, -1):
all_pixels[pos] = all_pixels[pos-1]
all_pixels[0] = new_pixel
self._unserialize_pixels(all_pixels)
self._send_pixels()
def rotate(self, direction=Direction.CCW):
all_pixels = self._serialize_pixels()
if direction == Direction.CCW:
initial_last_pixel = all_pixels[len(all_pixels)-1]
for pos in range(len(all_pixels)-1, 0, -1):
all_pixels[pos] = all_pixels[pos-1]
all_pixels[0] = initial_last_pixel
else:
initial_first_pixel = all_pixels[0]
for pos in range(0, len(all_pixels)-1, 1):
all_pixels[pos] = all_pixels[pos+1]
all_pixels[len(all_pixels)-1] = initial_first_pixel
self._unserialize_pixels(all_pixels)
self._send_pixels()
def mirror(self, direction):
if direction == Direction.HORIZONTAL:
self.set_pixels_by_side(left_pixels=self.pixels[AmbilightTV.RIGHT],
right_pixels=self.pixels[AmbilightTV.LEFT])
else:
self.set_pixels_by_side(top_pixels=self.pixels[AmbilightTV.BOTTOM],
bottom_pixels=self.pixels[AmbilightTV.TOP])
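# Usage sketch (illustrative; the TV IP address is a hypothetical placeholder):
#
#     tv = BufferedAmbilightTV()
#     tv.autoconfigure(ip='192.168.0.59')
#     tv.set_mode_manual()
#     tv.patternize([(255, 0, 0), (0, 0, 0)])  # repeat red/black around the TV
#     tv.rotate(direction=Direction.CW)        # shift every pixel one position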
|
AmbilightParty
|
/AmbilightParty-1.0.tar.gz/AmbilightParty-1.0/ambilight/tvbuff.py
|
tvbuff.py
|
__author__ = 'pascal'
from ambilight.tv import AmbilightTV
from ambilight.tvbuff import BufferedAmbilightTV, Direction
import argparse
import sys
import time
import random
import json
import os
import ambilight
class AmbilightParty():
def __init__(self, dryrun=False):
self.tv = BufferedAmbilightTV(dryrun=dryrun)
self._caterpillars = None
self._flags = None
def connect(self, ip=None):
self.tv.autoconfigure(ip=ip)
def rotate_auto(self, moves=None, duration=None, speed=1.0, direction=Direction.CCW):
""" Rotate pixel several time, by duration or by moves number.
        :param moves: Number of rotation shifts to do
:param duration: Or the total duration of animation (in seconds)
:param speed: Pause between each shift (in seconds)
:param direction: Rotation direction
:return: None
"""
if duration is not None and moves is not None:
raise Exception('moves and duration are mutually exclusive')
if moves is not None:
for i in range(0, moves):
self.tv.rotate(direction=direction)
time.sleep(speed)
return
if duration is None:
            duration = sys.maxsize
started = time.time()
while time.time() < started + duration:
try:
self.tv.rotate(direction=direction)
time.sleep(speed)
except KeyboardInterrupt:
return
def load_builtin_caterpillars(self):
builtin_filename = os.path.join(ambilight.__path__[0], 'data', 'caterpillars.json')
try:
with open(builtin_filename) as fp:
js = json.load(fp)
return js
except IOError:
raise Exception('Built-in caterpillars file [%s] not found' % builtin_filename)
def load_builtin_flags(self):
builtin_filename = os.path.join(ambilight.__path__[0], 'data', 'flags.json')
try:
with open(builtin_filename) as fp:
js = json.load(fp)
return js
except IOError:
raise Exception('Built-in flags file [%s] not found' % builtin_filename)
def get_caterpillars(self):
if self._caterpillars is None:
self._caterpillars = self.load_builtin_caterpillars()
return self._caterpillars
def get_flags(self):
if self._flags is None:
self._flags = self.load_builtin_flags()
return self._flags
def show_themes_list(self):
print('Available themes :')
print(' * Caterpillars :')
for caterpillar_name in sorted(self.get_caterpillars().keys()):
print(' - %s' % caterpillar_name)
print(' * Flags :')
for flag_name in sorted(self.get_flags().keys()):
print(' - %s' % flag_name)
def play_caterpillar(self, pattern_pixels=None, caterpillar_name=None,
duration=0,
speed=0.1,
direction=Direction.CCW):
if caterpillar_name is not None:
caterpillars = self.get_caterpillars()
if caterpillar_name not in caterpillars:
raise Exception('Invalid caterpillar name [{:s}]'.format(caterpillar_name))
pattern_pixels = caterpillars[caterpillar_name]
self.tv.patternize(pattern_pixels)
self.rotate_auto(duration=duration, speed=speed, direction=direction)
def play_flag(self, flag_name=None):
flags = self.get_flags()
if flag_name not in flags:
raise Exception('Invalid flag name [{:s}]'.format(flag_name))
flag_conf = flags[flag_name]
flag_type = flag_conf['type']
colors = flag_conf['colors']
if flag_type == '3V':
self.tv.set_sides(left_color=colors[0],
right_color=colors[2],
top_color=colors[1],
bottom_color=colors[1])
elif flag_type == '3H':
if self.tv.has_bottom():
self.tv.set_sides(top_color=colors[0],
left_color=colors[1],
right_color=colors[1],
bottom_color=colors[2])
else:
self.tv.set_side(AmbilightTV.TOP, color=colors[0])
side_size = self.tv.sizes[AmbilightTV.LEFT]
                for i in range(0, side_size // 2):
                    self.tv.set_pixel(AmbilightTV.LEFT, i, color=colors[2])
                    self.tv.set_pixel(AmbilightTV.RIGHT, i + side_size // 2, color=colors[2])
                for i in range(side_size // 2, side_size):
                    self.tv.set_pixel(AmbilightTV.LEFT, i, color=colors[1])
                    self.tv.set_pixel(AmbilightTV.RIGHT, i - side_size // 2, color=colors[1])
else:
raise Exception('Invalid flag type [{:s}]'.format(flag_type))
def play_flickering_flag(self, flag_name, duration_flag=1, duration_black=0.6, nb_display=10):
for i in range(0, nb_display):
self.play_flag(flag_name)
time.sleep(duration_flag)
self.tv.set_black()
time.sleep(duration_black)
def demo_basic(self):
print('Color everywhere...')
self.tv.set_color(255, 255, 255)
time.sleep(1)
self.tv.set_color(0, 0, 0)
time.sleep(1)
self.tv.set_color(255, 255, 0)
time.sleep(1)
self.tv.set_color(64, 128, 255)
time.sleep(1)
self.tv.set_color(255, 0, 0)
time.sleep(1.5)
print('Color by side...')
self.tv.set_side(AmbilightTV.LEFT, 0, 80, 255)
time.sleep(1)
self.tv.set_side(AmbilightTV.TOP, 224, 80, 0)
time.sleep(1)
self.tv.set_side(AmbilightTV.RIGHT, 80, 255, 0)
time.sleep(1.5)
print('Color by pixel...')
self.tv.set_pixel(AmbilightTV.LEFT, 0, 255, 0, 0)
self.tv.set_pixel(AmbilightTV.LEFT, 1, 255, 0, 0)
self.tv.set_pixel(AmbilightTV.TOP, 3, 128, 0, 255)
self.tv.set_pixel(AmbilightTV.TOP, 4, 128, 0, 255)
self.tv.set_pixel(AmbilightTV.TOP, 5, 128, 0, 255)
self.tv.set_pixel(AmbilightTV.RIGHT, 2, 255, 0, 0)
self.tv.set_pixel(AmbilightTV.RIGHT, 3, 255, 0, 0)
print('Mirrors...')
for i in range(0, 6):
self.tv.mirror(Direction.HORIZONTAL)
time.sleep(0.7)
if self.tv.has_bottom():
for i in range(0, 6):
self.tv.mirror(Direction.VERTICAL)
time.sleep(0.7)
print('Rotations...')
self.rotate_auto(direction=Direction.CW, moves=12, speed=0.3)
time.sleep(1)
self.rotate_auto(direction=Direction.CCW, moves=12, speed=0.3)
time.sleep(1)
print('Setting sub-pixels...')
self.tv.set_color(0, 0, 0)
for i in range(0, 120):
self.tv.set_color(0, 0, i)
for i in range(0, 120):
self.tv.set_color(i, 0, 0)
for i in range(0, 120):
self.tv.set_color(green=i)
for i in range(120, 0, -1):
self.tv.set_side(AmbilightTV.TOP, green=i)
print('End of basic demo :)')
def demo_kitt(self, speed=0.1, nb_pixels=1):
self.tv.set_color(0, 0, 0)
for i in range(0, nb_pixels):
self.tv.set_pixel(AmbilightTV.TOP, i, 255, 0, 0)
for i in range(0, 20000):
self.rotate_auto(direction=Direction.CCW, moves=self.tv.sizes[AmbilightTV.TOP]-nb_pixels, speed=speed)
self.rotate_auto(direction=Direction.CW, moves=self.tv.sizes[AmbilightTV.TOP]-nb_pixels, speed=speed)
def demo_caterpillars(self):
themes = self.get_caterpillars()
        remaining_names = list(themes.keys())
        for i in range(0, 10):
            direction = random.choice([Direction.CW, Direction.CCW])
            speed = random.choice([0.1, 0.5, 0.9])
            caterpillar_name = random.choice(remaining_names)
remaining_names.remove(caterpillar_name)
print('Playing caterpillar [%s]' % caterpillar_name)
self.play_caterpillar(caterpillar_name=caterpillar_name, direction=direction, duration=6, speed=speed)
def demo_flags(self):
flags = self.get_flags()
        remaining_names = list(flags.keys())
        for i in range(0, 5):
            flag_name = random.choice(remaining_names)
remaining_names.remove(flag_name)
print('Displaying flag [%s]' % flag_name)
self.play_flag(flag_name=flag_name)
time.sleep(2)
def main():
desc = 'Have fun with your Ambilight TV.'
parser = argparse.ArgumentParser(description=desc, add_help=True)
parser.add_argument('--info', action='store_true', required=False, default=None,
help='Display TV and library info')
parser.add_argument('--list', action='store_true', required=False, default=None,
help='List available caterpillars and flags')
parser.add_argument('--ip', action='store', required=False, default='192.168.0.59',
help='TV ip address')
parser.add_argument('--stop', action='store_true', required=False, default=None,
help='Restore the TV in automatic Ambilight management mode')
parser.add_argument('--demo', action='store', required=False, default=None,
help='Play a demo mode', choices=['basic', 'caterpillars', 'flags', 'kitt'])
parser.add_argument('--color', action='store', required=False, default=None,
help='Set a single color on all pixels. Format : RRGGBB, eg FF8800')
parser.add_argument('--caterpillar', action='store', required=False,
help='Name of the caterpillar to play')
parser.add_argument('--direction', action='store', required=False, default='ccw',
help='Direction of caterpillar', choices=['cw', 'ccw'])
parser.add_argument('--flag', action='store', required=False,
help='Name of the flag to display')
    parser.add_argument('--flag-flicker', action='store', required=False, default=0,
                        help='Number of flag flickering cycles. 0 to disable the effect.')
parser.add_argument('--duration', action='store', required=False, default=None,
help='Duration of animation. None for forever')
parser.add_argument('--speed', action='store', required=False, default=1000,
help='Animation speed in milliseconds')
args = parser.parse_args()
party = AmbilightParty()
speed_seconds = float(args.speed)/1000
if args.list:
party.show_themes_list()
exit()
party.connect(args.ip)
if args.info:
print(party.tv.info())
exit()
elif args.stop:
party.tv.set_mode_internal()
exit()
party.tv.set_mode_manual()
if args.color:
party.tv.set_color(int(args.color[0:2], 16), int(args.color[2:4], 16), int(args.color[4:6], 16))
elif args.demo is not None:
if args.demo == 'caterpillars':
party.demo_caterpillars()
elif args.demo == 'flags':
party.demo_flags()
elif args.demo == 'kitt':
party.demo_kitt(speed=speed_seconds, nb_pixels=1)
else:
party.demo_basic()
elif args.caterpillar:
direction = Direction.CW if args.direction == 'cw' else Direction.CCW
        duration = float(args.duration) if args.duration is not None else None
        party.play_caterpillar(caterpillar_name=args.caterpillar, duration=duration,
                               speed=speed_seconds, direction=direction)
elif args.flag:
if args.flag_flicker:
party.play_flickering_flag(flag_name=args.flag, nb_display=int(args.flag_flicker))
else:
party.play_flag(flag_name=args.flag)
if __name__ == '__main__':
try:
main()
except Exception as e:
print('Error:', e)
sys.exit(1)
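# Example invocations (illustrative; the IP address and theme names are
# hypothetical and depend on the bundled JSON data files):
#
#     python party.py --ip 192.168.0.59 --list
#     python party.py --ip 192.168.0.59 --demo basic
#     python party.py --ip 192.168.0.59 --flag france --flag-flicker 5
#     python party.py --ip 192.168.0.59 --stop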
|
AmbilightParty
|
/AmbilightParty-1.0.tar.gz/AmbilightParty-1.0/ambilight/party.py
|
party.py
|
import sys
import json
import requests
import os
import copy
from abc import ABCMeta, abstractmethod
class AmilightTVObserver:
"""
An object being notified of changes in the Ambilight pixels.
"""
__metaclass__ = ABCMeta
def __init__(self):
"""The observer object"""
self.subject = None
def register_subject(self, subject):
self.subject = subject
def remove_subject(self):
self.subject = None
@abstractmethod
def on_all_pixels_changed(self, red, green, blue):
"""All pixels have been changed"""
pass
@abstractmethod
def on_side_changed(self, side, red, green, blue, layer):
pass
@abstractmethod
def on_single_pixel_changed(self, side, position, red, green, blue, layer):
pass
@abstractmethod
def on_pixels_by_side_changed(self, left_pixels, top_pixels, right_pixels, bottom_pixels, layer):
pass
class AmbilightTV(object):
LEFT = 'left'
TOP = 'top'
RIGHT = 'right'
BOTTOM = 'bottom'
VERSION = 1.0
def __init__(self, ip=None, dryrun=False):
self.dryrun = dryrun
self.ip = ip
self.port = 1925
self.version = 1
self.nb_layers = 0
self.nb_pixels = 0
self.sizes = {AmbilightTV.LEFT: 0, AmbilightTV.TOP: 0, AmbilightTV.RIGHT: 0, AmbilightTV.BOTTOM: 0}
self._observer_list = []
def set_dryrun(self, dryrun):
self.dryrun = dryrun
def set_ip(self, ip):
self.ip = ip
def register_observer(self, observer):
if observer not in self._observer_list:
self._observer_list.append(observer)
observer.register_subject(self)
else:
raise Exception('Observer already registered')
def unregister_observer(self, observer):
if observer in self._observer_list:
observer.remove_subject()
self._observer_list.remove(observer)
else:
raise Exception('Observer not registered')
def _get_base_url(self):
url = 'http://' + self.ip + ':' + str(self.port) + '/' + str(self.version) + '/ambilight'
return url
def _debug_request(self, r):
print('')
print("***** Request :")
print('--- encoding : ' + r.encoding)
print("--- url : " + r.url)
print("--- headers : ")
print(r.request.headers)
print("--- body : ??? ")
print("***** Response :")
print("--- status code : " + str(r.status_code))
print("--- headers : ")
print(r.headers)
print("--- text : " + r.content)
print("--- json : ")
try:
print(r.json())
except Exception:
pass
print('')
def _build_url(self, endpath):
url = self._get_base_url()+endpath
return url
def _build_headers(self):
headers = {'User-Agent': 'AmbilightParty-1.00',
'Content-Type': 'application/json; charset=UTF-8'}
return headers
def ws_get(self, endpath, qs=None):
url = self._build_url(endpath)
if self.dryrun:
return None
r = requests.get(url, params=qs, headers=self._build_headers())
#self._debug_request(r)
return r
def ws_post(self, endpath, qs=None, body=None):
url = self._build_url(endpath)
if self.dryrun:
return None
r = requests.post(url, params=qs, headers=self._build_headers(), data=json.dumps(body))
#self._debug_request(r)
return r
def info(self):
return {'topology': self.get_topology(), 'lib-version': self.VERSION}
def has_top(self):
return self.sizes[AmbilightTV.TOP] != 0
def has_bottom(self):
return self.sizes[AmbilightTV.BOTTOM] != 0
def autoconfigure(self, ip=None):
if ip is not None:
self.ip = ip
js = self.get_topology()
self.nb_layers = js['layers']
self.sizes[AmbilightTV.LEFT] = js['left']
self.sizes[AmbilightTV.TOP] = js['top']
self.sizes[AmbilightTV.RIGHT] = js['right']
self.sizes[AmbilightTV.BOTTOM] = js['bottom']
self.nb_pixels = self.sizes[AmbilightTV.LEFT] + self.sizes[AmbilightTV.TOP] + self.sizes[AmbilightTV.RIGHT] + \
self.sizes[AmbilightTV.BOTTOM]
    def get_mode(self):
        return self.ws_get('/mode')
def set_mode_internal(self):
self.ws_post('/mode', body={'current': 'internal'})
def set_mode_manual(self):
self.ws_post('/mode', body={'current': 'manual'})
def get_topology(self):
if self.dryrun:
return {"bottom": 0, "layers": 1, "left": 4, "right": 4, "top": 9}
return self.ws_get('/topology').json()
def check_parameters(self, side=None, layer=None, position=None):
if side is not None and side not in [AmbilightTV.LEFT, AmbilightTV.TOP, AmbilightTV.RIGHT, AmbilightTV.BOTTOM]:
raise Exception('Bad side value ['+str(side)+']')
        if layer is not None and (layer < 1 or layer > self.nb_layers):
            raise Exception('Bad layer value ['+str(layer)+']')
if position is not None:
if side is None:
raise Exception('side parameter must be specified when position is used')
if position >= self.sizes[side] or position < 0:
raise Exception('Bad position value [%s] for side [%s]' % (position, side))
def set_color(self, red=None, green=None, blue=None):
body = {}
# TODO: factor out the repeated channel assignments below (cf. _generate_api_pixel)
if red is not None:
body['r'] = red
if green is not None:
body['g'] = green
if blue is not None:
body['b'] = blue
self.ws_post('/cached', body=body)
for observer in self._observer_list:
observer.on_all_pixels_changed(red=red, green=green, blue=blue)
def set_black(self):
self.set_color(red=0, green=0, blue=0)
def set_white(self):
self.set_color(red=255, green=255, blue=255)
def set_red(self):
self.set_color(red=255, green=0, blue=0)
def set_green(self):
self.set_color(red=0, green=255, blue=0)
def set_blue(self):
self.set_color(red=0, green=0, blue=255)
def set_side(self, side, red=None, green=None, blue=None, color=None, layer=1):
self.check_parameters(side=side, layer=layer)
layer_key = 'layer'+str(layer)
body = {layer_key: {}}
body[layer_key][side] = self._generate_api_pixel(red, green, blue, color)
self.ws_post('/cached', body=body)
for observer in self._observer_list:
observer.on_side_changed(side=side, red=red, green=green, blue=blue, layer=layer)
def set_sides(self, left_color=None, top_color=None, right_color=None, bottom_color=None, layer=1):
self.check_parameters(layer=layer)
layer_key = 'layer'+str(layer)
body = {layer_key: {}}
if left_color:
body[layer_key][AmbilightTV.LEFT] = self._generate_api_pixel(color=left_color)
if right_color:
body[layer_key][AmbilightTV.RIGHT] = self._generate_api_pixel(color=right_color)
if top_color:
body[layer_key][AmbilightTV.TOP] = self._generate_api_pixel(color=top_color)
if bottom_color:
body[layer_key][AmbilightTV.BOTTOM] = self._generate_api_pixel(color=bottom_color)
self.ws_post('/cached', body=body)
for observer in self._observer_list:
if left_color:
observer.on_side_changed(side=AmbilightTV.LEFT, red=left_color[0], green=left_color[1],
blue=left_color[2], layer=layer)
if right_color:
observer.on_side_changed(side=AmbilightTV.RIGHT, red=right_color[0], green=right_color[1],
blue=right_color[2], layer=layer)
if top_color:
observer.on_side_changed(side=AmbilightTV.TOP, red=top_color[0], green=top_color[1],
blue=top_color[2], layer=layer)
if bottom_color:
observer.on_side_changed(side=AmbilightTV.BOTTOM, red=bottom_color[0], green=bottom_color[1],
blue=bottom_color[2], layer=layer)
def set_pixel(self, side, position, red=None, green=None, blue=None, color=None, layer=1):
self.check_parameters(side=side, layer=layer, position=position)
layer_key = 'layer'+str(layer)
body = {layer_key: {}}
body[layer_key][side] = {}
body[layer_key][side][position] = self._generate_api_pixel(red, green, blue, color)
self.ws_post('/cached', body=body)
for observer in self._observer_list:
observer.on_single_pixel_changed(side=side, position=position,
red=red, green=green, blue=blue, layer=layer)
def set_pixels_by_side(self, left_pixels=None, top_pixels=None, right_pixels=None, bottom_pixels=None, layer=1):
left_pixels = copy.deepcopy(left_pixels)
top_pixels = copy.deepcopy(top_pixels)
right_pixels = copy.deepcopy(right_pixels)
bottom_pixels = copy.deepcopy(bottom_pixels)
json_layer = {}
self._inject_pixels_for_side(json_layer, AmbilightTV.LEFT, left_pixels)
self._inject_pixels_for_side(json_layer, AmbilightTV.TOP, top_pixels)
self._inject_pixels_for_side(json_layer, AmbilightTV.RIGHT, right_pixels)
self._inject_pixels_for_side(json_layer, AmbilightTV.BOTTOM, bottom_pixels)
body = {'layer'+str(layer): json_layer}
self.ws_post('/cached', body=body)
for observer in self._observer_list:
observer.on_pixels_by_side_changed(left_pixels=left_pixels, top_pixels=top_pixels,
right_pixels=right_pixels, bottom_pixels=bottom_pixels, layer=layer)
@staticmethod
def _generate_api_pixel(red=None, green=None, blue=None, color=None):
if color is not None:
return {'r': color[0], 'g': color[1], 'b': color[2]}
pixel = {}
if red is not None:
pixel['r'] = red
if green is not None:
pixel['g'] = green
if blue is not None:
pixel['b'] = blue
return pixel
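# Illustrative examples (not part of the original file):
# _generate_api_pixel(color=(255, 0, 0)) -> {'r': 255, 'g': 0, 'b': 0}
# _generate_api_pixel(red=10, blue=20)   -> {'r': 10, 'b': 20}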
@staticmethod
def _inject_pixels_for_side(dict_for_layer, side, pixels):
if pixels is None:
return
dict_for_layer[side] = {}
if isinstance(pixels, list):
for i in range(len(pixels)):
dict_for_layer[side][str(i)] = {'r': pixels[i][0], 'g': pixels[i][1], 'b': pixels[i][2]}
elif isinstance(pixels, dict):
for pos, pixel in pixels.items():
dict_for_layer[side][pos] = {'r': pixel[0], 'g': pixel[1], 'b': pixel[2]}
else:
raise Exception('Unexpected type for pixels container')
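#A minimal usage sketch (not part of the original file). The IP address is an
#illustrative placeholder; dryrun=True keeps the example offline, so no request
#is actually sent -- drop it to drive a real TV.
if __name__ == '__main__':
    tv = AmbilightTV(ip='192.168.1.50', dryrun=True)
    tv.autoconfigure()
    tv.set_mode_manual()
    tv.set_side(AmbilightTV.TOP, red=255, green=0, blue=0)
    tv.set_pixel(AmbilightTV.LEFT, position=0, color=(0, 255, 0))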
|
AmbilightParty
|
/AmbilightParty-1.0.tar.gz/AmbilightParty-1.0/ambilight/tv.py
|
tv.py
|
################## module imports ##################
from scipy.signal import convolve2d
from scipy.integrate import quad
#from numba import njit
import numpy as np
import math
#flip the filter in the convolution step
#from .helpers.input_checks import check1
from .helpers.input_checks import check_grid_params
from .helpers.input_checks import check_cpp_params
from .helpers.input_checks import check_trawl_function
from .helpers.input_checks import check_jump_part_and_params
from .helpers.input_checks import check_gaussian_params
from .helpers.sampler import gaussian_part_sampler
from .helpers.sampler import jump_part_sampler
from .helpers.sampler import generate_cpp_points
from .helpers.acf_functions import fit_trawl_envelope_gmm
from .helpers.marginal_distribution_functions import fit_trawl_marginal
#from .heleprs.sampler import generate_cpp_values_associated_to_points
from .helpers.alternative_convolution_implementation import cumulative_and_diagonal_sums
from .helpers.forecasting_helpers import deterministic_forecasting
from .helpers.forecasting_helpers import probabilistic_forecasting
#from scipy.optimize import minimize
#from statsmodels.tsa.stattools import acf
#helper_module = import_file(os.path.join(Path().resolve().parent,'helpers','loss_functions'))
###################################################
class trawl:
def __init__(self,nr_simulations,nr_trawls = None, trawl_function=None, tau = None,
decorrelation_time = -np.inf, mesh_size = None, times_grid = None,
truncation_grid = None, gaussian_part_params= (0,0), jump_part_name=None,
jump_part_params= None, cpp_times = None, cpp_truncation = None, cpp_part_name = None, cpp_part_params = None,
cpp_intensity = None, custom_sampler = None, values=None):
"""Please consult the `Trawl processes example usage` jupyter notebook from https://github.com/danleonte/Ambit_Stochastics
to see a practical example with detailed explanations.
The implemented simulation algorithms are the grid, slice and cpp algorithms, as described in [paper link]. Parameter inference and forecasting methods to be added.
The arguments required for the `simulate` method are `nr_simulations` and `trawl_function`.
Further, the slice method requires `nr_trawls`,
`tau`, `decorrelation_time`, `gaussian_part_params`, `jump_part_name`,`jump_part_params`,
the grid method requires
`mesh_size`,`times_grid`,`truncation_grid`, `gaussian_part_params`,`jump_part_name`,`jump_part_params` and
the cpp method requires `cpp_truncation`, `cpp_part_name`, `cpp_times`,
`cpp_part_params`, `cpp_intensity` and `custom_sampler`.
Args:
The following parameters are for any of the simulation algorithms.
nr_simulations: positive integer: number of simulations of the trawl process.
trawl_function: a non-negative, continuous, strictly increasing function \(\phi \colon (-\infty,0] \\to [0,\infty)\) with \(\phi(0) >0, \phi(t) =0\) for \(t>0\).
The following parameters are for both the slice and grid simulation methods.
gaussian_part_params: tuple with the mean and standard deviation of the Gaussian Part
jump_part_name: string: name of the jump part distribution; check `helpers.sampler` for the parametrisation.
jump_part_params: tuple with the parameters of the jump part distribution; check `helpers.sampler` for the parametrisation.
The following parameters are for the slice simulation method.
nr_trawls: positive integer: number of ambit sets on the time axis.
tau: positive number: spacing between ambit sets on the time axis; the times at which we simulate the trawl processes are then \(\\tau, \\ldots,\\text{nr_trawls} \ \\tau\).
decorrelation_time: \(-\infty\) if the ambit set A is unbounded, and a finite, strictly negative number otherwise. For example, if \(\phi(x) = (1+x)(x>-1)(x<=0)\), then `decorrelation_time = -1`.
The following parameters are for the grid simulation method.
mesh_size: positive float, side-length of each cell.
times_grid: array: times at which to simulate the trawl process, necessarily in increasing order.
truncation_grid: strictly negative float: in the grid simulation method, we simulate the parts of the ambit sets contained in \(t > \\text{truncation_grid} + \\text{min(times_grid)}\).
The following parameters are for both the cpp simulation methods.
cpp_times: array: times at which to simulate the trawl process.
cpp_truncation: strictly negative float: we simulate the parts of the ambit sets contained in \(t > \\text{cpp_truncation} + \\text{min(cpp_times)}\).
cpp_part_name: to add
cpp_part_params: to add
cpp_intensity: to add
custom_sampler: to add
values: a numpy array with shape \([\\text{nr_simulations},\\text{nr_trawls}]\) which is passed by the user or simulated with the method `trawl.simulate`.
"""
#general attributes
self.nr_simulations = nr_simulations
#attributes required for simulation
self.trawl_function = trawl_function
#########################################################################
### attributes required for both grid and slice simulation algorithms ###
# distributional parameters of the gaussian and jump parts of the levy seed
# jump_part_name and jump_part_params are also required for the grid method
self.gaussian_part_params = gaussian_part_params
self.jump_part_name = jump_part_name
self.jump_part_params = jump_part_params
#############################################################################
### attributes required only for the slice partition simulation algorithm ###
self.nr_trawls = nr_trawls
self.tau = tau
self.decorrelation_time = decorrelation_time
self.I = None
self.slice_areas_matrix = None
##################################################################
### attributes required only for the grid simulation algorithm ###
self.times_grid = times_grid
self.truncation_grid = truncation_grid
self.mesh_size = mesh_size
self.vol = None
#self.indicator_matrix = None
##################################################################
### attributes required only for the cpp simulation algorithm ###
self.cpp_truncation = cpp_truncation
self.cpp_part_name = cpp_part_name
self.cpp_part_params = cpp_part_params
self.cpp_intensity = cpp_intensity
self.custom_sampler = custom_sampler
self.cpp_times = cpp_times
### arrays containing the gaussian, jump and cpp parts of the simulation
self.gaussian_values = None
self.jump_values = None
self.cpp_values = None
### passed by the user or to be simulated using one of the simulation methods ###
self.values = values
#if the values are passed by the user and not simulated
if values is not None:
self.nr_simulations, self.nr_trawls = self.values.shape
#############################################################################
### attributes required only for the parameter inference ###
self.infered_parameters = None
# {'envelope': exponential, 'levy_seed': 'gamma', 'params' :
# {'envelope_params': tuple of tuples , 'levy_seed_params': tuple of tuples}}
######################################################################
### Simulation algorithms: I slice, II grid, III cpp ###
######################################################################
########################### I Slice ###########################
def compute_slice_areas_finite_decorrelation_time(self):
"""Computes the \(I \\times k\) matrix
\[\\begin{bmatrix}
a_0 & a_0 - a_1 \\ldots & a_0 - a_1 \\\\
a_1 & a_1 - a_2 \\ldots & a_1 - a_2 \\\\
a_2 & a_2 - a_3 \\ldots & a_2 - a_3 \\\\
& \\vdots & \\\\
a_{k-2} & a_{k-2} - a_{k-1} \\ldots & a_{k-2} - a_{k-1} \\\\
a_{k-1} & a_{k-1} & a_{k-1}
\\end{bmatrix}\]
corresponding to the areas of the slices
\[\\begin{bmatrix}
L(S_{11}) & \\ldots & L(S_{1,k-1}) & L(S_{1k}) \\\\
L(S_{21}) & \\ldots & L(S_{2,k-1}) & L(S_{2k}) \\\\
\\vdots & & \\vdots & \\vdots \\\\
L(S_{I1}) & \\ldots & L(S_{I,k-1}) & L(S_{I,k})
\\end{bmatrix}
\]
where \(k =\) `self.nr_trawls` and
\[\\begin{align}
a_0 &= \int_{-\\tau}^0 \phi(u)du, \\\\
\\vdots & \\\\
a_{k-2} &= \int_{(-k+1)\\tau} ^{(-k+2) \\tau} \phi(u) du, \\\\
a_{k-1} &= \int_{\\text{decorrelation_time}}^{(-k+1)\\tau} \phi(u)du.
\\end{align}
\]
"""
self.I = math.ceil(-self.decorrelation_time/self.tau)
s_i1 = [quad(self.trawl_function,a=-i *self.tau, b = (-i+1) * self.tau)[0]
for i in range(1,self.I+1)]
s_i2 = np.append(np.diff(s_i1[::-1])[::-1],s_i1[-1])
right_column = np.tile(s_i2[:,np.newaxis],(1,self.nr_trawls-1))
left_column = np.array(s_i1)[:,np.newaxis]
self.slice_areas_matrix = np.concatenate([left_column,right_column],axis=1)
#to add I-1 columns of length I zeros
#check entire program here and compare with previous versions
def compute_slice_areas_infinite_decorrelation_time(self):
"""Computes the \(k \\times k\) matrix
\[\\begin{bmatrix}
a_0 & a_0 - a_1 & a_0 - a_1 & \\ldots & a_0 - a_1 & a_0 - a_1 & a_0 \\\\
a_1 & a_1 - a_2 & a_1 - a_2 & \\ldots & a_1 - a_2 & a_1 & 0 \\\\
a_2 & a_2 - a_3 & a_2 - a_3 & \\ldots & a_2 & 0 & 0 \\\\
& & & \\vdots & & & \\\\
a_{k-2} & a_{k-2} & 0 & \\ldots & 0 & 0 & 0 \\\\
a_{k-1} & 0 & 0 & & 0 & 0 & 0
\\end{bmatrix}\]
corresponding to the areas of the slices
\[\\begin{bmatrix}
L(S_{11}) & \\ldots & L(S_{1,k-1}) & L(S_{1k}) \\\\
L(S_{21}) & \\ldots & L(S_{2,k-1}) & 0 \\\\
\\vdots & & \\vdots & \\vdots \\\\
L(S_{k1}) & \\ldots & 0 & 0
\\end{bmatrix}
\]
where \(k =\) `self.nr_trawls` and
\[\\begin{align}
a_0 &= \int_{-\\tau}^0 \phi(u)du, \\\\
\\vdots & \\\\
a_{k-2} &= \int_{(-k+1)\\tau} ^{(-k+2) \\tau} \phi(u) du, \\\\
a_{k-1} &= \int_{-\infty}^{(-k+1)\\tau} \phi(u)du.
\\end{align}
\]
"""
s_i1 = [quad(self.trawl_function,a=-i *self.tau, b = (-i+1) * self.tau)[0]
for i in range(1,self.nr_trawls)] + [quad(self.trawl_function,a=-np.inf,
b=(-self.nr_trawls+1)*self.tau)[0]]
# a[0] -a[1] ,a[1] -a[2], ... , a[k-2] - a[k-1] , 0
differences = np.append(np.diff(s_i1[::-1])[::-1],0)
left_column = np.array(s_i1)[:,np.newaxis]
right_column = np.zeros((self.nr_trawls,1))
#we reconstruct the elements on the secondary diagonal at the end
middle_matrix = np.tile(differences[:,np.newaxis],(1,self.nr_trawls-2))
whole_matrix = np.concatenate([left_column,middle_matrix,right_column],axis=1)
whole_matrix_reversed = np.triu(np.fliplr(whole_matrix), k=0)
np.fill_diagonal(whole_matrix_reversed,s_i1)
self.slice_areas_matrix = np.fliplr(whole_matrix_reversed)
def simulate_slice_finite_decorrelation_time(self,slice_convolution_type):
"""helper for the `simulate_slice` method"""
filter_ = np.fliplr(np.tril(np.ones(self.I),k=0))
zero_matrix = np.zeros([self.I,self.I-1])
for simulation_nr in range(self.nr_simulations):
gaussian_slices = gaussian_part_sampler(self.gaussian_part_params,self.slice_areas_matrix)
jump_slices = jump_part_sampler(self.jump_part_params,self.slice_areas_matrix,self.jump_part_name)
if slice_convolution_type == 'fft':
gaussian_slices = np.concatenate([zero_matrix,gaussian_slices],axis=1)
jump_slices = np.concatenate([zero_matrix,jump_slices],axis=1)
#matrix with 1's on and below the secondary diagonal
#flip the filter to agree with np convention
self.gaussian_values[simulation_nr,:] = convolve2d(gaussian_slices,filter_[::-1,::-1],'valid')[0]
self.jump_values[simulation_nr,:] = convolve2d(jump_slices,filter_[::-1,::-1],'valid')[0]
elif slice_convolution_type == 'diagonals':
self.gaussian_values[simulation_nr,:] = cumulative_and_diagonal_sums(gaussian_slices)
self.jump_values[simulation_nr,:] = cumulative_and_diagonal_sums(jump_slices)
def simulate_slice_infinite_decorrelation_time(self,slice_convolution_type):
"""Helper for the `simulate_slice` method."""
zero_matrix = np.zeros([self.nr_trawls,self.nr_trawls-1])
filter_ = np.fliplr(np.tril(np.ones(self.nr_trawls),k=0))
for simulation_nr in range(self.nr_simulations):
gaussian_slices = gaussian_part_sampler(self.gaussian_part_params,self.slice_areas_matrix)
#the lower triangular part of the matrix is made up of 0's, which can result in an error
#in the scipy.stats sampler (for example, if the levy seed is gamma)
#to prevent this, we extract the upper triangular part of the matrix as a vector
#sample this way, then recast the samples as an upper triangular matrix
slice_areas_row = (np.fliplr(self.slice_areas_matrix))[np.triu_indices(self.slice_areas_matrix.shape[0], k = 0)]
jump_slices_row = jump_part_sampler(self.jump_part_params,slice_areas_row,self.jump_part_name)
jump_slices = np.zeros(gaussian_slices.shape)
jump_slices[np.triu_indices(jump_slices.shape[0], k = 0)] = jump_slices_row
jump_slices = np.fliplr(jump_slices)
if slice_convolution_type == 'fft':
gaussian_slices = np.concatenate([zero_matrix,gaussian_slices],axis=1)
jump_slices = np.concatenate([zero_matrix,jump_slices],axis=1)
self.gaussian_values[simulation_nr,:] = convolve2d(gaussian_slices,filter_[::-1,::-1],'valid')[0]
self.jump_values[simulation_nr,:] = convolve2d(jump_slices,filter_[::-1,::-1],'valid')[0]
elif slice_convolution_type == 'diagonals':
self.gaussian_values[simulation_nr,:] = cumulative_and_diagonal_sums(gaussian_slices)
self.jump_values[simulation_nr,:] = cumulative_and_diagonal_sums(jump_slices)
def simulate_slice(self,slice_convolution_type):
"""implements algorithm [] from [] and simulates teh trawl process at
\(\\tau,\\ldots,\\text{nr_trawls}\ \\tau\). `slice_convolution_type` can be either [to add]"""
if self.decorrelation_time == -np.inf:
self.compute_slice_areas_infinite_decorrelation_time()
self.simulate_slice_infinite_decorrelation_time(slice_convolution_type)
elif self.decorrelation_time > -np.inf:
assert self.trawl_function(self.decorrelation_time) == 0, 'please check decorrelation time'
self.compute_slice_areas_finite_decorrelation_time()
self.simulate_slice_finite_decorrelation_time(slice_convolution_type)
#self.values = self.gaussian_values + self.jump_values
############################ II Grid ############################
def grid_creation(self,min_t,max_t):
"""Creates a grid on \([0,\phi(0)] \\times [\\text{min_t}, \\text{max_t}]\). Each cell is represented by
the coordinates of its bottom left corner. To each cell we associate a sample from each of the gaussian
and jump parts of the trawl process.
Returns:
gaussian_values: array with the Gaussian of the Levy basis evaluated over the cells
jump_values: array with the jump part of the Levy basis evaluated over the cells
"""
coords = np.mgrid[0:self.trawl_function(0):self.mesh_size,min_t:max_t:self.mesh_size]
x, t = coords[0].flatten(), coords[1].flatten()
areas = self.vol * np.ones([self.nr_simulations,len(t)])
gaussian_values = gaussian_part_sampler(self.gaussian_part_params,areas)
jump_values = jump_part_sampler(self.jump_part_params,areas,self.jump_part_name)
return x,t,gaussian_values,jump_values
def grid_update(self,i,t,gaussian_values,jump_values):
"""Inputs the values of the Levy basis evaluated over the grid cells on \([\\tau_{i-1}+\\text{truncation_grid},\\tau_{i-1}] \\times [0,\\phi(0)]\),
removes the values corresponding to cells with time coordinates less than \(\\tau_{i} + \\text{truncation_grid}\) and adds new samples
for the levy basis evaluated over the grid cells with time coordinates in \([\\tau_{i-1},\\tau_i]\) (see figure).
Assumes that the consecutive ambit sets at times \(\\tau_{i-1},\\tau_i\) are not disjoint, i.e.
\(\\tau_i + \\text{truncation_grid} < \\tau_{i-1}\).
Args:
i: index of the trawl to be simulated
t: time coordinates of the cells of the grid on \([\\tau_{i-1},\\tau_{i-1}+\\text{truncation_grid}] \\times [0,\phi(0)]\)
gaussian_values: gaussian values for the grid on \([\\tau_{i-1},\\tau_{i-1}+\\text{truncation_grid}] \\times [0,\phi(0)]\)
jump_values: jump values for the grid on \([\\tau_{i-1},\\tau_{i-1}+\\text{truncation_grid}] \\times [0,\phi(0)\)
Returns:
gaussian_values: gaussian values for the grid cells on \([\\tau_{i},\\tau_{i}+\\text{truncation_grid}] \\times [0,\phi(0)]\)
jump_values: jump values for the grid cells on \([\\tau_{i},\\tau_{i}+\\text{truncation_grid}] \\times [0,\phi(0)]\)
"""
ind_to_keep = t >= (self.times_grid[i] + self.truncation_grid)
t[~ind_to_keep] += -self.truncation_grid
areas = self.vol * np.ones([self.nr_simulations,sum(~ind_to_keep)])
#print(gaussian_values[:,~ind_to_keep].shape)
#print(self.gaussian_part_sampler(areas).shape)
gaussian_values[:,~ind_to_keep] = gaussian_part_sampler(self.gaussian_part_params,areas)
jump_values[:,~ind_to_keep] = jump_part_sampler(self.jump_part_params,areas,self.jump_part_name)
#print('ind to keep sum is ', ind_to_keep.sum())
#print('gaussian is ',gaussian_values.shape)
#print('non_gaussian_values ',non_gaussian_values[:,ind_to_keep].shape)
#print('new_gaussian_values ',new_gaussian_values.shape)
#print('t new shape is ',t.shape)
#print('x shape is', x.shape)
return t,gaussian_values,jump_values
def simulate_grid(self):
"""Simulate the trawl proces at times `self.times_grid`, which don't have to be
equally distant, via the grid method."""
#If `times_grid` are equidistnant, we do not need to compute `indicators` at each iteration, speeding up the process
for i in range(len(self.times_grid)):
if (i==0) or (self.times_grid[i-1] <= self.times_grid[i] + self.truncation_grid):
#check that we are creating the grid for the first time or that
#trawls at time i-1 and i have empty intersection
x,t,gaussian_values, jump_values = self.grid_creation(self.times_grid[i] + self.truncation_grid, self.times_grid[i])
elif self.times_grid[i-1] > self.times_grid[i] + self.truncation_grid:
#check that we have non empty intersection and update the grid
t,gaussian_values,jump_values = self.grid_update(i,t,gaussian_values,jump_values)
indicators = x < self.trawl_function(t-self.times_grid[i])
#print(gaussian_values.shape,indicators.shape)
self.gaussian_values[:,i] = gaussian_values @ indicators
self.jump_values[:,i] = jump_values @ indicators
#self.values = self.gaussian_values + self.jump_values
########################### III cpp ###########################
# @njit
def simulate_cpp(self):
""" text to be added"""
min_t = min(self.cpp_times) + self.cpp_truncation
max_t = max(self.cpp_times)
min_x = 0
max_x = self.trawl_function(0)
for simulation_nr in range(self.nr_simulations):
points_x, points_t, associated_values = generate_cpp_points(min_x = min_x, max_x = max_x,
min_t = min_t, max_t = max_t, cpp_part_name = self.cpp_part_name,
cpp_part_params = self.cpp_part_params, cpp_intensity = self.cpp_intensity,
custom_sampler = self.custom_sampler)
#(x_i,t_i) in A_t if t < t_i and x_i < phi(t_i-t)
indicator_matrix = np.tile(points_x[:,np.newaxis],(1,self.nr_trawls)) < \
self.trawl_function(np.subtract.outer(points_t, self.cpp_times))
self.cpp_values[simulation_nr,:] = associated_values @ indicator_matrix
####################### simulate meta-method #######################
def simulate(self,method,slice_convolution_type='diagonals'):
"""Function to simulate from the trawl function. Contains sanity checks
for the simulation parameters and uses helper functions for each simulation
method.
Args:
method: one of the strings `cpp`, `grid` or `slice`
slice_convolution_type: if method is set to `slice`, this can be one of the strings `diagonals` or `fft`, depending on the way we add up the simulated slices. This argument is ignored if method is set to `grid` or `cpp`."""
#general checks
assert isinstance(self.nr_simulations,int) and self.nr_simulations >0
assert method in {'cpp','grid','slice'},'simulation method not supported'
check_trawl_function(self.trawl_function)
check_gaussian_params(self.gaussian_part_params)
#algorithm specific checks and attribute setting
if method == 'grid':
check_jump_part_and_params(self.jump_part_name,self.jump_part_params)
check_grid_params(self.mesh_size,self.truncation_grid,self.times_grid)
self.nr_trawls = len(self.times_grid)
self.vol = self.mesh_size **2
elif method == 'cpp':
check_cpp_params(self.cpp_part_name, self.cpp_part_params,self.cpp_intensity,self.custom_sampler)
self.nr_trawls = len(self.cpp_times)
elif method == 'slice':
assert slice_convolution_type in {'fft','diagonals'}
assert isinstance(self.nr_trawls,int) and self.nr_trawls > 0,'nr_trawls should be a strictly positive integer'
check_jump_part_and_params(self.jump_part_name,self.jump_part_params)
self.gaussian_values = np.zeros(shape = [self.nr_simulations,self.nr_trawls])
self.jump_values = np.zeros(shape = [self.nr_simulations,self.nr_trawls])
self.cpp_values = np.zeros(shape = [self.nr_simulations,self.nr_trawls])
if method == 'grid':
self.simulate_grid()
elif method == 'cpp':
self.simulate_cpp()
elif method == 'slice':
self.simulate_slice(slice_convolution_type)
self.values = self.gaussian_values + self.jump_values + self.cpp_values
def theoretical_acf(self,t_values):
"""Computes the theoretical acf of the trawl process
Args:
t_values: array of time values
Returns:
d_acf: a dictionary of the type \(t: \\text{corr}(X_0,X_t)\), where \(t\) ranges over the input array `t_values`.
"""
total_area = quad(self.trawl_function,a=-np.inf,b= 0)[0]
d_acf=dict()
for t in t_values:
d_acf[t] = quad(self.trawl_function,a=-np.inf,b= -t)[0]/total_area
return d_acf
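#Worked example (illustrative): for the exponential trawl function \(\phi(u) = e^u\),
#total_area = 1 and \(\\text{corr}(X_0,X_t) = e^{-t}\), so theoretical_acf([0, 1])
#returns {0: 1.0, 1: 0.3679} up to numerical integration error.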
######################################################################
### Forecasting: deterministic and probabilistic ###
######################################################################
def fit_gmm(self,input_values,envelope,levy_seed,lags,initial_guess=None):
assert isinstance(lags,tuple) and all(isinstance(i,int) for i in lags)
print('gmm fit started')
envelope_params = fit_trawl_envelope_gmm(self.tau,input_values,lags,envelope,initial_guess)
levy_seed_params = fit_trawl_marginal(input_values,levy_seed)
params = {'envelope_params':envelope_params,'levy_seed_params': levy_seed_params}
self.infered_parameters = {'envelope': envelope, 'levy_seed': levy_seed, 'params' : params}
print('gmm fit finished')
def fit_cl(self,input_values, envelope, levy_seed, lags, cl_optimisation_params =
{'nr_steps_optimisation' : 25,'nr_mc_samples' : 10**4, 'nr_repetitions' : 5},
initial_guess = None):
assert isinstance(lags,tuple) and all(isinstance(i,int) for i in lags)
print('cl fit started')
print('cl fit finished')
pass
def predict(self,input_values, steps_ahead, deterministic, max_gaussian_lag = 1, nr_samples = None):
#input_values = self.values[:,starting_index:]
assert isinstance(input_values,np.ndarray) and len(input_values.shape) == 2
assert deterministic in [True,False]
##get the fitted parameters for the envelope and for the levy seed from the attribute self.infered_parameters
envelope = self.infered_parameters['envelope']
levy_seed = self.infered_parameters['levy_seed']
envelope_params = self.infered_parameters['params']['envelope_params']
levy_seed_params = self.infered_parameters['params']['levy_seed_params']
nr_simulations, simulation_length = input_values.shape
d={}
if levy_seed == 'gaussian':
assert isinstance(max_gaussian_lag,int) and max_gaussian_lag > 0 and input_values.shape[-1] >= max_gaussian_lag
for nr_steps_ahead in steps_ahead:
if deterministic == True:
array_to_add = np.zeros([nr_simulations,simulation_length - int((max_gaussian_lag - 1)) * (levy_seed == 'gaussian')])
elif deterministic == False:
array_to_add = np.zeros([nr_simulations, simulation_length - int((max_gaussian_lag - 1)) *(levy_seed == 'gaussian'),
nr_samples])
for i in range(nr_simulations):
#to deal with gaussian lag helper
if deterministic == True:
array_to_add[i] = deterministic_forecasting(tau = self.tau, nr_steps_ahead = nr_steps_ahead ,
values = input_values[i], levy_seed = levy_seed, levy_seed_params = levy_seed_params[i],
envelope = envelope, envelope_params = envelope_params[i],
envelope_function = None, max_gaussian_lag = max_gaussian_lag)
elif deterministic == False:
print(i)
array_to_add[i] = probabilistic_forecasting(tau = self.tau, nr_steps_ahead = nr_steps_ahead, values = input_values[i],
levy_seed = levy_seed, levy_seed_params = levy_seed_params[i],
envelope = envelope, envelope_params = envelope_params[i], nr_samples = nr_samples,
envelope_function = None, max_gaussian_lag = max_gaussian_lag)
d[nr_steps_ahead] = array_to_add
return d
#if type_ == 'deterministic':
# deterministic_forecasting(tau_ahead,values,levy_seed,levy_seed_params,envelope,
# envelope_params, nr_samples, envelope_function = None)
#elif type__ == 'probabilistic':
# pass
def fit_predict(self,steps_ahead,deterministic,fitting_method,envelope,levy_seed,lags,
initial_training_window,refit,refit_freq=None,initial_guess = None,
cl_optimisation_params =
{'nr_steps_optimisation' : 25,'nr_mc_samples' : 10**4, 'nr_repetitions' : 5},
max_gaussian_lag = None, nr_samples=None):
#check inputs
assert all(isinstance(x, int) and x >0 for x in steps_ahead)
assert deterministic in [True,False]
assert fitting_method in ['gmm','cl']
assert refit in [True,False]
if refit == True:
assert refit_freq >0 and isinstance(refit_freq,int)
assert isinstance(lags,tuple) and all(isinstance(i,int) for i in lags)
assert isinstance(initial_training_window,int) and initial_training_window > 0
if fitting_method == 'cl':
assert set(cl_optimisation_params.keys()) == {'nr_mc_samples', 'nr_repetitions', 'nr_steps_optimisation'}
assert all((isinstance(x,int) and x > 0) for x in cl_optimisation_params.values())
if deterministic == False:
assert isinstance(nr_samples,int) and nr_samples > 0
if fitting_method == 'gmm':
self.fit_gmm(input_values = self.values[:,:initial_training_window],envelope = envelope,
levy_seed = levy_seed, lags = lags, initial_guess = initial_guess)
elif fitting_method == 'cl':
self.fit_cl(input_values = self.values[:,:initial_training_window],
envelope = envelope, levy_seed = levy_seed, lags = lags,cl_optimisation_params =
{'nr_steps_optimisation' : 25,'nr_mc_samples' : 10**4, 'nr_repetitions' : 5},
initial_guess = None)
if refit == False:
return self.predict(input_values = self.values[:,:initial_training_window], steps_ahead = steps_ahead,
deterministic = deterministic, max_gaussian_lag = max_gaussian_lag,
nr_samples = nr_samples)
elif refit == True:
raise ValueError('not yet implemented')
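#A minimal, hypothetical usage sketch of the slice method (not part of the
#original file). The trawl function is illustrative, and the 'gamma' jump seed
#with its (shape, loc, scale) tuple is an assumption about the scipy-style
#parametrisation documented in `helpers.sampler`.
if __name__ == '__main__':
    example = trawl(nr_simulations=2, nr_trawls=100, tau=0.25,
                    trawl_function=lambda t: np.exp(t) * (t <= 0),
                    gaussian_part_params=(0, 1),
                    jump_part_name='gamma', jump_part_params=(2.0, 0.0, 1.0))
    example.simulate(method='slice', slice_convolution_type='diagonals')
    print(example.values.shape)  # (2, 100)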
|
Ambit-Stochastics
|
/Ambit_Stochastics-1.0.6-py3-none-any.whl/ambit_stochastics/trawl.py
|
trawl.py
|
###################################################################
#imports
from collections import Counter
from itertools import chain
from scipy.optimize import fsolve
from scipy.integrate import quad
import numpy as np
import math
import time
#from numba import njit
from .helpers.input_checks import check_trawl_function
from .helpers.input_checks import check_jump_part_and_params
from .helpers.input_checks import check_gaussian_params
from .helpers.input_checks import check_spatio_temporal_positions
from .helpers.sampler import gaussian_part_sampler
from .helpers.sampler import jump_part_sampler
###################################################################
class simple_ambit_field:
def __init__(self, x, tau, k_s, k_t, nr_simulations, ambit_function=None, decorrelation_time=-np.inf,
gaussian_part_params=None, jump_part_name=None, jump_part_params=None,
batch_size=None, total_nr_samples=None, values=None):
"""Container class for the simulation, parameter inference and forecasting of ambit fields of the form \(Y_t(x) = L(A+(x,t))\).
Args:
x: positive number: spacing between ambit sets on the space axis.
tau: positive number: spacing between ambit sets on the time axis.
k_s: positive integer: number of ambit sets on the space axis.
k_t: positive integer: number of ambit sets on the time axis.
nr_simulations: positive integer: number of simulations.
ambit_function: a non-negative, continuous, strictly increasing function \(\phi \colon (-\infty,0] \\to [0,\infty)\) with \(\phi(0) > 0, \phi(t) =0\) for \(t>0\).
decorrelation_time: \(-\infty\) if the ambit set A is unbounded, and a finite, strictly negative number otherwise.
gaussian_part_params: tuple with the mean and standard deviation of the Gaussian part.
jump_part_name: string: name of the jump part distribution; check `helpers.sampler` for the parametrisation.
jump_part_params: tuple with the parameters of the jump part distribution; check `helpers.sampler` for the parametrisation.
batch_size: positive integer: number of points to be used at once in the `approximate_slices` method, in order to optimise for cache memory.
total_nr_samples: positive integer: total number of points to be used in the `approximate_slices` method.
values: a numpy array with shape \([\\text{nr_simulations},k_s,k_t]\) which is passed by the user or simulated with the method `simple_ambit_field.simulate`.
"""
#################################################################################
check_spatio_temporal_positions(x, tau, k_s, k_t, nr_simulations)
self.x = x
self.tau = tau
self.k_s = k_s
self.k_t = k_t
self.nr_simulations = nr_simulations
### simulation parameters ###
self.ambit_function = ambit_function
self.gaussian_part_params = gaussian_part_params
self.jump_part_name = jump_part_name
self.jump_part_params = jump_part_params
self.decorrelation_time = decorrelation_time
self.total_nr_samples = total_nr_samples
self.batch_size = batch_size
### dimension of the indicator matrix for each minimal slice
### if decorrelation_time > -inf, I_t = ceiling(decorrelation_time/tau)
### if decorrelation_time = -inf, I_t = k_t - T/tau,
### where T = tau * floor{\phi^{-1}(x)/tau + 1} ###
self.I_t = None
self.I_s = None #I_s = math.ceil(self.ambit_function(0)/self.x)
### minimal slices on t > T/tau ###
self.unique_slices = None
self.unique_slices_areas = None
### correction slices given by the intersections of ambit sets \(A_ij\) with \(1 \le i \le k_s, 1 \le j \le k_t\)
### with \(t < T/tau\), which we list from left to right in an array
self.correction_slices_areas = None
### container for gaussian and jump parts. their sum is the result ###
self.gaussian_values = None
self.jump_values = None
### passed by the user or simulated using the simulate method
### must have shape [nr_simulations,k_s,k_t] ###
if values is None:
self.values = None
else:
assert isinstance(
values, np.ndarray), 'the values argument is not a numpy array'
assert values.shape == (
nr_simulations, k_s, k_t), 'please check the shape of the values argument'
self.values = values
#########################################################################################
### infered simulation parameters: skip this if you are only interested in simulations###
self.inferred_parameters = None
# self.inferred_parameters is a list with elements dictionaries of the form
# {'inferred_ambit_function_name': , inferred_ambit_function_params: ,
# 'inferred_gaussian_params': ,'inferred_jump_params': }
# inferred_ambit_function is 'exponential','gamma', 'ig' or a lambda function
# inferred_ambit_function_params is a tuple
# inferred_gaussian_params is a tuple containing the mean and scale
# inferred_jump_params is a dictionary containing the name of the distribution
# and its params, such as {'gamma': (1,1)}
##########################################################################################
def delete_values(self):
"""Deletes the `values` attribute"""
if self.values is not None:
self.values = None
print('self.values has been deleted')
#else:
#print('no values to delete')
def determine_slices_from_points(self, points_x, points_t):
"""Helper for the 'approximate_slices' method. Given random points with coordinates
`points_x` and `points_t` coordinates from the uniform distribution on \([x,x+\phi(0)] \\times [0,\\tau]\)
which do not belong to \(A_{01}\) or \(A_{10}\), we check in which slice \(S\) of \(\mathcal{S}_{kl}\) each point is.
we do so by using a 3d array with shape \([\\text{nr_sampled_points},I_s,I_t]\) where the \(i^{\\text{th}}\) element \([i,:,:]\) is a matrix with \(\\text{kl}^{\\text{th}}\) element
is a boolean given by \(x_i - x \cdot k < \phi(t_i -t \cdot l) \cdot (T < t_i-t \cdot l <0)\)
where \((x_i,t_i)\) are the coordinates of the \(i^{\\text{th}}\) uniform random sample.
[check description of the indicator here]
Args:\n
points_x: x coordinates of the uniformly sampled points
points_t: t coordinates of the uniformly sampled points
Returns:
a dictionary with keys given by tuples which represent the indicator matrices of
a minimal slice and values given by the number of points contained in the minimal slice
"""
# coordinates at which the ambit field is simulated
ambit_t_coords = self.tau * np.arange(1, self.I_t+1)
ambit_x_coords = self.x * np.arange(1, self.I_s+1)
x_ik = np.subtract.outer(points_x, ambit_x_coords)
x_ikl = np.repeat(x_ik[:, :, np.newaxis], repeats=self.I_t, axis=2)
t_il = np.subtract.outer(points_t, ambit_t_coords)
phi_t_ikl = np.repeat(self.ambit_function(
t_il)[:, np.newaxis, :], repeats=self.I_s, axis=1)
range_indicator = x_ik > 0
range_indicator = np.repeat(
range_indicator[:, :, np.newaxis], repeats=self.I_t, axis=2)
indicator = (x_ikl < phi_t_ikl) * range_indicator
#in the unlikely case no minimal slice is identified
if len(indicator) == 0:
raise ValueError('use more samples in each batch')
# we enumerate the unique indicator matrices together with the frequency counts
# we change the shape from [total_nr_samples, I_s, I_t] to [total_nr_samples, I_s * I_t]
reshaped_indicator = indicator.reshape(indicator.shape[:-2]+(-1,))
g = (tuple(i) for i in reshaped_indicator)
return Counter(chain(g))
def approximate_slices(self):
"""Identifies the minimal slices in \(S_{11}\) together with their areas and assigns these value to attributes
`unique_slices` and `unique_slices_areas`. See Algorithm 4 from https://arxiv.org/abs/2208.08784."""
print('Slice estimation procedure has started')
start_time = time.time()
if self.batch_size is None:
self.batch_size = self.total_nr_samples // 10
self.total_nr_samples = self.batch_size * (self.total_nr_samples // self.batch_size)
# rectangle to simulate uniform rvs in space-time is [x, x + ambit_function(0)] x [0,tau]
low_x, high_x = self.x, self.x + self.ambit_function(0)
low_t, high_t = max(self.tau + self.decorrelation_time, 0), self.tau
dict_ = dict()
#use batches of points to optimise for cache memory and prevent memory overflow
for batch in range(self.total_nr_samples // self.batch_size):
points_x = np.random.uniform(
low=low_x, high=high_x, size=self.batch_size)
points_t = np.random.uniform(
low=low_t, high=high_t, size=self.batch_size)
# throw away points not contained in A_11 = A + (x,tau):
# (points_x,points_t) in A_11 if: 0 < points_x - x < phi(points_t - tau)
# i.e. throw away points contained in ambit sets bottom or left of A_11
# left of A_11: no such points, since we only simulate points with t coordinate > 0
# bottom of A_11: must be in A_01; condition: (points_x < phi(points_t - tau)) * (points_x > 0)
indicator_in_A_11 = (
points_x - self.x < self.ambit_function(points_t - self.tau)) * (points_x - self.x > 0)
indicator_in_A_01 = (points_x < self.ambit_function(
points_t - self.tau)) * (points_x > 0)
indicator_bottom_of_A_11 = indicator_in_A_11 * (~indicator_in_A_01)
points_x = points_x[indicator_bottom_of_A_11]
points_t = points_t[indicator_bottom_of_A_11]
dict_to_add = self.determine_slices_from_points(points_x, points_t)
for k, v in dict_to_add.items():
if k in dict_:
dict_[k] += v
else:
dict_[k] = v
# to add more diagnostics
print('Slice estimation procedure has finished')
end_time = time.time()
print('elapsed minutes for the slice estimation procedure: ',
round((end_time - start_time)/60,2))
percentage_points_kept = 100 * sum(dict_.values()) / self.total_nr_samples
print(f"{round(percentage_points_kept,2)}% of points are used in the slice estimation")
nr_unique_indicators = len(dict_.keys())
self.unique_slices = np.array(list(dict_.keys())).reshape(
nr_unique_indicators, self.I_s, self.I_t)
self.unique_slices_areas = np.array(list(dict_.values())) * (high_x-low_x) * \
(high_t - low_t) / self.total_nr_samples
def determine_correction_slices(self,T):
"""Method to be used in the infinite decorrelation time to determine the areas of the
intersection of the ambit sets at time coordinates \(\\tau,\ldots, k_t \\tau\) with
the region of the plane given by \(t < T\). The result is stored in the attribute
`correction_slices_areas`. Required for Algorithm 7 from https://arxiv.org/abs/2208.08784.
"""
self.correction_slices_areas = [quad(self.ambit_function, a = T - (i+1) * self.tau, b= T - i * self.tau , limit=500)[0]
for i in range(1,self.k_t)] + [quad(self.ambit_function,a= -np.inf,b=T - self.k_t * self.tau,limit=500)[0]]
# @njit
def simulate_finite_decorrelation_time(self):
"""Implementation of Algorithm 5 from https://arxiv.org/abs/2208.08784"""
Y_gaussian = np.zeros((self.nr_simulations,self.k_s + 2 *self.I_s -2,self.k_t + 2 * self.I_t-2))
Y_jump = np.zeros((self.nr_simulations,self.k_s + 2 *self.I_s -2,self.k_t + 2 * self.I_t-2))
for k in range(self.k_s + self.I_s -1):
for l in range(self.k_t + self.I_t - 1):
gaussian_to_add = np.zeros((self.nr_simulations,self.I_s,self.I_t))
jump_to_add = np.zeros((self.nr_simulations,self.I_s,self.I_t))
#simulate S.
for slice_S,area_S in zip(self.unique_slices,self.unique_slices_areas):
tiled_area_S = np.tile(area_S,(self.nr_simulations,1))
gaussian_sample_slice = gaussian_part_sampler(self.gaussian_part_params,tiled_area_S)
jump_sample_slice = jump_part_sampler(self.jump_part_params,tiled_area_S,self.jump_part_name)
gaussian_to_add = gaussian_to_add + slice_S * gaussian_sample_slice[:,:,np.newaxis]
jump_to_add = jump_to_add + slice_S * jump_sample_slice[:,:,np.newaxis]
Y_gaussian[:,k:k+self.I_s,l:l+self.I_t] += gaussian_to_add
Y_jump[:,k:k+self.I_s,l:l+self.I_t] += jump_to_add
self.gaussian_values = Y_gaussian[:,self.I_s-1:self.I_s+self.k_s-1,self.I_t-1:self.I_t+self.k_t-1]
self.jump_values = Y_jump[:,self.I_s-1:self.I_s+self.k_s-1,self.I_t-1:self.I_t+self.k_t-1]
# @njit
def simulate_infinite_decorrelation_time(self,T):
"""Implementation of Algorithm 7 from https://arxiv.org/abs/2208.08784"""
assert T/self.tau == int(T/self.tau)
T_tau = -int(T/self.tau)
Y_gaussian = np.zeros((self.nr_simulations,self.k_s + 2 *self.I_s -2,2 * self.k_t + 2 * T_tau -1))
Y_jump = np.zeros((self.nr_simulations,self.k_s + 2 *self.I_s -2,2 * self.k_t + 2 * T_tau -1))
#add correction slices
self.determine_correction_slices(T)
correction_slices_matrix = np.tile(self.correction_slices_areas,(self.nr_simulations,self.k_s,1))
gaussian_correction_slices = gaussian_part_sampler(self.gaussian_part_params,correction_slices_matrix)
jump_correction_slices = jump_part_sampler(self.jump_part_params,correction_slices_matrix,self.jump_part_name)
#gaussian_correction_slices = np.fliplr(np.cumsum(np.fliplr(gaussian_correction_slices),axis=1))
#jump_correction_slices = np.fliplr(np.cumsum(np.fliplr(jump_correction_slices),axis=1))
gaussian_correction_slices = (np.cumsum(gaussian_correction_slices[:,:,::-1],axis=2))[:,:,::-1]
jump_correction_slices = (np.cumsum(jump_correction_slices[:,:,::-1],axis=2))[:,:,::-1]
Y_gaussian[:,self.I_s-1:self.I_s - 1+ self.k_s, T_tau:T_tau + self.k_t] += gaussian_correction_slices
Y_jump[:,self.I_s-1:self.I_s - 1+ self.k_s, T_tau:T_tau + self.k_t] += jump_correction_slices
#implementation of algorithm [] from []
for k in range(self.k_s + self.I_s -1):
for l in range(self.k_t + T_tau):
gaussian_to_add = np.zeros((self.nr_simulations,self.I_s,self.I_t))
jump_to_add = np.zeros((self.nr_simulations,self.I_s,self.I_t))
for slice_S,area_S in zip(self.unique_slices,self.unique_slices_areas):
tiled_area_S = np.tile(area_S,(self.nr_simulations,1))
#simulate S
gaussian_sample_slice = gaussian_part_sampler(self.gaussian_part_params,tiled_area_S)
jump_sample_slice = jump_part_sampler(self.jump_part_params,tiled_area_S,self.jump_part_name)
gaussian_to_add = gaussian_to_add + slice_S * gaussian_sample_slice[:,:,np.newaxis]
jump_to_add = jump_to_add + slice_S * jump_sample_slice[:,:,np.newaxis]
Y_gaussian[:,k:k+self.I_s,l:l+self.I_t] += gaussian_to_add
Y_jump[:,k:k+self.I_s,l:l+self.I_t] += jump_to_add
self.gaussian_values = Y_gaussian[:,self.I_s-1:self.I_s - 1+ self.k_s,T_tau+1: T_tau+self.k_t+1]
self.jump_values = Y_jump[:,self.I_s-1:self.I_s-1+self.k_s,T_tau+1:T_tau+1+self.k_t]
def simulate(self):
"""Simulate the ambit field at time coordinates \(\\tau,\ldots,k_t\\tau\) and space
coordinates \(x,\ldots,k_s x\). The marginal law of this stationary
process is given by the independent sum of the Gaussian and jump parts. See [] for an example
and `helpers.sampler` for the parametrisations used. The simulated values are stored in the
attribute `values`."""
start_time = time.time()
# checks
self.delete_values()
check_trawl_function(self.ambit_function)
check_gaussian_params(self.gaussian_part_params)
check_jump_part_and_params(self.jump_part_name,self.jump_part_params)
if self.ambit_function(0) <= self.x :
raise ValueError('vertical translations of the ambit sets have no overlap')
#set I_s
self.I_s = math.ceil(self.ambit_function(0)/self.x)
# set I_t
if self.decorrelation_time > -np.inf:
# decorrelation_time checks
assert self.decorrelation_time < 0, 'please check the value of the decorrelation_time'
assert self.ambit_function(self.decorrelation_time) == 0,\
'ambit_function(decorrelation_time) should be 0'
self.I_t = math.ceil(-self.decorrelation_time/self.tau)
elif self.decorrelation_time == -np.inf:
T_tilde = fsolve(lambda t: self.ambit_function(t)-self.x,x0=-1)[0]
T = self.tau * math.floor(1 + T_tilde/self.tau)
assert (T/self.tau).is_integer()
self.I_t = int(-T / self.tau) + self.k_t
self.approximate_slices()
if self.decorrelation_time > -np.inf:
self.simulate_finite_decorrelation_time()
elif self.decorrelation_time == -np.inf:
#self.determine_correction_slices(T)
self.simulate_infinite_decorrelation_time(T)
self.values = self.gaussian_values + self.jump_values
end_time = time.time()
print('elapsed minutes for the simulation after the slice estimation procedure: ',
round((end_time - start_time)/60,2))
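#A minimal, hypothetical usage sketch (not part of the original file). The ambit
#function is illustrative, and the 'gamma' jump seed with its (shape, loc, scale)
#tuple is an assumption about the parametrisation documented in `helpers.sampler`.
if __name__ == '__main__':
    field = simple_ambit_field(x=0.5, tau=0.5, k_s=10, k_t=50, nr_simulations=2,
                               ambit_function=lambda t: 2 * np.exp(t) * (t <= 0),
                               gaussian_part_params=(0, 1),
                               jump_part_name='gamma', jump_part_params=(2.0, 0.0, 1.0),
                               total_nr_samples=10**5)
    field.simulate()
    print(field.values.shape)  # (2, 10, 50)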
|
Ambit-Stochastics
|
/Ambit_Stochastics-1.0.6-py3-none-any.whl/ambit_stochastics/simple_ambit_field.py
|
simple_ambit_field.py
|
import numpy as np
def tangents(x,a,b,nr_values,f,f_der):
assert (a <= x and x <= b)
if x == a:
return f(a)
elif x == b:
return f(b)
increment = (b-a)/ (nr_values-1)
#grid points: a, a + increment, a + 2*increment, ..., a + (nr_values-1)*increment = b
interval_index = int(np.floor((nr_values-1) * (x-a)/(b-a)))
meeting_point = (f(a + interval_index*increment) - f(a + (interval_index+1) * increment) +\
(a + increment * (interval_index+1)) * f_der(a + (interval_index+1)*increment) -\
(a + increment * interval_index) * f_der(a + interval_index * increment)) / \
(f_der(a + increment*(interval_index+1)) - f_der(a + interval_index*increment))
if x <= meeting_point:
return f(a + interval_index*increment) + f_der(a + (interval_index)*increment) * (x - (a + increment * interval_index))
elif x > meeting_point:
return f(a + increment * (interval_index+1)) + f_der(a + (interval_index+1)*increment) * (x - (a + increment * (interval_index+1)))
def tangents_old(x,a,b,values,f_values,der_f_values):
assert (a <= x and x <= b)
if x == a:
return f_values[0]
elif x == b:
return f_values[-1]
else:
interval_index = int(np.floor((len(values)-1) * (x-a)/(b-a)))
#if not (x - values[interval_index] >= 0) or not (values[interval_index+1] -x >= 0):
# print(x,values[interval_index],values[interval_index+1])
#f(x0) + f'(x0) (x-x0) = f(x1) + f'(x1)(x-x1)
#f(x0) - f(x1) +x1 f'(x1) - x0 f'(x0) = x(f'(x1)-f'(x0))
meeting_point = (f_values[interval_index] - f_values[interval_index+1] +\
values[interval_index+1] * der_f_values[interval_index+1] -\
values[interval_index] * der_f_values[interval_index]) / \
(der_f_values[interval_index+1] - der_f_values[interval_index])
if x <= meeting_point:
return f_values[interval_index] + der_f_values[interval_index] * (x - values[interval_index])
elif x > meeting_point:
return f_values[interval_index+1] + der_f_values[interval_index+1] * (x - values[interval_index+1])
#### USAGE EXAMPLE####
a,b,nr_values = 1,20,5
f = lambda x : np.log(x+0.25)
f_der = lambda x : 1/ (x+0.25)
der_f = lambda x : 1/ (x+0.25)
values = np.linspace(a,b,nr_values)
f_values = f(values)
der_f_values = der_f(values)
import matplotlib.pyplot as plt
z = np.linspace(a,b,100)
zz = [tangents_old(i,a,b,values,f_values,der_f_values) for i in z]
#zz = [tangents(i,a,b,nr_values,f,f_der) for i in z]
plt.plot(z,zz,c='b')
plt.plot(z,f(z),c='r')
#print ((zz - f(z) >= 0).all() )
|
Ambit-Stochastics
|
/Ambit_Stochastics-1.0.6-py3-none-any.whl/ambit_stochastics/helpers/adaptive_rejection_sampling_tangents.py
|
adaptive_rejection_sampling_tangents.py
|
###################################################################
#imports
import numpy as np
import math
import warnings
from statsmodels.graphics.gofplots import qqplot
from scipy.stats import gamma
###################################################################
def sample_from_laplace_transform(n, ltpdf, lt_params , tol=1e-4, x0=1, xinc=2, m=25, L=1, A=19, nburn=38):
"""Function for generating a random sample of size n from a distribution, given the Laplace transform of its p.d.f.
Args:
n:
"""
maxiter = 100 #to increase maybe
# -----------------------------------------------------
# Derived quantities that need only be calculated once,
# including the binomial coefficients
# -----------------------------------------------------
nterms = nburn + m*L
seqbtL = np.arange(nburn-L,nterms,L)
y = np.pi * (1j) * np.array(range(1,nterms+1)) / L
#print('first y is',y)
expy = np.exp(y)
A2L = 0.5 * A / L
expxt = np.exp(A2L) / L
coef = np.array([math.comb(m,i) for i in range(m+1)]) / 2**m
# --------------------------------------------------
# Generate sorted uniform random numbers. xrand will
# store the corresponding x values
# --------------------------------------------------
u = np.sort(np.random.uniform(low=0.0, high=1.0, size=n),kind = "quicksort")
xrand = u.copy()
#print('u is',u)
#------------------------------------------------------------
# Begin by finding an x-value that can act as an upper bound
# throughout. This will be stored in upplim. Its value is
# based on the maximum value in u. We also use the first
# value calculated (along with its pdf and cdf) as a starting
# value for finding the solution to F(x) = u_min. (This is
# used only once, so doesn't need to be a good starting value
#------------------------------------------------------------
t = x0/xinc
cdf = 0
kount0 = 0
set1st = False
while (kount0 < maxiter and cdf < u[n-1]):
t = xinc * t
kount0 = kount0 + 1
x = A2L / t
z = x + y/t
ltx = ltpdf(x, lt_params)
#print('y is',y)
if kount0 % 25 ==0 :
print('kount0 is',kount0)
#ltzexpy = ltpdf(z, lt_params) * expy #if ltpdf can be applied to a vector
ltzexpy = np.array([ltpdf(i, lt_params) for i in z]) * expy
par_sum = 0.5*np.real(ltx) + np.cumsum( np.real(ltzexpy) )
par_sum2 = 0.5*np.real(ltx/x) + np.cumsum( np.real(ltzexpy/z) )
#to check indices
pdf = expxt * np.sum(coef * par_sum[seqbtL]) / t
cdf = expxt * np.sum(coef * par_sum2[seqbtL]) / t
#print(cdf)
if ((not set1st) and (cdf > u[0])):
print('here')  #starting value for the Newton-Raphson step has been found
cdf1 = cdf
pdf1 = pdf
t1 = t
set1st = True
if kount0 >= maxiter:
raise ValueError('Cannot locate upper quantile')
upplim = t
print('kount0 part 2 is',kount0)
#--------------------------------
# Now use modified Newton-Raphson
#--------------------------------
lower = 0
t = t1
cdf = cdf1
pdf = pdf1
kount = [0 for i in range(n)]
maxiter = 1000
for j in range(n) :
#-------------------------------
# Initial bracketing of solution
#-------------------------------
upper = upplim
kount[j] = 0
while (kount[j] < maxiter and abs(u[j]-cdf) > tol):
kount[j] = kount[j] + 1
#-----------------------------------------------
# Update t. Try Newton-Raphson approach. If this
# goes outside the bounds, use midpoint instead
#-----------------------------------------------
t = t - (cdf-u[j])/pdf
if t < lower or t > upper:
t = 0.5 * (lower + upper)
#print(u[j]-cdf)
#----------------------------------------------------
# Calculate the cdf and pdf at the updated value of t
#----------------------------------------------------
x = A2L / t
z = x + y/t
ltx = ltpdf(x, lt_params)
ltzexpy = np.array([ltpdf(i, lt_params) for i in z]) * expy
par_sum = 0.5 * np.real(ltx) + np.cumsum( np.real(ltzexpy) )
par_sum2 = 0.5 * np.real(ltx/x) + np.cumsum( np.real(ltzexpy/z) )
pdf = expxt * np.sum(coef * par_sum[seqbtL]) / t
cdf = expxt * np.sum(coef * par_sum2[seqbtL]) / t
#------------------
# Update the bounds
#------------------
if cdf <= u[j]:
lower = t
else:
upper = t
if kount[j] >= maxiter:
warnings.warn('Desired accuracy not achieved for F(x)=u.')
xrand[j] = t
lower = t
meankount = (kount0 + np.sum(kount))/n
if n > 1:
rsample = np.random.permutation(xrand)
else:
rsample = xrand
return rsample, meankount
#from scipy.stats import norm
#def gaussian_laplace_transform(t,aux_params):
# mu,sigma = aux_params
# return np.exp(0.5 * t**2 * sigma**2 - t * mu)
#v = sample_from_laplace_transform(5, gaussian_laplace_transform, [0.,1.] )
#def gamma_laplace_transform(t,gamma_distr_params): !WRONG!
# alpha,k = gamma_distr_params
# return (1 + t/k) ** (-alpha)
#v2 = sample_from_laplace_transform(10000, gamma_laplace_transform, [2.5,1.] )
#qqplot(v2[0], dist= gamma, distargs=(2.5,), loc=0.,scale= 1.,line='45')
|
Ambit-Stochastics
|
/Ambit_Stochastics-1.0.6-py3-none-any.whl/ambit_stochastics/helpers/Laplace_transform_inversion.py
|
Laplace_transform_inversion.py
|
import numpy as np
def compute_empirical_covariance_matrix(values,max_lag_time,max_lag_space):
"""Check the 'Simple Ambit field example usage' Jupyter notebook to see an example.
Args:
values: a numpy array containing the values of the simulated ambit field
max_lag_time: positive integer: the maximum number of lags at which to estimate autocorrelation and autocovariance on the time axis
max_lag_space: positive integer: the maximum number of lags at which to estimate autocorrelation and autocovariance on the space axis
Returns:
result_cov: a numpy array with shape (nr_simulations, max_lag_time+1, max_lag_space+1) containing the estimated autocovariances
result_cor: a numpy array with shape (nr_simulations, max_lag_time+1, max_lag_space+1) containing the estimated autocorrelations
"""
nr_simulations, nr_rows, nr_columns = values.shape
result_cov = np.zeros((nr_simulations,max_lag_time+1,max_lag_space+1))
result_cor = np.zeros((nr_simulations,max_lag_time+1,max_lag_space+1))
for row in range(max_lag_time+1):
for column in range(max_lag_space+1):
nr_elements = (nr_rows - row)*(nr_columns - column)
sub_matrix_1 = values[:,:nr_rows - row, :nr_columns - column]
sub_matrix_2 = values[:,row :, column :]
#assert sub_matrix_1.shape == sub_matrix_2.shape
mean_1 = np.einsum('ijk->i',sub_matrix_1) / nr_elements
mean_2 = np.einsum('ijk->i',sub_matrix_2) / nr_elements
variance_estimator_1 = np.array([np.var(sub_matrix_1[i,:,:]) for i in range(nr_simulations)])
variance_estimator_2 = np.array([np.var(sub_matrix_2[i,:,:]) for i in range(nr_simulations)])
sub_matrix_1 = sub_matrix_1 - mean_1[:,np.newaxis,np.newaxis]
sub_matrix_2 = sub_matrix_2 - mean_2[:,np.newaxis,np.newaxis]
covariances = np.einsum('ijk,ijk->i',sub_matrix_1,sub_matrix_2) / nr_elements
result_cov[:,row,column] = covariances
result_cor[:,row,column] = covariances/(variance_estimator_1 * variance_estimator_2)**0.5
return result_cov,result_cor
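#Illustrative usage (shapes only): for `field` with shape (nr_simulations, rows, columns),
#cov, cor = compute_empirical_covariance_matrix(field, max_lag_time=5, max_lag_space=3)
#returns two arrays of shape (nr_simulations, 6, 4); the lag-(0, 0) entry is the variance,
#so cor[:, 0, 0] == 1.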
|
Ambit-Stochastics
|
/Ambit_Stochastics-1.0.6-py3-none-any.whl/ambit_stochastics/helpers/cov_estimator.py
|
cov_estimator.py
|
from statsmodels.tsa.stattools import acf
from scipy.integrate import quad
from scipy.optimize import minimize
import numpy as np
def corr_matrix_from_corr_vector(corr_vector):
"""inputs a vector = [corr(0),corr(s),...,corr((k-1)*s)] and outputs the arrray
Sigma_tilde_ij = overlap area at lag |i-j| = corr(|i-j|*s)
"""
if isinstance(corr_vector,np.ndarray):
assert len(corr_vector.shape) == 1
corr_vector = tuple(corr_vector)
assert isinstance(corr_vector,(tuple,list))
k = len(corr_vector)
corr_matrix = [corr_vector[1:i+1][::-1] + corr_vector[:k-i] for i in range(k)]
return np.array(corr_matrix)
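#Example (illustrative): corr_matrix_from_corr_vector((1.0, 0.5, 0.25)) returns the
#symmetric Toeplitz array [[1.0, 0.5, 0.25], [0.5, 1.0, 0.5], [0.25, 0.5, 1.0]].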
#in the following correlation functions corr_exponential_envelope, corr_gamma_envelope and corr_ig_envelope, h > 0
def corr_exponential_envelope(h,params):
u = params[0]
return np.exp(-u * h)
def corr_gamma_envelope(h,params):
H,delta = params
return (1+h/delta)**(-H)
def corr_ig_envelope(h,params):
gamma,delta = params
return np.exp(delta * gamma *(1-np.sqrt(2*h/gamma**2+1)))
def trawl_acf(envelope, envelope_function=None):
assert envelope in ['exponential','gamma','ig','custom'],'please check the value of envelope'
if envelope == "custom":
"""describe how to specify envelope_function"""
assert callable(envelope_function)
def corr_other(h,params):
return quad(envelope_function(params), a=-np.inf, b=-h)[0] / quad(envelope_function(params), a=-np.inf, b=0)[0]
return corr_other
else:
assert envelope_function is None
if envelope == "exponential":
return corr_exponential_envelope
if envelope == "gamma":
return corr_gamma_envelope
if envelope == "ig":
return corr_ig_envelope
def bounds_and_initial_guess_for_acf_params(envelope):
assert envelope in ['exponential','gamma','ig']
if envelope == 'exponential':
bounds = ((0,np.inf),)
initial_guess = (1,)
elif envelope == 'gamma' or envelope == 'ig':
bounds = ((0.0001,np.inf),(0.0001,np.inf))
initial_guess = (1,1)
return bounds,initial_guess
def fit_trawl_envelope_gmm(s,simulations,lags,envelope,initial_guess = None,
bounds = None, envelope_function = None):
#parameter checks
assert isinstance(s,(float,int))
assert isinstance(lags,tuple)
assert envelope in ['exponential','gamma','ig','custom']
assert len(simulations.shape) == 2
#assert isinstance(envelope_params,tuple)
assert (isinstance(initial_guess,tuple) and all(isinstance(i,tuple) for i in initial_guess)) or initial_guess is None
assert isinstance(bounds,tuple) or bounds is None
assert callable(envelope_function) or envelope_function is None
theoretical_acf_func = trawl_acf(envelope, envelope_function)
empirical_acf = np.apply_along_axis(lambda x: acf(x,nlags = max(lags)),arr = simulations,axis=1)
empirical_acf = empirical_acf[:,lags]
#this will look s up in the `fit_trawl_envelope_gmm` scope
def criterion(params,empirical_acf_row):
theoretical = np.array([theoretical_acf_func(s*i,params) for i in lags])
return np.sum((empirical_acf_row - theoretical)**2)
if envelope == 'custom':
#must pass the envelope function and the initial guess
assert isinstance(initial_guess,tuple)
assert callable(envelope_function)
if envelope != 'custom':
bounds, default_initial_guess = bounds_and_initial_guess_for_acf_params(envelope)
if initial_guess is None:
#use the same default initial guess for each simulated path
initial_guess = tuple(default_initial_guess for _ in range(len(empirical_acf)))
#if the custom function has no bounds
#if bounds == None:
# result = [minimize(criterion,x0 = initial_guess, args= (empirical_acf_row,),
# method='BFGS').x for empirical_acf_row in empirical_acf]
#
#in all the other cases, we have bounds
#else:
result = [minimize(criterion,x0 = initial_guess[j], args= (empirical_acf[j],),
method='L-BFGS-B', bounds = bounds).x for j in range(len(empirical_acf))]
return np.array(result)
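#minimal usage sketch (illustrative values only): fit a gamma envelope by GMM
#to the empirical ACF of each simulated path, matching lags 1 to 5
#simulations = np.random.randn(10, 1000) #hypothetical: 10 paths of length 1000
#params = fit_trawl_envelope_gmm(s=0.1, simulations=simulations, lags=(1,2,3,4,5), envelope='gamma')
#params.shape #(10, 2): one (H, delta) pair per path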
|
Ambit-Stochastics
|
/Ambit_Stochastics-1.0.6-py3-none-any.whl/ambit_stochastics/helpers/acf_functions.py
|
acf_functions.py
|
import numpy as np
from scipy.stats import norm,gamma,cauchy,invgauss,norminvgauss,\
geninvgauss,bernoulli,binom,nbinom,poisson,logser
def gaussian_part_sampler(gaussian_part_params,areas):
"""Simulates the Gaussian part (including drift) of the Levy basis over disjoint sets
Args:
gaussian_part_params: list or numpy array with the drift term and variance of the Gaussian part
areas: A number / numpy array containing the areas of the given sets
Returns:
A number / numpy array with law \(\mathcal{N}(drift \cdot areas, scale \cdot \sqrt{areas})\); we use the
mean-scale parametrisation for consistency with scipy
"""
drift,scale = gaussian_part_params
gaussian_sample = norm.rvs(loc = drift * areas, scale = scale *(areas)**0.5)
return gaussian_sample
def jump_part_sampler(jump_part_params,areas,distr_name):
"""Simulates the jump part of the Levy basis over disjoint sets; distributions are named
and parametrised as in https://docs.scipy.org/doc/scipy/reference/stats.html
Args:
distr_name: Name of the distribution of the jump part L_j
jump_part_params: List or numpy array which contains the parameters of the
distribution of the jump part L_j
areas: A number / numpy array containing the areas of the given sets
Returns:
A number / numpy array with law specified by params and distr_name
"""
if np.any(areas < 0):
raise ValueError("slice areas can't be negative")
areas = areas.copy() #work on a copy, so that the caller's array is not mutated
index = areas == 0
areas[index] = 100 #arbitrary placeholder; the corresponding samples are set to 0 below
if distr_name is None:
samples = np.zeros(shape= areas.shape)
###continuous distributions
elif distr_name == 'gamma':
a,scale = jump_part_params
samples = gamma.rvs(a = a * areas, loc = 0, scale = scale)
elif distr_name == 'cauchy':
scale = jump_part_params[0]
samples = cauchy.rvs(loc = 0, scale = scale * areas)
elif distr_name == 'invgauss':
mu, scale = jump_part_params
samples = invgauss.rvs(loc = 0, mu = mu / areas , scale = scale * areas**2)
#parametrisation note: scipy's invgauss(mu, scale) corresponds to the wikipedia
#parametrisation IG(mu_wiki, lambda) via mu_wiki = mu * scale, lambda = scale
#scaling the Levy basis: L' ~ IG(mu_wiki, lambda) gives
#L(A) ~ IG(mu_wiki * Leb(A), lambda * Leb(A)^2), which in scipy parameters
#reads mu -> mu / Leb(A), scale -> scale * Leb(A)^2, as used above
elif distr_name == 'normalinvgauss':
a, b, loc, scale = jump_part_params
#with scipy's parametrisation a = alpha*delta, b = beta*delta, loc = mu, scale = delta
#(https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.norminvgauss.html),
#the NIG scaling L(A) ~ NIG(alpha, beta, mu * Leb(A), delta * Leb(A)) amounts to
#multiplying each of a, b, loc and scale by the area:
samples = norminvgauss.rvs(a = a *areas, b = b * areas, loc = loc* areas, scale = scale * areas)
###discrete distributions
elif distr_name == 'poisson':
lambda_poisson = jump_part_params[0]
samples = poisson.rvs(mu = lambda_poisson * areas,loc=0)
samples[index] = 0
return samples
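#minimal sanity-check sketch (illustrative, not part of the library): the gamma
#branch uses infinite divisibility, i.e. a Levy basis with Gamma(a, scale) seed
#satisfies L(A) ~ Gamma(a * Leb(A), scale)
#areas = np.array([0.5, 1.0, 2.0])
#samples = jump_part_sampler((2.0, 1.5), areas, 'gamma') #shape parameters 1.0, 2.0, 4.0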
def generate_cpp_values_associated_to_points(nr_points,cpp_part_name,cpp_part_params,custom_sampler):
if cpp_part_name == 'custom':
return custom_sampler(nr_points)
elif cpp_part_name == 'bernoulli':
return bernoulli.rvs(p = cpp_part_params[0], size = nr_points)
elif cpp_part_name == 'poisson':
return poisson.rvs(mu = cpp_part_params[0], size = nr_points)
elif cpp_part_name == 'logser':
return logser.rvs(p = cpp_part_params[0], size = nr_points)
elif cpp_part_name == 'binom':
return binom.rvs(n = cpp_part_params[0], p = cpp_part_params[1], size = nr_points)
elif cpp_part_name == 'nbinom':
return nbinom.rvs(n = cpp_part_params[0], p = cpp_part_params[1], size = nr_points)
def generate_cpp_points(min_x,max_x,min_t,max_t,cpp_part_name,cpp_part_params,cpp_intensity,custom_sampler):
area_times_intensity = (max_x-min_x)*(max_t-min_t) * cpp_intensity
nr_points = poisson.rvs(mu = area_times_intensity)
points_x = np.random.uniform(low = min_x, high = max_x, size = nr_points)
points_t = np.random.uniform(low = min_t, high = max_t, size = nr_points)
associated_values = generate_cpp_values_associated_to_points(nr_points,cpp_part_name,\
cpp_part_params,custom_sampler)
return points_x,points_t,associated_values
|
Ambit-Stochastics
|
/Ambit_Stochastics-1.0.6-py3-none-any.whl/ambit_stochastics/helpers/sampler.py
|
sampler.py
|
from .acf_functions import trawl_acf,corr_matrix_from_corr_vector
#import acf_functions
from scipy.stats import beta,gamma,norm
import numpy as np
def get_trawl_process_mean(levy_seed,levy_seed_params):
#levy_seed_params is a tuple (not a tuple of tuples)
if levy_seed == 'gamma':
alpha,theta = levy_seed_params
return gamma(a = alpha, loc = 0, scale = theta).mean()
elif levy_seed == 'gaussian':
return levy_seed_params[0]
else:
raise ValueError('not yet implemented')
def conditional_gaussian_distribution(values,tau,nr_steps_ahead,max_gaussian_lag,levy_seed_params,envelope,envelope_params,envelope_function=None):
acf_function_helper = trawl_acf(envelope, envelope_function)
acf_function = lambda t: acf_function_helper(t,envelope_params)
mu_,scale_ = levy_seed_params
joints = [np.array(values[i:i+max_gaussian_lag]) for i in range(0,len(values) - max_gaussian_lag +1 )]
mu_1 = mu_ #mean of X_{gaussian_lags + nr_steps_ahead}
mu_2 = mu_ * np.ones(max_gaussian_lag) #mean of (X_1,...,X_{gaussian_lags})
sigma_11 = scale_**2
sigma_22 = scale_**2 * corr_matrix_from_corr_vector(acf_function(np.array([i*tau for i in range(max_gaussian_lag)])))
sigma_21 = scale_**2 * (acf_function(np.array([i*tau for i in range(nr_steps_ahead, max_gaussian_lag+nr_steps_ahead)])))[::-1]
sigma_12 = sigma_21
sigma_22_inv = np.linalg.inv(sigma_22)
conditional_mean = [mu_1 + sigma_12 @ sigma_22_inv @ (joint -mu_2) for joint in joints]
conditional_var = sigma_11 - sigma_12 @ sigma_22_inv @ sigma_21
return np.array(conditional_mean), conditional_var**0.5
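#the computation above is the standard Gaussian conditioning identity:
#X_1 | X_2 = x ~ N(mu_1 + Sigma_12 Sigma_22^{-1} (x - mu_2), Sigma_11 - Sigma_12 Sigma_22^{-1} Sigma_21)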
def deterministic_forecasting_sanity_checks(values,tau,nr_steps_ahead,levy_seed,levy_seed_params,envelope_params):
assert isinstance(values,np.ndarray)
assert isinstance(tau,float) and tau > 0
assert isinstance(nr_steps_ahead,int) and nr_steps_ahead > 0
assert levy_seed in ['gaussian','gamma','custom']
assert isinstance(levy_seed_params,np.ndarray)
#assert envelope and envelope_function checked in trawl_acf
assert isinstance(envelope_params,np.ndarray)
def probabilistic_forecasting_sanity_checks(values,tau,nr_steps_ahead,levy_seed,levy_seed_params,nr_samples,envelope_params):
assert isinstance(values,np.ndarray)
assert isinstance(tau,float) and tau > 0
assert isinstance(nr_steps_ahead,int) and nr_steps_ahead > 0
assert levy_seed in ['gaussian','gamma','custom']
assert isinstance(levy_seed_params,np.ndarray)
assert isinstance(nr_samples,int) and nr_samples > 0
#assert envelope and envelope_function checked in trawl_acf
assert isinstance(envelope_params,np.ndarray)
def deterministic_forecasting(tau, nr_steps_ahead,values,levy_seed,levy_seed_params,envelope,
envelope_params, envelope_function = None, max_gaussian_lag = None):
deterministic_forecasting_sanity_checks(values, tau, nr_steps_ahead, levy_seed,levy_seed_params,envelope_params)
if levy_seed == 'gaussian':
assert isinstance(max_gaussian_lag,int) and max_gaussian_lag > 0
conditional_mean,_ = conditional_gaussian_distribution(values,tau,nr_steps_ahead,max_gaussian_lag,levy_seed_params,envelope,envelope_params,envelope_function)
return conditional_mean
else:
acf_function_helper = trawl_acf(envelope, envelope_function)
overlap_area = acf_function_helper(tau * nr_steps_ahead,envelope_params)
#print (overlap_area,type(values),get_trawl_process_mean(levy_seed,levy_seed_params) )
return overlap_area * values + (1-overlap_area) * get_trawl_process_mean(levy_seed,levy_seed_params)
def probabilistic_forecasting(tau,nr_steps_ahead,values,levy_seed,levy_seed_params,envelope,
envelope_params, nr_samples, envelope_function = None, max_gaussian_lag = None):
"""assumes the area of the lebesgue measure is 1
values is a 1 dimensional array """
probabilistic_forecasting_sanity_checks(values,tau,nr_steps_ahead,levy_seed,levy_seed_params,nr_samples,envelope_params)
acf_function_helper = trawl_acf(envelope, envelope_function)
acf_function = lambda x: acf_function_helper(x,envelope_params)
overlap_area = acf_function(tau*nr_steps_ahead)
if levy_seed == 'gaussian':
assert isinstance(max_gaussian_lag,int) and max_gaussian_lag > 0
conditional_mean,conditional_scale = conditional_gaussian_distribution(values,tau,nr_steps_ahead,max_gaussian_lag,
levy_seed_params,envelope,envelope_params,envelope_function)
return np.array([norm.rvs(loc = i, scale = conditional_scale, size=nr_samples) for i in conditional_mean])
elif levy_seed == 'gamma':
alpha,theta = levy_seed_params
alpha0 = alpha * overlap_area
alpha1 = alpha * (1-overlap_area)
#beta-gamma thinning: if X ~ Gamma(alpha0 + alpha1, theta) and, independently,
#B ~ Beta(alpha0, alpha1), then X * B ~ Gamma(alpha0, theta); this gives the
#overlap component conditionally on the observed values
overlap_samples = values[:,np.newaxis] * beta.rvs(a = alpha0, b = alpha1, size = [len(values),nr_samples])
independent_samples = gamma.rvs(a = alpha1, loc = 0, scale = theta, size = nr_samples)[np.newaxis,:]
elif levy_seed in ['invgauss','gig','cauchy','student']:
raise ValueError('not yet implemented')
else:
raise ValueError('unknown levy_seed ' + str(levy_seed))
return overlap_samples + independent_samples
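#minimal usage sketch (illustrative numbers only): draw 100 one-step-ahead samples
#for each observation of a gamma-seed trawl process with an exponential envelope
#observed = np.abs(np.random.randn(500)) #hypothetical stand-in for a trawl path
#draws = probabilistic_forecasting(tau=0.1, nr_steps_ahead=1, values=observed,
# levy_seed='gamma', levy_seed_params=np.array([2.0, 1.0]),
# envelope='exponential', envelope_params=np.array([1.5]), nr_samples=100)
#draws.shape #(500, 100)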
|
Ambit-Stochastics
|
/Ambit_Stochastics-1.0.6-py3-none-any.whl/ambit_stochastics/helpers/forecasting_helpers.py
|
forecasting_helpers.py
|
import pandas as pd
import numpy as np
def check1(nr_trawls,nr_simulations):
#assert (isinstance(tau,float) or isinstance(tau,int)) and tau > 0
assert isinstance(nr_trawls,int) and nr_trawls >0
assert isinstance(nr_simulations,int) and nr_simulations >0
def check_spatio_temporal_positions(x,tau,k_s,k_t,nr_simulations):
assert (isinstance(x,float) or isinstance(x,int)) and x > 0
assert (isinstance(tau,float) or isinstance(tau,int)) and tau > 0
assert isinstance(k_s,int) and k_s >0
assert isinstance(k_t,int) and k_t >0
assert isinstance(nr_simulations,int) and nr_simulations >0
def check_trawl_function(phi):
"""Check if the function is increasing and 0 for strictly positive values"""
assert callable(phi),'trawl_function is not a function'
assert phi(0.000001) == 0,'trawl_function does not satisfy trawl_function(t)=0 for t >0'
phi_values = phi(np.linspace(-100,0,10**5))
assert pd.Series(phi_values).is_monotonic_increasing,'trawl_function is not increasing'
#distributional checks
def check_gaussian_params(gaussian_part_params):
"""Check if the distribution of the jump part is supported and if the parameters
of the Gaussian part are numbers"""
#gaussian part params
assert isinstance(gaussian_part_params,tuple),'gaussian_part_params is not a tuple'
assert all(isinstance(i,(int,float)) for i in gaussian_part_params),'parameters of the gaussian part are not numbers'
def check_jump_part_and_params(jump_part_name,jump_part_params):
"""Check if the distribution of the jump part is supported and if the parameters
of the Jump parts are numbers"""
#jump part params
assert isinstance(jump_part_params,tuple),'jump_part_params is not a tuple'
assert all(isinstance(i,(int,float)) for i in jump_part_params),'parameters of the jump part are not numbers'
#jump part_name
if jump_part_name in ['norminvgauss','geninvgauss','nbinom']: #to also add hyperbolic distributions
raise ValueError('distribution not yet supported')
elif jump_part_name not in [None,'invgauss','gamma','cauchy','poisson']:
raise ValueError('unknown distribution')
def check_grid_params(mesh_size,truncation_grid,times_grid):
assert isinstance(mesh_size,(int,float)) and mesh_size >0,'please check mesh size'
assert isinstance(truncation_grid,(int,float)) and truncation_grid < 0,'truncation_grid should be a negative number'
def check_cpp_params(cpp_part_name, cpp_part_params, cpp_intensity, custom_sampler):
assert isinstance(cpp_intensity,(int,float)) and cpp_intensity >0,'please check cpp_intensity'
assert cpp_part_name in ['poisson','bernoulli','binom','nbinom','logser','custom'],'cpp_part_name should be one of the \
following: poisson, bernoulli, binom, nbinom, logser, custom'
if cpp_part_name == 'custom':
assert callable(custom_sampler),'cpp sampler should be a function'
elif cpp_part_name in ['poisson','bernoulli','logser']:
assert isinstance(cpp_part_params,tuple) and len(cpp_part_params) == 1,'cpp_part_params should be a tuple of 1 element'
assert isinstance(cpp_part_params[0],(int,float)) and cpp_part_params[0] >=0,'the first element in cpp_part_params should be a non-negative float'
if cpp_part_name == 'logser':
assert cpp_part_params[0] < 1, 'cpp_part_params[0] should be strictly less than 1'
elif cpp_part_name == 'bernoulli':
assert cpp_part_params[0] <=1 , 'cpp_part_params[0] should be less than or equal to 1'
elif cpp_part_name == 'binom' or cpp_part_name == 'nbinom':
assert isinstance(cpp_part_params,tuple) and len(cpp_part_params) == 2,'cpp_part_params should be a tuple with 2 elements'
assert isinstance(cpp_part_params[0],int) and cpp_part_params[0] > 0,'first parameter in cpp_part_params should be a positive integer'
assert isinstance(cpp_part_params[1],float) and 1 >= cpp_part_params[1] >= 0,'second parameter in cpp_part_params should be a float in [0,1]'
if cpp_part_name != 'custom' and custom_sampler != None:
raise ValueError('please check whether you are trying to use a custom sampler for \
a particular distribution or one of the distributions poisson, binom, nbinom, bernoulli')
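#minimal usage sketch (hypothetical values): these checks are meant to be called
#at the start of the simulation routines and raise AssertionError on bad input
#check_trawl_function(lambda t: np.exp(t) * (t <= 0)) #passes
#check_gaussian_params((0.0, 1.0)) #passes
#check_jump_part_and_params('gamma', (2.0, 1.0)) #passes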
|
Ambit-Stochastics
|
/Ambit_Stochastics-1.0.6-py3-none-any.whl/ambit_stochastics/helpers/input_checks.py
|
input_checks.py
|
pyresparser
===========
::
A simple resume parser used for extracting information from resumes
Built with ❤︎ and :coffee: by `Omkar
Pathak <https://github.com/OmkarPathak>`__
--------------
|GitHub stars| |PyPI| |Downloads| |GitHub| |PyPI - Python Version| |Say
Thanks!| |Build Status| |codecov|
Features
========
- Extract name
- Extract email
- Extract mobile numbers
- Extract skills
- Extract total experience
- Extract college name
- Extract degree
- Extract designation
- Extract company names
Installation
============
- You can install this package using
.. code:: bash
pip install pyresparser
- For NLP operations we use spacy and nltk. Install them using below
commands:
.. code:: bash
# spaCy
python -m spacy download en_core_web_sm
# nltk
python -m nltk.downloader words
Documentation
=============
Official documentation is available at:
https://www.omkarpathak.in/pyresparser/
Supported File Formats
======================
- PDF and DOCx files are supported on all Operating Systems
- If you want to extract DOC files you can install
`textract <https://textract.readthedocs.io/en/stable/installation.html>`__
for your OS (Linux, MacOS)
- Note: You just have to install textract (and nothing else) and doc
files will get parsed easily
Usage
=====
- Import it in your Python project
.. code:: python
from pyresparser import ResumeParser
data = ResumeParser('/path/to/resume/file').get_extracted_data()
CLI
===
For running the resume extractor you can also use the ``cli`` provided
.. code:: bash
usage: pyresparser [-h] [-f FILE] [-d DIRECTORY] [-r REMOTEFILE]
[-re CUSTOM_REGEX] [-sf SKILLSFILE] [-e EXPORT_FORMAT]
optional arguments:
-h, --help show this help message and exit
-f FILE, --file FILE resume file to be extracted
-d DIRECTORY, --directory DIRECTORY
directory containing all the resumes to be extracted
-r REMOTEFILE, --remotefile REMOTEFILE
remote path for resume file to be extracted
-re CUSTOM_REGEX, --custom-regex CUSTOM_REGEX
custom regex for parsing mobile numbers
-sf SKILLSFILE, --skillsfile SKILLSFILE
custom skills CSV file against which skills are
searched for
-e EXPORT_FORMAT, --export-format EXPORT_FORMAT
the information export format (json)
Notes:
======
- If you are running the app on Windows, then you can only extract
.docx and .pdf files
Result
======
The module returns a list of dictionaries with results as
follows:
::
[
{
'college_name': ['Marathwada Mitra Mandal’s College of Engineering'],
'company_names': None,
'degree': ['B.E. IN COMPUTER ENGINEERING'],
'designation': ['Manager',
'TECHNICAL CONTENT WRITER',
'DATA ENGINEER'],
'email': '[email protected]',
'mobile_number': '8087996634',
'name': 'Omkar Pathak',
'no_of_pages': 3,
'skills': ['Operating systems',
'Linux',
'Github',
'Testing',
'Content',
'Automation',
'Python',
'Css',
'Website',
'Django',
'Opencv',
'Programming',
'C',
...],
'total_experience': 1.83
}
]
References that helped me get here
==================================
- https://www.kaggle.com/nirant/hitchhiker-s-guide-to-nlp-in-spacy
- https://www.analyticsvidhya.com/blog/2017/04/natural-language-processing-made-easy-using-spacy-%E2%80%8Bin-python/
- [https://medium.com/@divalicious.priya/information-extraction-from-cv-acec216c3f48](https://medium.com/@divalicious.priya/information-extraction-from-cv-acec216c3f48)
- **Special thanks** to dataturks for their `annotated
dataset <https://dataturks.com/blog/named-entity-recognition-in-resumes.php>`__
Donation
========
If you have found my software to be of any use to you, do consider
helping me pay my internet bills. This would encourage me to create more
such software :smile:
+-----------+----+
| PayPal | |
+===========+====+
| ₹ (INR) | |
+-----------+----+
Stargazers over time
===================
|Stargazers over time|
.. |GitHub stars| image:: https://img.shields.io/github/stars/OmkarPathak/pyresparser.svg
:target: https://github.com/OmkarPathak/pyresparser/stargazers
.. |PyPI| image:: https://img.shields.io/pypi/v/pyresparser.svg
:target: https://pypi.org/project/pyresparser/
.. |Downloads| image:: https://pepy.tech/badge/pyresparser
:target: https://pepy.tech/project/pyresparser
.. |GitHub| image:: https://img.shields.io/github/license/omkarpathak/pyresparser.svg
:target: https://github.com/OmkarPathak/pyresparser/blob/master/LICENSE
.. |PyPI - Python Version| image:: https://img.shields.io/pypi/pyversions/Django.svg
.. |Say Thanks!| image:: https://img.shields.io/badge/Say%20Thanks-:D-1EAEDB.svg
:target: https://saythanks.io/to/OmkarPathak
.. |Build Status| image:: https://travis-ci.com/OmkarPathak/pyresparser.svg?branch=master
:target: https://travis-ci.com/OmkarPathak/pyresparser
.. |codecov| image:: https://codecov.io/gh/OmkarPathak/pyresparser/branch/master/graph/badge.svg
:target: https://codecov.io/gh/OmkarPathak/pyresparser
.. |Stargazers over time| image:: https://starchart.cc/OmkarPathak/pyresparser.svg
:target: https://starchart.cc/OmkarPathak/pyresparser
|
AmeriResumeParser
|
/AmeriResumeParser-1.0.0.tar.gz/AmeriResumeParser-1.0.0/README.rst
|
README.rst
|
# pyresparser
```
A simple resume parser used for extracting information from resumes
```
Built with ❤︎ and :coffee: by [Omkar Pathak](https://github.com/OmkarPathak)
---
[](https://github.com/OmkarPathak/pyresparser/stargazers)
[](https://pypi.org/project/pyresparser/)
[](https://pepy.tech/project/pyresparser)
[](https://github.com/OmkarPathak/pyresparser/blob/master/LICENSE)  [](https://saythanks.io/to/[email protected])
[](https://travis-ci.com/OmkarPathak/pyresparser)
[](https://codecov.io/gh/OmkarPathak/pyresparser)
# Features
- Extract name
- Extract email
- Extract mobile numbers
- Extract skills
- Extract total experience
- Extract college name
- Extract degree
- Extract designation
- Extract company names
# Installation
- You can install this package using
```bash
pip install pyresparser
```
- For NLP operations we use spacy and nltk. Install them using below commands:
```bash
# spaCy
python -m spacy download en_core_web_sm
# nltk
python -m nltk.downloader words
python -m nltk.downloader stopwords
```
# Documentation
Official documentation is available at: https://www.omkarpathak.in/pyresparser/
# Supported File Formats
- PDF and DOCx files are supported on all Operating Systems
- If you want to extract DOC files you can install [textract](https://textract.readthedocs.io/en/stable/installation.html) for your OS (Linux, MacOS)
- Note: You just have to install textract (and nothing else) and doc files will get parsed easily
# Usage
- Import it in your Python project
```python
from pyresparser import ResumeParser
data = ResumeParser('/path/to/resume/file').get_extracted_data()
```
# CLI
For running the resume extractor you can also use the `cli` provided
```bash
usage: pyresparser [-h] [-f FILE] [-d DIRECTORY] [-r REMOTEFILE]
[-re CUSTOM_REGEX] [-sf SKILLSFILE] [-e EXPORT_FORMAT]
optional arguments:
-h, --help show this help message and exit
-f FILE, --file FILE resume file to be extracted
-d DIRECTORY, --directory DIRECTORY
directory containing all the resumes to be extracted
-r REMOTEFILE, --remotefile REMOTEFILE
remote path for resume file to be extracted
-re CUSTOM_REGEX, --custom-regex CUSTOM_REGEX
custom regex for parsing mobile numbers
-sf SKILLSFILE, --skillsfile SKILLSFILE
custom skills CSV file against which skills are
searched for
-e EXPORT_FORMAT, --export-format EXPORT_FORMAT
the information export format (json)
```
# Notes:
- If you are running the app on Windows, then you can only extract .docx and .pdf files
# Result
The module returns a list of dictionaries with results as follows:
```
[
{
'college_name': ['Marathwada Mitra Mandal’s College of Engineering'],
'company_names': None,
'degree': ['B.E. IN COMPUTER ENGINEERING'],
'designation': ['Manager',
'TECHNICAL CONTENT WRITER',
'DATA ENGINEER'],
'email': '[email protected]',
'mobile_number': '8087996634',
'name': 'Omkar Pathak',
'no_of_pages': 3,
'skills': ['Operating systems',
'Linux',
'Github',
'Testing',
'Content',
'Automation',
'Python',
'Css',
'Website',
'Django',
'Opencv',
'Programming',
'C',
...],
'total_experience': 1.83
}
]
```
# References that helped me get here
- Some of the core concepts behind the algorithm have been taken from [https://github.com/divapriya/Language_Processing](https://github.com/divapriya/Language_Processing) which has been summed up in this blog [https://medium.com/@divalicious.priya/information-extraction-from-cv-acec216c3f48](https://medium.com/@divalicious.priya/information-extraction-from-cv-acec216c3f48). Thanks to Priya for sharing this concept
- [https://www.kaggle.com/nirant/hitchhiker-s-guide-to-nlp-in-spacy](https://www.kaggle.com/nirant/hitchhiker-s-guide-to-nlp-in-spacy)
- [https://www.analyticsvidhya.com/blog/2017/04/natural-language-processing-made-easy-using-spacy-%E2%80%8Bin-python/](https://www.analyticsvidhya.com/blog/2017/04/natural-language-processing-made-easy-using-spacy-%E2%80%8Bin-python/)
- **Special thanks** to dataturks for their [annotated dataset](https://dataturks.com/blog/named-entity-recognition-in-resumes.php)
# Donation
If you have found my software to be of any use to you, do consider helping me pay my internet bills. This would encourage me to create more such software :smile:
| PayPal | <a href="https://paypal.me/omkarpathak27" target="_blank"><img src="https://www.paypalobjects.com/webstatic/mktg/logo/AM_mc_vs_dc_ae.jpg" alt="Donate via PayPal!" title="Donate via PayPal!" /></a> |
|:-------------------------------------------:|:-------------------------------------------------------------:|
| ₹ (INR) | <a href="https://www.instamojo.com/@omkarpathak/" target="_blank"><img src="https://www.soldermall.com/images/pic-online-payment.jpg" alt="Donate via Instamojo" title="Donate via instamojo" /></a> |
# Stargazers over time
[](https://starchart.cc/OmkarPathak/pyresparser)
|
AmeriResumeParser
|
/AmeriResumeParser-1.0.0.tar.gz/AmeriResumeParser-1.0.0/README.md
|
README.md
|
import os
import json
import argparse
from pprint import pprint
import io
import sys
import multiprocessing as mp
import urllib
from urllib.request import Request, urlopen
from pyresparser import ResumeParser
def print_cyan(text):
print("\033[96m {}\033[00m" .format(text))
class ResumeParserCli(object):
def __init__(self):
self.__parser = argparse.ArgumentParser()
self.__parser.add_argument(
'-f',
'--file',
help="resume file to be extracted")
self.__parser.add_argument(
'-d',
'--directory',
help="directory containing all the resumes to be extracted")
self.__parser.add_argument(
'-r',
'--remotefile',
help="remote path for resume file to be extracted")
self.__parser.add_argument(
'-re',
'--custom-regex',
help="custom regex for parsing mobile numbers")
self.__parser.add_argument(
'-sf',
'--skillsfile',
help="custom skills CSV file against \
which skills are searched for")
self.__parser.add_argument(
'-e',
'--export-format',
help="the information export format (json)")
self.__parser.add_argument(
'-o',
'--export-filepath',
help="the export file path")
def __banner(self):
banner_string = r'''
____ __ __________ _________ ____ _____________ _____
/ __ \/ / / / ___/ _ \/ ___/ __ \/ __ `/ ___/ ___/ _ \/ ___/
/ /_/ / /_/ / / / __(__ ) /_/ / /_/ / / (__ ) __/ /
/ .___/\__, /_/ \___/____/ .___/\__,_/_/ /____/\___/_/
/_/ /____/ /_/
- By Omkar Pathak ([email protected])
'''
print(banner_string)
def export_data(self, exported_data, args):
'''function to export resume data in specified format
'''
if args.export_format:
if args.export_format == 'json':
with open(args.export_filepath, 'w') as fd:
json.dump(exported_data, fd, sort_keys=True, indent=4)
abs_path = os.path.abspath(args.export_filepath)
print('Data exported successfully at: ' + abs_path)
sys.exit(0)
else:
print('Unsupported export format: ' + args.export_format)
sys.exit(1)
else:
return exported_data
def extract_resume_data(self):
args = self.__parser.parse_args()
if args.export_format and not args.export_filepath:
print('Please specify output file path using -o option')
sys.exit(1)
if args.remotefile:
return self.export_data(
self.__extract_from_remote_file(
args.remotefile,
args.skillsfile,
args.custom_regex
),
args
)
if args.file and not args.directory:
return self.export_data(
self.__extract_from_file(
args.file,
args.skillsfile,
args.custom_regex
),
args
)
elif args.directory and not args.file:
return self.export_data(
self.__extract_from_directory(
args.directory,
args.skillsfile,
args.custom_regex
),
args
)
else:
self.__parser.print_help()
def __extract_from_file(self, file, skills_file=None, custom_regex=None):
if os.path.exists(file):
print_cyan('Extracting data from: {}'.format(file))
resume_parser = ResumeParser(file, skills_file, custom_regex)
return [resume_parser.get_extracted_data()]
else:
print('File not found. Please provide a valid file name')
sys.exit(1)
def __extract_from_directory(
self,
directory,
skills_file=None,
custom_regex=None
):
if os.path.exists(directory):
pool = mp.Pool(mp.cpu_count())
resumes = []
for root, _, filenames in os.walk(directory):
for filename in filenames:
file = os.path.join(root, filename)
resumes.append([file, skills_file, custom_regex])
results = pool.map(resume_result_wrapper, resumes)
pool.close()
pool.join()
return results
else:
print('Directory not found. Please provide a valid directory')
sys.exit(1)
def __extract_from_remote_file(
self,
remote_file,
skills_file,
custom_regex
):
try:
print_cyan('Extracting data from: {}'.format(remote_file))
req = Request(remote_file, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
_file = io.BytesIO(webpage)
_file.name = remote_file.split('/')[-1]
resume_parser = ResumeParser(_file, skills_file, custom_regex)
return [resume_parser.get_extracted_data()]
except urllib.error.HTTPError:
print('File not found. Please provide correct URL for resume file')
sys.exit(1)
def resume_result_wrapper(args):
print_cyan('Extracting data from: {}'.format(args[0]))
parser = ResumeParser(args[0], args[1], args[2])
return parser.get_extracted_data()
def main():
cli_obj = ResumeParserCli()
pprint(cli_obj.extract_resume_data())
|
AmeriResumeParser
|
/AmeriResumeParser-1.0.0.tar.gz/AmeriResumeParser-1.0.0/pyresparser/command_line.py
|
command_line.py
|
import os
import multiprocessing as mp
import io
import spacy
import pprint
from spacy.matcher import Matcher
from . import utils
class ResumeParser(object):
def __init__(
self,
resume,
skills_file=None,
custom_regex=None
):
nlp = spacy.load('en_core_web_sm')
custom_nlp = spacy.load(os.path.dirname(os.path.abspath(__file__)))
self.__skills_file = skills_file
self.__custom_regex = custom_regex
self.__matcher = Matcher(nlp.vocab)
self.__details = {
'name': None,
'email': None,
'mobile_number': None,
'skills': None,
'college_name': None,
'degree': None,
'designation': None,
'experience': None,
'company_names': None,
'no_of_pages': None,
'total_experience': None,
}
self.__resume = resume
if not isinstance(self.__resume, io.BytesIO):
ext = os.path.splitext(self.__resume)[1].split('.')[1]
else:
ext = self.__resume.name.split('.')[1]
self.__text_raw = utils.extract_text(self.__resume, '.' + ext)
self.__text = ' '.join(self.__text_raw.split())
self.__nlp = nlp(self.__text)
self.__custom_nlp = custom_nlp(self.__text_raw)
self.__noun_chunks = list(self.__nlp.noun_chunks)
self.__get_basic_details()
def get_extracted_data(self):
return self.__details
def __get_basic_details(self):
cust_ent = utils.extract_entities_wih_custom_model(
self.__custom_nlp
)
name = utils.extract_name(self.__nlp, matcher=self.__matcher)
email = utils.extract_email(self.__text)
mobile = utils.extract_mobile_number(self.__text, self.__custom_regex)
skills = utils.extract_skills(
self.__nlp,
self.__noun_chunks,
self.__skills_file
)
# edu = utils.extract_education(
# [sent.string.strip() for sent in self.__nlp.sents]
# )
entities = utils.extract_entity_sections_grad(self.__text_raw)
# extract name
try:
self.__details['name'] = cust_ent['Name'][0]
except (IndexError, KeyError):
self.__details['name'] = name
# extract email
self.__details['email'] = email
# extract mobile number
self.__details['mobile_number'] = mobile
# extract skills
self.__details['skills'] = skills
# extract college name
try:
self.__details['college_name'] = entities['College Name']
except KeyError:
pass
# extract education Degree
try:
self.__details['degree'] = cust_ent['Degree']
except KeyError:
pass
# extract designation
try:
self.__details['designation'] = cust_ent['Designation']
except KeyError:
pass
# extract company names
try:
self.__details['company_names'] = cust_ent['Companies worked at']
except KeyError:
pass
try:
self.__details['experience'] = entities['experience']
try:
exp = round(
utils.get_total_experience(entities['experience']) / 12,
2
)
self.__details['total_experience'] = exp
except KeyError:
self.__details['total_experience'] = 0
except KeyError:
self.__details['total_experience'] = 0
self.__details['no_of_pages'] = utils.get_number_of_pages(
self.__resume
)
return
def resume_result_wrapper(resume):
parser = ResumeParser(resume)
return parser.get_extracted_data()
if __name__ == '__main__':
pool = mp.Pool(mp.cpu_count())
resumes = []
data = []
for root, directories, filenames in os.walk('resumes/'):
for filename in filenames:
file = os.path.join(root, filename)
resumes.append(file)
results = [
pool.apply_async(
resume_result_wrapper,
args=(x,)
) for x in resumes
]
results = [p.get() for p in results]
pprint.pprint(results)
|
AmeriResumeParser
|
/AmeriResumeParser-1.0.0.tar.gz/AmeriResumeParser-1.0.0/pyresparser/resume_parser.py
|
resume_parser.py
|
import os
import io
import spacy
import docx2txt
import constants as cs
from pdfminer.converter import TextConverter
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFSyntaxError
def extract_text_from_pdf(pdf_path):
'''
Helper function to extract the plain text from .pdf files
:param pdf_path: path to PDF file to be extracted (remote or local)
:return: iterator of string of extracted text
'''
# https://www.blog.pythonlibrary.org/2018/05/03/exporting-data-from-pdfs-with-python/
if not isinstance(pdf_path, io.BytesIO):
# extract text from local pdf file
with open(pdf_path, 'rb') as fh:
try:
for page in PDFPage.get_pages(
fh,
caching=True,
check_extractable=True
):
resource_manager = PDFResourceManager()
fake_file_handle = io.StringIO()
converter = TextConverter(
resource_manager,
fake_file_handle,
codec='utf-8',
laparams=LAParams()
)
page_interpreter = PDFPageInterpreter(
resource_manager,
converter
)
page_interpreter.process_page(page)
text = fake_file_handle.getvalue()
yield text
# close open handles
converter.close()
fake_file_handle.close()
except PDFSyntaxError:
return
else:
# extract text from remote pdf file
try:
for page in PDFPage.get_pages(
pdf_path,
caching=True,
check_extractable=True
):
resource_manager = PDFResourceManager()
fake_file_handle = io.StringIO()
converter = TextConverter(
resource_manager,
fake_file_handle,
codec='utf-8',
laparams=LAParams()
)
page_interpreter = PDFPageInterpreter(
resource_manager,
converter
)
page_interpreter.process_page(page)
text = fake_file_handle.getvalue()
yield text
# close open handles
converter.close()
fake_file_handle.close()
except PDFSyntaxError:
return
def get_number_of_pages(file_name):
try:
if isinstance(file_name, io.BytesIO):
# for remote pdf file
count = 0
for page in PDFPage.get_pages(
file_name,
caching=True,
check_extractable=True
):
count += 1
return count
else:
# for local pdf file
if file_name.endswith('.pdf'):
count = 0
with open(file_name, 'rb') as fh:
for page in PDFPage.get_pages(
fh,
caching=True,
check_extractable=True
):
count += 1
return count
else:
return None
except PDFSyntaxError:
return None
def extract_text_from_docx(doc_path):
'''
Helper function to extract plain text from .docx files
:param doc_path: path to .docx file to be extracted
:return: string of extracted text
'''
try:
temp = docx2txt.process(doc_path)
text = [line.replace('\t', ' ') for line in temp.split('\n') if line]
return ' '.join(text)
except KeyError:
return ' '
def extract_text_from_doc(doc_path):
'''
Helper function to extract plain text from .doc files
:param doc_path: path to .doc file to be extracted
:return: string of extracted text
'''
try:
try:
import textract
except ImportError:
return ' '
temp = textract.process(doc_path).decode('utf-8')
text = [line.replace('\t', ' ') for line in temp.split('\n') if line]
return ' '.join(text)
except KeyError:
return ' '
def extract_text(file_path, extension):
'''
Wrapper function to detect the file extension and call text
extraction function accordingly
:param file_path: path of file of which text is to be extracted
:param extension: extension of file `file_name`
'''
text = ''
if extension == '.pdf':
for page in extract_text_from_pdf(file_path):
text += ' ' + page
elif extension == '.docx':
text = extract_text_from_docx(file_path)
elif extension == '.doc':
text = extract_text_from_doc(file_path)
return text
def extract_entity_sections_grad(text):
'''
Helper function to extract all the raw text from sections of resume
specifically for graduates and undergraduates
:param text: Raw text of resume
:return: dictionary of entities
'''
text_split = [i.strip() for i in text.split('\n')]
# sections_in_resume = [i for i in text_split if i.lower() in sections]
entities = {}
key = False
for phrase in text_split:
if len(phrase) == 1:
p_key = phrase
else:
p_key = set(phrase.lower().split()) & set(cs.RESUME_SECTIONS_GRAD)
try:
p_key = list(p_key)[0]
except IndexError:
pass
if p_key in cs.RESUME_SECTIONS_GRAD:
entities[p_key] = []
key = p_key
elif key and phrase.strip():
entities[key].append(phrase)
return entities
nlp = spacy.load(os.path.dirname(os.path.abspath(__file__)))
# resumes = '/home/omkarpathak27/Documents/GITS/resumeparser/resumes/'
# text_raw = extract_text(resume, '.pdf')
# text = ' '.join(text_raw.split())
# print(text)
# for resume in os.listdir(resumes):
text_raw = extract_text(
'/home/omkarpathak27/Downloads/OmkarResume.pdf',
'.pdf'
)
# entity = extract_entity_sections_grad(text_raw)
# if 'experience' in entity.keys():
doc2 = nlp(text_raw)
entities = {}
for ent in doc2.ents:
if ent.label_ not in entities.keys():
entities[ent.label_] = [ent.text]
else:
entities[ent.label_].append(ent.text)
for key in entities.keys():
entities[key] = list(set(entities[key]))
print(entities)
# print(doc2.ents)
|
AmeriResumeParser
|
/AmeriResumeParser-1.0.0.tar.gz/AmeriResumeParser-1.0.0/pyresparser/custom_t.py
|
custom_t.py
|
import io
import os
import re
import nltk
import pandas as pd
import docx2txt
from datetime import datetime
from dateutil import relativedelta
from . import constants as cs
from pdfminer.converter import TextConverter
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFSyntaxError
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
def extract_text_from_pdf(pdf_path):
'''
Helper function to extract the plain text from .pdf files
:param pdf_path: path to PDF file to be extracted (remote or local)
:return: iterator of string of extracted text
'''
# https://www.blog.pythonlibrary.org/2018/05/03/exporting-data-from-pdfs-with-python/
if not isinstance(pdf_path, io.BytesIO):
# extract text from local pdf file
with open(pdf_path, 'rb') as fh:
try:
for page in PDFPage.get_pages(
fh,
caching=True,
check_extractable=True
):
resource_manager = PDFResourceManager()
fake_file_handle = io.StringIO()
converter = TextConverter(
resource_manager,
fake_file_handle,
codec='utf-8',
laparams=LAParams()
)
page_interpreter = PDFPageInterpreter(
resource_manager,
converter
)
page_interpreter.process_page(page)
text = fake_file_handle.getvalue()
yield text
# close open handles
converter.close()
fake_file_handle.close()
except PDFSyntaxError:
return
else:
# extract text from remote pdf file
try:
for page in PDFPage.get_pages(
pdf_path,
caching=True,
check_extractable=True
):
resource_manager = PDFResourceManager()
fake_file_handle = io.StringIO()
converter = TextConverter(
resource_manager,
fake_file_handle,
codec='utf-8',
laparams=LAParams()
)
page_interpreter = PDFPageInterpreter(
resource_manager,
converter
)
page_interpreter.process_page(page)
text = fake_file_handle.getvalue()
yield text
# close open handles
converter.close()
fake_file_handle.close()
except PDFSyntaxError:
return
def get_number_of_pages(file_name):
try:
if isinstance(file_name, io.BytesIO):
# for remote pdf file
count = 0
for page in PDFPage.get_pages(
file_name,
caching=True,
check_extractable=True
):
count += 1
return count
else:
# for local pdf file
if file_name.endswith('.pdf'):
count = 0
with open(file_name, 'rb') as fh:
for page in PDFPage.get_pages(
fh,
caching=True,
check_extractable=True
):
count += 1
return count
else:
return None
except PDFSyntaxError:
return None
def extract_text_from_docx(doc_path):
'''
Helper function to extract plain text from .docx files
:param doc_path: path to .docx file to be extracted
:return: string of extracted text
'''
try:
temp = docx2txt.process(doc_path)
text = [line.replace('\t', ' ') for line in temp.split('\n') if line]
return ' '.join(text)
except KeyError:
return ' '
def extract_text_from_doc(doc_path):
'''
Helper function to extract plain text from .doc files
:param doc_path: path to .doc file to be extracted
:return: string of extracted text
'''
try:
try:
import textract
except ImportError:
return ' '
text = textract.process(doc_path).decode('utf-8')
return text
except KeyError:
return ' '
def extract_text(file_path, extension):
'''
Wrapper function to detect the file extension and call text
extraction function accordingly
:param file_path: path of file of which text is to be extracted
:param extension: extension of file `file_name`
'''
text = ''
if extension == '.pdf':
for page in extract_text_from_pdf(file_path):
text += ' ' + page
elif extension == '.docx':
text = extract_text_from_docx(file_path)
elif extension == '.doc':
text = extract_text_from_doc(file_path)
return text
def extract_entity_sections_grad(text):
'''
Helper function to extract all the raw text from sections of
resume specifically for graduates and undergraduates
:param text: Raw text of resume
:return: dictionary of entities
'''
text_split = [i.strip() for i in text.split('\n')]
# sections_in_resume = [i for i in text_split if i.lower() in sections]
entities = {}
key = False
for phrase in text_split:
if len(phrase) == 1:
p_key = phrase
else:
p_key = set(phrase.lower().split()) & set(cs.RESUME_SECTIONS_GRAD)
try:
p_key = list(p_key)[0]
except IndexError:
pass
if p_key in cs.RESUME_SECTIONS_GRAD:
entities[p_key] = []
key = p_key
elif key and phrase.strip():
entities[key].append(phrase)
# entity_key = False
# for entity in entities.keys():
# sub_entities = {}
# for entry in entities[entity]:
# if u'\u2022' not in entry:
# sub_entities[entry] = []
# entity_key = entry
# elif entity_key:
# sub_entities[entity_key].append(entry)
# entities[entity] = sub_entities
# pprint.pprint(entities)
# make entities that are not found None
# for entity in cs.RESUME_SECTIONS:
# if entity not in entities.keys():
# entities[entity] = None
return entities
def extract_entities_wih_custom_model(custom_nlp_text):
'''
Helper function to extract different entities with custom
trained model using SpaCy's NER
:param custom_nlp_text: object of `spacy.tokens.doc.Doc`
:return: dictionary of entities
'''
entities = {}
for ent in custom_nlp_text.ents:
if ent.label_ not in entities.keys():
entities[ent.label_] = [ent.text]
else:
entities[ent.label_].append(ent.text)
for key in entities.keys():
entities[key] = list(set(entities[key]))
return entities
def get_total_experience(experience_list):
'''
Wrapper function to extract total months of experience from a resume
:param experience_list: list of experience text extracted
:return: total months of experience
'''
exp_ = []
for line in experience_list:
experience = re.search(
r'(?P<fmonth>\w+.\d+)\s*(\D|to)\s*(?P<smonth>\w+.\d+|present)',
line,
re.I
)
if experience:
exp_.append(experience.groups())
total_exp = sum(
[get_number_of_months_from_dates(i[0], i[2]) for i in exp_]
)
total_experience_in_months = total_exp
return total_experience_in_months
def get_number_of_months_from_dates(date1, date2):
'''
Helper function to extract total months of experience from a resume
:param date1: Starting date
:param date2: Ending date
:return: months of experience from date1 to date2
'''
if date2.lower() == 'present':
date2 = datetime.now().strftime('%b %Y')
try:
if len(date1.split()[0]) > 3:
date1 = date1.split()
date1 = date1[0][:3] + ' ' + date1[1]
if len(date2.split()[0]) > 3:
date2 = date2.split()
date2 = date2[0][:3] + ' ' + date2[1]
except IndexError:
return 0
try:
date1 = datetime.strptime(str(date1), '%b %Y')
date2 = datetime.strptime(str(date2), '%b %Y')
months_of_experience = relativedelta.relativedelta(date2, date1)
months_of_experience = (months_of_experience.years
* 12 + months_of_experience.months)
except ValueError:
return 0
return months_of_experience
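# Example: get_number_of_months_from_dates('January 2018', 'Mar 2019') first
# normalises both dates to the '%b %Y' format ('Jan 2018', 'Mar 2019') and
# then returns the difference in months, i.e. 14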
def extract_entity_sections_professional(text):
'''
Helper function to extract all the raw text from sections of
resume specifically for professionals
:param text: Raw text of resume
:return: dictionary of entities
'''
text_split = [i.strip() for i in text.split('\n')]
entities = {}
key = False
for phrase in text_split:
if len(phrase) == 1:
p_key = phrase
else:
p_key = set(phrase.lower().split()) \
& set(cs.RESUME_SECTIONS_PROFESSIONAL)
try:
p_key = list(p_key)[0]
except IndexError:
pass
if p_key in cs.RESUME_SECTIONS_PROFESSIONAL:
entities[p_key] = []
key = p_key
elif key and phrase.strip():
entities[key].append(phrase)
return entities
def extract_email(text):
'''
Helper function to extract email id from text
:param text: plain text extracted from resume file
'''
email = re.findall(r"([^@|\s]+@[^@]+\.[^@|\s]+)", text)
if email:
try:
return email[0].split()[0].strip(';')
except IndexError:
return None
def extract_name(nlp_text, matcher):
'''
Helper function to extract name from spacy nlp text
:param nlp_text: object of `spacy.tokens.doc.Doc`
:param matcher: object of `spacy.matcher.Matcher`
:return: string of full name
'''
pattern = [cs.NAME_PATTERN]
matcher.add('NAME', None, *pattern)
matches = matcher(nlp_text)
for _, start, end in matches[1:]:
span = nlp_text[start:end]
if 'name' not in span.text.lower():
return span.text
def extract_mobile_number(text, custom_regex=None):
'''
Helper function to extract mobile number from text
:param text: plain text extracted from resume file
:return: string of extracted mobile numbers
'''
# Found this complicated regex on :
# https://zapier.com/blog/extract-links-email-phone-regex/
# mob_num_regex = r'''(?:(?:\+?([1-9]|[0-9][0-9]|
# [0-9][0-9][0-9])\s*(?:[.-]\s*)?)?(?:\(\s*([2-9]1[02-9]|
# [2-9][02-8]1|[2-9][02-8][02-9])\s*\)|([0-9][1-9]|
# [0-9]1[02-9]|[2-9][02-8]1|
# [2-9][02-8][02-9]))\s*(?:[.-]\s*)?)?([2-9]1[02-9]|
# [2-9][02-9]1|[2-9][02-9]{2})\s*(?:[.-]\s*)?([0-9]{7})
# (?:\s*(?:#|x\.?|ext\.?|
# extension)\s*(\d+))?'''
if not custom_regex:
mob_num_regex = r'''(\d{3}[-\.\s]??\d{3}[-\.\s]??\d{4}|\(\d{3}\)
[-\.\s]*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4})'''
phone = re.findall(re.compile(mob_num_regex), text)
else:
phone = re.findall(re.compile(custom_regex), text)
if phone:
number = ''.join(phone[0])
return number
def extract_skills(nlp_text, noun_chunks, skills_file=None):
'''
Helper function to extract skills from spacy nlp text
:param nlp_text: object of `spacy.tokens.doc.Doc`
:param noun_chunks: noun chunks extracted from nlp text
:return: list of skills extracted
'''
tokens = [token.text for token in nlp_text if not token.is_stop]
if not skills_file:
data = pd.read_csv(
os.path.join(os.path.dirname(__file__), 'skills.csv')
)
else:
data = pd.read_csv(skills_file)
skills = list(data.columns.values)
skillset = []
# check for one-grams
for token in tokens:
if token.lower() in skills:
skillset.append(token)
# check for bi-grams and tri-grams
for token in noun_chunks:
token2 = token.text.lower().strip()
if token2 in skills:
skillset.append(token.text)
#return [i.capitalize() for i in set([i.lower() for i in skillset])]
return skillset
def cleanup(token, lower=True):
if lower:
token = token.lower()
return token.strip()
def extract_education(nlp_text):
'''
Helper function to extract education from spacy nlp text
:param nlp_text: object of `spacy.tokens.doc.Doc`
:return: tuple of education degree and year if the year is found,
else only returns the education degree
'''
edu = {}
# Extract education degree
try:
for index, text in enumerate(nlp_text):
for tex in text.split():
tex = re.sub(r'[?|$|.|!|,]', r'', tex)
if tex.upper() in cs.EDUCATION and tex not in cs.STOPWORDS:
edu[tex] = text + nlp_text[index + 1]
except IndexError:
pass
# Extract year
education = []
for key in edu.keys():
year = re.search(re.compile(cs.YEAR), edu[key])
if year:
education.append((key, ''.join(year.group(0))))
else:
education.append(key)
return education
def extract_experience(resume_text):
'''
Helper function to extract experience from resume text
:param resume_text: Plain resume text
:return: list of experience
'''
wordnet_lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))
# word tokenization
word_tokens = nltk.word_tokenize(resume_text)
# remove stop words and lemmatize
filtered_sentence = [
w for w in word_tokens if w not
in stop_words and wordnet_lemmatizer.lemmatize(w)
not in stop_words
]
sent = nltk.pos_tag(filtered_sentence)
# parse regex
cp = nltk.RegexpParser('P: {<NNP>+}')
parsed = cp.parse(sent) #renamed from `cs`, which shadowed the constants import
# for i in parsed.subtrees(filter=lambda x: x.label() == 'P'):
# print(i)
test = []
for vp in list(
parsed.subtrees(filter=lambda x: x.label() == 'P')
):
test.append(" ".join([
i[0] for i in vp.leaves()
if len(vp.leaves()) >= 2])
)
# Search the word 'experience' in the chunk and
# then print out the text after it
x = [
x[x.lower().index('experience') + 10:]
for i, x in enumerate(test)
if x and 'experience' in x.lower()
]
return x
|
AmeriResumeParser
|
/AmeriResumeParser-1.0.0.tar.gz/AmeriResumeParser-1.0.0/pyresparser/utils.py
|
utils.py
|
from io import BytesIO
import struct
import datetime, time, calendar
from collections import OrderedDict
from weakref import WeakKeyDictionary as weakdict
class Undefined(object):
__slots__ = ()
def __new__(cls):
return undefined
def __eq__(self, other):
return self is other
def __ne__(self, other): # was misspelled __neq__, which Python never calls
return self is not other
undefined = object().__new__(Undefined) # create the singleton without recursing into Undefined.__new__
class Loader(object):
def add_alias(self, alias, constructor):
raise NotImplementedError()
def load(self, stream, proto=0, context=None):
# please keep it reentrant
if context is None:
context = ReadContext()
if proto == 0:
return self._read_item0(stream, context)
elif proto == 3:
return self._read_item3(stream, context)
else:
raise ValueError(proto)
def loads(self, value, proto=0):
return self.load(BytesIO(value), proto)
def load_all(self, stream, proto=0):
context = ReadContext() # share one read context across all items, as in load()
try:
while True:
yield self.load(stream, proto, context)
except EOFError:
return
def loads_all(self, value, proto=0):
return self.load_all(BytesIO(value), proto)
def _read_item3(self, stream, context):
marker = stream.read(1)[0]
if marker == 0x00:
return undefined
elif marker == 0x01:
return None
elif marker == 0x02:
return False
elif marker == 0x03:
return True
elif marker == 0x04:
return self._read_vli(stream)
elif marker == 0x05:
return struct.unpack('!d', stream.read(8))[0]
elif marker == 0x06:
return self._read_string3(stream, context)
elif marker == 0x07:
raise NotImplementedError("XML Document")
elif marker == 0x08:
num = self._read_vli(stream)
if num & 1:
res = datetime.datetime.utcfromtimestamp(
struct.unpack('!d', stream.read(8))[0]/1000)
context.add_object(res)
else:
res = context.get_object(num >> 1)
return res
elif marker == 0x09:
num = self._read_vli(stream)
if num & 1:
res = None
while True:
val = self._read_string3(stream, context)
if val == '':
if res is None:
res = [None]*(num >> 1)
context.add_object(res)
break
elif res is None:
res = OrderedDict()
context.add_object(res)
res[val] = self._read_item3(stream, context)
for i in range(num >> 1):
res[i] = self._read_item3(stream, context)
else:
res = context.get_object(num >> 1)
return res
elif marker == 0x0A:
num = self._read_vli(stream)
if num & 1:
if num & 2:
if num & 4: # traits-ext
trait = Trait()
raise NotImplementedError('Traits ext')
else: # traits
dyn = bool(num & 8)
memb = num >> 4
trait = Trait(dyn,
self._read_string3(stream, context),
(self._read_string3(stream, context)
for i in range(memb)))
context.add_trait(trait)
else: # traits-ref
trait = context.get_trait(num >> 2)
else:
return context.get_object(num >> 1)
if trait.members:
raise NotImplementedError("Trait members")
if not trait.dynamic:
raise NotImplementedError("Sealed (non-dynamic) traits")
res = {}
while True:
key = self._read_string3(stream, context)
if key == "":
break
value = self._read_item3(stream, context)
res[key] = value
return res
elif marker == 0x0B:
# xml
raise NotImplementedError()
elif marker == 0x0C:
num = self._read_vli(stream)
if num & 1:
res = stream.read(num >> 1)
context.add_object(res)
else:
res = context.get_object(num >> 1)
return res
else:
raise NotImplementedError("Marker 0x{:02x}".format(marker))
def _read_vli(self, stream):
val = 0
while True:
byte = stream.read(1)[0]
val = (val << 7) | (byte & 0x7f)
if not (byte & 0x80):
break
return val
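# example: the byte sequence 0x81 0x00 decodes as (0x01 << 7) | 0x00 = 128;
# each byte carries 7 payload bits and the high bit marks continuation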
def _read_string3(self, stream, context):
num = self._read_vli(stream)
if num & 1:
num >>= 1
if num:
res = stream.read(num).decode('utf-8')
context.add_string(res)
return res
else:
return ''
else:
num >>= 1
return context.get_string(num)
def _read_string0(self, stream):
len = struct.unpack('!H', stream.read(2))[0]
return stream.read(len).decode('utf-8')
def _read_item0(self, stream, context):
marker = stream.read(1)
if marker:
marker = marker[0]
else:
raise EOFError()
if marker == 0x00:
return struct.unpack('!d', stream.read(8))[0]
elif marker == 0x01:
return bool(stream.read(1)[0])
elif marker == 0x02:
return self._read_string0(stream)
elif marker == 0x03:
res = {}
context.add_complex(res)
while True:
key = self._read_string0(stream)
if key == '':
break
res[key] = self._read_item0(stream, context)
end = stream.read(1)[0]
assert end == 0x09
return res
elif marker == 0x05: # null
return None
elif marker == 0x06: # undefined
return undefined
elif marker == 0x07: # ref
idx = struct.unpack('!H', stream.read(2))[0]
return context.get_complex(idx)
elif marker == 0x08: # assoc arr
cnt = struct.unpack('!L', stream.read(4))[0]
res = {}
context.add_complex(res) # register once, before reading members
for i in range(cnt):
key = self._read_string0(stream)
res[key] = self._read_item0(stream, context)
return res
elif marker == 0x0A: # strict array
cnt = struct.unpack('!L', stream.read(4))[0]
res = []
context.add_complex(res)
for i in range(cnt):
res.append(self._read_item0(stream, context))
return res
elif marker == 0x0B: # date
val = struct.unpack('!d', stream.read(8))[0]
res = datetime.datetime.utcfromtimestamp(val/1000)
tz = stream.read(2)
assert tz == b'\x00\x00'
return res
elif marker == 0x0C: # longstring
len = struct.unpack('!L', stream.read(4))[0]
return stream.read(len).decode('utf-8')
elif marker == 0x11: # AVM+
return self._read_item3(stream, context)
else:
raise NotImplementedError("Marker {:02x}".format(marker))
class Trait(object):
__slots__ = ('dynamic', 'classname', 'members')
def __init__(self, dynamic, classname, members=()):
self.dynamic = dynamic
self.members = tuple(members)
self.classname = classname
anonymous_trait = Trait(True, "")
class Dumper(object):
def dump(self, data, stream=None, proto=None, context=None):
# please keep it reentrant
if context is None:
context = WriteContext()
if proto == 0:
return self._write_item0(data, stream, context)
elif proto == 3:
return self._write_item3(data, stream, context)
else:
raise ValueError(proto)
def _write_item0(self, data, stream, context):
if isinstance(data, bool):
stream.write(b'\x01\x01' if data else b'\x01\x00')
elif isinstance(data, (float, int)):
stream.write(b'\x00' + struct.pack('!d', data))
elif isinstance(data, str):
if len(data) < 65536:
stream.write(b'\x02')
self._write_string0(data, stream, context)
else:
data = data.encode('utf-8')
stream.write(b'\x0c' + struct.pack('!L', len(data)))
stream.write(data)
elif isinstance(data, dict):
ref = context.get_complex(data)
if ref is not None:
stream.write(b'\x07' + struct.pack('!H', ref))
else:
context.add_complex(data)
stream.write(b'\x03')
for k, v in data.items():
self._write_string0(k, stream, context)
self._write_item0(v, stream, context)
self._write_string0("", stream, context)
stream.write(b'\x09')
elif data is None: # null
stream.write(b'\x05')
elif data is undefined: # undefined
stream.write(b'\x06')
elif isinstance(data, (list, tuple)): # strict array
ref = context.get_complex(data)
if ref is not None:
stream.write(b'\x07' + struct.pack('!H', ref))
else:
context.add_complex(data)
stream.write(b'\x0A' + struct.pack('!L', len(data)))
for i in data:
self._write_item0(i, stream, context)
elif isinstance(data, datetime.datetime):
# milliseconds since the epoch; time.mktime would apply the local UTC
# offset, so subtract the epoch directly and treat the naive value as UTC
stream.write(b'\x0b' + struct.pack('!d',
(data - datetime.datetime(1970, 1, 1)).total_seconds()*1000) + b'\x00\x00')
else:
raise NotImplementedError("Type {!r}".format(type(data)))
def _write_string0(self, data, stream, context):
data = data.encode('utf-8')
stream.write(struct.pack('!H', len(data)))
stream.write(data)
def _write_item3(self, data, stream, context):
if data is undefined:
stream.write(b'\x00')
elif data is None:
stream.write(b'\x01')
elif data is False:
stream.write(b'\x02')
elif data is True:
stream.write(b'\x03')
elif isinstance(data, int) and data >= 0 and data < (1 << 29): # AMF3 ints are 29-bit (U29); larger values fall through to the double branch
stream.write(b'\x04')
self._write_vli(data, stream)
elif isinstance(data, (int, float)):
stream.write(b'\x05' + struct.pack('!d', data))
elif isinstance(data, str):
stream.write(b'\x06')
self._write_string3(data, stream, context)
elif isinstance(data, datetime.datetime):
stream.write(b'\x08')
ref = context.get_object(data)
if ref is not None:
self._write_vli((ref << 1), stream)
else:
self._write_vli(1, stream)
# same UTC handling as the AMF0 date above
stream.write(struct.pack('!d',
(data - datetime.datetime(1970, 1, 1)).total_seconds()*1000))
context.add_object(data)
elif isinstance(data, dict):
stream.write(b'\x0A')
ref = context.get_object(data)
if ref is not None:
self._write_vli((ref << 1), stream)
else:
ref = context.get_trait(anonymous_trait)
if ref is not None:
self._write_vli((ref << 2)|1, stream)
else:
context.add_trait(anonymous_trait)
self._write_vli(11, stream)
self._write_string3(anonymous_trait.classname, stream, context)
for k, v in data.items():
self._write_string3(k, stream, context)
self._write_item3(v, stream, context)
self._write_string3("", stream, context)
elif isinstance(data, list):
stream.write(b'\x09')
ref = context.get_object(data)
if ref is not None:
self._write_vli((ref << 1), stream)
else:
context.add_object(data)
self._write_vli((len(data) << 1)|1, stream)
self._write_string3("", stream, context)
for i in data:
self._write_item3(i, stream, context)
elif isinstance(data, bytes):
stream.write(b'\x0C')
ref = context.get_object(data)
if ref is not None:
self._write_vli((ref << 1), stream)
else:
context.add_object(data)
self._write_vli((len(data) << 1)|1, stream)
stream.write(data)
else:
raise NotImplementedError("Type {!r}".format(type(data)))
def _write_vli(self, data, stream):
ba = bytearray()
if not data:
stream.write(b'\x00')
return
while data:
ba.append((data & 0x7f) | 0x80)
data >>= 7
ba.reverse()
ba[-1] &= 0x7f
stream.write(ba)
def _write_string3(self, data, stream, context):
ref = context.get_string(data)
if data and ref is not None:
self._write_vli(ref << 1, stream)
else:
if data:
context.add_string(data)
data = data.encode('utf-8')
self._write_vli((len(data) << 1)|1, stream)
stream.write(data)
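# Reference tables used while (de)serializing. AMF3 keeps separate string,
# object and trait index tables; AMF0 keeps a single table of complex values
# (objects and arrays), which is what `complex` holds.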
class ReadContext(object):
def __init__(self):
self.strings = []
self.objects = []
self.traits = []
self.complex = []
def add_string(self, val):
self.strings.append(val)
def get_string(self, key):
return self.strings[key]
def add_object(self, val):
self.objects.append(val)
def get_object(self, key):
return self.objects[key]
def add_trait(self, val):
self.traits.append(val)
def get_trait(self, key):
return self.traits[key]
def add_complex(self, val):
self.complex.append(val)
def get_complex(self, key):
return self.complex[key]
class WriteContext(object):
def __init__(self):
self.strings = {}
self.nstrings = 0
self.objects = {}
self.nobjects = 0
self.traits = {}
self.ntraits = 0
self.complex = {}
self.ncomplex = 0
def add_string(self, val):
self.strings[val] = self.nstrings
self.nstrings += 1
def get_string(self, key):
return self.strings.get(key, None)
def add_object(self, val):
self.objects[id(val)] = self.nobjects
self.nobjects += 1
def get_object(self, key):
return self.objects.get(id(key), None)
def add_trait(self, val):
self.traits[val] = self.ntraits
self.ntraits += 1
def get_trait(self, key):
return self.traits.get(key, None)
def add_complex(self, val):
self.complex[id(val)] = self.ncomplex
self.ncomplex += 1
def get_complex(self, key):
return self.complex.get(id(key), None)
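if __name__ == '__main__':
    # Illustrative round-trip sketch (not part of the original module): encode a
    # value as AMF3 with the Dumper defined above and inspect the raw bytes.
    import io
    buf = io.BytesIO()
    Dumper().dump({"answer": 42, "tags": ["a", "b"]}, buf, proto=3)
    print(buf.getvalue().hex())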
|
Amfy
|
/Amfy-0.1.1.tar.gz/Amfy-0.1.1/amfy/core.py
|
core.py
|
# RPH extraction
Contains a tool to read a .rph file into a RphData structure.
#### Usage
A simple example is given below:
```
from AmiAutomation import RphData
# Read from a rph file to a RphData object
rphData = RphData.rphToDf(path = "path_to_rph_file")
# Table data inside a dataframe
dataframe = rphData.dataFrame
```
---
# Binaries extraction
This package contains the tools to easily extract binary data from PX3's:
* Heat Log
* 2 Second Log
* Wave Log
* Composite
* Histogram
Into a pandas dataframe for further processing
## Usage
Importing a function is done the same way as with any other python package:
```
from AmiAutomation import PX3_Bin, LogData
```
From there you can call a method with the module prefix:
```
dataFrame = PX3_Bin.file_to_df(path = "C:\\Binaries")
```
or
```
dataFrame = LogData.binFileToDF(path = "C:\\Binaries")
```
---
## LogData Methods
You can get binary log data as a LogData object that contains useful metadata about the binary file, including the samples inside a pandas dataframe
___
### LogData.binFileToDF
Unpacks a binary file into a LogData object
- ### Parameters:
* **path** : str
Complete file path
* **extension** : str, optional
Explicitly enforces the file type. Value must be "bin", "cpst" or "hist".\
If no value is given, the extension is inferred from the file name; if it cannot be inferred, the default value is used instead.
Default Value: bin
* **null_promoting** : dict, optional
A dictionary mapping a .NET source type (key) to one of the following handling modes: default, object, float, Int64, string or error.
The possible dictionary keys are the .NET simple types:
- "SByte" : Signed byte
- "Byte" : Unsigned byte
- "Int16" : 16-bit integer
- "UInt16" : 16-bit unsigned integer
- "Int32" : 32-bit integer
- "UInt32" : 32-bit unsigned integer
- "Int64" : 64-bit integer
- "UInt64" : 64-bit unsigned integer
- "Char" : Character
- "Single" : Single-precision floating point
- "Double" : Double-precision floating point
- "Boolean" : Bit
- "Decimal" : 16-byte decimal precision
- "DateTime" : Date time
The dictionary's value determines how null values encountered during deserialization affect the resulting LogData dataframe column:
* "default" : Use pandas' automatic inference when dealing with null values in a column
* "object" : The returned type is the generic python object type
* "float" : The returned type is the python float type
* "Int64" : The returned type is the pandas nullable integer Int64 type
* "string" : Values are returned as strings
* "error" : Raises an exception when null values are encountered
Default value: None
- ### Returns:
* **LogData** :\
Structure containing most of the file's data
___
### LogData.binStreamToDF
Unpacks a binary file stream into a LogData object
- ### Parameters:
* **file** : stream
A python IO stream of the binary file
* **extension** : str, optional
Explicitly enforces the file type. Value must be "bin", "cpst" or "hist"
Default: bin
* **null_promoting** : dict, optional\
Same as in [LogData.binFileToDF](#logdatabinfiletodf): a dictionary mapping a .NET source type (key) to one of the following handling modes (default, object, float, Int64, string, error).
- ### Returns:
* **LogData** :\
Structure containing most of the file's data
___
### **Examples**
#### Simple file conversion
```
from AmiAutomation import LogData
# Execute the conversion from source file
logData = LogData.binFileToDF("bin_file_path.bin")
# To access samples just access the dataframe inside the LogData object
dataFrame = logData.dataFrame
```
#### Conversion from an IO Stream
```
from AmiAutomation import LogData
# Get the file stream
file_stream = open(file_path, "rb")
# Execute the conversion from stream
logData = LogData.binStreamToDF(file_stream)
# Access the dataframe inside the LogData object
dataFrame = logData.dataFrame
```
#### Conversion of a file without extension
```
from AmiAutomation import LogData
# Perform the conversion, explicitly enforcing the file type
logData = LogData.binFileToDF("file_path", extension="bin" )
# Access the dataframe inside the LogData object
dataFrame = logData.dataFrame
```
#### Conversion with null promoting
```
from AmiAutomation import LogData
# Adding null promoting to handle missing values in these types of data as object
logData = LogData.binFileToDF("bin_file_path.bin", null_promoting={"Int32":"object", "Int16":"object", "Int64":"object"})
# Access the dataframe inside the LogData object
dataFrame = logData.dataFrame
```
This method can also be used to retrieve the data table from inside a ".cpst" or ".hist" file. Detection is automatic, based on the file extension in the filename; if none is given, a warning is issued and ".bin" is assumed, as shown below.
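For example, a minimal sketch (the file name is hypothetical):
```
from AmiAutomation import LogData
# The ".cpst" extension is detected automatically from the file name
logData = LogData.binFileToDF("composite_file.cpst")
dataFrame = logData.dataFrame
```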
---
## LogData Object
The LogData object contains some metadata properties from the file read, as well as the logged data inside a pandas DataFrame (see the sketch at the end of this section)
The data is structured as follows:
- **properties** : dict\
A dictionary containing some metadata from the file; its contents depend on the type of file read:
* Bin File:
| Key | Type | Value |
|------------|-----------|--------------------------------------------------------|
| Definition | str | Xml string with the table definition of contained data |
| Version | int | file compression version |
| Name | str | file type name |
| StartTime | datetime | first sample record time |
| Increment | timedelta | time between samples |
| Duration | float | Total logged time in seconds |
* Cpst File:
| Key | Type | Value |
|-----------|------|-------------------|
| Name | str | file type name |
| FurnaceId | int | Furnace Id Number |
* Hist File:
| Key | Type | Value |
|--------------|----------|--------------------|
| Name | str | file type name |
| HeatId | int | Heat Id Number |
| ModelVersion | int | File Model Version |
| Sequence | int | Sequence number |
| Timestamp | datetime | File Timestamp |
- **dataFrame** : DataFrame\
A pandas.core.frame.DataFrame containing logged data
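A short sketch of reading the metadata next to the samples (key availability depends on the file type, as listed above):
```
from AmiAutomation import LogData
logData = LogData.binFileToDF("bin_file_path.bin")
# Metadata dictionary (for a .bin file: Definition, Version, Name, StartTime, ...)
print(logData.properties["StartTime"], logData.properties["Duration"])
# Logged samples
print(logData.dataFrame.head())
```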
---
## PX3_Bin Methods
* **file_to_df ( path, file, start_time, end_time, verbose = False )**\
Returns a single pandas dataframe containing the data extracted from the provided file, from a directory path, or from a path constrained by dates.
* To process a single file you need to provide the absolute path in the file argument
```
dataFrame = PX3_Bin.file_to_df(file = "C:\\Binaries\\20240403T002821Z$-4038953271967.bin")
```
* To process several files just provide the directory path where the binaries are (binaries inside sub-directories are also included)
```
dataFrame = PX3_Bin.file_to_df(path = "C:\\Binaries\\")
```
* You can constrain the binaries inside a directory (and sub-directories) by also providing a start date, or both a start date and an end date, as python datetime.datetime objects
```
import datetime
time = datetime.datetime(2020,2,15,13,30) # February 15th 2020, 1:30 PM
### This returns ALL the data available in the path from the given date up to the current time
dataFrame = PX3_Bin.file_to_df(path = "C:\\Binaries\\", start_time=time)
```
```
import datetime
time_start = datetime.datetime(2020,2,15,13,30) # February 15th 2020, 1:30 PM
time_end = datetime.datetime(2020,2,15,13,45) # February 15th 2020, 1:45 PM
### This returns all the data available in the path for the given 15-minute window
dataFrame = PX3_Bin.file_to_df(path = "C:\\Binaries\\", start_time=time_start, end_time=time_end )
```
#### Tested with package versions
* pythonnet 2.5.1
* pandas 1.1.0
|
AmiAutomation
|
/AmiAutomation-0.1.4.3.tar.gz/AmiAutomation-0.1.4.3/README.md
|
README.md
|
[

](https://amino-socket.readthedocs.io/)
[

](https://github.com/Hanamixp/Amino-Socket)
[

](https://pypi.org/project/Amino-Socket)
### Amino-Socket
Amino-Socket is an Amino client for Python. It provides access to [aminoapps](https://aminoapps.com) WebSockets.
### Installation
You can use either `python3 setup.py install` or `pip3 install Amino-Socket` to install.
- **Note** This Python module was tested on `python3.10`
### Documentation
This project's documentation can be found at [amino-socket.readthedocs.io](https://amino-socket.readthedocs.io/)
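A minimal connection sketch (the header values are placeholders, and the import path assumes the package exposes `Wss` at the top level):
```
from aminos import Wss

wss = Wss({"NDCAUTH": "sid=...", "NDCDEVICEID": "..."})
wss.launch()

@wss.event("on_text_message")
def on_text(event):
    print(event)
```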
|
Amino-Socket
|
/Amino-Socket-1.0.6.tar.gz/Amino-Socket-1.0.6/README.md
|
README.md
|
import json
import requests
import websocket
import threading
import time as timer
from typing import BinaryIO
from .src import headers as header
from .src.exception import exception
from sys import _getframe as getframe
from .src.objects import Event, Payload
class Callbacks:
def __init__(self):
self.handlers = {}
self.methods = {
10: self._resolve_payload,
304: self._resolve_chat_action_start,
306: self._resolve_chat_action_end,
1000: self._resolve_chat_message
}
self.chat_methods = {
"0:0": self.on_text_message,
"0:100": self.on_image_message,
"0:103": self.on_youtube_message,
"1:0": self.on_strike_message,
"2:110": self.on_voice_message,
"3:113": self.on_sticker_message,
"50:0": self.TYPE_USER_SHARE_EXURL,
"51:0": self.TYPE_USER_SHARE_USER,
"52:0": self.on_voice_chat_not_answered,
"53:0": self.on_voice_chat_not_cancelled,
"54:0": self.on_voice_chat_not_declined,
"55:0": self.on_video_chat_not_answered,
"56:0": self.on_video_chat_not_cancelled,
"57:0": self.on_video_chat_not_declined,
"58:0": self.on_avatar_chat_not_answered,
"59:0": self.on_avatar_chat_not_cancelled,
"60:0": self.on_avatar_chat_not_declined,
"100:0": self.on_delete_message,
"101:0": self.on_group_member_join,
"102:0": self.on_group_member_leave,
"103:0": self.on_chat_invite,
"104:0": self.on_chat_background_changed,
"105:0": self.on_chat_title_changed,
"106:0": self.on_chat_icon_changed,
"107:0": self.on_voice_chat_start,
"108:0": self.on_video_chat_start,
"109:0": self.on_avatar_chat_start,
"110:0": self.on_voice_chat_end,
"111:0": self.on_video_chat_end,
"112:0": self.on_avatar_chat_end,
"113:0": self.on_chat_content_changed,
"114:0": self.on_screen_room_start,
"115:0": self.on_screen_room_end,
"116:0": self.on_chat_host_transfered,
"117:0": self.on_text_message_force_removed,
"118:0": self.on_chat_removed_message,
"119:0": self.on_text_message_removed_by_admin,
"120:0": self.on_chat_tip,
"121:0": self.on_chat_pin_announcement,
"122:0": self.on_voice_chat_permission_open_to_everyone,
"123:0": self.on_voice_chat_permission_invited_and_requested,
"124:0": self.on_voice_chat_permission_invite_only,
"125:0": self.on_chat_view_only_enabled,
"126:0": self.on_chat_view_only_disabled,
"127:0": self.on_chat_unpin_announcement,
"128:0": self.on_chat_tipping_enabled,
"129:0": self.on_chat_tipping_disabled,
"65281:0": self.on_timestamp_message,
"65282:0": self.on_welcome_message,
"65283:0": self.on_invite_message
}
self.notif_methods = {
"53": self.on_member_set_you_host,
"67": self.on_member_set_you_cohost,
"68": self.on_member_remove_you_cohost
}
self.chat_actions_start = {
"Typing": self.on_user_typing_start,
}
self.chat_actions_end = {
"Typing": self.on_user_typing_end,
}
def _resolve_payload(self, data):
key = f"{data['o']['payload']['notifType']}"
return self.notif_methods.get(key, self.default)(data)
def _resolve_chat_message(self, data):
key = f"{data['o']['chatMessage']['type']}:{data['o']['chatMessage'].get('mediaType', 0)}"
return self.chat_methods.get(key, self.default)(data)
def _resolve_chat_action_start(self, data):
key = data['o'].get('actions', 0)
return self.chat_actions_start.get(key, self.default)(data)
def _resolve_chat_action_end(self, data):
key = data['o'].get('actions', 0)
return self.chat_actions_end.get(key, self.default)(data)
def resolve(self, data):
data = json.loads(data)
return self.methods.get(data["t"], self.default)(data)
def call(self, type, data):
if type in self.handlers:
for handler in self.handlers[type]:
handler(data)
def event(self, type):
def registerHandler(handler):
if type in self.handlers:
self.handlers[type].append(handler)
else:
self.handlers[type] = [handler]
return handler
return registerHandler
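# Usage sketch (illustrative): `event` is a decorator factory keyed by the
# handler names below, e.g.
#
#   @wss.event("on_text_message")   # wss is a Wss instance (Wss subclasses Callbacks)
#   def handler(event):
#       print(event)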
def on_member_set_you_host(self, data): self.call(getframe(0).f_code.co_name, Payload(data["o"]).Payload)
def on_member_remove_you_cohost(self, data): self.call(getframe(0).f_code.co_name, Payload(data["o"]).Payload)
def on_member_set_you_cohost(self, data): self.call(getframe(0).f_code.co_name, Payload(data["o"]).Payload)
def on_text_message(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_image_message(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_youtube_message(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_strike_message(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_voice_message(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_sticker_message(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def TYPE_USER_SHARE_EXURL(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def TYPE_USER_SHARE_USER(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_voice_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_voice_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_voice_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_video_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_video_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_video_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_avatar_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_avatar_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_avatar_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_delete_message(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_group_member_join(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_group_member_leave(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_invite(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_background_changed(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_title_changed(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_icon_changed(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_voice_chat_start(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_video_chat_start(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_avatar_chat_start(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_voice_chat_end(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_video_chat_end(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_avatar_chat_end(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_content_changed(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_screen_room_start(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_screen_room_end(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_host_transfered(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_text_message_force_removed(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_removed_message(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_text_message_removed_by_admin(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_tip(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_pin_announcement(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_voice_chat_permission_open_to_everyone(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_voice_chat_permission_invited_and_requested(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_voice_chat_permission_invite_only(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_view_only_enabled(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_view_only_disabled(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_unpin_announcement(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_tipping_enabled(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_chat_tipping_disabled(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_timestamp_message(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_welcome_message(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_invite_message(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_user_typing_start(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def on_user_typing_end(self, data): self.call(getframe(0).f_code.co_name, Event(data["o"]).Event)
def default(self, data): self.call(getframe(0).f_code.co_name, data)
class SetAction:
def __init__(self, wss, data):
self.action = data
self.wss = wss
def start(self):
"""
Start the Action
"""
self.wss.send(self.action)
def stop(self):
"""
Stop the Action (go back to the previous board)
"""
act = self.action
act["t"] = 303
self.wss.send(act)
class Actions:
def __init__(self, socket, comId, chatId):
self.socket = socket
self.chatId = chatId
self.comId = comId
def default(self):
"""
Default Browsing
"""
SetAction(self.socket, {
"o": {"actions": ["Browsing"], "target": f"ndc://x{self.comId}/", "ndcId": int(self.comId),
"params": {"duration": 27605}, "id": "363483"}, "t": 306}).start()
def Browsing(self, blogId: str = None, blogType: int = 0):
"""
Send Browsing Action
**Parameters**
- **blogId**: ID of the Blog (str)
- **blogType**: Type Of the Blog *poll & blog & wiki* (int)
**Return**
- **SetAction**: (Class)
"""
if blogId and blogType:
target = f"ndc://x{self.comId}/blog/"
else:
target = f"ndc://x{self.comId}/featured"
data = {
"o": {
"actions": ["Browsing"],
"target": target,
"ndcId": int(self.comId),
"params": {"blogType": blogType},
"id": "363483"
},
"t": 306
}
self.default()
return SetAction(self.socket, data)
def Chatting(self, threadType: int = 2):
"""
Send Chatting Action
**Parameters**
- **threadType**: 2 For Public 1 & 0 For Private (int)
**Return**
- **SetAction**: (Class)
"""
data = {
"o": {
"actions": ["Chatting"],
"target": f"ndc://x{self.comId}/chat-thread/{self.chatId}",
"ndcId": int(self.comId),
"params": {
"duration": 12800,
"membershipStatus": 1,
"threadType": threadType
},
"id": "1715976"
},
"t": 306
}
self.default()
return SetAction(self.socket, data)
def PublicChats(self):
"""
Send PublicChats Action
**Return**
- **SetAction**: (Class)
"""
data = {
"o": {
"actions": ["Browsing"],
"target": f"ndc://x{self.comId}/public-chats",
"ndcId": int(self.comId),
"params": {"duration": 859},
"id": "363483"
},
"t": 306
}
self.default()
return SetAction(self.socket, data)
def LeaderBoards(self):
"""
Send LeaderBoard Action
**Return**
- **SetAction**: (Class)
"""
data = {
"o": {
"actions": ["Browsing"],
"target": f"ndc://x{self.comId}/leaderboards",
"ndcId": int(self.comId),
"params": {"duration": 859},
"id": "363483"
},
"t": 306
}
self.default()
return SetAction(self.socket, data)
def Custom(self, actions: list, target: str, params: dict):
"""
Send Custom Action
**Parameters**
- **actions**: List of action Types (list[str])
- **target**: Example | ndc://x000000/leaderboards (str)
- **params**: Set the blogType and more with params (dict)
**Return**
- **SetAction**: (Class)
"""
data = {
"o": {
"actions": actions,
"target": target,
"ndcId": int(self.comId),
"params": params,
"id": "363483"
},
"t": 306
}
self.default()
return SetAction(self.socket, data)
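# Usage sketch (illustrative; the IDs are placeholders):
#
#   act = Actions(wss, comId="123456", chatId="chat-id").Chatting()
#   act.start()   # send the Chatting action
#   act.stop()    # revert to the previous board (t=303)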
class WssClient:
def __init__(self, socket, wss):
self.wss = wss
self.socket = socket
def joinVoiceChat(self, comId: str, chatId: str, joinType: int = 1):
"""
Join The Voice Chat
**Parameters**
- **comId**: ID of the Community (str)
- **chatId**: ID of the Chat (str)
- **joinType**: Join type to Join Voice as.. (int)
"""
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"id": "37549515"
},
"t": 112
}
timer.sleep(2)
self.wss.send(data)
def joinVideoChat(self, comId: str, chatId: str, joinType: int = 1):
"""
Join The Video Chat
**Parameters**
- **comId**: ID of the Community (str)
- **chatId**: ID of the Chat (str)
- **joinType**: Join type to Join Video as.. (int)
"""
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"channelType": 5,
"id": "2154531"
},
"t": 108
}
timer.sleep(2)
self.wss.send(data)
def startVoiceChat(self, comId, chatId: str, joinType: int = 1):
"""
Start The Voice Chat
**Parameters**
- **comId**: ID of the Community (str)
- **chatId**: ID of the Chat (str)
- **joinType**: Join type to Start voice as.. (int)
"""
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531"
},
"t": 112
}
timer.sleep(2)
self.wss.send(data)
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"channelType": 1,
"id": "2154531"
},
"t": 108
}
timer.sleep(2)
self.wss.send(data)
def endVoiceChat(self, comId: str, chatId: str, leaveType: int = 2):
"""
End The Voice Chat
**Parameters**
- **comId**: ID of the Community (str)
- **chatId**: ID of the Chat (str)
- **leaveType**: Leave type to end voice as.. (int)
"""
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": leaveType,
"id": "2154531"
},
"t": 112
}
timer.sleep(2)
self.wss.send(data)
def joinVideoChatAsSpectator(self, comId: str, chatId: str):
"""
Join Video Chat As Spectator
**Parameters**
- **comId**: ID of the Community (str)
- **chatId**: ID of the Chat (str)
"""
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": 2,
"id": "72446"
},
"t": 112
}
timer.sleep(2)
self.wss.send(data)
def threadJoin(self, comId: str, chatId: str):
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": 1,
"id": "10335106"
},
"t": 112
}
self.wss.send(data)
def channelJoin(self, comId: str, chatId: str):
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"channelType": 5,
"id": "10335436"
},
"t": 108
}
self.wss.send(data)
def videoPlayer(self, comId: str, chatId: str, path: str, title: str, background: str, duration: int):
self.actions(comId, chatId).Chatting().start()
self.threadJoin(comId, chatId)
self.channelJoin(comId, chatId)
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"playlist": {
"currentItemIndex": 0,
"currentItemStatus": 1,
"items": [{
"author": None,
"duration": duration,
"isDone": False,
"mediaList": [[100, background, None]],
"title": title,
"type": 1,
"url": f"file://{path}"
}]
},
"id": "3423239"
},
"t": 120
}
self.wss.send(data)
timer.sleep(2)
data["o"]["playlist"]["currentItemStatus"] = 2
data["o"]["playlist"]["items"][0]["isDone"] = True
self.wss.send(data)
def playVideo(self, comId: str, chatId: str, path: str, title: str, background: BinaryIO, duration: int):
"""
Play Custom Video
**Parameters**
- **comId** : ID of the Community (str)
- **chatId** : ID of the Chat (str)
- **path** : Video Path | /storage/emulated/0/Download/video.mp4 (str)
- **title** : Video Title (str)
- **background** : Background of the video (BinaryIO)
- **duration** : length of the mp4/mp3 (int)
"""
threading.Thread(target=self.videoPlayer, args=(comId, chatId, path, title, self.wss.uploadMedia(background, "image"), duration)).start()
def getActionUsers(self, comId: str, path: str):
"""
Get Action Users
**Parameters**
- **comId**: ID of the Community (str)
- **path**: Example: "users-chatting" (str)
"""
data = {
"o": {
"ndcId": int(comId),
"topic": f"ndtopic:x{comId}:{path}",
"id": "4538416"
},
"t": 300
}
timer.sleep(2)
self.wss.send(data)
data["t"] += 2
self.wss.send(data)
timer.sleep(0.50)
return self.wss.receive()
def actions(self, comId: str, chatId: str):
threading.Thread(target=self.wss.sendWebActive, args=(comId, )).start()
return Actions(self.wss, comId, chatId)
class Wss(Callbacks):
def __init__(self, headers: dict, trace: bool = False):
"""
Scheduling WssClient with Wss
**Parameters**
- **headers**: Your Amino Headers (dict)
"""
self.isOpened = False
Callbacks.__init__(self)
if headers.get("NDCAUTH") and headers.get("NDCDEVICEID"):
self.sid = headers["NDCAUTH"]
self.deviceid = headers["NDCDEVICEID"]
self.headers = header.Headers().headers
self.web_headers = header.Headers(sid=self.sid).web_headers
self.headers.update(headers)
else:
exception({"api:message": "Headers Should Contains \"NDCAUTH\" and \"NDCDEVICEID\" header or key"})
self.narvi = "https://service.narvii.com/api/v1/"
self.socket_url = "wss://ws1.narvii.com"
self.lastMessage = {}
websocket.enableTrace(trace)
def onOpen(self, ws=None):
self.isOpened = True
def onClose(self, ws=None):
self.isOpened = False
def send(self, data):
"""
Send data to wss
**Parameters**
- **data**: The data you want to send (dict)
"""
self.socket.send(json.dumps(data))
def receive(self):
"""
Receive data from ws
**Returns**
- **data**: Received data (json)
"""
return self.lastMessage
def on_message(self, data):
self.lastMessage = json.loads(data)
self.resolve(data)
def webSocketUrl(self):
response = requests.get("https://aminoapps.com/api/chat/web-socket-url", headers=self.web_headers)
if response.status_code != 200:
return exception(response.json())
else:
self.socket_url = response.json()["result"]["url"]
return self.socket_url
def launch(self):
"""
Launching the Socket
"""
self.socket = websocket.WebSocketApp(self.webSocketUrl(), on_message=self.on_message, on_close=self.onClose, on_open=self.onOpen, header=self.web_headers)
threading.Thread(target=self.socket.run_forever, kwargs={"ping_interval": 60}).start()
def close(self):
"""
Closing the Socket
"""
self.socket.close()
def status(self):
"""
Get Socket Status
"""
return self.isOpened
def getClient(self):
"""
Get Amino Websocket Types
**Returns**
- **WssClient**: A Client With Amino Socket Functions (Class)
"""
return WssClient(self.socket, self)
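# Usage sketch (illustrative; the IDs are placeholders):
#
#   client = wss.getClient()
#   client.joinVoiceChat("123456", "chat-id")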
def uploadMedia(self, file: BinaryIO, fileType: str):
if fileType == "audio":
typee = "audio/aac"
elif fileType == "image":
typee = "image/jpg"
else:
raise TypeError("[i] Report this error in Discord Server as [15:0]")
data = file.read()
headers = self.headers
headers["content-type"] = typee
headers["content-length"] = str(len(data))
response = requests.post(f"{self.narvi}/g/s/media/upload", data=data, headers=headers)
if response.json()["api:statuscode"] != 0:
return exception(response.json())
else:
return response.json()["mediaValue"]
def sendWebActive(self, comId: str):
"""
Send An Active-Time Update To The Community
**Returns**
- **Success**: Post Request object
"""
data = {"ndcId": comId}
response = requests.post("https://aminoapps.com/api/community/stats/web-user-active-time", json=data, headers=self.web_headers)
if response.json()["code"] != 200:
return exception(response.json())
else:
return response
|
Amino-Socket
|
/Amino-Socket-1.0.6.tar.gz/Amino-Socket-1.0.6/aminos/api.py
|
api.py
|
class RankingTableList:
def __init__(self, data):
self.json = data
self.title = []
self.level = []
self.reputation = []
self.id = []
@property
def RankingTableList(self):
for x in self.json:
try: self.title.append(x["title"])
except (KeyError, TypeError): self.title.append(None)
try: self.level.append(x["level"])
except (KeyError, TypeError): self.level.append(None)
try: self.reputation.append(x["reputation"])
except (KeyError, TypeError): self.reputation.append(None)
try: self.id.append(x["id"])
except (KeyError, TypeError): self.id.append(None)
return self
class Community:
def __init__(self, data):
self.json = data
try: self.agent: UserProfile = UserProfile(data["agent"]).UserProfile
except (KeyError, TypeError): self.agent: UserProfile = UserProfile([])
try: self.rankingTable: RankingTableList = RankingTableList(data["advancedSettings"]["rankingTable"]).RankingTableList
except (KeyError, TypeError): self.rankingTable: RankingTableList = RankingTableList([])
self.usersCount = None
self.createdTime = None
self.aminoId = None
self.icon = None
self.link = None
self.comId = None
self.modifiedTime = None
self.status = None
self.joinType = None
self.tagline = None
self.primaryLanguage = None
self.heat = None
self.themePack = None
self.probationStatus = None
self.listedStatus = None
self.userAddedTopicList = None
self.name = None
self.isStandaloneAppDeprecated = None
self.searchable = None
self.influencerList = None
self.keywords = None
self.mediaList = None
self.description = None
self.isStandaloneAppMonetizationEnabled = None
self.advancedSettings = None
self.activeInfo = None
self.configuration = None
self.extensions = None
self.nameAliases = None
self.templateId = None
self.promotionalMediaList = None
self.defaultRankingTypeInLeaderboard = None
self.joinedBaselineCollectionIdList = None
self.newsfeedPages = None
self.catalogEnabled = None
self.pollMinFullBarVoteCount = None
self.leaderboardStyle = None
self.facebookAppIdList = None
self.welcomeMessage = None
self.welcomeMessageEnabled = None
self.hasPendingReviewRequest = None
self.frontPageLayout = None
self.themeColor = None
self.themeHash = None
self.themeVersion = None
self.themeUrl = None
self.themeHomePageAppearance = None
self.themeLeftSidePanelTop = None
self.themeLeftSidePanelBottom = None
self.themeLeftSidePanelColor = None
self.customList = None
@property
def Community(self):
try: self.name = self.json["name"]
except (KeyError, TypeError): pass
try: self.usersCount = self.json["membersCount"]
except (KeyError, TypeError): pass
try: self.createdTime = self.json["createdTime"]
except (KeyError, TypeError): pass
try: self.aminoId = self.json["endpoint"]
except (KeyError, TypeError): pass
try: self.icon = self.json["icon"]
except (KeyError, TypeError): pass
try: self.link = self.json["link"]
except (KeyError, TypeError): pass
try: self.comId = self.json["ndcId"]
except (KeyError, TypeError): pass
try: self.modifiedTime = self.json["modifiedTime"]
except (KeyError, TypeError): pass
try: self.status = self.json["status"]
except (KeyError, TypeError): pass
try: self.joinType = self.json["joinType"]
except (KeyError, TypeError): pass
try: self.primaryLanguage = self.json["primaryLanguage"]
except (KeyError, TypeError): pass
try: self.heat = self.json["communityHeat"]
except (KeyError, TypeError): pass
try: self.userAddedTopicList = self.json["userAddedTopicList"]
except (KeyError, TypeError): pass
try: self.probationStatus = self.json["probationStatus"]
except (KeyError, TypeError): pass
try: self.listedStatus = self.json["listedStatus"]
except (KeyError, TypeError): pass
try: self.themePack = self.json["themePack"]
except (KeyError, TypeError): pass
try: self.themeColor = self.json["themePack"]["themeColor"]
except (KeyError, TypeError): pass
try: self.themeHash = self.json["themePack"]["themePackHash"]
except (KeyError, TypeError): pass
try: self.themeVersion = self.json["themePack"]["themePackRevision"]
except (KeyError, TypeError): pass
try: self.themeUrl = self.json["themePack"]["themePackUrl"]
except (KeyError, TypeError): pass
try: self.themeHomePageAppearance = self.json["configuration"]["appearance"]["homePage"]["navigation"]
except (KeyError, TypeError): pass
try: self.themeLeftSidePanelTop = self.json["configuration"]["appearance"]["leftSidePanel"]["navigation"]["level1"]
except (KeyError, TypeError): pass
try: self.themeLeftSidePanelBottom = self.json["configuration"]["appearance"]["leftSidePanel"]["navigation"]["level2"]
except (KeyError, TypeError): pass
try: self.themeLeftSidePanelColor = self.json["configuration"]["appearance"]["leftSidePanel"]["style"]["iconColor"]
except (KeyError, TypeError): pass
try: self.customList = self.json["configuration"]["page"]["customList"]
except (KeyError, TypeError): pass
try: self.tagline = self.json["tagline"]
except (KeyError, TypeError): pass
try: self.searchable = self.json["searchable"]
except (KeyError, TypeError): pass
try: self.isStandaloneAppDeprecated = self.json["isStandaloneAppDeprecated"]
except (KeyError, TypeError): pass
try: self.influencerList = self.json["influencerList"]
except (KeyError, TypeError): pass
try: self.keywords = self.json["keywords"]
except (KeyError, TypeError): pass
try: self.mediaList = self.json["mediaList"]
except (KeyError, TypeError): pass
try: self.description = self.json["content"]
except (KeyError, TypeError): pass
try: self.isStandaloneAppMonetizationEnabled = self.json["isStandaloneAppMonetizationEnabled"]
except (KeyError, TypeError): pass
try: self.advancedSettings = self.json["advancedSettings"]
except (KeyError, TypeError): pass
try: self.defaultRankingTypeInLeaderboard = self.json["advancedSettings"]["defaultRankingTypeInLeaderboard"]
except (KeyError, TypeError): pass
try: self.frontPageLayout = self.json["advancedSettings"]["frontPageLayout"]
except (KeyError, TypeError): pass
try: self.hasPendingReviewRequest = self.json["advancedSettings"]["hasPendingReviewRequest"]
except (KeyError, TypeError): pass
try: self.welcomeMessageEnabled = self.json["advancedSettings"]["welcomeMessageEnabled"]
except (KeyError, TypeError): pass
try: self.welcomeMessage = self.json["advancedSettings"]["welcomeMessageText"]
except (KeyError, TypeError): pass
try: self.pollMinFullBarVoteCount = self.json["advancedSettings"]["pollMinFullBarVoteCount"]
except (KeyError, TypeError): pass
try: self.catalogEnabled = self.json["advancedSettings"]["catalogEnabled"]
except (KeyError, TypeError): pass
try: self.leaderboardStyle = self.json["advancedSettings"]["leaderboardStyle"]
except (KeyError, TypeError): pass
try: self.facebookAppIdList = self.json["advancedSettings"]["facebookAppIdList"]
except (KeyError, TypeError): pass
try: self.newsfeedPages = self.json["advancedSettings"]["newsfeedPages"]
except (KeyError, TypeError): pass
try: self.joinedBaselineCollectionIdList = self.json["advancedSettings"]["joinedBaselineCollectionIdList"]
except (KeyError, TypeError): pass
try: self.activeInfo = self.json["activeInfo"]
except (KeyError, TypeError): pass
try: self.configuration = self.json["configuration"]
except (KeyError, TypeError): pass
try: self.extensions = self.json["extensions"]
except (KeyError, TypeError): pass
try: self.nameAliases = self.json["extensions"]["communityNameAliases"]
except (KeyError, TypeError): pass
try: self.templateId = self.json["templateId"]
except (KeyError, TypeError): pass
try: self.promotionalMediaList = self.json["promotionalMediaList"]
except (KeyError, TypeError): pass
return self
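# Usage sketch (illustrative; the payload shape is an assumption about Amino's
# community endpoint):
#
#   info = Community(response_json["community"]).Community
#   print(info.name, info.usersCount)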
class UserProfileList:
def __init__(self, data):
_fanClub = []
self.json = data
for y in data:
try: _fanClub.append(FanClubList(y["fanClubList"]).FanClubList)
except (KeyError, TypeError): _fanClub.append(None)
self.accountMembershipStatus = []
self.activation = []
self.activePublicLiveThreadId = []
self.age = []
self.aminoId = []
self.aminoIdEditable = []
self.appleId = []
self.avatarFrame = []
self.avatarFrameId = []
self.backgroundColor = []
self.backgroundImage = []
self.blogsCount = []
self.commentsCount = []
self.content = []
self.coverAnimation = []
self.createdTime = []
self.customTitles = []
self.dateOfBirth = []
self.defaultBubbleId = []
self.disabledLevel = []
self.disabledStatus = []
self.disabledTime = []
self.email = []
self.extensions = []
self.facebookId = []
self.fansCount = []
self.fanClub = _fanClub
self.followersCount = []
self.followingCount = []
self.followingStatus = []
self.gender = []
self.globalStrikeCount = []
self.googleId = []
self.icon = []
self.influencerCreatedTime = []
self.influencerInfo = []
self.influencerMonthlyFee = []
self.influencerPinned = []
self.isGlobal = []
self.isMemberOfTeamAmino = []
self.isNicknameVerified = []
self.itemsCount = []
self.lastStrikeTime = []
self.lastWarningTime = []
self.level = []
self.mediaList = []
self.membershipStatus = []
self.modifiedTime = []
self.mood = []
self.moodSticker = []
self.nickname = []
self.notificationSubscriptionStatus = []
self.onlineStatus = []
self.onlineStatus2 = []
self.phoneNumber = []
self.postsCount = []
self.privilegeOfChatInviteRequest = []
self.privilegeOfCommentOnUserProfile = []
self.pushEnabled = []
self.race = []
self.reputation = []
self.role = []
self.securityLevel = []
self.staffInfo = []
self.status = []
self.storiesCount = []
self.strikeCount = []
self.tagList = []
self.twitterId = []
self.userId = []
self.verified = []
self.visitPrivacy = []
self.visitorsCount = []
self.warningCount = []
self.totalQuizPlayedTimes = []
self.totalQuizHighestScore = []
self.requestId = []
self.message = []
self.applicant = []
self.avgDailySpendTimeIn7Days = []
self.adminLogCountIn7Days = []
@property
def UserProfileList(self):
for x in self.json:
try: self.accountMembershipStatus.append(x["accountMembershipStatus"])
except (KeyError, TypeError): self.accountMembershipStatus.append(None)
try: self.activation.append(x["activation"])
except (KeyError, TypeError): self.activation.append(None)
try: self.activePublicLiveThreadId.append(x["activePublicLiveThreadId"])
except (KeyError, TypeError): self.activePublicLiveThreadId.append(None)
try: self.age.append(x["age"])
except (KeyError, TypeError): self.age.append(None)
try: self.aminoId.append(x["aminoId"])
except (KeyError, TypeError): self.aminoId.append(None)
try: self.aminoIdEditable.append(x["aminoIdEditable"])
except (KeyError, TypeError): self.aminoIdEditable.append(None)
try: self.appleId.append(x["appleID"])
except (KeyError, TypeError): self.appleId.append(None)
try: self.avatarFrame.append(x["avatarFrame"])
except (KeyError, TypeError): self.avatarFrame.append(None)
try: self.avatarFrameId.append(x["avatarFrameId"])
except (KeyError, TypeError): self.avatarFrameId.append(None)
try: self.backgroundColor.append(x["extensions"]["style"]["backgroundColor"])
except (KeyError, TypeError): self.backgroundColor.append(None)
try: self.backgroundImage.append(x["extensions"]["style"]["backgroundMediaList"][1])
except (KeyError, TypeError, IndexError): self.backgroundImage.append(None)
try: self.blogsCount.append(x["blogsCount"])
except (KeyError, TypeError): self.blogsCount.append(None)
try: self.commentsCount.append(x["commentsCount"])
except (KeyError, TypeError): self.commentsCount.append(None)
try: self.content.append(x["content"])
except (KeyError, TypeError): self.content.append(None)
try: self.coverAnimation.append(x["extensions"]["coverAnimation"])
except (KeyError, TypeError): self.coverAnimation.append(None)
try: self.createdTime.append(x["createdTime"])
except (KeyError, TypeError): self.createdTime.append(None)
try: self.customTitles.append(x["extensions"]["customTitles"])
except (KeyError, TypeError): self.customTitles.append(None)
try: self.dateOfBirth.append(x["dateOfBirth"])
except (KeyError, TypeError): self.dateOfBirth.append(None)
try: self.defaultBubbleId.append(x["extensions"]["defaultBubbleId"])
except (KeyError, TypeError): self.defaultBubbleId.append(None)
try: self.disabledLevel.append(x["extensions"]["__disabledLevel__"])
except (KeyError, TypeError): self.disabledLevel.append(None)
try: self.disabledStatus.append(x["extensions"]["__disabledStatus__"])
except (KeyError, TypeError): self.disabledStatus.append(None)
try: self.disabledTime.append(x["extensions"]["__disabledTime__"])
except (KeyError, TypeError): self.disabledTime.append(None)
try: self.email.append(x["email"])
except (KeyError, TypeError): self.email.append(None)
try: self.extensions.append(x["extensions"])
except (KeyError, TypeError): self.extensions.append(None)
try: self.facebookId.append(x["facebookID"])
except (KeyError, TypeError): self.facebookId.append(None)
try: self.fansCount.append(x["influencerInfo"]["fansCount"])
except (KeyError, TypeError): self.fansCount.append(None)
try: self.followersCount.append(x["membersCount"])
except (KeyError, TypeError): self.followersCount.append(None)
try: self.followingCount.append(x["joinedCount"])
except (KeyError, TypeError): self.followingCount.append(None)
try: self.followingStatus.append(x["followingStatus"])
except (KeyError, TypeError): self.followingStatus.append(None)
try: self.gender.append(x["gender"])
except (KeyError, TypeError): self.gender.append(None)
try: self.globalStrikeCount.append(x["adminInfo"]["globalStrikeCount"])
except (KeyError, TypeError): self.globalStrikeCount.append(None)
try: self.googleId.append(x["googleID"])
except (KeyError, TypeError): self.googleId.append(None)
try: self.icon.append(x["icon"])
except (KeyError, TypeError): self.icon.append(None)
try: self.influencerCreatedTime.append(x["influencerInfo"]["createdTime"])
except (KeyError, TypeError): self.influencerCreatedTime.append(None)
try: self.influencerInfo.append(x["influencerInfo"])
except (KeyError, TypeError): self.influencerInfo.append(None)
try: self.influencerMonthlyFee.append(x["influencerInfo"]["monthlyFee"])
except (KeyError, TypeError): self.influencerMonthlyFee.append(None)
try: self.influencerPinned.append(x["influencerInfo"]["pinned"])
except (KeyError, TypeError): self.influencerPinned.append(None)
try: self.isGlobal.append(x["isGlobal"])
except (KeyError, TypeError): self.isGlobal.append(None)
try: self.isMemberOfTeamAmino.append(x["extensions"]["isMemberOfTeamAmino"])
except (KeyError, TypeError): self.isMemberOfTeamAmino.append(None)
try: self.isNicknameVerified.append(x["isNicknameVerified"])
except (KeyError, TypeError): self.isNicknameVerified.append(None)
try: self.itemsCount.append(x["itemsCount"])
except (KeyError, TypeError): self.itemsCount.append(None)
try: self.lastStrikeTime.append(x["adminInfo"]["lastStrikeTime"])
except (KeyError, TypeError): self.lastStrikeTime.append(None)
try: self.lastWarningTime.append(x["adminInfo"]["lastWarningTime"])
except (KeyError, TypeError): self.lastWarningTime.append(None)
try: self.level.append(x["level"])
except (KeyError, TypeError): self.level.append(None)
try: self.mediaList.append(x["mediaList"])
except (KeyError, TypeError): self.mediaList.append(None)
try: self.membershipStatus.append(x["membershipStatus"])
except (KeyError, TypeError): self.membershipStatus.append(None)
try: self.modifiedTime.append(x["modifiedTime"])
except (KeyError, TypeError): self.modifiedTime.append(None)
try: self.mood.append(x["mood"])
except (KeyError, TypeError): self.mood.append(None)
try: self.moodSticker.append(x["moodSticker"])
except (KeyError, TypeError): self.moodSticker.append(None)
try: self.nickname.append(x["nickname"])
except (KeyError, TypeError): self.nickname.append(None)
try: self.notificationSubscriptionStatus.append(x["notificationSubscriptionStatus"])
except (KeyError, TypeError): self.notificationSubscriptionStatus.append(None)
try: self.onlineStatus.append(x["onlineStatus"])
except (KeyError, TypeError): self.onlineStatus.append(None)
try: self.onlineStatus2.append(x["settings"]["onlineStatus"])
except (KeyError, TypeError): self.onlineStatus2.append(None)
try: self.phoneNumber.append(x["phoneNumber"])
except (KeyError, TypeError): self.phoneNumber.append(None)
try: self.postsCount.append(x["postsCount"])
except (KeyError, TypeError): self.postsCount.append(None)
try: self.privilegeOfChatInviteRequest.append(x["extensions"]["privilegeOfChatInviteRequest"])
except (KeyError, TypeError): self.privilegeOfChatInviteRequest.append(None)
try: self.privilegeOfCommentOnUserProfile.append(x["extensions"]["privilegeOfCommentOnUserProfile"])
except (KeyError, TypeError): self.privilegeOfCommentOnUserProfile.append(None)
try: self.pushEnabled.append(x["pushEnabled"])
except (KeyError, TypeError): self.pushEnabled.append(None)
try: self.race.append(x["race"])
except (KeyError, TypeError): self.race.append(None)
try: self.reputation.append(x["reputation"])
except (KeyError, TypeError): self.reputation.append(None)
try: self.role.append(x["role"])
except (KeyError, TypeError): self.role.append(None)
try: self.securityLevel.append(x["securityLevel"])
except (KeyError, TypeError): self.securityLevel.append(None)
try: self.staffInfo.append(x["adminInfo"])
except (KeyError, TypeError): self.staffInfo.append(None)
try: self.status.append(x["status"])
except (KeyError, TypeError): self.status.append(None)
try: self.storiesCount.append(x["storiesCount"])
except (KeyError, TypeError): self.storiesCount.append(None)
try: self.strikeCount.append(x["adminInfo"]["strikeCount"])
except (KeyError, TypeError): self.strikeCount.append(None)
try: self.tagList.append(x["tagList"])
except (KeyError, TypeError): self.tagList.append(None)
try: self.twitterId.append(x["twitterID"])
except (KeyError, TypeError): self.twitterId.append(None)
try: self.userId.append(x["uid"])
except (KeyError, TypeError): self.userId.append(None)
try: self.verified.append(x["verified"])
except (KeyError, TypeError): self.verified.append(None)
try: self.visitPrivacy.append(x["visitPrivacy"])
except (KeyError, TypeError): self.visitPrivacy.append(None)
try: self.visitorsCount.append(x["visitorsCount"])
except (KeyError, TypeError): self.visitorsCount.append(None)
try: self.warningCount.append(x["adminInfo"]["warningCount"])
except (KeyError, TypeError): self.warningCount.append(None)
try: self.totalQuizPlayedTimes.append(x["totalQuizPlayedTimes"])
except (KeyError, TypeError): self.totalQuizPlayedTimes.append(None)
try: self.totalQuizHighestScore.append(x["totalQuizHighestScore"])
except (KeyError, TypeError): self.totalQuizHighestScore.append(None)
try: self.requestId.append(x["requestId"])
except (KeyError, TypeError): self.requestId.append(None)
try: self.message.append(x["message"])
except (KeyError, TypeError): self.message.append(None)
try: self.applicant.append(x["applicant"])
except (KeyError, TypeError): self.applicant.append(None)
try: self.avgDailySpendTimeIn7Days.append(x["avgDailySpendTimeIn7Days"])
except (KeyError, TypeError): self.avgDailySpendTimeIn7Days.append(None)
try: self.adminLogCountIn7Days.append(x["adminLogCountIn7Days"])
except (KeyError, TypeError): self.adminLogCountIn7Days.append(None)
return self
class FanClubList:
def __init__(self, data):
_profile, _targetUserProfile = [], []
self.json = data
for y in data:
try: _profile.append(y["fansUserProfile"])
except (KeyError, TypeError): _profile.append(None)
try: _targetUserProfile.append(y["targetUserProfile"])
except (KeyError, TypeError): _targetUserProfile.append(None)
self.profile: UserProfileList = UserProfileList(_profile).UserProfileList
self.targetUserProfile: UserProfileList = UserProfileList(_targetUserProfile).UserProfileList
self.userId = []
self.lastThankedTime = []
self.expiredTime = []
self.createdTime = []
self.status = []
self.targetUserId = []
@property
def FanClubList(self):
for x in self.json:
try: self.userId.append(x["uid"])
except (KeyError, TypeError): self.userId.append(None)
try: self.lastThankedTime.append(x["lastThankedTime"])
except (KeyError, TypeError): self.lastThankedTime.append(None)
try: self.expiredTime.append(x["expiredTime"])
except (KeyError, TypeError): self.expiredTime.append(None)
try: self.createdTime.append(x["createdTime"])
except (KeyError, TypeError): self.createdTime.append(None)
try: self.status.append(x["fansStatus"])
except (KeyError, TypeError): self.status.append(None)
try: self.targetUserId.append(x["targetUid"])
except (KeyError, TypeError): self.targetUserId.append(None)
return self
class UserProfile:
def __init__(self, data):
self.json = data
try: self.fanClub: FanClubList = FanClubList(data["fanClubList"]).FanClubList
except (KeyError, TypeError): self.fanClub: FanClubList = FanClubList([])
self.accountMembershipStatus = None
self.activation = None
self.activePublicLiveThreadId = None
self.age = None
self.aminoId = None
self.aminoIdEditable = None
self.appleId = None
self.avatarFrame = None
self.avatarFrameId = None
self.backgroundImage = None
self.backgroundColor = None
self.blogsCount = None
self.commentsCount = None
self.content = None
self.coverAnimation = None
self.createdTime = None
self.customTitles = None
self.dateOfBirth = None
self.defaultBubbleId = None
self.disabledLevel = None
self.disabledStatus = None
self.disabledTime = None
self.email = None
self.extensions = None
self.facebookId = None
self.fansCount = None
self.followersCount = None
self.followingCount = None
self.followingStatus = None
self.gender = None
self.globalStrikeCount = None
self.googleId = None
self.icon = None
self.influencerCreatedTime = None
self.influencerInfo = None
self.influencerMonthlyFee = None
self.influencerPinned = None
self.isGlobal = None
self.isMemberOfTeamAmino = None
self.isNicknameVerified = None
self.itemsCount = None
self.lastStrikeTime = None
self.lastWarningTime = None
self.level = None
self.mediaList = None
self.membershipStatus = None
self.modifiedTime = None
self.mood = None
self.moodSticker = None
self.nickname = None
self.notificationSubscriptionStatus = None
self.onlineStatus = None
self.onlineStatus2 = None
self.phoneNumber = None
self.postsCount = None
self.privilegeOfChatInviteRequest = None
self.privilegeOfCommentOnUserProfile = None
self.pushEnabled = None
self.race = None
self.reputation = None
self.role = None
self.securityLevel = None
self.staffInfo = None
self.status = None
self.storiesCount = None
self.strikeCount = None
self.tagList = None
self.twitterId = None
self.userId = None
self.verified = None
self.visitPrivacy = None
self.visitorsCount = None
self.warningCount = None
self.totalQuizHighestScore = None
self.totalQuizPlayedTimes = None
self.requestId = None
self.message = None
self.applicant = None
self.avgDailySpendTimeIn7Days = None
self.adminLogCountIn7Days = None
@property
def UserProfile(self):
try: self.accountMembershipStatus = self.json["accountMembershipStatus"]
except (KeyError, TypeError): pass
try: self.activation = self.json["activation"]
except (KeyError, TypeError): pass
try: self.activePublicLiveThreadId = self.json["activePublicLiveThreadId"]
except (KeyError, TypeError): pass
try: self.age = self.json["age"]
except (KeyError, TypeError): pass
try: self.aminoId = self.json["aminoId"]
except (KeyError, TypeError): pass
try: self.aminoIdEditable = self.json["aminoIdEditable"]
except (KeyError, TypeError): pass
try: self.appleId = self.json["appleID"]
except (KeyError, TypeError): pass
try: self.avatarFrame = self.json["avatarFrame"]
except (KeyError, TypeError): pass
try: self.avatarFrameId = self.json["avatarFrameId"]
except (KeyError, TypeError): pass
try: self.backgroundColor = self.json["extensions"]["style"]["backgroundColor"]
except (KeyError, TypeError): pass
try: self.backgroundImage = self.json["extensions"]["style"]["backgroundMediaList"][1]
except (KeyError, TypeError, IndexError): pass
try: self.blogsCount = self.json["blogsCount"]
except (KeyError, TypeError): pass
try: self.commentsCount = self.json["commentsCount"]
except (KeyError, TypeError): pass
try: self.content = self.json["content"]
except (KeyError, TypeError): pass
try: self.coverAnimation = self.json["extensions"]["coverAnimation"]
except (KeyError, TypeError): pass
try: self.createdTime = self.json["createdTime"]
except (KeyError, TypeError): pass
try: self.customTitles = self.json["extensions"]["customTitles"]
except (KeyError, TypeError): pass
try: self.dateOfBirth = self.json["dateOfBirth"]
except (KeyError, TypeError): pass
try: self.defaultBubbleId = self.json["extensions"]["defaultBubbleId"]
except (KeyError, TypeError): pass
try: self.disabledLevel = self.json["extensions"]["__disabledLevel__"]
except (KeyError, TypeError): pass
try: self.disabledStatus = self.json["extensions"]["__disabledStatus__"]
except (KeyError, TypeError): pass
try: self.disabledTime = self.json["extensions"]["__disabledTime__"]
except (KeyError, TypeError): pass
try: self.email = self.json["email"]
except (KeyError, TypeError): pass
try: self.extensions = self.json["extensions"]
except (KeyError, TypeError): pass
try: self.facebookId = self.json["facebookID"]
except (KeyError, TypeError): pass
try: self.fansCount = self.json["influencerInfo"]["fansCount"]
except (KeyError, TypeError): pass
try: self.followersCount = self.json["membersCount"]
except (KeyError, TypeError): pass
try: self.followingCount = self.json["joinedCount"]
except (KeyError, TypeError): pass
try: self.followingStatus = self.json["followingStatus"]
except (KeyError, TypeError): pass
try: self.gender = self.json["gender"]
except (KeyError, TypeError): pass
try: self.globalStrikeCount = self.json["adminInfo"]["globalStrikeCount"]
except (KeyError, TypeError): pass
try: self.googleId = self.json["googleID"]
except (KeyError, TypeError): pass
try: self.icon = self.json["icon"]
except (KeyError, TypeError): pass
try: self.influencerCreatedTime = self.json["influencerInfo"]["createdTime"]
except (KeyError, TypeError): pass
try: self.influencerInfo = self.json["influencerInfo"]
except (KeyError, TypeError): pass
try: self.influencerMonthlyFee = self.json["influencerInfo"]["monthlyFee"]
except (KeyError, TypeError): pass
try: self.influencerPinned = self.json["influencerInfo"]["pinned"]
except (KeyError, TypeError): pass
try: self.isGlobal = self.json["isGlobal"]
except (KeyError, TypeError): pass
try: self.isMemberOfTeamAmino = self.json["extensions"]["isMemberOfTeamAmino"]
except (KeyError, TypeError): pass
try: self.isNicknameVerified = self.json["isNicknameVerified"]
except (KeyError, TypeError): pass
try: self.itemsCount = self.json["itemsCount"]
except (KeyError, TypeError): pass
try: self.lastStrikeTime = self.json["adminInfo"]["lastStrikeTime"]
except (KeyError, TypeError): pass
try: self.lastWarningTime = self.json["adminInfo"]["lastWarningTime"]
except (KeyError, TypeError): pass
try: self.level = self.json["level"]
except (KeyError, TypeError): pass
try: self.mediaList = self.json["mediaList"]
except (KeyError, TypeError): pass
try: self.membershipStatus = self.json["membershipStatus"]
except (KeyError, TypeError): pass
try: self.modifiedTime = self.json["modifiedTime"]
except (KeyError, TypeError): pass
try: self.mood = self.json["mood"]
except (KeyError, TypeError): pass
try: self.moodSticker = self.json["moodSticker"]
except (KeyError, TypeError): pass
try: self.nickname = self.json["nickname"]
except (KeyError, TypeError): pass
try: self.notificationSubscriptionStatus = self.json["notificationSubscriptionStatus"]
except (KeyError, TypeError): pass
try: self.onlineStatus = self.json["onlineStatus"]
except (KeyError, TypeError): pass
try: self.onlineStatus2 = self.json["settings"]["onlineStatus"]
except (KeyError, TypeError): pass
try: self.phoneNumber = self.json["phoneNumber"]
except (KeyError, TypeError): pass
try: self.postsCount = self.json["postsCount"]
except (KeyError, TypeError): pass
try: self.privilegeOfChatInviteRequest = self.json["extensions"]["privilegeOfChatInviteRequest"]
except (KeyError, TypeError): pass
try: self.privilegeOfCommentOnUserProfile = self.json["extensions"]["privilegeOfCommentOnUserProfile"]
except (KeyError, TypeError): pass
try: self.pushEnabled = self.json["pushEnabled"]
except (KeyError, TypeError): pass
try: self.race = self.json["race"]
except (KeyError, TypeError): pass
try: self.reputation = self.json["reputation"]
except (KeyError, TypeError): pass
try: self.role = self.json["role"]
except (KeyError, TypeError): pass
try: self.securityLevel = self.json["securityLevel"]
except (KeyError, TypeError): pass
try: self.staffInfo = self.json["adminInfo"]
except (KeyError, TypeError): pass
try: self.status = self.json["status"]
except (KeyError, TypeError): pass
try: self.storiesCount = self.json["storiesCount"]
except (KeyError, TypeError): pass
try: self.strikeCount = self.json["adminInfo"]["strikeCount"]
except (KeyError, TypeError): pass
try: self.tagList = self.json["tagList"]
except (KeyError, TypeError): pass
try: self.twitterId = self.json["twitterID"]
except (KeyError, TypeError): pass
try: self.userId = self.json["uid"]
except (KeyError, TypeError): pass
try: self.verified = self.json["verified"]
except (KeyError, TypeError): pass
try: self.visitPrivacy = self.json["visitPrivacy"]
except (KeyError, TypeError): pass
try: self.visitorsCount = self.json["visitorsCount"]
except (KeyError, TypeError): pass
try: self.warningCount = self.json["adminInfo"]["warningCount"]
except (KeyError, TypeError): pass
try: self.totalQuizHighestScore = self.json["totalQuizHighestScore"]
except (KeyError, TypeError): pass
try: self.totalQuizPlayedTimes = self.json["totalQuizPlayedTimes"]
except (KeyError, TypeError): pass
try: self.requestId = self.json["requestId"]
except (KeyError, TypeError): pass
try: self.message = self.json["message"]
except (KeyError, TypeError): pass
try: self.applicant = self.json["applicant"]
except (KeyError, TypeError): pass
try: self.avgDailySpendTimeIn7Days = self.json["avgDailySpendTimeIn7Days"]
except (KeyError, TypeError): pass
try: self.adminLogCountIn7Days = self.json["adminLogCountIn7Days"]
except (KeyError, TypeError): pass
return self
class StickerCollection:
def __init__(self, data):
self.json = data
try: self.author: UserProfile = UserProfile(data["author"]).UserProfile
except (KeyError, TypeError): self.author: UserProfile = UserProfile([])
try: self.originalAuthor: UserProfile = UserProfile(data["extensions"]["originalAuthor"]).UserProfile
except (KeyError, TypeError): self.originalAuthor: UserProfile = UserProfile([])
try: self.originalCommunity: Community = Community(data["extensions"]["originalCommunity"]).Community
except (KeyError, TypeError): self.originalCommunity: Community = Community([])
self.status = None
self.collectionType = None
self.modifiedTime = None
self.bannerUrl = None
self.smallIcon = None
self.stickersCount = None
self.usedCount = None
self.icon = None
self.title = None
self.collectionId = None
self.extensions = None
self.isActivated = None
self.ownershipStatus = None
self.isNew = None
self.availableComIds = None
self.description = None
self.iconSourceStickerId = None
self.restrictionInfo = None
self.discountValue = None
self.discountStatus = None
self.ownerId = None
self.ownerType = None
self.restrictType = None
self.restrictValue = None
self.availableDuration = None
@property
def StickerCollection(self):
try: self.status = self.json["status"]
except (KeyError, TypeError): pass
try: self.collectionType = self.json["collectionType"]
except (KeyError, TypeError): pass
try: self.modifiedTime = self.json["modifiedTime"]
except (KeyError, TypeError): pass
try: self.bannerUrl = self.json["bannerUrl"]
except (KeyError, TypeError): pass
try: self.smallIcon = self.json["smallIcon"]
except (KeyError, TypeError): pass
try: self.stickersCount = self.json["stickersCount"]
except (KeyError, TypeError): pass
try: self.usedCount = self.json["usedCount"]
except (KeyError, TypeError): pass
try: self.icon = self.json["icon"]
except (KeyError, TypeError): pass
try: self.title = self.json["name"]
except (KeyError, TypeError): pass
try: self.collectionId = self.json["collectionId"]
except (KeyError, TypeError): pass
try: self.extensions = self.json["extensions"]
except (KeyError, TypeError): pass
try: self.iconSourceStickerId = self.json["extensions"]["iconSourceStickerId"]
except (KeyError, TypeError): pass
try: self.isActivated = self.json["isActivated"]
except (KeyError, TypeError): pass
try: self.ownershipStatus = self.json["ownershipStatus"]
except (KeyError, TypeError): pass
try: self.isNew = self.json["isNew"]
except (KeyError, TypeError): pass
try: self.availableComIds = self.json["availableNdcIds"]
except (KeyError, TypeError): pass
try: self.description = self.json["description"]
except (KeyError, TypeError): pass
try: self.restrictionInfo = self.json["restrictionInfo"]
except (KeyError, TypeError): pass
try: self.discountStatus = self.json["restrictionInfo"]["discountStatus"]
except (KeyError, TypeError): pass
try: self.discountValue = self.json["restrictionInfo"]["discountValue"]
except (KeyError, TypeError): pass
try: self.ownerId = self.json["restrictionInfo"]["ownerUid"]
except (KeyError, TypeError): pass
try: self.ownerType = self.json["restrictionInfo"]["ownerType"]
except (KeyError, TypeError): pass
try: self.restrictType = self.json["restrictionInfo"]["restrictType"]
except (KeyError, TypeError): pass
try: self.restrictValue = self.json["restrictionInfo"]["restrictValue"]
except (KeyError, TypeError): pass
try: self.availableDuration = self.json["restrictionInfo"]["availableDuration"]
except (KeyError, TypeError): pass
return self
class Sticker:
def __init__(self, data):
self.json = data
try: self.collection: StickerCollection = StickerCollection(data["stickerCollectionSummary"]).StickerCollection
except (KeyError, TypeError): self.collection: StickerCollection = StickerCollection([])
self.status = None
self.icon = None
self.iconV2 = None
self.name = None
self.stickerId = None
self.smallIcon = None
self.smallIconV2 = None
self.stickerCollectionId = None
self.mediumIcon = None
self.mediumIconV2 = None
self.extensions = None
self.usedCount = None
self.createdTime = None
@property
def Sticker(self):
try: self.status = self.json["status"]
except (KeyError, TypeError): pass
try: self.icon = self.json["icon"]
except (KeyError, TypeError): pass
try: self.iconV2 = self.json["iconV2"]
except (KeyError, TypeError): pass
try: self.name = self.json["name"]
except (KeyError, TypeError): pass
try: self.stickerId = self.json["stickerId"]
except (KeyError, TypeError): pass
try: self.smallIcon = self.json["smallIcon"]
except (KeyError, TypeError): pass
try: self.smallIconV2 = self.json["smallIconV2"]
except (KeyError, TypeError): pass
try: self.stickerCollectionId = self.json["stickerCollectionId"]
except (KeyError, TypeError): pass
try: self.mediumIcon = self.json["mediumIcon"]
except (KeyError, TypeError): pass
try: self.mediumIconV2 = self.json["mediumIconV2"]
except (KeyError, TypeError): pass
try: self.extensions = self.json["extensions"]
except (KeyError, TypeError): pass
try: self.usedCount = self.json["usedCount"]
except (KeyError, TypeError): pass
try: self.createdTime = self.json["createdTime"]
except (KeyError, TypeError): pass
return self
class Message:
def __init__(self, data):
self.json = data
try: self.author: UserProfile = UserProfile(data["author"]).UserProfile
except (KeyError, TypeError): self.author: UserProfile = UserProfile([])
try: self.sticker: Sticker = Sticker(data["extensions"]["sticker"]).Sticker
except (KeyError, TypeError): self.sticker: Sticker = Sticker([])
self.content = None
self.includedInSummary = None
self.isHidden = None
self.messageType = None
self.messageId = None
self.mediaType = None
self.mediaValue = None
self.chatBubbleId = None
self.clientRefId = None
self.chatId = None
self.createdTime = None
self.chatBubbleVersion = None
self.type = None
self.extensions = None
self.duration = None
self.originalStickerId = None
self.videoDuration = None
self.videoExtensions = None
self.videoHeight = None
self.videoCoverImage = None
self.videoWidth = None
self.mentionUserIds = None
self.tippingCoins = None
@property
def Message(self):
try: self.content = self.json["content"]
except (KeyError, TypeError): pass
try: self.includedInSummary = self.json["includedInSummary"]
except (KeyError, TypeError): pass
try: self.isHidden = self.json["isHidden"]
except (KeyError, TypeError): pass
try: self.messageId = self.json["messageId"]
except (KeyError, TypeError): pass
try: self.messageType = self.json["messageType"]
except (KeyError, TypeError): pass
try: self.mediaType = self.json["mediaType"]
except (KeyError, TypeError): pass
try: self.chatBubbleId = self.json["chatBubbleId"]
except (KeyError, TypeError): pass
try: self.clientRefId = self.json["clientRefId"]
except (KeyError, TypeError): pass
try: self.chatId = self.json["threadId"]
except (KeyError, TypeError): pass
try: self.createdTime = self.json["createdTime"]
except (KeyError, TypeError): pass
try: self.chatBubbleVersion = self.json["chatBubbleVersion"]
except (KeyError, TypeError): pass
try: self.type = self.json["type"]
except (KeyError, TypeError): pass
try: self.mediaValue = self.json["mediaValue"]
except (KeyError, TypeError): pass
try: self.extensions = self.json["extensions"]
except (KeyError, TypeError): pass
try: self.duration = self.json["extensions"]["duration"]
except (KeyError, TypeError): pass
try: self.videoDuration = self.json["extensions"]["videoExtensions"]["duration"]
except (KeyError, TypeError): pass
try: self.videoHeight = self.json["extensions"]["videoExtensions"]["height"]
except (KeyError, TypeError): pass
try: self.videoWidth = self.json["extensions"]["videoExtensions"]["width"]
except (KeyError, TypeError): pass
try: self.videoCoverImage = self.json["extensions"]["videoExtensions"]["coverImage"]
except (KeyError, TypeError): pass
try: self.originalStickerId = self.json["extensions"]["originalStickerId"]
except (KeyError, TypeError): pass
# mentions fixed by enchart
try: self.mentionUserIds = [m["uid"] for m in self.json["extensions"]["mentionedArray"]]
except (KeyError, TypeError): pass
try: self.tippingCoins = self.json["extensions"]["tippingCoins"]
except (KeyError, TypeError): pass
return self
class Payload:
def __init__(self, data):
self.json = data
self.ndcId = None
self.chatId = None
self.alert = None
@property
def Payload(self):
try: self.ndcId = self.json["ndcId"]
except (KeyError, TypeError): pass
try: self.ndcId = self.json["tid"]
except (KeyError, TypeError): pass
try: self.alert = self.json["aps"]["alert"]
except (KeyError, TypeError): pass
return self
class Event:
def __init__(self, data):
self.json = data
self.comId = None
self.alertOption = None
self.membershipStatus = None
self.actions = None
self.target = None
self.params = None
self.threadType = None
self.id = None
self.duration = None
try: self.message: Message = Message(data["chatMessage"]).Message
except (KeyError, TypeError): self.message: Message = Message([])
@property
def Event(self):
try: self.comId = self.json["ndcId"]
except (KeyError, TypeError): pass
try: self.alertOption = self.json["alertOption"]
except (KeyError, TypeError): pass
try: self.membershipStatus = self.json["membershipStatus"]
except (KeyError, TypeError): pass
try: self.actions = self.json["actions"]
except (KeyError, TypeError): pass
try: self.target = self.json["target"]
except (KeyError, TypeError): pass
try: self.params = self.json["params"]
except (KeyError, TypeError): pass
try: self.threadType = self.json["params"]["threadType"]
except (KeyError, TypeError): pass
try: self.duration = self.json["params"]["duration"]
except (KeyError, TypeError): pass
try: self.id = self.json["id"]
except (KeyError, TypeError): pass
return self
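# Usage note (a sketch, not part of the library): every wrapper here follows
# the same two-step pattern -- construct it with the raw API dict, then read
# the property of the same name to populate the attributes:
#
#     event = Event(payload).Event   # "payload" is whatever dict the socket delivers
#     print(event.comId, event.message.content)
#
# Keys missing from the dict simply leave the attribute as None, which is what
# all the try/except blocks above are for.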
| Amino-Socket | /Amino-Socket-1.0.6.tar.gz/Amino-Socket-1.0.6/aminos/src/objects.py | objects.py |
[//]: # (**README Improved By ODYSS3EUS**)
[//]: # (Never Underestimate Presentation)
<h1 align="center">
<br>
<a href="https://github.com/aminobot22/MAmino.py"><img src="https://i.imgur.com/LLzB0nxt.png?size=512" alt="Amino_new.py" width="200"></a>
<br>
Amino_new
<br>
</h1>
#### What Is This?
MAmino.py is a Python API for communicating with Amino servers while pretending that you're an app user. This is mostly accomplished by spoofing device configuration headers. It also objectifies and organizes Amino response data, so that actually doing anything with it is easier.
### API Reference
Read the docs: [Link Here](https://aminopy.readthedocs.io/en/latest/)
#### How do I use this?
You'll need Python 3 to use this library; Python 3.6 or later is recommended.
To install it, just open the command line and run `python3 -m pip install MAmino.py`
- **NOTE:** Make sure you type `Amino_new`, **not** `Amino`, as that will install the **wrong** package. Uninstall Amino.py first!
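#### Quickstart
A minimal sketch of what usage looks like, assuming this fork keeps the usual `Client`/`SubClient` interface of Amino.py (the import name, `login`, and `client.profile` below are assumptions, not guarantees):
```python
import Amino_new as amino  # import name is an assumption

client = amino.Client()
client.login(email="address@example.com", password="hunter2")  # placeholder credentials

# A SubClient wraps one community; profile comes from the logged-in client
sub = amino.SubClient(comId="123456", profile=client.profile)
sub.send_message(chatId="chat-id-here", message="Hello!")
```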
#### Pip Not Working?
0. Make sure pip is installed. See: [How To Set-Up Pip](https://nitratine.net/blog/post/how-to-setup-pythons-pip/)
1. Alternatively, download the API [HERE](https://github.com/Slimakoi/Amino.py/archive/refs/heads/master.zip)
2. Extract the .zip file with either [Winrar](https://www.win-rar.com/download.html?&L=0) or [7zip](https://www.7-zip.org/download.html)
3. Open the extracted folder in an [IDE](https://visualstudio.microsoft.com/downloads/) of your choosing and navigate to the folder named `Amino_new-master`
4. Select the Python interpreter
5. Make a new `*.py` file and start creating
[//]: # (*Verbose Instructions For Those Who Want To Start Programming But Don't Know How.)
#### If you have any questions, join us on Discord: https://discord.gg/mMNJmrKMgw
| Amino-new.py | /Amino_new.py-5.0.tar.gz/Amino_new.py-5.0/README.md | README.md |
import hmac
import json
import base64
import requests
from uuid import UUID
from os import urandom
from hashlib import sha1
from time import timezone
from typing import BinaryIO, Union
from binascii import hexlify
from time import time as timestamp
from json_minify import json_minify
from . import client
from .lib.util import exceptions, headers, device, objects
device = device.DeviceGenerator()
headers.sid = client.Client().sid
class VCHeaders:
def __init__(self, data = None):
vc_headers = {
"Accept-Language": "en-US",
"Content-Type": "application/json",
"User-Agent": "Amino/45725 CFNetwork/1126 Darwin/19.5.0", # Closest server (this one for me)
"Host": "rt.applovin.com",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "Keep-Alive",
"Accept": "*/*"
}
if data: vc_headers["Content-Length"] = str(len(data))
self.vc_headers = vc_headers
class SubClient(client.Client):
def __init__(self, comId: str = None, aminoId: str = None, *, profile: objects.UserProfile):
client.Client.__init__(self)
self.vc_connect = False
self.new_headers["NDCAUTH"]=f"sid={headers.sid}"
if comId is not None:
self.comId = comId
self.community: objects.Community = self.get_community_info(comId)
if aminoId is not None:
self.comId = client.Client().search_community(aminoId).comId[0]
self.community: objects.Community = client.Client().get_community_info(self.comId)
if comId is None and aminoId is None: raise exceptions.NoCommunity()
try: self.profile: objects.UserProfile = self.get_user_info(userId=profile.userId)
except AttributeError: raise exceptions.FailedLogin()
except exceptions.UserUnavailable: pass
def parse_headers(self, data = None):
if data is not None:
return headers.Headers(data=data, deviceId=self.device_id).headers
else:
return headers.Headers(deviceId=self.device_id).headers
def get_invite_codes(self, status: str = "normal", start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/g/s-x{self.comId}/community/invitation?status={status}&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.InviteCodeList(json.loads(response.text)["communityInvitationList"]).InviteCodeList
def generate_invite_code(self, duration: int = 0, force: bool = True):
data = json.dumps({
"duration": duration,
"force": force,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/g/s-x{self.comId}/community/invitation", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.InviteCode(json.loads(response.text)["communityInvitation"]).InviteCode
def delete_invite_code(self, inviteId: str):
response = requests.delete(f"{self.apie}/g/s-x{self.comId}/community/invitation/{inviteId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def post_blog(self, title: str, content: str, imageList: list = None, captionList: list = None, categoriesList: list = None, backgroundColor: str = None, fansOnly: bool = False, extensions: dict = None, crash: bool = False):
mediaList = []
        if captionList is not None and imageList is not None:
            for image, caption in zip(imageList, captionList):
                mediaList.append([100, self.upload_media(image, "image"), caption])
else:
            if imageList is not None:
                for image in imageList:
                    mediaList.append([100, self.upload_media(image, "image"), None])
data = {
"address": None,
"content": content,
"title": title,
"mediaList": mediaList,
"extensions": extensions,
"latitude": 0,
"longitude": 0,
"eventSource": "GlobalComposeMenu",
"timestamp": int(timestamp() * 1000)
}
if fansOnly: data["extensions"] = {"fansOnly": fansOnly}
if backgroundColor: data["extensions"] = {"style": {"backgroundColor": backgroundColor}}
if categoriesList: data["taggedBlogCategoryIdList"] = categoriesList
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/blog", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def post_wiki(self, title: str, content: str, icon: str = None, imageList: list = None, keywords: str = None, backgroundColor: str = None, fansOnly: bool = False):
        mediaList = []
        if imageList is not None:
            for image in imageList:
                mediaList.append([100, self.upload_media(image, "image"), None])
data = {
"label": title,
"content": content,
"mediaList": mediaList,
"eventSource": "GlobalComposeMenu",
"timestamp": int(timestamp() * 1000)
}
if icon: data["icon"] = icon
if keywords: data["keywords"] = keywords
if fansOnly: data["extensions"] = {"fansOnly": fansOnly}
if backgroundColor: data["extensions"] = {"style": {"backgroundColor": backgroundColor}}
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/item", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def edit_blog(self, blogId: str, title: str = None, content: str = None, imageList: list = None, categoriesList: list = None, backgroundColor: str = None, fansOnly: bool = False):
        mediaList = []
        if imageList is not None:
            for image in imageList:
                mediaList.append([100, self.upload_media(image, "image"), None])
data = {
"address": None,
"mediaList": mediaList,
"latitude": 0,
"longitude": 0,
"eventSource": "PostDetailView",
"timestamp": int(timestamp() * 1000)
}
if title: data["title"] = title
if content: data["content"] = content
if fansOnly: data["extensions"] = {"fansOnly": fansOnly}
if backgroundColor: data["extensions"] = {"style": {"backgroundColor": backgroundColor}}
if categoriesList: data["taggedBlogCategoryIdList"] = categoriesList
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{blogId}", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def delete_blog(self, blogId: str):
response = requests.delete(f"{self.apie}/x{self.comId}/s/blog/{blogId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def live_notify(self, chatId: str, userId: str):
response = requests.delete(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/member/{userId}/invite-av-chat", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def delete_wiki(self, wikiId: str):
response = requests.delete(f"{self.apie}/x{self.comId}/s/item/{wikiId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def repost_blog(self, content: str = None, blogId: str = None, wikiId: str = None):
if blogId is not None: refObjectId, refObjectType = blogId, 1
elif wikiId is not None: refObjectId, refObjectType = wikiId, 2
else: raise exceptions.SpecifyType()
data = json.dumps({
"content": content,
"refObjectId": refObjectId,
"refObjectType": refObjectType,
"type": 2,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/blog", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def check_in(self, tz: int = -timezone // 1000):
data = json.dumps({
"timezone": tz,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/check-in", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def repair_check_in(self, method: int = 0):
data = {"timestamp": int(timestamp() * 1000)}
if method == 0: data["repairMethod"] = "1" # Coins
if method == 1: data["repairMethod"] = "2" # Amino+
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/check-in/repair", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def lottery(self, tz: int = -timezone // 1000):
data = json.dumps({
"timezone": tz,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/check-in/lottery", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.LotteryLog(json.loads(response.text)["lotteryLog"]).LotteryLog
def edit_profile(self, nickname: str = None, content: str = None, icon: BinaryIO = None, chatRequestPrivilege: str = None, imageList: list = None, captionList: list = None, backgroundImage: str = None, backgroundColor: str = None, titles: list = None, colors: list = None, defaultBubbleId: str = None):
mediaList = []
data = {"timestamp": int(timestamp() * 1000)}
        if captionList is not None and imageList is not None:
for image, caption in zip(imageList, captionList):
mediaList.append([100, self.upload_media(image, "image"), caption])
else:
if imageList is not None:
for image in imageList:
mediaList.append([100, self.upload_media(image, "image"), None])
if imageList is not None or captionList is not None:
data["mediaList"] = mediaList
if nickname: data["nickname"] = nickname
if icon: data["icon"] = self.upload_media(icon, "image")
if content: data["content"] = content
if chatRequestPrivilege: data["extensions"] = {"privilegeOfChatInviteRequest": chatRequestPrivilege}
if backgroundImage: data["extensions"] = {"style": {"backgroundMediaList": [[100, backgroundImage, None, None, None]]}}
if backgroundColor: data["extensions"] = {"style": {"backgroundColor": backgroundColor}}
if defaultBubbleId: data["extensions"] = {"defaultBubbleId": defaultBubbleId}
if titles or colors:
tlt = []
for titles, colors in zip(titles, colors):
tlt.append({"title": titles, "color": colors})
data["extensions"] = {"customTitles": tlt}
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{self.profile.userId}", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def vote_poll(self, blogId: str, optionId: str):
data = json.dumps({
"value": 1,
"eventSource": "PostDetailView",
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{blogId}/poll/option/{optionId}/vote", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def comment(self, message: str, userId: str = None, blogId: str = None, wikiId: str = None, replyTo: str = None, isGuest: bool = False):
data = {
"content": message,
"stickerId": None,
"type": 0,
"timestamp": int(timestamp() * 1000)
}
if replyTo: data["respondTo"] = replyTo
if isGuest: comType = "g-comment"
else: comType = "comment"
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/{comType}", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{blogId}/{comType}", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/item/{wikiId}/{comType}", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def delete_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
if userId: response = requests.delete(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/comment/{commentId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif blogId: response = requests.delete(f"{self.apie}/x{self.comId}/s/blog/{blogId}/comment/{commentId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.delete(f"{self.apie}/x{self.comId}/s/item/{wikiId}/comment/{commentId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
    def like_blog(self, blogId: Union[str, list] = None, wikiId: str = None):
"""
Like a Blog, Multiple Blogs or a Wiki.
**Parameters**
- **blogId** : ID of the Blog or List of IDs of the Blogs. (for Blogs)
- **wikiId** : ID of the Wiki. (for Wikis)
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"value": 4,
"timestamp": int(timestamp() * 1000)
}
if blogId:
if isinstance(blogId, str):
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{blogId}/vote?cv=1.2", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif isinstance(blogId, list):
data["targetIdList"] = blogId
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/feed/vote", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
            else: raise exceptions.WrongType(type(blogId))
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self. comId}/s/item/{wikiId}/vote?cv=1.2", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unlike_blog(self, blogId: str = None, wikiId: str = None):
if blogId: response = requests.delete(f"{self.apie}/x{self.comId}/s/blog/{blogId}/vote?eventSource=UserProfileView", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.delete(f"{self.apie}/x{self.comId}/s/item/{wikiId}/vote?eventSource=PostDetailView", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def like_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
data = {
"value": 1,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["eventSource"] = "UserProfileView"
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/comment/{commentId}/vote?cv=1.2&value=1", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{blogId}/comment/{commentId}/vote?cv=1.2&value=1", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["eventSource"] = "PostDetailView"
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/item/{wikiId}/comment/{commentId}/g-vote?cv=1.2&value=1", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unlike_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
if userId: response = requests.delete(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/comment/{commentId}/g-vote?eventSource=UserProfileView", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif blogId: response = requests.delete(f"{self.apie}/x{self.comId}/s/blog/{blogId}/comment/{commentId}/g-vote?eventSource=PostDetailView", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.delete(f"{self.apie}/x{self.comId}/s/item/{wikiId}/comment/{commentId}/g-vote?eventSource=PostDetailView", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def upvote_comment(self, blogId: str, commentId: str):
data = json.dumps({
"value": 1,
"eventSource": "PostDetailView",
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{blogId}/comment/{commentId}/vote?cv=1.2&value=1", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def downvote_comment(self, blogId: str, commentId: str):
data = json.dumps({
"value": -1,
"eventSource": "PostDetailView",
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{blogId}/comment/{commentId}/vote?cv=1.2&value=-1", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unvote_comment(self, blogId: str, commentId: str):
response = requests.delete(f"{self.apie}/x{self.comId}/s/blog/{blogId}/comment/{commentId}/vote?eventSource=PostDetailView", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def reply_wall(self, userId: str, commentId: str, message: str):
data = json.dumps({
"content": message,
"stackedId": None,
"respondTo": commentId,
"type": 0,
"eventSource": "UserProfileView",
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/comment", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def send_active_obj(self, startTime: int = None, endTime: int = None, optInAdsFlags: int = 2147483647, tz: int = -timezone // 1000, timers: list = None, timestamp: int = int(timestamp() * 1000)):
data = {
"userActiveTimeChunkList": [{
"start": startTime,
"end": endTime
}],
"timestamp": timestamp,
"optInAdsFlags": optInAdsFlags,
"timezone": tz
}
if timers:
data["userActiveTimeChunkList"] = timers
data = json_minify(json.dumps(data))
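        # This endpoint is signature-checked: the minified JSON body is
        # HMAC-SHA1'ed with a hard-coded key, and the digest (prefixed with a
        # 0x01 version byte) is base64-encoded and handed to headers.Headers as
        # the request signature.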
mac = hmac.new(bytes.fromhex("715ffccf8c0536f186bf127a16c14682827fc581"), data.encode("utf-8"), sha1)
signature = base64.b64encode(bytes.fromhex("01") + mac.digest()).decode("utf-8")
response = requests.post(f"{self.apie}/x{self.comId}/s/community/stats/user-active-time", headers=headers.Headers(data=data, sig=signature, deviceId=self.device_id).headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def activity_status(self, status: str):
if "on" in status.lower(): status = 1
elif "off" in status.lower(): status = 2
else: raise exceptions.WrongType(status)
data = json.dumps({
"onlineStatus": status,
"duration": 86400,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{self.profile.userId}/online-status", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
# TODO : Finish this
def watch_ad(self):
response = requests.post(f"{self.apie}/g/s/wallet/ads/video/start", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def check_notifications(self):
response = requests.post(f"{self.apie}/x{self.comId}/s/notification/checked", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def delete_notification(self, notificationId: str):
response = requests.delete(f"{self.apie}/x{self.comId}/s/notification/{notificationId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def clear_notifications(self):
response = requests.delete(f"{self.apie}/x{self.comId}/s/notification", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
    def start_chat(self, userId: Union[str, list], message: str, title: str = None, content: str = None, isGlobal: bool = False, publishToGlobal: bool = False):
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType(type(userId))
data = {
"title": title,
"inviteeUids": userIds,
"initialMessageContent": message,
"content": content,
"timestamp": int(timestamp() * 1000)
}
if isGlobal is True: data["type"] = 2; data["eventSource"] = "GlobalComposeMenu"
else: data["type"] = 0
if publishToGlobal is True: data["publishToGlobal"] = 1
else: data["publishToGlobal"] = 0
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread", data=data, headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
    def invite_to_chat(self, userId: Union[str, list], chatId: str):
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType(type(userId))
data = json.dumps({
"uids": userIds,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/member/invite", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def add_to_favorites(self, userId: str):
response = requests.post(f"{self.apie}/x{self.comId}/s/user-group/quick-access/{userId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def send_coins(self, coins: int, blogId: str = None, chatId: str = None, objectId: str = None, transactionId: str = None):
url = None
if transactionId is None: transactionId = str(UUID(hexlify(urandom(16)).decode('ascii')))
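        # No transactionId supplied: derive one from 16 random bytes formatted
        # as a UUID string, so every tip gets a unique id.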
data = {
"coins": coins,
"tippingContext": {"transactionId": transactionId},
"timestamp": int(timestamp() * 1000)
}
if blogId is not None: url = f"{self.apie}/x{self.comId}/s/blog/{blogId}/tipping"
if chatId is not None: url = f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/tipping"
if objectId is not None:
data["objectId"] = objectId
data["objectType"] = 2
url = f"{self.apie}/x{self.comId}/s/tipping"
if url is None: raise exceptions.SpecifyType()
data = json.dumps(data)
response = requests.post(url, headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def thank_tip(self, chatId: str, userId: str):
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/tipping/tipped-users/{userId}/thank", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
    def follow(self, userId: Union[str, list]):
"""
        Follow a User or Multiple Users.
**Parameters**
- **userId** : ID of the User or List of IDs of the Users.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if isinstance(userId, str):
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/member", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif isinstance(userId, list):
data = json.dumps({"targetUidList": userId, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{self.profile.userId}/joined", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.WrongType(type(userId))
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unfollow(self, userId: str):
"""
        Unfollow a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.delete(f"{self.apie}/x{self.comId}/s/user-profile/{self.profile.userId}/joined/{userId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def block(self, userId: str):
"""
        Block a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.post(f"{self.apie}/x{self.comId}/s/block/{userId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unblock(self, userId: str):
"""
        Unblock a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.delete(f"{self.apie}/x{self.comId}/s/block/{userId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def visit(self, userId: str):
"""
        Visit a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.apie}/x{self.comId}/s/user-profile/{userId}?action=visit", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def flag(self, reason: str, flagType: int, userId: str = None, blogId: str = None, wikiId: str = None, asGuest: bool = False):
"""
Flag a User, Blog or Wiki.
**Parameters**
- **reason** : Reason of the Flag.
- **flagType** : Type of the Flag.
- **userId** : ID of the User.
- **blogId** : ID of the Blog.
- **wikiId** : ID of the Wiki.
- *asGuest* : Execute as a Guest.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if reason is None: raise exceptions.ReasonNeeded()
if flagType is None: raise exceptions.FlagTypeNeeded()
data = {
"flagType": flagType,
"message": reason,
"timestamp": int(timestamp() * 1000)
}
if userId:
data["objectId"] = userId
data["objectType"] = 0
elif blogId:
data["objectId"] = blogId
data["objectType"] = 1
elif wikiId:
data["objectId"] = wikiId
data["objectType"] = 2
else: raise exceptions.SpecifyType()
if asGuest: flg = "g-flag"
else: flg = "flag"
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/{flg}", data=data, headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def send_message(self, chatId: str, message: str = None, messageType: int = 0, file: BinaryIO = None, fileType: str = None, replyTo: str = None, mentionUserIds: list = None, stickerId: str = None, embedId: str = None, embedType: int = None, embedLink: str = None, embedTitle: str = None, embedContent: str = None, embedImage: BinaryIO = None):
"""
Send a Message to a Chat.
**Parameters**
- **message** : Message to be sent
- **chatId** : ID of the Chat.
- **file** : File to be sent.
- **fileType** : Type of the file.
- ``audio``, ``image``, ``gif``
- **messageType** : Type of the Message.
        - **mentionUserIds** : List of User IDs to mention. '@' needed in the Message.
- **replyTo** : Message ID to reply to.
- **stickerId** : Sticker ID to be sent.
- **embedTitle** : Title of the Embed.
- **embedContent** : Content of the Embed.
- **embedLink** : Link of the Embed.
- **embedImage** : Image of the Embed.
- **embedId** : ID of the Embed.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if message is not None and file is None:
message = message.replace("<$", "").replace("$>", "")
mentions = []
if mentionUserIds:
for mention_uid in mentionUserIds:
mentions.append({"uid": mention_uid})
if embedImage:
embedImage = [[100, self.upload_media(embedImage, "image"), None]]
data = {
"type": messageType,
"content": message,
"clientRefId": int(timestamp() / 10 % 1000000000),
"attachedObject": {
"objectId": embedId,
"objectType": embedType,
"link": embedLink,
"title": embedTitle,
"content": embedContent,
"mediaList": embedImage
},
"extensions": {"mentionedArray": mentions},
"timestamp": int(timestamp() * 1000)
}
if replyTo: data["replyMessageId"] = replyTo
if stickerId:
data["content"] = None
data["stickerId"] = stickerId
data["type"] = 3
if file:
data["content"] = None
if fileType == "audio":
data["type"] = 2
data["mediaType"] = 110
elif fileType == "image":
data["mediaType"] = 100
data["mediaUploadValueContentType"] = "image/jpg"
data["mediaUhqEnabled"] = True
elif fileType == "gif":
data["mediaType"] = 100
data["mediaUploadValueContentType"] = "image/gif"
data["mediaUhqEnabled"] = True
else: raise exceptions.SpecifyType(fileType)
data["mediaUploadValue"] = base64.b64encode(file.read()).decode()
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/message", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def delete_message(self, chatId: str, messageId: str, asStaff: bool = False, reason: str = None):
"""
Delete a Message from a Chat.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
- **asStaff** : If execute as a Staff member (Leader or Curator).
- **reason** : Reason of the action to show on the Moderation History.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {
"adminOpName": 102,
# "adminOpNote": {"content": reason},
"timestamp": int(timestamp() * 1000)
}
if asStaff and reason:
data["adminOpNote"] = {"content": reason}
data = json.dumps(data)
if not asStaff: response = requests.delete(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/message/{messageId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
else: response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/message/{messageId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def mark_as_read(self, chatId: str, messageId: str):
"""
Mark a Message from a Chat as Read.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"messageId": messageId,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/mark-as-read", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def edit_chat(self, chatId: str, doNotDisturb: bool = None, pinChat: bool = None, title: str = None, icon: str = None, backgroundImage: str = None, content: str = None, announcement: str = None, keywords: list = None, pinAnnouncement: bool = None, publishToGlobal: bool = None, canTip: bool = None, viewOnly: bool = None, canInvite: bool = None, fansOnly: bool = None):
"""
        Edit a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **title** : Title of the Chat.
- **content** : Content of the Chat.
- **icon** : Icon of the Chat.
- **backgroundImage** : Url of the Background Image of the Chat.
- **announcement** : Announcement of the Chat.
        - **pinAnnouncement** : If the Chat Announcement should be Pinned or not.
- **keywords** : List of Keywords of the Chat.
- **viewOnly** : If the Chat should be on View Only or not.
- **canTip** : If the Chat should be Tippable or not.
- **canInvite** : If the Chat should be Invitable or not.
- **fansOnly** : If the Chat should be Fans Only or not.
- **publishToGlobal** : If the Chat should show on Public Chats or not.
- **doNotDisturb** : If the Chat should Do Not Disturb or not.
        - **pinChat** : If the Chat should be Pinned or not.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"timestamp": int(timestamp() * 1000)}
if title: data["title"] = title
if content: data["content"] = content
if icon: data["icon"] = icon
if keywords: data["keywords"] = keywords
if announcement: data["extensions"] = {"announcement": announcement}
if pinAnnouncement: data["extensions"] = {"pinAnnouncement": pinAnnouncement}
if fansOnly: data["extensions"] = {"fansOnly": fansOnly}
if publishToGlobal: data["publishToGlobal"] = 0
if not publishToGlobal: data["publishToGlobal"] = 1
res = []
if doNotDisturb is not None:
# Use dedicated variables for these sub-requests so the outer `data`
# dict is not clobbered before the final thread-update request below.
alert_data = json.dumps({"alertOption": 2 if doNotDisturb else 1, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/member/{self.profile.userId}/alert", data=alert_data, headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if pinChat is not None:
# The pin/unpin endpoints take no meaningful body; send a timestamp-only
# payload rather than reusing the stale `data` dict.
pin_data = json.dumps({"timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/{'pin' if pinChat else 'unpin'}", data=pin_data, headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if backgroundImage is not None:
bg_data = json.dumps({"media": [100, backgroundImage, None], "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/member/{self.profile.userId}/background", data=bg_data, headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if viewOnly is not None:
toggle_data = json.dumps({"timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/view-only/{'enable' if viewOnly else 'disable'}", data=toggle_data, headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if canInvite is not None:
toggle_data = json.dumps({"timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/members-can-invite/{'enable' if canInvite else 'disable'}", data=toggle_data, headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
if canTip is not None:
toggle_data = json.dumps({"timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/tipping-perm-status/{'enable' if canTip else 'disable'}", data=toggle_data, headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
else: res.append(response.status_code)
return res
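# A minimal usage sketch for the settings editor above (hypothetical IDs;
# `sub` is assumed to be an authenticated community client and the
# enclosing method is assumed to be named edit_chat):
#   sub.edit_chat(chatId="chat-id", title="Lounge", viewOnly=True, doNotDisturb=False)
# Each toggled option fires its own request, so the returned list holds
# one status code (or exception) per change.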
def transfer_host(self, chatId: str, userIds: list):
data = json.dumps({
"uidList": userIds,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/transfer-organizer", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def transfer_organizer(self, chatId: str, userIds: list):
# Alias for transfer_host; forward the return value.
return self.transfer_host(chatId, userIds)
def accept_host(self, chatId: str, requestId: str):
data = json.dumps({})
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/transfer-organizer/{requestId}/accept", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def accept_organizer(self, chatId: str, requestId: str):
# Alias for accept_host; forward the return value.
return self.accept_host(chatId, requestId)
def kick(self, userId: str, chatId: str, allowRejoin: bool = True):
allowRejoin = 1 if allowRejoin else 0
response = requests.delete(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/member/{userId}?allowRejoin={allowRejoin}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
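# Usage sketch (hypothetical IDs): remove a member and block rejoining.
#   sub.kick(userId="user-id", chatId="chat-id", allowRejoin=False)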
def join_chat(self, chatId: str):
"""
Join a Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/member/{self.profile.userId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def leave_chat(self, chatId: str):
"""
Leave a Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.delete(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/member/{self.profile.userId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def delete_chat(self, chatId: str):
"""
Delete a Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.delete(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def subscribe(self, userId: str, autoRenew: bool = False, transactionId: str = None):
if transactionId is None: transactionId = str(UUID(hexlify(urandom(16)).decode('ascii')))
data = json.dumps({
"paymentContext": {
"transactionId": transactionId,
"isAutoRenew": autoRenew
},
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/influencer/{userId}/subscribe", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def promotion(self, noticeId: str, type: str = "accept"):
response = requests.post(f"{self.apie}/x{self.comId}/s/notice/{noticeId}/{type}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def play_quiz_raw(self, quizId: str, quizAnswerList: list, quizMode: int = 0):
data = json.dumps({
"mode": quizMode,
"quizAnswerList": quizAnswerList,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{quizId}/quiz/result", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def play_quiz(self, quizId: str, questionIdsList: list, answerIdsList: list, quizMode: int = 0):
quizAnswerList = []
for question, answer in zip(questionIdsList, answerIdsList):
# Build each answer entry directly; no dumps/loads round trip needed.
quizAnswerList.append({
"optIdList": [answer],
"quizQuestionId": question,
"timeSpent": 0.0
})
data = json.dumps({
"mode": quizMode,
"quizAnswerList": quizAnswerList,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{quizId}/quiz/result", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
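# Usage sketch (hypothetical IDs): answer a two-question quiz; the
# question and answer ID lists are paired positionally.
#   sub.play_quiz(quizId="quiz-blog-id",
#                 questionIdsList=["q1", "q2"],
#                 answerIdsList=["q1-opt3", "q2-opt1"])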
def vc_permission(self, chatId: str, permission: int):
"""Voice Chat Join Permissions
1 - Open to Everyone
2 - Approval Required
3 - Invite Only
"""
data = json.dumps({
"vvChatJoinType": permission,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/vvchat-permission", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
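# Usage sketch (hypothetical ID): make a voice chat invite-only.
#   sub.vc_permission(chatId="chat-id", permission=3)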
def get_vc_reputation_info(self, chatId: str):
response = requests.get(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/avchat-reputation", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.VcReputation(json.loads(response.text)).VcReputation
def claim_vc_reputation(self, chatId: str):
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/avchat-reputation", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.VcReputation(json.loads(response.text)).VcReputation
def get_all_users(self, type: str = "recent", start: int = 0, size: int = 25):
# All listing variants share one endpoint; only the type query parameter differs.
if type not in ("recent", "banned", "featured", "leaders", "curators"): raise exceptions.WrongType(type)
response = requests.get(f"{self.apie}/x{self.comId}/s/user-profile?type={type}&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileCountList(json.loads(response.text)).UserProfileCountList
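# Usage sketch: page through the community leaders, 25 profiles at a time.
#   leaders = sub.get_all_users(type="leaders", start=0, size=25)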
def get_online_users(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/live-layer?topic=ndtopic:x{self.comId}:online-members&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileCountList(json.loads(response.text)).UserProfileCountList
def get_online_favorite_users(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/user-group/quick-access?type=online&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileCountList(json.loads(response.text)).UserProfileCountList
def get_user_info(self, userId: str):
"""
Information about a User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`User Object <amino.lib.util.objects.UserProfile>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.apie}/x{self.comId}/s/user-profile/{userId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfile(json.loads(response.text)["userProfile"]).UserProfile
def get_user_following(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that the User is Following.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/joined?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_user_followers(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that are Following the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/member?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_user_visitors(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that Visited the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Visitors List <amino.lib.util.objects.visitorsList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/visitors?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.VisitorsList(json.loads(response.text)).VisitorsList
def get_user_checkins(self, userId: str):
response = requests.get(f"{self.apie}/x{self.comId}/s/check-in/stats/{userId}?timezone={-timezone // 1000}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserCheckIns(json.loads(response.text)).UserCheckIns
def get_user_blogs(self, userId: str, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/blog?type=user&q={userId}&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.BlogList(json.loads(response.text)["blogList"]).BlogList
def get_user_wikis(self, userId: str, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/item?type=user-all&start={start}&size={size}&cv=1.2&uid={userId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.WikiList(json.loads(response.text)["itemList"]).WikiList
def get_user_achievements(self, userId: str):
response = requests.get(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/achievements", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserAchievements(json.loads(response.text)["achievements"]).UserAchievements
def get_influencer_fans(self, userId: str, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/influencer/{userId}/fans?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.InfluencerFans(json.loads(response.text)).InfluencerFans
def get_blocked_users(self, start: int = 0, size: int = 25):
"""
List of Users that the User Blocked.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Users List <amino.lib.util.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.apie}/x{self.comId}/s/block?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_blocker_users(self, start: int = 0, size: int = 25):
"""
List of Users that are Blocking the User.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`List of User IDs <List>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.apie}/x{self.comId}/s/block?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["blockerUidList"]
def search_users(self, nickname: str, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/user-profile?type=name&q={nickname}&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_saved_blogs(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/bookmark?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserSavedBlogs(json.loads(response.text)["bookmarkList"]).UserSavedBlogs
def get_leaderboard_info(self, type: str, start: int = 0, size: int = 25):
if "24" in type or "hour" in type: response = requests.get(f"{self.apie}/g/s-x{self.comId}/community/leaderboard?rankingType=1&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif "7" in type or "day" in type: response = requests.get(f"{self.apie}/g/s-x{self.comId}/community/leaderboard?rankingType=2&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif "rep" in type: response = requests.get(f"{self.apie}/g/s-x{self.comId}/community/leaderboard?rankingType=3&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif "check" in type: response = requests.get(f"{self.apie}/g/s-x{self.comId}/community/leaderboard?rankingType=4", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif "quiz" in type: response = requests.get(f"{self.apie}/g/s-x{self.comId}/community/leaderboard?rankingType=5&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.WrongType(type)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
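# Usage sketch: the type string is keyword-matched, so "24h", "7days",
# "rep", "check" and "quiz" select ranking types 1-5 respectively.
#   top_rep = sub.get_leaderboard_info(type="rep", size=10)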
def get_wiki_info(self, wikiId: str):
response = requests.get(f"{self.apie}/x{self.comId}/s/item/{wikiId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetWikiInfo(json.loads(response.text)).GetWikiInfo
def get_recent_wiki_items(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/item?type=catalog-all&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.WikiList(json.loads(response.text)["itemList"]).WikiList
def get_wiki_categories(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/item-category?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.WikiCategoryList(json.loads(response.text)["itemCategoryList"]).WikiCategoryList
def get_wiki_category(self, categoryId: str, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/item-category/{categoryId}?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.WikiCategory(json.loads(response.text)).WikiCategory
def get_tipped_users(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None, chatId: str = None, start: int = 0, size: int = 25):
if blogId or quizId:
if quizId is not None: blogId = quizId
response = requests.get(f"{self.apie}/x{self.comId}/s/blog/{blogId}/tipping/tipped-users-summary?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.get(f"{self.apie}/x{self.comId}/s/item/{wikiId}/tipping/tipped-users-summary?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif chatId: response = requests.get(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/tipping/tipped-users-summary?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif fileId: response = requests.get(f"{self.apie}/x{self.comId}/s/shared-folder/files/{fileId}/tipping/tipped-users-summary?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.TippedUsersSummary(json.loads(response.text)).TippedUsersSummary
def get_chat_threads(self, start: int = 0, size: int = 25):
"""
List of Chats the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Chat List <amino.lib.util.objects.ThreadList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.apie}/x{self.comId}/s/chat/thread?type=joined-me&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.ThreadList(json.loads(response.text)["threadList"]).ThreadList
def get_public_chat_threads(self, type: str = "recommended", start: int = 0, size: int = 25):
"""
List of Public Chats of the Community.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Chat List <amino.lib.util.objects.ThreadList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.apie}/x{self.comId}/s/chat/thread?type=public-all&filterType={type}&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.ThreadList(json.loads(response.text)["threadList"]).ThreadList
def get_chat_thread(self, chatId: str):
"""
Get the Chat Object from a Chat ID.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : :meth:`Chat Object <amino.lib.util.objects.Thread>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.Thread(json.loads(response.text)["thread"]).Thread
def get_chat_messages(self, chatId: str, size: int = 25, pageToken: str = None):
"""
List of Messages from a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- *size* : Size of the list.
- *pageToken* : Next Page Token.
**Returns**
- **Success** : :meth:`Message List <amino.lib.util.objects.MessageList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if pageToken is not None: url = f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/message?v=2&pagingType=t&pageToken={pageToken}&size={size}"
else: url = f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/message?v=2&pagingType=t&size={size}"
response = requests.get(url, headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetMessages(json.loads(response.text)).GetMessages
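# Pagination sketch (hypothetical ID; the paging-token attribute name on
# the GetMessages wrapper is assumed):
#   page = sub.get_chat_messages(chatId="chat-id", size=25)
#   older = sub.get_chat_messages(chatId="chat-id", size=25, pageToken=page.nextPageToken)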
def get_message_info(self, chatId: str, messageId: str):
"""
Information about a Message in a Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **messageId** : ID of the Message.
**Returns**
- **Success** : :meth:`Message Object <amino.lib.util.objects.Message>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
response = requests.get(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/message/{messageId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.Message(json.loads(response.text)["message"]).Message
def get_blog_info(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None):
if blogId or quizId:
if quizId is not None: blogId = quizId
response = requests.get(f"{self.apie}/x{self.comId}/s/blog/{blogId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetBlogInfo(json.loads(response.text)).GetBlogInfo
elif wikiId:
response = requests.get(f"{self.apie}/x{self.comId}/s/item/{wikiId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetWikiInfo(json.loads(response.text)).GetWikiInfo
elif fileId:
response = requests.get(f"{self.apie}/x{self.comId}/s/shared-folder/files/{fileId}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.SharedFolderFile(json.loads(response.text)["file"]).SharedFolderFile
else: raise exceptions.SpecifyType()
def get_blog_comments(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None, sorting: str = "newest", start: int = 0, size: int = 25):
if sorting == "top": sorting = "vote"
elif sorting not in ("newest", "oldest"): raise exceptions.WrongType(sorting)
if blogId or quizId:
if quizId is not None: blogId = quizId
response = requests.get(f"{self.apie}/x{self.comId}/s/blog/{blogId}/comment?sort={sorting}&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.get(f"{self.apie}/x{self.comId}/s/item/{wikiId}/comment?sort={sorting}&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif fileId: response = requests.get(f"{self.apie}/x{self.comId}/s/shared-folder/files/{fileId}/comment?sort={sorting}&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommentList(json.loads(response.text)["commentList"]).CommentList
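# Usage sketch (hypothetical ID): fetch the top-voted comments on a blog.
#   comments = sub.get_blog_comments(blogId="blog-id", sorting="top", size=10)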
def get_blog_categories(self, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/blog-category?size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.BlogCategoryList(json.loads(response.text)["blogCategoryList"]).BlogCategoryList
def get_blogs_by_category(self, categoryId: str,start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/blog-category/{categoryId}/blog-list?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.BlogList(json.loads(response.text)["blogList"]).BlogList
def get_quiz_rankings(self, quizId: str, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/blog/{quizId}/quiz/result?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.QuizRankings(json.loads(response.text)).QuizRankings
def get_wall_comments(self, userId: str, sorting: str, start: int = 0, size: int = 25):
"""
List of Wall Comments of an User.
**Parameters**
- **userId** : ID of the User.
- **sorting** : Order of the Comments.
- ``newest``, ``oldest``, ``top``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Comments List <amino.lib.util.objects.CommentList>`
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
if sorting == "top": sorting = "vote"
elif sorting not in ("newest", "oldest"): raise exceptions.WrongType(sorting)
response = requests.get(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/comment?sort={sorting}&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommentList(json.loads(response.text)["commentList"]).CommentList
def get_recent_blogs(self, pageToken: str = None, start: int = 0, size: int = 25):
if pageToken is not None: url = f"{self.apie}/x{self.comId}/s/feed/blog-all?pagingType=t&pageToken={pageToken}&size={size}"
else: url = f"{self.apie}/x{self.comId}/s/feed/blog-all?pagingType=t&start={start}&size={size}"
response = requests.get(url, headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.RecentBlogs(json.loads(response.text)).RecentBlogs
def get_chat_users(self, chatId: str, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/member?start={start}&size={size}&type=default&cv=1.2", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["memberList"]).UserProfileList
def get_notifications(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/notification?pagingType=t&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.NotificationList(json.loads(response.text)["notificationList"]).NotificationList
# TODO : Get notice to finish this
def get_notices(self, start: int = 0, size: int = 25):
"""
:param start: Start of the List (Start: 0)
:param size: Amount of Notices to Show
:return: Notices List
"""
response = requests.get(f"{self.apie}/x{self.comId}/s/notice?type=usersV2&status=1&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["noticeList"]
def get_sticker_pack_info(self, sticker_pack_id: str):
response = requests.get(f"{self.apie}/x{self.comId}/s/sticker-collection/{sticker_pack_id}?includeStickers=true", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.StickerCollection(json.loads(response.text)["stickerCollection"]).StickerCollection
def get_sticker_packs(self):
response = requests.get(f"{self.apie}/x{self.comId}/s/sticker-collection?includeStickers=false&type=my-active-collection", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
return objects.StickerCollection(json.loads(response.text)["stickerCollection"]).StickerCollection
# TODO : Finish this
def get_store_chat_bubbles(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/store/items?sectionGroupId=chat-bubble&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else:
response = json.loads(response.text)
del response["api:message"], response["api:statuscode"], response["api:duration"], response["api:timestamp"]
return response
# TODO : Finish this
def get_store_stickers(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/store/items?sectionGroupId=sticker&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else:
response = json.loads(response.text)
del response["api:message"], response["api:statuscode"], response["api:duration"], response["api:timestamp"]
return response
def get_community_stickers(self):
response = requests.get(f"{self.apie}/x{self.comId}/s/sticker-collection?type=community-shared", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityStickerCollection(json.loads(response.text)).CommunityStickerCollection
def get_sticker_collection(self, collectionId: str):
response = requests.get(f"{self.apie}/x{self.comId}/s/sticker-collection/{collectionId}?includeStickers=true", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.StickerCollection(json.loads(response.text)["stickerCollection"]).StickerCollection
def get_shared_folder_info(self):
response = requests.get(f"{self.apie}/x{self.comId}/s/shared-folder/stats", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetSharedFolderInfo(json.loads(response.text)["stats"]).GetSharedFolderInfo
def get_shared_folder_files(self, type: str = "latest", start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/shared-folder/files?type={type}&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.SharedFolderFileList(json.loads(response.text)["fileList"]).SharedFolderFileList
#
# MODERATION MENU
#
def moderation_history(self, userId: str = None, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None, size: int = 25):
if userId: response = requests.get(f"{self.apie}/x{self.comId}/s/admin/operation?objectId={userId}&objectType=0&pagingType=t&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif blogId: response = requests.get(f"{self.apie}/x{self.comId}/s/admin/operation?objectId={blogId}&objectType=1&pagingType=t&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif quizId: response = requests.get(f"{self.apie}/x{self.comId}/s/admin/operation?objectId={quizId}&objectType=1&pagingType=t&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.get(f"{self.apie}/x{self.comId}/s/admin/operation?objectId={wikiId}&objectType=2&pagingType=t&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
elif fileId: response = requests.get(f"{self.apie}/x{self.comId}/s/admin/operation?objectId={fileId}&objectType=109&pagingType=t&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
else: response = requests.get(f"{self.apie}/x{self.comId}/s/admin/operation?pagingType=t&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.AdminLogList(json.loads(response.text)["adminLogList"]).AdminLogList
def feature(self, time: int, userId: str = None, chatId: str = None, blogId: str = None, wikiId: str = None):
if chatId:
# Chat features last 1, 2 or 3 hours (time codes 1-3).
if time == 1: time = 3600
elif time == 2: time = 7200
elif time == 3: time = 10800
else: raise exceptions.WrongType(time)
else:
# User/blog/wiki features last 1, 2 or 3 days (time codes 1-3).
if time == 1: time = 86400
elif time == 2: time = 172800
elif time == 3: time = 259200
else: raise exceptions.WrongType(time)
data = {
"adminOpName": 114,
"adminOpValue": {
"featuredDuration": time
},
"timestamp": int(timestamp() * 1000)
}
if userId:
# Set the featured type alongside the duration instead of replacing it.
data["adminOpValue"]["featuredType"] = 4
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["adminOpValue"]["featuredType"] = 1
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{blogId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["adminOpValue"]["featuredType"] = 1
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/item/{wikiId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif chatId:
data["adminOpValue"]["featuredType"] = 5
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
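# Usage sketch (hypothetical IDs): feature a blog for two days (time code
# 2 -> 172800 s) or a chat for one hour (time code 1 -> 3600 s).
#   sub.feature(time=2, blogId="blog-id")
#   sub.feature(time=1, chatId="chat-id")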
def unfeature(self, userId: str = None, chatId: str = None, blogId: str = None, wikiId: str = None):
data = {
"adminOpName": 114,
"adminOpValue": {},
"timestamp": int(timestamp() * 1000)
}
if userId:
data["adminOpValue"] = {"featuredType": 0}
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["adminOpValue"] = {"featuredType": 0}
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{blogId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["adminOpValue"] = {"featuredType": 0}
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/item/{wikiId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif chatId:
data["adminOpValue"] = {"featuredType": 0}
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
def hide(self, userId: str = None, chatId: str = None, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None, reason: str = None):
data = {
"adminOpNote": {
"content": reason
},
"timestamp": int(timestamp() * 1000)
}
if userId:
data["adminOpName"] = 18
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["adminOpName"] = 110
data["adminOpValue"] = 9
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{blogId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif quizId:
data["adminOpName"] = 110
data["adminOpValue"] = 9
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{quizId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["adminOpName"] = 110
data["adminOpValue"] = 9
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/item/{wikiId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif chatId:
data["adminOpName"] = 110
data["adminOpValue"] = 9
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif fileId:
data["adminOpName"] = 110
data["adminOpValue"] = 9
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/shared-folder/files/{fileId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
def unhide(self, userId: str = None, chatId: str = None, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None, reason: str = None):
data = {
"adminOpNote": {
"content": reason
},
"timestamp": int(timestamp() * 1000)
}
if userId:
data["adminOpName"] = 19
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif blogId:
data["adminOpName"] = 110
data["adminOpValue"] = 0
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{blogId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif quizId:
data["adminOpName"] = 110
data["adminOpValue"] = 0
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{quizId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif wikiId:
data["adminOpName"] = 110
data["adminOpValue"] = 0
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/item/{wikiId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif chatId:
data["adminOpName"] = 110
data["adminOpValue"] = 0
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
elif fileId:
data["adminOpName"] = 110
data["adminOpValue"] = 0
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/shared-folder/files/{fileId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
def edit_titles(self, userId: str, titles: list, colors: list):
tlt = []
for title, color in zip(titles, colors):
tlt.append({"title": title, "color": color})
data = json.dumps({
"adminOpName": 207,
"adminOpValue": {
"titles": tlt
},
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/admin", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
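# Usage sketch (hypothetical ID): titles and colors pair positionally.
#   sub.edit_titles(userId="user-id", titles=["Leader", "Artist"], colors=["#FFFFFF", "#FF0000"])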
# TODO : List all warning texts
def warn(self, userId: str, reason: str = None):
data = json.dumps({
"uid": userId,
"title": "Custom",
"content": reason,
"attachedObject": {
"objectId": userId,
"objectType": 0
},
"penaltyType": 0,
"adminOpNote": {},
"noticeType": 7,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/notice", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
# TODO : List all strike texts
def strike(self, userId: str, time: int, title: str = None, reason: str = None):
# Strike durations: 1 -> 1h, 2 -> 3h, 3 -> 6h, 4 -> 12h, 5 -> 24h.
if time == 1: time = 3600
elif time == 2: time = 10800
elif time == 3: time = 21600
elif time == 4: time = 43200
elif time == 5: time = 86400
else: raise exceptions.WrongType(time)
data = json.dumps({
"uid": userId,
"title": title,
"content": reason,
"attachedObject": {
"objectId": userId,
"objectType": 0
},
"penaltyType": 1,
"penaltyValue": time,
"adminOpNote": {},
"noticeType": 4,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/notice", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
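# Usage sketch (hypothetical ID): issue a 6-hour strike (time code 3).
#   sub.strike(userId="user-id", time=3, title="Spam", reason="Flooding the feed")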
def ban(self, userId: str, reason: str, banType: int = None):
data = json.dumps({
"reasonType": banType,
"note": {
"content": reason
},
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/ban", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
def unban(self, userId: str, reason: str):
data = json.dumps({
"note": {
"content": reason
},
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/{userId}/unban", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
def reorder_featured_users(self, userIds: list):
data = json.dumps({
"uidList": userIds,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/user-profile/featured/reorder", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)
def get_hidden_blogs(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/feed/blog-disabled?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.BlogList(json.loads(response.text)["blogList"]).BlogList
def get_featured_users(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/user-profile?type=featured&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileCountList(json.loads(response.text)).UserProfileCountList
def review_quiz_questions(self, quizId: str):
response = requests.get(f"{self.apie}/x{self.comId}/s/blog/{quizId}?action=review", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.QuizQuestionList(json.loads(response.text)["blog"]["quizQuestionList"]).QuizQuestionList
def get_recent_quiz(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/blog?type=quizzes-recent&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.BlogList(json.loads(response.text)["blogList"]).BlogList
def get_trending_quiz(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/feed/quiz-trending?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.BlogList(json.loads(response.text)["blogList"]).BlogList
def get_best_quiz(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/feed/quiz-best-quizzes?start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.BlogList(json.loads(response.text)["blogList"]).BlogList
def send_action(self, actions: list, blogId: str = None, quizId: str = None, lastAction: bool = False):
# Browsing actions ride over the websocket; t=306 marks the final action
# in a sequence, t=304 an intermediate one.
if lastAction is True: t = 306
else: t = 304
data = {
"o": {
"actions": actions,
"target": f"ndc://x{self.comId}/",
"ndcId": int(self.comId),
"params": {"topicIds": [45841, 17254, 26542, 42031, 22542, 16371, 6059, 41542, 15852]},
"id": "831046"
},
"t": t
}
if blogId is not None or quizId is not None:
# target and params live inside the "o" envelope, not at the top level.
data["o"]["target"] = f"ndc://x{self.comId}/blog/{blogId or quizId}"
if blogId is not None: data["o"]["params"]["blogType"] = 0
if quizId is not None: data["o"]["params"]["blogType"] = 6
return self.send(json.dumps(data))
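# Usage sketch (hypothetical ID): report a browsing session that ends on
# a blog view.
#   sub.send_action(actions=["Browsing"], blogId="blog-id", lastAction=True)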
# Provided by "spectrum#4691"
def purchase(self, objectId: str, objectType: int, aminoPlus: bool = True, autoRenew: bool = False):
data = {'objectId': objectId,
'objectType': objectType,
'v': 1,
"timestamp": int(timestamp() * 1000)}
if aminoPlus: data['paymentContext'] = {'discountStatus': 1, 'discountValue': 1, 'isAutoRenew': autoRenew}
else: data['paymentContext'] = {'discountStatus': 0, 'discountValue': 1, 'isAutoRenew': autoRenew}
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/store/purchase", headers=self.new_headers, data=data)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
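# Usage sketch (hypothetical values; the objectType code depends on the
# store item category): buy an item with the Amino+ discount applied.
#   sub.purchase(objectId="store-item-id", objectType=<item type code>, aminoPlus=True)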
# Provided by "spectrum#4691"
def apply_avatar_frame(self, avatarId: str, applyToAll: bool = True):
"""
Apply avatar frame.
**Parameters**
- **avatarId** : ID of the avatar frame.
- **applyToAll** : Apply to all.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = {"frameId": avatarId,
"applyToAll": 0,
"timestamp": int(timestamp() * 1000)}
if applyToAll: data["applyToAll"] = 1
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/avatar-frame/apply", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def invite_to_vc(self, chatId: str, userId: str):
"""
Invite a User to a Voice Chat
**Parameters**
- **chatId** - ID of the Chat
- **userId** - ID of the User
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
"""
data = json.dumps({
"uid": userId
})
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/{chatId}/vvchat-presenter/invite/", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def add_poll_option(self, blogId: str, question: str):
data = json.dumps({
"mediaList": None,
"title": question,
"type": 0,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/blog/{blogId}/poll/option", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def create_wiki_category(self, title: str, parentCategoryId: str, content: str = None):
data = json.dumps({
"content": content,
"icon": None,
"label": title,
"mediaList": None,
"parentCategoryId": parentCategoryId,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/item-category", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def create_shared_folder(self,title: str):
data = json.dumps({
"title":title,
"timestamp":int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/shared-folder/folders", headers=self.new_headers,data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def submit_to_wiki(self, wikiId: str, message: str):
data = json.dumps({
"message": message,
"itemId": wikiId,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/knowledge-base-request", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def accept_wiki_request(self, requestId: str, destinationCategoryIdList: list):
data = json.dumps({
"destinationCategoryIdList": destinationCategoryIdList,
"actionType": "create",
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.apie}/x{self.comId}/s/knowledge-base-request/{requestId}/approve", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def reject_wiki_request(self, requestId: str):
data = json.dumps({})
response = requests.post(f"{self.apie}/x{self.comId}/s/knowledge-base-request/{requestId}/reject", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def get_wiki_submissions(self, start: int = 0, size: int = 25):
response = requests.get(f"{self.apie}/x{self.comId}/s/knowledge-base-request?type=all&start={start}&size={size}", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.WikiRequestList(json.loads(response.text)["knowledgeBaseRequestList"]).WikiRequestList
def get_live_layer(self):
response = requests.get(f"{self.apie}/x{self.comId}/s/live-layer/homepage?v=2", headers=self.new_headers, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return objects.LiveLayer(json.loads(response.text)["liveLayerList"]).LiveLayer
def apply_bubble(self, bubbleId: str, chatId: str, applyToAll: bool = False):
data = {
"applyToAll": 1 if applyToAll else 0,
"bubbleId": bubbleId,
"threadId": chatId,
"timestamp": int(timestamp() * 1000)
}
data = json.dumps(data)
response = requests.post(f"{self.apie}/x{self.comId}/s/chat/thread/apply-bubble", headers=self.new_headers, data=data, proxies=self.proxies, verify=self.certificatePath)
if json.loads(response.text)["api:statuscode"] != 0: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
Amino-new.py
/Amino_new.py-5.0.tar.gz/Amino_new.py-5.0/new/sub_client.py
sub_client.py