PK q8W genshi/__init__.py# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""This package provides various means for generating and processing web markup
(XML or HTML).
The design is centered around the concept of streams of markup events (similar
in concept to SAX parsing events) which can be processed in a uniform manner
independently of where or how they are produced.
"""
__docformat__ = 'restructuredtext en'

# Determine the installed version from the setuptools distribution metadata;
# fall back to None when setuptools is missing or the 'Genshi' distribution
# cannot be resolved (e.g. running from a plain source checkout).
try:
    from pkg_resources import get_distribution, ResolutionError
    try:
        __version__ = get_distribution('Genshi').version
    except ResolutionError:
        __version__ = None # unknown
except ImportError:
    __version__ = None # unknown
from genshi.core import *
from genshi.input import ParseError, XML, HTML
PK q8@_ _ genshi/core.py# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Core classes for markup processing."""
from itertools import chain
import operator
from genshi.util import plaintext, stripentities, striptags
__all__ = ['Stream', 'Markup', 'escape', 'unescape', 'Attrs', 'Namespace',
'QName']
__docformat__ = 'restructuredtext en'
class StreamEventKind(str):
    """A kind of event on a markup stream.

    Instances are interned per value: constructing the class twice with the
    same string returns the same object, so kinds can be compared with ``is``.
    """
    __slots__ = []
    _instances = {}

    def __new__(cls, val):
        kind = cls._instances.get(val)
        if kind is None:
            kind = str.__new__(cls, val)
            cls._instances[val] = kind
        return kind
class Stream(object):
    """Represents a stream of markup events.

    This class is basically an iterator over the events.

    Stream events are tuples of the form::

      (kind, data, position)

    where ``kind`` is the event kind (such as `START`, `END`, `TEXT`, etc),
    ``data`` depends on the kind of event, and ``position`` is a
    ``(filename, line, offset)`` tuple that contains the location of the
    original element or text in the input. If the original location is unknown,
    ``position`` is ``(None, -1, -1)``.

    Also provided are ways to serialize the stream to text. The `serialize()`
    method will return an iterator over generated strings, while `render()`
    returns the complete generated text at once. Both accept various parameters
    that impact the way the stream is serialized.
    """
    __slots__ = ['events', 'serializer']

    # The kinds of events that can occur on a markup stream
    START = StreamEventKind('START') #: a start tag
    END = StreamEventKind('END') #: an end tag
    TEXT = StreamEventKind('TEXT') #: literal text
    XML_DECL = StreamEventKind('XML_DECL') #: XML declaration
    DOCTYPE = StreamEventKind('DOCTYPE') #: doctype declaration
    START_NS = StreamEventKind('START_NS') #: start namespace mapping
    END_NS = StreamEventKind('END_NS') #: end namespace mapping
    START_CDATA = StreamEventKind('START_CDATA') #: start CDATA section
    END_CDATA = StreamEventKind('END_CDATA') #: end CDATA section
    PI = StreamEventKind('PI') #: processing instruction
    COMMENT = StreamEventKind('COMMENT') #: comment

    def __init__(self, events, serializer=None):
        """Initialize the stream with a sequence of markup events.

        :param events: a sequence or iterable providing the events
        :param serializer: the default serialization method to use for this
                           stream
        :note: Changed in 0.5: added the `serializer` argument
        """
        self.events = events #: The underlying iterable producing the events
        self.serializer = serializer #: The default serialization method

    def __iter__(self):
        return iter(self.events)

    def __or__(self, function):
        """Override the "bitwise or" operator to apply filters or serializers
        to the stream, providing a syntax similar to pipes on Unix shells.

        NOTE(review): the markup tags inside the doctest examples below were
        lost when this file was recovered; the examples are illustrative only
        until restored from the upstream documentation.

        Assume the following stream produced by the `HTML` function:

        >>> from genshi.input import HTML
        >>> html = HTML('''
        Hello, world!
        ''')
        >>> print html
        Hello, world!

        A filter such as the HTML sanitizer can be applied to that stream using
        the pipe notation as follows:

        >>> from genshi.filters import HTMLSanitizer
        >>> sanitizer = HTMLSanitizer()
        >>> print html | sanitizer
        Hello, world!

        Filters can be any function that accepts and produces a stream (where
        a stream is anything that iterates over events):

        >>> def uppercase(stream):
        ...     for kind, data, pos in stream:
        ...         if kind is TEXT:
        ...             data = data.upper()
        ...         yield kind, data, pos
        >>> print html | sanitizer | uppercase
        HELLO, WORLD!

        Serializers can also be used with this notation:

        >>> from genshi.output import TextSerializer
        >>> output = TextSerializer()
        >>> print html | sanitizer | uppercase | output
        HELLO, WORLD!

        Commonly, serializers should be used at the end of the "pipeline";
        using them somewhere in the middle may produce unexpected results.

        :param function: the callable object that should be applied as a filter
        :return: the filtered stream
        :rtype: `Stream`
        """
        # _ensure normalizes whatever the filter yields back into event tuples
        return Stream(_ensure(function(self)), serializer=self.serializer)

    def filter(self, *filters):
        """Apply filters to the stream.

        This method returns a new stream with the given filters applied. The
        filters must be callables that accept the stream object as parameter,
        and return the filtered stream.

        The call::

            stream.filter(filter1, filter2)

        is equivalent to::

            stream | filter1 | filter2

        :param filters: one or more callable objects that should be applied as
                        filters
        :return: the filtered stream
        :rtype: `Stream`
        """
        # `reduce` is the Python 2 builtin; folds each filter in via __or__
        return reduce(operator.or_, (self,) + filters)

    def render(self, method=None, encoding='utf-8', out=None, **kwargs):
        """Return a string representation of the stream.

        Any additional keyword arguments are passed to the serializer, and thus
        depend on the `method` parameter value.

        :param method: determines how the stream is serialized; can be either
                       "xml", "xhtml", "html", "text", or a custom serializer
                       class; if `None`, the default serialization method of
                       the stream is used
        :param encoding: how the output string should be encoded; if set to
                         `None`, this method returns a `unicode` object
        :param out: a file-like object that the output should be written to
                    instead of being returned as one big string; note that if
                    this is a file or socket (or similar), the `encoding` must
                    not be `None` (that is, the output must be encoded)
        :return: a `str` or `unicode` object (depending on the `encoding`
                 parameter), or `None` if the `out` parameter is provided
        :rtype: `basestring`
        :see: XMLSerializer, XHTMLSerializer, HTMLSerializer, TextSerializer
        :note: Changed in 0.5: added the `out` parameter
        """
        # Imported lazily, presumably to avoid a circular import with
        # genshi.output -- TODO confirm
        from genshi.output import encode
        if method is None:
            method = self.serializer or 'xml'
        generator = self.serialize(method=method, **kwargs)
        return encode(generator, method=method, encoding=encoding, out=out)

    def select(self, path, namespaces=None, variables=None):
        """Return a new stream that contains the events matching the given
        XPath expression.

        NOTE(review): the markup in the doctest examples below was lost during
        recovery of this file.

        >>> from genshi import HTML
        >>> stream = HTML('foobar')
        >>> print stream.select('elem')
        foobar
        >>> print stream.select('elem/text()')
        foobar

        Note that the outermost element of the stream becomes the *context
        node* for the XPath test. That means that the expression "doc" would
        not match anything in the example above, because it only tests against
        child elements of the outermost element:

        >>> print stream.select('doc')

        You can use the "." expression to match the context node itself
        (although that usually makes little sense):

        >>> print stream.select('.')
        foobar

        :param path: a string containing the XPath expression
        :param namespaces: mapping of namespace prefixes used in the path
        :param variables: mapping of variable names to values
        :return: the selected substream
        :rtype: `Stream`
        :raises PathSyntaxError: if the given path expression is invalid or not
                                 supported
        """
        from genshi.path import Path
        return Path(path).select(self, namespaces, variables)

    def serialize(self, method='xml', **kwargs):
        """Generate strings corresponding to a specific serialization of the
        stream.

        Unlike the `render()` method, this method is a generator that returns
        the serialized output incrementally, as opposed to returning a single
        string.

        Any additional keyword arguments are passed to the serializer, and thus
        depend on the `method` parameter value.

        :param method: determines how the stream is serialized; can be either
                       "xml", "xhtml", "html", "text", or a custom serializer
                       class; if `None`, the default serialization method of
                       the stream is used
        :return: an iterator over the serialization results (`Markup` or
                 `unicode` objects, depending on the serialization method)
        :rtype: ``iterator``
        :see: XMLSerializer, XHTMLSerializer, HTMLSerializer, TextSerializer
        """
        from genshi.output import get_serializer
        if method is None:
            method = self.serializer or 'xml'
        return get_serializer(method, **kwargs)(_ensure(self))

    def __str__(self):
        return self.render()

    def __unicode__(self):
        return self.render(encoding=None)

    def __html__(self):
        # Markup-aware consumers treat the stream itself as safe markup
        return self
# Module-level aliases for the stream event kinds, so callers can import them
# directly from genshi.core instead of going through the Stream class
START = Stream.START
END = Stream.END
TEXT = Stream.TEXT
XML_DECL = Stream.XML_DECL
DOCTYPE = Stream.DOCTYPE
START_NS = Stream.START_NS
END_NS = Stream.END_NS
START_CDATA = Stream.START_CDATA
END_CDATA = Stream.END_CDATA
PI = Stream.PI
COMMENT = Stream.COMMENT
def _ensure(stream):
    """Ensure that every item on the stream is actually a markup event.

    Items that are not ``(kind, data, pos)`` tuples are converted: objects
    providing a ``totuple()`` method (such as `Attrs`) are converted via that
    method, anything else becomes a `TEXT` event with an unknown position.
    """
    stream = iter(stream)
    event = stream.next()  # Python 2 iterator protocol

    # Check whether the iterable is a real markup event stream by examining the
    # first item it yields; if it's not we'll need to do some conversion
    if type(event) is not tuple or len(event) != 3:
        for event in chain([event], stream):
            if hasattr(event, 'totuple'):
                event = event.totuple()
            else:
                event = TEXT, unicode(event), (None, -1, -1)
            yield event
        return

    # This looks like a markup event stream, so we'll just pass it through
    # unchanged
    yield event
    for event in stream:
        yield event
class Attrs(tuple):
    """An immutable, ordered sequence of ``(name, value)`` attribute pairs.

    The order of the attributes is preserved, while values may also be looked
    up by attribute name.

    >>> attrs = Attrs([('href', '#'), ('title', 'Foo')])
    >>> 'href' in attrs
    True
    >>> attrs.get('title')
    'Foo'

    Instances cannot be modified in place. Instead, the ``|`` operator merges
    in new or replacement attributes, and the ``-`` operator removes
    attributes by name; both return a new `Attrs` instance, leaving the
    original untouched.
    """
    __slots__ = []

    def __contains__(self, name):
        """Return whether an attribute with the given name is present.

        :return: `True` if the list includes the attribute
        :rtype: `bool`
        """
        for attr_name, _ in self:
            if attr_name == name:
                return True

    def __getslice__(self, i, j):
        """Slice the attribute list, preserving the `Attrs` type
        (Python 2 slicing hook).
        """
        return Attrs(tuple.__getslice__(self, i, j))

    def __or__(self, attrs):
        """Merge the ``(name, value)`` pairs in `attrs` into this list.

        Pairs whose name already exists replace the existing value in place;
        all other pairs are appended in order.

        :return: a new instance with the merged attributes
        :rtype: `Attrs`
        """
        replacements = {}
        additions = []
        for attr_name, value in attrs:
            if attr_name in self:
                replacements[attr_name] = value
            else:
                additions.append((attr_name, value))
        merged = [(attr_name, replacements.get(attr_name, value))
                  for attr_name, value in self]
        return Attrs(merged + additions)

    def __repr__(self):
        if not self:
            return 'Attrs()'
        return 'Attrs([%s])' % ', '.join([repr(pair) for pair in self])

    def __sub__(self, names):
        """Return a new instance with the named attribute(s) removed.

        :param names: a single attribute name, or a set/sequence of names
        :return: a new instance without the given attributes
        :rtype: `Attrs`
        """
        if isinstance(names, basestring):
            names = (names,)
        kept = [(attr_name, value) for attr_name, value in self
                if attr_name not in names]
        return Attrs(kept)

    def get(self, name, default=None):
        """Return the value of the named attribute, or `default` when no
        attribute with that name exists.

        :param name: the name of the attribute
        :param default: the value to return when the attribute does not exist
        :return: the attribute value, or the `default` value if that attribute
                 does not exist
        :rtype: `object`
        """
        for attr_name, value in self:
            if attr_name == name:
                return value
        return default

    def totuple(self):
        """Convert the attribute list into a single markup event.

        The result is a `TEXT` event whose data is the concatenation of all
        attribute values, with an unknown position.

        :return: a `TEXT` event
        :rtype: `tuple`
        """
        values = [value for _, value in self]
        return TEXT, u''.join(values), (None, -1, -1)
class Markup(unicode):
    """Marks a string as being safe for inclusion in HTML/XML output without
    needing to be escaped.
    """
    __slots__ = []

    def __add__(self, other):
        return Markup(unicode(self) + unicode(escape(other)))

    def __radd__(self, other):
        return Markup(unicode(escape(other)) + unicode(self))

    def __mod__(self, args):
        # Escape the interpolated values, never the template itself
        if isinstance(args, dict):
            args = dict(zip(args.keys(), map(escape, args.values())))
        elif isinstance(args, (list, tuple)):
            args = tuple(map(escape, args))
        else:
            args = escape(args)
        return Markup(unicode.__mod__(self, args))

    def __mul__(self, num):
        return Markup(unicode(self) * num)

    def __rmul__(self, num):
        return Markup(num * unicode(self))

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, unicode(self))

    def join(self, seq, escape_quotes=True):
        """Return a `Markup` object which is the concatenation of the strings
        in the given sequence, where this `Markup` object is the separator
        between the joined elements.

        Any element in the sequence that is not a `Markup` instance is
        automatically escaped.

        :param seq: the sequence of strings to join
        :param escape_quotes: whether double quote characters in the elements
                              should be escaped
        :return: the joined `Markup` object
        :rtype: `Markup`
        :see: `escape`
        """
        return Markup(unicode(self).join([escape(item, quotes=escape_quotes)
                                          for item in seq]))

    def escape(cls, text, quotes=True):
        """Create a Markup instance from a string and escape special characters
        it may contain (<, >, & and \").

        >>> escape('"1 < 2"')
        <Markup u'&#34;1 &lt; 2&#34;'>

        If the `quotes` parameter is set to `False`, the \" character is left
        as is. Escaping quotes is generally only required for strings that are
        to be used in attribute values.

        >>> escape('"1 < 2"', quotes=False)
        <Markup u'"1 &lt; 2"'>

        :param text: the text to escape
        :param quotes: if ``True``, double quote characters are escaped in
                       addition to the other special characters
        :return: the escaped `Markup` string
        :rtype: `Markup`
        """
        if not text:
            return cls()
        if type(text) is cls:
            return text
        if hasattr(text, '__html__'):
            return Markup(text.__html__())

        # Replacement targets below were mangled to identity replacements
        # ('&' -> '&') in this copy; restored to proper entity references
        text = unicode(text).replace('&', '&amp;') \
                            .replace('<', '&lt;') \
                            .replace('>', '&gt;')
        if quotes:
            text = text.replace('"', '&#34;')
        return cls(text)
    escape = classmethod(escape)

    def unescape(self):
        """Reverse-escapes &, <, >, and \" and returns a `unicode` object.

        >>> Markup('1 &lt; 2').unescape()
        u'1 < 2'

        :return: the unescaped string
        :rtype: `unicode`
        :see: `genshi.core.unescape`
        """
        if not self:
            return u''
        # Inverse of escape(); quote entity first so '&amp;' is decoded last
        return unicode(self).replace('&#34;', '"') \
                            .replace('&gt;', '>') \
                            .replace('&lt;', '<') \
                            .replace('&amp;', '&')

    def stripentities(self, keepxmlentities=False):
        """Return a copy of the text with any character or numeric entities
        replaced by the equivalent UTF-8 characters.

        If the `keepxmlentities` parameter is provided and evaluates to `True`,
        the core XML entities (``&amp;``, ``&apos;``, ``&gt;``, ``&lt;`` and
        ``&quot;``) are not stripped.

        :return: a `Markup` instance with entities removed
        :rtype: `Markup`
        :see: `genshi.util.stripentities`
        """
        return Markup(stripentities(self, keepxmlentities=keepxmlentities))

    def striptags(self):
        """Return a copy of the text with all XML/HTML tags removed.

        :return: a `Markup` instance with all tags removed
        :rtype: `Markup`
        :see: `genshi.util.striptags`
        """
        return Markup(striptags(self))
# Prefer the C implementation of Markup when the optional extension module is
# available; it replaces the pure-Python class defined above.
try:
    from genshi._speedups import Markup
except ImportError:
    pass # just use the Python implementation

# Module-level convenience alias for Markup.escape
escape = Markup.escape
def unescape(text):
    """Reverse-escapes &, <, >, and \" and returns a `unicode` object.

    >>> unescape(Markup('1 &lt; 2'))
    u'1 < 2'

    If the provided `text` object is not a `Markup` instance, it is returned
    unchanged.

    >>> unescape('1 &lt; 2')
    '1 &lt; 2'

    :param text: the text to unescape
    :return: the unescaped string
    :rtype: `unicode`
    """
    # Only Markup strings carry escaped entities; plain strings pass through
    if not isinstance(text, Markup):
        return text
    return text.unescape()
class Namespace(object):
    """Utility class creating and testing elements with a namespace.

    Internally, namespace URIs are encoded in the `QName` of any element or
    attribute, the namespace URI being enclosed in curly braces. This class
    helps create and test these strings.

    A `Namespace` object is instantiated with the namespace URI.

    >>> html = Namespace('http://www.w3.org/1999/xhtml')
    >>> html
    <Namespace "http://www.w3.org/1999/xhtml">
    >>> html.uri
    u'http://www.w3.org/1999/xhtml'

    The `Namespace` object can then be used to generate `QName` objects with
    that namespace:

    >>> html.body
    QName(u'http://www.w3.org/1999/xhtml}body')
    >>> html.body.localname
    u'body'
    >>> html.body.namespace
    u'http://www.w3.org/1999/xhtml'

    The same works using item access notation, which is useful for element or
    attribute names that are not valid Python identifiers:

    >>> html['body']
    QName(u'http://www.w3.org/1999/xhtml}body')

    A `Namespace` object can also be used to test whether a specific `QName`
    belongs to that namespace using the ``in`` operator:

    >>> qname = html.body
    >>> qname in html
    True
    >>> qname in Namespace('http://www.w3.org/2002/06/xhtml2')
    False
    """

    def __new__(cls, uri):
        # Passing an existing Namespace through returns it unchanged
        if type(uri) is cls:
            return uri
        return object.__new__(cls)

    # Pickling support: the instance state is just the URI
    def __getnewargs__(self):
        return (self.uri,)

    def __getstate__(self):
        return self.uri

    def __setstate__(self, uri):
        self.uri = uri

    def __init__(self, uri):
        self.uri = unicode(uri)

    def __contains__(self, qname):
        return qname.namespace == self.uri

    def __ne__(self, other):
        return not self == other

    def __eq__(self, other):
        # Compare by URI; also allow direct comparison against plain strings
        if isinstance(other, Namespace):
            return self.uri == other.uri
        return self.uri == other

    def __getitem__(self, name):
        return QName(self.uri + u'}' + name)
    __getattr__ = __getitem__

    def __repr__(self):
        # Restored: this had been mangled to "'' % self.uri", which raises
        # TypeError ("not all arguments converted") on every call
        return '<Namespace "%s">' % self.uri

    def __str__(self):
        return self.uri.encode('utf-8')

    def __unicode__(self):
        return self.uri
# The namespace used by attributes such as xml:lang and xml:space (the "xml"
# prefix predefined by the XML specification)
XML_NAMESPACE = Namespace('http://www.w3.org/XML/1998/namespace')
class QName(unicode):
    """A qualified element or attribute name.

    The unicode value of instances of this class contains the qualified name of
    the element or attribute, in the form ``{namespace-uri}local-name``. The
    namespace URI can be obtained through the additional `namespace` attribute,
    while the local name can be accessed through the `localname` attribute.

    >>> qname = QName('foo')
    >>> qname
    QName(u'foo')
    >>> qname.localname
    u'foo'
    >>> qname.namespace

    >>> qname = QName('http://www.w3.org/1999/xhtml}body')
    >>> qname
    QName(u'http://www.w3.org/1999/xhtml}body')
    >>> qname.localname
    u'body'
    >>> qname.namespace
    u'http://www.w3.org/1999/xhtml'
    """
    __slots__ = ['namespace', 'localname']

    def __new__(cls, qname):
        """Create the `QName` instance.

        :param qname: the qualified name as a string of the form
                      ``{namespace-uri}local-name``, where the leading curly
                      brace is optional
        """
        if type(qname) is cls:
            return qname

        # Bind the brace-stripped form before interpolating below; previously
        # the unstripped value was used, so an input with a leading "{" ended
        # up with a doubled brace in the string value (u'{{uri}name')
        qname = qname.lstrip(u'{')
        parts = qname.split(u'}', 1)
        if len(parts) > 1:
            self = unicode.__new__(cls, u'{%s' % qname)
            self.namespace, self.localname = map(unicode, parts)
        else:
            self = unicode.__new__(cls, qname)
            self.namespace, self.localname = None, unicode(qname)
        return self

    def __getnewargs__(self):
        # Pickle the brace-less form; __new__ reconstructs the rest
        return (self.lstrip('{'),)

    def __repr__(self):
        return 'QName(%s)' % unicode.__repr__(self.lstrip('{'))
PK q8?
Լ genshi/path.py# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Basic support for evaluating XPath expressions against streams.
>>> from genshi.input import XML
>>> doc = XML('''
...
... -
... Foo
...
... -
... Bar
...
... -
... Baz
...
... -
... Waz
...
...
... ''')
>>> print doc.select('items/item[@status="closed" and '
... '(@resolution="invalid" or not(@resolution))]/summary/text()')
BarBaz
Because the XPath engine operates on markup streams (as opposed to tree
structures), it only implements a subset of the full XPath 1.0 language.
"""
from math import ceil, floor
import operator
import re
from genshi.core import Stream, Attrs, Namespace, QName
from genshi.core import START, END, TEXT, START_NS, END_NS, COMMENT, PI, \
START_CDATA, END_CDATA
__all__ = ['Path', 'PathSyntaxError']
__docformat__ = 'restructuredtext en'
class Axis(object):
    """Constants identifying the XPath axes supported by this engine."""

    ATTRIBUTE = 'attribute'
    CHILD = 'child'
    DESCENDANT = 'descendant'
    DESCENDANT_OR_SELF = 'descendant-or-self'
    SELF = 'self'

    def forname(cls, name):
        """Look up the axis constant matching `name`, returning `None` when
        the name does not correspond to a supported axis.
        """
        attr = name.upper().replace('-', '_')
        return getattr(cls, attr, None)
    forname = classmethod(forname)
# Module-level aliases for the axis constants, used throughout this module
ATTRIBUTE = Axis.ATTRIBUTE
CHILD = Axis.CHILD
DESCENDANT = Axis.DESCENDANT
DESCENDANT_OR_SELF = Axis.DESCENDANT_OR_SELF
SELF = Axis.SELF
class Path(object):
    """Implements basic XPath support on streams.

    Instances of this class represent a "compiled" XPath expression, and
    provide methods for testing the path against a stream, as well as
    extracting a substream matching that path.
    """

    def __init__(self, text, filename=None, lineno=-1):
        """Create the path object from a string.

        :param text: the path expression
        :param filename: the name of the file in which the path expression was
                         found (used in error messages)
        :param lineno: the line on which the expression was found
        """
        self.source = text
        # Parse eagerly so syntax errors surface at construction time
        self.paths = PathParser(text, filename, lineno).parse()

    def __repr__(self):
        # Render each union operand as axis::nodetest[predicate]... steps
        paths = []
        for path in self.paths:
            steps = []
            for axis, nodetest, predicates in path:
                steps.append('%s::%s' % (axis, nodetest))
                for predicate in predicates:
                    steps[-1] += '[%s]' % predicate
            paths.append('/'.join(steps))
        return '<%s "%s">' % (self.__class__.__name__, '|'.join(paths))

    def select(self, stream, namespaces=None, variables=None):
        """Returns a substream of the given stream that matches the path.

        If there are no matches, this method returns an empty stream.

        NOTE(review): the markup in the doctest examples below was lost during
        recovery of this file.

        >>> from genshi.input import XML
        >>> xml = XML('Text')
        >>> print Path('.//child').select(xml)
        Text
        >>> print Path('.//child/text()').select(xml)
        Text

        :param stream: the stream to select from
        :param namespaces: (optional) a mapping of namespace prefixes to URIs
        :param variables: (optional) a mapping of variable names to values
        :return: the substream matching the path, or an empty stream
        :rtype: `Stream`
        """
        if namespaces is None:
            namespaces = {}
        if variables is None:
            variables = {}
        stream = iter(stream)
        def _generate():
            test = self.test()
            for event in stream:
                result = test(event, namespaces, variables)
                if result is True:
                    # A full element matched: yield it plus its entire
                    # subtree, keeping the tester's state up to date
                    yield event
                    if event[0] is START:
                        depth = 1
                        while depth > 0:
                            subevent = stream.next()
                            if subevent[0] is START:
                                depth += 1
                            elif subevent[0] is END:
                                depth -= 1
                            yield subevent
                            test(subevent, namespaces, variables,
                                 updateonly=True)
                elif result:
                    # A non-element match (attribute value, text, ...)
                    yield result
        return Stream(_generate(),
                      serializer=getattr(stream, 'serializer', None))

    def test(self, ignore_context=False):
        """Returns a function that can be used to track whether the path
        matches a specific stream event.

        The function returned expects the positional arguments ``event``,
        ``namespaces`` and ``variables``. The first is a stream event, while
        the latter two are a mapping of namespace prefixes to URIs, and a
        mapping of variable names to values, respectively. In addition, the
        function accepts an ``updateonly`` keyword argument that defaults to
        ``False``. If it is set to ``True``, the function only updates its
        internal state, but does not perform any tests or return a result.

        If the path matches the event, the function returns the match (for
        example, a `START` or `TEXT` event.) Otherwise, it returns ``None``.

        >>> from genshi.input import XML
        >>> xml = XML('')
        >>> test = Path('child').test()
        >>> for event in xml:
        ...     if test(event, {}, {}):
        ...         print event[0], repr(event[1])
        START (QName(u'child'), Attrs([(QName(u'id'), u'2')]))

        :param ignore_context: if `True`, the path is interpreted like a
                               pattern in XSLT, meaning for example that it
                               will match at any depth
        :return: a function that can be used to test individual events in a
                 stream against the path
        :rtype: ``function``
        """
        # One state tuple per union operand:
        # (steps, size, cursors, cutoff, counter)
        paths = [(p, len(p), [0], [], [0] * len(p)) for p in [
            (ignore_context and [_DOTSLASHSLASH] or []) + p for p in self.paths
        ]]

        def _test(event, namespaces, variables, updateonly=False):
            kind, data, pos = event[:3]
            retval = None
            for steps, size, cursors, cutoff, counter in paths:

                # Manage the stack that tells us "where we are" in the stream
                if kind is END:
                    if cursors:
                        cursors.pop()
                    continue
                elif kind is START:
                    cursors.append(cursors and cursors[-1] or 0)
                elif kind is START_NS or kind is END_NS \
                        or kind is START_CDATA or kind is END_CDATA:
                    # Namespace and CDATA events aren't addressable by XPath
                    continue

                if updateonly or retval or not cursors:
                    continue
                cursor = cursors[-1]
                depth = len(cursors)

                if cutoff and depth + int(kind is not START) > cutoff[0]:
                    continue

                ctxtnode = not ignore_context and kind is START \
                                              and depth == 2
                matched = None
                while 1:
                    # Fetch the next location step
                    axis, nodetest, predicates = steps[cursor]

                    # If this is the start event for the context node, and the
                    # axis of the location step doesn't include the current
                    # element, skip the test
                    if ctxtnode and (axis is CHILD or axis is DESCENDANT):
                        break

                    # Is this the last step of the location path?
                    last_step = cursor + 1 == size

                    # Perform the actual node test
                    matched = nodetest(kind, data, pos, namespaces, variables)

                    # The node test matched
                    if matched:

                        # Check all the predicates for this step
                        if predicates:
                            for predicate in predicates:
                                pretval = predicate(kind, data, pos, namespaces,
                                                    variables)
                                if type(pretval) is float: # FIXME <- need to
                                                           # check this for
                                                           # other types that
                                                           # can be coerced to
                                                           # float
                                    # Numeric predicate: a position test
                                    counter[cursor] += 1
                                    if counter[cursor] != int(pretval):
                                        pretval = False
                                if not pretval:
                                    matched = None
                                    break

                        # Both the node test and the predicates matched
                        if matched:
                            if last_step:
                                if not ctxtnode or kind is not START \
                                        or axis is ATTRIBUTE or axis is SELF:
                                    retval = matched
                            elif not ctxtnode or axis is SELF \
                                    or axis is DESCENDANT_OR_SELF:
                                cursor += 1
                                cursors[-1] = cursor
                            cutoff[:] = []

                        if kind is START:
                            if last_step and not (axis is DESCENDANT or
                                                  axis is DESCENDANT_OR_SELF):
                                cutoff[:] = [depth]
                            elif steps[cursor][0] is ATTRIBUTE:
                                # If the axis of the next location step is the
                                # attribute axis, we need to move on to
                                # processing that step without waiting for the
                                # next markup event
                                continue

                    # We're done with this step if it's the last step or the
                    # axis isn't "self"
                    if not matched or last_step or not (
                            axis is SELF or axis is DESCENDANT_OR_SELF):
                        break
                    if ctxtnode and axis is DESCENDANT_OR_SELF:
                        ctxtnode = False

                if (retval or not matched) and kind is START and \
                        not (axis is DESCENDANT or axis is DESCENDANT_OR_SELF):
                    # If this step is not a closure, it cannot be matched until
                    # the current element is closed... so we need to move the
                    # cursor back to the previous closure and retest that
                    # against the current element
                    backsteps = [(i, k, d, p) for i, (k, d, p)
                                 in enumerate(steps[:cursor])
                                 if k is DESCENDANT or k is DESCENDANT_OR_SELF]
                    backsteps.reverse()
                    for cursor, axis, nodetest, predicates in backsteps:
                        if nodetest(kind, data, pos, namespaces, variables):
                            cutoff[:] = []
                            break
                    cursors[-1] = cursor

            return retval

        return _test
class PathSyntaxError(Exception):
    """Exception raised when an XPath expression is syntactically incorrect."""

    def __init__(self, message, filename=None, lineno=-1, offset=-1):
        """Create the error.

        :param message: description of the syntax problem
        :param filename: name of the file the expression came from, if any
        :param lineno: line number of the expression within that file
        :param offset: column offset of the problem within the expression
        """
        annotated = message
        if filename:
            annotated = '%s (%s, line %d)' % (message, filename, lineno)
        Exception.__init__(self, annotated)
        self.filename = filename
        self.lineno = lineno
        self.offset = offset
class PathParser(object):
    """Tokenizes and parses an XPath expression."""

    # Recognized quote pairs for literal strings
    _QUOTES = (("'", "'"), ('"', '"'))
    # All operator/punctuation tokens, longest-match first
    _TOKENS = ('::', ':', '..', '.', '//', '/', '[', ']', '()', '(', ')', '@',
               '=', '!=', '!', '|', ',', '>=', '>', '<=', '<', '$')
    # Groups: double-quoted string | single-quoted string | number | operator
    # token | name; whitespace is matched but discarded
    _tokenize = re.compile('("[^"]*")|(\'[^\']*\')|((?:\d+)?\.\d+)|(%s)|([^%s\s]+)|\s+' % (
        '|'.join([re.escape(t) for t in _TOKENS]),
        ''.join([re.escape(t[0]) for t in _TOKENS]))).findall

    def __init__(self, text, filename=None, lineno=-1):
        """Tokenize `text` and prepare for parsing.

        :param text: the XPath expression
        :param filename: the name of the file the expression came from, used
                         in error messages
        :param lineno: the line number of the expression in that file
        """
        self.filename = filename
        self.lineno = lineno
        # Each findall() match fills exactly one group; keep the non-empty one
        # (Python 2 `filter` returns a list here)
        self.tokens = filter(None, [dqstr or sqstr or number or token or name
                                    for dqstr, sqstr, number, token, name in
                                    self._tokenize(text)])
        self.pos = 0
    # Tokenizer

    # Whether the current token is the last one
    at_end = property(lambda self: self.pos == len(self.tokens) - 1)
    # The token at the current position
    cur_token = property(lambda self: self.tokens[self.pos])

    def next_token(self):
        """Advance to the next token and return it."""
        self.pos += 1
        return self.tokens[self.pos]

    def peek_token(self):
        """Return the token after the current one without consuming it, or
        `None` when already at the last token.
        """
        if not self.at_end:
            return self.tokens[self.pos + 1]
        return None
    # Recursive descent parser

    def parse(self):
        """Parses the XPath expression and returns a list of location path
        tests.

        For union expressions (such as `*|text()`), this function returns one
        test for each operand in the union. For path expressions that don't
        use the union operator, the function always returns a list of size 1.

        Each path test in turn is a sequence of tests that correspond to the
        location steps, each tuples of the form `(axis, testfunc, predicates)`
        """
        paths = [self._location_path()]
        while self.cur_token == '|':
            self.next_token()
            paths.append(self._location_path())
        # All tokens must have been consumed by now
        if not self.at_end:
            raise PathSyntaxError('Unexpected token %r after end of expression'
                                  % self.cur_token, self.filename, self.lineno)
        return paths
    def _location_path(self):
        """Parse one location path (a sequence of slash-separated steps) and
        return it as a list of ``(axis, nodetest, predicates)`` tuples.
        """
        steps = []
        while True:
            # Consume a leading "/" or "//" separator; "//" inserts an
            # implicit descendant-or-self step
            if self.cur_token.startswith('/'):
                if self.cur_token == '//':
                    steps.append((DESCENDANT_OR_SELF, NodeTest(), []))
                elif not steps:
                    raise PathSyntaxError('Absolute location paths not '
                                          'supported', self.filename,
                                          self.lineno)
                self.next_token()

            axis, nodetest, predicates = self._location_step()
            if not axis:
                axis = CHILD
            steps.append((axis, nodetest, predicates))
            if self.at_end or not self.cur_token.startswith('/'):
                break

        return steps
def _location_step(self):
if self.cur_token == '@':
axis = ATTRIBUTE
self.next_token()
elif self.cur_token == '.':
axis = SELF
elif self.cur_token == '..':
raise PathSyntaxError('Unsupported axis "parent"', self.filename,
self.lineno)
elif self.peek_token() == '::':
axis = Axis.forname(self.cur_token)
if axis is None:
raise PathSyntaxError('Unsupport axis "%s"' % axis,
self.filename, self.lineno)
self.next_token()
self.next_token()
else:
axis = None
nodetest = self._node_test(axis or CHILD)
predicates = []
while self.cur_token == '[':
predicates.append(self._predicate())
return axis, nodetest, predicates
    def _node_test(self, axis=None):
        """Parse a node test (a name test or a node type test) and return the
        corresponding test object for the given axis.
        """
        test = prefix = None
        next_token = self.peek_token()
        if next_token in ('(', '()'):  # Node type test
            test = self._node_type()
        elif next_token == ':':  # Namespace prefix
            prefix = self.cur_token
            self.next_token()
            localname = self.next_token()
            if localname == '*':
                test = QualifiedPrincipalTypeTest(axis, prefix)
            else:
                test = QualifiedNameTest(axis, prefix, localname)
        else:  # Name test
            if self.cur_token == '*':
                test = PrincipalTypeTest(axis)
            elif self.cur_token == '.':
                test = NodeTest()
            else:
                test = LocalNameTest(axis, self.cur_token)
        if not self.at_end:
            self.next_token()
        return test
    def _node_type(self):
        """Parse a node type test such as `text()` or
        `processing-instruction("style")` and return the node test object.

        :raises PathSyntaxError: if the node type name is not recognized
        """
        name = self.cur_token
        self.next_token()
        args = []
        if self.cur_token != '()':
            # The processing-instruction() function optionally accepts the
            # name of the PI as argument, which must be a literal string
            self.next_token()  # (
            if self.cur_token != ')':
                string = self.cur_token
                if (string[0], string[-1]) in self._QUOTES:
                    string = string[1:-1]  # strip the surrounding quotes
                args.append(string)
        cls = _nodetest_map.get(name)
        if not cls:
            raise PathSyntaxError('%s() not allowed here' % name, self.filename,
                                  self.lineno)
        return cls(*args)
    def _predicate(self):
        """Parse one `[...]` predicate and return its expression tree.

        :raises PathSyntaxError: if the closing bracket is missing
        """
        assert self.cur_token == '['
        self.next_token()
        expr = self._or_expr()
        if self.cur_token != ']':
            raise PathSyntaxError('Expected "]" to close predicate, '
                                  'but found "%s"' % self.cur_token,
                                  self.filename, self.lineno)
        if not self.at_end:
            self.next_token()
        return expr
    def _or_expr(self):
        """Parse a chain of `or` expressions (lowest precedence level)."""
        expr = self._and_expr()
        while self.cur_token == 'or':
            self.next_token()
            expr = OrOperator(expr, self._and_expr())
        return expr
    def _and_expr(self):
        """Parse a chain of `and` expressions."""
        expr = self._equality_expr()
        while self.cur_token == 'and':
            self.next_token()
            expr = AndOperator(expr, self._equality_expr())
        return expr
    def _equality_expr(self):
        """Parse a chain of `=` / `!=` comparisons between sub-expressions."""
        expr = self._relational_expr()
        while self.cur_token in ('=', '!='):
            # Look up the operator node class for the comparison token.
            op = _operator_map[self.cur_token]
            self.next_token()
            expr = op(expr, self._relational_expr())
        return expr
def _relational_expr(self):
expr = self._sub_expr()
while self.cur_token in ('>', '>=', '<', '>='):
op = _operator_map[self.cur_token]
self.next_token()
expr = op(expr, self._sub_expr())
return expr
    def _sub_expr(self):
        """Parse either a parenthesized sub-expression or a primary
        expression.

        :raises PathSyntaxError: if the closing parenthesis is missing
        """
        token = self.cur_token
        if token != '(':
            return self._primary_expr()
        self.next_token()
        expr = self._or_expr()
        if self.cur_token != ')':
            raise PathSyntaxError('Expected ")" to close sub-expression, '
                                  'but found "%s"' % self.cur_token,
                                  self.filename, self.lineno)
        self.next_token()
        return expr
    def _primary_expr(self):
        """Parse a primary expression: a string or number literal, a variable
        reference, a function call, or a node test.
        """
        token = self.cur_token
        if len(token) > 1 and (token[0], token[-1]) in self._QUOTES:
            # Quoted string literal; strip the surrounding quotes.
            self.next_token()
            return StringLiteral(token[1:-1])
        elif token[0].isdigit() or token[0] == '.':
            self.next_token()
            return NumberLiteral(as_float(token))
        elif token == '$':
            token = self.next_token()  # the variable name follows the "$"
            self.next_token()
            return VariableReference(token)
        elif not self.at_end and self.peek_token().startswith('('):
            return self._function_call()
        else:
            axis = None
            if token == '@':
                axis = ATTRIBUTE
                self.next_token()
            return self._node_test(axis)
    def _function_call(self):
        """Parse a function call and return the corresponding function node
        with its argument expressions parsed.

        :raises PathSyntaxError: for an unsupported function name, or an
                                 unterminated argument list
        """
        name = self.cur_token
        if self.next_token() == '()':
            args = []  # an empty argument list is tokenized as a single "()"
        else:
            assert self.cur_token == '('
            self.next_token()
            args = [self._or_expr()]
            while self.cur_token == ',':
                self.next_token()
                args.append(self._or_expr())
            if not self.cur_token == ')':
                raise PathSyntaxError('Expected ")" to close function argument '
                                      'list, but found "%s"' % self.cur_token,
                                      self.filename, self.lineno)
        self.next_token()
        cls = _function_map.get(name)
        if not cls:
            raise PathSyntaxError('Unsupported function "%s"' % name,
                                  self.filename, self.lineno)
        return cls(*args)
# Type coercion
def as_scalar(value):
    """Convert value to a scalar. If a single element Attrs() object is passed
    the value of the single attribute will be returned.
    """
    if not isinstance(value, Attrs):
        return value
    # Exactly one (name, value) pair is expected; hand back the value part.
    assert len(value) == 1
    return value[0][1]
def as_float(value):
    """Convert value (scalarized first) to a float."""
    # FIXME - if value is a bool it will be coerced to 0.0 and consequently
    # compared as a float. This is probably not ideal.
    return float(as_scalar(value))
def as_long(value):
    """Convert value (scalarized first) to a long integer."""
    # NOTE: `long` exists only on Python 2; this module targets Python 2.
    return long(as_scalar(value))
def as_string(value):
    """Convert value (scalarized first) to a unicode string; the XPath string
    value of boolean `False` is the empty string.
    """
    value = as_scalar(value)
    if value is False:
        return u''
    return unicode(value)
def as_bool(value):
    """Convert value (scalarized first) to a boolean."""
    return bool(as_scalar(value))
# Node tests
class PrincipalTypeTest(object):
    """Node test that matches any event with the given principal type."""
    __slots__ = ['principal_type']

    def __init__(self, principal_type):
        self.principal_type = principal_type

    def __call__(self, kind, data, pos, namespaces, variables):
        # Only start-element events carry a principal type to match against.
        if kind is START:
            if self.principal_type is ATTRIBUTE:
                # Return the attribute list (truthy) or None when it is empty.
                return data[1] or None
            else:
                return True

    def __repr__(self):
        return '*'
class QualifiedPrincipalTypeTest(object):
    """Node test that matches any event with the given principal type in a
    specific namespace."""
    __slots__ = ['principal_type', 'prefix']

    def __init__(self, principal_type, prefix):
        self.principal_type = principal_type
        self.prefix = prefix

    def __call__(self, kind, data, pos, namespaces, variables):
        # Resolve the prefix against the in-scope namespace mapping.
        # NOTE(review): an unbound prefix yields Namespace(None) here rather
        # than an error -- confirm that is the intended behavior.
        namespace = Namespace(namespaces.get(self.prefix))
        if kind is START:
            if self.principal_type is ATTRIBUTE and data[1]:
                # Keep only the attributes that live in the namespace.
                return Attrs([(name, value) for name, value in data[1]
                              if name in namespace]) or None
            else:
                return data[0] in namespace

    def __repr__(self):
        return '%s:*' % self.prefix
class LocalNameTest(object):
    """Node test that matches any event with the given principal type and
    local name.
    """
    __slots__ = ['principal_type', 'name']

    def __init__(self, principal_type, name):
        self.principal_type = principal_type
        self.name = name

    def __call__(self, kind, data, pos, namespaces, variables):
        if kind is START:
            if self.principal_type is ATTRIBUTE and self.name in data[1]:
                # Return a one-element attribute list for the matched name.
                return Attrs([(self.name, data[1].get(self.name))])
            else:
                # Element match: compare local names, ignoring any namespace.
                return data[0].localname == self.name

    def __repr__(self):
        return self.name
class QualifiedNameTest(object):
    """Node test that matches any event with the given principal type and
    qualified name.
    """
    __slots__ = ['principal_type', 'prefix', 'name']

    def __init__(self, principal_type, prefix, name):
        self.principal_type = principal_type
        self.prefix = prefix
        self.name = name

    def __call__(self, kind, data, pos, namespaces, variables):
        # Expand prefix:name into a fully qualified name for this context.
        qname = QName('%s}%s' % (namespaces.get(self.prefix), self.name))
        if kind is START:
            if self.principal_type is ATTRIBUTE and qname in data[1]:
                # Bug fix: the attribute was matched under its qualified
                # name, so it must also be retrieved (and returned) under
                # that name; the bare local name may not be a key at all.
                return Attrs([(qname, data[1].get(qname))])
            else:
                return data[0] == qname

    def __repr__(self):
        return '%s:%s' % (self.prefix, self.name)
class CommentNodeTest(object):
    """Node test that matches any comment events."""
    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        # Matches on event kind alone; the comment text is irrelevant.
        return kind is COMMENT

    def __repr__(self):
        return 'comment()'
class NodeTest(object):
    """Node test that matches any node."""
    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        if kind is START:
            return True
        # For non-start events the whole event is the matched value.
        return kind, data, pos

    def __repr__(self):
        return 'node()'
class ProcessingInstructionNodeTest(object):
    """Node test that matches any processing instruction event."""
    __slots__ = ['target']

    def __init__(self, target=None):
        # Optional PI target; None matches any processing instruction.
        self.target = target

    def __call__(self, kind, data, pos, namespaces, variables):
        return kind is PI and (not self.target or data[0] == self.target)

    def __repr__(self):
        arg = ''
        if self.target:
            arg = '"' + self.target + '"'
        return 'processing-instruction(%s)' % arg
class TextNodeTest(object):
    """Node test that matches any text event."""
    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        return kind is TEXT

    def __repr__(self):
        return 'text()'
# Maps node type names used in path expressions to their node test classes.
_nodetest_map = {'comment': CommentNodeTest, 'node': NodeTest,
                 'processing-instruction': ProcessingInstructionNodeTest,
                 'text': TextNodeTest}
# Functions
class Function(object):
    """Base class for function nodes in XPath expressions."""
    # Marker base class only; concrete functions implement __call__.
class BooleanFunction(Function):
    """The `boolean` function, which converts its argument to a boolean
    value.
    """
    __slots__ = ['expr']

    def __init__(self, expr):
        self.expr = expr

    def __call__(self, kind, data, pos, namespaces, variables):
        # Evaluate the wrapped expression for this event, then coerce.
        val = self.expr(kind, data, pos, namespaces, variables)
        return as_bool(val)

    def __repr__(self):
        return 'boolean(%r)' % self.expr
class CeilingFunction(Function):
    """The `ceiling` function, which returns the smallest integer number
    that is not less than the given number.
    """
    __slots__ = ['number']

    def __init__(self, number):
        self.number = number

    def __call__(self, kind, data, pos, namespaces, variables):
        number = self.number(kind, data, pos, namespaces, variables)
        return ceil(as_float(number))

    def __repr__(self):
        return 'ceiling(%r)' % self.number
class ConcatFunction(Function):
    """The `concat` function, which concatenates (joins) the variable number of
    strings it gets as arguments.
    """
    __slots__ = ['exprs']

    def __init__(self, *exprs):
        self.exprs = exprs

    def __call__(self, kind, data, pos, namespaces, variables):
        # Evaluate every argument against the current event, coerce each to
        # a string, and join the results without a separator.
        return u''.join([as_string(expr(kind, data, pos, namespaces, variables))
                         for expr in self.exprs])

    def __repr__(self):
        return 'concat(%s)' % ', '.join([repr(expr) for expr in self.exprs])
class ContainsFunction(Function):
    """The `contains` function, which returns whether a string contains a given
    substring.
    """
    __slots__ = ['string1', 'string2']

    def __init__(self, string1, string2):
        self.string1 = string1
        self.string2 = string2

    def __call__(self, kind, data, pos, namespaces, variables):
        string1 = self.string1(kind, data, pos, namespaces, variables)
        string2 = self.string2(kind, data, pos, namespaces, variables)
        # Substring test on the string-coerced operands.
        return as_string(string2) in as_string(string1)

    def __repr__(self):
        return 'contains(%r, %r)' % (self.string1, self.string2)
class MatchesFunction(Function):
    """The `matches` function, which returns whether a string matches a regular
    expression.
    """
    __slots__ = ['string1', 'string2', 'flags']
    # Maps single-character XPath regex flags to `re` module flags.
    flag_mapping = {'s': re.S, 'm': re.M, 'i': re.I, 'x': re.X}

    def __init__(self, string1, string2, flags=''):
        self.string1 = string1
        self.string2 = string2
        # NOTE(review): a third argument supplied in an actual path would be
        # a parsed expression node, not a plain string -- confirm how flags
        # are expected to arrive before relying on them.
        self.flags = self._map_flags(flags)

    def __call__(self, kind, data, pos, namespaces, variables):
        string1 = as_string(self.string1(kind, data, pos, namespaces, variables))
        string2 = as_string(self.string2(kind, data, pos, namespaces, variables))
        return re.search(string2, string1, self.flags)

    def _map_flags(self, flags):
        """Fold the single-character flags into one `re` flag bitmask.

        Bug fix: this previously read `self.flag_map`, but the class
        attribute is named `flag_mapping`, so any use of flags raised
        AttributeError.  re.U is always enabled.
        """
        combined = re.U
        for flag in flags:
            combined |= self.flag_mapping[flag]
        return combined

    def __repr__(self):
        # Bug fix: this previously printed "contains(...)".
        return 'matches(%r, %r)' % (self.string1, self.string2)
class FalseFunction(Function):
    """The `false` function, which always returns the boolean `false` value."""
    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        # Constant result; the context arguments are ignored.
        return False

    def __repr__(self):
        return 'false()'
class FloorFunction(Function):
    """The `floor` function, which returns the largest integer number that
    is not greater than the given number.
    """
    __slots__ = ['number']

    def __init__(self, number):
        self.number = number

    def __call__(self, kind, data, pos, namespaces, variables):
        number = self.number(kind, data, pos, namespaces, variables)
        return floor(as_float(number))

    def __repr__(self):
        return 'floor(%r)' % self.number
class LocalNameFunction(Function):
    """The `local-name` function, which returns the local name of the current
    element.
    """
    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        # Only meaningful on start-element events; otherwise returns None.
        if kind is START:
            return data[0].localname

    def __repr__(self):
        return 'local-name()'
class NameFunction(Function):
    """The `name` function, which returns the qualified name of the current
    element.
    """
    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        if kind is START:
            return data[0]

    def __repr__(self):
        return 'name()'
class NamespaceUriFunction(Function):
    """The `namespace-uri` function, which returns the namespace URI of the
    current element.
    """
    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        if kind is START:
            return data[0].namespace

    def __repr__(self):
        return 'namespace-uri()'
class NotFunction(Function):
    """The `not` function, which returns the negated boolean value of its
    argument.
    """
    __slots__ = ['expr']

    def __init__(self, expr):
        self.expr = expr

    def __call__(self, kind, data, pos, namespaces, variables):
        return not as_bool(self.expr(kind, data, pos, namespaces, variables))

    def __repr__(self):
        return 'not(%s)' % self.expr
class NormalizeSpaceFunction(Function):
    """The `normalize-space` function, which removes leading and trailing
    whitespace in the given string, and replaces multiple adjacent whitespace
    characters inside the string with a single space.
    """
    __slots__ = ['expr']
    # Bug fix: match runs of *one* or more whitespace characters, not two or
    # more, so a lone tab or newline inside the string is also folded to a
    # single space as the XPath spec requires.
    _normalize = re.compile(r'\s+').sub

    def __init__(self, expr):
        self.expr = expr

    def __call__(self, kind, data, pos, namespaces, variables):
        string = self.expr(kind, data, pos, namespaces, variables)
        return self._normalize(' ', as_string(string).strip())

    def __repr__(self):
        return 'normalize-space(%s)' % repr(self.expr)
class NumberFunction(Function):
    """The `number` function that converts its argument to a number."""
    __slots__ = ['expr']

    def __init__(self, expr):
        self.expr = expr

    def __call__(self, kind, data, pos, namespaces, variables):
        val = self.expr(kind, data, pos, namespaces, variables)
        return as_float(val)

    def __repr__(self):
        return 'number(%r)' % self.expr
class RoundFunction(Function):
    """The `round` function, which returns the nearest integer number for the
    given number.
    """
    __slots__ = ['number']

    def __init__(self, number):
        self.number = number

    def __call__(self, kind, data, pos, namespaces, variables):
        number = self.number(kind, data, pos, namespaces, variables)
        # NOTE(review): relies on Python's round(), whose tie-breaking
        # differs from the XPath "round half toward +inf" rule -- confirm.
        return round(as_float(number))

    def __repr__(self):
        return 'round(%r)' % self.number
class StartsWithFunction(Function):
    """The `starts-with` function that returns whether one string starts with
    a given substring.
    """
    __slots__ = ['string1', 'string2']

    def __init__(self, string1, string2):
        self.string1 = string1
        self.string2 = string2

    def __call__(self, kind, data, pos, namespaces, variables):
        string1 = self.string1(kind, data, pos, namespaces, variables)
        string2 = self.string2(kind, data, pos, namespaces, variables)
        return as_string(string1).startswith(as_string(string2))

    def __repr__(self):
        return 'starts-with(%r, %r)' % (self.string1, self.string2)
class StringLengthFunction(Function):
    """The `string-length` function that returns the length of the given
    string.
    """
    __slots__ = ['expr']

    def __init__(self, expr):
        self.expr = expr

    def __call__(self, kind, data, pos, namespaces, variables):
        string = self.expr(kind, data, pos, namespaces, variables)
        return len(as_string(string))

    def __repr__(self):
        return 'string-length(%r)' % self.expr
class SubstringFunction(Function):
    """The `substring` function that returns the part of a string that starts
    at the given offset, and optionally limited to the given length.
    """
    __slots__ = ['string', 'start', 'length']

    def __init__(self, string, start, length=None):
        self.string = string
        self.start = start
        self.length = length  # None means "through the end of the string"

    def __call__(self, kind, data, pos, namespaces, variables):
        string = as_string(self.string(kind, data, pos, namespaces, variables))
        start = as_long(self.start(kind, data, pos, namespaces, variables))
        # NOTE(review): offsets here are 0-based, whereas XPath substring()
        # is specified as 1-based -- preserved as-is; confirm with callers.
        if self.length is None:
            return string[start:]
        # Bug fix: the length argument previously cut `length` characters
        # off the *end* of the string instead of limiting the result to
        # `length` characters starting at `start`.
        length = as_long(self.length(kind, data, pos, namespaces, variables))
        return string[start:start + length]

    def __repr__(self):
        if self.length is not None:
            return 'substring(%r, %r, %r)' % (self.string, self.start,
                                              self.length)
        else:
            return 'substring(%r, %r)' % (self.string, self.start)
class SubstringAfterFunction(Function):
    """The `substring-after` function that returns the part of a string that
    is found after the given substring.
    """
    __slots__ = ['string1', 'string2']

    def __init__(self, string1, string2):
        self.string1 = string1
        self.string2 = string2

    def __call__(self, kind, data, pos, namespaces, variables):
        string1 = as_string(self.string1(kind, data, pos, namespaces, variables))
        string2 = as_string(self.string2(kind, data, pos, namespaces, variables))
        index = string1.find(string2)
        if index >= 0:
            # Everything after the first occurrence of the substring.
            return string1[index + len(string2):]
        return u''

    def __repr__(self):
        return 'substring-after(%r, %r)' % (self.string1, self.string2)
class SubstringBeforeFunction(Function):
    """The `substring-before` function that returns the part of a string that
    is found before the given substring.
    """
    __slots__ = ['string1', 'string2']

    def __init__(self, string1, string2):
        self.string1 = string1
        self.string2 = string2

    def __call__(self, kind, data, pos, namespaces, variables):
        string1 = as_string(self.string1(kind, data, pos, namespaces, variables))
        string2 = as_string(self.string2(kind, data, pos, namespaces, variables))
        index = string1.find(string2)
        if index >= 0:
            # Everything before the first occurrence of the substring.
            return string1[:index]
        return u''

    def __repr__(self):
        # Bug fix: this previously printed "substring-after(...)".
        return 'substring-before(%r, %r)' % (self.string1, self.string2)
class TranslateFunction(Function):
    """The `translate` function that translates a set of characters in a
    string to target set of characters.
    """
    __slots__ = ['string', 'fromchars', 'tochars']

    def __init__(self, string, fromchars, tochars):
        self.string = string
        self.fromchars = fromchars
        self.tochars = tochars

    def __call__(self, kind, data, pos, namespaces, variables):
        string = as_string(self.string(kind, data, pos, namespaces, variables))
        fromchars = as_string(self.fromchars(kind, data, pos, namespaces, variables))
        tochars = as_string(self.tochars(kind, data, pos, namespaces, variables))
        table = {}
        for idx, char in enumerate(fromchars):
            if idx < len(tochars):
                table[ord(char)] = ord(tochars[idx])
            else:
                # Bug fix: XPath specifies that characters in `fromchars`
                # with no counterpart in `tochars` are *removed*; mapping
                # them to None makes translate() delete them (previously
                # zip() silently dropped the mapping and kept them).
                table[ord(char)] = None
        return string.translate(table)

    def __repr__(self):
        return 'translate(%r, %r, %r)' % (self.string, self.fromchars,
                                          self.tochars)
class TrueFunction(Function):
    """The `true` function, which always returns the boolean `true` value."""
    __slots__ = []

    def __call__(self, kind, data, pos, namespaces, variables):
        # Constant result; the context arguments are ignored.
        return True

    def __repr__(self):
        return 'true()'
# Maps XPath function names to their implementation classes; consulted by
# the parser's _function_call().
_function_map = {'boolean': BooleanFunction, 'ceiling': CeilingFunction,
                 'concat': ConcatFunction, 'contains': ContainsFunction,
                 'matches': MatchesFunction, 'false': FalseFunction, 'floor':
                 FloorFunction, 'local-name': LocalNameFunction, 'name':
                 NameFunction, 'namespace-uri': NamespaceUriFunction,
                 'normalize-space': NormalizeSpaceFunction, 'not': NotFunction,
                 'number': NumberFunction, 'round': RoundFunction,
                 'starts-with': StartsWithFunction, 'string-length':
                 StringLengthFunction, 'substring': SubstringFunction,
                 'substring-after': SubstringAfterFunction, 'substring-before':
                 SubstringBeforeFunction, 'translate': TranslateFunction,
                 'true': TrueFunction}
# Literals & Variables
class Literal(object):
    """Abstract base class for literal nodes."""
    # Marker base class only; concrete literals implement __call__.
class StringLiteral(Literal):
    """A string literal node."""
    __slots__ = ['text']

    def __init__(self, text):
        # The literal text, with the surrounding quotes already stripped.
        self.text = text

    def __call__(self, kind, data, pos, namespaces, variables):
        return self.text

    def __repr__(self):
        return '"%s"' % self.text
class NumberLiteral(Literal):
    """A number literal node."""
    __slots__ = ['number']

    def __init__(self, number):
        # The numeric value, already coerced to float by the parser.
        self.number = number

    def __call__(self, kind, data, pos, namespaces, variables):
        return self.number

    def __repr__(self):
        return str(self.number)
class VariableReference(Literal):
    """A variable reference node."""
    __slots__ = ['name']

    def __init__(self, name):
        self.name = name

    def __call__(self, kind, data, pos, namespaces, variables):
        # Undefined variables evaluate to None rather than raising.
        return variables.get(self.name)

    def __repr__(self):
        return str(self.name)
# Operators
class AndOperator(object):
    """The boolean operator `and`."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Short-circuits: the right operand is not evaluated when the left
        # operand is already false.
        lval = as_bool(self.lval(kind, data, pos, namespaces, variables))
        if not lval:
            return False
        rval = self.rval(kind, data, pos, namespaces, variables)
        return as_bool(rval)

    def __repr__(self):
        return '%s and %s' % (self.lval, self.rval)
class EqualsOperator(object):
    """The equality operator `=`."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Both operands are reduced to scalars before comparison.
        lval = as_scalar(self.lval(kind, data, pos, namespaces, variables))
        rval = as_scalar(self.rval(kind, data, pos, namespaces, variables))
        return lval == rval

    def __repr__(self):
        return '%s=%s' % (self.lval, self.rval)
class NotEqualsOperator(object):
    """The equality operator `!=`."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Both operands are reduced to scalars before comparison.
        lval = as_scalar(self.lval(kind, data, pos, namespaces, variables))
        rval = as_scalar(self.rval(kind, data, pos, namespaces, variables))
        return lval != rval

    def __repr__(self):
        return '%s!=%s' % (self.lval, self.rval)
class OrOperator(object):
    """The boolean operator `or`."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Short-circuits: the right operand is not evaluated when the left
        # operand is already true.
        lval = as_bool(self.lval(kind, data, pos, namespaces, variables))
        if lval:
            return True
        rval = self.rval(kind, data, pos, namespaces, variables)
        return as_bool(rval)

    def __repr__(self):
        return '%s or %s' % (self.lval, self.rval)
class GreaterThanOperator(object):
    """The relational operator `>` (greater than)."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Relational comparisons are numeric: both sides become floats.
        lval = self.lval(kind, data, pos, namespaces, variables)
        rval = self.rval(kind, data, pos, namespaces, variables)
        return as_float(lval) > as_float(rval)

    def __repr__(self):
        return '%s>%s' % (self.lval, self.rval)
class GreaterThanOrEqualOperator(object):
    """The relational operator `>=` (greater than or equal)."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Relational comparisons are numeric: both sides become floats.
        lval = self.lval(kind, data, pos, namespaces, variables)
        rval = self.rval(kind, data, pos, namespaces, variables)
        return as_float(lval) >= as_float(rval)

    def __repr__(self):
        return '%s>=%s' % (self.lval, self.rval)
class LessThanOperator(object):
    """The relational operator `<` (less than)."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Relational comparisons are numeric: both sides become floats.
        lval = self.lval(kind, data, pos, namespaces, variables)
        rval = self.rval(kind, data, pos, namespaces, variables)
        return as_float(lval) < as_float(rval)

    def __repr__(self):
        return '%s<%s' % (self.lval, self.rval)
class LessThanOrEqualOperator(object):
    """The relational operator `<=` (less than or equal)."""
    __slots__ = ['lval', 'rval']

    def __init__(self, lval, rval):
        self.lval = lval
        self.rval = rval

    def __call__(self, kind, data, pos, namespaces, variables):
        # Relational comparisons are numeric: both sides become floats.
        lval = self.lval(kind, data, pos, namespaces, variables)
        rval = self.rval(kind, data, pos, namespaces, variables)
        return as_float(lval) <= as_float(rval)

    def __repr__(self):
        return '%s<=%s' % (self.lval, self.rval)
# Maps comparison tokens to their operator node classes.  Bug fix: the "<="
# entry was mistyped as a second ">=" key, which both dropped "<=" entirely
# and silently overrode ">=" with LessThanOrEqualOperator (in a dict literal
# the later duplicate key wins).
_operator_map = {'=': EqualsOperator, '!=': NotEqualsOperator,
                 '>': GreaterThanOperator, '>=': GreaterThanOrEqualOperator,
                 '<': LessThanOperator, '<=': LessThanOrEqualOperator}
# Prebuilt step used for path expressions starting with ".//": descend from
# the context node matching any element.
_DOTSLASHSLASH = (DESCENDANT_OR_SELF, PrincipalTypeTest(None), ())
PK q8AKD D genshi/util.py# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Various utility classes and functions."""
import htmlentitydefs
import re
try:
set
except NameError:
from sets import ImmutableSet as frozenset
from sets import Set as set
__docformat__ = 'restructuredtext en'
class LRUCache(dict):
    """A dictionary-like object that stores only a certain number of items, and
    discards its least recently used item when full.

    >>> cache = LRUCache(3)
    >>> cache['A'] = 0
    >>> cache['B'] = 1
    >>> cache['C'] = 2
    >>> len(cache)
    3

    >>> cache['A']
    0

    Adding new items to the cache does not increase its size. Instead, the least
    recently used item is dropped:

    >>> cache['D'] = 3
    >>> len(cache)
    3
    >>> 'B' in cache
    False

    Iterating over the cache returns the keys, starting with the most recently
    used:

    >>> for key in cache:
    ...     print key
    D
    A
    C

    This code is based on the LRUCache class from ``myghtyutils.util``, written
    by Mike Bayer and released under the MIT license. See:

      http://svn.myghty.org/myghtyutils/trunk/lib/myghtyutils/util.py
    """

    class _Item(object):
        # Node of the internal doubly-linked recency list: `previous` points
        # toward the head (most recently used), `next` toward the tail.
        def __init__(self, key, value):
            self.previous = self.next = None
            self.key = key
            self.value = value
        def __repr__(self):
            return repr(self.value)

    def __init__(self, capacity):
        # Maps keys to _Item nodes; the nodes carry the recency ordering.
        self._dict = dict()
        self.capacity = capacity
        self.head = None  # most recently used item
        self.tail = None  # least recently used item

    def __contains__(self, key):
        return key in self._dict

    def __iter__(self):
        # Walk the linked list from most to least recently used.
        cur = self.head
        while cur:
            yield cur.key
            cur = cur.next

    def __len__(self):
        return len(self._dict)

    def __getitem__(self, key):
        item = self._dict[key]
        # A successful lookup counts as a use: move the item to the head.
        self._update_item(item)
        return item.value

    def __setitem__(self, key, value):
        item = self._dict.get(key)
        if item is None:
            # New key: insert at the head of the recency list.
            item = self._Item(key, value)
            self._dict[key] = item
            self._insert_item(item)
        else:
            # Existing key: update in place and mark as recently used.
            item.value = value
            self._update_item(item)
            self._manage_size()

    def __repr__(self):
        return repr(self._dict)

    def _insert_item(self, item):
        # Link a fresh item in at the head of the list.
        item.previous = None
        item.next = self.head
        if self.head is not None:
            self.head.previous = item
        else:
            self.tail = item
        self.head = item
        self._manage_size()

    def _manage_size(self):
        # Evict items from the tail while the cache exceeds its capacity.
        while len(self._dict) > self.capacity:
            olditem = self._dict[self.tail.key]
            del self._dict[self.tail.key]
            if self.tail != self.head:
                self.tail = self.tail.previous
                self.tail.next = None
            else:
                self.head = self.tail = None

    def _update_item(self, item):
        # Move an accessed item to the head of the recency list.
        if self.head == item:
            return
        previous = item.previous
        previous.next = item.next
        if item.next is not None:
            item.next.previous = previous
        else:
            self.tail = previous
        item.previous = None
        item.next = self.head
        self.head.previous = self.head = item
def flatten(items):
    """Flattens a potentially nested sequence into a flat list.

    :param items: the sequence to flatten

    >>> flatten((1, 2))
    [1, 2]
    >>> flatten([1, (2, 3), 4])
    [1, 2, 3, 4]
    >>> flatten([1, (2, [3, 4]), 5])
    [1, 2, 3, 4, 5]
    """
    flat = []
    for element in items:
        if isinstance(element, (frozenset, list, set, tuple)):
            # Recurse into nested containers, splicing their items in place.
            flat.extend(flatten(element))
        else:
            flat.append(element)
    return flat
def plaintext(text, keeplinebreaks=True):
    """Returns the text as a `unicode` string with all entities and tags
    removed.

    >>> plaintext('1 &lt; 2')
    u'1 < 2'

    The `keeplinebreaks` parameter can be set to ``False`` to replace any line
    breaks by simple spaces:

    >>> plaintext('''1
    ... &lt;
    ... 2''', keeplinebreaks=False)
    u'1 < 2'

    :param text: the text to convert to plain text
    :param keeplinebreaks: whether line breaks in the text should be kept intact
    :return: the text with tags and entities removed
    """
    # Strip markup first, then decode whatever entities remain.
    stripped = stripentities(striptags(text))
    if keeplinebreaks:
        return stripped
    return stripped.replace(u'\n', u' ')
_STRIPENTITIES_RE = re.compile(r'&(?:#((?:\d+)|(?:[xX][0-9a-fA-F]+));?|(\w+);)')
def stripentities(text, keepxmlentities=False):
    """Return a copy of the given text with any character or numeric entities
    replaced by the equivalent UTF-8 characters.

    >>> stripentities('1 &lt; 2')
    u'1 < 2'
    >>> stripentities('more &#8230;')
    u'more \u2026'
    >>> stripentities('&#x2026;')
    u'\u2026'
    >>> stripentities('&hellip;')
    u'\u2026'

    If the `keepxmlentities` parameter is provided and is a truth value, the
    core XML entities (&amp;, &apos;, &gt;, &lt; and &quot;) are left intact.

    >>> stripentities('1 &lt; 2 &hellip;', keepxmlentities=True)
    u'1 &lt; 2 \u2026'
    """
    def _replace_entity(match):
        if match.group(1): # numeric entity
            ref = match.group(1)
            # Bug fix: the pattern accepts both "&#x...;" and "&#X...;" hex
            # references, but only lowercase "x" was recognized here, so an
            # uppercase reference crashed in int(ref, 10).
            if ref.startswith('x') or ref.startswith('X'):
                ref = int(ref[1:], 16)
            else:
                ref = int(ref, 10)
            return unichr(ref)
        else: # character entity
            ref = match.group(2)
            if keepxmlentities and ref in ('amp', 'apos', 'gt', 'lt', 'quot'):
                return u'&%s;' % ref
            try:
                return unichr(htmlentitydefs.name2codepoint[ref])
            except KeyError:
                if keepxmlentities:
                    # Unknown named entity: re-emit it so XML stays valid.
                    return u'&%s;' % ref
                else:
                    return ref
    return _STRIPENTITIES_RE.sub(_replace_entity, text)
# Bug fix: the pattern had lost its comment alternative, leaving an empty
# first branch ("(|<[^>]*>)") that matched the empty string at every
# position -- so the tag branch was never reached and nothing was stripped.
_STRIPTAGS_RE = re.compile(r'(<!--.*?-->|<[^>]*>)')
def striptags(text):
    """Return a copy of the text with any XML/HTML tags removed.

    >>> striptags('<span>Foo</span> bar')
    'Foo bar'
    >>> striptags('<span class="bar">Foo</span>')
    'Foo'
    >>> striptags('Foo<br />')
    'Foo'

    HTML/XML comments are stripped, too:

    >>> striptags('<!-- <blink>commented</blink> -->test')
    'test'

    :param text: the string to remove tags from
    :return: the text with tags removed
    """
    return _STRIPTAGS_RE.sub('', text)
PK q8> > genshi/input.py# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Support for constructing markup streams from files, strings, or other
sources.
"""
from itertools import chain
from xml.parsers import expat
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
import HTMLParser as html
import htmlentitydefs
from StringIO import StringIO
from genshi.core import Attrs, QName, Stream, stripentities
from genshi.core import START, END, XML_DECL, DOCTYPE, TEXT, START_NS, END_NS, \
START_CDATA, END_CDATA, PI, COMMENT
__all__ = ['ET', 'ParseError', 'XMLParser', 'XML', 'HTMLParser', 'HTML']
__docformat__ = 'restructuredtext en'
def ET(element):
    """Convert a given ElementTree element to a markup stream.

    :param element: an ElementTree element
    :return: a markup stream
    """
    # ElementTree spells qualified tags "{uri}local" while QName expects
    # "uri}local", so only the leading brace is stripped.
    tag_name = QName(element.tag.lstrip('{'))
    attrs = Attrs([(QName(attr.lstrip('{')), value)
                   for attr, value in element.items()])

    yield START, (tag_name, attrs), (None, -1, -1)
    if element.text:
        yield TEXT, element.text, (None, -1, -1)
    # Recursively stream each child element, then this element's tail text.
    for child in element.getchildren():
        for item in ET(child):
            yield item
    yield END, tag_name, (None, -1, -1)
    if element.tail:
        yield TEXT, element.tail, (None, -1, -1)
class ParseError(Exception):
    """Exception raised when fatal syntax errors are found in the input being
    parsed.
    """

    def __init__(self, message, filename=None, lineno=-1, offset=-1):
        """Exception initializer.

        :param message: the error message from the parser
        :param filename: the path to the file that was parsed
        :param lineno: the number of the line on which the error was
                       encountered
        :param offset: the column number where the error was encountered
        """
        # Keep the bare parser message separately from the displayed one.
        self.msg = message
        full_message = message
        if filename:
            full_message = '%s, in %s' % (message, filename)
        Exception.__init__(self, full_message)
        self.filename = filename or ''
        self.lineno = lineno
        self.offset = offset
class XMLParser(object):
"""Generator-based XML parser based on roughly equivalent code in
Kid/ElementTree.
The parsing is initiated by iterating over the parser object:
>>> parser = XMLParser(StringIO('Foo'))
>>> for kind, data, pos in parser:
... print kind, data
START (QName(u'root'), Attrs([(QName(u'id'), u'2')]))
START (QName(u'child'), Attrs())
TEXT Foo
END child
END root
"""
_entitydefs = ['' % (name, value) for name, value in
htmlentitydefs.name2codepoint.items()]
_external_dtd = '\n'.join(_entitydefs)
def __init__(self, source, filename=None, encoding=None):
"""Initialize the parser for the given XML input.
:param source: the XML text as a file-like object
:param filename: the name of the file, if appropriate
:param encoding: the encoding of the file; if not specified, the
encoding is assumed to be ASCII, UTF-8, or UTF-16, or
whatever the encoding specified in the XML declaration
(if any)
"""
self.source = source
self.filename = filename
# Setup the Expat parser
parser = expat.ParserCreate(encoding, '}')
parser.buffer_text = True
parser.returns_unicode = True
parser.ordered_attributes = True
parser.StartElementHandler = self._handle_start
parser.EndElementHandler = self._handle_end
parser.CharacterDataHandler = self._handle_data
parser.StartDoctypeDeclHandler = self._handle_doctype
parser.StartNamespaceDeclHandler = self._handle_start_ns
parser.EndNamespaceDeclHandler = self._handle_end_ns
parser.StartCdataSectionHandler = self._handle_start_cdata
parser.EndCdataSectionHandler = self._handle_end_cdata
parser.ProcessingInstructionHandler = self._handle_pi
parser.XmlDeclHandler = self._handle_xml_decl
parser.CommentHandler = self._handle_comment
# Tell Expat that we'll handle non-XML entities ourselves
# (in _handle_other)
parser.DefaultHandler = self._handle_other
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.UseForeignDTD()
parser.ExternalEntityRefHandler = self._build_foreign
# Location reporting is only support in Python >= 2.4
if not hasattr(parser, 'CurrentLineNumber'):
self._getpos = self._getpos_unknown
self.expat = parser
self._queue = []
def parse(self):
"""Generator that parses the XML source, yielding markup events.
:return: a markup event stream
:raises ParseError: if the XML text is not well formed
"""
def _generate():
try:
bufsize = 4 * 1024 # 4K
done = False
while 1:
while not done and len(self._queue) == 0:
data = self.source.read(bufsize)
if data == '': # end of data
if hasattr(self, 'expat'):
self.expat.Parse('', True)
del self.expat # get rid of circular references
done = True
else:
if isinstance(data, unicode):
data = data.encode('utf-8')
self.expat.Parse(data, False)
for event in self._queue:
yield event
self._queue = []
if done:
break
except expat.ExpatError, e:
msg = str(e)
raise ParseError(msg, self.filename, e.lineno, e.offset)
return Stream(_generate()).filter(_coalesce)
def __iter__(self):
return iter(self.parse())
def _build_foreign(self, context, base, sysid, pubid):
parser = self.expat.ExternalEntityParserCreate(context)
parser.ParseFile(StringIO(self._external_dtd))
return 1
def _enqueue(self, kind, data=None, pos=None):
if pos is None:
pos = self._getpos()
if kind is TEXT:
# Expat reports the *end* of the text event as current position. We
# try to fix that up here as much as possible. Unfortunately, the
# offset is only valid for single-line text. For multi-line text,
# it is apparently not possible to determine at what offset it
# started
if '\n' in data:
lines = data.splitlines()
lineno = pos[1] - len(lines) + 1
offset = -1
else:
lineno = pos[1]
offset = pos[2] - len(data)
pos = (pos[0], lineno, offset)
self._queue.append((kind, data, pos))
def _getpos_unknown(self):
return (self.filename, -1, -1)
def _getpos(self):
return (self.filename, self.expat.CurrentLineNumber,
self.expat.CurrentColumnNumber)
def _handle_start(self, tag, attrib):
attrs = Attrs([(QName(name), value) for name, value in
zip(*[iter(attrib)] * 2)])
self._enqueue(START, (QName(tag), attrs))
def _handle_end(self, tag):
self._enqueue(END, QName(tag))
def _handle_data(self, text):
self._enqueue(TEXT, text)
def _handle_xml_decl(self, version, encoding, standalone):
self._enqueue(XML_DECL, (version, encoding, standalone))
def _handle_doctype(self, name, sysid, pubid, has_internal_subset):
self._enqueue(DOCTYPE, (name, pubid, sysid))
def _handle_start_ns(self, prefix, uri):
self._enqueue(START_NS, (prefix or '', uri))
def _handle_end_ns(self, prefix):
self._enqueue(END_NS, prefix or '')
def _handle_start_cdata(self):
self._enqueue(START_CDATA)
def _handle_end_cdata(self):
self._enqueue(END_CDATA)
def _handle_pi(self, target, data):
self._enqueue(PI, (target, data))
def _handle_comment(self, text):
self._enqueue(COMMENT, text)
def _handle_other(self, text):
if text.startswith('&'):
# deal with undefined entities
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
self._enqueue(TEXT, text)
except KeyError:
filename, lineno, offset = self._getpos()
error = expat.error('undefined entity "%s": line %d, column %d'
% (text, lineno, offset))
error.code = expat.errors.XML_ERROR_UNDEFINED_ENTITY
error.lineno = lineno
error.offset = offset
raise error
def XML(text):
    """Parse the given XML source and return a markup stream.

    Unlike with `XMLParser`, the returned stream is reusable, meaning it can be
    iterated over multiple times:

    >>> xml = XML('<doc><elem>Foo</elem><elem>Bar</elem></doc>')
    >>> print xml
    <doc><elem>Foo</elem><elem>Bar</elem></doc>
    >>> print xml.select('elem')
    <elem>Foo</elem><elem>Bar</elem>
    >>> print xml.select('elem/text()')
    FooBar

    :param text: the XML source
    :return: the parsed XML event stream
    :raises ParseError: if the XML text is not well-formed
    """
    # Materializing the event list (rather than keeping the generator) is
    # what makes the returned stream safely re-iterable.
    return Stream(list(XMLParser(StringIO(text))))
class HTMLParser(html.HTMLParser, object):
    """Parser for HTML input based on the Python `HTMLParser` module.

    This class provides the same interface for generating stream events as
    `XMLParser`, and attempts to automatically balance tags.

    The parsing is initiated by iterating over the parser object:

    >>> parser = HTMLParser(StringIO('<UL compact><LI>Foo</UL>'))
    >>> for kind, data, pos in parser:
    ...     print kind, data
    START (QName(u'ul'), Attrs([(QName(u'compact'), u'compact')]))
    START (QName(u'li'), Attrs())
    TEXT Foo
    END li
    END ul
    """

    # HTML elements that cannot have content; an END event is emitted for
    # them immediately after the START (see handle_starttag).
    _EMPTY_ELEMS = frozenset(['area', 'base', 'basefont', 'br', 'col', 'frame',
                              'hr', 'img', 'input', 'isindex', 'link', 'meta',
                              'param'])

    def __init__(self, source, filename=None, encoding='utf-8'):
        """Initialize the parser for the given HTML input.

        :param source: the HTML text as a file-like object
        :param filename: the name of the file, if known
        :param encoding: encoding of the file; ignored if the input is unicode
        """
        html.HTMLParser.__init__(self)
        self.source = source
        self.filename = filename
        self.encoding = encoding
        self._queue = []
        # Stack of tag names opened but not yet closed, used for balancing
        self._open_tags = []

    def parse(self):
        """Generator that parses the HTML source, yielding markup events.

        :return: a markup event stream
        :raises ParseError: if the HTML text is not well formed
        """
        def _generate():
            try:
                bufsize = 4 * 1024 # 4K
                done = False
                while 1:
                    while not done and len(self._queue) == 0:
                        data = self.source.read(bufsize)
                        if data == '': # end of data
                            self.close()
                            done = True
                        else:
                            self.feed(data)
                    for kind, data, pos in self._queue:
                        yield kind, data, pos
                    self._queue = []
                    if done:
                        # Close any tags left open at end of input, innermost
                        # first; `pos` carries over from the last real event.
                        open_tags = self._open_tags
                        open_tags.reverse()
                        for tag in open_tags:
                            yield END, QName(tag), pos
                        break
            except html.HTMLParseError, e:
                msg = '%s: line %d, column %d' % (e.msg, e.lineno, e.offset)
                raise ParseError(msg, self.filename, e.lineno, e.offset)
        # Merge adjacent TEXT events before handing the stream to the caller
        return Stream(_generate()).filter(_coalesce)

    def __iter__(self):
        return iter(self.parse())

    def _enqueue(self, kind, data, pos=None):
        if pos is None:
            pos = self._getpos()
        self._queue.append((kind, data, pos))

    def _getpos(self):
        lineno, column = self.getpos()
        return (self.filename, lineno, column)

    def handle_starttag(self, tag, attrib):
        fixed_attrib = []
        for name, value in attrib: # Fixup minimized attributes
            if value is None:
                # HTML boolean attribute (e.g. <ul compact>): use the
                # attribute name as its value
                value = unicode(name)
            elif not isinstance(value, unicode):
                value = value.decode(self.encoding, 'replace')
            fixed_attrib.append((QName(name), stripentities(value)))

        self._enqueue(START, (QName(tag), Attrs(fixed_attrib)))
        if tag in self._EMPTY_ELEMS:
            self._enqueue(END, QName(tag))
        else:
            self._open_tags.append(tag)

    def handle_endtag(self, tag):
        if tag not in self._EMPTY_ELEMS:
            # Emit END events for any unclosed inner tags, up to and
            # including the tag actually being closed
            while self._open_tags:
                open_tag = self._open_tags.pop()
                self._enqueue(END, QName(open_tag))
                if open_tag.lower() == tag.lower():
                    break

    def handle_data(self, text):
        if not isinstance(text, unicode):
            text = text.decode(self.encoding, 'replace')
        self._enqueue(TEXT, text)

    def handle_charref(self, name):
        # Numeric character reference: hexadecimal (&#x..;) or decimal (&#..;)
        if name.lower().startswith('x'):
            text = unichr(int(name[1:], 16))
        else:
            text = unichr(int(name))
        self._enqueue(TEXT, text)

    def handle_entityref(self, name):
        try:
            text = unichr(htmlentitydefs.name2codepoint[name])
        except KeyError:
            # Unknown entity: pass it through literally
            text = '&%s;' % name
        self._enqueue(TEXT, text)

    def handle_pi(self, data):
        target, data = data.split(None, 1)
        if data.endswith('?'):
            # XML-style PI (<?target data?>): drop the trailing '?'
            data = data[:-1]
        self._enqueue(PI, (target.strip(), data.strip()))

    def handle_comment(self, text):
        self._enqueue(COMMENT, text)
def HTML(text, encoding='utf-8'):
    """Parse the given HTML source and return a markup stream.

    Unlike with `HTMLParser`, the returned stream is reusable, meaning it can be
    iterated over multiple times:

    >>> html = HTML('<body><h1>Foo</h1></body>')
    >>> print html
    <body><h1>Foo</h1></body>
    >>> print html.select('h1')
    <h1>Foo</h1>
    >>> print html.select('h1/text()')
    Foo

    :param text: the HTML source
    :param encoding: the encoding of the text, ignored if it is unicode
    :return: the parsed XML event stream
    :raises ParseError: if the HTML text is not well-formed, and error recovery
                        fails
    """
    # Materializing the event list (rather than keeping the generator) is
    # what makes the returned stream safely re-iterable.
    return Stream(list(HTMLParser(StringIO(text), encoding=encoding)))
def _coalesce(stream):
    """Coalesces adjacent TEXT events into a single event."""
    pending = []
    pending_pos = None
    # A trailing (None, None, None) sentinel guarantees that any buffered
    # text is flushed even when the stream ends with TEXT events.
    for kind, data, pos in chain(stream, [(None, None, None)]):
        if kind is TEXT:
            if pending_pos is None:
                # The merged event reports the position of its first chunk
                pending_pos = pos
            pending.append(data)
            continue
        if pending:
            yield TEXT, u''.join(pending), pending_pos
            pending = []
            pending_pos = None
        if kind:
            yield kind, data, pos
PK q8vm m genshi/output.py# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""This module provides different kinds of serialization methods for XML event
streams.
"""
from itertools import chain
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
import re
from genshi.core import escape, Attrs, Markup, Namespace, QName, StreamEventKind
from genshi.core import START, END, TEXT, XML_DECL, DOCTYPE, START_NS, END_NS, \
START_CDATA, END_CDATA, PI, COMMENT, XML_NAMESPACE
__all__ = ['encode', 'get_serializer', 'DocType', 'XMLSerializer',
'XHTMLSerializer', 'HTMLSerializer', 'TextSerializer']
__docformat__ = 'restructuredtext en'
def encode(iterator, method='xml', encoding='utf-8', out=None):
    """Encode serializer output into a string.

    :param iterator: the iterator returned from serializing a stream (basically
                     any iterator that yields unicode objects)
    :param method: the serialization method; determines how characters not
                   representable in the specified encoding are treated
    :param encoding: how the output string should be encoded; if set to `None`,
                     this method returns a `unicode` object
    :param out: a file-like object that the output should be written to
                instead of being returned as one big string; note that if
                this is a file or socket (or similar), the `encoding` must
                not be `None` (that is, the output must be encoded)
    :return: a `str` or `unicode` object (depending on the `encoding`
             parameter), or `None` if the `out` parameter is provided
    :since: version 0.4.1
    :note: Changed in 0.5: added the `out` parameter
    """
    if encoding is None:
        _encode = lambda chunk: chunk
    else:
        # Text output has no way to express character references, so
        # unencodable characters are simply replaced; markup output can use
        # XML character references instead.
        if method == 'text' or isinstance(method, TextSerializer):
            errors = 'replace'
        else:
            errors = 'xmlcharrefreplace'
        _encode = lambda chunk: chunk.encode(encoding, errors)
    if out is None:
        return _encode(u''.join(list(iterator)))
    for chunk in iterator:
        out.write(_encode(chunk))
def get_serializer(method='xml', **kwargs):
    """Return a serializer object for the given method.

    :param method: the serialization method; can be either "xml", "xhtml",
                   "html", "text", or a custom serializer class

    Any additional keyword arguments are passed to the serializer, and thus
    depend on the `method` parameter value.

    :see: `XMLSerializer`, `XHTMLSerializer`, `HTMLSerializer`, `TextSerializer`
    :since: version 0.4.1
    """
    if isinstance(method, basestring):
        # Map the method name to its serializer class; an unknown name
        # raises a KeyError, same as an unknown dictionary key would.
        serializers = {
            'xml': XMLSerializer,
            'xhtml': XHTMLSerializer,
            'html': HTMLSerializer,
            'text': TextSerializer,
        }
        method = serializers[method.lower()]
    return method(**kwargs)
class DocType(object):
    """Defines a number of commonly used DOCTYPE declarations as constants.

    Each constant is a ``(name, pubid, sysid)`` tuple suitable for passing
    to a serializer's ``doctype`` parameter.
    """

    HTML_STRICT = (
        'html', '-//W3C//DTD HTML 4.01//EN',
        'http://www.w3.org/TR/html4/strict.dtd'
    )
    HTML_TRANSITIONAL = (
        'html', '-//W3C//DTD HTML 4.01 Transitional//EN',
        'http://www.w3.org/TR/html4/loose.dtd'
    )
    HTML_FRAMESET = (
        'html', '-//W3C//DTD HTML 4.01 Frameset//EN',
        'http://www.w3.org/TR/html4/frameset.dtd'
    )
    HTML = HTML_STRICT

    # The HTML5 doctype has no public or system identifier
    HTML5 = ('html', None, None)

    XHTML_STRICT = (
        'html', '-//W3C//DTD XHTML 1.0 Strict//EN',
        'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd'
    )
    XHTML_TRANSITIONAL = (
        'html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
        'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'
    )
    XHTML_FRAMESET = (
        'html', '-//W3C//DTD XHTML 1.0 Frameset//EN',
        'http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd'
    )
    XHTML = XHTML_STRICT

    XHTML11 = (
        'html', '-//W3C//DTD XHTML 1.1//EN',
        'http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd'
    )

    SVG_FULL = (
        'svg', '-//W3C//DTD SVG 1.1//EN',
        'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'
    )
    SVG_BASIC = (
        'svg', '-//W3C//DTD SVG Basic 1.1//EN',
        'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-basic.dtd'
    )
    SVG_TINY = (
        'svg', '-//W3C//DTD SVG Tiny 1.1//EN',
        'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-tiny.dtd'
    )
    SVG = SVG_FULL

    def get(cls, name):
        """Return the ``(name, pubid, sysid)`` tuple of the ``DOCTYPE``
        declaration for the specified name.

        The following names are recognized in this version:
         * "html" or "html-strict" for the HTML 4.01 strict DTD
         * "html-transitional" for the HTML 4.01 transitional DTD
         * "html-frameset" for the HTML 4.01 frameset DTD
         * "html5" for the ``DOCTYPE`` proposed for HTML5
         * "xhtml" or "xhtml-strict" for the XHTML 1.0 strict DTD
         * "xhtml-transitional" for the XHTML 1.0 transitional DTD
         * "xhtml-frameset" for the XHTML 1.0 frameset DTD
         * "xhtml11" for the XHTML 1.1 DTD
         * "svg" or "svg-full" for the SVG 1.1 DTD
         * "svg-basic" for the SVG Basic 1.1 DTD
         * "svg-tiny" for the SVG Tiny 1.1 DTD

        :param name: the name of the ``DOCTYPE``
        :return: the ``(name, pubid, sysid)`` tuple for the requested
                 ``DOCTYPE``, or ``None`` if the name is not recognized
        :since: version 0.4.1
        """
        # Consistently use `cls` for all constants (the original mixed
        # `cls.X` and `DocType.X` for no reason).
        return {
            'html': cls.HTML, 'html-strict': cls.HTML_STRICT,
            'html-transitional': cls.HTML_TRANSITIONAL,
            'html-frameset': cls.HTML_FRAMESET,
            'html5': cls.HTML5,
            'xhtml': cls.XHTML, 'xhtml-strict': cls.XHTML_STRICT,
            'xhtml-transitional': cls.XHTML_TRANSITIONAL,
            'xhtml-frameset': cls.XHTML_FRAMESET,
            'xhtml11': cls.XHTML11,
            'svg': cls.SVG, 'svg-full': cls.SVG_FULL,
            'svg-basic': cls.SVG_BASIC,
            'svg-tiny': cls.SVG_TINY
        }.get(name.lower())
    # classmethod() call instead of decorator syntax preserves compatibility
    # with Python 2.3, which this file otherwise supports
    get = classmethod(get)
class XMLSerializer(object):
    """Produces XML text from an event stream.

    >>> from genshi.builder import tag
    >>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
    >>> print ''.join(XMLSerializer()(elem.generate()))
    <div><a href="foo"/><br/><hr noshade="True"/></div>
    """

    _PRESERVE_SPACE = frozenset()

    def __init__(self, doctype=None, strip_whitespace=True,
                 namespace_prefixes=None):
        """Initialize the XML serializer.

        :param doctype: a ``(name, pubid, sysid)`` tuple that represents the
                        DOCTYPE declaration that should be included at the top
                        of the generated output, or the name of a DOCTYPE as
                        defined in `DocType.get`
        :param strip_whitespace: whether extraneous whitespace should be
                                 stripped from the output
        :param namespace_prefixes: optional mapping of namespace URIs to
                                   prefixes, passed to the namespace flattener
        :note: Changed in 0.4.2: The `doctype` parameter can now be a string.
        """
        self.filters = [EmptyTagFilter()]
        if strip_whitespace:
            self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
        self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes))
        if doctype:
            self.filters.append(DocTypeInserter(doctype))

    def __call__(self, stream):
        # Several output literals below were destroyed in this copy of the
        # file (empty/truncated format strings); restored to the standard
        # XML serializations.
        have_decl = have_doctype = False
        in_cdata = False

        for filter_ in self.filters:
            stream = filter_(stream)
        for kind, data, pos in stream:

            if kind is START or kind is EMPTY:
                tag, attrib = data
                buf = ['<', tag]
                for attr, value in attrib:
                    buf += [' ', attr, '="', escape(value), '"']
                buf.append(kind is EMPTY and '/>' or '>')
                yield Markup(u''.join(buf))

            elif kind is END:
                yield Markup('</%s>' % data)

            elif kind is TEXT:
                if in_cdata:
                    # Text inside a CDATA section must not be escaped
                    yield data
                else:
                    yield escape(data, quotes=False)

            elif kind is COMMENT:
                yield Markup('<!--%s-->' % data)

            elif kind is XML_DECL and not have_decl:
                version, encoding, standalone = data
                buf = ['<?xml version="%s"' % version]
                if encoding:
                    buf.append(' encoding="%s"' % encoding)
                if standalone != -1:
                    buf.append(' standalone="%s"' % (standalone and 'yes' or 'no'))
                buf.append('?>\n')
                yield Markup(u''.join(buf))
                have_decl = True

            elif kind is DOCTYPE and not have_doctype:
                name, pubid, sysid = data
                buf = ['<!DOCTYPE %s']
                if pubid:
                    buf.append(' PUBLIC "%s"')
                elif sysid:
                    buf.append(' SYSTEM')
                if sysid:
                    buf.append(' "%s"')
                buf.append('>\n')
                # `filter(None, ...)` drops the pubid/sysid entries that are
                # None so the placeholders line up with the values
                yield Markup(u''.join(buf)) % filter(None, data)
                have_doctype = True

            elif kind is START_CDATA:
                yield Markup('<![CDATA[')
                in_cdata = True

            elif kind is END_CDATA:
                yield Markup(']]>')
                in_cdata = False

            elif kind is PI:
                yield Markup('<?%s %s?>' % data)
class XHTMLSerializer(XMLSerializer):
    """Produces XHTML text from an event stream.

    >>> from genshi.builder import tag
    >>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
    >>> print ''.join(XHTMLSerializer()(elem.generate()))
    <div><a href="foo"></a><br /><hr noshade="noshade" /></div>
    """

    # Elements that may use the minimized <tag /> form
    _EMPTY_ELEMS = frozenset(['area', 'base', 'basefont', 'br', 'col', 'frame',
                              'hr', 'img', 'input', 'isindex', 'link', 'meta',
                              'param'])
    # Attributes serialized in the attr="attr" form required by XHTML
    _BOOLEAN_ATTRS = frozenset(['selected', 'checked', 'compact', 'declare',
                                'defer', 'disabled', 'ismap', 'multiple',
                                'nohref', 'noresize', 'noshade', 'nowrap'])
    _PRESERVE_SPACE = frozenset([
        QName('pre'), QName('http://www.w3.org/1999/xhtml}pre'),
        QName('textarea'), QName('http://www.w3.org/1999/xhtml}textarea')
    ])

    def __init__(self, doctype=None, strip_whitespace=True,
                 namespace_prefixes=None, drop_xml_decl=True):
        """Initialize the XHTML serializer.

        :param doctype: a ``(name, pubid, sysid)`` tuple or a name as defined
                        in `DocType.get`
        :param strip_whitespace: whether extraneous whitespace should be
                                 stripped from the output
        :param namespace_prefixes: optional mapping of namespace URIs to
                                   prefixes; the XHTML namespace is always
                                   mapped to the empty (default) prefix
        :param drop_xml_decl: whether to suppress any XML declaration in the
                              stream
        """
        super(XHTMLSerializer, self).__init__(doctype, False)
        self.filters = [EmptyTagFilter()]
        if strip_whitespace:
            self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE))
        # NOTE(review): this mutates a caller-supplied dict in place —
        # confirm whether callers rely on that before changing it
        namespace_prefixes = namespace_prefixes or {}
        namespace_prefixes['http://www.w3.org/1999/xhtml'] = ''
        self.filters.append(NamespaceFlattener(prefixes=namespace_prefixes))
        if doctype:
            self.filters.append(DocTypeInserter(doctype))
        self.drop_xml_decl = drop_xml_decl

    def __call__(self, stream):
        # Several output literals below were destroyed in this copy of the
        # file; restored to the standard XHTML serializations.
        boolean_attrs = self._BOOLEAN_ATTRS
        empty_elems = self._EMPTY_ELEMS
        drop_xml_decl = self.drop_xml_decl
        have_decl = have_doctype = False
        in_cdata = False

        for filter_ in self.filters:
            stream = filter_(stream)
        for kind, data, pos in stream:

            if kind is START or kind is EMPTY:
                tag, attrib = data
                buf = ['<', tag]
                for attr, value in attrib:
                    if attr in boolean_attrs:
                        # Boolean attributes use their name as the value
                        value = attr
                    elif attr == u'xml:lang' and u'lang' not in attrib:
                        # Emit a plain lang="" alongside xml:lang
                        buf += [' lang="', escape(value), '"']
                    elif attr == u'xml:space':
                        continue
                    buf += [' ', attr, '="', escape(value), '"']
                if kind is EMPTY:
                    if tag in empty_elems:
                        buf.append(' />')
                    else:
                        # Non-void elements must not be minimized in XHTML
                        buf.append('></%s>' % tag)
                else:
                    buf.append('>')
                yield Markup(u''.join(buf))

            elif kind is END:
                yield Markup('</%s>' % data)

            elif kind is TEXT:
                if in_cdata:
                    yield data
                else:
                    yield escape(data, quotes=False)

            elif kind is COMMENT:
                yield Markup('<!--%s-->' % data)

            elif kind is DOCTYPE and not have_doctype:
                name, pubid, sysid = data
                buf = ['<!DOCTYPE %s']
                if pubid:
                    buf.append(' PUBLIC "%s"')
                elif sysid:
                    buf.append(' SYSTEM')
                if sysid:
                    buf.append(' "%s"')
                buf.append('>\n')
                yield Markup(u''.join(buf)) % filter(None, data)
                have_doctype = True

            elif kind is XML_DECL and not have_decl and not drop_xml_decl:
                version, encoding, standalone = data
                buf = ['<?xml version="%s"' % version]
                if encoding:
                    buf.append(' encoding="%s"' % encoding)
                if standalone != -1:
                    buf.append(' standalone="%s"' % (standalone and 'yes' or 'no'))
                buf.append('?>\n')
                yield Markup(u''.join(buf))
                have_decl = True

            elif kind is START_CDATA:
                yield Markup('<![CDATA[')
                in_cdata = True

            elif kind is END_CDATA:
                yield Markup(']]>')
                in_cdata = False

            elif kind is PI:
                yield Markup('<?%s %s?>' % data)
class HTMLSerializer(XHTMLSerializer):
    """Produces HTML text from an event stream.

    >>> from genshi.builder import tag
    >>> elem = tag.div(tag.a(href='foo'), tag.br, tag.hr(noshade=True))
    >>> print ''.join(HTMLSerializer()(elem.generate()))
    <div><a href="foo"></a><br><hr noshade></div>
    """

    # Elements whose text content must not be escaped in HTML output
    _NOESCAPE_ELEMS = frozenset([
        QName('script'), QName('http://www.w3.org/1999/xhtml}script'),
        QName('style'), QName('http://www.w3.org/1999/xhtml}style')
    ])

    def __init__(self, doctype=None, strip_whitespace=True):
        """Initialize the HTML serializer.

        :param doctype: a ``(name, pubid, sysid)`` tuple that represents the
                        DOCTYPE declaration that should be included at the top
                        of the generated output
        :param strip_whitespace: whether extraneous whitespace should be
                                 stripped from the output
        """
        super(HTMLSerializer, self).__init__(doctype, False)
        self.filters = [EmptyTagFilter()]
        if strip_whitespace:
            self.filters.append(WhitespaceFilter(self._PRESERVE_SPACE,
                                                 self._NOESCAPE_ELEMS))
        self.filters.append(NamespaceFlattener(prefixes={
            'http://www.w3.org/1999/xhtml': ''
        }))
        if doctype:
            self.filters.append(DocTypeInserter(doctype))

    def __call__(self, stream):
        # Several output literals below were destroyed in this copy of the
        # file; restored to the standard HTML serializations.
        boolean_attrs = self._BOOLEAN_ATTRS
        empty_elems = self._EMPTY_ELEMS
        noescape_elems = self._NOESCAPE_ELEMS
        have_doctype = False
        noescape = False

        for filter_ in self.filters:
            stream = filter_(stream)
        for kind, data, pos in stream:

            if kind is START or kind is EMPTY:
                tag, attrib = data
                buf = ['<', tag]
                for attr, value in attrib:
                    if attr in boolean_attrs:
                        # HTML boolean attributes are minimized (no value)
                        if value:
                            buf += [' ', attr]
                    elif ':' in attr:
                        # Prefixed attributes: only xml:lang survives, as a
                        # plain lang attribute
                        if attr == 'xml:lang' and u'lang' not in attrib:
                            buf += [' lang="', escape(value), '"']
                    elif attr != 'xmlns':
                        buf += [' ', attr, '="', escape(value), '"']
                buf.append('>')
                if kind is EMPTY:
                    if tag not in empty_elems:
                        buf.append('</%s>' % tag)
                yield Markup(u''.join(buf))
                if tag in noescape_elems:
                    noescape = True

            elif kind is END:
                yield Markup('</%s>' % data)
                noescape = False

            elif kind is TEXT:
                if noescape:
                    yield data
                else:
                    yield escape(data, quotes=False)

            elif kind is COMMENT:
                yield Markup('<!--%s-->' % data)

            elif kind is DOCTYPE and not have_doctype:
                name, pubid, sysid = data
                buf = ['<!DOCTYPE %s']
                if pubid:
                    buf.append(' PUBLIC "%s"')
                elif sysid:
                    buf.append(' SYSTEM')
                if sysid:
                    buf.append(' "%s"')
                buf.append('>\n')
                yield Markup(u''.join(buf)) % filter(None, data)
                have_doctype = True

            elif kind is PI:
                yield Markup('<?%s %s?>' % data)
class TextSerializer(object):
    """Produces plain text from an event stream.

    Only text events are included in the output. Unlike the other serializer,
    special XML characters are not escaped:

    >>> from genshi.builder import tag
    >>> elem = tag.div(tag.a('<Hello!>', href='foo'), tag.br)
    >>> print elem
    <div><a href="foo">&lt;Hello!&gt;</a><br/></div>
    >>> print ''.join(TextSerializer()(elem.generate()))
    <Hello!>

    If text events contain literal markup (instances of the `Markup` class),
    that markup is by default passed through unchanged:

    >>> elem = tag.div(Markup('<a href="foo">Hello &amp; Bye!</a><br/>'))
    >>> print elem.generate().render(TextSerializer)
    <a href="foo">Hello &amp; Bye!</a><br/>

    You can use the ``strip_markup`` to change this behavior, so that tags and
    entities are stripped from the output (or in the case of entities,
    replaced with the equivalent character):

    >>> print elem.generate().render(TextSerializer, strip_markup=True)
    Hello & Bye!
    """

    def __init__(self, strip_markup=False):
        """Create the serializer.

        :param strip_markup: whether markup (tags and encoded characters) found
                             in the text should be removed
        """
        self.strip_markup = strip_markup

    def __call__(self, stream):
        strip_markup = self.strip_markup
        for event in stream:
            # Only TEXT events contribute to the output; everything else
            # (tags, comments, PIs, ...) is silently dropped.
            if event[0] is TEXT:
                data = event[1]
                if strip_markup and type(data) is Markup:
                    data = data.striptags().stripentities()
                yield unicode(data)
class EmptyTagFilter(object):
    """Combines `START` and `STOP` events into `EMPTY` events for elements that
    have no contents.
    """

    # Event kind emitted for a collapsed START/END pair
    EMPTY = StreamEventKind('EMPTY')

    def __call__(self, stream):
        # Hold each START event back by one step: if the very next event is
        # the matching END, emit a single EMPTY event instead of the pair.
        prev = (None, None, None)
        for ev in stream:
            if prev[0] is START:
                if ev[0] is END:
                    # START immediately followed by END: collapse to EMPTY
                    prev = EMPTY, prev[1], prev[2]
                    yield prev
                    continue
                else:
                    # Element has content; release the buffered START
                    yield prev
            if ev[0] is not START:
                yield ev
            prev = ev
# Module-level alias for the EMPTY event kind, referenced directly by the
# serializers above.
EMPTY = EmptyTagFilter.EMPTY
class NamespaceFlattener(object):
    r"""Output stream filter that removes namespace information from the stream,
    instead adding namespace attributes and prefixes as needed.

    :param prefixes: optional mapping of namespace URIs to prefixes

    >>> from genshi.input import XML
    >>> xml = XML('''<doc xmlns="NS1" xmlns:two="NS2">
    ...   <two:item/>
    ... </doc>''')
    >>> for kind, data, pos in NamespaceFlattener()(xml):
    ...     print kind, repr(data)
    START (u'doc', Attrs([(u'xmlns', u'NS1'), (u'xmlns:two', u'NS2')]))
    TEXT u'\n '
    START (u'two:item', Attrs())
    END u'two:item'
    TEXT u'\n'
    END u'doc'
    """

    def __init__(self, prefixes=None):
        # The "xml" prefix is implicitly declared by the XML namespaces spec
        self.prefixes = {XML_NAMESPACE.uri: 'xml'}
        if prefixes is not None:
            self.prefixes.update(prefixes)

    def __call__(self, stream):
        # prefixes: prefix -> stack of URIs bound to it (innermost last)
        # namespaces: URI -> stack of prefixes bound to it (innermost last)
        prefixes = dict([(v, [k]) for k, v in self.prefixes.items()])
        namespaces = {XML_NAMESPACE.uri: ['xml']}
        def _push_ns(prefix, uri):
            namespaces.setdefault(uri, []).append(prefix)
            prefixes.setdefault(prefix, []).append(uri)

        # xmlns attributes collected since the last element, to be emitted
        # on the next START/EMPTY event
        ns_attrs = []
        _push_ns_attr = ns_attrs.append
        def _make_ns_attr(prefix, uri):
            return u'xmlns%s' % (prefix and ':%s' % prefix or ''), uri

        def _gen_prefix():
            # Synthesize prefixes (ns1, ns2, ...) for attribute namespaces
            # that have no declared prefix
            val = 0
            while 1:
                val += 1
                yield 'ns%d' % val
        _gen_prefix = _gen_prefix().next

        for kind, data, pos in stream:

            if kind is START or kind is EMPTY:
                tag, attrs = data

                tagname = tag.localname
                tagns = tag.namespace
                if tagns:
                    if tagns in namespaces:
                        prefix = namespaces[tagns][-1]
                        if prefix:
                            tagname = u'%s:%s' % (prefix, tagname)
                    else:
                        # Undeclared element namespace: bind it as the default
                        _push_ns_attr((u'xmlns', tagns))
                        _push_ns('', tagns)

                new_attrs = []
                for attr, value in attrs:
                    attrname = attr.localname
                    attrns = attr.namespace
                    if attrns:
                        if attrns not in namespaces:
                            prefix = _gen_prefix()
                            _push_ns(prefix, attrns)
                            _push_ns_attr(('xmlns:%s' % prefix, attrns))
                        else:
                            prefix = namespaces[attrns][-1]
                        if prefix:
                            attrname = u'%s:%s' % (prefix, attrname)
                    new_attrs.append((attrname, value))

                yield kind, (tagname, Attrs(ns_attrs + new_attrs)), pos
                del ns_attrs[:]

            elif kind is END:
                tagname = data.localname
                tagns = data.namespace
                if tagns:
                    prefix = namespaces[tagns][-1]
                    if prefix:
                        tagname = u'%s:%s' % (prefix, tagname)
                yield kind, tagname, pos

            elif kind is START_NS:
                prefix, uri = data
                if uri not in namespaces:
                    # NOTE(review): this indexes the prefix->URIs map by URI;
                    # it only hits when a URI string equals a configured
                    # prefix — confirm against upstream whether this lookup
                    # was meant to use self.prefixes instead
                    prefix = prefixes.get(uri, [prefix])[-1]
                    _push_ns_attr(_make_ns_attr(prefix, uri))
                _push_ns(prefix, uri)

            elif kind is END_NS:
                if data in prefixes:
                    uris = prefixes.get(data)
                    uri = uris.pop()
                    if not uris:
                        del prefixes[data]
                    if uri not in uris or uri != uris[-1]:
                        uri_prefixes = namespaces[uri]
                        uri_prefixes.pop()
                        if not uri_prefixes:
                            del namespaces[uri]
                    if ns_attrs:
                        # Drop a declaration whose scope opened and closed
                        # without any element being emitted inside it
                        attr = _make_ns_attr(data, uri)
                        if attr in ns_attrs:
                            ns_attrs.remove(attr)

            else:
                yield kind, data, pos
class WhitespaceFilter(object):
"""A filter that removes extraneous ignorable white space from the
stream.
"""
def __init__(self, preserve=None, noescape=None):
"""Initialize the filter.
:param preserve: a set or sequence of tag names for which white-space
should be preserved
:param noescape: a set or sequence of tag names for which text content
should not be escaped
The `noescape` set is expected to refer to elements that cannot contain
further child elements (such as ``