[Zope3-checkins] SVN: Zope3/trunk/src/ Updated docutils to 0.3.9 and:
Martijn Pieters
mj at zopatista.com
Thu Jul 14 08:25:19 EDT 2005
Log message for revision 33317:
Updated docutils to 0.3.9 and:
- Fixed relevant tests.
- Refactored the rest renderer, simplifying it and removing functionality now handled by docutils itself.
- Updated roman.py to the latest iteration.
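For illustration, a minimal sketch of the kind of call the updated docutils
makes possible through the new ``publish_parts`` API (added in core.py below);
the override values are arbitrary examples and are not taken from the
renderer itself::

    from docutils.core import publish_parts

    overrides = {'input_encoding': 'unicode',
                 'initial_header_level': 3}
    parts = publish_parts(source=u'Hello *world*.',
                          writer_name='html',
                          settings_overrides=overrides)
    fragment = parts['fragment']  # Unicode HTML body fragment
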
Changed:
U Zope3/trunk/src/docutils/__init__.py
U Zope3/trunk/src/docutils/core.py
A Zope3/trunk/src/docutils/examples.py
U Zope3/trunk/src/docutils/frontend.py
U Zope3/trunk/src/docutils/io.py
U Zope3/trunk/src/docutils/languages/__init__.py
A Zope3/trunk/src/docutils/languages/af.py
A Zope3/trunk/src/docutils/languages/ca.py
A Zope3/trunk/src/docutils/languages/cs.py
U Zope3/trunk/src/docutils/languages/de.py
U Zope3/trunk/src/docutils/languages/en.py
A Zope3/trunk/src/docutils/languages/eo.py
U Zope3/trunk/src/docutils/languages/es.py
A Zope3/trunk/src/docutils/languages/fi.py
U Zope3/trunk/src/docutils/languages/fr.py
U Zope3/trunk/src/docutils/languages/it.py
A Zope3/trunk/src/docutils/languages/nl.py
A Zope3/trunk/src/docutils/languages/pt_br.py
A Zope3/trunk/src/docutils/languages/ru.py
U Zope3/trunk/src/docutils/languages/sk.py
U Zope3/trunk/src/docutils/languages/sv.py
A Zope3/trunk/src/docutils/languages/zh_tw.py
U Zope3/trunk/src/docutils/nodes.py
U Zope3/trunk/src/docutils/parsers/__init__.py
U Zope3/trunk/src/docutils/parsers/rst/__init__.py
U Zope3/trunk/src/docutils/parsers/rst/directives/__init__.py
U Zope3/trunk/src/docutils/parsers/rst/directives/admonitions.py
U Zope3/trunk/src/docutils/parsers/rst/directives/body.py
U Zope3/trunk/src/docutils/parsers/rst/directives/html.py
U Zope3/trunk/src/docutils/parsers/rst/directives/images.py
U Zope3/trunk/src/docutils/parsers/rst/directives/misc.py
U Zope3/trunk/src/docutils/parsers/rst/directives/parts.py
U Zope3/trunk/src/docutils/parsers/rst/directives/references.py
A Zope3/trunk/src/docutils/parsers/rst/directives/tables.py
U Zope3/trunk/src/docutils/parsers/rst/languages/__init__.py
A Zope3/trunk/src/docutils/parsers/rst/languages/af.py
A Zope3/trunk/src/docutils/parsers/rst/languages/ca.py
A Zope3/trunk/src/docutils/parsers/rst/languages/cs.py
U Zope3/trunk/src/docutils/parsers/rst/languages/de.py
U Zope3/trunk/src/docutils/parsers/rst/languages/en.py
A Zope3/trunk/src/docutils/parsers/rst/languages/eo.py
U Zope3/trunk/src/docutils/parsers/rst/languages/es.py
A Zope3/trunk/src/docutils/parsers/rst/languages/fi.py
U Zope3/trunk/src/docutils/parsers/rst/languages/fr.py
U Zope3/trunk/src/docutils/parsers/rst/languages/it.py
A Zope3/trunk/src/docutils/parsers/rst/languages/nl.py
A Zope3/trunk/src/docutils/parsers/rst/languages/pt_br.py
A Zope3/trunk/src/docutils/parsers/rst/languages/ru.py
U Zope3/trunk/src/docutils/parsers/rst/languages/sk.py
U Zope3/trunk/src/docutils/parsers/rst/languages/sv.py
A Zope3/trunk/src/docutils/parsers/rst/languages/zh_tw.py
A Zope3/trunk/src/docutils/parsers/rst/roles.py
U Zope3/trunk/src/docutils/parsers/rst/roman.py
U Zope3/trunk/src/docutils/parsers/rst/states.py
U Zope3/trunk/src/docutils/parsers/rst/tableparser.py
U Zope3/trunk/src/docutils/readers/__init__.py
U Zope3/trunk/src/docutils/readers/pep.py
U Zope3/trunk/src/docutils/readers/python/__init__.py
U Zope3/trunk/src/docutils/readers/python/moduleparser.py
A Zope3/trunk/src/docutils/readers/python/pynodes.py
U Zope3/trunk/src/docutils/readers/standalone.py
U Zope3/trunk/src/docutils/statemachine.py
U Zope3/trunk/src/docutils/transforms/__init__.py
U Zope3/trunk/src/docutils/transforms/components.py
U Zope3/trunk/src/docutils/transforms/frontmatter.py
U Zope3/trunk/src/docutils/transforms/misc.py
U Zope3/trunk/src/docutils/transforms/parts.py
U Zope3/trunk/src/docutils/transforms/peps.py
U Zope3/trunk/src/docutils/transforms/references.py
U Zope3/trunk/src/docutils/transforms/universal.py
U Zope3/trunk/src/docutils/urischemes.py
U Zope3/trunk/src/docutils/utils.py
U Zope3/trunk/src/docutils/writers/__init__.py
U Zope3/trunk/src/docutils/writers/docutils_xml.py
U Zope3/trunk/src/docutils/writers/html4css1.py
U Zope3/trunk/src/docutils/writers/latex2e.py
A Zope3/trunk/src/docutils/writers/newlatex2e.py
A Zope3/trunk/src/docutils/writers/null.py
U Zope3/trunk/src/docutils/writers/pep_html.py
U Zope3/trunk/src/docutils/writers/pseudoxml.py
U Zope3/trunk/src/zope/app/apidoc/browser/apidoc.css
U Zope3/trunk/src/zope/app/apidoc/codemodule/browser/README.txt
U Zope3/trunk/src/zope/app/apidoc/component.txt
U Zope3/trunk/src/zope/app/apidoc/ifacemodule/browser.txt
U Zope3/trunk/src/zope/app/apidoc/interface.txt
U Zope3/trunk/src/zope/app/apidoc/utilities.txt
U Zope3/trunk/src/zope/app/generations/browser/managerdetails.py
U Zope3/trunk/src/zope/app/renderer/rest.py
-=-
Modified: Zope3/trunk/src/docutils/__init__.py
===================================================================
--- Zope3/trunk/src/docutils/__init__.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/__init__.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
-# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:04 $
+# Contact: goodger at python.org
+# Revision: $Revision: 3374 $
+# Date: $Date: 2005-05-26 23:21:48 +0200 (Thu, 26 May 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -12,12 +12,14 @@
Modules:
-- __init__.py: Contains the package docstring only (this text).
+- __init__.py: Contains component base classes, exception classes, and
+ Docutils `__version__`.
-- core.py: Contains the ``Publisher`` class and ``publish()`` convenience
- function.
+- core.py: Contains the ``Publisher`` class and ``publish_*()`` convenience
+ functions.
-- frontend.py: Command-line and common processing for Docutils front-ends.
+- frontend.py: Runtime settings (command-line interface, configuration files)
+ processing, for Docutils front-ends.
- io.py: Provides a uniform API for low-level input and output.
@@ -49,12 +51,12 @@
__docformat__ = 'reStructuredText'
-__version__ = '0.3.0'
-"""``major.minor.micro`` version number. The micro number is bumped any time
-there's a change in the API incompatible with one of the front ends. The
-minor number is bumped whenever there is a project release. The major number
-will be bumped when the project is feature-complete, and perhaps if there is a
-major change in the design."""
+__version__ = '0.3.9'
+"""``major.minor.micro`` version number. The micro number is bumped for API
+changes, for new functionality, and for interim project releases. The minor
+number is bumped whenever there is a significant project release. The major
+number will be bumped when the project is feature-complete, and perhaps if
+there is a major change in the design."""
class ApplicationError(StandardError): pass
@@ -72,18 +74,36 @@
settings_spec = ()
"""Runtime settings specification. Override in subclasses.
- Specifies runtime settings and associated command-line options, as used by
- `docutils.frontend.OptionParser`. This tuple contains one or more sets of
- option group title, description, and a list/tuple of tuples: ``('help
- text', [list of option strings], {keyword arguments})``. Group title
- and/or description may be `None`; no group title implies no group, just a
- list of single options. Runtime settings names are derived implicitly
- from long option names ("--a-setting" becomes ``settings.a_setting``) or
- explicitly from the "dest" keyword argument."""
+ Defines runtime settings and associated command-line options, as used by
+ `docutils.frontend.OptionParser`. This is a tuple of:
+ - Option group title (string or `None` which implies no group, just a list
+ of single options).
+
+ - Description (string or `None`).
+
+ - A sequence of option tuples. Each consists of:
+
+ - Help text (string)
+
+ - List of option strings (e.g. ``['-Q', '--quux']``).
+
+ - Dictionary of keyword arguments. It contains arguments to the
+ OptionParser/OptionGroup ``add_option`` method, possibly with the
+ addition of a 'validator' keyword (see the
+ `docutils.frontend.OptionParser.validators` instance attribute). Runtime
+ settings names are derived implicitly from long option names
+ ('--a-setting' becomes ``settings.a_setting``) or explicitly from the
+ 'dest' keyword argument. See optparse docs for more details.
+
+ - More triples of group title, description, options, as many times as
+ needed. Thus, `settings_spec` tuples can be simply concatenated.
+ """
+
settings_defaults = None
- """A dictionary of defaults for internal or inaccessible (by command-line
- or config file) settings. Override in subclasses."""
+ """A dictionary of defaults for settings not in `settings_spec` (internal
+ settings, intended to be inaccessible by command-line and config file).
+ Override in subclasses."""
settings_default_overrides = None
"""A dictionary of auxiliary defaults, to override defaults for settings
@@ -91,11 +111,21 @@
relative_path_settings = ()
"""Settings containing filesystem paths. Override in subclasses.
-
Settings listed here are to be interpreted relative to the current working
directory."""
+ config_section = None
+ """The name of the config file section specific to this component
+ (lowercase, no brackets). Override in subclasses."""
+ config_section_dependencies = None
+ """A list of names of config file sections that are to be applied before
+ `config_section`, in order (from general to specific). In other words,
+ the settings in `config_section` are to be overlaid on top of the settings
+ from these sections. The "general" section is assumed implicitly.
+ Override in subclasses."""
+
+
class TransformSpec:
"""
@@ -106,18 +136,42 @@
default_transforms = ()
"""Transforms required by this class. Override in subclasses."""
+
+ unknown_reference_resolvers = ()
+ """List of functions to try to resolve unknown references. Unknown
+ references have a 'refname' attribute which doesn't correspond to any
+ target in the document. Called when FinalCheckVisitor is unable to find a
+ correct target. The list should contain functions which will try to
+ resolve unknown references, with the following signature::
+ def reference_resolver(node):
+ '''Returns boolean: true if resolved, false if not.'''
+ If the function is able to resolve the reference, it should also remove
+ the 'refname' attribute and mark the node as resolved::
+
+ del node['refname']
+ node.resolved = 1
+
+ Each function must have a "priority" attribute which will affect the order
+ the unknown_reference_resolvers are run::
+
+ reference_resolver.priority = 100
+
+ Override in subclasses."""
+
+
class Component(SettingsSpec, TransformSpec):
"""Base class for Docutils components."""
component_type = None
- """Override in subclasses."""
+ """Name of the component type ('reader', 'parser', 'writer'). Override in
+ subclasses."""
supported = ()
"""Names for this component. Override in subclasses."""
-
+
def supports(self, format):
"""
Is `format` supported by this component?
Modified: Zope3/trunk/src/docutils/core.py
===================================================================
--- Zope3/trunk/src/docutils/core.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/core.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Authors: David Goodger
-# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:04 $
+# Contact: goodger at python.org
+# Revision: $Revision: 2987 $
+# Date: $Date: 2005-02-26 19:17:59 +0100 (Sat, 26 Feb 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -9,15 +9,18 @@
`Publisher` object) with component names will result in default
behavior. For custom behavior (setting component options), create
custom component objects first, and pass *them* to
-``publish_*``/`Publisher`.
+``publish_*``/`Publisher`. See `The Docutils Publisher`_.
+
+.. _The Docutils Publisher: http://docutils.sf.net/docs/api/publisher.html
"""
__docformat__ = 'reStructuredText'
import sys
-from docutils import Component
-from docutils import frontend, io, readers, parsers, writers
-from docutils.frontend import OptionParser, ConfigParser
+import pprint
+from docutils import __version__, SettingsSpec
+from docutils import frontend, io, utils, readers, writers
+from docutils.frontend import OptionParser
class Publisher:
@@ -37,22 +40,23 @@
"""
self.reader = reader
- """A `readers.Reader` instance."""
+ """A `docutils.readers.Reader` instance."""
self.parser = parser
- """A `parsers.Parser` instance."""
+ """A `docutils.parsers.Parser` instance."""
self.writer = writer
- """A `writers.Writer` instance."""
+ """A `docutils.writers.Writer` instance."""
self.source = source
- """The source of input data, an `io.Input` instance."""
+ """The source of input data, a `docutils.io.Input` instance."""
self.source_class = source_class
"""The class for dynamically created source objects."""
self.destination = destination
- """The destination for docutils output, an `io.Output` instance."""
+ """The destination for docutils output, a `docutils.io.Output`
+ instance."""
self.destination_class = destination_class
"""The class for dynamically created destination objects."""
@@ -83,39 +87,58 @@
self.set_writer(writer_name)
def setup_option_parser(self, usage=None, description=None,
- settings_spec=None, **defaults):
+ settings_spec=None, config_section=None,
+ **defaults):
+ if config_section:
+ if not settings_spec:
+ settings_spec = SettingsSpec()
+ settings_spec.config_section = config_section
+ parts = config_section.split()
+ if len(parts) > 1 and parts[-1] == 'application':
+ settings_spec.config_section_dependencies = ['applications']
#@@@ Add self.source & self.destination to components in future?
option_parser = OptionParser(
- components=(settings_spec, self.parser, self.reader, self.writer),
+ components=(self.parser, self.reader, self.writer, settings_spec),
defaults=defaults, read_config_files=1,
usage=usage, description=description)
return option_parser
def get_settings(self, usage=None, description=None,
- settings_spec=None, **defaults):
+ settings_spec=None, config_section=None, **defaults):
"""
- Set and return default settings (overrides in `defaults` keyword
- argument).
+ Set and return default settings (overrides in `defaults` dict).
Set components first (`self.set_reader` & `self.set_writer`).
Explicitly setting `self.settings` disables command line option
processing from `self.publish()`.
"""
- option_parser = self.setup_option_parser(usage, description,
- settings_spec, **defaults)
+ option_parser = self.setup_option_parser(
+ usage, description, settings_spec, config_section, **defaults)
self.settings = option_parser.get_default_values()
return self.settings
+ def process_programmatic_settings(self, settings_spec,
+ settings_overrides,
+ config_section):
+ if self.settings is None:
+ defaults = (settings_overrides or {}).copy()
+ # Propagate exceptions by default when used programmatically:
+ defaults.setdefault('traceback', 1)
+ self.get_settings(settings_spec=settings_spec,
+ config_section=config_section,
+ **defaults)
+
def process_command_line(self, argv=None, usage=None, description=None,
- settings_spec=None, **defaults):
+ settings_spec=None, config_section=None,
+ **defaults):
"""
Pass an empty list to `argv` to avoid reading `sys.argv` (the
default).
Set components first (`self.set_reader` & `self.set_writer`).
"""
- option_parser = self.setup_option_parser(usage, description,
- settings_spec, **defaults)
+ option_parser = self.setup_option_parser(
+ usage, description, settings_spec, config_section, **defaults)
if argv is None:
argv = sys.argv[1:]
self.settings = option_parser.parse_args(argv)
@@ -153,43 +176,111 @@
def publish(self, argv=None, usage=None, description=None,
settings_spec=None, settings_overrides=None,
- enable_exit=None):
+ config_section=None, enable_exit_status=None):
"""
Process command line options and arguments (if `self.settings` not
already set), run `self.reader` and then `self.writer`. Return
`self.writer`'s output.
"""
if self.settings is None:
- self.process_command_line(argv, usage, description, settings_spec,
- **(settings_overrides or {}))
- elif settings_overrides:
- self.settings._update(settings_overrides, 'loose')
+ self.process_command_line(
+ argv, usage, description, settings_spec, config_section,
+ **(settings_overrides or {}))
self.set_io()
- document = self.reader.read(self.source, self.parser, self.settings)
- self.apply_transforms(document)
- output = self.writer.write(document, self.destination)
+ exit = None
+ document = None
+ try:
+ document = self.reader.read(self.source, self.parser,
+ self.settings)
+ self.apply_transforms(document)
+ output = self.writer.write(document, self.destination)
+ self.writer.assemble_parts()
+ except Exception, error:
+ if self.settings.traceback: # propagate exceptions?
+ self.debugging_dumps(document)
+ raise
+ self.report_Exception(error)
+ exit = 1
+ self.debugging_dumps(document)
+ if (enable_exit_status and document
+ and (document.reporter.max_level
+ >= self.settings.exit_status_level)):
+ sys.exit(document.reporter.max_level + 10)
+ elif exit:
+ sys.exit(1)
+ return output
+
+ def debugging_dumps(self, document):
+ if not document:
+ return
if self.settings.dump_settings:
- from pprint import pformat
print >>sys.stderr, '\n::: Runtime settings:'
- print >>sys.stderr, pformat(self.settings.__dict__)
- if self.settings.dump_internals:
- from pprint import pformat
+ print >>sys.stderr, pprint.pformat(self.settings.__dict__)
+ if self.settings.dump_internals and document:
print >>sys.stderr, '\n::: Document internals:'
- print >>sys.stderr, pformat(document.__dict__)
- if self.settings.dump_transforms:
- from pprint import pformat
+ print >>sys.stderr, pprint.pformat(document.__dict__)
+ if self.settings.dump_transforms and document:
print >>sys.stderr, '\n::: Transforms applied:'
- print >>sys.stderr, pformat(document.transformer.applied)
- if self.settings.dump_pseudo_xml:
+ print >>sys.stderr, pprint.pformat(document.transformer.applied)
+ if self.settings.dump_pseudo_xml and document:
print >>sys.stderr, '\n::: Pseudo-XML:'
print >>sys.stderr, document.pformat().encode(
'raw_unicode_escape')
- if enable_exit and (document.reporter.max_level
- >= self.settings.exit_level):
- sys.exit(document.reporter.max_level + 10)
- return output
+ def report_Exception(self, error):
+ if isinstance(error, utils.SystemMessage):
+ self.report_SystemMessage(error)
+ elif isinstance(error, UnicodeError):
+ self.report_UnicodeError(error)
+ else:
+ print >>sys.stderr, '%s: %s' % (error.__class__.__name__, error)
+ print >>sys.stderr, ("""\
+Exiting due to error. Use "--traceback" to diagnose.
+Please report errors to <docutils-users at lists.sf.net>.
+Include "--traceback" output, Docutils version (%s),
+Python version (%s), your OS type & version, and the
+command line used.""" % (__version__, sys.version.split()[0]))
+ def report_SystemMessage(self, error):
+ print >>sys.stderr, ('Exiting due to level-%s (%s) system message.'
+ % (error.level,
+ utils.Reporter.levels[error.level]))
+
+ def report_UnicodeError(self, error):
+ sys.stderr.write(
+ '%s: %s\n'
+ '\n'
+ 'The specified output encoding (%s) cannot\n'
+ 'handle all of the output.\n'
+ 'Try setting "--output-encoding-error-handler" to\n'
+ '\n'
+ '* "xmlcharrefreplace" (for HTML & XML output);\n'
+ % (error.__class__.__name__, error,
+ self.settings.output_encoding))
+ try:
+ data = error.object[error.start:error.end]
+ sys.stderr.write(
+ ' the output will contain "%s" and should be usable.\n'
+ '* "backslashreplace" (for other output formats, Python 2.3+);\n'
+ ' look for "%s" in the output.\n'
+ % (data.encode('ascii', 'xmlcharrefreplace'),
+ data.encode('ascii', 'backslashreplace')))
+ except AttributeError:
+ sys.stderr.write(' the output should be usable as-is.\n')
+ sys.stderr.write(
+ '* "replace"; look for "?" in the output.\n'
+ '\n'
+ '"--output-encoding-error-handler" is currently set to "%s".\n'
+ '\n'
+ 'Exiting due to error. Use "--traceback" to diagnose.\n'
+ 'If the advice above doesn\'t eliminate the error,\n'
+ 'please report it to <docutils-users at lists.sf.net>.\n'
+ 'Include "--traceback" output, Docutils version (%s),\n'
+ 'Python version (%s), your OS type & version, and the\n'
+ 'command line used.\n'
+ % (self.settings.output_encoding_error_handler,
+ __version__, sys.version.split()[0]))
+
default_usage = '%prog [options] [<source> [<destination>]]'
default_description = ('Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout).')
@@ -198,28 +289,16 @@
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
- settings_overrides=None, enable_exit=1, argv=None,
+ settings_overrides=None, config_section=None,
+ enable_exit_status=1, argv=None,
usage=default_usage, description=default_description):
"""
- Set up & run a `Publisher`. For command-line front ends.
+ Set up & run a `Publisher` for command-line-based file I/O (input and
+ output file paths taken automatically from the command line). Return the
+ encoded string output also.
- Parameters:
+ Parameters: see `publish_programmatically` for the remainder.
- - `reader`: A `docutils.readers.Reader` object.
- - `reader_name`: Name or alias of the Reader class to be instantiated if
- no `reader` supplied.
- - `parser`: A `docutils.parsers.Parser` object.
- - `parser_name`: Name or alias of the Parser class to be instantiated if
- no `parser` supplied.
- - `writer`: A `docutils.writers.Writer` object.
- - `writer_name`: Name or alias of the Writer class to be instantiated if
- no `writer` supplied.
- - `settings`: Runtime settings object.
- - `settings_spec`: Extra settings specification; a `docutils.SettingsSpec`
- subclass. Used only if no `settings` specified.
- - `settings_overrides`: A dictionary containing program-specific overrides
- of component settings.
- - `enable_exit`: Boolean; enable exit status at end of processing?
- `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
- `usage`: Usage string, output if there's a problem parsing the command
line.
@@ -228,8 +307,10 @@
"""
pub = Publisher(reader, parser, writer, settings=settings)
pub.set_components(reader_name, parser_name, writer_name)
- pub.publish(argv, usage, description, settings_spec, settings_overrides,
- enable_exit=enable_exit)
+ output = pub.publish(
+ argv, usage, description, settings_spec, settings_overrides,
+ config_section=config_section, enable_exit_status=enable_exit_status)
+ return output
def publish_file(source=None, source_path=None,
destination=None, destination_path=None,
@@ -237,59 +318,40 @@
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None, settings_overrides=None,
- enable_exit=None):
+ config_section=None, enable_exit_status=None):
"""
- Set up & run a `Publisher`. For programmatic use with file-like I/O.
+ Set up & run a `Publisher` for programmatic use with file-like I/O.
+ Return the encoded string output also.
- Parameters:
-
- - `source`: A file-like object (must have "read" and "close" methods).
- - `source_path`: Path to the input file. Opened if no `source` supplied.
- If neither `source` nor `source_path` are supplied, `sys.stdin` is used.
- - `destination`: A file-like object (must have "write" and "close"
- methods).
- - `destination_path`: Path to the input file. Opened if no `destination`
- supplied. If neither `destination` nor `destination_path` are supplied,
- `sys.stdout` is used.
- - `reader`: A `docutils.readers.Reader` object.
- - `reader_name`: Name or alias of the Reader class to be instantiated if
- no `reader` supplied.
- - `parser`: A `docutils.parsers.Parser` object.
- - `parser_name`: Name or alias of the Parser class to be instantiated if
- no `parser` supplied.
- - `writer`: A `docutils.writers.Writer` object.
- - `writer_name`: Name or alias of the Writer class to be instantiated if
- no `writer` supplied.
- - `settings`: Runtime settings object.
- - `settings_spec`: Extra settings specification; a `docutils.SettingsSpec`
- subclass. Used only if no `settings` specified.
- - `settings_overrides`: A dictionary containing program-specific overrides
- of component settings.
- - `enable_exit`: Boolean; enable exit status at end of processing?
+ Parameters: see `publish_programmatically`.
"""
- pub = Publisher(reader, parser, writer, settings=settings)
- pub.set_components(reader_name, parser_name, writer_name)
- if settings is None:
- settings = pub.get_settings(settings_spec=settings_spec)
- if settings_overrides:
- settings._update(settings_overrides, 'loose')
- pub.set_source(source, source_path)
- pub.set_destination(destination, destination_path)
- pub.publish(enable_exit=enable_exit)
+ output, pub = publish_programmatically(
+ source_class=io.FileInput, source=source, source_path=source_path,
+ destination_class=io.FileOutput,
+ destination=destination, destination_path=destination_path,
+ reader=reader, reader_name=reader_name,
+ parser=parser, parser_name=parser_name,
+ writer=writer, writer_name=writer_name,
+ settings=settings, settings_spec=settings_spec,
+ settings_overrides=settings_overrides,
+ config_section=config_section,
+ enable_exit_status=enable_exit_status)
+ return output
-def publish_string(source, source_path=None, destination_path=None,
+def publish_string(source, source_path=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
- settings_overrides=None, enable_exit=None):
+ settings_overrides=None, config_section=None,
+ enable_exit_status=None):
"""
- Set up & run a `Publisher`, and return the string output.
- For programmatic use with string I/O.
+ Set up & run a `Publisher` for programmatic use with string I/O. Return
+ the encoded string or Unicode string output.
- For encoded string output, be sure to set the "output_encoding" setting to
- the desired encoding. Set it to "unicode" for unencoded Unicode string
- output. Here's how::
+ For encoded string output, be sure to set the 'output_encoding' setting to
+ the desired encoding. Set it to 'unicode' for unencoded Unicode string
+ output. Here's one way::
publish_string(..., settings_overrides={'output_encoding': 'unicode'})
@@ -297,40 +359,159 @@
publish_string(..., settings_overrides={'input_encoding': 'unicode'})
+ Parameters: see `publish_programmatically`.
+ """
+ output, pub = publish_programmatically(
+ source_class=io.StringInput, source=source, source_path=source_path,
+ destination_class=io.StringOutput,
+ destination=None, destination_path=destination_path,
+ reader=reader, reader_name=reader_name,
+ parser=parser, parser_name=parser_name,
+ writer=writer, writer_name=writer_name,
+ settings=settings, settings_spec=settings_spec,
+ settings_overrides=settings_overrides,
+ config_section=config_section,
+ enable_exit_status=enable_exit_status)
+ return output
+
+def publish_parts(source, source_path=None, destination_path=None,
+ reader=None, reader_name='standalone',
+ parser=None, parser_name='restructuredtext',
+ writer=None, writer_name='pseudoxml',
+ settings=None, settings_spec=None,
+ settings_overrides=None, config_section=None,
+ enable_exit_status=None):
+ """
+ Set up & run a `Publisher`, and return a dictionary of document parts.
+ Dictionary keys are the names of parts, and values are Unicode strings;
+ encoding is up to the client. For programmatic use with string I/O.
+
+ For encoded string input, be sure to set the 'input_encoding' setting to
+ the desired encoding. Set it to 'unicode' for unencoded Unicode string
+ input. Here's how::
+
+ publish_string(..., settings_overrides={'input_encoding': 'unicode'})
+
+ Parameters: see `publish_programmatically`.
+ """
+ output, pub = publish_programmatically(
+ source_class=io.StringInput, source=source, source_path=source_path,
+ destination_class=io.StringOutput,
+ destination=None, destination_path=destination_path,
+ reader=reader, reader_name=reader_name,
+ parser=parser, parser_name=parser_name,
+ writer=writer, writer_name=writer_name,
+ settings=settings, settings_spec=settings_spec,
+ settings_overrides=settings_overrides,
+ config_section=config_section,
+ enable_exit_status=enable_exit_status)
+ return pub.writer.parts
+
+def publish_programmatically(source_class, source, source_path,
+ destination_class, destination, destination_path,
+ reader, reader_name,
+ parser, parser_name,
+ writer, writer_name,
+ settings, settings_spec,
+ settings_overrides, config_section,
+ enable_exit_status):
+ """
+ Set up & run a `Publisher` for custom programmatic use. Return the
+ encoded string output and the Publisher object.
+
+ Applications should not need to call this function directly. If it does
+ seem to be necessary to call this function directly, please write to the
+ docutils-develop at lists.sourceforge.net mailing list.
+
Parameters:
- - `source`: An input string; required. This can be an encoded 8-bit
- string (set the "input_encoding" setting to the correct encoding) or a
- Unicode string (set the "input_encoding" setting to "unicode").
- - `source_path`: Path to the file or object that produced `source`;
- optional. Only used for diagnostic output.
- - `destination_path`: Path to the file or object which will receive the
- output; optional. Used for determining relative paths (stylesheets,
- source links, etc.).
- - `reader`: A `docutils.readers.Reader` object.
- - `reader_name`: Name or alias of the Reader class to be instantiated if
+ * `source_class` **required**: The class for dynamically created source
+ objects. Typically `io.FileInput` or `io.StringInput`.
+
+ * `source`: Type depends on `source_class`:
+
+ - `io.FileInput`: Either a file-like object (must have 'read' and
+ 'close' methods), or ``None`` (`source_path` is opened). If neither
+ `source` nor `source_path` are supplied, `sys.stdin` is used.
+
+ - `io.StringInput` **required**: The input string, either an encoded
+ 8-bit string (set the 'input_encoding' setting to the correct
+ encoding) or a Unicode string (set the 'input_encoding' setting to
+ 'unicode').
+
+ * `source_path`: Type depends on `source_class`:
+
+ - `io.FileInput`: Path to the input file, opened if no `source`
+ supplied.
+
+ - `io.StringInput`: Optional. Path to the file or object that produced
+ `source`. Only used for diagnostic output.
+
+ * `destination_class` **required**: The class for dynamically created
+ destination objects. Typically `io.FileOutput` or `io.StringOutput`.
+
+ * `destination`: Type depends on `destination_class`:
+
+ - `io.FileOutput`: Either a file-like object (must have 'write' and
+ 'close' methods), or ``None`` (`destination_path` is opened). If
+ neither `destination` nor `destination_path` are supplied,
+ `sys.stdout` is used.
+
+ - `io.StringOutput`: Not used; pass ``None``.
+
+ * `destination_path`: Type depends on `destination_class`:
+
+ - `io.FileOutput`: Path to the output file. Opened if no `destination`
+ supplied.
+
+ - `io.StringOutput`: Path to the file or object which will receive the
+ output; optional. Used for determining relative paths (stylesheets,
+ source links, etc.).
+
+ * `reader`: A `docutils.readers.Reader` object.
+
+ * `reader_name`: Name or alias of the Reader class to be instantiated if
no `reader` supplied.
- - `parser`: A `docutils.parsers.Parser` object.
- - `parser_name`: Name or alias of the Parser class to be instantiated if
+
+ * `parser`: A `docutils.parsers.Parser` object.
+
+ * `parser_name`: Name or alias of the Parser class to be instantiated if
no `parser` supplied.
- - `writer`: A `docutils.writers.Writer` object.
- - `writer_name`: Name or alias of the Writer class to be instantiated if
+
+ * `writer`: A `docutils.writers.Writer` object.
+
+ * `writer_name`: Name or alias of the Writer class to be instantiated if
no `writer` supplied.
- - `settings`: Runtime settings object.
- - `settings_spec`: Extra settings specification; a `docutils.SettingsSpec`
- subclass. Used only if no `settings` specified.
- - `settings_overrides`: A dictionary containing program-specific overrides
- of component settings.
- - `enable_exit`: Boolean; enable exit status at end of processing?
+
+ * `settings`: A runtime settings (`docutils.frontend.Values`) object, for
+ dotted-attribute access to runtime settings. It's the end result of the
+ `SettingsSpec`, config file, and option processing. If `settings` is
+ passed, it's assumed to be complete and no further setting/config/option
+ processing is done.
+
+ * `settings_spec`: A `docutils.SettingsSpec` subclass or object. Provides
+ extra application-specific settings definitions independently of
+ components. In other words, the application becomes a component, and
+ its settings data is processed along with that of the other components.
+ Used only if no `settings` specified.
+
+ * `settings_overrides`: A dictionary containing application-specific
+ settings defaults that override the defaults of other components.
+ Used only if no `settings` specified.
+
+ * `config_section`: A string, the name of the configuration file section
+ for this application. Overrides the ``config_section`` attribute
+ defined by `settings_spec`. Used only if no `settings` specified.
+
+ * `enable_exit_status`: Boolean; enable exit status at end of processing?
"""
pub = Publisher(reader, parser, writer, settings=settings,
- source_class=io.StringInput,
- destination_class=io.StringOutput)
+ source_class=source_class,
+ destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
- if settings is None:
- settings = pub.get_settings(settings_spec=settings_spec)
- if settings_overrides:
- settings._update(settings_overrides, 'loose')
+ pub.process_programmatic_settings(
+ settings_spec, settings_overrides, config_section)
pub.set_source(source, source_path)
- pub.set_destination(destination_path=destination_path)
- return pub.publish(enable_exit=enable_exit)
+ pub.set_destination(destination, destination_path)
+ output = pub.publish(enable_exit_status=enable_exit_status)
+ return output, pub
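A minimal usage sketch of the reworked programmatic interface above
(``publish_string`` with the new ``config_section`` parameter and the
``settings_overrides`` handling); the config section name is hypothetical::

    from docutils.core import publish_string

    html = publish_string(
        source=u'Title\n=====\n\nSome *body* text.',
        writer_name='html',
        settings_overrides={'input_encoding': 'unicode',
                            'output_encoding': 'unicode'},
        config_section='my application')  # hypothetical section name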
Added: Zope3/trunk/src/docutils/examples.py
===================================================================
--- Zope3/trunk/src/docutils/examples.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/examples.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,95 @@
+# Authors: David Goodger
+# Contact: goodger at python.org
+# Revision: $Revision: 3247 $
+# Date: $Date: 2005-04-23 21:23:57 +0200 (Sat, 23 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+"""
+This module contains practical examples of Docutils client code.
+
+Importing this module from client code is not recommended; its contents are
+subject to change in future Docutils releases. Instead, it is recommended
+that you copy and paste the parts you need into your own code, modifying as
+necessary.
+"""
+
+from docutils import core, io
+
+
+def html_parts(input_string, source_path=None, destination_path=None,
+ input_encoding='unicode', doctitle=1, initial_header_level=1):
+ """
+ Given an input string, returns a dictionary of HTML document parts.
+
+ Dictionary keys are the names of parts, and values are Unicode strings;
+ encoding is up to the client.
+
+ Parameters:
+
+ - `input_string`: A multi-line text string; required.
+ - `source_path`: Path to the source file or object. Optional, but useful
+ for diagnostic output (system messages).
+ - `destination_path`: Path to the file or object which will receive the
+ output; optional. Used for determining relative paths (stylesheets,
+ source links, etc.).
+ - `input_encoding`: The encoding of `input_string`. If it is an encoded
+ 8-bit string, provide the correct encoding. If it is a Unicode string,
+ use "unicode", the default.
+ - `doctitle`: Disable the promotion of a lone top-level section title to
+ document title (and subsequent section title to document subtitle
+ promotion); enabled by default.
+ - `initial_header_level`: The initial level for header elements (e.g. 1
+ for "<h1>").
+ """
+ overrides = {'input_encoding': input_encoding,
+ 'doctitle_xform': doctitle,
+ 'initial_header_level': initial_header_level}
+ parts = core.publish_parts(
+ source=input_string, source_path=source_path,
+ destination_path=destination_path,
+ writer_name='html', settings_overrides=overrides)
+ return parts
+
+def html_fragment(input_string, source_path=None, destination_path=None,
+ input_encoding='unicode', output_encoding='unicode',
+ doctitle=1, initial_header_level=1):
+ """
+ Given an input string, returns an HTML fragment as a string.
+
+ The return value is the contents of the <body> tag, less the title,
+ subtitle, and docinfo.
+
+ Parameters (see `html_parts()` for the remainder):
+
+ - `output_encoding`: The desired encoding of the output. If a Unicode
+ string is desired, use the default value of "unicode" .
+ """
+ parts = html_parts(
+ input_string=input_string, source_path=source_path,
+ destination_path=destination_path,
+ input_encoding=input_encoding, doctitle=doctitle,
+ initial_header_level=initial_header_level)
+ fragment = parts['fragment']
+ if output_encoding != 'unicode':
+ fragment = fragment.encode(output_encoding)
+ return fragment
+
+def internals(input_string, source_path=None, destination_path=None,
+ input_encoding='unicode'):
+ """
+ Return the document tree and publisher, for exploring Docutils internals.
+
+ Parameters: see `html_parts()`.
+ """
+ overrides = {'input_encoding': input_encoding}
+ output, pub = core.publish_programmatically(
+ source_class=io.StringInput, source=input_string,
+ source_path=source_path,
+ destination_class=io.NullOutput, destination=None,
+ destination_path=destination_path,
+ reader=None, reader_name='standalone',
+ parser=None, parser_name='restructuredtext',
+ writer=None, writer_name='null',
+ settings=None, settings_spec=None, settings_overrides=overrides,
+ config_section=None, enable_exit_status=None)
+ return pub.writer.document, pub
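A short usage sketch of the module just added (its own docstring recommends
copying the relevant function into client code rather than importing it; the
input string here is arbitrary)::

    from docutils.examples import html_fragment

    # Returns the contents of <body>, less title/subtitle/docinfo,
    # as a Unicode string (output_encoding defaults to 'unicode').
    fragment = html_fragment(u'A *simple* paragraph.')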
Modified: Zope3/trunk/src/docutils/frontend.py
===================================================================
--- Zope3/trunk/src/docutils/frontend.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/frontend.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:04 $
+# Revision: $Revision: 3358 $
+# Date: $Date: 2005-05-21 02:00:25 +0200 (Sat, 21 May 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -9,10 +9,21 @@
Exports the following classes:
-- `OptionParser`: Standard Docutils command-line processing.
-- `Values`: Runtime settings; objects are simple structs
- (``object.attribute``).
-- `ConfigParser`: Standard Docutils config file processing.
+* `OptionParser`: Standard Docutils command-line processing.
+* `Option`: Customized version of `optparse.Option`; validation support.
+* `Values`: Runtime settings; objects are simple structs
+ (``object.attribute``). Supports cumulative list settings (attributes).
+* `ConfigParser`: Standard Docutils config file processing.
+
+Also exports the following functions:
+
+* Option callbacks: `store_multiple`, `read_config_file`.
+* Setting validators: `validate_encoding`,
+ `validate_encoding_error_handler`,
+ `validate_encoding_and_error_handler`, `validate_boolean`,
+ `validate_threshold`, `validate_colon_separated_string_list`,
+ `validate_dependency_file`.
+* `make_paths_absolute`.
"""
__docformat__ = 'reStructuredText'
@@ -21,17 +32,19 @@
import os.path
import sys
import types
+import copy
+import warnings
import ConfigParser as CP
import codecs
import docutils
import optparse
-from optparse import Values, SUPPRESS_HELP
+from optparse import SUPPRESS_HELP
def store_multiple(option, opt, value, parser, *args, **kwargs):
"""
Store multiple values in `parser.values`. (Option callback.)
-
+
Store `None` for each attribute named in `args`, and store the value for
each key (attribute name) in `kwargs`.
"""
@@ -44,51 +57,31 @@
"""
Read a configuration file during option processing. (Option callback.)
"""
- config_parser = ConfigParser()
- config_parser.read(value, parser)
- settings = config_parser.get_section('options')
- make_paths_absolute(settings, parser.relative_path_settings,
- os.path.dirname(value))
- parser.values.__dict__.update(settings)
-
-def set_encoding(option, opt, value, parser):
- """
- Validate & set the encoding specified. (Option callback.)
- """
try:
- value = validate_encoding(option.dest, value)
- except LookupError, error:
- raise (optparse.OptionValueError('option "%s": %s' % (opt, error)),
- None, sys.exc_info()[2])
- setattr(parser.values, option.dest, value)
+ new_settings = parser.get_config_file_settings(value)
+ except ValueError, error:
+ parser.error(error)
+ parser.values.update(new_settings, parser)
-def validate_encoding(name, value):
+def validate_encoding(setting, value, option_parser,
+ config_parser=None, config_section=None):
try:
codecs.lookup(value)
except LookupError:
- raise (LookupError('unknown encoding: "%s"' % value),
+ raise (LookupError('setting "%s": unknown encoding: "%s"'
+ % (setting, value)),
None, sys.exc_info()[2])
return value
-def set_encoding_error_handler(option, opt, value, parser):
- """
- Validate & set the encoding error handler specified. (Option callback.)
- """
+def validate_encoding_error_handler(setting, value, option_parser,
+ config_parser=None, config_section=None):
try:
- value = validate_encoding_error_handler(option.dest, value)
- except LookupError, error:
- raise (optparse.OptionValueError('option "%s": %s' % (opt, error)),
- None, sys.exc_info()[2])
- setattr(parser.values, option.dest, value)
-
-def validate_encoding_error_handler(name, value):
- try:
codecs.lookup_error(value)
except AttributeError: # prior to Python 2.3
- if value not in ('strict', 'ignore', 'replace'):
+ if value not in ('strict', 'ignore', 'replace', 'xmlcharrefreplace'):
raise (LookupError(
'unknown encoding error handler: "%s" (choices: '
- '"strict", "ignore", or "replace")' % value),
+ '"strict", "ignore", "replace", or "xmlcharrefreplace")' % value),
None, sys.exc_info()[2])
except LookupError:
raise (LookupError(
@@ -99,31 +92,81 @@
None, sys.exc_info()[2])
return value
-def set_encoding_and_error_handler(option, opt, value, parser):
+def validate_encoding_and_error_handler(
+ setting, value, option_parser, config_parser=None, config_section=None):
"""
- Validate & set the encoding and error handler specified. (Option callback.)
+ Side-effect: if an error handler is included in the value, it is inserted
+ into the appropriate place as if it was a separate setting/option.
"""
- try:
- value = validate_encoding_and_error_handler(option.dest, value)
- except LookupError, error:
- raise (optparse.OptionValueError('option "%s": %s' % (opt, error)),
- None, sys.exc_info()[2])
if ':' in value:
encoding, handler = value.split(':')
- setattr(parser.values, option.dest + '_error_handler', handler)
+ validate_encoding_error_handler(
+ setting + '_error_handler', handler, option_parser,
+ config_parser, config_section)
+ if config_parser:
+ config_parser.set(config_section, setting + '_error_handler',
+ handler)
+ else:
+ setattr(option_parser.values, setting + '_error_handler', handler)
else:
encoding = value
- setattr(parser.values, option.dest, encoding)
+ validate_encoding(setting, encoding, option_parser,
+ config_parser, config_section)
+ return encoding
-def validate_encoding_and_error_handler(name, value):
- if ':' in value:
- encoding, handler = value.split(':')
- validate_encoding_error_handler(name + '_error_handler', handler)
+def validate_boolean(setting, value, option_parser,
+ config_parser=None, config_section=None):
+ if isinstance(value, types.StringType):
+ try:
+ return option_parser.booleans[value.strip().lower()]
+ except KeyError:
+ raise (LookupError('unknown boolean value: "%s"' % value),
+ None, sys.exc_info()[2])
+ return value
+
+def validate_nonnegative_int(setting, value, option_parser,
+ config_parser=None, config_section=None):
+ value = int(value)
+ if value < 0:
+ raise ValueError('negative value; must be positive or zero')
+ return value
+
+def validate_threshold(setting, value, option_parser,
+ config_parser=None, config_section=None):
+ try:
+ return int(value)
+ except ValueError:
+ try:
+ return option_parser.thresholds[value.lower()]
+ except (KeyError, AttributeError):
+ raise (LookupError('unknown threshold: %r.' % value),
+ None, sys.exc_info[2])
+
+def validate_colon_separated_string_list(
+ setting, value, option_parser, config_parser=None, config_section=None):
+ if isinstance(value, types.StringType):
+ value = value.split(':')
else:
- encoding = value
- validate_encoding(name, encoding)
+ last = value.pop()
+ value.extend(last.split(':'))
return value
+def validate_url_trailing_slash(
+ setting, value, option_parser, config_parser=None, config_section=None):
+ if not value:
+ return './'
+ elif value.endswith('/'):
+ return value
+ else:
+ return value + '/'
+
+def validate_dependency_file(
+ setting, value, option_parser, config_parser=None, config_section=None):
+ try:
+ return docutils.utils.DependencyList(value)
+ except IOError:
+ return docutils.utils.DependencyList(None)
+
def make_paths_absolute(pathdict, keys, base_path=None):
"""
Interpret filesystem path settings relative to the `base_path` given.
@@ -134,11 +177,74 @@
if base_path is None:
base_path = os.getcwd()
for key in keys:
- if pathdict.has_key(key) and pathdict[key]:
- pathdict[key] = os.path.normpath(
- os.path.abspath(os.path.join(base_path, pathdict[key])))
+ if pathdict.has_key(key):
+ value = pathdict[key]
+ if isinstance(value, types.ListType):
+ value = [make_one_path_absolute(base_path, path)
+ for path in value]
+ elif value:
+ value = make_one_path_absolute(base_path, value)
+ pathdict[key] = value
+def make_one_path_absolute(base_path, path):
+ return os.path.abspath(os.path.join(base_path, path))
+
+class Values(optparse.Values):
+
+ """
+ Updates list attributes by extension rather than by replacement.
+ Works in conjunction with the `OptionParser.lists` instance attribute.
+ """
+
+ def __init__(self, *args, **kwargs):
+ optparse.Values.__init__(self, *args, **kwargs)
+ if (not hasattr(self, 'record_dependencies')
+ or self.record_dependencies is None):
+ # Set up dependency list, in case it is needed.
+ self.record_dependencies = docutils.utils.DependencyList()
+
+ def update(self, other_dict, option_parser):
+ if isinstance(other_dict, Values):
+ other_dict = other_dict.__dict__
+ other_dict = other_dict.copy()
+ for setting in option_parser.lists.keys():
+ if (hasattr(self, setting) and other_dict.has_key(setting)):
+ value = getattr(self, setting)
+ if value:
+ value += other_dict[setting]
+ del other_dict[setting]
+ self._update_loose(other_dict)
+
+
+class Option(optparse.Option):
+
+ ATTRS = optparse.Option.ATTRS + ['validator', 'overrides']
+
+ def process(self, opt, value, values, parser):
+ """
+ Call the validator function on applicable settings and
+ evaluate the 'overrides' option.
+ Extends `optparse.Option.process`.
+ """
+ result = optparse.Option.process(self, opt, value, values, parser)
+ setting = self.dest
+ if setting:
+ if self.validator:
+ value = getattr(values, setting)
+ try:
+ new_value = self.validator(setting, value, parser)
+ except Exception, error:
+ raise (optparse.OptionValueError(
+ 'Error in option "%s":\n %s: %s'
+ % (opt, error.__class__.__name__, error)),
+ None, sys.exc_info()[2])
+ setattr(values, setting, new_value)
+ if self.overrides:
+ setattr(values, self.overrides, None)
+ return result
+
+
class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
"""
@@ -151,12 +257,23 @@
are restrict to using long options.
"""
+ standard_config_files = [
+ '/etc/docutils.conf', # system-wide
+ './docutils.conf', # project-specific
+ '~/.docutils'] # user-specific
+ """Docutils configuration files, using ConfigParser syntax. Filenames
+ will be tilde-expanded later. Later files override earlier ones."""
+
threshold_choices = 'info 1 warning 2 error 3 severe 4 none 5'.split()
"""Possible inputs for for --report and --halt threshold values."""
thresholds = {'info': 1, 'warning': 2, 'error': 3, 'severe': 4, 'none': 5}
"""Lookup table for --report and --halt threshold values."""
+ booleans={'1': 1, 'on': 1, 'yes': 1, 'true': 1,
+ '0': 0, 'off': 0, 'no': 0, 'false': 0, '': 0}
+ """Lookup table for boolean configuration file settings."""
+
if hasattr(codecs, 'backslashreplace_errors'):
default_error_encoding_error_handler = 'backslashreplace'
else:
@@ -167,7 +284,8 @@
None,
(('Include a "Generated by Docutils" credit and link at the end '
'of the document.',
- ['--generator', '-g'], {'action': 'store_true'}),
+ ['--generator', '-g'], {'action': 'store_true',
+ 'validator': validate_boolean}),
('Do not include a generator credit.',
['--no-generator'], {'action': 'store_false', 'dest': 'generator'}),
('Include the date at the end of the document (UTC).',
@@ -181,7 +299,8 @@
['--no-datestamp'], {'action': 'store_const', 'const': None,
'dest': 'datestamp'}),
('Include a "View document source" link (relative to destination).',
- ['--source-link', '-s'], {'action': 'store_true'}),
+ ['--source-link', '-s'], {'action': 'store_true',
+ 'validator': validate_boolean}),
('Use the supplied <URL> verbatim for a "View document source" '
'link; implies --source-link.',
['--source-url'], {'metavar': '<URL>'}),
@@ -204,27 +323,35 @@
('Enable backlinks from footnotes and citations to their '
'references. This is the default.',
['--footnote-backlinks'],
- {'action': 'store_true', 'default': 1}),
+ {'action': 'store_true', 'default': 1,
+ 'validator': validate_boolean}),
('Disable backlinks from footnotes and citations.',
['--no-footnote-backlinks'],
{'dest': 'footnote_backlinks', 'action': 'store_false'}),
+ ('Disable Docutils section numbering',
+ ['--no-section-numbering'],
+ {'action': 'store_false', 'dest': 'sectnum_xform',
+ 'default': 1, 'validator': validate_boolean}),
('Set verbosity threshold; report system messages at or higher than '
'<level> (by name or number: "info" or "1", warning/2, error/3, '
'severe/4; also, "none" or "5"). Default is 2 (warning).',
['--report', '-r'], {'choices': threshold_choices, 'default': 2,
- 'dest': 'report_level', 'metavar': '<level>'}),
+ 'dest': 'report_level', 'metavar': '<level>',
+ 'validator': validate_threshold}),
('Report all system messages, info-level and higher. (Same as '
'"--report=info".)',
- ['--verbose', '-v'], {'action': 'store_const', 'const': 'info',
+ ['--verbose', '-v'], {'action': 'store_const', 'const': 1,
'dest': 'report_level'}),
('Do not report any system messages. (Same as "--report=none".)',
- ['--quiet', '-q'], {'action': 'store_const', 'const': 'none',
+ ['--quiet', '-q'], {'action': 'store_const', 'const': 5,
'dest': 'report_level'}),
('Set the threshold (<level>) at or above which system messages are '
- 'converted to exceptions, halting execution immediately. Levels '
- 'as in --report. Default is 4 (severe).',
+ 'converted to exceptions, halting execution immediately by '
+ 'exiting (or propagating the exception if --traceback set). '
+ 'Levels as in --report. Default is 4 (severe).',
['--halt'], {'choices': threshold_choices, 'dest': 'halt_level',
- 'default': 4, 'metavar': '<level>'}),
+ 'default': 4, 'metavar': '<level>',
+ 'validator': validate_threshold}),
('Same as "--halt=info": halt processing at the slightest problem.',
['--strict'], {'action': 'store_const', 'const': 'info',
'dest': 'halt_level'}),
@@ -232,48 +359,76 @@
'system messages (at or above <level>) were generated. Levels as '
'in --report. Default is 5 (disabled). Exit status is the maximum '
'system message level plus 10 (11 for INFO, etc.).',
- ['--exit'], {'choices': threshold_choices, 'dest': 'exit_level',
- 'default': 5, 'metavar': '<level>'}),
- ('Report debug-level system messages.',
- ['--debug'], {'action': 'store_true'}),
- ('Do not report debug-level system messages.',
+ ['--exit-status'], {'choices': threshold_choices,
+ 'dest': 'exit_status_level',
+ 'default': 5, 'metavar': '<level>',
+ 'validator': validate_threshold}),
+ ('Report debug-level system messages and generate diagnostic output.',
+ ['--debug'], {'action': 'store_true', 'validator': validate_boolean}),
+ ('Do not report debug-level system messages or generate diagnostic '
+ 'output.',
['--no-debug'], {'action': 'store_false', 'dest': 'debug'}),
('Send the output of system messages (warnings) to <file>.',
['--warnings'], {'dest': 'warning_stream', 'metavar': '<file>'}),
- ('Specify the encoding of input text. Default is locale-dependent.',
+ ('Enable Python tracebacks when halt-level system messages and '
+ 'other exceptions occur. Useful for debugging, and essential for '
+ 'issue reports.',
+ ['--traceback'], {'action': 'store_true', 'default': None,
+ 'validator': validate_boolean}),
+ ('Disable Python tracebacks when errors occur; report just the error '
+ 'instead. This is the default.',
+ ['--no-traceback'], {'dest': 'traceback', 'action': 'store_false'}),
+ ('Specify the encoding of input text. Default is locale-dependent. '
+ 'Optionally also specify the error handler for undecodable '
+ 'characters, after a colon (":"); default is "strict". (See '
+ '"--intput-encoding-error-handler".)',
['--input-encoding', '-i'],
- {'action': 'callback', 'callback': set_encoding,
- 'metavar': '<name>', 'type': 'string', 'dest': 'input_encoding'}),
+ {'metavar': '<name[:handler]>',
+ 'validator': validate_encoding_and_error_handler}),
+ ('Specify the error handler for undecodable characters in '
+ 'the input. Acceptable values include "strict", "ignore", and '
+ '"replace". Default is "strict". '
+ 'Usually specified as part of --input-encoding.',
+ ['--input-encoding-error-handler'],
+ {'default': 'strict', 'validator': validate_encoding_error_handler}),
('Specify the text encoding for output. Default is UTF-8. '
- 'Optionally also specify the encoding error handler for unencodable '
- 'characters (see "--error-encoding"); default is "strict".',
+ 'Optionally also specify the error handler for unencodable '
+ 'characters, after a colon (":"); default is "strict". (See '
+ '"--output-encoding-error-handler".)',
['--output-encoding', '-o'],
- {'action': 'callback', 'callback': set_encoding_and_error_handler,
- 'metavar': '<name[:handler]>', 'type': 'string',
- 'dest': 'output_encoding', 'default': 'utf-8'}),
- (SUPPRESS_HELP, # usually handled by --output-encoding
- ['--output_encoding_error_handler'],
- {'action': 'callback', 'callback': set_encoding_error_handler,
- 'type': 'string', 'dest': 'output_encoding_error_handler',
- 'default': 'strict'}),
+ {'metavar': '<name[:handler]>', 'default': 'utf-8',
+ 'validator': validate_encoding_and_error_handler}),
+ ('Specify the error handler for unencodable characters in '
+ 'the output. Acceptable values include "strict", "ignore", '
+ '"replace", "xmlcharrefreplace", and '
+ '"backslashreplace" (in Python 2.3+). Default is "strict". '
+ 'Usually specified as part of --output-encoding.',
+ ['--output-encoding-error-handler'],
+ {'default': 'strict', 'validator': validate_encoding_error_handler}),
('Specify the text encoding for error output. Default is ASCII. '
- 'Optionally also specify the encoding error handler for unencodable '
- 'characters, after a colon (":"). Acceptable values are the same '
- 'as for the "error" parameter of Python\'s ``encode`` string '
- 'method. Default is "%s".' % default_error_encoding_error_handler,
+ 'Optionally also specify the error handler for unencodable '
+ 'characters, after a colon (":"); default is "%s". (See '
+ '"--output-encoding-error-handler".)'
+ % default_error_encoding_error_handler,
['--error-encoding', '-e'],
- {'action': 'callback', 'callback': set_encoding_and_error_handler,
- 'metavar': '<name[:handler]>', 'type': 'string',
- 'dest': 'error_encoding', 'default': 'ascii'}),
- (SUPPRESS_HELP, # usually handled by --error-encoding
- ['--error_encoding_error_handler'],
- {'action': 'callback', 'callback': set_encoding_error_handler,
- 'type': 'string', 'dest': 'error_encoding_error_handler',
- 'default': default_error_encoding_error_handler}),
+ {'metavar': '<name[:handler]>', 'default': 'ascii',
+ 'validator': validate_encoding_and_error_handler}),
+ ('Specify the error handler for unencodable characters in '
+ 'error output. See --output-encoding-error-handler for acceptable '
+ 'values. Default is "%s". Usually specified as part of '
+ '--error-encoding.' % default_error_encoding_error_handler,
+ ['--error-encoding-error-handler'],
+ {'default': default_error_encoding_error_handler,
+ 'validator': validate_encoding_error_handler}),
('Specify the language of input text (ISO 639 2-letter identifier).'
' Default is "en" (English).',
['--language', '-l'], {'dest': 'language_code', 'default': 'en',
'metavar': '<name>'}),
+ ('Write dependencies (caused e.g. by file inclusions) to '
+ '<file>. Useful in conjunction with programs like "make".',
+ ['--record-dependencies'],
+ {'metavar': '<file>', 'validator': validate_dependency_file,
+ 'default': None}), # default set in Values class
('Read configuration settings from <file>, if it exists.',
['--config'], {'metavar': '<file>', 'type': 'string',
'action': 'callback', 'callback': read_config_file}),
@@ -281,22 +436,32 @@
['--version', '-V'], {'action': 'version'}),
('Show this help message and exit.',
['--help', '-h'], {'action': 'help'}),
+ # Typically not useful for non-programmatical use.
+ (SUPPRESS_HELP, ['--id-prefix'], {'default': ''}),
+ (SUPPRESS_HELP, ['--auto-id-prefix'], {'default': 'id'}),
# Hidden options, for development use only:
(SUPPRESS_HELP, ['--dump-settings'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-internals'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-transforms'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--dump-pseudo-xml'], {'action': 'store_true'}),
(SUPPRESS_HELP, ['--expose-internal-attribute'],
- {'action': 'append', 'dest': 'expose_internals'}),))
+ {'action': 'append', 'dest': 'expose_internals',
+ 'validator': validate_colon_separated_string_list}),
+ (SUPPRESS_HELP, ['--strict-visitor'], {'action': 'store_true'}),
+ ))
"""Runtime settings and command-line options common to all Docutils front
ends. Setting specs specific to individual Docutils components are also
used (see `populate_from_components()`)."""
- settings_defaults = {'_disable_config': None}
+ settings_defaults = {'_disable_config': None,
+ '_source': None,
+ '_destination': None}
"""Defaults for settings that don't have command-line option equivalents."""
relative_path_settings = ('warning_stream',)
+ config_section = 'general'
+
version_template = '%%prog (Docutils %s)' % docutils.__version__
"""Default version message."""
@@ -307,25 +472,27 @@
``.settings_spec`` attribute. `defaults` is a mapping of setting
default overrides.
"""
+
+ self.lists = {}
+ """Set of list-type settings."""
+
optparse.OptionParser.__init__(
- self, add_help_option=None,
+ self, option_class=Option, add_help_option=None,
formatter=optparse.TitledHelpFormatter(width=78),
*args, **kwargs)
if not self.version:
self.version = self.version_template
# Make an instance copy (it will be modified):
self.relative_path_settings = list(self.relative_path_settings)
- self.populate_from_components((self,) + tuple(components))
- defaults = defaults or {}
+ self.components = (self,) + tuple(components)
+ self.populate_from_components(self.components)
+ self.set_defaults(**(defaults or {}))
if read_config_files and not self.defaults['_disable_config']:
- config = ConfigParser()
- config.read_standard_files(self)
- config_settings = config.get_section('options')
- make_paths_absolute(config_settings, self.relative_path_settings)
- defaults.update(config_settings)
- # Internal settings with no defaults from settings specifications;
- # initialize manually:
- self.set_defaults(_source=None, _destination=None, **defaults)
+ try:
+ config_settings = self.get_standard_config_settings()
+ except ValueError, error:
+ self.error(error)
+ self.set_defaults(**config_settings.__dict__)
def populate_from_components(self, components):
"""
@@ -337,11 +504,10 @@
for component in components:
if component is None:
continue
- i = 0
settings_spec = component.settings_spec
self.relative_path_settings.extend(
component.relative_path_settings)
- while i < len(settings_spec):
+ for i in range(0, len(settings_spec), 3):
title, description, option_spec = settings_spec[i:i+3]
if title:
group = optparse.OptionGroup(self, title, description)
@@ -349,36 +515,57 @@
else:
group = self # single options
for (help_text, option_strings, kwargs) in option_spec:
- group.add_option(help=help_text, *option_strings,
- **kwargs)
+ option = group.add_option(help=help_text, *option_strings,
+ **kwargs)
+ if kwargs.get('action') == 'append':
+ self.lists[option.dest] = 1
if component.settings_defaults:
self.defaults.update(component.settings_defaults)
- i += 3
for component in components:
if component and component.settings_default_overrides:
self.defaults.update(component.settings_default_overrides)
+ def get_standard_config_files(self):
+ """Return list of config files, from environment or standard."""
+ try:
+ config_files = os.environ['DOCUTILSCONFIG'].split(os.pathsep)
+ except KeyError:
+ config_files = self.standard_config_files
+ return [os.path.expanduser(f) for f in config_files if f.strip()]
+
+ def get_standard_config_settings(self):
+ settings = Values()
+ for filename in self.get_standard_config_files():
+ settings.update(self.get_config_file_settings(filename), self)
+ return settings
+
+ def get_config_file_settings(self, config_file):
+ """Returns a dictionary containing appropriate config file settings."""
+ parser = ConfigParser()
+ parser.read(config_file, self)
+ base_path = os.path.dirname(config_file)
+ applied = {}
+ settings = Values()
+ for component in self.components:
+ if not component:
+ continue
+ for section in (tuple(component.config_section_dependencies or ())
+ + (component.config_section,)):
+ if applied.has_key(section):
+ continue
+ applied[section] = 1
+ settings.update(parser.get_section(section), self)
+ make_paths_absolute(
+ settings.__dict__, self.relative_path_settings, base_path)
+ return settings.__dict__
+
def check_values(self, values, args):
- if hasattr(values, 'report_level'):
- values.report_level = self.check_threshold(values.report_level)
- if hasattr(values, 'halt_level'):
- values.halt_level = self.check_threshold(values.halt_level)
- if hasattr(values, 'exit_level'):
- values.exit_level = self.check_threshold(values.exit_level)
+ """Store positional arguments as runtime settings."""
values._source, values._destination = self.check_args(args)
make_paths_absolute(values.__dict__, self.relative_path_settings,
os.getcwd())
return values
- def check_threshold(self, level):
- try:
- return int(level)
- except ValueError:
- try:
- return self.thresholds[level.lower()]
- except (KeyError, AttributeError):
- self.error('Unknown threshold: %r.' % level)
-
def check_args(self, args):
source = destination = None
if args:
@@ -396,54 +583,96 @@
'destination. It will clobber the source file.')
return source, destination
+ def get_default_values(self):
+ """Needed to get custom `Values` instances."""
+ return Values(self.defaults)
+ def get_option_by_dest(self, dest):
+ """
+ Get an option by its dest.
+
+ If you're supplying a dest which is shared by several options,
+ it is undefined which option of those is returned.
+
+ A KeyError is raised if there is no option with the supplied
+ dest.
+ """
+ for group in self.option_groups + [self]:
+ for option in group.option_list:
+ if option.dest == dest:
+ return option
+ raise KeyError('No option with dest == %r.' % dest)
+
+
class ConfigParser(CP.ConfigParser):
- standard_config_files = (
- '/etc/docutils.conf', # system-wide
- './docutils.conf', # project-specific
- os.path.expanduser('~/.docutils')) # user-specific
- """Docutils configuration files, using ConfigParser syntax (section
- 'options'). Later files override earlier ones."""
+ old_settings = {
+ 'pep_stylesheet': ('pep_html writer', 'stylesheet'),
+ 'pep_stylesheet_path': ('pep_html writer', 'stylesheet_path'),
+ 'pep_template': ('pep_html writer', 'template')}
+ """{old setting: (new section, new setting)} mapping, used by
+ `handle_old_config`, to convert settings from the old [options] section."""
- validation = {
- 'options':
- {'input_encoding': validate_encoding,
- 'output_encoding': validate_encoding,
- 'output_encoding_error_handler': validate_encoding_error_handler,
- 'error_encoding': validate_encoding,
- 'error_encoding_error_handler': validate_encoding_error_handler}}
- """{section: {option: validation function}} mapping, used by
- `validate_options`. Validation functions take two parameters: name and
- value. They return a (possibly modified) value, or raise an exception."""
+ old_warning = """
+The "[option]" section is deprecated. Support for old-format configuration
+files may be removed in a future Docutils release. Please revise your
+configuration files. See <http://docutils.sf.net/docs/user/config.html>,
+section "Old-Format Configuration Files".
+"""
- def read_standard_files(self, option_parser):
- self.read(self.standard_config_files, option_parser)
-
def read(self, filenames, option_parser):
- if type(filenames) in types.StringTypes:
+ if type(filenames) in (types.StringType, types.UnicodeType):
filenames = [filenames]
for filename in filenames:
CP.ConfigParser.read(self, filename)
- self.validate_options(filename, option_parser)
+ if self.has_section('options'):
+ self.handle_old_config(filename)
+ self.validate_settings(filename, option_parser)
- def validate_options(self, filename, option_parser):
- for section in self.validation.keys():
- if not self.has_section(section):
- continue
- for option in self.validation[section].keys():
- if self.has_option(section, option):
- value = self.get(section, option)
- validator = self.validation[section][option]
+ def handle_old_config(self, filename):
+ warnings.warn_explicit(self.old_warning, ConfigDeprecationWarning,
+ filename, 0)
+ options = self.get_section('options')
+ if not self.has_section('general'):
+ self.add_section('general')
+ for key, value in options.items():
+ if self.old_settings.has_key(key):
+ section, setting = self.old_settings[key]
+ if not self.has_section(section):
+ self.add_section(section)
+ else:
+ section = 'general'
+ setting = key
+ if not self.has_option(section, setting):
+ self.set(section, setting, value)
+ self.remove_section('options')
+
+ def validate_settings(self, filename, option_parser):
+ """
+ Call the validator function and implement overrides on all applicable
+ settings.
+ """
+ for section in self.sections():
+ for setting in self.options(section):
+ try:
+ option = option_parser.get_option_by_dest(setting)
+ except KeyError:
+ continue
+ if option.validator:
+ value = self.get(section, setting, raw=1)
try:
- new_value = validator(option, value)
+ new_value = option.validator(
+ setting, value, option_parser,
+ config_parser=self, config_section=section)
except Exception, error:
raise (ValueError(
'Error in config file "%s", section "[%s]":\n'
' %s: %s\n %s = %s'
% (filename, section, error.__class__.__name__,
- error, option, value)), None, sys.exc_info()[2])
- self.set(section, option, new_value)
+ error, setting, value)), None, sys.exc_info()[2])
+ self.set(section, setting, new_value)
+ if option.overrides:
+ self.set(section, option.overrides, None)
def optionxform(self, optionstr):
"""
@@ -451,21 +680,17 @@
"""
return optionstr.lower().replace('-', '_')
- def get_section(self, section, raw=0, vars=None):
+ def get_section(self, section):
"""
Return a given section as a dictionary (empty if the section
doesn't exist).
-
- All % interpolations are expanded in the return values, based on the
- defaults passed into the constructor, unless the optional argument
- `raw` is true. Additional substitutions may be provided using the
- `vars` argument, which must be a dictionary whose contents overrides
- any pre-existing defaults.
-
- The section DEFAULT is special.
"""
section_dict = {}
if self.has_section(section):
for option in self.options(section):
- section_dict[option] = self.get(section, option, raw, vars)
+ section_dict[option] = self.get(section, option, raw=1)
return section_dict
+
+
+class ConfigDeprecationWarning(DeprecationWarning):
+ """Warning for deprecated configuration file features."""
Modified: Zope3/trunk/src/docutils/io.py
===================================================================
--- Zope3/trunk/src/docutils/io.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/io.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:04 $
+# Revision: $Revision: 3138 $
+# Date: $Date: 2005-03-27 17:05:34 +0200 (Sun, 27 Mar 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -12,7 +12,10 @@
__docformat__ = 'reStructuredText'
import sys
-import locale
+try:
+ import locale
+except:
+ pass
from types import UnicodeType
from docutils import TransformSpec
@@ -27,10 +30,14 @@
default_source_path = None
- def __init__(self, source=None, source_path=None, encoding=None):
+ def __init__(self, source=None, source_path=None, encoding=None,
+ error_handler='strict'):
self.encoding = encoding
"""Text encoding for the input source."""
+ self.error_handler = error_handler
+ """Text decoding error handler."""
+
self.source = source
"""The source of input data."""
@@ -40,6 +47,9 @@
if not source_path:
self.source_path = self.default_source_path
+ self.successful_encoding = None
+ """The encoding that successfully decoded the source data."""
+
def __repr__(self):
return '%s: source=%r, source_path=%r' % (self.__class__, self.source,
self.source_path)
@@ -59,31 +69,43 @@
"""
if (self.encoding and self.encoding.lower() == 'unicode'
or isinstance(data, UnicodeType)):
- return unicode(data)
- encodings = [self.encoding, 'utf-8']
- try:
- encodings.append(locale.nl_langinfo(locale.CODESET))
- except:
- pass
- try:
- encodings.append(locale.getlocale()[1])
- except:
- pass
- try:
- encodings.append(locale.getdefaultlocale()[1])
- except:
- pass
- encodings.append('latin-1')
+ return data
+ encodings = [self.encoding]
+ if not self.encoding:
+ # Apply heuristics only if no encoding is explicitly given.
+ encodings.append('utf-8')
+ try:
+ encodings.append(locale.nl_langinfo(locale.CODESET))
+ except:
+ pass
+ try:
+ encodings.append(locale.getlocale()[1])
+ except:
+ pass
+ try:
+ encodings.append(locale.getdefaultlocale()[1])
+ except:
+ pass
+ encodings.append('latin-1')
+ error = None
+ error_details = ''
for enc in encodings:
if not enc:
continue
try:
- return unicode(data, enc)
- except (UnicodeError, LookupError):
+ decoded = unicode(data, enc, self.error_handler)
+ self.successful_encoding = enc
+ # Return decoded, removing BOMs.
+ return decoded.replace(u'\ufeff', u'')
+ except (UnicodeError, LookupError), error:
pass
+ if error is not None:
+ error_details = '\n(%s: %s)' % (error.__class__.__name__, error)
raise UnicodeError(
- 'Unable to decode input data. Tried the following encodings: %s.'
- % ', '.join([repr(enc) for enc in encodings if enc]))
+ 'Unable to decode input data. Tried the following encodings: '
+ '%s.%s'
+ % (', '.join([repr(enc) for enc in encodings if enc]),
+ error_details))
class Output(TransformSpec):
@@ -118,15 +140,34 @@
% (self.__class__, self.destination, self.destination_path))
def write(self, data):
+ """`data` is a Unicode string, to be encoded by `self.encode`."""
raise NotImplementedError
def encode(self, data):
if self.encoding and self.encoding.lower() == 'unicode':
return data
else:
- return data.encode(self.encoding, self.error_handler)
+ try:
+ return data.encode(self.encoding, self.error_handler)
+ except ValueError:
+ # ValueError is raised if there are unencodable chars
+ # in data and the error_handler isn't found.
+ if self.error_handler == 'xmlcharrefreplace':
+ # We are using xmlcharrefreplace with a Python
+ # version that doesn't support it (2.1 or 2.2), so
+ # we emulate its behavior.
+ return ''.join([self.xmlcharref_encode(char) for char in data])
+ else:
+ raise
+ def xmlcharref_encode(self, char):
+ """Emulate Python 2.3's 'xmlcharrefreplace' encoding error handler."""
+ try:
+ return char.encode(self.encoding, 'strict')
+ except UnicodeError:
+ return '&#%i;' % ord(char)
+
class FileInput(Input):
"""
@@ -134,20 +175,35 @@
"""
def __init__(self, source=None, source_path=None,
- encoding=None, autoclose=1):
+ encoding=None, error_handler='strict',
+ autoclose=1, handle_io_errors=1):
"""
:Parameters:
- `source`: either a file-like object (which is read directly), or
`None` (which implies `sys.stdin` if no `source_path` given).
- `source_path`: a path to a file, which is opened and then read.
+ - `encoding`: the expected text encoding of the input file.
+ - `error_handler`: the encoding error handler to use.
- `autoclose`: close automatically after read (boolean); always
false if `sys.stdin` is the source.
+ - `handle_io_errors`: summarize I/O errors here, and exit?
"""
- Input.__init__(self, source, source_path, encoding)
+ Input.__init__(self, source, source_path, encoding, error_handler)
self.autoclose = autoclose
+ self.handle_io_errors = handle_io_errors
if source is None:
if source_path:
- self.source = open(source_path)
+ try:
+ self.source = open(source_path)
+ except IOError, error:
+ if not handle_io_errors:
+ raise
+ print >>sys.stderr, '%s: %s' % (error.__class__.__name__,
+ error)
+ print >>sys.stderr, (
+ 'Unable to open source file for reading (%r). Exiting.'
+ % source_path)
+ sys.exit(1)
else:
self.source = sys.stdin
self.autoclose = None
@@ -158,10 +214,14 @@
pass
def read(self):
- """Read and decode a single file and return the data."""
- data = self.source.read()
- if self.autoclose:
- self.close()
+ """
+ Read and decode a single file and return the data (Unicode string).
+ """
+ try:
+ data = self.source.read()
+ finally:
+ if self.autoclose:
+ self.close()
return self.decode(data)
def close(self):
@@ -175,7 +235,8 @@
"""
def __init__(self, destination=None, destination_path=None,
- encoding=None, error_handler='strict', autoclose=1):
+ encoding=None, error_handler='strict', autoclose=1,
+ handle_io_errors=1):
"""
:Parameters:
- `destination`: either a file-like object (which is written
@@ -190,6 +251,7 @@
encoding, error_handler)
self.opened = 1
self.autoclose = autoclose
+ self.handle_io_errors = handle_io_errors
if destination is None:
if destination_path:
self.opened = None
@@ -203,7 +265,16 @@
pass
def open(self):
- self.destination = open(self.destination_path, 'w')
+ try:
+ self.destination = open(self.destination_path, 'w')
+ except IOError, error:
+ if not self.handle_io_errors:
+ raise
+ print >>sys.stderr, '%s: %s' % (error.__class__.__name__,
+ error)
+ print >>sys.stderr, ('Unable to open destination file for writing '
+ '(%r). Exiting.' % self.destination_path)
+ sys.exit(1)
self.opened = 1
def write(self, data):
@@ -211,9 +282,11 @@
output = self.encode(data)
if not self.opened:
self.open()
- self.destination.write(output)
- if self.autoclose:
- self.close()
+ try:
+ self.destination.write(output)
+ finally:
+ if self.autoclose:
+ self.close()
return output
def close(self):
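The encode() fallback above matters on Python 2.1/2.2, where data.encode(encoding, 'xmlcharrefreplace') raises ValueError because the handler is unknown; Output then encodes character by character and substitutes numeric character references for anything the target codec rejects. A standalone sketch of the equivalent behaviour (the function name is illustrative)::

    def xmlcharref_fallback(text, encoding):
        # Emulate text.encode(encoding, 'xmlcharrefreplace') one character
        # at a time, as Output.encode()/xmlcharref_encode() do above.
        chunks = []
        for char in text:
            try:
                chunks.append(char.encode(encoding, 'strict'))
            except UnicodeError:
                chunks.append('&#%i;' % ord(char))
        return ''.join(chunks)

    print xmlcharref_fallback(u'\u00a9 caf\u00e9', 'ascii')
    # prints: &#169; caf&#233;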
Modified: Zope3/trunk/src/docutils/languages/__init__.py
===================================================================
--- Zope3/trunk/src/docutils/languages/__init__.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/__init__.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,12 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:05 $
+# Revision: $Revision: 2224 $
+# Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
# Copyright: This module has been placed in the public domain.
+# Internationalization details are documented in
+# <http://docutils.sf.net/docs/howto/i18n.html>.
+
"""
This package contains modules for language-dependent features of Docutils.
"""
Added: Zope3/trunk/src/docutils/languages/af.py
===================================================================
--- Zope3/trunk/src/docutils/languages/af.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/af.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,60 @@
+# Author: Jannie Hofmeyr
+# Contact: jhsh at sun.ac.za
+# Revision: $Revision: 2224 $
+# Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Afrikaans-language mappings for language-dependent features of Docutils.
+"""
+
+__docformat__ = 'reStructuredText'
+
+labels = {
+ 'author': 'Auteur',
+ 'authors': 'Auteurs',
+ 'organization': 'Organisasie',
+ 'address': 'Adres',
+ 'contact': 'Kontak',
+ 'version': 'Weergawe',
+ 'revision': 'Revisie',
+ 'status': 'Status',
+ 'date': 'Datum',
+ 'copyright': 'Kopiereg',
+ 'dedication': 'Opdrag',
+ 'abstract': 'Opsomming',
+ 'attention': 'Aandag!',
+ 'caution': 'Wees versigtig!',
+ 'danger': '!GEVAAR!',
+ 'error': 'Fout',
+ 'hint': 'Wenk',
+ 'important': 'Belangrik',
+ 'note': 'Nota',
+ 'tip': 'Tip', # hint and tip both have the same translation: wenk
+ 'warning': 'Waarskuwing',
+ 'contents': 'Inhoud'}
+"""Mapping of node class name to label text."""
+
+bibliographic_fields = {
+ 'auteur': 'author',
+ 'auteurs': 'authors',
+ 'organisasie': 'organization',
+ 'adres': 'address',
+ 'kontak': 'contact',
+ 'weergawe': 'version',
+ 'revisie': 'revision',
+ 'status': 'status',
+ 'datum': 'date',
+ 'kopiereg': 'copyright',
+ 'opdrag': 'dedication',
+ 'opsomming': 'abstract'}
+"""Afrikaans (lowcased) to canonical name mapping for bibliographic fields."""
+
+author_separators = [';', ',']
+"""List of separator strings for the 'Authors' bibliographic field. Tried in
+order."""
Added: Zope3/trunk/src/docutils/languages/ca.py
===================================================================
--- Zope3/trunk/src/docutils/languages/ca.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/ca.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,62 @@
+# Author: Ivan Vilata i Balaguer
+# Contact: ivan at selidor.net
+# Revision: $Revision: 3276 $
+# Date: $Date: 2005-04-30 13:34:52 +0200 (Sat, 30 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Catalan-language mappings for language-dependent features of Docutils.
+"""
+
+__docformat__ = 'reStructuredText'
+
+labels = {
+ # fixed: language-dependent
+ 'author': u'Autor',
+ 'authors': u'Autors',
+ 'organization': u'Organitzaci\u00F3',
+ 'address': u'Adre\u00E7a',
+ 'contact': u'Contacte',
+ 'version': u'Versi\u00F3',
+ 'revision': u'Revisi\u00F3',
+ 'status': u'Estat',
+ 'date': u'Data',
+ 'copyright': u'Copyright',
+ 'dedication': u'Dedicat\u00F2ria',
+ 'abstract': u'Resum',
+ 'attention': u'Atenci\u00F3!',
+ 'caution': u'Compte!',
+ 'danger': u'PERILL!',
+ 'error': u'Error',
+ 'hint': u'Suggeriment',
+ 'important': u'Important',
+ 'note': u'Nota',
+ 'tip': u'Consell',
+ 'warning': u'Av\u00EDs',
+ 'contents': u'Contingut'}
+"""Mapping of node class name to label text."""
+
+bibliographic_fields = {
+ # language-dependent: fixed
+ u'autor': 'author',
+ u'autors': 'authors',
+ u'organitzaci\u00F3': 'organization',
+ u'adre\u00E7a': 'address',
+ u'contacte': 'contact',
+ u'versi\u00F3': 'version',
+ u'revisi\u00F3': 'revision',
+ u'estat': 'status',
+ u'data': 'date',
+ u'copyright': 'copyright',
+ u'dedicat\u00F2ria': 'dedication',
+ u'resum': 'abstract'}
+"""Catalan (lowcased) to canonical name mapping for bibliographic fields."""
+
+author_separators = [';', ',']
+"""List of separator strings for the 'Authors' bibliographic field. Tried in
+order."""
Added: Zope3/trunk/src/docutils/languages/cs.py
===================================================================
--- Zope3/trunk/src/docutils/languages/cs.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/cs.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,62 @@
+# Author: Marek Blaha
+# Contact: mb at dat.cz
+# Revision: $Revision: 2224 $
+# Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Czech-language mappings for language-dependent features of Docutils.
+"""
+
+__docformat__ = 'reStructuredText'
+
+labels = {
+ # fixed: language-dependent
+ 'author': u'Autor',
+ 'authors': u'Auto\u0159i',
+ 'organization': u'Organizace',
+ 'address': u'Adresa',
+ 'contact': u'Kontakt',
+ 'version': u'Verze',
+ 'revision': u'Revize',
+ 'status': u'Stav',
+ 'date': u'Datum',
+ 'copyright': u'Copyright',
+ 'dedication': u'V\u011Bnov\u00E1n\u00ED',
+ 'abstract': u'Abstrakt',
+ 'attention': u'Pozor!',
+ 'caution': u'Opatrn\u011B!',
+ 'danger': u'!NEBEZPE\u010C\u00CD!',
+ 'error': u'Chyba',
+ 'hint': u'Rada',
+ 'important': u'D\u016Fle\u017Eit\u00E9',
+ 'note': u'Pozn\u00E1mka',
+ 'tip': u'Tip',
+ 'warning': u'Varov\u00E1n\u00ED',
+ 'contents': u'Obsah'}
+"""Mapping of node class name to label text."""
+
+bibliographic_fields = {
+ # language-dependent: fixed
+ u'autor': 'author',
+ u'auto\u0159i': 'authors',
+ u'organizace': 'organization',
+ u'adresa': 'address',
+ u'kontakt': 'contact',
+ u'verze': 'version',
+ u'revize': 'revision',
+ u'stav': 'status',
+ u'datum': 'date',
+ u'copyright': 'copyright',
+ u'v\u011Bnov\u00E1n\u00ED': 'dedication',
+ u'abstrakt': 'abstract'}
+"""Czech (lowcased) to canonical name mapping for bibliographic fields."""
+
+author_separators = [';', ',']
+"""List of separator strings for the 'Authors' bibliographic field. Tried in
+order."""
Modified: Zope3/trunk/src/docutils/languages/de.py
===================================================================
--- Zope3/trunk/src/docutils/languages/de.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/de.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,14 @@
# Authors: David Goodger; Gunnar Schwant
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:05 $
+# Revision: $Revision: 2224 $
+# Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
# Copyright: This module has been placed in the public domain.
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
"""
German language mappings for language-dependent features of Docutils.
"""
Modified: Zope3/trunk/src/docutils/languages/en.py
===================================================================
--- Zope3/trunk/src/docutils/languages/en.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/en.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,14 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:05 $
+# Revision: $Revision: 2224 $
+# Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
# Copyright: This module has been placed in the public domain.
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
"""
English-language mappings for language-dependent features of Docutils.
"""
@@ -11,6 +16,7 @@
__docformat__ = 'reStructuredText'
labels = {
+ # fixed: language-dependent
'author': 'Author',
'authors': 'Authors',
'organization': 'Organization',
@@ -36,6 +42,7 @@
"""Mapping of node class name to label text."""
bibliographic_fields = {
+ # language-dependent: fixed
'author': 'author',
'authors': 'authors',
'organization': 'organization',
Added: Zope3/trunk/src/docutils/languages/eo.py
===================================================================
--- Zope3/trunk/src/docutils/languages/eo.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/eo.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,63 @@
+# Author: Marcelo Huerta San Martin
+# Contact: richieadler at users.sourceforge.net
+# Revision: $Revision: 2224 $
+# Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Esperanto-language mappings for language-dependent features of Docutils.
+"""
+
+__docformat__ = 'reStructuredText'
+
+labels = {
+ # fixed: language-dependent
+ 'author': u'A\u016dtoro',
+ 'authors': u'A\u016dtoroj',
+ 'organization': u'Organizo',
+ 'address': u'Adreso',
+ 'contact': u'Kontakto',
+ 'version': u'Versio',
+ 'revision': u'Revido',
+ 'status': u'Stato',
+ 'date': u'Dato',
+ # 'copyright': u'Kopirajto',
+ 'copyright': u'A\u016dtorrajto',
+ 'dedication': u'Dedi\u0109o',
+ 'abstract': u'Resumo',
+ 'attention': u'Atentu!',
+ 'caution': u'Zorgu!',
+ 'danger': u'DAN\u011cERO!',
+ 'error': u'Eraro',
+ 'hint': u'Spuro',
+ 'important': u'Grava',
+ 'note': u'Noto',
+ 'tip': u'Helpeto',
+ 'warning': u'Averto',
+ 'contents': u'Enhavo'}
+"""Mapping of node class name to label text."""
+
+bibliographic_fields = {
+ # language-dependent: fixed
+ 'a\u016dtoro': 'author',
+ 'a\u016dtoroj': 'authors',
+ 'organizo': 'organization',
+ 'adreso': 'address',
+ 'kontakto': 'contact',
+ 'versio': 'version',
+ 'revido': 'revision',
+ 'stato': 'status',
+ 'dato': 'date',
+ 'a\u016dtorrajto': 'copyright',
+ 'dedi\u0109o': 'dedication',
+ 'resumo': 'abstract'}
+"""Esperanto (lowcased) to canonical name mapping for bibliographic fields."""
+
+author_separators = [';', ',']
+"""List of separator strings for the 'Authors' bibliographic field. Tried in
+order."""
Modified: Zope3/trunk/src/docutils/languages/es.py
===================================================================
--- Zope3/trunk/src/docutils/languages/es.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/es.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,15 @@
-# Author: Marcelo Huerta San Martín
+# -*- coding: iso-8859-1 -*-
+# Author: Marcelo Huerta San Martín
# Contact: mghsm at uol.com.ar
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:05 $
+# Revision: $Revision: 2224 $
+# Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
# Copyright: This module has been placed in the public domain.
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
"""
Spanish-language mappings for language-dependent features of Docutils.
"""
Added: Zope3/trunk/src/docutils/languages/fi.py
===================================================================
--- Zope3/trunk/src/docutils/languages/fi.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/fi.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,62 @@
+# Author: Asko Soukka
+# Contact: asko.soukka at iki.fi
+# Revision: $Revision: 2609 $
+# Date: $Date: 2004-09-13 21:25:33 +0200 (Mon, 13 Sep 2004) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Finnish-language mappings for language-dependent features of Docutils.
+"""
+
+__docformat__ = 'reStructuredText'
+
+labels = {
+ # fixed: language-dependent
+ u'author': u'Tekij\u00e4',
+ u'authors': u'Tekij\u00e4t',
+ u'organization': u'Yhteis\u00f6',
+ u'address': u'Osoite',
+ u'contact': u'Yhteystiedot',
+ u'version': u'Versio',
+ u'revision': u'Vedos',
+ u'status': u'Tila',
+ u'date': u'P\u00e4iv\u00e4ys',
+ u'copyright': u'Tekij\u00e4noikeudet',
+ u'dedication': u'Omistuskirjoitus',
+ u'abstract': u'Tiivistelm\u00e4',
+ u'attention': u'Huomio!',
+ u'caution': u'Varo!',
+ u'danger': u'!VAARA!',
+ u'error': u'Virhe',
+ u'hint': u'Vihje',
+ u'important': u'T\u00e4rke\u00e4\u00e4',
+ u'note': u'Huomautus',
+ u'tip': u'Neuvo',
+ u'warning': u'Varoitus',
+ u'contents': u'Sis\u00e4llys'}
+"""Mapping of node class name to label text."""
+
+bibliographic_fields = {
+ # language-dependent: fixed
+ u'tekij\u00e4': u'author',
+ u'tekij\u00e4t': u'authors',
+ u'yhteis\u00f6': u'organization',
+ u'osoite': u'address',
+ u'yhteystiedot': u'contact',
+ u'versio': u'version',
+ u'vedos': u'revision',
+ u'tila': u'status',
+ u'p\u00e4iv\u00e4ys': u'date',
+ u'tekij\u00e4noikeudet': u'copyright',
+ u'omistuskirjoitus': u'dedication',
+ u'tiivistelm\u00e4': u'abstract'}
+"""Finnish (lowcased) to canonical name mapping for bibliographic fields."""
+
+author_separators = [';', ',']
+"""List of separator strings for the 'Authors' bibliographic field. Tried in
+order."""
Modified: Zope3/trunk/src/docutils/languages/fr.py
===================================================================
--- Zope3/trunk/src/docutils/languages/fr.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/fr.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,14 @@
# Author: Stefane Fermigier
# Contact: sf at fermigier.com
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:05 $
+# Revision: $Revision: 2224 $
+# Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
# Copyright: This module has been placed in the public domain.
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
"""
French-language mappings for language-dependent features of Docutils.
"""
@@ -32,7 +37,7 @@
u'note': u'Note',
u'tip': u'Astuce',
u'warning': u'Avis',
- u'contents': u'Contenu'}
+ u'contents': u'Sommaire'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
Modified: Zope3/trunk/src/docutils/languages/it.py
===================================================================
--- Zope3/trunk/src/docutils/languages/it.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/it.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,14 @@
# Author: Nicola Larosa
# Contact: docutils at tekNico.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:05 $
+# Revision: $Revision: 2944 $
+# Date: $Date: 2005-01-20 13:11:50 +0100 (Thu, 20 Jan 2005) $
# Copyright: This module has been placed in the public domain.
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
"""
Italian-language mappings for language-dependent features of Docutils.
"""
@@ -40,7 +45,7 @@
'autori': 'authors',
'organizzazione': 'organization',
'indirizzo': 'address',
- 'contatti': 'contact',
+ 'contatto': 'contact',
'versione': 'version',
'revisione': 'revision',
'status': 'status',
Added: Zope3/trunk/src/docutils/languages/nl.py
===================================================================
--- Zope3/trunk/src/docutils/languages/nl.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/nl.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,62 @@
+# Author: Martijn Pieters
+# Contact: mjpieters at users.sourceforge.net
+# Revision: $Revision: 3058 $
+# Date: $Date: 2005-03-18 21:09:22 +0100 (Fri, 18 Mar 2005) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Dutch-language mappings for language-dependent features of Docutils.
+"""
+
+__docformat__ = 'reStructuredText'
+
+labels = {
+ # fixed: language-dependent
+ 'author': 'Auteur',
+ 'authors': 'Auteurs',
+ 'organization': 'Organisatie',
+ 'address': 'Adres',
+ 'contact': 'Contact',
+ 'version': 'Versie',
+ 'revision': 'Revisie',
+ 'status': 'Status',
+ 'date': 'Datum',
+ 'copyright': 'Copyright',
+ 'dedication': 'Toewijding',
+ 'abstract': 'Samenvatting',
+ 'attention': 'Attentie!',
+ 'caution': 'Let op!',
+ 'danger': '!GEVAAR!',
+ 'error': 'Fout',
+ 'hint': 'Hint',
+ 'important': 'Belangrijk',
+ 'note': 'Opmerking',
+ 'tip': 'Tip',
+ 'warning': 'Waarschuwing',
+ 'contents': 'Inhoud'}
+"""Mapping of node class name to label text."""
+
+bibliographic_fields = {
+ # language-dependent: fixed
+ 'auteur': 'author',
+ 'auteurs': 'authors',
+ 'organisatie': 'organization',
+ 'adres': 'address',
+ 'contact': 'contact',
+ 'versie': 'version',
+ 'revisie': 'revision',
+ 'status': 'status',
+ 'datum': 'date',
+ 'copyright': 'copyright',
+ 'toewijding': 'dedication',
+ 'samenvatting': 'abstract'}
+"""Dutch (lowcased) to canonical name mapping for bibliographic fields."""
+
+author_separators = [';', ',']
+"""List of separator strings for the 'Authors' bibliographic field. Tried in
+order."""
Added: Zope3/trunk/src/docutils/languages/pt_br.py
===================================================================
--- Zope3/trunk/src/docutils/languages/pt_br.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/pt_br.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,62 @@
+# Author: David Goodger
+# Contact: goodger at users.sourceforge.net
+# Revision: $Revision: 2333 $
+# Date: $Date: 2004-06-20 22:51:22 +0200 (Sun, 20 Jun 2004) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Brazilian Portuguese-language mappings for language-dependent features of Docutils.
+"""
+
+__docformat__ = 'reStructuredText'
+
+labels = {
+ # fixed: language-dependent
+ 'author': u'Autor',
+ 'authors': u'Autores',
+ 'organization': u'Organiza\u00E7\u00E3o',
+ 'address': u'Endere\u00E7o',
+ 'contact': u'Contato',
+ 'version': u'Vers\u00E3o',
+ 'revision': u'Revis\u00E3o',
+ 'status': u'Estado',
+ 'date': u'Data',
+ 'copyright': u'Copyright',
+ 'dedication': u'Dedicat\u00F3ria',
+ 'abstract': u'Resumo',
+ 'attention': u'Atten\u00E7\u00E3o!',
+ 'caution': u'Cuidado!',
+ 'danger': u'PERIGO!',
+ 'error': u'Erro',
+ 'hint': u'Sugest\u00E3o',
+ 'important': u'Importante',
+ 'note': u'Nota',
+ 'tip': u'Dica',
+ 'warning': u'Aviso',
+ 'contents': u'Sum\u00E1rio'}
+"""Mapping of node class name to label text."""
+
+bibliographic_fields = {
+ # language-dependent: fixed
+ u'autor': 'author',
+ u'autores': 'authors',
+ u'organiza\u00E7\u00E3o': 'organization',
+ u'endere\u00E7o': 'address',
+ u'contato': 'contact',
+ u'vers\u00E3o': 'version',
+ u'revis\u00E3o': 'revision',
+ u'estado': 'status',
+ u'data': 'date',
+ u'copyright': 'copyright',
+ u'dedicat\u00F3ria': 'dedication',
+ u'resumo': 'abstract'}
+"""Brazilian Portuguese (lowcased) to canonical name mapping for bibliographic fields."""
+
+author_separators = [';', ',']
+"""List of separator strings for the 'Authors' bibliographic field. Tried in
+order."""
Added: Zope3/trunk/src/docutils/languages/ru.py
===================================================================
--- Zope3/trunk/src/docutils/languages/ru.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/ru.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,68 @@
+# Author: Roman Suzi
+# Contact: rnd at onego.ru
+# Revision: $Revision: 2999 $
+# Date: $Date: 2005-03-03 20:35:02 +0100 (Thu, 03 Mar 2005) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Russian-language mappings for language-dependent features of Docutils.
+"""
+
+__docformat__ = 'reStructuredText'
+
+labels = {
+ u'abstract': u'\u0410\u043d\u043d\u043e\u0442\u0430\u0446\u0438\u044f',
+ u'address': u'\u0410\u0434\u0440\u0435\u0441',
+ u'attention': u'\u0412\u043d\u0438\u043c\u0430\u043d\u0438\u0435!',
+ u'author': u'\u0410\u0432\u0442\u043e\u0440',
+ u'authors': u'\u0410\u0432\u0442\u043e\u0440\u044b',
+ u'caution': u'\u041e\u0441\u0442\u043e\u0440\u043e\u0436\u043d\u043e!',
+ u'contact': u'\u041a\u043e\u043d\u0442\u0430\u043a\u0442',
+ u'contents':
+ u'\u0421\u043e\u0434\u0435\u0440\u0436\u0430\u043d\u0438\u0435',
+ u'copyright': u'\u041f\u0440\u0430\u0432\u0430 '
+ u'\u043a\u043e\u043f\u0438\u0440\u043e\u0432\u0430\u043d\u0438\u044f',
+ u'danger': u'\u041e\u041f\u0410\u0421\u041d\u041e!',
+ u'date': u'\u0414\u0430\u0442\u0430',
+ u'dedication':
+ u'\u041f\u043e\u0441\u0432\u044f\u0449\u0435\u043d\u0438\u0435',
+ u'error': u'\u041e\u0448\u0438\u0431\u043a\u0430',
+ u'hint': u'\u0421\u043e\u0432\u0435\u0442',
+ u'important': u'\u0412\u0430\u0436\u043d\u043e',
+ u'note': u'\u041f\u0440\u0438\u043c\u0435\u0447\u0430\u043d\u0438\u0435',
+ u'organization':
+ u'\u041e\u0440\u0433\u0430\u043d\u0438\u0437\u0430\u0446\u0438\u044f',
+ u'revision': u'\u0420\u0435\u0434\u0430\u043a\u0446\u0438\u044f',
+ u'status': u'\u0421\u0442\u0430\u0442\u0443\u0441',
+ u'tip': u'\u041f\u043e\u0434\u0441\u043a\u0430\u0437\u043a\u0430',
+ u'version': u'\u0412\u0435\u0440\u0441\u0438\u044f',
+ u'warning': u'\u041f\u0440\u0435\u0434\u0443\u043f\u0440\u0435\u0436'
+ u'\u0434\u0435\u043d\u0438\u0435'}
+"""Mapping of node class name to label text."""
+
+bibliographic_fields = {
+ u'\u0430\u043d\u043d\u043e\u0442\u0430\u0446\u0438\u044f': u'abstract',
+ u'\u0430\u0434\u0440\u0435\u0441': u'address',
+ u'\u0430\u0432\u0442\u043e\u0440': u'author',
+ u'\u0430\u0432\u0442\u043e\u0440\u044b': u'authors',
+ u'\u043a\u043e\u043d\u0442\u0430\u043a\u0442': u'contact',
+ u'\u043f\u0440\u0430\u0432\u0430 \u043a\u043e\u043f\u0438\u0440\u043e'
+ u'\u0432\u0430\u043d\u0438\u044f': u'copyright',
+ u'\u0434\u0430\u0442\u0430': u'date',
+ u'\u043f\u043e\u0441\u0432\u044f\u0449\u0435\u043d\u0438\u0435':
+ u'dedication',
+ u'\u043e\u0440\u0433\u0430\u043d\u0438\u0437\u0430\u0446\u0438\u044f':
+ u'organization',
+ u'\u0440\u0435\u0434\u0430\u043a\u0446\u0438\u044f': u'revision',
+ u'\u0441\u0442\u0430\u0442\u0443\u0441': u'status',
+ u'\u0432\u0435\u0440\u0441\u0438\u044f': u'version'}
+"""Russian (lowcased) to canonical name mapping for bibliographic fields."""
+
+author_separators = [';', ',']
+"""List of separator strings for the 'Authors' bibliographic field. Tried in
+order."""
Modified: Zope3/trunk/src/docutils/languages/sk.py
===================================================================
--- Zope3/trunk/src/docutils/languages/sk.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/sk.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,14 @@
# :Author: Miroslav Vasko
# :Contact: zemiak at zoznam.sk
-# :Revision: $Revision: 1.1 $
-# :Date: $Date: 2003/07/30 20:14:05 $
+# :Revision: $Revision: 2224 $
+# :Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
# :Copyright: This module has been placed in the public domain.
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
"""
Slovak-language mappings for language-dependent features of Docutils.
"""
Modified: Zope3/trunk/src/docutils/languages/sv.py
===================================================================
--- Zope3/trunk/src/docutils/languages/sv.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/sv.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,14 @@
# Author: Adam Chodorowski
# Contact: chodorowski at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:05 $
+# Revision: $Revision: 2224 $
+# Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
# Copyright: This module has been placed in the public domain.
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
"""
Swedish language mappings for language-dependent features of Docutils.
"""
@@ -38,6 +43,7 @@
bibliographic_fields = {
# 'Author' and 'Authors' identical in Swedish; assume the plural:
u'f\u00f6rfattare': 'authors',
+ u' n/a': 'author',
u'organisation': 'organization',
u'adress': 'address',
u'kontakt': 'contact',
Added: Zope3/trunk/src/docutils/languages/zh_tw.py
===================================================================
--- Zope3/trunk/src/docutils/languages/zh_tw.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/languages/zh_tw.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,64 @@
+# Author: Joe YS Jaw
+# Contact: joeysj at users.sourceforge.net
+# Revision: $Revision: 2608 $
+# Date: $Date: 2004-09-13 21:09:56 +0200 (Mon, 13 Sep 2004) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Traditional Chinese language mappings for language-dependent features of Docutils.
+"""
+
+__docformat__ = 'reStructuredText'
+
+labels = {
+ # fixed: language-dependent
+ 'author': u'\u4f5c\u8005', # 'Author',
+ 'authors': u'\u4f5c\u8005\u7fa4', # 'Authors',
+ 'organization': u'\u7d44\u7e54', # 'Organization',
+ 'address': u'\u5730\u5740', # 'Address',
+ 'contact': u'\u9023\u7d61', # 'Contact',
+ 'version': u'\u7248\u672c', # 'Version',
+ 'revision': u'\u4fee\u8a02', # 'Revision',
+ 'status': u'\u72c0\u614b', # 'Status',
+ 'date': u'\u65e5\u671f', # 'Date',
+ 'copyright': u'\u7248\u6b0a', # 'Copyright',
+ 'dedication': u'\u984c\u737b', # 'Dedication',
+ 'abstract': u'\u6458\u8981', # 'Abstract',
+ 'attention': u'\u6ce8\u610f\uff01', # 'Attention!',
+ 'caution': u'\u5c0f\u5fc3\uff01', # 'Caution!',
+ 'danger': u'\uff01\u5371\u96aa\uff01', # '!DANGER!',
+ 'error': u'\u932f\u8aa4', # 'Error',
+ 'hint': u'\u63d0\u793a', # 'Hint',
+ 'important': u'\u91cd\u8981', # 'Important',
+ 'note': u'\u8a3b\u89e3', # 'Note',
+ 'tip': u'\u79d8\u8a23', # 'Tip',
+ 'warning': u'\u8b66\u544a', # 'Warning',
+ 'contents': u'\u76ee\u9304' # 'Contents'
+}
+"""Mapping of node class name to label text."""
+
+bibliographic_fields = {
+ # language-dependent: fixed
+ 'author': 'author',
+ 'authors': 'authors',
+ 'organization': 'organization',
+ 'address': 'address',
+ 'contact': 'contact',
+ 'version': 'version',
+ 'revision': 'revision',
+ 'status': 'status',
+ 'date': 'date',
+ 'copyright': 'copyright',
+ 'dedication': 'dedication',
+ 'abstract': 'abstract'}
+"""Traditional Chinese to canonical name mapping for bibliographic fields."""
+
+author_separators = [u'\uff1b', u'\uff0c', u'\u3001',
+ ';', ',']
+"""List of separator strings for the 'Authors' bibliographic field. Tried in
+order."""
Modified: Zope3/trunk/src/docutils/nodes.py
===================================================================
--- Zope3/trunk/src/docutils/nodes.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/nodes.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:04 $
+# Revision: $Revision: 3358 $
+# Date: $Date: 2005-05-21 02:00:25 +0200 (Sat, 21 May 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -18,7 +18,7 @@
``isinstance(node, base_class)`` to determine the position of the node in the
hierarchy.
-.. _DTD: http://docutils.sourceforge.net/spec/docutils.dtd
+.. _DTD: http://docutils.sourceforge.net/docs/ref/docutils.dtd
"""
__docformat__ = 'reStructuredText'
@@ -26,6 +26,8 @@
import sys
import os
import re
+import copy
+import warnings
import xml.dom.minidom
from types import IntType, SliceType, StringType, UnicodeType, \
TupleType, ListType
@@ -69,7 +71,11 @@
return self._dom_node(domroot)
def pformat(self, indent=' ', level=0):
- """Return an indented pseudo-XML representation, for test purposes."""
+ """
+ Return an indented pseudo-XML representation, for test purposes.
+
+ Override in subclasses.
+ """
raise NotImplementedError
def copy(self):
@@ -87,12 +93,11 @@
def walk(self, visitor):
"""
- Traverse a tree of `Node` objects, calling ``visit_...`` methods of
- `visitor` when entering each node. If there is no
- ``visit_particular_node`` method for a node of type
- ``particular_node``, the ``unknown_visit`` method is called. (The
- `walkabout()` method is similar, except it also calls ``depart_...``
- methods before exiting each node.)
+ Traverse a tree of `Node` objects, calling the
+ `dispatch_visit()` method of `visitor` when entering each
+ node. (The `walkabout()` method is similar, except it also
+ calls the `dispatch_departure()` method before exiting each
+ node.)
This tree traversal supports limited in-place tree
modifications. Replacing one node with one or more nodes is
@@ -100,23 +105,23 @@
or replaced occurs after the current node, the old node will
still be traversed, and any new nodes will not.
- Within ``visit_...`` methods (and ``depart_...`` methods for
+ Within ``visit`` methods (and ``depart`` methods for
`walkabout()`), `TreePruningException` subclasses may be raised
(`SkipChildren`, `SkipSiblings`, `SkipNode`, `SkipDeparture`).
Parameter `visitor`: A `NodeVisitor` object, containing a
- ``visit_...`` method for each `Node` subclass encountered.
+ ``visit`` implementation for each `Node` subclass encountered.
"""
- name = 'visit_' + self.__class__.__name__
- method = getattr(visitor, name, visitor.unknown_visit)
- visitor.document.reporter.debug(name, category='nodes.Node.walk')
+ visitor.document.reporter.debug(
+ 'docutils.nodes.Node.walk calling dispatch_visit for %s'
+ % self.__class__.__name__)
try:
- method(self)
+ visitor.dispatch_visit(self)
except (SkipChildren, SkipNode):
return
except SkipDeparture: # not applicable; ignore
pass
- children = self.get_children()
+ children = self.children
try:
for child in children[:]:
child.walk(visitor)
@@ -125,26 +130,26 @@
def walkabout(self, visitor):
"""
- Perform a tree traversal similarly to `Node.walk()` (which see),
- except also call ``depart_...`` methods before exiting each node. If
- there is no ``depart_particular_node`` method for a node of type
- ``particular_node``, the ``unknown_departure`` method is called.
+ Perform a tree traversal similarly to `Node.walk()` (which
+ see), except also call the `dispatch_departure()` method
+ before exiting each node.
- Parameter `visitor`: A `NodeVisitor` object, containing ``visit_...``
- and ``depart_...`` methods for each `Node` subclass encountered.
+ Parameter `visitor`: A `NodeVisitor` object, containing a
+ ``visit`` and ``depart`` implementation for each `Node`
+ subclass encountered.
"""
call_depart = 1
- name = 'visit_' + self.__class__.__name__
- method = getattr(visitor, name, visitor.unknown_visit)
- visitor.document.reporter.debug(name, category='nodes.Node.walkabout')
+ visitor.document.reporter.debug(
+ 'docutils.nodes.Node.walkabout calling dispatch_visit for %s'
+ % self.__class__.__name__)
try:
try:
- method(self)
+ visitor.dispatch_visit(self)
except SkipNode:
return
except SkipDeparture:
call_depart = 0
- children = self.get_children()
+ children = self.children
try:
for child in children[:]:
child.walkabout(visitor)
@@ -153,13 +158,85 @@
except SkipChildren:
pass
if call_depart:
- name = 'depart_' + self.__class__.__name__
- method = getattr(visitor, name, visitor.unknown_departure)
visitor.document.reporter.debug(
- name, category='nodes.Node.walkabout')
- method(self)
+ 'docutils.nodes.Node.walkabout calling dispatch_departure '
+ 'for %s' % self.__class__.__name__)
+ visitor.dispatch_departure(self)
+ def traverse(self, condition=None,
+ include_self=1, descend=1, siblings=0, ascend=0):
+ """
+ Return an iterable containing
+ * self (if include_self is true)
+ * all descendants in tree traversal order (if descend is true)
+ * all siblings (if siblings is true) and their descendants (if
+ also descend is true)
+ * the siblings of the parent (if ascend is true) and their
+ descendants (if also descend is true), and so on
+
+ If ascend is true, assume siblings to be true as well.
+
+ For example, given the following tree::
+
+ <paragraph>
+ <emphasis> <--- emphasis.traverse() and
+ <strong> <--- strong.traverse() are called.
+ Foo
+ Bar
+ <reference name="Baz" refid="baz">
+ Baz
+
+ Then list(emphasis.traverse()) equals ::
+
+ [<emphasis>, <strong>, <#text: Foo>, <#text: Bar>]
+
+ and list(strong.traverse(ascend=1)) equals ::
+
+ [<strong>, <#text: Foo>, <#text: Bar>, <reference>, <#text: Baz>]
+ """
+ r = []
+ if ascend:
+ siblings=1
+ if include_self and (condition is None or condition(self)):
+ r.append(self)
+ if descend and len(self.children):
+ for child in self:
+ r.extend(child.traverse(
+ include_self=1, descend=1, siblings=0, ascend=0,
+ condition=condition))
+ if siblings or ascend:
+ node = self
+ while node.parent:
+ index = node.parent.index(node)
+ for sibling in node.parent[index+1:]:
+ r.extend(sibling.traverse(include_self=1, descend=descend,
+ siblings=0, ascend=0,
+ condition=condition))
+ if not ascend:
+ break
+ else:
+ node = node.parent
+ return r
+
+
+ def next_node(self, condition=None,
+ include_self=0, descend=1, siblings=0, ascend=0):
+ """
+ Return the first node in the iterable returned by traverse(),
+ or None if the iterable is empty.
+
+ Parameter list is the same as of traverse. Note that
+ include_self defaults to 0, though.
+ """
+ iterable = self.traverse(condition=condition,
+ include_self=include_self, descend=descend,
+ siblings=siblings, ascend=ascend)
+ try:
+ return iterable[0]
+ except IndexError:
+ return None
+
class Text(Node, UserString):
"""
@@ -170,6 +247,9 @@
tagname = '#text'
+ children = ()
+ """Text nodes have no children, and cannot have children."""
+
def __init__(self, data, rawsource=''):
UserString.__init__(self, data)
@@ -207,11 +287,7 @@
result.append(indent + line + '\n')
return ''.join(result)
- def get_children(self):
- """Text nodes have no children. Return []."""
- return []
-
class Element(Node):
"""
@@ -223,6 +299,12 @@
element['att'] = 'value'
+ There are two special attributes: 'ids' and 'names'. Both are
+ lists of unique identifiers, and names serve as human interfaces
+ to IDs. Names are case- and whitespace-normalized (see the
+ fully_normalize_name() function), and IDs conform to the regular
+ expression ``[a-z](-?[a-z0-9]+)*`` (see the make_id() function).
+
Elements also emulate lists for child nodes (element nodes and/or text
nodes), indexing by integer. To get the first child node, use::
@@ -243,6 +325,10 @@
This is equivalent to ``element.extend([node1, node2])``.
"""
+ attr_defaults = {'ids': [], 'classes': [], 'names': [],
+ 'dupnames': [], 'backrefs': []}
+ """Default attributes."""
+
tagname = None
"""The element generic identifier. If None, it is set as an instance
attribute to the name of the class."""
@@ -259,7 +345,7 @@
self.extend(children) # maintain parent info
- self.attributes = {}
+ self.attributes = copy.deepcopy(self.attr_defaults)
"""Dictionary of attribute {name: value}."""
for att, value in attributes.items():
@@ -270,7 +356,7 @@
def _dom_node(self, domroot):
element = domroot.createElement(self.tagname)
- for attribute, value in self.attributes.items():
+ for attribute, value in self.attlist():
if isinstance(value, ListType):
value = ' '.join(['%s' % v for v in value])
element.setAttribute(attribute, '%s' % value)
@@ -285,21 +371,21 @@
if len(data) > 60:
data = data[:56] + ' ...'
break
- if self.hasattr('name'):
+ if self['names']:
return '<%s "%s": %s>' % (self.__class__.__name__,
- self.attributes['name'], data)
+ '; '.join(self['names']), data)
else:
return '<%s: %s>' % (self.__class__.__name__, data)
def shortrepr(self):
- if self.hasattr('name'):
+ if self['names']:
return '<%s "%s"...>' % (self.__class__.__name__,
- self.attributes['name'])
+ '; '.join(self['names']))
else:
return '<%s...>' % self.tagname
def __str__(self):
- return unicode(self).encode('raw_unicode_escape')
+ return self.__unicode__().encode('raw_unicode_escape')
def __unicode__(self):
if self.children:
@@ -380,20 +466,24 @@
def __iadd__(self, other):
"""Append a node or a list of nodes to `self.children`."""
if isinstance(other, Node):
- self.setup_child(other)
- self.children.append(other)
+ self.append(other)
elif other is not None:
- for node in other:
- self.setup_child(node)
- self.children.extend(other)
+ self.extend(other)
return self
def astext(self):
return self.child_text_separator.join(
[child.astext() for child in self.children])
+ def non_default_attributes(self):
+ atts = {}
+ for key, value in self.attributes.items():
+ if self.is_not_default(key):
+ atts[key] = value
+ return atts
+
def attlist(self):
- attlist = self.attributes.items()
+ attlist = self.non_default_attributes().items()
attlist.sort()
return attlist
@@ -418,8 +508,7 @@
def extend(self, item):
for node in item:
- self.setup_child(node)
- self.children.extend(item)
+ self.append(node)
def insert(self, index, item):
if isinstance(item, Node):
@@ -437,6 +526,15 @@
def index(self, item):
return self.children.index(item)
+ def is_not_default(self, key):
+ try:
+ return self[key] != self.attr_defaults[key]
+ except KeyError:
+ return 1
+
+ def clear(self):
+ self.children = []
+
def replace(self, old, new):
"""Replace one child `Node` with another child or children."""
index = self.index(old)
@@ -480,12 +578,10 @@
if not isinstance(childclass, TupleType):
childclass = (childclass,)
for index in range(start, min(len(self), end)):
- match = 0
for c in childclass:
if isinstance(self.children[index], c):
- match = 1
break
- if not match:
+ else:
return index
return None
@@ -494,25 +590,48 @@
[child.pformat(indent, level+1)
for child in self.children])
- def get_children(self):
- """Return this element's children."""
- return self.children
-
def copy(self):
return self.__class__(**self.attributes)
def set_class(self, name):
- """Add a new name to the "class" attribute."""
- self.attributes['class'] = (self.attributes.get('class', '') + ' '
- + name.lower()).strip()
+ """Add a new class to the "classes" attribute."""
+ warnings.warn('docutils.nodes.Element.set_class deprecated; '
+ "append to Element['classes'] list attribute directly",
+ DeprecationWarning, stacklevel=2)
+ assert ' ' not in name
+ self['classes'].append(name.lower())
+ def note_referenced_by(self, name=None, id=None):
+ """Note that this Element has been referenced by its name
+ `name` or id `id`."""
+ self.referenced = 1
+ # Element.expect_referenced_by_* dictionaries map names or ids
+ # to nodes whose ``referenced`` attribute is set to true as
+ # soon as this node is referenced by the given name or id.
+ # Needed for target propagation.
+ by_name = getattr(self, 'expect_referenced_by_name', {}).get(name)
+ by_id = getattr(self, 'expect_referenced_by_id', {}).get(id)
+ if by_name:
+ assert name is not None
+ by_name.referenced = 1
+ if by_id:
+ assert id is not None
+ by_id.referenced = 1
+
class TextElement(Element):
"""
An element which directly contains text.
- Its children are all Text or TextElement nodes.
+ Its children are all `Text` or `Inline` subclass nodes. You can
+ check whether an element's context is inline simply by checking whether
+ its immediate parent is a `TextElement` instance (including subclasses).
+ This is handy for nodes like `image` that can appear both inline and as
+ standalone body elements.
+
+ If passing children to `__init__()`, make sure to set `text` to
+ ``''`` or some other suitable value.
"""
child_text_separator = ''
@@ -548,7 +667,7 @@
class BackLinkable:
def add_backref(self, refid):
- self.setdefault('backrefs', []).append(refid)
+ self['backrefs'].append(refid)
# ====================
@@ -559,15 +678,12 @@
class Titular: pass
-class PreDecorative:
- """Category of Node which may occur before Decorative Nodes."""
-
-class PreBibliographic(PreDecorative):
+class PreBibliographic:
"""Category of Node which may occur before Bibliographic Nodes."""
-class Bibliographic(PreDecorative): pass
+class Bibliographic: pass
-class Decorative: pass
+class Decorative(PreBibliographic): pass
class Structural: pass
@@ -575,14 +691,15 @@
class General(Body): pass
-class Sequential(Body): pass
+class Sequential(Body):
+ """List-like elements."""
class Admonition(Body): pass
class Special(Body):
"""Special internal body elements."""
-class Invisible:
+class Invisible(PreBibliographic):
"""Internal elements that don't appear in output."""
class Part: pass
@@ -705,6 +822,9 @@
self.transformer = docutils.transforms.Transformer(self)
"""Storage for transforms to be applied to this document."""
+ self.decoration = None
+ """Document's `decoration` node."""
+
self.document = self
def asdom(self, dom=xml.dom.minidom):
@@ -714,21 +834,23 @@
return domroot
def set_id(self, node, msgnode=None):
- if node.has_key('id'):
- id = node['id']
+ for id in node['ids']:
if self.ids.has_key(id) and self.ids[id] is not node:
msg = self.reporter.severe('Duplicate ID: "%s".' % id)
if msgnode != None:
msgnode += msg
- else:
- if node.has_key('name'):
- id = make_id(node['name'])
+ if not node['ids']:
+ for name in node['names']:
+ id = self.settings.id_prefix + make_id(name)
+ if id and not self.ids.has_key(id):
+ break
else:
id = ''
- while not id or self.ids.has_key(id):
- id = 'id%s' % self.id_start
- self.id_start += 1
- node['id'] = id
+ while not id or self.ids.has_key(id):
+ id = (self.settings.id_prefix +
+ self.settings.auto_id_prefix + str(self.id_start))
+ self.id_start += 1
+ node['ids'].append(id)
self.ids[id] = node
return id
@@ -763,8 +885,7 @@
both old and new targets are external and refer to identical URIs.
The new target is invalidated regardless.
"""
- if node.has_key('name'):
- name = node['name']
+ for name in node['names']:
if self.nameids.has_key(name):
self.set_duplicate_name_id(node, id, name, msgnode, explicit)
else:
@@ -782,30 +903,30 @@
old_node = self.ids[old_id]
if node.has_key('refuri'):
refuri = node['refuri']
- if old_node.has_key('name') \
+ if old_node['names'] \
and old_node.has_key('refuri') \
and old_node['refuri'] == refuri:
level = 1 # just inform if refuri's identical
if level > 1:
- dupname(old_node)
+ dupname(old_node, name)
self.nameids[name] = None
msg = self.reporter.system_message(
level, 'Duplicate explicit target name: "%s".' % name,
backrefs=[id], base_node=node)
if msgnode != None:
msgnode += msg
- dupname(node)
+ dupname(node, name)
else:
self.nameids[name] = id
if old_id is not None:
old_node = self.ids[old_id]
- dupname(old_node)
+ dupname(old_node, name)
else:
if old_id is not None and not old_explicit:
self.nameids[name] = None
old_node = self.ids[old_id]
- dupname(old_node)
- dupname(node)
+ dupname(old_node, name)
+ dupname(node, name)
if not explicit or (not old_explicit and old_id is not None):
msg = self.reporter.info(
'Duplicate implicit target name: "%s".' % name,
@@ -816,6 +937,7 @@
def has_name(self, name):
return self.nameids.has_key(name)
+ # "note" here is an imperative verb: "take note of".
def note_implicit_target(self, target, msgnode=None):
id = self.set_id(target, msgnode)
self.set_name_id_map(target, id, msgnode, explicit=None)
@@ -838,7 +960,7 @@
def note_indirect_target(self, target):
self.indirect_targets.append(target)
- if target.has_key('name'):
+ if target['names']:
self.note_refname(target)
def note_anonymous_target(self, target):
@@ -882,7 +1004,8 @@
self.note_refname(ref)
def note_substitution_def(self, subdef, def_name, msgnode=None):
- name = subdef['name'] = whitespace_normalize_name(def_name)
+ name = whitespace_normalize_name(def_name)
+ subdef['names'].append(name)
if self.substitution_defs.has_key(name):
msg = self.reporter.error(
'Duplicate substitution definition name: "%s".' % name,
@@ -890,7 +1013,7 @@
if msgnode != None:
msgnode += msg
oldnode = self.substitution_defs[name]
- dupname(oldnode)
+ dupname(oldnode, name)
# keep only the last definition:
self.substitution_defs[name] = subdef
# case-insensitive mapping:
@@ -920,7 +1043,17 @@
return self.__class__(self.settings, self.reporter,
**self.attributes)
+ def get_decoration(self):
+ if not self.decoration:
+ self.decoration = decoration()
+ index = self.first_child_not_matching_class(Titular)
+ if index is None:
+ self.append(self.decoration)
+ else:
+ self.insert(index, self.decoration)
+ return self.decoration
+
# ================
# Title Elements
# ================
@@ -951,7 +1084,19 @@
# Decorative Elements
# =====================
-class decoration(Decorative, Element): pass
+class decoration(Decorative, Element):
+
+ def get_header(self):
+ if not len(self.children) or not isinstance(self.children[0], header):
+ self.insert(0, header())
+ return self.children[0]
+
+ def get_footer(self):
+ if not len(self.children) or not isinstance(self.children[-1], footer):
+ self.append(footer())
+ return self.children[-1]
+
+
class header(Decorative, Element): pass
class footer(Decorative, Element): pass
@@ -1002,6 +1147,7 @@
# ===============
class paragraph(General, TextElement): pass
+class compound(General, Element): pass
class bullet_list(Sequential, Element): pass
class enumerated_list(Sequential, Element): pass
class list_item(Part, Element): pass
@@ -1044,7 +1190,14 @@
class description(Part, Element): pass
class literal_block(General, FixedTextElement): pass
class doctest_block(General, FixedTextElement): pass
-class line_block(General, FixedTextElement): pass
+class line_block(General, Element): pass
+
+
+class line(Part, TextElement):
+
+ indent = None
+
+
class block_quote(General, Element): pass
class attribution(Part, TextElement): pass
class attention(Admonition, Element): pass
@@ -1057,11 +1210,11 @@
class hint(Admonition, Element): pass
class warning(Admonition, Element): pass
class admonition(Admonition, Element): pass
-class comment(Special, Invisible, PreBibliographic, FixedTextElement): pass
+class comment(Special, Invisible, FixedTextElement): pass
class substitution_definition(Special, Invisible, TextElement): pass
class target(Special, Invisible, Inline, TextElement, Targetable): pass
-class footnote(General, Element, Labeled, BackLinkable): pass
-class citation(General, Element, Labeled, BackLinkable): pass
+class footnote(General, BackLinkable, Element, Labeled, Targetable): pass
+class citation(General, BackLinkable, Element, Labeled, Targetable): pass
class label(Part, TextElement): pass
class figure(General, Element): pass
class caption(Part, TextElement): pass
@@ -1075,7 +1228,7 @@
class entry(Part, Element): pass
-class system_message(Special, PreBibliographic, Element, BackLinkable):
+class system_message(Special, BackLinkable, PreBibliographic, Element):
def __init__(self, message=None, *children, **attributes):
if message:
@@ -1093,7 +1246,7 @@
self['level'], Element.astext(self))
-class pending(Special, Invisible, PreBibliographic, Element):
+class pending(Special, Invisible, Element):
"""
The "pending" element is used to encapsulate a pending operation: the
@@ -1111,8 +1264,8 @@
But the "contents" directive can't do its work until the entire document
has been parsed and possibly transformed to some extent. So the directive
- code leaves a placeholder behind that will trigger the second phase of the
- its processing, something like this::
+ code leaves a placeholder behind that will trigger the second phase of its
+ processing, something like this::
<pending ...public attributes...> + internal attributes
@@ -1159,7 +1312,7 @@
def copy(self):
return self.__class__(self.transform, self.details, self.rawsource,
- **self.attribuates)
+ **self.attributes)
class raw(Special, Inline, PreBibliographic, FixedTextElement):
@@ -1178,7 +1331,7 @@
class emphasis(Inline, TextElement): pass
class strong(Inline, TextElement): pass
class literal(Inline, TextElement): pass
-class reference(Inline, Referential, TextElement): pass
+class reference(General, Inline, Referential, TextElement): pass
class footnote_reference(Inline, Referential, TextElement): pass
class citation_reference(Inline, Referential, TextElement): pass
class substitution_reference(Inline, TextElement): pass
@@ -1189,7 +1342,7 @@
class subscript(Inline, TextElement): pass
-class image(General, Inline, TextElement):
+class image(General, Inline, Element):
def astext(self):
return self.get('alt', '')
@@ -1210,7 +1363,7 @@
authors
block_quote bullet_list
caption caution citation citation_reference classifier colspec comment
- contact copyright
+ compound contact copyright
danger date decoration definition definition_list definition_list_item
description docinfo doctest_block document
emphasis entry enumerated_list error
@@ -1219,7 +1372,7 @@
generated
header hint
image important inline
- label legend line_block list_item literal literal_block
+ label legend line line_block list_item literal literal_block
note
option option_argument option_group option_list option_list_item
option_string organization
@@ -1237,15 +1390,18 @@
class NodeVisitor:
"""
- "Visitor" pattern [GoF95]_ abstract superclass implementation for document
- tree traversals.
+ "Visitor" pattern [GoF95]_ abstract superclass implementation for
+ document tree traversals.
- Each node class has corresponding methods, doing nothing by default;
- override individual methods for specific and useful behaviour. The
- "``visit_`` + node class name" method is called by `Node.walk()` upon
- entering a node. `Node.walkabout()` also calls the "``depart_`` + node
- class name" method before exiting a node.
+ Each node class has corresponding methods, doing nothing by
+ default; override individual methods for specific and useful
+ behaviour. The `dispatch_visit()` method is called by
+ `Node.walk()` upon entering a node. `Node.walkabout()` also calls
+ the `dispatch_departure()` method before exiting a node.
+ The dispatch methods call "``visit_`` + node class name" or
+ "``depart_`` + node class name", resp.
+
This is a base class for visitors whose ``visit_...`` & ``depart_...``
methods should be implemented for *all* node types encountered (such as
for `docutils.writers.Writer` subclasses). Unimplemented methods will
@@ -1260,17 +1416,56 @@
1995.
"""
+ optional = ()
+ """
+ Tuple containing node class names (as strings).
+
+ No exception will be raised if writers do not implement visit
+ or departure functions for these node classes.
+
+ Used to ensure transitional compatibility with existing 3rd-party writers.
+ """
+
def __init__(self, document):
self.document = document
+ def dispatch_visit(self, node):
+ """
+ Call self."``visit_`` + node class name" with `node` as
+ parameter. If the ``visit_...`` method does not exist, call
+ self.unknown_visit.
+ """
+ node_name = node.__class__.__name__
+ method = getattr(self, 'visit_' + node_name, self.unknown_visit)
+ self.document.reporter.debug(
+ 'docutils.nodes.NodeVisitor.dispatch_visit calling %s for %s'
+ % (method.__name__, node_name))
+ return method(node)
+
+ def dispatch_departure(self, node):
+ """
+ Call self."``depart_`` + node class name" with `node` as
+ parameter. If the ``depart_...`` method does not exist, call
+ self.unknown_departure.
+ """
+ node_name = node.__class__.__name__
+ method = getattr(self, 'depart_' + node_name, self.unknown_departure)
+ self.document.reporter.debug(
+ 'docutils.nodes.NodeVisitor.dispatch_departure calling %s for %s'
+ % (method.__name__, node_name))
+ return method(node)
+
def unknown_visit(self, node):
"""
Called when entering unknown `Node` types.
Raise an exception unless overridden.
"""
- raise NotImplementedError('visiting unknown node type: %s'
- % node.__class__.__name__)
+ if (node.document.settings.strict_visitor
+ or node.__class__.__name__ not in self.optional):
+ raise NotImplementedError(
+ '%s visiting unknown node type: %s'
+ % (self.__class__, node.__class__.__name__))
def unknown_departure(self, node):
"""
@@ -1278,8 +1473,11 @@
Raise exception unless overridden.
"""
- raise NotImplementedError('departing unknown node type: %s'
- % node.__class__.__name__)
+ if (node.document.settings.strict_visitor
+ or node.__class__.__name__ not in self.optional):
+ raise NotImplementedError(
+ '%s departing unknown node type: %s'
+ % (self.__class__, node.__class__.__name__))
class SparseNodeVisitor(NodeVisitor):
@@ -1291,13 +1489,7 @@
subclasses), subclass `NodeVisitor` instead.
"""
- # Save typing with dynamic definitions.
- for name in node_class_names:
- exec """def visit_%s(self, node): pass\n""" % name
- exec """def depart_%s(self, node): pass\n""" % name
- del name
-
class GenericNodeVisitor(NodeVisitor):
"""
@@ -1324,15 +1516,26 @@
"""Override for generic, uniform traversals."""
raise NotImplementedError
- # Save typing with dynamic definitions.
- for name in node_class_names:
- exec """def visit_%s(self, node):
- self.default_visit(node)\n""" % name
- exec """def depart_%s(self, node):
- self.default_departure(node)\n""" % name
- del name
+def _call_default_visit(self, node):
+ self.default_visit(node)
+def _call_default_departure(self, node):
+ self.default_departure(node)
+def _nop(self, node):
+ pass
+
+def _add_node_class_names(names):
+ """Save typing with dynamic assignments:"""
+ for _name in names:
+ setattr(GenericNodeVisitor, "visit_" + _name, _call_default_visit)
+ setattr(GenericNodeVisitor, "depart_" + _name, _call_default_departure)
+ setattr(SparseNodeVisitor, 'visit_' + _name, _nop)
+ setattr(SparseNodeVisitor, 'depart_' + _name, _nop)
+
+_add_node_class_names(node_class_names)
+
+
class TreeCopyVisitor(GenericNodeVisitor):
"""
@@ -1465,9 +1668,12 @@
_non_id_chars = re.compile('[^a-z0-9]+')
_non_id_at_ends = re.compile('^[-0-9]+|-+$')
-def dupname(node):
- node['dupname'] = node['name']
- del node['name']
+def dupname(node, name):
+ node['dupnames'].append(name)
+ node['names'].remove(name)
+ # Assume that this node is referenced, even though it isn't; we
+ # don't want to throw unnecessary system_messages.
+ node.referenced = 1
def fully_normalize_name(name):
"""Return a case- and whitespace-normalized name."""
Modified: Zope3/trunk/src/docutils/parsers/__init__.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/__init__.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/__init__.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:05 $
+# Revision: $Revision: 1645 $
+# Date: $Date: 2003-08-27 22:50:43 +0200 (Wed, 27 Aug 2003) $
# Copyright: This module has been placed in the public domain.
"""
@@ -16,6 +16,7 @@
class Parser(Component):
component_type = 'parser'
+ config_section = 'parsers'
def parse(self, inputstring, document):
"""Override to parse `inputstring` into document tree `document`."""
Modified: Zope3/trunk/src/docutils/parsers/rst/__init__.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/__init__.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/__init__.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:05 $
+# Revision: $Revision: 3171 $
+# Date: $Date: 2005-04-05 17:26:16 +0200 (Tue, 05 Apr 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -75,6 +75,7 @@
import docutils.parsers
import docutils.statemachine
from docutils.parsers.rst import states
+from docutils import frontend
class Parser(docutils.parsers.Parser):
@@ -87,19 +88,51 @@
settings_spec = (
'reStructuredText Parser Options',
None,
- (('Recognize and link to PEP references (like "PEP 258").',
+ (('Recognize and link to standalone PEP references (like "PEP 258").',
['--pep-references'],
- {'action': 'store_true'}),
- ('Recognize and link to RFC references (like "RFC 822").',
+ {'action': 'store_true', 'validator': frontend.validate_boolean}),
+ ('Base URL for PEP references '
+ '(default "http://www.python.org/peps/").',
+ ['--pep-base-url'],
+ {'metavar': '<URL>', 'default': 'http://www.python.org/peps/',
+ 'validator': frontend.validate_url_trailing_slash}),
+ ('Recognize and link to standalone RFC references (like "RFC 822").',
['--rfc-references'],
- {'action': 'store_true'}),
+ {'action': 'store_true', 'validator': frontend.validate_boolean}),
+ ('Base URL for RFC references (default "http://www.faqs.org/rfcs/").',
+ ['--rfc-base-url'],
+ {'metavar': '<URL>', 'default': 'http://www.faqs.org/rfcs/',
+ 'validator': frontend.validate_url_trailing_slash}),
('Set number of spaces for tab expansion (default 8).',
['--tab-width'],
{'metavar': '<width>', 'type': 'int', 'default': 8}),
('Remove spaces before footnote references.',
['--trim-footnote-reference-space'],
- {'action': 'store_true'}),))
+ {'action': 'store_true', 'validator': frontend.validate_boolean}),
+ ('Leave spaces before footnote references.',
+ ['--leave-footnote-reference-space'],
+ {'action': 'store_false', 'dest': 'trim_footnote_reference_space',
+ 'validator': frontend.validate_boolean}),
+ ('Disable directives that insert the contents of external file '
+ '("include" & "raw"); replaced with a "warning" system message.',
+ ['--no-file-insertion'],
+ {'action': 'store_false', 'default': 1,
+ 'dest': 'file_insertion_enabled'}),
+ ('Enable directives that insert the contents of external file '
+ '("include" & "raw"). Enabled by default.',
+ ['--file-insertion-enabled'],
+ {'action': 'store_true', 'dest': 'file_insertion_enabled'}),
+ ('Disable the "raw" directives; replaced with a "warning" '
+ 'system message.',
+ ['--no-raw'],
+ {'action': 'store_false', 'default': 1, 'dest': 'raw_enabled'}),
+ ('Enable the "raw" directive. Enabled by default.',
+ ['--raw-enabled'],
+ {'action': 'store_true', 'dest': 'raw_enabled'}),))
+ config_section = 'restructuredtext parser'
+ config_section_dependencies = ('parsers',)
+
def __init__(self, rfc2822=None, inliner=None):
if rfc2822:
self.initial_state = 'RFC2822Body'
@@ -111,11 +144,10 @@
def parse(self, inputstring, document):
"""Parse `inputstring` and populate `document`, a document tree."""
self.setup_parse(inputstring, document)
- debug = document.reporter[''].debug
self.statemachine = states.RSTStateMachine(
state_classes=self.state_classes,
initial_state=self.initial_state,
- debug=debug)
+ debug=document.reporter.debug_flag)
inputlines = docutils.statemachine.string2lines(
inputstring, tab_width=document.settings.tab_width,
convert_whitespace=1)
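
The parser settings added above can be supplied through the usual
settings-overrides mechanism. A hedged sketch (not part of the patch; the
source text is made up) that turns off file insertion and the "raw"
directive::

    from docutils.core import publish_string

    source = ".. include:: some-file.txt\n"

    # With file insertion disabled, "include" and file/URL "raw" are
    # replaced by warning system messages instead of reading files.
    output = publish_string(
        source, writer_name='html',
        settings_overrides={'file_insertion_enabled': 0,
                            'raw_enabled': 0})
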
Modified: Zope3/trunk/src/docutils/parsers/rst/directives/__init__.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/directives/__init__.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/directives/__init__.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
-# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Contact: goodger at python.org
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -20,11 +20,12 @@
Parameters:
-- ``name`` is the directive type or name.
+- ``name`` is the directive type or name (string).
-- ``arguments`` is a list of positional arguments.
+- ``arguments`` is a list of positional arguments (strings).
-- ``options`` is a dictionary mapping option names to values.
+- ``options`` is a dictionary mapping option names (strings) to values (type
+ depends on option conversion functions; see below).
- ``content`` is a list of strings, the directive content.
@@ -60,8 +61,13 @@
- ``options``: A dictionary, mapping known option names to conversion
functions such as `int` or `float`. ``None`` or an empty dict implies no
- options to parse.
+ options to parse. Several directive option conversion functions are defined
+ in this module.
+ Option conversion functions take a single parameter, the option argument (a
+ string or ``None``), validate it and/or convert it to the appropriate form.
+ Conversion functions may raise ``ValueError`` and ``TypeError`` exceptions.
+
- ``content``: A boolean; true if content is allowed. Client code must handle
the case where content is required but not supplied (an empty content list
will be supplied).
@@ -73,11 +79,13 @@
See `Creating reStructuredText Directives`_ for more information.
.. _Creating reStructuredText Directives:
- http://docutils.sourceforge.net/spec/howto/rst-directives.html
+ http://docutils.sourceforge.net/docs/howto/rst-directives.html
"""
__docformat__ = 'reStructuredText'
+import re
+import codecs
from docutils import nodes
from docutils.parsers.rst.languages import en as _fallback_language_module
@@ -101,11 +109,17 @@
'epigraph': ('body', 'epigraph'),
'highlights': ('body', 'highlights'),
'pull-quote': ('body', 'pull_quote'),
+ 'compound': ('body', 'compound'),
#'questions': ('body', 'question_list'),
+ 'table': ('tables', 'table'),
+ 'csv-table': ('tables', 'csv_table'),
+ 'list-table': ('tables', 'list_table'),
'image': ('images', 'image'),
'figure': ('images', 'figure'),
'contents': ('parts', 'contents'),
'sectnum': ('parts', 'sectnum'),
+ 'header': ('parts', 'header'),
+ 'footer': ('parts', 'footer'),
#'footnotes': ('parts', 'footnotes'),
#'citations': ('parts', 'citations'),
'target-notes': ('references', 'target_notes'),
@@ -116,6 +130,7 @@
'replace': ('misc', 'replace'),
'unicode': ('misc', 'unicode_directive'),
'class': ('misc', 'class_directive'),
+ 'role': ('misc', 'role'),
'restructuredtext-test-directive': ('misc', 'directive_test_function'),}
"""Mapping of directive name to (module name, function name). The directive
name is canonical & must be lowercase. Language-dependent names are defined
@@ -164,28 +179,43 @@
try:
modulename, functionname = _directive_registry[canonicalname]
except KeyError:
+ messages.append(document.reporter.error(
+ 'Directive "%s" not registered (canonical name "%s").'
+ % (directive_name, canonicalname), line=document.current_line))
return None, messages
if _modules.has_key(modulename):
module = _modules[modulename]
else:
try:
module = __import__(modulename, globals(), locals())
- except ImportError:
+ except ImportError, detail:
+ messages.append(document.reporter.error(
+ 'Error importing directive module "%s" (directive "%s"):\n%s'
+ % (modulename, directive_name, detail),
+ line=document.current_line))
return None, messages
try:
function = getattr(module, functionname)
_directives[normname] = function
except AttributeError:
+ messages.append(document.reporter.error(
+ 'No function "%s" in module "%s" (directive "%s").'
+ % (functionname, modulename, directive_name),
+ line=document.current_line))
return None, messages
return function, messages
-def register_directive(name, directive):
- """Register a nonstandard application-defined directive function."""
- _directives[name] = directive
+def register_directive(name, directive_function):
+ """
+ Register a nonstandard application-defined directive function.
+ Language lookups are not needed for such functions.
+ """
+ _directives[name] = directive_function
def flag(argument):
"""
Check for a valid flag option (no argument) and return ``None``.
+ (Directive option conversion function.)
Raise ``ValueError`` if an argument is found.
"""
@@ -194,9 +224,10 @@
else:
return None
-def unchanged(argument):
+def unchanged_required(argument):
"""
- Return the argument, unchanged.
+ Return the argument text, unchanged.
+ (Directive option conversion function.)
Raise ``ValueError`` if no argument is found.
"""
@@ -205,36 +236,175 @@
else:
return argument # unchanged!
+def unchanged(argument):
+ """
+ Return the argument text, unchanged.
+ (Directive option conversion function.)
+
+ No argument implies empty string ("").
+ """
+ if argument is None:
+ return u''
+ else:
+ return argument # unchanged!
+
def path(argument):
"""
Return the path argument unwrapped (with newlines removed).
+ (Directive option conversion function.)
- Raise ``ValueError`` if no argument is found or if the path contains
- internal whitespace.
+ Raise ``ValueError`` if no argument is found.
"""
if argument is None:
raise ValueError('argument required but none supplied')
else:
path = ''.join([s.strip() for s in argument.splitlines()])
- if path.find(' ') == -1:
- return path
- else:
- raise ValueError('path contains whitespace')
+ return path
+def uri(argument):
+ """
+ Return the URI argument with whitespace removed.
+ (Directive option conversion function.)
+
+ Raise ``ValueError`` if no argument is found.
+ """
+ if argument is None:
+ raise ValueError('argument required but none supplied')
+ else:
+ uri = ''.join(argument.split())
+ return uri
+
def nonnegative_int(argument):
"""
Check for a nonnegative integer argument; raise ``ValueError`` if not.
+ (Directive option conversion function.)
"""
value = int(argument)
if value < 0:
raise ValueError('negative value; must be positive or zero')
return value
-def format_values(values):
- return '%s, or "%s"' % (', '.join(['"%s"' % s for s in values[:-1]]),
- values[-1])
+def class_option(argument):
+ """
+ Convert the argument into a list of ID-compatible strings and return it.
+ (Directive option conversion function.)
+ Raise ``ValueError`` if no argument is found.
+ """
+ if argument is None:
+ raise ValueError('argument required but none supplied')
+ names = argument.split()
+ class_names = []
+ for name in names:
+ class_name = nodes.make_id(name)
+ if not class_name:
+ raise ValueError('cannot make "%s" into a class name' % name)
+ class_names.append(class_name)
+ return class_names
+
+unicode_pattern = re.compile(
+ r'(?:0x|x|\\x|U\+?|\\u)([0-9a-f]+)$|&#x([0-9a-f]+);$', re.IGNORECASE)
+
+def unicode_code(code):
+ r"""
+ Convert a Unicode character code to a Unicode character.
+ (Directive option conversion function.)
+
+ Codes may be decimal numbers, hexadecimal numbers (prefixed by ``0x``,
+ ``x``, ``\x``, ``U+``, ``u``, or ``\u``; e.g. ``U+262E``), or XML-style
+ numeric character entities (e.g. ``&#x262E;``). Other text remains as-is.
+
+ Raise ValueError for illegal Unicode code values.
+ """
+ try:
+ if code.isdigit(): # decimal number
+ return unichr(int(code))
+ else:
+ match = unicode_pattern.match(code)
+ if match: # hex number
+ value = match.group(1) or match.group(2)
+ return unichr(int(value, 16))
+ else: # other text
+ return code
+ except OverflowError, detail:
+ raise ValueError('code too large (%s)' % detail)
+
+def single_char_or_unicode(argument):
+ """
+ A single character is returned as-is. Unicode character codes are
+ converted as in `unicode_code`. (Directive option conversion function.)
+ """
+ char = unicode_code(argument)
+ if len(char) > 1:
+ raise ValueError('%r invalid; must be a single character or '
+ 'a Unicode code' % char)
+ return char
+
+def single_char_or_whitespace_or_unicode(argument):
+ """
+ As with `single_char_or_unicode`, but "tab" and "space" are also supported.
+ (Directive option conversion function.)
+ """
+ if argument == 'tab':
+ char = '\t'
+ elif argument == 'space':
+ char = ' '
+ else:
+ char = single_char_or_unicode(argument)
+ return char
+
+def positive_int(argument):
+ """
+ Converts the argument into an integer. Raises ValueError for negative,
+ zero, or non-integer values. (Directive option conversion function.)
+ """
+ value = int(argument)
+ if value < 1:
+ raise ValueError('negative or zero value; must be positive')
+ return value
+
+def positive_int_list(argument):
+ """
+ Converts a space- or comma-separated list of values into a Python list
+ of integers.
+ (Directive option conversion function.)
+
+ Raises ValueError for non-positive-integer values.
+ """
+ if ',' in argument:
+ entries = argument.split(',')
+ else:
+ entries = argument.split()
+ return [positive_int(entry) for entry in entries]
+
+def encoding(argument):
+ """
+ Verifies the encoding argument by lookup.
+ (Directive option conversion function.)
+
+ Raises ValueError for unknown encodings.
+ """
+ try:
+ codecs.lookup(argument)
+ except LookupError:
+ raise ValueError('unknown encoding: "%s"' % argument)
+ return argument
+
def choice(argument, values):
+ """
+ Directive option utility function, supplied to enable options whose
+ argument must be a member of a finite set of possible values (must be
+ lower case). A custom conversion function must be written to use it. For
+ example::
+
+ from docutils.parsers.rst import directives
+
+ def yesno(argument):
+ return directives.choice(argument, ('yes', 'no'))
+
+ Raise ``ValueError`` if no argument is found or if the argument's value is
+ not valid (not an entry in the supplied list).
+ """
try:
value = argument.lower().strip()
except AttributeError:
@@ -246,7 +416,6 @@
raise ValueError('"%s" unknown; choose from %s'
% (argument, format_values(values)))
-def class_option(argument):
- if argument is None:
- raise ValueError('argument required but none supplied')
- return nodes.make_id(argument)
+def format_values(values):
+ return '%s, or "%s"' % (', '.join(['"%s"' % s for s in values[:-1]]),
+ values[-1])
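
A short sketch of registering an application-defined directive with the
function-based API documented above. The directive name "framed" and its
options are made up for illustration; the option conversion functions are
the ones defined in this module, and the container node is the new
"compound" element::

    from docutils import nodes
    from docutils.parsers.rst import directives

    def framed(name, arguments, options, content, lineno,
               content_offset, block_text, state, state_machine):
        node = nodes.compound()
        node['classes'] += options.get('class', [])
        state.nested_parse(content, content_offset, node)
        return [node]

    # Each option maps to a conversion function that validates its value
    # (the 'width' value is validated but unused in this sketch).
    framed.options = {'class': directives.class_option,
                      'width': directives.positive_int}
    framed.content = 1

    directives.register_directive('framed', framed)
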
Modified: Zope3/trunk/src/docutils/parsers/rst/directives/admonitions.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/directives/admonitions.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/directives/admonitions.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3155 $
+# Date: $Date: 2005-04-02 23:57:06 +0200 (Sat, 02 Apr 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -30,10 +30,10 @@
admonition_node += nodes.title(title_text, '', *textnodes)
admonition_node += messages
if options.has_key('class'):
- class_value = options['class']
+ classes = options['class']
else:
- class_value = 'admonition-' + nodes.make_id(title_text)
- admonition_node.set_class(class_value)
+ classes = ['admonition-' + nodes.make_id(title_text)]
+ admonition_node['classes'] += classes
state.nested_parse(content, content_offset, admonition_node)
return [admonition_node]
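
When no ":class:" option is given, the default class is now derived from
the admonition title via make_id(); a tiny sketch (not part of the patch)::

    from docutils import nodes

    title_text = 'Road Works Ahead'
    classes = ['admonition-' + nodes.make_id(title_text)]
    # -> ['admonition-road-works-ahead']
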
Modified: Zope3/trunk/src/docutils/parsers/rst/directives/body.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/directives/body.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/directives/body.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,11 +1,13 @@
# Author: David Goodger
-# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Contact: goodger at python.org
+# Revision: $Revision: 3206 $
+# Date: $Date: 2005-04-12 01:16:11 +0200 (Tue, 12 Apr 2005) $
# Copyright: This module has been placed in the public domain.
"""
Directives for additional body elements.
+
+See `docutils.parsers.rst.directives` for API details.
"""
__docformat__ = 'reStructuredText'
@@ -14,14 +16,16 @@
import sys
from docutils import nodes
from docutils.parsers.rst import directives
+from docutils.parsers.rst.roles import set_classes
-
+
def topic(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine,
node_class=nodes.topic):
- if not state_machine.match_titles:
+ if not (state_machine.match_titles
+ or isinstance(state_machine.node, nodes.sidebar)):
error = state_machine.reporter.error(
- 'The "%s" directive may not be used within topics, sidebars, '
+ 'The "%s" directive may not be used within topics '
'or body elements.' % name,
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
@@ -34,6 +38,7 @@
title_text = arguments[0]
textnodes, messages = state.inline_text(title_text, lineno)
titles = [nodes.title(title_text, '', *textnodes)]
+ # sidebar uses this code
if options.has_key('subtitle'):
textnodes, more_messages = state.inline_text(options['subtitle'],
lineno)
@@ -41,8 +46,7 @@
messages.extend(more_messages)
text = '\n'.join(content)
node = node_class(text, *(titles + messages))
- if options.has_key('class'):
- node.set_class(options['class'])
+ node['classes'] += options.get('class', [])
if text:
state.nested_parse(content, content_offset, node)
return [node]
@@ -53,40 +57,67 @@
def sidebar(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
+ if isinstance(state_machine.node, nodes.sidebar):
+ error = state_machine.reporter.error(
+ 'The "%s" directive may not be used within a sidebar element.'
+ % name, nodes.literal_block(block_text, block_text), line=lineno)
+ return [error]
return topic(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine,
node_class=nodes.sidebar)
sidebar.arguments = (1, 0, 1)
-sidebar.options = {'subtitle': directives.unchanged,
+sidebar.options = {'subtitle': directives.unchanged_required,
'class': directives.class_option}
sidebar.content = 1
def line_block(name, arguments, options, content, lineno,
- content_offset, block_text, state, state_machine,
- node_class=nodes.line_block):
+ content_offset, block_text, state, state_machine):
if not content:
warning = state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% name, nodes.literal_block(block_text, block_text), line=lineno)
return [warning]
- text = '\n'.join(content)
- text_nodes, messages = state.inline_text(text, lineno)
- node = node_class(text, '', *text_nodes, **options)
- return [node] + messages
+ block = nodes.line_block(classes=options.get('class', []))
+ node_list = [block]
+ for line_text in content:
+ text_nodes, messages = state.inline_text(line_text.strip(),
+ lineno + content_offset)
+ line = nodes.line(line_text, '', *text_nodes)
+ if line_text.strip():
+ line.indent = len(line_text) - len(line_text.lstrip())
+ block += line
+ node_list.extend(messages)
+ content_offset += 1
+ state.nest_line_block_lines(block)
+ return node_list
line_block.options = {'class': directives.class_option}
line_block.content = 1
def parsed_literal(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
- return line_block(name, arguments, options, content, lineno,
- content_offset, block_text, state, state_machine,
- node_class=nodes.literal_block)
+ set_classes(options)
+ return block(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine,
+ node_class=nodes.literal_block)
parsed_literal.options = {'class': directives.class_option}
parsed_literal.content = 1
+def block(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine, node_class):
+ if not content:
+ warning = state_machine.reporter.warning(
+ 'Content block expected for the "%s" directive; none found.'
+ % name, nodes.literal_block(block_text, block_text), line=lineno)
+ return [warning]
+ text = '\n'.join(content)
+ text_nodes, messages = state.inline_text(text, lineno)
+ node = node_class(text, '', *text_nodes, **options)
+ node.line = content_offset + 1
+ return [node] + messages
+
def rubric(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
rubric_text = arguments[0]
@@ -100,7 +131,7 @@
def epigraph(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
block_quote, messages = state.block_quote(content, content_offset)
- block_quote.set_class('epigraph')
+ block_quote['classes'].append('epigraph')
return [block_quote] + messages
epigraph.content = 1
@@ -108,7 +139,7 @@
def highlights(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
block_quote, messages = state.block_quote(content, content_offset)
- block_quote.set_class('highlights')
+ block_quote['classes'].append('highlights')
return [block_quote] + messages
highlights.content = 1
@@ -116,7 +147,23 @@
def pull_quote(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
block_quote, messages = state.block_quote(content, content_offset)
- block_quote.set_class('pull-quote')
+ block_quote['classes'].append('pull-quote')
return [block_quote] + messages
pull_quote.content = 1
+
+def compound(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ text = '\n'.join(content)
+ if not text:
+ error = state_machine.reporter.error(
+ 'The "%s" directive is empty; content required.' % name,
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [error]
+ node = nodes.compound(text)
+ node['classes'] += options.get('class', [])
+ state.nested_parse(content, content_offset, node)
+ return [node]
+
+compound.options = {'class': directives.class_option}
+compound.content = 1
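
A hedged sketch (not part of the patch; the source text is made up) of the
new "compound" directive wired up above::

    from docutils.core import publish_string

    source = """\
    .. compound::
       :class: example

       The first paragraph of a compound paragraph, followed by::

           a literal block that belongs to the same logical paragraph.
    """

    # The pseudo-XML output shows a <compound classes="example"> element
    # wrapping the paragraph and the literal block.
    output = publish_string(source, writer_name='pseudoxml')
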
Modified: Zope3/trunk/src/docutils/parsers/rst/directives/html.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/directives/html.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/directives/html.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3038 $
+# Date: $Date: 2005-03-14 17:16:57 +0100 (Mon, 14 Mar 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -34,7 +34,7 @@
'Empty meta directive.',
nodes.literal_block(block_text, block_text), line=lineno)
node += error
- return node.get_children()
+ return node.children
meta.content = 1
Modified: Zope3/trunk/src/docutils/parsers/rst/directives/images.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/directives/images.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/directives/images.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3347 $
+# Date: $Date: 2005-05-18 20:17:33 +0200 (Wed, 18 May 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -13,29 +13,66 @@
import sys
from docutils import nodes, utils
-from docutils.parsers.rst import directives
+from docutils.parsers.rst import directives, states
+from docutils.nodes import fully_normalize_name
+from docutils.parsers.rst.roles import set_classes
try:
import Image # PIL
except ImportError:
Image = None
-align_values = ('top', 'middle', 'bottom', 'left', 'center', 'right')
+align_h_values = ('left', 'center', 'right')
+align_v_values = ('top', 'middle', 'bottom')
+align_values = align_v_values + align_h_values
def align(argument):
return directives.choice(argument, align_values)
def image(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
- reference = ''.join(arguments[0].split('\n'))
- if reference.find(' ') != -1:
- error = state_machine.reporter.error(
- 'Image URI contains whitespace.',
- nodes.literal_block(block_text, block_text), line=lineno)
- return [error]
+ if options.has_key('align'):
+ # check for align_v values only
+ if isinstance(state, states.SubstitutionDef):
+ if options['align'] not in align_v_values:
+ error = state_machine.reporter.error(
+ 'Error in "%s" directive: "%s" is not a valid value for '
+ 'the "align" option within a substitution definition. '
+ 'Valid values for "align" are: "%s".'
+ % (name, options['align'], '", "'.join(align_v_values)),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [error]
+ elif options['align'] not in align_h_values:
+ error = state_machine.reporter.error(
+ 'Error in "%s" directive: "%s" is not a valid value for '
+ 'the "align" option. Valid values for "align" are: "%s".'
+ % (name, options['align'], '", "'.join(align_h_values)),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [error]
+ messages = []
+ reference = directives.uri(arguments[0])
options['uri'] = reference
+ reference_node = None
+ if options.has_key('target'):
+ block = states.escape2null(options['target']).splitlines()
+ block = [line for line in block]
+ target_type, data = state.parse_target(block, block_text, lineno)
+ if target_type == 'refuri':
+ reference_node = nodes.reference(refuri=data)
+ elif target_type == 'refname':
+ reference_node = nodes.reference(refname=data,
+ name=fully_normalize_name(options['target']))
+ state.document.note_refname(reference_node)
+ else: # malformed target
+ messages.append(data) # data is a system message
+ del options['target']
+ set_classes(options)
image_node = nodes.image(block_text, **options)
- return [image_node]
+ if reference_node:
+ reference_node += image_node
+ return messages + [reference_node]
+ else:
+ return messages + [image_node]
image.arguments = (1, 0, 1)
image.options = {'alt': directives.unchanged,
@@ -43,32 +80,41 @@
'width': directives.nonnegative_int,
'scale': directives.nonnegative_int,
'align': align,
+ 'target': directives.unchanged_required,
'class': directives.class_option}
+def figure_align(argument):
+ return directives.choice(argument, align_h_values)
+
def figure(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
figwidth = options.setdefault('figwidth')
- figclass = options.setdefault('figclass')
+ figclasses = options.setdefault('figclass')
+ align = options.setdefault('align')
del options['figwidth']
del options['figclass']
+ del options['align']
(image_node,) = image(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine)
if isinstance(image_node, nodes.system_message):
return [image_node]
figure_node = nodes.figure('', image_node)
if figwidth == 'image':
- if Image:
+ if Image and state.document.settings.file_insertion_enabled:
# PIL doesn't like Unicode paths:
try:
i = Image.open(str(image_node['uri']))
except (IOError, UnicodeError):
pass
else:
+ state.document.settings.record_dependencies.add(image_node['uri'])
figure_node['width'] = i.size[0]
elif figwidth is not None:
figure_node['width'] = figwidth
- if figclass:
- figure_node.set_class(figclass)
+ if figclasses:
+ figure_node['classes'] += figclasses
+ if align:
+ figure_node['align'] = align
if content:
node = nodes.Element() # anonymous container for parsing
state.nested_parse(content, content_offset, node)
@@ -97,4 +143,5 @@
figure.options = {'figwidth': figwidth_value,
'figclass': directives.class_option}
figure.options.update(image.options)
+figure.options['align'] = figure_align
figure.content = 1
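
A sketch of the extended image handling above (not part of the patch; the
file names are made up): the new ":target:" option wraps the image in a
reference, and "align" accepts only vertical values inside substitution
definitions::

    from docutils.core import publish_string

    source = """\
    .. image:: logo.png
       :align: center
       :target: http://docutils.sourceforge.net/

    .. |arrow| image:: arrow.png
       :align: middle

    Click the |arrow| icon to continue.
    """

    output = publish_string(source, writer_name='html')
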
Modified: Zope3/trunk/src/docutils/parsers/rst/directives/misc.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/directives/misc.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/directives/misc.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Authors: David Goodger, Dethe Elza
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3129 $
+# Date: $Date: 2005-03-26 17:21:28 +0100 (Sat, 26 Mar 2005) $
# Copyright: This module has been placed in the public domain.
"""Miscellaneous directives."""
@@ -11,35 +11,51 @@
import sys
import os.path
import re
-from urllib2 import urlopen, URLError
from docutils import io, nodes, statemachine, utils
-from docutils.parsers.rst import directives, states
+from docutils.parsers.rst import directives, roles, states
from docutils.transforms import misc
+try:
+ import urllib2
+except ImportError:
+ urllib2 = None
+
def include(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Include a reST file as part of the content of this reST file."""
+ if not state.document.settings.file_insertion_enabled:
+ warning = state_machine.reporter.warning(
+ '"%s" directive disabled.' % name,
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [warning]
source = state_machine.input_lines.source(
lineno - state_machine.input_offset - 1)
source_dir = os.path.dirname(os.path.abspath(source))
- path = ''.join(arguments[0].splitlines())
- if path.find(' ') != -1:
- error = state_machine.reporter.error(
- '"%s" directive path contains whitespace.' % name,
- nodes.literal_block(block_text, block_text), line=lineno)
- return [error]
+ path = directives.path(arguments[0])
path = os.path.normpath(os.path.join(source_dir, path))
path = utils.relative_path(None, path)
+ encoding = options.get('encoding', state.document.settings.input_encoding)
try:
+ state.document.settings.record_dependencies.add(path)
include_file = io.FileInput(
- source_path=path, encoding=state.document.settings.input_encoding)
+ source_path=path, encoding=encoding,
+ error_handler=state.document.settings.input_encoding_error_handler,
+ handle_io_errors=None)
except IOError, error:
severe = state_machine.reporter.severe(
- 'Problems with "%s" directive path:\n%s.' % (name, error),
+ 'Problems with "%s" directive path:\n%s: %s.'
+ % (name, error.__class__.__name__, error),
nodes.literal_block(block_text, block_text), line=lineno)
return [severe]
- include_text = include_file.read()
+ try:
+ include_text = include_file.read()
+ except UnicodeError, error:
+ severe = state_machine.reporter.severe(
+ 'Problem with "%s" directive:\n%s: %s'
+ % (name, error.__class__.__name__, error),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [severe]
if options.has_key('literal'):
literal_block = nodes.literal_block(include_text, include_text,
source=path)
@@ -52,7 +68,8 @@
return []
include.arguments = (1, 0, 1)
-include.options = {'literal': directives.flag}
+include.options = {'literal': directives.flag,
+ 'encoding': directives.encoding}
def raw(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
@@ -64,7 +81,15 @@
Content may be included inline (content section of directive) or
imported from a file or url.
"""
- attributes = {'format': arguments[0]}
+ if ( not state.document.settings.raw_enabled
+ or (not state.document.settings.file_insertion_enabled
+ and (options.has_key('file') or options.has_key('url'))) ):
+ warning = state_machine.reporter.warning(
+ '"%s" directive disabled.' % name,
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [warning]
+ attributes = {'format': ' '.join(arguments[0].lower().split())}
+ encoding = options.get('encoding', state.document.settings.input_encoding)
if content:
if options.has_key('file') or options.has_key('url'):
error = state_machine.reporter.error(
@@ -85,27 +110,54 @@
path = os.path.normpath(os.path.join(source_dir, options['file']))
path = utils.relative_path(None, path)
try:
- raw_file = open(path)
+ state.document.settings.record_dependencies.add(path)
+ raw_file = io.FileInput(
+ source_path=path, encoding=encoding,
+ error_handler=state.document.settings.input_encoding_error_handler,
+ handle_io_errors=None)
except IOError, error:
severe = state_machine.reporter.severe(
'Problems with "%s" directive path:\n%s.' % (name, error),
nodes.literal_block(block_text, block_text), line=lineno)
return [severe]
- text = raw_file.read()
- raw_file.close()
+ try:
+ text = raw_file.read()
+ except UnicodeError, error:
+ severe = state_machine.reporter.severe(
+ 'Problem with "%s" directive:\n%s: %s'
+ % (name, error.__class__.__name__, error),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [severe]
attributes['source'] = path
elif options.has_key('url'):
+ if not urllib2:
+ severe = state_machine.reporter.severe(
+ 'Problems with the "%s" directive and its "url" option: '
+ 'unable to access the required functionality (from the '
+ '"urllib2" module).' % name,
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [severe]
+ source = options['url']
try:
- raw_file = urlopen(options['url'])
- except (URLError, IOError, OSError), error:
+ raw_text = urllib2.urlopen(source).read()
+ except (urllib2.URLError, IOError, OSError), error:
severe = state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (name, options['url'], error),
nodes.literal_block(block_text, block_text), line=lineno)
return [severe]
- text = raw_file.read()
- raw_file.close()
- attributes['source'] = options['file']
+ raw_file = io.StringInput(
+ source=raw_text, source_path=source, encoding=encoding,
+ error_handler=state.document.settings.input_encoding_error_handler)
+ try:
+ text = raw_file.read()
+ except UnicodeError, error:
+ severe = state_machine.reporter.severe(
+ 'Problem with "%s" directive:\n%s: %s'
+ % (name, error.__class__.__name__, error),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [severe]
+ attributes['source'] = source
else:
error = state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.' % (name),
@@ -116,7 +168,8 @@
raw.arguments = (1, 0, 1)
raw.options = {'file': directives.path,
- 'url': directives.path}
+ 'url': directives.uri,
+ 'encoding': directives.encoding}
raw.content = 1
def replace(name, arguments, options, content, lineno,
@@ -135,8 +188,7 @@
messages = []
for node in element:
if isinstance(node, nodes.system_message):
- if node.has_key('backrefs'):
- del node['backrefs']
+ node['backrefs'] = []
messages.append(node)
error = state_machine.reporter.error(
'Error in "%s" directive: may contain a single paragraph '
@@ -154,7 +206,7 @@
replace.content = 1
def unicode_directive(name, arguments, options, content, lineno,
- content_offset, block_text, state, state_machine):
+ content_offset, block_text, state, state_machine):
r"""
Convert Unicode character codes (numbers) to characters. Codes may be
decimal numbers, hexadecimal numbers (prefixed by ``0x``, ``x``, ``\x``,
@@ -168,52 +220,118 @@
'substitution definition.' % (name),
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
- codes = arguments[0].split('.. ')[0].split()
+ substitution_definition = state_machine.node
+ if options.has_key('trim'):
+ substitution_definition.attributes['ltrim'] = 1
+ substitution_definition.attributes['rtrim'] = 1
+ if options.has_key('ltrim'):
+ substitution_definition.attributes['ltrim'] = 1
+ if options.has_key('rtrim'):
+ substitution_definition.attributes['rtrim'] = 1
+ codes = unicode_comment_pattern.split(arguments[0])[0].split()
element = nodes.Element()
for code in codes:
try:
- if code.isdigit():
- element += nodes.Text(unichr(int(code)))
- else:
- match = unicode_pattern.match(code)
- if match:
- value = match.group(1) or match.group(2)
- element += nodes.Text(unichr(int(value, 16)))
- else:
- element += nodes.Text(code)
+ decoded = directives.unicode_code(code)
except ValueError, err:
error = state_machine.reporter.error(
- 'Invalid character code: %s\n%s' % (code, err),
+ 'Invalid character code: %s\n%s: %s'
+ % (code, err.__class__.__name__, err),
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
+ element += nodes.Text(decoded)
return element.children
unicode_directive.arguments = (1, 0, 1)
-unicode_pattern = re.compile(
- r'(?:0x|x|\x00x|U\+?|\x00u)([0-9a-f]+)$|&#x([0-9a-f]+);$', re.IGNORECASE)
+unicode_directive.options = {'trim': directives.flag,
+ 'ltrim': directives.flag,
+ 'rtrim': directives.flag}
+unicode_comment_pattern = re.compile(r'( |\n|^)\.\. ')
def class_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
- """"""
- class_value = nodes.make_id(arguments[0])
- if class_value:
- pending = nodes.pending(misc.ClassAttribute,
- {'class': class_value, 'directive': name},
- block_text)
- state_machine.document.note_pending(pending)
- return [pending]
- else:
+ """
+ Set a "class" attribute on the next element.
+ A "pending" element is inserted, and a transform does the work later.
+ """
+ try:
+ class_value = directives.class_option(arguments[0])
+ except ValueError:
error = state_machine.reporter.error(
- 'Invalid class attribute value for "%s" directive: %s'
+ 'Invalid class attribute value for "%s" directive: "%s".'
% (name, arguments[0]),
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
+ pending = nodes.pending(misc.ClassAttribute,
+ {'class': class_value, 'directive': name},
+ block_text)
+ state_machine.document.note_pending(pending)
+ return [pending]
-class_directive.arguments = (1, 0, 0)
+class_directive.arguments = (1, 0, 1)
class_directive.content = 1
+role_arg_pat = re.compile(r'(%s)\s*(\(\s*(%s)\s*\)\s*)?$'
+ % ((states.Inliner.simplename,) * 2))
+def role(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ """Dynamically create and register a custom interpreted text role."""
+ if content_offset > lineno or not content:
+ error = state_machine.reporter.error(
+ '"%s" directive requires arguments on the first line.'
+ % name, nodes.literal_block(block_text, block_text), line=lineno)
+ return [error]
+ args = content[0]
+ match = role_arg_pat.match(args)
+ if not match:
+ error = state_machine.reporter.error(
+ '"%s" directive arguments not valid role names: "%s".'
+ % (name, args), nodes.literal_block(block_text, block_text),
+ line=lineno)
+ return [error]
+ new_role_name = match.group(1)
+ base_role_name = match.group(3)
+ messages = []
+ if base_role_name:
+ base_role, messages = roles.role(
+ base_role_name, state_machine.language, lineno, state.reporter)
+ if base_role is None:
+ error = state.reporter.error(
+ 'Unknown interpreted text role "%s".' % base_role_name,
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return messages + [error]
+ else:
+ base_role = roles.generic_custom_role
+ assert not hasattr(base_role, 'arguments'), (
+ 'Supplemental directive arguments for "%s" directive not supported'
+ '(specified by "%r" role).' % (name, base_role))
+ try:
+ (arguments, options, content, content_offset) = (
+ state.parse_directive_block(content[1:], content_offset, base_role,
+ option_presets={}))
+ except states.MarkupError, detail:
+ error = state_machine.reporter.error(
+ 'Error in "%s" directive:\n%s.' % (name, detail),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return messages + [error]
+ if not options.has_key('class'):
+ try:
+ options['class'] = directives.class_option(new_role_name)
+ except ValueError, detail:
+ error = state_machine.reporter.error(
+ 'Invalid argument for "%s" directive:\n%s.'
+ % (name, detail),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return messages + [error]
+ role = roles.CustomRole(new_role_name, base_role, options, content)
+ roles.register_local_role(new_role_name, role)
+ return messages
+
+role.content = 1
+
def directive_test_function(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
+ """This directive is useful only for testing purposes."""
if content:
text = '\n'.join(content)
info = state_machine.reporter.info(
@@ -227,5 +345,5 @@
return [info]
directive_test_function.arguments = (0, 1, 1)
-directive_test_function.options = {'option': directives.unchanged}
+directive_test_function.options = {'option': directives.unchanged_required}
directive_test_function.content = 1
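[Editor's note] The new "role" directive above registers a custom interpreted-text role from within a document. A minimal usage sketch, assuming only the stock docutils.core.publish_string entry point; the role name "custom" and the sample text are purely illustrative:

from docutils.core import publish_string

source = u"""\
.. role:: custom(emphasis)

Some :custom:`specially marked` text.
"""

# "custom" is registered as a local role based on "emphasis".  Since no
# :class: option is supplied, the directive falls back to the new role's
# own name as its class value (see the class_option fallback above), so
# the HTML writer emits the content as emphasized text with class "custom".
print publish_string(source, writer_name='html')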
Modified: Zope3/trunk/src/docutils/parsers/rst/directives/parts.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/directives/parts.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/directives/parts.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger, Dmitry Jemerov
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3199 $
+# Date: $Date: 2005-04-09 03:32:29 +0200 (Sat, 09 Apr 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -10,7 +10,7 @@
__docformat__ = 'reStructuredText'
-from docutils import nodes
+from docutils import nodes, languages
from docutils.transforms import parts
from docutils.parsers.rst import directives
@@ -26,18 +26,50 @@
def contents(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
- """Table of contents."""
+ """
+ Table of contents.
+
+ The table of contents is generated in two passes: initial parse and
+ transform. During the initial parse, a 'pending' element is generated
+ which acts as a placeholder, storing the TOC title and any options
+ internally. At a later stage in the processing, the 'pending' element is
+ replaced by a 'topic' element, a title and the table of contents proper.
+ """
+ if not (state_machine.match_titles
+ or isinstance(state_machine.node, nodes.sidebar)):
+ error = state_machine.reporter.error(
+ 'The "%s" directive may not be used within topics '
+ 'or body elements.' % name,
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [error]
+ document = state_machine.document
+ language = languages.get_language(document.settings.language_code)
if arguments:
title_text = arguments[0]
text_nodes, messages = state.inline_text(title_text, lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
messages = []
- title = None
- pending = nodes.pending(parts.Contents, {'title': title}, block_text)
+ if options.has_key('local'):
+ title = None
+ else:
+ title = nodes.title('', language.labels['contents'])
+ topic = nodes.topic(classes=['contents'])
+ topic['classes'] += options.get('class', [])
+ if title:
+ name = title.astext()
+ topic += title
+ else:
+ name = language.labels['contents']
+ name = nodes.fully_normalize_name(name)
+ if not document.has_name(name):
+ topic['names'].append(name)
+ document.note_implicit_target(topic)
+ pending = nodes.pending(parts.Contents, rawsource=block_text)
pending.details.update(options)
- state_machine.document.note_pending(pending)
- return [pending] + messages
+ document.note_pending(pending)
+ topic += pending
+ return [topic] + messages
contents.arguments = (0, 1, 1)
contents.options = {'depth': directives.nonnegative_int,
@@ -53,4 +85,40 @@
state_machine.document.note_pending(pending)
return [pending]
-sectnum.options = {'depth': int}
+sectnum.options = {'depth': int,
+ 'start': int,
+ 'prefix': directives.unchanged_required,
+ 'suffix': directives.unchanged_required}
+
+def header_footer(node, name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ """Contents of document header or footer."""
+ if not content:
+ warning = state_machine.reporter.warning(
+ 'Content block expected for the "%s" directive; none found.'
+ % name, nodes.literal_block(block_text, block_text),
+ line=lineno)
+ node.append(nodes.paragraph(
+ '', 'Problem with the "%s" directive: no content supplied.' % name))
+ return [warning]
+ text = '\n'.join(content)
+ state.nested_parse(content, content_offset, node)
+ return []
+
+def header(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ decoration = state_machine.document.get_decoration()
+ node = decoration.get_header()
+ return header_footer(node, name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine)
+
+header.content = 1
+
+def footer(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ decoration = state_machine.document.get_decoration()
+ node = decoration.get_footer()
+ return header_footer(node, name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine)
+
+footer.content = 1
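[Editor's note] The new "header" and "footer" directives above fill in the document's decoration element, which writers then place at the top and bottom of the rendered output. A rough sketch of how they might be exercised; the reST source is illustrative and only the standard publish_string API is assumed:

from docutils.core import publish_string

source = u"""\
.. header:: Draft -- subject to change
.. footer:: Rendered with docutils

Some body text.
"""

# The "pseudoxml" writer makes the <decoration>, <header> and <footer>
# nodes created by the directives easy to inspect in the output tree.
print publish_string(source, writer_name='pseudoxml')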
Modified: Zope3/trunk/src/docutils/parsers/rst/directives/references.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/directives/references.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/directives/references.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger, Dmitry Jemerov
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 856 $
+# Date: $Date: 2002-10-24 03:01:53 +0200 (Thu, 24 Oct 2002) $
# Copyright: This module has been placed in the public domain.
"""
Added: Zope3/trunk/src/docutils/parsers/rst/directives/tables.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/directives/tables.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/directives/tables.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,441 @@
+# Authors: David Goodger, David Priest
+# Contact: goodger at python.org
+# Revision: $Revision: 3165 $
+# Date: $Date: 2005-04-05 04:55:06 +0200 (Tue, 05 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+"""
+Directives for table elements.
+"""
+
+__docformat__ = 'reStructuredText'
+
+
+import sys
+import os.path
+from docutils import io, nodes, statemachine, utils
+from docutils.utils import SystemMessagePropagation
+from docutils.parsers.rst import directives
+
+try:
+ import csv # new in Python 2.3
+except ImportError:
+ csv = None
+
+try:
+ import urllib2
+except ImportError:
+ urllib2 = None
+
+try:
+ True
+except NameError: # Python 2.2 & 2.1 compatibility
+ True = not 0
+ False = not 1
+
+
+def table(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ if not content:
+ warning = state_machine.reporter.warning(
+ 'Content block expected for the "%s" directive; none found.'
+ % name, nodes.literal_block(block_text, block_text),
+ line=lineno)
+ return [warning]
+ title, messages = make_title(arguments, state, lineno)
+ node = nodes.Element() # anonymous container for parsing
+ state.nested_parse(content, content_offset, node)
+ if len(node) != 1 or not isinstance(node[0], nodes.table):
+ error = state_machine.reporter.error(
+ 'Error parsing content block for the "%s" directive: '
+ 'exactly one table expected.'
+ % name, nodes.literal_block(block_text, block_text),
+ line=lineno)
+ return [error]
+ table_node = node[0]
+ table_node['classes'] += options.get('class', [])
+ if title:
+ table_node.insert(0, title)
+ return [table_node] + messages
+
+table.arguments = (0, 1, 1)
+table.options = {'class': directives.class_option}
+table.content = 1
+
+def make_title(arguments, state, lineno):
+ if arguments:
+ title_text = arguments[0]
+ text_nodes, messages = state.inline_text(title_text, lineno)
+ title = nodes.title(title_text, '', *text_nodes)
+ else:
+ title = None
+ messages = []
+ return title, messages
+
+
+if csv:
+ class DocutilsDialect(csv.Dialect):
+
+ """CSV dialect for `csv_table` directive function."""
+
+ delimiter = ','
+ quotechar = '"'
+ doublequote = True
+ skipinitialspace = True
+ lineterminator = '\n'
+ quoting = csv.QUOTE_MINIMAL
+
+ def __init__(self, options):
+ if options.has_key('delim'):
+ self.delimiter = str(options['delim'])
+ if options.has_key('keepspace'):
+ self.skipinitialspace = False
+ if options.has_key('quote'):
+ self.quotechar = str(options['quote'])
+ if options.has_key('escape'):
+ self.doublequote = False
+ self.escapechar = str(options['escape'])
+ csv.Dialect.__init__(self)
+
+
+ class HeaderDialect(csv.Dialect):
+
+ """CSV dialect to use for the "header" option data."""
+
+ delimiter = ','
+ quotechar = '"'
+ escapechar = '\\'
+ doublequote = False
+ skipinitialspace = True
+ lineterminator = '\n'
+ quoting = csv.QUOTE_MINIMAL
+
+
+def csv_table(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ try:
+ if ( not state.document.settings.file_insertion_enabled
+ and (options.has_key('file') or options.has_key('url')) ):
+ warning = state_machine.reporter.warning(
+ '"%s" directive disabled.' % name,
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [warning]
+ check_requirements(name, lineno, block_text, state_machine)
+ title, messages = make_title(arguments, state, lineno)
+ csv_data, source = get_csv_data(
+ name, options, content, lineno, block_text, state, state_machine)
+ table_head, max_header_cols = process_header_option(
+ options, state_machine, lineno)
+ rows, max_cols = parse_csv_data_into_rows(
+ csv_data, DocutilsDialect(options), source, options)
+ max_cols = max(max_cols, max_header_cols)
+ header_rows = options.get('header-rows', 0) # default 0
+ stub_columns = options.get('stub-columns', 0) # default 0
+ check_table_dimensions(
+ rows, header_rows, stub_columns, name, lineno,
+ block_text, state_machine)
+ table_head.extend(rows[:header_rows])
+ table_body = rows[header_rows:]
+ col_widths = get_column_widths(
+ max_cols, name, options, lineno, block_text, state_machine)
+ extend_short_rows_with_empty_cells(max_cols, (table_head, table_body))
+ except SystemMessagePropagation, detail:
+ return [detail.args[0]]
+ except csv.Error, detail:
+ error = state_machine.reporter.error(
+ 'Error with CSV data in "%s" directive:\n%s' % (name, detail),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [error]
+ table = (col_widths, table_head, table_body)
+ table_node = state.build_table(table, content_offset, stub_columns)
+ table_node['classes'] += options.get('class', [])
+ if title:
+ table_node.insert(0, title)
+ return [table_node] + messages
+
+csv_table.arguments = (0, 1, 1)
+csv_table.options = {'header-rows': directives.nonnegative_int,
+ 'stub-columns': directives.nonnegative_int,
+ 'header': directives.unchanged,
+ 'widths': directives.positive_int_list,
+ 'file': directives.path,
+ 'url': directives.uri,
+ 'encoding': directives.encoding,
+ 'class': directives.class_option,
+ # field delimiter char
+ 'delim': directives.single_char_or_whitespace_or_unicode,
+ # treat whitespace after delimiter as significant
+ 'keepspace': directives.flag,
+ # text field quote/unquote char:
+ 'quote': directives.single_char_or_unicode,
+ # char used to escape delim & quote as-needed:
+ 'escape': directives.single_char_or_unicode,}
+csv_table.content = 1
+
+def check_requirements(name, lineno, block_text, state_machine):
+ if not csv:
+ error = state_machine.reporter.error(
+ 'The "%s" directive is not compatible with this version of '
+ 'Python (%s). Requires the "csv" module, new in Python 2.3.'
+ % (name, sys.version.split()[0]),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+
+def get_csv_data(name, options, content, lineno, block_text,
+ state, state_machine):
+ """
+ CSV data can come from the directive content, from an external file, or
+ from a URL reference.
+ """
+ encoding = options.get('encoding', state.document.settings.input_encoding)
+ if content: # CSV data is from directive content
+ if options.has_key('file') or options.has_key('url'):
+ error = state_machine.reporter.error(
+ '"%s" directive may not both specify an external file and '
+ 'have content.' % name,
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+ source = content.source(0)
+ csv_data = content
+ elif options.has_key('file'): # CSV data is from an external file
+ if options.has_key('url'):
+ error = state_machine.reporter.error(
+ 'The "file" and "url" options may not be simultaneously '
+ 'specified for the "%s" directive.' % name,
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+ source_dir = os.path.dirname(
+ os.path.abspath(state.document.current_source))
+ source = os.path.normpath(os.path.join(source_dir, options['file']))
+ source = utils.relative_path(None, source)
+ try:
+ state.document.settings.record_dependencies.add(source)
+ csv_file = io.FileInput(
+ source_path=source, encoding=encoding,
+ error_handler
+ =state.document.settings.input_encoding_error_handler,
+ handle_io_errors=None)
+ csv_data = csv_file.read().splitlines()
+ except IOError, error:
+ severe = state_machine.reporter.severe(
+ 'Problems with "%s" directive path:\n%s.' % (name, error),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(severe)
+ elif options.has_key('url'): # CSV data is from a URL
+ if not urllib2:
+ severe = state_machine.reporter.severe(
+ 'Problems with the "%s" directive and its "url" option: '
+ 'unable to access the required functionality (from the '
+ '"urllib2" module).' % name,
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(severe)
+ source = options['url']
+ try:
+ csv_text = urllib2.urlopen(source).read()
+ except (urllib2.URLError, IOError, OSError, ValueError), error:
+ severe = state_machine.reporter.severe(
+ 'Problems with "%s" directive URL "%s":\n%s.'
+ % (name, options['url'], error),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(severe)
+ csv_file = io.StringInput(
+ source=csv_text, source_path=source, encoding=encoding,
+ error_handler=state.document.settings.input_encoding_error_handler)
+ csv_data = csv_file.read().splitlines()
+ else:
+ error = state_machine.reporter.warning(
+ 'The "%s" directive requires content; none supplied.' % (name),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+ return csv_data, source
+
+def process_header_option(options, state_machine, lineno):
+ source = state_machine.get_source(lineno - 1)
+ table_head = []
+ max_header_cols = 0
+ if options.has_key('header'): # separate table header in option
+ rows, max_header_cols = parse_csv_data_into_rows(
+ options['header'].split('\n'), HeaderDialect(), source, options)
+ table_head.extend(rows)
+ return table_head, max_header_cols
+
+def parse_csv_data_into_rows(csv_data, dialect, source, options):
+ # csv.py doesn't do Unicode; encode temporarily as UTF-8
+ csv_reader = csv.reader([line.encode('utf-8') for line in csv_data],
+ dialect=dialect)
+ rows = []
+ max_cols = 0
+ for row in csv_reader:
+ row_data = []
+ for cell in row:
+ # decode UTF-8 back to Unicode
+ cell_text = unicode(cell, 'utf-8')
+ cell_data = (0, 0, 0, statemachine.StringList(
+ cell_text.splitlines(), source=source))
+ row_data.append(cell_data)
+ rows.append(row_data)
+ max_cols = max(max_cols, len(row))
+ return rows, max_cols
+
+def check_table_dimensions(rows, header_rows, stub_columns, name, lineno,
+ block_text, state_machine):
+ if len(rows) < header_rows:
+ error = state_machine.reporter.error(
+ '%s header row(s) specified but only %s row(s) of data supplied '
+ '("%s" directive).' % (header_rows, len(rows), name),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+ if len(rows) == header_rows > 0:
+ error = state_machine.reporter.error(
+ 'Insufficient data supplied (%s row(s)); no data remaining for '
+ 'table body, required by "%s" directive.' % (len(rows), name),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+ for row in rows:
+ if len(row) < stub_columns:
+ error = state_machine.reporter.error(
+ '%s stub column(s) specified but only %s columns(s) of data '
+ 'supplied ("%s" directive).' % (stub_columns, len(row), name),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+ if len(row) == stub_columns > 0:
+ error = state_machine.reporter.error(
+ 'Insufficient data supplied (%s columns(s)); no data remaining '
+ 'for table body, required by "%s" directive.'
+ % (len(row), name),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+
+def get_column_widths(max_cols, name, options, lineno, block_text,
+ state_machine):
+ if options.has_key('widths'):
+ col_widths = options['widths']
+ if len(col_widths) != max_cols:
+ error = state_machine.reporter.error(
+ '"%s" widths do not match the number of columns in table (%s).'
+ % (name, max_cols),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+ elif max_cols:
+ col_widths = [100 / max_cols] * max_cols
+ else:
+ error = state_machine.reporter.error(
+ 'No table data detected in CSV file.',
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+ return col_widths
+
+def extend_short_rows_with_empty_cells(columns, parts):
+ for part in parts:
+ for row in part:
+ if len(row) < columns:
+ row.extend([(0, 0, 0, [])] * (columns - len(row)))
+
+def list_table(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ """
+ Implement tables whose data is encoded as a uniform two-level bullet list.
+ For further ideas, see
+ http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
+ """
+ if not content:
+ error = state_machine.reporter.error(
+ 'The "%s" directive is empty; content required.' % name,
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [error]
+ title, messages = make_title(arguments, state, lineno)
+ node = nodes.Element() # anonymous container for parsing
+ state.nested_parse(content, content_offset, node)
+ try:
+ num_cols, col_widths = check_list_content(
+ node, name, options, content, lineno, block_text, state_machine)
+ table_data = [[item.children for item in row_list[0]]
+ for row_list in node[0]]
+ header_rows = options.get('header-rows', 0) # default 0
+ stub_columns = options.get('stub-columns', 0) # default 0
+ check_table_dimensions(
+ table_data, header_rows, stub_columns, name, lineno,
+ block_text, state_machine)
+ except SystemMessagePropagation, detail:
+ return [detail.args[0]]
+ table_node = build_table_from_list(table_data, col_widths,
+ header_rows, stub_columns)
+ table_node['classes'] += options.get('class', [])
+ if title:
+ table_node.insert(0, title)
+ return [table_node] + messages
+
+list_table.arguments = (0, 1, 1)
+list_table.options = {'header-rows': directives.nonnegative_int,
+ 'stub-columns': directives.nonnegative_int,
+ 'widths': directives.positive_int_list,
+ 'class': directives.class_option}
+list_table.content = 1
+
+def check_list_content(node, name, options, content, lineno, block_text,
+ state_machine):
+ if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
+ error = state_machine.reporter.error(
+ 'Error parsing content block for the "%s" directive: '
+ 'exactly one bullet list expected.' % name,
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+ list_node = node[0]
+ # Check for a uniform two-level bullet list:
+ for item_index in range(len(list_node)):
+ item = list_node[item_index]
+ if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
+ error = state_machine.reporter.error(
+ 'Error parsing content block for the "%s" directive: '
+ 'two-level bullet list expected, but row %s does not contain '
+ 'a second-level bullet list.' % (name, item_index + 1),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+ elif item_index:
+ if len(item[0]) != num_cols:
+ error = state_machine.reporter.error(
+ 'Error parsing content block for the "%s" directive: '
+ 'uniform two-level bullet list expected, but row %s does '
+ 'not contain the same number of items as row 1 (%s vs %s).'
+ % (name, item_index + 1, len(item[0]), num_cols),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+ else:
+ num_cols = len(item[0])
+ col_widths = get_column_widths(
+ num_cols, name, options, lineno, block_text, state_machine)
+ if len(col_widths) != num_cols:
+ error = state_machine.reporter.error(
+ 'Error parsing "widths" option of the "%s" directive: '
+ 'number of columns does not match the table data (%s vs %s).'
+ % (name, len(col_widths), num_cols),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ raise SystemMessagePropagation(error)
+ return num_cols, col_widths
+
+def build_table_from_list(table_data, col_widths, header_rows, stub_columns):
+ table = nodes.table()
+ tgroup = nodes.tgroup(cols=len(col_widths))
+ table += tgroup
+ for col_width in col_widths:
+ colspec = nodes.colspec(colwidth=col_width)
+ if stub_columns:
+ colspec.attributes['stub'] = 1
+ stub_columns -= 1
+ tgroup += colspec
+ rows = []
+ for row in table_data:
+ row_node = nodes.row()
+ for cell in row:
+ entry = nodes.entry()
+ entry += cell
+ row_node += entry
+ rows.append(row_node)
+ if header_rows:
+ thead = nodes.thead()
+ thead.extend(rows[:header_rows])
+ tgroup += thead
+ tbody = nodes.tbody()
+ tbody.extend(rows[header_rows:])
+ tgroup += tbody
+ return table
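[Editor's note] The new tables.py above provides the "table", "csv-table" and "list-table" directives. A short usage sketch for csv-table; the data is made up, and Python 2.3+ is assumed for the csv module, as check_requirements() above enforces:

from docutils.core import publish_string

source = u"""\
.. csv-table:: Frozen Delights
   :header: "Treat", "Quantity", "Description"
   :widths: 15, 10, 30

   "Albatross", 2.99, "On a stick!"
   "Crunchy Frog", 1.49, "If we took the bones out, it wouldn't be crunchy."
"""

# :header: adds an extra header row, :widths: sets relative column widths,
# and the directive content itself is parsed as CSV data into table rows.
print publish_string(source, writer_name='html')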
Modified: Zope3/trunk/src/docutils/parsers/rst/languages/__init__.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/__init__.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/__init__.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,12 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 2224 $
+# Date: $Date: 2004-06-05 21:40:46 +0200 (Sat, 05 Jun 2004) $
# Copyright: This module has been placed in the public domain.
+# Internationalization details are documented in
+# <http://docutils.sf.net/docs/howto/i18n.html>.
+
"""
This package contains modules for language-dependent features of
reStructuredText.
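[Editor's note] These per-language modules are what let documents use translated directive and role names. A minimal sketch of the effect, assuming the standard settings_overrides mechanism of publish_string; the German source below is illustrative:

from docutils.core import publish_string

source = u"""\
.. inhalt::

Ein Abschnitt
=============

Etwas Text.
"""

# With language_code set to 'de', the parser resolves 'inhalt' through
# docutils/parsers/rst/languages/de.py to the canonical 'contents' directive.
print publish_string(source, writer_name='pseudoxml',
                     settings_overrides={'language_code': 'de'})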
Added: Zope3/trunk/src/docutils/parsers/rst/languages/af.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/af.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/af.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,100 @@
+# Author: Jannie Hofmeyr
+# Contact: jhsh at sun.ac.za
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Afrikaans-language mappings for language-dependent features of
+reStructuredText.
+"""
+
+__docformat__ = 'reStructuredText'
+
+
+directives = {
+ 'aandag': 'attention',
+ 'versigtig': 'caution',
+ 'gevaar': 'danger',
+ 'fout': 'error',
+ 'wenk': 'hint',
+ 'belangrik': 'important',
+ 'nota': 'note',
+ 'tip': 'tip', # hint and tip both have the same translation: wenk
+ 'waarskuwing': 'warning',
+ 'vermaning': 'admonition',
+ 'kantstreep': 'sidebar',
+ 'onderwerp': 'topic',
+ 'lynblok': 'line-block',
+ 'parsed-literal (translation required)': 'parsed-literal',
+ 'rubriek': 'rubric',
+ 'epigraaf': 'epigraph',
+ 'hoogtepunte': 'highlights',
+ 'pull-quote (translation required)': 'pull-quote',
+ u'compound (translation required)': 'compound',
+ #'vrae': 'questions',
+ #'qa': 'questions',
+ #'faq': 'questions',
+ 'table (translation required)': 'table',
+ 'csv-table (translation required)': 'csv-table',
+ 'list-table (translation required)': 'list-table',
+ 'meta': 'meta',
+ #'beeldkaart': 'imagemap',
+ 'beeld': 'image',
+ 'figuur': 'figure',
+ 'insluiting': 'include',
+ 'rou': 'raw',
+ 'vervang': 'replace',
+ 'unicode': 'unicode', # should this be translated? unikode
+ 'klas': 'class',
+ 'role (translation required)': 'role',
+ 'inhoud': 'contents',
+ 'sectnum': 'sectnum',
+ 'section-numbering': 'sectnum',
+ u'header (translation required)': 'header',
+ u'footer (translation required)': 'footer',
+ #'voetnote': 'footnotes',
+ #'aanhalings': 'citations',
+ 'teikennotas': 'target-notes',
+ 'restructuredtext-test-directive': 'restructuredtext-test-directive'}
+"""Afrikaans name to registered (in directives/__init__.py) directive name
+mapping."""
+
+roles = {
+ 'afkorting': 'abbreviation',
+ 'ab': 'abbreviation',
+ 'akroniem': 'acronym',
+ 'ac': 'acronym',
+ 'indeks': 'index',
+ 'i': 'index',
+ 'voetskrif': 'subscript',
+ 'sub': 'subscript',
+ 'boskrif': 'superscript',
+ 'sup': 'superscript',
+ 'titelverwysing': 'title-reference',
+ 'titel': 'title-reference',
+ 't': 'title-reference',
+ 'pep-verwysing': 'pep-reference',
+ 'pep': 'pep-reference',
+ 'rfc-verwysing': 'rfc-reference',
+ 'rfc': 'rfc-reference',
+ 'nadruk': 'emphasis',
+ 'sterk': 'strong',
+ 'literal (translation required)': 'literal',
+ 'benoemde verwysing': 'named-reference',
+ 'anonieme verwysing': 'anonymous-reference',
+ 'voetnootverwysing': 'footnote-reference',
+ 'aanhalingverwysing': 'citation-reference',
+ 'vervangingsverwysing': 'substitution-reference',
+ 'teiken': 'target',
+ 'uri-verwysing': 'uri-reference',
+ 'uri': 'uri-reference',
+ 'url': 'uri-reference',
+ 'rou': 'raw',}
+"""Mapping of Afrikaans role names to canonical role names for interpreted text.
+"""
Added: Zope3/trunk/src/docutils/parsers/rst/languages/ca.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/ca.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/ca.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,119 @@
+# Author: Ivan Vilata i Balaguer
+# Contact: ivan at selidor.net
+# Revision: $Revision: 3276 $
+# Date: $Date: 2005-04-30 13:34:52 +0200 (Sat, 30 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Catalan-language mappings for language-dependent features of
+reStructuredText.
+"""
+
+__docformat__ = 'reStructuredText'
+
+
+directives = {
+ # language-dependent: fixed
+ u'atenci\u00F3': 'attention',
+ u'compte': 'caution',
+ u'perill': 'danger',
+ u'error': 'error',
+ u'suggeriment': 'hint',
+ u'important': 'important',
+ u'nota': 'note',
+ u'consell': 'tip',
+ u'av\u00EDs': 'warning',
+ u'advertiment': 'admonition',
+ u'nota-al-marge': 'sidebar',
+ u'nota-marge': 'sidebar',
+ u'tema': 'topic',
+ u'bloc-de-l\u00EDnies': 'line-block',
+ u'bloc-l\u00EDnies': 'line-block',
+ u'literal-analitzat': 'parsed-literal',
+ u'r\u00FAbrica': 'rubric',
+ u'ep\u00EDgraf': 'epigraph',
+ u'sumari': 'highlights',
+ u'cita-destacada': 'pull-quote',
+ u'compost': 'compound',
+ #'questions': 'questions',
+ u'taula': 'table',
+ u'taula-csv': 'csv-table',
+ u'taula-llista': 'list-table',
+ #'qa': 'questions',
+ #'faq': 'questions',
+ u'meta': 'meta',
+ #'imagemap': 'imagemap',
+ u'imatge': 'image',
+ u'figura': 'figure',
+ u'inclou': 'include',
+ u'incloure': 'include',
+ u'cru': 'raw',
+ u'reempla\u00E7a': 'replace',
+ u'reempla\u00E7ar': 'replace',
+ u'unicode': 'unicode',
+ u'classe': 'class',
+ u'rol': 'role',
+ u'contingut': 'contents',
+ u'numsec': 'sectnum',
+ u'numeraci\u00F3-de-seccions': 'sectnum',
+ u'numeraci\u00F3-seccions': 'sectnum',
+ u'cap\u00E7alera': 'header',
+ u'peu-de-p\u00E0gina': 'footer',
+ u'peu-p\u00E0gina': 'footer',
+ #'footnotes': 'footnotes',
+ #'citations': 'citations',
+ u'notes-amb-destinacions': 'target-notes',
+ u'notes-destinacions': 'target-notes',
+ u'directiva-de-prova-de-restructuredtext': 'restructuredtext-test-directive'}
+"""Catalan name to registered (in directives/__init__.py) directive name
+mapping."""
+
+roles = {
+ # language-dependent: fixed
+ u'abreviatura': 'abbreviation',
+ u'abreviaci\u00F3': 'abbreviation',
+ u'abrev': 'abbreviation',
+ u'ab': 'abbreviation',
+ u'acr\u00F2nim': 'acronym',
+ u'ac': 'acronym',
+ u'\u00EDndex': 'index',
+ u'i': 'index',
+ u'sub\u00EDndex': 'subscript',
+ u'sub': 'subscript',
+ u'super\u00EDndex': 'superscript',
+ u'sup': 'superscript',
+ u'refer\u00E8ncia-a-t\u00EDtol': 'title-reference',
+ u'refer\u00E8ncia-t\u00EDtol': 'title-reference',
+ u't\u00EDtol': 'title-reference',
+ u't': 'title-reference',
+ u'refer\u00E8ncia-a-pep': 'pep-reference',
+ u'refer\u00E8ncia-pep': 'pep-reference',
+ u'pep': 'pep-reference',
+ u'refer\u00E8ncia-a-rfc': 'rfc-reference',
+ u'refer\u00E8ncia-rfc': 'rfc-reference',
+ u'rfc': 'rfc-reference',
+ u'\u00E8mfasi': 'emphasis',
+ u'destacat': 'strong',
+ u'literal': 'literal',
+ u'refer\u00E8ncia-amb-nom': 'named-reference',
+ u'refer\u00E8ncia-nom': 'named-reference',
+ u'refer\u00E8ncia-an\u00F2nima': 'anonymous-reference',
+ u'refer\u00E8ncia-a-nota-al-peu': 'footnote-reference',
+ u'refer\u00E8ncia-nota-al-peu': 'footnote-reference',
+ u'refer\u00E8ncia-a-cita': 'citation-reference',
+ u'refer\u00E8ncia-cita': 'citation-reference',
+ u'refer\u00E8ncia-a-substituci\u00F3': 'substitution-reference',
+ u'refer\u00E8ncia-substituci\u00F3': 'substitution-reference',
+ u'destinaci\u00F3': 'target',
+ u'refer\u00E8ncia-a-uri': 'uri-reference',
+ u'refer\u00E8ncia-uri': 'uri-reference',
+ u'uri': 'uri-reference',
+ u'url': 'uri-reference',
+ u'cru': 'raw',}
+"""Mapping of Catalan role names to canonical role names for interpreted text.
+"""
Added: Zope3/trunk/src/docutils/parsers/rst/languages/cs.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/cs.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/cs.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,102 @@
+# Author: Marek Blaha
+# Contact: mb at dat.cz
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Czech-language mappings for language-dependent features of
+reStructuredText.
+"""
+
+__docformat__ = 'reStructuredText'
+
+
+directives = {
+ # language-dependent: fixed
+ u'pozor': 'attention',
+ u'caution (translation required)': 'caution', # jak rozlisit caution a warning?
+ u'nebezpe\u010D\u00ED': 'danger',
+ u'chyba': 'error',
+ u'rada': 'hint',
+ u'd\u016Fle\u017Eit\u00E9': 'important',
+ u'pozn\u00E1mka': 'note',
+ u'tip (translation required)': 'tip',
+ u'varov\u00E1n\u00ED': 'warning',
+ u'admonition (translation required)': 'admonition',
+ u'sidebar (translation required)': 'sidebar',
+ u't\u00E9ma': 'topic',
+ u'line-block (translation required)': 'line-block',
+ u'parsed-literal (translation required)': 'parsed-literal',
+ u'odd\u00EDl': 'rubric',
+ u'moto': 'epigraph',
+ u'highlights (translation required)': 'highlights',
+ u'pull-quote (translation required)': 'pull-quote',
+ u'compound (translation required)': 'compound',
+ #'questions': 'questions',
+ #'qa': 'questions',
+ #'faq': 'questions',
+ u'table (translation required)': 'table',
+ u'csv-table (translation required)': 'csv-table',
+ u'list-table (translation required)': 'list-table',
+ u'meta (translation required)': 'meta',
+ #'imagemap': 'imagemap',
+ u'image (translation required)': 'image', # obrazek
+ u'figure (translation required)': 'figure', # a tady?
+ u'include (translation required)': 'include',
+ u'raw (translation required)': 'raw',
+ u'replace (translation required)': 'replace',
+ u'unicode (translation required)': 'unicode',
+ u't\u0159\u00EDda': 'class',
+ u'role (translation required)': 'role',
+ u'obsah': 'contents',
+ u'sectnum (translation required)': 'sectnum',
+ u'section-numbering (translation required)': 'sectnum',
+ u'header (translation required)': 'header',
+ u'footer (translation required)': 'footer',
+ #'footnotes': 'footnotes',
+ #'citations': 'citations',
+ u'target-notes (translation required)': 'target-notes',
+ u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
+"""Czech name to registered (in directives/__init__.py) directive name
+mapping."""
+
+roles = {
+ # language-dependent: fixed
+ u'abbreviation (translation required)': 'abbreviation',
+ u'ab (translation required)': 'abbreviation',
+ u'acronym (translation required)': 'acronym',
+ u'ac (translation required)': 'acronym',
+ u'index (translation required)': 'index',
+ u'i (translation required)': 'index',
+ u'subscript (translation required)': 'subscript',
+ u'sub (translation required)': 'subscript',
+ u'superscript (translation required)': 'superscript',
+ u'sup (translation required)': 'superscript',
+ u'title-reference (translation required)': 'title-reference',
+ u'title (translation required)': 'title-reference',
+ u't (translation required)': 'title-reference',
+ u'pep-reference (translation required)': 'pep-reference',
+ u'pep (translation required)': 'pep-reference',
+ u'rfc-reference (translation required)': 'rfc-reference',
+ u'rfc (translation required)': 'rfc-reference',
+ u'emphasis (translation required)': 'emphasis',
+ u'strong (translation required)': 'strong',
+ u'literal (translation required)': 'literal',
+ u'named-reference (translation required)': 'named-reference',
+ u'anonymous-reference (translation required)': 'anonymous-reference',
+ u'footnote-reference (translation required)': 'footnote-reference',
+ u'citation-reference (translation required)': 'citation-reference',
+ u'substitution-reference (translation required)': 'substitution-reference',
+ u'target (translation required)': 'target',
+ u'uri-reference (translation required)': 'uri-reference',
+ u'uri (translation required)': 'uri-reference',
+ u'url (translation required)': 'uri-reference',
+ u'raw (translation required)': 'raw',}
+"""Mapping of Czech role names to canonical role names for interpreted text.
+"""
Modified: Zope3/trunk/src/docutils/parsers/rst/languages/de.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/de.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/de.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,10 +1,14 @@
-# -*- coding: iso-8859-1 -*-
-# Author: Engelbert Gruber
+# Authors: Engelbert Gruber; Felix Wiemann
# Contact: grubert at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
# Copyright: This module has been placed in the public domain.
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
"""
German-language mappings for language-dependent features of
reStructuredText.
@@ -21,36 +25,69 @@
'hinweis': 'hint',
'wichtig': 'important',
'notiz': 'note',
- 'tip': 'tip',
+ 'tipp': 'tip',
'warnung': 'warning',
'ermahnung': 'admonition',
- 'kasten': 'sidebar', # seitenkasten ?
- 'thema': 'topic',
- 'line-block': 'line-block',
- 'parsed-literal': 'parsed-literal',
+ 'kasten': 'sidebar',
+ 'seitenkasten': 'sidebar',
+ 'thema': 'topic',
+ 'zeilen-block': 'line-block',
+ 'parsed-literal (translation required)': 'parsed-literal',
'rubrik': 'rubric',
- 'epigraph (translation required)': 'epigraph',
+ 'epigraph': 'epigraph',
'highlights (translation required)': 'highlights',
'pull-quote (translation required)': 'pull-quote', # kasten too ?
- #'questions': 'questions',
- #'qa': 'questions',
- #'faq': 'questions',
+ 'zusammengesetzt': 'compound',
+ 'verbund': 'compound',
+ #'fragen': 'questions',
+ 'tabelle': 'table',
+ 'csv-tabelle': 'csv-table',
+ 'list-table (translation required)': 'list-table',
'meta': 'meta',
#'imagemap': 'imagemap',
'bild': 'image',
'abbildung': 'figure',
- 'raw': 'raw', # unbearbeitet
- 'include': 'include', # einfügen, "füge ein" would be more like a command.
- # einfügung would be the noun.
- 'ersetzung': 'replace', # ersetzen, ersetze
+ u'unver\xe4ndert': 'raw',
+ u'roh': 'raw',
+ u'einf\xfcgen': 'include',
+ 'ersetzung': 'replace',
+ 'ersetzen': 'replace',
+ 'ersetze': 'replace',
'unicode': 'unicode',
- 'klasse': 'class', # offer class too ?
+ 'klasse': 'class',
+ 'rolle': 'role',
'inhalt': 'contents',
- 'sectnum': 'sectnum',
- 'section-numbering': 'sectnum',
- 'target-notes': 'target-notes',
- #'footnotes': 'footnotes',
- #'citations': 'citations',
- 'restructuredtext-test-directive': 'restructuredtext-test-directive'}
-"""English name to registered (in directives/__init__.py) directive name
+ 'kapitel-nummerierung': 'sectnum',
+ 'abschnitts-nummerierung': 'sectnum',
+ u'linkziel-fu\xdfnoten': 'target-notes',
+ u'header (translation required)': 'header',
+ u'footer (translation required)': 'footer',
+ #u'fu\xdfnoten': 'footnotes',
+ #'zitate': 'citations',
+ }
+"""German name to registered (in directives/__init__.py) directive name
mapping."""
+
+roles = {
+ u'abk\xfcrzung': 'abbreviation',
+ 'akronym': 'acronym',
+ 'index': 'index',
+ 'tiefgestellt': 'subscript',
+ 'hochgestellt': 'superscript',
+ 'titel-referenz': 'title-reference',
+ 'pep-referenz': 'pep-reference',
+ 'rfc-referenz': 'rfc-reference',
+ 'betonung': 'emphasis',
+ 'fett': 'strong',
+ u'w\xf6rtlich': 'literal',
+ 'benannte-referenz': 'named-reference',
+ 'unbenannte-referenz': 'anonymous-reference',
+ u'fu\xdfnoten-referenz': 'footnote-reference',
+ 'zitat-referenz': 'citation-reference',
+ 'ersetzungs-referenz': 'substitution-reference',
+ 'ziel': 'target',
+ 'uri-referenz': 'uri-reference',
+ u'unver\xe4ndert': 'raw',
+ u'roh': 'raw',}
+"""Mapping of German role names to canonical role names for interpreted text.
+"""
Modified: Zope3/trunk/src/docutils/parsers/rst/languages/en.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/en.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/en.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,14 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
# Copyright: This module has been placed in the public domain.
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
"""
English-language mappings for language-dependent features of
reStructuredText.
@@ -13,6 +18,7 @@
directives = {
+ # language-dependent: fixed
'attention': 'attention',
'caution': 'caution',
'danger': 'danger',
@@ -31,7 +37,11 @@
'epigraph': 'epigraph',
'highlights': 'highlights',
'pull-quote': 'pull-quote',
+ 'compound': 'compound',
#'questions': 'questions',
+ 'table': 'table',
+ 'csv-table': 'csv-table',
+ 'list-table': 'list-table',
#'qa': 'questions',
#'faq': 'questions',
'meta': 'meta',
@@ -43,9 +53,12 @@
'replace': 'replace',
'unicode': 'unicode',
'class': 'class',
+ 'role': 'role',
'contents': 'contents',
'sectnum': 'sectnum',
'section-numbering': 'sectnum',
+ 'header': 'header',
+ 'footer': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
'target-notes': 'target-notes',
@@ -54,6 +67,7 @@
mapping."""
roles = {
+ # language-dependent: fixed
'abbreviation': 'abbreviation',
'ab': 'abbreviation',
'acronym': 'acronym',
@@ -82,6 +96,7 @@
'target': 'target',
'uri-reference': 'uri-reference',
'uri': 'uri-reference',
- 'url': 'uri-reference',}
+ 'url': 'uri-reference',
+ 'raw': 'raw',}
"""Mapping of English role names to canonical role names for interpreted text.
"""
Added: Zope3/trunk/src/docutils/parsers/rst/languages/eo.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/eo.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/eo.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,111 @@
+# Author: Marcelo Huerta San Martin
+# Contact: richieadler at users.sourceforge.net
+# Revision: $Revision: 3189 $
+# Date: $Date: 2005-04-08 05:05:45 +0200 (Fri, 08 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Esperanto-language mappings for language-dependent features of
+reStructuredText.
+"""
+
+__docformat__ = 'reStructuredText'
+
+
+directives = {
+ # language-dependent: fixed
+ u'atentu': 'attention',
+ u'zorgu': 'caution',
+ u'dangxero': 'danger',
+ u'dan\u011dero': 'danger',
+ u'eraro': 'error',
+ u'spuro': 'hint',
+ u'grava': 'important',
+ u'noto': 'note',
+ u'helpeto': 'tip',
+ u'averto': 'warning',
+ u'admono': 'admonition',
+ u'flankteksto': 'sidebar',
+ u'temo': 'topic',
+ u'linea-bloko': 'line-block',
+ u'analizota-literalo': 'parsed-literal',
+ u'rubriko': 'rubric',
+ u'epigrafo': 'epigraph',
+ u'elstarajxoj': 'highlights',
+ u'elstara\u0135oj': 'highlights',
+ u'ekstera-citajxo': 'pull-quote',
+ u'ekstera-cita\u0135o': 'pull-quote',
+ u'kombinajxo': 'compound',
+ u'kombina\u0135o': 'compound',
+ #'questions': 'questions',
+ #'qa': 'questions',
+ #'faq': 'questions',
+ u'tabelo': 'table',
+ u'tabelo-vdk': 'csv-table', # "valoroj disigitaj per komoj"
+ u'tabelo-csv': 'csv-table',
+ u'tabelo-lista': 'list-table',
+ u'meta': 'meta',
+ #'imagemap': 'imagemap',
+ u'bildo': 'image',
+ u'figuro': 'figure',
+ u'inkludi': 'include',
+ u'senanaliza': 'raw',
+ u'anstatauxi': 'replace',
+ u'anstata\u016di': 'replace',
+ u'unicode': 'unicode',
+ u'klaso': 'class',
+ u'rolo': 'role',
+ u'enhavo': 'contents',
+ u'seknum': 'sectnum',
+ u'sekcia-numerado': 'sectnum',
+ u'kapsekcio': 'header',
+ u'piedsekcio': 'footer',
+ #'footnotes': 'footnotes',
+ #'citations': 'citations',
+ u'celaj-notoj': 'target-notes',
+ u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
+"""Esperanto name to registered (in directives/__init__.py) directive name
+mapping."""
+
+roles = {
+ # language-dependent: fixed
+ u'mallongigo': 'abbreviation',
+ u'mall': 'abbreviation',
+ u'komenclitero': 'acronym',
+ u'kl': 'acronym',
+ u'indekso': 'index',
+ u'i': 'index',
+ u'subskribo': 'subscript',
+ u'sub': 'subscript',
+ u'supraskribo': 'superscript',
+ u'sup': 'superscript',
+ u'titola-referenco': 'title-reference',
+ u'titolo': 'title-reference',
+ u't': 'title-reference',
+ u'pep-referenco': 'pep-reference',
+ u'pep': 'pep-reference',
+ u'rfc-referenco': 'rfc-reference',
+ u'rfc': 'rfc-reference',
+ u'emfazo': 'emphasis',
+ u'forta': 'strong',
+ u'litera': 'literal',
+ u'nomita-referenco': 'named-reference',
+ u'nenomita-referenco': 'anonymous-reference',
+ u'piednota-referenco': 'footnote-reference',
+ u'citajxo-referenco': 'citation-reference',
+ u'cita\u0135o-referenco': 'citation-reference',
+ u'anstatauxa-referenco': 'substitution-reference',
+ u'anstata\u016da-referenco': 'substitution-reference',
+ u'celo': 'target',
+ u'uri-referenco': 'uri-reference',
+ u'uri': 'uri-reference',
+ u'url': 'uri-reference',
+ u'senanaliza': 'raw',
+}
+"""Mapping of Esperanto role names to canonical role names for interpreted text.
+"""
Modified: Zope3/trunk/src/docutils/parsers/rst/languages/es.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/es.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/es.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,15 @@
-# Author: Marcelo Huerta San Martín
-# Contact: mghsm at uol.com.ar
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# -*- coding: iso-8859-1 -*-
+# Author: Marcelo Huerta San Martín
+# Contact: richieadler at users.sourceforge.net
+# Revision: $Revision: 3190 $
+# Date: $Date: 2005-04-08 05:06:12 +0200 (Fri, 08 Apr 2005) $
# Copyright: This module has been placed in the public domain.
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
"""
Spanish-language mappings for language-dependent features of
reStructuredText.
@@ -36,28 +42,38 @@
u'epigrafe': 'epigraph',
u'destacado': 'highlights',
u'cita-destacada': 'pull-quote',
+ u'combinacion': 'compound',
+ u'combinaci\u00f3n': 'compound',
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
+ u'tabla': 'table',
+ u'tabla-vsc': 'csv-table',
+ u'tabla-csv': 'csv-table',
+ u'tabla-lista': 'list-table',
u'meta': 'meta',
#'imagemap': 'imagemap',
u'imagen': 'image',
u'figura': 'figure',
u'incluir': 'include',
- u'raw': 'raw',
+ u'sin-analisis': 'raw',
+ u'sin-an\u00e1lisis': 'raw',
u'reemplazar': 'replace',
u'unicode': 'unicode',
u'clase': 'class',
+ u'rol': 'role',
u'contenido': 'contents',
u'numseccion': 'sectnum',
u'numsecci\u00f3n': 'sectnum',
u'numeracion-seccion': 'sectnum',
u'numeraci\u00f3n-secci\u00f3n': 'sectnum',
u'notas-destino': 'target-notes',
+ u'cabecera': 'header',
+ u'pie': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
-"""English name to registered (in directives/__init__.py) directive name
+"""Spanish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
@@ -68,6 +84,10 @@
u'ac': 'acronym',
u'indice': 'index',
u'i': 'index',
+ u'subindice': 'subscript',
+ u'sub\u00edndice': 'subscript',
+ u'superindice': 'superscript',
+ u'super\u00edndice': 'superscript',
u'referencia-titulo': 'title-reference',
u'titulo': 'title-reference',
u't': 'title-reference',
@@ -78,7 +98,7 @@
u'enfasis': 'emphasis',
u'\u00e9nfasis': 'emphasis',
u'destacado': 'strong',
- u'literal': 'literal',
+ u'literal': 'literal', # "literal" is also a word in Spanish :-)
u'referencia-con-nombre': 'named-reference',
u'referencia-anonima': 'anonymous-reference',
u'referencia-an\u00f3nima': 'anonymous-reference',
@@ -90,6 +110,8 @@
u'referencia-uri': 'uri-reference',
u'uri': 'uri-reference',
u'url': 'uri-reference',
- }
+ u'sin-analisis': 'raw',
+ u'sin-an\u00e1lisis': 'raw',
+}
"""Mapping of Spanish role names to canonical role names for interpreted text.
"""
Added: Zope3/trunk/src/docutils/parsers/rst/languages/fi.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/fi.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/fi.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,91 @@
+# Author: Asko Soukka
+# Contact: asko.soukka at iki.fi
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Finnish-language mappings for language-dependent features of
+reStructuredText.
+"""
+
+__docformat__ = 'reStructuredText'
+
+
+directives = {
+ # language-dependent: fixed
+ u'huomio': u'attention',
+ u'varo': u'caution',
+ u'vaara': u'danger',
+ u'virhe': u'error',
+ u'vihje': u'hint',
+ u't\u00e4rke\u00e4\u00e4': u'important',
+ u'huomautus': u'note',
+ u'neuvo': u'tip',
+ u'varoitus': u'warning',
+ u'kehotus': u'admonition',
+ u'sivupalkki': u'sidebar',
+ u'aihe': u'topic',
+ u'rivi': u'line-block',
+ u'tasalevyinen': u'parsed-literal',
+ u'ohje': u'rubric',
+ u'epigraafi': u'epigraph',
+ u'kohokohdat': u'highlights',
+ u'lainaus': u'pull-quote',
+ u'taulukko': u'table',
+ u'csv-taulukko': u'csv-table',
+ u'list-table (translation required)': 'list-table',
+ u'compound (translation required)': 'compound',
+ #u'kysymykset': u'questions',
+ u'meta': u'meta',
+ #u'kuvakartta': u'imagemap',
+ u'kuva': u'image',
+ u'kaavio': u'figure',
+ u'sis\u00e4llyt\u00e4': u'include',
+ u'raaka': u'raw',
+ u'korvaa': u'replace',
+ u'unicode': u'unicode',
+ u'luokka': u'class',
+ u'rooli': u'role',
+ u'sis\u00e4llys': u'contents',
+ u'kappale': u'sectnum',
+ u'header (translation required)': 'header',
+ u'footer (translation required)': 'footer',
+ #u'alaviitteet': u'footnotes',
+ #u'viitaukset': u'citations',
+ u'target-notes (translation required)': u'target-notes'}
+"""Finnish name to registered (in directives/__init__.py) directive name
+mapping."""
+
+roles = {
+ # language-dependent: fixed
+ u'lyhennys': u'abbreviation',
+ u'akronyymi': u'acronym',
+ u'kirjainsana': u'acronym',
+ u'hakemisto': u'index',
+ u'luettelo': u'index',
+ u'alaindeksi': u'subscript',
+ u'indeksi': u'subscript',
+ u'yl\u00e4indeksi': u'superscript',
+ u'title-reference (translation required)': u'title-reference',
+ u'title (translation required)': u'title-reference',
+ u'pep-reference (translation required)': u'pep-reference',
+ u'rfc-reference (translation required)': u'rfc-reference',
+ u'korostus': u'emphasis',
+ u'vahvistus': u'strong',
+ u'tasalevyinen': u'literal',
+ u'named-reference (translation required)': u'named-reference',
+ u'anonymous-reference (translation required)': u'anonymous-reference',
+ u'footnote-reference (translation required)': u'footnote-reference',
+ u'citation-reference (translation required)': u'citation-reference',
+ u'substitution-reference (translation required)': u'substitution-reference',
+ u'kohde': u'target',
+ u'uri-reference (translation required)': u'uri-reference',
+ u'raw (translation required)': 'raw',}
+"""Mapping of Finnish role names to canonical role names for interpreted text.
+"""
Modified: Zope3/trunk/src/docutils/parsers/rst/languages/fr.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/fr.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/fr.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,14 @@
# Authors: David Goodger; William Dode
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
# Copyright: This module has been placed in the public domain.
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
"""
French-language mappings for language-dependent features of
reStructuredText.
@@ -33,25 +38,60 @@
u'\u00E9pigraphe': 'epigraph',
u'chapeau': 'highlights',
u'accroche': 'pull-quote',
+ u'compound (translation required)': 'compound',
#u'questions': 'questions',
#u'qr': 'questions',
#u'faq': 'questions',
- u'meta': 'meta',
+ u'tableau': 'table',
+ u'csv-table (translation required)': 'csv-table',
+ u'list-table (translation required)': 'list-table',
+ u'm\u00E9ta': 'meta',
#u'imagemap (translation required)': 'imagemap',
u'image': 'image',
u'figure': 'figure',
u'inclure': 'include',
u'brut': 'raw',
u'remplacer': 'replace',
+ u'remplace': 'replace',
u'unicode': 'unicode',
u'classe': 'class',
+ u'role (translation required)': 'role',
u'sommaire': 'contents',
u'table-des-mati\u00E8res': 'contents',
u'sectnum': 'sectnum',
u'section-num\u00E9rot\u00E9e': 'sectnum',
u'liens': 'target-notes',
+ u'header (translation required)': 'header',
+ u'footer (translation required)': 'footer',
#u'footnotes (translation required)': 'footnotes',
#u'citations (translation required)': 'citations',
}
"""French name to registered (in directives/__init__.py) directive name
mapping."""
+
+roles = {
+ u'abr\u00E9viation': 'abbreviation',
+ u'acronyme': 'acronym',
+ u'sigle': 'acronym',
+ u'index': 'index',
+ u'indice': 'subscript',
+ u'ind': 'subscript',
+ u'exposant': 'superscript',
+ u'exp': 'superscript',
+ u'titre-r\u00E9f\u00E9rence': 'title-reference',
+ u'titre': 'title-reference',
+ u'pep-r\u00E9f\u00E9rence': 'pep-reference',
+ u'rfc-r\u00E9f\u00E9rence': 'rfc-reference',
+ u'emphase': 'emphasis',
+ u'fort': 'strong',
+ u'litt\u00E9ral': 'literal',
+ u'nomm\u00E9e-r\u00E9f\u00E9rence': 'named-reference',
+ u'anonyme-r\u00E9f\u00E9rence': 'anonymous-reference',
+ u'note-r\u00E9f\u00E9rence': 'footnote-reference',
+ u'citation-r\u00E9f\u00E9rence': 'citation-reference',
+ u'substitution-r\u00E9f\u00E9rence': 'substitution-reference',
+ u'lien': 'target',
+ u'uri-r\u00E9f\u00E9rence': 'uri-reference',
+ u'brut': 'raw',}
+"""Mapping of French role names to canonical role names for interpreted text.
+"""
Modified: Zope3/trunk/src/docutils/parsers/rst/languages/it.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/it.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/it.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,14 @@
-# Author: Nicola Larosa
-# Contact: docutils at tekNico.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Author: Nicola Larosa, Lele Gaifax
+# Contact: docutils at tekNico.net, lele at seldati.it
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
# Copyright: This module has been placed in the public domain.
+# Beware: the Italian translation of the reStructuredText documentation
+# at http://docit.bice.dyndns.org/static/ReST, in particular
+# http://docit.bice.dyndns.org/static/ReST/ref/rst/directives.html, needs
+# to be synced with the content of this file.
+
"""
Italian-language mappings for language-dependent features of
reStructuredText.
@@ -22,18 +27,22 @@
'nota': 'note',
'consiglio': 'tip',
'avvertenza': 'warning',
- 'admonition (translation required)': 'admonition',
- 'sidebar (translation required)': 'sidebar',
+ 'ammonizione': 'admonition',
+ 'riquadro': 'sidebar',
'argomento': 'topic',
- 'blocco di linee': 'line-block',
- 'parsed-literal': 'parsed-literal',
- 'rubric (translation required)': 'rubric',
- 'epigraph (translation required)': 'epigraph',
- 'highlights (translation required)': 'highlights',
- 'pull-quote (translation required)': 'pull-quote',
+ 'blocco-di-righe': 'line-block',
+ 'blocco-interpretato': 'parsed-literal',
+ 'rubrica': 'rubric',
+ 'epigrafe': 'epigraph',
+ 'punti-salienti': 'highlights',
+ 'estratto-evidenziato': 'pull-quote',
+ 'composito': 'compound',
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
+ 'tabella': 'table',
+ 'tabella-csv': 'csv-table',
+ 'tabella-elenco': 'list-table',
'meta': 'meta',
#'imagemap': 'imagemap',
'immagine': 'image',
@@ -42,13 +51,40 @@
'grezzo': 'raw',
'sostituisci': 'replace',
'unicode': 'unicode',
- 'class (translation required)': 'class',
+ 'classe': 'class',
+ 'ruolo': 'role',
'indice': 'contents',
+ 'contenuti': 'contents',
'seznum': 'sectnum',
- 'section-numbering': 'sectnum',
- 'target-notes': 'target-notes',
+ 'sezioni-autonumerate': 'sectnum',
+ 'annota-riferimenti-esterni': 'target-notes',
+ u'header (translation required)': 'header',
+ u'footer (translation required)': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
-"""English name to registered (in directives/__init__.py) directive name
+"""Italian name to registered (in directives/__init__.py) directive name
mapping."""
+
+roles = {
+ 'abbreviazione': 'abbreviation',
+ 'acronimo': 'acronym',
+ 'indice': 'index',
+ 'deponente': 'subscript',
+ 'esponente': 'superscript',
+ 'riferimento-titolo': 'title-reference',
+ 'riferimento-pep': 'pep-reference',
+ 'riferimento-rfc': 'rfc-reference',
+ 'enfasi': 'emphasis',
+ 'forte': 'strong',
+ 'letterale': 'literal',
+ 'riferimento-con-nome': 'named-reference',
+ 'riferimento-anonimo': 'anonymous-reference',
+ 'riferimento-nota': 'footnote-reference',
+ 'riferimento-citazione': 'citation-reference',
+ 'riferimento-sostituzione': 'substitution-reference',
+ 'destinazione': 'target',
+ 'riferimento-uri': 'uri-reference',
+ 'grezzo': 'raw',}
+"""Mapping of Italian role names to canonical role names for interpreted text.
+"""
Added: Zope3/trunk/src/docutils/parsers/rst/languages/nl.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/nl.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/nl.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,106 @@
+# Author: Martijn Pieters
+# Contact: mjpieters at users.sourceforge.net
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Dutch-language mappings for language-dependent features of
+reStructuredText.
+"""
+
+__docformat__ = 'reStructuredText'
+
+
+directives = {
+ # language-dependent: fixed
+ 'attentie': 'attention',
+ 'let-op': 'caution',
+ 'gevaar': 'danger',
+ 'fout': 'error',
+ 'hint': 'hint',
+ 'belangrijk': 'important',
+ 'opmerking': 'note',
+ 'tip': 'tip',
+ 'waarschuwing': 'warning',
+ 'aanmaning': 'admonition',
+ 'katern': 'sidebar',
+ 'onderwerp': 'topic',
+ 'lijn-blok': 'line-block',
+ 'letterlijk-ontleed': 'parsed-literal',
+ 'rubriek': 'rubric',
+ 'opschrift': 'epigraph',
+ 'hoogtepunten': 'highlights',
+ 'pull-quote': 'pull-quote', # Dutch printers use the english term
+ 'samenstelling': 'compound',
+ 'verbinding': 'compound',
+ #'vragen': 'questions',
+ 'tabel': 'table',
+ 'csv-tabel': 'csv-table',
+ 'lijst-tabel': 'list-table',
+ #'veelgestelde-vragen': 'questions',
+ 'meta': 'meta',
+ #'imagemap': 'imagemap',
+ 'beeld': 'image',
+ 'figuur': 'figure',
+ 'opnemen': 'include',
+ 'onbewerkt': 'raw',
+ 'vervang': 'replace',
+ 'vervanging': 'replace',
+ 'unicode': 'unicode',
+ 'klasse': 'class',
+ 'rol': 'role',
+ 'inhoud': 'contents',
+ 'sectnum': 'sectnum',
+ 'sectie-nummering': 'sectnum',
+ 'hoofdstuk-nummering': 'sectnum',
+ u'header (translation required)': 'header',
+ u'footer (translation required)': 'footer',
+ #'voetnoten': 'footnotes',
+ #'citaten': 'citations',
+ 'verwijzing-voetnoten': 'target-notes',
+ 'restructuredtext-test-instructie': 'restructuredtext-test-directive'}
+"""Dutch name to registered (in directives/__init__.py) directive name
+mapping."""
+
+roles = {
+ # language-dependent: fixed
+ 'afkorting': 'abbreviation',
+ # 'ab': 'abbreviation',
+ 'acroniem': 'acronym',
+ 'ac': 'acronym',
+ 'index': 'index',
+ 'i': 'index',
+ 'inferieur': 'subscript',
+ 'inf': 'subscript',
+ 'superieur': 'superscript',
+ 'sup': 'superscript',
+ 'titel-referentie': 'title-reference',
+ 'titel': 'title-reference',
+ 't': 'title-reference',
+ 'pep-referentie': 'pep-reference',
+ 'pep': 'pep-reference',
+ 'rfc-referentie': 'rfc-reference',
+ 'rfc': 'rfc-reference',
+ 'nadruk': 'emphasis',
+ 'extra': 'strong',
+ 'extra-nadruk': 'strong',
+ 'vet': 'strong',
+ 'letterlijk': 'literal',
+ 'benoemde-referentie': 'named-reference',
+ 'anonieme-referentie': 'anonymous-reference',
+ 'voetnoot-referentie': 'footnote-reference',
+ 'citaat-referentie': 'citation-reference',
+ 'substitie-reference': 'substitution-reference',
+ 'verwijzing': 'target',
+ 'uri-referentie': 'uri-reference',
+ 'uri': 'uri-reference',
+ 'url': 'uri-reference',
+ 'onbewerkt': 'raw',}
+"""Mapping of Dutch role names to canonical role names for interpreted text.
+"""
Added: Zope3/trunk/src/docutils/parsers/rst/languages/pt_br.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/pt_br.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/pt_br.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,102 @@
+# Author: David Goodger
+# Contact: goodger at users.sourceforge.net
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Brazilian Portuguese-language mappings for language-dependent features of
+reStructuredText.
+"""
+
+__docformat__ = 'reStructuredText'
+
+
+directives = {
+ # language-dependent: fixed
+ u'aten\u00E7\u00E3o': 'attention',
+ 'cuidado': 'caution',
+ 'perigo': 'danger',
+ 'erro': 'error',
+ u'sugest\u00E3o': 'hint',
+ 'importante': 'important',
+ 'nota': 'note',
+ 'dica': 'tip',
+ 'aviso': 'warning',
+ u'exorta\u00E7\u00E3o': 'admonition',
+ 'barra-lateral': 'sidebar',
+ u't\u00F3pico': 'topic',
+ 'bloco-de-linhas': 'line-block',
+ 'literal-interpretado': 'parsed-literal',
+ 'rubrica': 'rubric',
+ u'ep\u00EDgrafo': 'epigraph',
+ 'destaques': 'highlights',
+ u'cita\u00E7\u00E3o-destacada': 'pull-quote',
+ u'compound (translation required)': 'compound',
+ #'perguntas': 'questions',
+ #'qa': 'questions',
+ #'faq': 'questions',
+ u'table (translation required)': 'table',
+ u'csv-table (translation required)': 'csv-table',
+ u'list-table (translation required)': 'list-table',
+ 'meta': 'meta',
+ #'imagemap': 'imagemap',
+ 'imagem': 'image',
+ 'figura': 'figure',
+ u'inclus\u00E3o': 'include',
+ 'cru': 'raw',
+ u'substitui\u00E7\u00E3o': 'replace',
+ 'unicode': 'unicode',
+ 'classe': 'class',
+ 'role (translation required)': 'role',
+ u'\u00EDndice': 'contents',
+ 'numsec': 'sectnum',
+ u'numera\u00E7\u00E3o-de-se\u00E7\u00F5es': 'sectnum',
+ u'header (translation required)': 'header',
+ u'footer (translation required)': 'footer',
+ #u'notas-de-rorap\u00E9': 'footnotes',
+ #u'cita\u00E7\u00F5es': 'citations',
+ u'links-no-rodap\u00E9': 'target-notes',
+ 'restructuredtext-test-directive': 'restructuredtext-test-directive'}
+"""Brazilian Portuguese name to registered (in directives/__init__.py)
+directive name mapping."""
+
+roles = {
+ # language-dependent: fixed
+ u'abbrevia\u00E7\u00E3o': 'abbreviation',
+ 'ab': 'abbreviation',
+ u'acr\u00F4nimo': 'acronym',
+ 'ac': 'acronym',
+ u'\u00EDndice-remissivo': 'index',
+ 'i': 'index',
+ 'subscrito': 'subscript',
+ 'sub': 'subscript',
+ 'sobrescrito': 'superscript',
+ 'sob': 'superscript',
+ u'refer\u00EAncia-a-t\u00EDtulo': 'title-reference',
+ u't\u00EDtulo': 'title-reference',
+ 't': 'title-reference',
+ u'refer\u00EAncia-a-pep': 'pep-reference',
+ 'pep': 'pep-reference',
+ u'refer\u00EAncia-a-rfc': 'rfc-reference',
+ 'rfc': 'rfc-reference',
+ u'\u00EAnfase': 'emphasis',
+ 'forte': 'strong',
+ 'literal': 'literal', # translation required?
+ u'refer\u00EAncia-por-nome': 'named-reference',
+ u'refer\u00EAncia-an\u00F4nima': 'anonymous-reference',
+ u'refer\u00EAncia-a-nota-de-rodap\u00E9': 'footnote-reference',
+ u'refer\u00EAncia-a-cita\u00E7\u00E3o': 'citation-reference',
+ u'refer\u00EAncia-a-substitui\u00E7\u00E3o': 'substitution-reference',
+ 'alvo': 'target',
+ u'refer\u00EAncia-a-uri': 'uri-reference',
+ 'uri': 'uri-reference',
+ 'url': 'uri-reference',
+ 'cru': 'raw',}
+"""Mapping of Brazilian Portuguese role names to canonical role names
+for interpreted text."""
Added: Zope3/trunk/src/docutils/parsers/rst/languages/ru.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/ru.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/ru.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,101 @@
+# Author: Roman Suzi
+# Contact: rnd at onego.ru
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Russian-language mappings for language-dependent features of
+reStructuredText.
+"""
+
+__docformat__ = 'reStructuredText'
+
+directives = {
+ u'\u0431\u043b\u043e\u043a-\u0441\u0442\u0440\u043e\u043a': u'line-block',
+ u'meta': u'meta',
+ u'\u043e\u0431\u0440\u0430\u0431\u043e\u0442\u0430\u043d\u043d\u044b\u0439-\u043b\u0438\u0442\u0435\u0440\u0430\u043b':
+ u'parsed-literal',
+ u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u043d\u0430\u044f-\u0446\u0438\u0442\u0430\u0442\u0430':
+ u'pull-quote',
+ u'compound (translation required)': 'compound',
+ u'table (translation required)': 'table',
+ u'csv-table (translation required)': 'csv-table',
+ u'list-table (translation required)': 'list-table',
+ u'\u0441\u044b\u0440\u043e\u0439': u'raw',
+ u'\u0437\u0430\u043c\u0435\u043d\u0430': u'replace',
+ u'\u0442\u0435\u0441\u0442\u043e\u0432\u0430\u044f-\u0434\u0438\u0440\u0435\u043a\u0442\u0438\u0432\u0430-restructuredtext':
+ u'restructuredtext-test-directive',
+ u'\u0446\u0435\u043b\u0435\u0432\u044b\u0435-\u0441\u043d\u043e\u0441\u043a\u0438':
+ u'target-notes',
+ u'unicode': u'unicode',
+ u'\u0431\u043e\u043a\u043e\u0432\u0430\u044f-\u043f\u043e\u043b\u043e\u0441\u0430':
+ u'sidebar',
+ u'\u0432\u0430\u0436\u043d\u043e': u'important',
+ u'\u0432\u043a\u043b\u044e\u0447\u0430\u0442\u044c': u'include',
+ u'\u0432\u043d\u0438\u043c\u0430\u043d\u0438\u0435': u'attention',
+ u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0438\u0435': u'highlights',
+ u'\u0437\u0430\u043c\u0435\u0447\u0430\u043d\u0438\u0435': u'admonition',
+ u'\u0438\u0437\u043e\u0431\u0440\u0430\u0436\u0435\u043d\u0438\u0435':
+ u'image',
+ u'\u043a\u043b\u0430\u0441\u0441': u'class',
+ u'role (translation required)': 'role',
+ u'\u043d\u043e\u043c\u0435\u0440-\u0440\u0430\u0437\u0434\u0435\u043b\u0430':
+ u'sectnum',
+ u'\u043d\u0443\u043c\u0435\u0440\u0430\u0446\u0438\u044f-\u0440\u0430\u0437'
+ u'\u0434\u0435\u043b\u043e\u0432': u'sectnum',
+ u'\u043e\u043f\u0430\u0441\u043d\u043e': u'danger',
+ u'\u043e\u0441\u0442\u043e\u0440\u043e\u0436\u043d\u043e': u'caution',
+ u'\u043e\u0448\u0438\u0431\u043a\u0430': u'error',
+ u'\u043f\u043e\u0434\u0441\u043a\u0430\u0437\u043a\u0430': u'tip',
+ u'\u043f\u0440\u0435\u0434\u0443\u043f\u0440\u0435\u0436\u0434\u0435\u043d'
+ u'\u0438\u0435': u'warning',
+ u'\u043f\u0440\u0438\u043c\u0435\u0447\u0430\u043d\u0438\u0435': u'note',
+ u'\u0440\u0438\u0441\u0443\u043d\u043e\u043a': u'figure',
+ u'\u0440\u0443\u0431\u0440\u0438\u043a\u0430': u'rubric',
+ u'\u0441\u043e\u0432\u0435\u0442': u'hint',
+ u'\u0441\u043e\u0434\u0435\u0440\u0436\u0430\u043d\u0438\u0435': u'contents',
+ u'\u0442\u0435\u043c\u0430': u'topic',
+ u'\u044d\u043f\u0438\u0433\u0440\u0430\u0444': u'epigraph',
+ u'header (translation required)': 'header',
+ u'footer (translation required)': 'footer',}
+"""Russian name to registered (in directives/__init__.py) directive name
+mapping."""
+
+roles = {
+ u'\u0430\u043a\u0440\u043e\u043d\u0438\u043c': 'acronym',
+ u'\u0430\u043d\u043e\u043d\u0438\u043c\u043d\u0430\u044f-\u0441\u0441\u044b\u043b\u043a\u0430':
+ 'anonymous-reference',
+ u'\u0431\u0443\u043a\u0432\u0430\u043b\u044c\u043d\u043e': 'literal',
+ u'\u0432\u0435\u0440\u0445\u043d\u0438\u0439-\u0438\u043d\u0434\u0435\u043a\u0441':
+ 'superscript',
+ u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0438\u0435': 'emphasis',
+ u'\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u043d\u0430\u044f-\u0441\u0441\u044b\u043b\u043a\u0430':
+ 'named-reference',
+ u'\u0438\u043d\u0434\u0435\u043a\u0441': 'index',
+ u'\u043d\u0438\u0436\u043d\u0438\u0439-\u0438\u043d\u0434\u0435\u043a\u0441':
+ 'subscript',
+ u'\u0441\u0438\u043b\u044c\u043d\u043e\u0435-\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u0438\u0435':
+ 'strong',
+ u'\u0441\u043e\u043a\u0440\u0430\u0449\u0435\u043d\u0438\u0435':
+ 'abbreviation',
+ u'\u0441\u0441\u044b\u043b\u043a\u0430-\u0437\u0430\u043c\u0435\u043d\u0430':
+ 'substitution-reference',
+ u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-pep': 'pep-reference',
+ u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-rfc': 'rfc-reference',
+ u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-uri': 'uri-reference',
+ u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-\u0437\u0430\u0433\u043b\u0430\u0432\u0438\u0435':
+ 'title-reference',
+ u'\u0441\u0441\u044b\u043b\u043a\u0430-\u043d\u0430-\u0441\u043d\u043e\u0441\u043a\u0443':
+ 'footnote-reference',
+ u'\u0446\u0438\u0442\u0430\u0442\u043d\u0430\u044f-\u0441\u0441\u044b\u043b\u043a\u0430':
+ 'citation-reference',
+ u'\u0446\u0435\u043b\u044c': 'target',
+ u'raw (translation required)': 'raw',}
+"""Mapping of Russian role names to canonical role names for interpreted text.
+"""
Modified: Zope3/trunk/src/docutils/parsers/rst/languages/sk.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/sk.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/sk.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,14 @@
# Author: Miroslav Vasko
# Contact: zemiak at zoznam.sk
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
# Copyright: This module has been placed in the public domain.
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
"""
Slovak-language mappings for language-dependent features of
reStructuredText.
@@ -20,7 +25,7 @@
u'rada': 'hint',
u'd\xf4le\x9eit\xe9': 'important',
u'pozn\xe1mka': 'note',
- u'tip': 'tip',
+ u'tip (translation required)': 'tip',
u'varovanie': 'warning',
u'admonition (translation required)': 'admonition',
u'sidebar (translation required)': 'sidebar',
@@ -31,24 +36,54 @@
u'epigraph (translation required)': 'epigraph',
u'highlights (translation required)': 'highlights',
u'pull-quote (translation required)': 'pull-quote',
+ u'compound (translation required)': 'compound',
#u'questions': 'questions',
#u'qa': 'questions',
#u'faq': 'questions',
+ u'table (translation required)': 'table',
+ u'csv-table (translation required)': 'csv-table',
+ u'list-table (translation required)': 'list-table',
u'meta': 'meta',
#u'imagemap': 'imagemap',
u'obr\xe1zok': 'image',
u'tvar': 'figure',
u'vlo\x9ei\x9d': 'include',
- u'raw': 'raw',
+ u'raw (translation required)': 'raw',
u'nahradi\x9d': 'replace',
u'unicode': 'unicode',
u'class (translation required)': 'class',
+ u'role (translation required)': 'role',
u'obsah': 'contents',
u'\xe8as\x9d': 'sectnum',
u'\xe8as\x9d-\xe8\xedslovanie': 'sectnum',
u'cie\xbeov\xe9-pozn\xe1mky': 'target-notes',
+ u'header (translation required)': 'header',
+ u'footer (translation required)': 'footer',
#u'footnotes': 'footnotes',
#u'citations': 'citations',
}
"""Slovak name to registered (in directives/__init__.py) directive name
mapping."""
+
+roles = {
+ u'abbreviation (translation required)': 'abbreviation',
+ u'acronym (translation required)': 'acronym',
+ u'index (translation required)': 'index',
+ u'subscript (translation required)': 'subscript',
+ u'superscript (translation required)': 'superscript',
+ u'title-reference (translation required)': 'title-reference',
+ u'pep-reference (translation required)': 'pep-reference',
+ u'rfc-reference (translation required)': 'rfc-reference',
+ u'emphasis (translation required)': 'emphasis',
+ u'strong (translation required)': 'strong',
+ u'literal (translation required)': 'literal',
+ u'named-reference (translation required)': 'named-reference',
+ u'anonymous-reference (translation required)': 'anonymous-reference',
+ u'footnote-reference (translation required)': 'footnote-reference',
+ u'citation-reference (translation required)': 'citation-reference',
+ u'substitution-reference (translation required)': 'substitution-reference',
+ u'target (translation required)': 'target',
+ u'uri-reference (translation required)': 'uri-reference',
+ u'raw (translation required)': 'raw',}
+"""Mapping of Slovak role names to canonical role names for interpreted text.
+"""
Modified: Zope3/trunk/src/docutils/parsers/rst/languages/sv.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/sv.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/sv.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,9 +1,14 @@
# Author: Adam Chodorowski
# Contact: chodorowski at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
# Copyright: This module has been placed in the public domain.
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
"""
Swedish language mappings for language-dependent features of reStructuredText.
"""
@@ -30,10 +35,14 @@
u'epigraph (translation required)': 'epigraph',
u'highlights (translation required)': 'highlights',
u'pull-quote (translation required)': 'pull-quote',
+ u'compound (translation required)': 'compound',
# u'fr\u00e5gor': 'questions',
# NOTE: A bit long, but recommended by http://www.nada.kth.se/dataterm/:
# u'fr\u00e5gor-och-svar': 'questions',
# u'vanliga-fr\u00e5gor': 'questions',
+ u'table (translation required)': 'table',
+ u'csv-table (translation required)': 'csv-table',
+ u'list-table (translation required)': 'list-table',
u'meta': 'meta',
# u'bildkarta': 'imagemap', # FIXME: Translation might be too literal.
u'bild': 'image',
@@ -43,11 +52,37 @@
u'ers\u00e4tt': 'replace',
u'unicode': 'unicode',
u'class (translation required)': 'class',
+ u'role (translation required)': 'role',
u'inneh\u00e5ll': 'contents',
u'sektionsnumrering': 'sectnum',
u'target-notes (translation required)': 'target-notes',
+ u'header (translation required)': 'header',
+ u'footer (translation required)': 'footer',
# u'fotnoter': 'footnotes',
# u'citeringar': 'citations',
}
"""Swedish name to registered (in directives/__init__.py) directive name
mapping."""
+
+roles = {
+ u'abbreviation (translation required)': 'abbreviation',
+ u'acronym (translation required)': 'acronym',
+ u'index (translation required)': 'index',
+ u'subscript (translation required)': 'subscript',
+ u'superscript (translation required)': 'superscript',
+ u'title-reference (translation required)': 'title-reference',
+ u'pep-reference (translation required)': 'pep-reference',
+ u'rfc-reference (translation required)': 'rfc-reference',
+ u'emphasis (translation required)': 'emphasis',
+ u'strong (translation required)': 'strong',
+ u'literal (translation required)': 'literal',
+ u'named-reference (translation required)': 'named-reference',
+ u'anonymous-reference (translation required)': 'anonymous-reference',
+ u'footnote-reference (translation required)': 'footnote-reference',
+ u'citation-reference (translation required)': 'citation-reference',
+ u'substitution-reference (translation required)': 'substitution-reference',
+ u'target (translation required)': 'target',
+ u'uri-reference (translation required)': 'uri-reference',
+ u'r\u00e5': 'raw',}
+"""Mapping of Swedish role names to canonical role names for interpreted text.
+"""
Added: Zope3/trunk/src/docutils/parsers/rst/languages/zh_tw.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/languages/zh_tw.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/languages/zh_tw.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,102 @@
+# Author: David Goodger
+# Contact: goodger at users.sourceforge.net
+# Revision: $Revision: 3184 $
+# Date: $Date: 2005-04-07 21:36:11 +0200 (Thu, 07 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+# New language mappings are welcome. Before doing a new translation, please
+# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
+# translated for each language: one in docutils/languages, the other in
+# docutils/parsers/rst/languages.
+
+"""
+Traditional Chinese language mappings for language-dependent features of
+reStructuredText.
+"""
+
+__docformat__ = 'reStructuredText'
+
+
+directives = {
+ # language-dependent: fixed
+ 'attention (translation required)': 'attention',
+ 'caution (translation required)': 'caution',
+ 'danger (translation required)': 'danger',
+ 'error (translation required)': 'error',
+ 'hint (translation required)': 'hint',
+ 'important (translation required)': 'important',
+ 'note (translation required)': 'note',
+ 'tip (translation required)': 'tip',
+ 'warning (translation required)': 'warning',
+ 'admonition (translation required)': 'admonition',
+ 'sidebar (translation required)': 'sidebar',
+ 'topic (translation required)': 'topic',
+ 'line-block (translation required)': 'line-block',
+ 'parsed-literal (translation required)': 'parsed-literal',
+ 'rubric (translation required)': 'rubric',
+ 'epigraph (translation required)': 'epigraph',
+ 'highlights (translation required)': 'highlights',
+ 'pull-quote (translation required)': 'pull-quote',
+ 'compound (translation required)': 'compound',
+ #'questions (translation required)': 'questions',
+ 'table (translation required)': 'table',
+ 'csv-table (translation required)': 'csv-table',
+ 'list-table (translation required)': 'list-table',
+ #'qa (translation required)': 'questions',
+ #'faq (translation required)': 'questions',
+ 'meta (translation required)': 'meta',
+ #'imagemap (translation required)': 'imagemap',
+ 'image (translation required)': 'image',
+ 'figure (translation required)': 'figure',
+ 'include (translation required)': 'include',
+ 'raw (translation required)': 'raw',
+ 'replace (translation required)': 'replace',
+ 'unicode (translation required)': 'unicode',
+ 'class (translation required)': 'class',
+ 'role (translation required)': 'role',
+ 'contents (translation required)': 'contents',
+ 'sectnum (translation required)': 'sectnum',
+ 'section-numbering (translation required)': 'sectnum',
+ u'header (translation required)': 'header',
+ u'footer (translation required)': 'footer',
+ #'footnotes (translation required)': 'footnotes',
+ #'citations (translation required)': 'citations',
+ 'target-notes (translation required)': 'target-notes',
+ 'restructuredtext-test-directive': 'restructuredtext-test-directive'}
+"""Traditional Chinese name to registered (in directives/__init__.py)
+directive name mapping."""
+
+roles = {
+ # language-dependent: fixed
+ 'abbreviation (translation required)': 'abbreviation',
+ 'ab (translation required)': 'abbreviation',
+ 'acronym (translation required)': 'acronym',
+ 'ac (translation required)': 'acronym',
+ 'index (translation required)': 'index',
+ 'i (translation required)': 'index',
+ 'subscript (translation required)': 'subscript',
+ 'sub (translation required)': 'subscript',
+ 'superscript (translation required)': 'superscript',
+ 'sup (translation required)': 'superscript',
+ 'title-reference (translation required)': 'title-reference',
+ 'title (translation required)': 'title-reference',
+ 't (translation required)': 'title-reference',
+ 'pep-reference (translation required)': 'pep-reference',
+ 'pep (translation required)': 'pep-reference',
+ 'rfc-reference (translation required)': 'rfc-reference',
+ 'rfc (translation required)': 'rfc-reference',
+ 'emphasis (translation required)': 'emphasis',
+ 'strong (translation required)': 'strong',
+ 'literal (translation required)': 'literal',
+ 'named-reference (translation required)': 'named-reference',
+ 'anonymous-reference (translation required)': 'anonymous-reference',
+ 'footnote-reference (translation required)': 'footnote-reference',
+ 'citation-reference (translation required)': 'citation-reference',
+ 'substitution-reference (translation required)': 'substitution-reference',
+ 'target (translation required)': 'target',
+ 'uri-reference (translation required)': 'uri-reference',
+ 'uri (translation required)': 'uri-reference',
+ 'url (translation required)': 'uri-reference',
+ 'raw (translation required)': 'raw',}
+"""Mapping of Traditional Chinese role names to canonical role names for
+interpreted text."""
Added: Zope3/trunk/src/docutils/parsers/rst/roles.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/roles.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/roles.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,347 @@
+# Author: Edward Loper
+# Contact: edloper at gradient.cis.upenn.edu
+# Revision: $Revision: 3155 $
+# Date: $Date: 2005-04-02 23:57:06 +0200 (Sat, 02 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+"""
+This module defines standard interpreted text role functions, a registry for
+interpreted text roles, and an API for adding to and retrieving from the
+registry.
+
+The interface for interpreted role functions is as follows::
+
+ def role_fn(name, rawtext, text, lineno, inliner,
+ options={}, content=[]):
+ code...
+
+ # Set function attributes for customization:
+ role_fn.options = ...
+ role_fn.content = ...
+
+Parameters:
+
+- ``name`` is the local name of the interpreted text role, the role name
+ actually used in the document.
+
+- ``rawtext`` is a string containing the entire interpreted text construct.
+ Return it as a ``problematic`` node linked to a system message if there is a
+ problem.
+
+- ``text`` is the interpreted text content, with backslash escapes converted
+ to nulls (``\x00``).
+
+- ``lineno`` is the line number where the interpreted text begins.
+
+- ``inliner`` is the Inliner object that called the role function.
+ It defines the following useful attributes: ``reporter``,
+ ``problematic``, ``memo``, ``parent``, ``document``.
+
+- ``options``: A dictionary of directive options for customization, to be
+ interpreted by the role function. Used for additional attributes for the
+ generated elements and other functionality.
+
+- ``content``: A list of strings, the directive content for customization
+ ("role" directive). To be interpreted by the role function.
+
+Function attributes for customization, interpreted by the "role" directive:
+
+- ``options``: A dictionary, mapping known option names to conversion
+ functions such as `int` or `float`. ``None`` or an empty dict implies no
+ options to parse. Several directive option conversion functions are defined
+ in the `directives` module.
+
+ All role functions implicitly support the "class" option, unless disabled
+ with an explicit ``{'class': None}``.
+
+- ``content``: A boolean; true if content is allowed. Client code must handle
+ the case where content is required but not supplied (an empty content list
+ will be supplied).
+
+Note that unlike directives, the "arguments" function attribute is not
+supported for role customization. Directive arguments are handled by the
+"role" directive itself.
+
+Interpreted role functions return a tuple of two values:
+
+- A list of nodes which will be inserted into the document tree at the
+ point where the interpreted role was encountered (can be an empty
+ list).
+
+- A list of system messages, which will be inserted into the document tree
+ immediately after the end of the current inline block (can also be empty).
+"""
+
+__docformat__ = 'reStructuredText'
+
+from docutils import nodes, utils
+from docutils.parsers.rst import directives
+from docutils.parsers.rst.languages import en as _fallback_language_module
+
+DEFAULT_INTERPRETED_ROLE = 'title-reference'
+"""
+The canonical name of the default interpreted role. This role is used
+when no role is specified for a piece of interpreted text.
+"""
+
+_role_registry = {}
+"""Mapping of canonical role names to role functions. Language-dependent role
+names are defined in the ``language`` subpackage."""
+
+_roles = {}
+"""Mapping of local or language-dependent interpreted text role names to role
+functions."""
+
+def role(role_name, language_module, lineno, reporter):
+ """
+ Locate and return a role function from its language-dependent name, along
+ with a list of system messages. If the role is not found in the current
+ language, check English. Return a 2-tuple: role function (``None`` if the
+ named role cannot be found) and a list of system messages.
+ """
+ normname = role_name.lower()
+ messages = []
+ msg_text = []
+
+ if _roles.has_key(normname):
+ return _roles[normname], messages
+
+ if role_name:
+ canonicalname = None
+ try:
+ canonicalname = language_module.roles[normname]
+ except AttributeError, error:
+ msg_text.append('Problem retrieving role entry from language '
+ 'module %r: %s.' % (language_module, error))
+ except KeyError:
+ msg_text.append('No role entry for "%s" in module "%s".'
+ % (role_name, language_module.__name__))
+ else:
+ canonicalname = DEFAULT_INTERPRETED_ROLE
+
+ # If we didn't find it, try English as a fallback.
+ if not canonicalname:
+ try:
+ canonicalname = _fallback_language_module.roles[normname]
+ msg_text.append('Using English fallback for role "%s".'
+ % role_name)
+ except KeyError:
+ msg_text.append('Trying "%s" as canonical role name.'
+ % role_name)
+ # The canonical name should be an English name, but just in case:
+ canonicalname = normname
+
+ # Collect any messages that we generated.
+ if msg_text:
+ message = reporter.info('\n'.join(msg_text), line=lineno)
+ messages.append(message)
+
+ # Look the role up in the registry, and return it.
+ if _role_registry.has_key(canonicalname):
+ role_fn = _role_registry[canonicalname]
+ register_local_role(normname, role_fn)
+ return role_fn, messages
+ else:
+ return None, messages # Error message will be generated by caller.
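A hedged usage sketch of this lookup (not part of this checkin), using the French mapping added above: 'exposant' resolves to the canonical 'superscript' role and is then cached under its local name.

    from docutils.parsers.rst import roles
    from docutils.parsers.rst.languages import fr

    # The reporter is only consulted when an informational message must
    # be emitted; this lookup succeeds directly in fr.roles, so None
    # suffices for the sketch.
    role_fn, messages = roles.role('exposant', fr, lineno=1, reporter=None)
    assert role_fn is not None and messages == []
    # A second lookup is served from the local-name cache populated above:
    assert roles.role('exposant', fr, 1, None)[0] is role_fn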
+
+def register_canonical_role(name, role_fn):
+ """
+ Register an interpreted text role by its canonical name.
+
+ :Parameters:
+ - `name`: The canonical name of the interpreted role.
+ - `role_fn`: The role function. See the module docstring.
+ """
+ set_implicit_options(role_fn)
+ _role_registry[name] = role_fn
+
+def register_local_role(name, role_fn):
+ """
+ Register an interpreted text role by its local or language-dependent name.
+
+ :Parameters:
+ - `name`: The local or language-dependent name of the interpreted role.
+ - `role_fn`: The role function. See the module docstring.
+ """
+ set_implicit_options(role_fn)
+ _roles[name] = role_fn
+
+def set_implicit_options(role_fn):
+ """
+ Add customization options to role functions, unless explicitly set or
+ disabled.
+ """
+ if not hasattr(role_fn, 'options') or role_fn.options is None:
+ role_fn.options = {'class': directives.class_option}
+ elif not role_fn.options.has_key('class'):
+ role_fn.options['class'] = directives.class_option
+
+def register_generic_role(canonical_name, node_class):
+ """For roles which simply wrap a given `node_class` around the text."""
+ role = GenericRole(canonical_name, node_class)
+ register_canonical_role(canonical_name, role)
+
+
+class GenericRole:
+
+ """
+ Generic interpreted text role, where the interpreted text is simply
+ wrapped with the provided node class.
+ """
+
+ def __init__(self, role_name, node_class):
+ self.name = role_name
+ self.node_class = node_class
+
+ def __call__(self, role, rawtext, text, lineno, inliner,
+ options={}, content=[]):
+ set_classes(options)
+ return [self.node_class(rawtext, utils.unescape(text), **options)], []
+
+
+class CustomRole:
+
+ """
+ Wrapper for custom interpreted text roles.
+ """
+
+ def __init__(self, role_name, base_role, options={}, content=[]):
+ self.name = role_name
+ self.base_role = base_role
+ self.options = None
+ if hasattr(base_role, 'options'):
+ self.options = base_role.options
+ self.content = None
+ if hasattr(base_role, 'content'):
+ self.content = base_role.content
+ self.supplied_options = options
+ self.supplied_content = content
+
+ def __call__(self, role, rawtext, text, lineno, inliner,
+ options={}, content=[]):
+ opts = self.supplied_options.copy()
+ opts.update(options)
+ cont = list(self.supplied_content)
+ if cont and content:
+ cont += '\n'
+ cont.extend(content)
+ return self.base_role(role, rawtext, text, lineno, inliner,
+ options=opts, content=cont)
+
+
+def generic_custom_role(role, rawtext, text, lineno, inliner,
+ options={}, content=[]):
+ """"""
+ # Once nested inline markup is implemented, this and other methods should
+ # recursively call inliner.nested_parse().
+ set_classes(options)
+ return [nodes.inline(rawtext, utils.unescape(text), **options)], []
+
+generic_custom_role.options = {'class': directives.class_option}
+
+
+######################################################################
+# Define and register the standard roles:
+######################################################################
+
+register_generic_role('abbreviation', nodes.abbreviation)
+register_generic_role('acronym', nodes.acronym)
+register_generic_role('emphasis', nodes.emphasis)
+register_generic_role('literal', nodes.literal)
+register_generic_role('strong', nodes.strong)
+register_generic_role('subscript', nodes.subscript)
+register_generic_role('superscript', nodes.superscript)
+register_generic_role('title-reference', nodes.title_reference)
+
+def pep_reference_role(role, rawtext, text, lineno, inliner,
+ options={}, content=[]):
+ try:
+ pepnum = int(text)
+ if pepnum < 0 or pepnum > 9999:
+ raise ValueError
+ except ValueError:
+ msg = inliner.reporter.error(
+ 'PEP number must be a number from 0 to 9999; "%s" is invalid.'
+ % text, line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ # Base URL mainly used by inliner.pep_reference; so this is correct:
+ ref = inliner.document.settings.pep_base_url + inliner.pep_url % pepnum
+ set_classes(options)
+ return [nodes.reference(rawtext, 'PEP ' + utils.unescape(text), refuri=ref,
+ **options)], []
+
+register_canonical_role('pep-reference', pep_reference_role)
+
+def rfc_reference_role(role, rawtext, text, lineno, inliner,
+ options={}, content=[]):
+ try:
+ rfcnum = int(text)
+ if rfcnum <= 0:
+ raise ValueError
+ except ValueError:
+ msg = inliner.reporter.error(
+ 'RFC number must be a number greater than or equal to 1; '
+ '"%s" is invalid.' % text, line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ # Base URL mainly used by inliner.rfc_reference, so this is correct:
+ ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum
+ set_classes(options)
+ node = nodes.reference(rawtext, 'RFC ' + utils.unescape(text), refuri=ref,
+ **options)
+ return [node], []
+
+register_canonical_role('rfc-reference', rfc_reference_role)
+
+def raw_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
+ if not options.has_key('format'):
+ msg = inliner.reporter.error(
+ 'No format (Writer name) is associated with this role: "%s".\n'
+ 'The "raw" role cannot be used directly.\n'
+ 'Instead, use the "role" directive to create a new role with '
+ 'an associated format.' % role, line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ set_classes(options)
+ node = nodes.raw(rawtext, utils.unescape(text, 1), **options)
+ return [node], []
+
+raw_role.options = {'format': directives.unchanged}
+
+register_canonical_role('raw', raw_role)
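As the error message above says, "raw" is only usable through a derived role that supplies a format. Roughly -- and only as a sketch under that assumption, not part of this checkin -- the "role" directive achieves this by wrapping the base role in a ``CustomRole`` with the option pre-bound; the name 'html-raw' below is hypothetical:

    from docutils.parsers.rst import roles

    # Pre-bind the 'format' option so raw_role no longer reports the
    # missing-format error when the derived role is used.
    html_raw = roles.CustomRole('html-raw', roles.raw_role,
                                options={'format': 'html'})
    roles.register_local_role('html-raw', html_raw)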
+
+
+######################################################################
+# Register roles that are currently unimplemented.
+######################################################################
+
+def unimplemented_role(role, rawtext, text, lineno, inliner, attributes={}):
+ msg = inliner.reporter.error(
+ 'Interpreted text role "%s" not implemented.' % role, line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+
+register_canonical_role('index', unimplemented_role)
+register_canonical_role('named-reference', unimplemented_role)
+register_canonical_role('anonymous-reference', unimplemented_role)
+register_canonical_role('uri-reference', unimplemented_role)
+register_canonical_role('footnote-reference', unimplemented_role)
+register_canonical_role('citation-reference', unimplemented_role)
+register_canonical_role('substitution-reference', unimplemented_role)
+register_canonical_role('target', unimplemented_role)
+
+# This should remain unimplemented, for testing purposes:
+register_canonical_role('restructuredtext-unimplemented-role',
+ unimplemented_role)
+
+
+def set_classes(options):
+ """
+ Auxiliary function to set options['classes'] and delete
+ options['class'].
+ """
+ if options.has_key('class'):
+ assert not options.has_key('classes')
+ options['classes'] = options['class']
+ del options['class']
Modified: Zope3/trunk/src/docutils/parsers/rst/roman.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/roman.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/roman.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,19 +1,16 @@
-"""Convert to and from Roman numerals"""
+"""Convert to and from Roman numerals
-__author__ = "Mark Pilgrim (f8dy at diveintopython.org)"
-__version__ = "1.4"
-__date__ = "8 August 2001"
-__copyright__ = """Copyright (c) 2001 Mark Pilgrim
-
-This program is part of "Dive Into Python", a free Python tutorial for
+This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the Python 2.1.1 license, available at
-http://www.python.org/2.1.1/license.html
"""
+__author__ = "Steve Lamm, Mark Pilgrim (mark at diveintopython.org)"
+__version__ = "$Revision: 1.3 $"
+__date__ = "$Date: 2004/05/05 21:57:20 $"
+__copyright__ = "Copyright (c) 2001 Steve Lamm, Copyright (c) 2001 Mark Pilgrim"
+__license__ = "Python"
+
import re
#Define exceptions
@@ -22,6 +19,9 @@
class NotIntegerError(RomanError): pass
class InvalidRomanNumeralError(RomanError): pass
+#Roman numerals must be less than 5000
+MAX_ROMAN_NUMERAL = 4999
+
#Define digit mapping
romanNumeralMap = (('M', 1000),
('CM', 900),
@@ -37,45 +37,47 @@
('IV', 4),
('I', 1))
+#Create tables for fast conversion of roman numerals.
+#See fillLookupTables() below.
+toRomanTable = [ None ] # Skip an index since Roman numerals have no zero
+fromRomanTable = {}
+
def toRoman(n):
"""convert integer to Roman numeral"""
- if not (0 < n < 5000):
+ if not (0 < n <= MAX_ROMAN_NUMERAL):
raise OutOfRangeError, "number out of range (must be 1..4999)"
if int(n) <> n:
- raise NotIntegerError, "decimals can not be converted"
+ raise NotIntegerError, "non-integers can not be converted"
+ return toRomanTable[n]
- result = ""
- for numeral, integer in romanNumeralMap:
- while n >= integer:
- result += numeral
- n -= integer
- return result
-
-#Define pattern to detect valid Roman numerals
-romanNumeralPattern = re.compile("""
- ^ # beginning of string
- M{0,4} # thousands - 0 to 4 M's
- (CM|CD|D?C{0,3}) # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
- # or 500-800 (D, followed by 0 to 3 C's)
- (XC|XL|L?X{0,3}) # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
- # or 50-80 (L, followed by 0 to 3 X's)
- (IX|IV|V?I{0,3}) # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
- # or 5-8 (V, followed by 0 to 3 I's)
- $ # end of string
- """ ,re.VERBOSE)
-
def fromRoman(s):
"""convert Roman numeral to integer"""
if not s:
raise InvalidRomanNumeralError, 'Input can not be blank'
- if not romanNumeralPattern.search(s):
+ if not fromRomanTable.has_key(s):
raise InvalidRomanNumeralError, 'Invalid Roman numeral: %s' % s
+ return fromRomanTable[s]
- result = 0
- index = 0
+def toRomanDynamic(n):
+ """convert integer to Roman numeral using dynamic programming"""
+ assert(0 < n <= MAX_ROMAN_NUMERAL)
+ assert(int(n) == n)
+ result = ""
for numeral, integer in romanNumeralMap:
- while s[index:index+len(numeral)] == numeral:
- result += integer
- index += len(numeral)
+ if n >= integer:
+ result = numeral
+ n -= integer
+ break
+ if n > 0:
+ result += toRomanTable[n]
return result
+def fillLookupTables():
+ """compute all the possible roman numerals"""
+ #Save the values in two global tables to convert to and from integers.
+ for integer in range(1, MAX_ROMAN_NUMERAL + 1):
+ romanNumber = toRomanDynamic(integer)
+ toRomanTable.append(romanNumber)
+ fromRomanTable[romanNumber] = integer
+
+fillLookupTables()
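A quick usage sketch (not part of this checkin): the rewritten module pre-computes every numeral up to MAX_ROMAN_NUMERAL at import time, so both conversions become plain table lookups.

    from docutils.parsers.rst import roman

    assert roman.toRoman(1987) == 'MCMLXXXVII'
    assert roman.fromRoman('MCMLXXXVII') == 1987
    assert roman.fromRoman(roman.toRoman(4999)) == 4999  # upper bound unchanged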
Modified: Zope3/trunk/src/docutils/parsers/rst/states.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/states.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/states.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:05 $
+# Revision: $Revision: 3253 $
+# Date: $Date: 2005-04-25 17:08:01 +0200 (Mon, 25 Apr 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -113,7 +113,9 @@
from docutils import ApplicationError, DataError
from docutils.statemachine import StateMachineWS, StateWS
from docutils.nodes import fully_normalize_name as normalize_name
-from docutils.parsers.rst import directives, languages, tableparser
+from docutils.nodes import whitespace_normalize_name
+from docutils.utils import escape2null, unescape
+from docutils.parsers.rst import directives, languages, tableparser, roles
from docutils.parsers.rst.languages import en as _fallback_language_module
@@ -143,11 +145,10 @@
def run(self, input_lines, document, input_offset=0, match_titles=1,
inliner=None):
"""
- Parse `input_lines` and return a `docutils.nodes.document` instance.
+ Parse `input_lines` and modify the `document` node in place.
- Extend `StateMachineWS.run()`: set up parse-global data, run the
- StateMachine, and return the resulting
- document.
+ Extend `StateMachineWS.run()`: set up parse-global data and
+ run the StateMachine.
"""
self.language = languages.get_language(
document.settings.language_code)
@@ -169,17 +170,9 @@
results = StateMachineWS.run(self, input_lines, input_offset,
input_source=document['source'])
assert results == [], 'RSTStateMachine.run() results should be empty!'
- self.check_document()
self.node = self.memo = None # remove unneeded references
- def check_document(self):
- """Check for illegal structure: empty document."""
- if len(self.document) == 0:
- error = self.reporter.error(
- 'Document empty; must have contents.', line=0)
- self.document += error
-
class NestedStateMachine(StateMachineWS):
"""
@@ -198,6 +191,7 @@
self.document = memo.document
self.attach_observer(self.document.note_source)
self.reporter = memo.reporter
+ self.language = memo.language
self.node = node
results = StateMachineWS.run(self, input_lines, input_offset)
assert results == [], ('NestedStateMachine.run() results should be '
@@ -273,8 +267,10 @@
node=node, match_titles=match_titles)
state_machine.unlink()
new_offset = state_machine.abs_line_offset()
- # Adjustment for block if modified in nested parse:
- self.state_machine.next_line(len(block) - block_length)
+ # No `block.parent` implies disconnected -- lines aren't in sync:
+ if block.parent and (len(block) - block_length) != 0:
+ # Adjustment for block if modified in nested parse:
+ self.state_machine.next_line(len(block) - block_length)
return new_offset
def nested_list_parse(self, block, input_offset, node, initial_state,
@@ -286,7 +282,7 @@
state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
- `block`. Also keep track of optional intermdediate blank lines and the
+ `block`. Also keep track of optional intermediate blank lines and the
required final one.
"""
if state_machine_class is None:
@@ -369,7 +365,7 @@
textnodes, title_messages = self.inline_text(title, lineno)
titlenode = nodes.title(title, '', *textnodes)
name = normalize_name(titlenode.astext())
- section_node['name'] = name
+ section_node['names'].append(name)
section_node += titlenode
section_node += messages
section_node += title_messages
@@ -380,37 +376,11 @@
self.state_machine.input_lines[offset:], input_offset=absoffset,
node=section_node, match_titles=1)
self.goto_line(newabsoffset)
- self.check_section(section_node)
if memo.section_level <= mylevel: # can't handle next section?
raise EOFError # bubble up to supersection
# reset section_level; next pass will detect it properly
memo.section_level = mylevel
- def check_section(self, section):
- """
- Check for illegal structure: empty section, misplaced transitions.
- """
- lineno = section.line
- if len(section) <= 1:
- error = self.reporter.error(
- 'Section empty; must have contents.', line=lineno)
- section += error
- return
- if not isinstance(section[0], nodes.title): # shouldn't ever happen
- error = self.reporter.error(
- 'First element of section must be a title.', line=lineno)
- section.insert(0, error)
- if isinstance(section[1], nodes.transition):
- error = self.reporter.error(
- 'Section may not begin with a transition.',
- line=section[1].line)
- section.insert(1, error)
- if len(section) > 2 and isinstance(section[-1], nodes.transition):
- error = self.reporter.error(
- 'Section may not end with a transition.',
- line=section[-1].line)
- section += error
-
def paragraph(self, lines, lineno):
"""
Return a list (paragraph & messages) & a boolean: literal_block next?
@@ -473,46 +443,6 @@
Parse inline markup; call the `parse()` method.
"""
- _interpreted_roles = {
- # Values of ``None`` mean "not implemented yet":
- 'title-reference': 'generic_interpreted_role',
- 'abbreviation': 'generic_interpreted_role',
- 'acronym': 'generic_interpreted_role',
- 'index': None,
- 'subscript': 'generic_interpreted_role',
- 'superscript': 'generic_interpreted_role',
- 'emphasis': 'generic_interpreted_role',
- 'strong': 'generic_interpreted_role',
- 'literal': 'generic_interpreted_role',
- 'named-reference': None,
- 'anonymous-reference': None,
- 'uri-reference': None,
- 'pep-reference': 'pep_reference_role',
- 'rfc-reference': 'rfc_reference_role',
- 'footnote-reference': None,
- 'citation-reference': None,
- 'substitution-reference': None,
- 'target': None,
- 'restructuredtext-unimplemented-role': None}
- """Mapping of canonical interpreted text role name to method name.
- Initializes a name to bound-method mapping in `__init__`."""
-
- default_interpreted_role = 'title-reference'
- """The role to use when no explicit role is given.
- Override in subclasses."""
-
- generic_roles = {'abbreviation': nodes.abbreviation,
- 'acronym': nodes.acronym,
- 'emphasis': nodes.emphasis,
- 'literal': nodes.literal,
- 'strong': nodes.strong,
- 'subscript': nodes.subscript,
- 'superscript': nodes.superscript,
- 'title-reference': nodes.title_reference,}
- """Mapping of canonical interpreted text role name to node class.
- Used by the `generic_interpreted_role` method for simple, straightforward
- roles (simple wrapping; no extra processing)."""
-
def __init__(self, roles=None):
"""
`roles` is a mapping of canonical role name to role function or bound
@@ -523,17 +453,6 @@
"""List of (pattern, bound method) tuples, used by
`self.implicit_inline`."""
- self.interpreted_roles = {}
- """Mapping of canonical role name to role function or bound method.
- Items removed from this mapping will be disabled."""
-
- for canonical, method in self._interpreted_roles.items():
- if method:
- self.interpreted_roles[canonical] = getattr(self, method)
- else:
- self.interpreted_roles[canonical] = None
- self.interpreted_roles.update(roles or {})
-
def init_customizations(self, settings):
"""Setting-based customizations; run when parsing begins."""
if settings.pep_references:
@@ -544,6 +463,8 @@
self.rfc_reference))
def parse(self, text, lineno, memo, parent):
+ # Needs to be refactored for nested inline markup.
+ # Add nested_parse() method?
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
@@ -599,16 +520,22 @@
non_whitespace_after = r'(?![ \n])'
# Alphanumerics with isolated internal [-._] chars (i.e. not 2 together):
simplename = r'(?:(?!_)\w)+(?:[-._](?:(?!_)\w)+)*'
- # Valid URI characters (see RFC 2396 & RFC 2732):
- uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9]"""
+ # Valid URI characters (see RFC 2396 & RFC 2732);
+ # final \x00 allows backslash escapes in URIs:
+ uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]"""
+ # Delimiter indicating the end of a URI (not part of the URI):
+ uri_end_delim = r"""[>]"""
# Last URI character; same as uric but no punctuation:
- urilast = r"""[_~/a-zA-Z0-9]"""
- emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9]"""
+ urilast = r"""[_~*/=+a-zA-Z0-9]"""
+ # End of a URI (either 'urilast' or 'uric followed by a
+ # uri_end_delim'):
+ uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals()
+ emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]"""
email_pattern = r"""
%(emailc)s+(?:\.%(emailc)s+)* # name
- @ # at
+ (?<!\x00)@ # at
%(emailc)s+(?:\.%(emailc)s*)* # host
- %(urilast)s # final URI char
+ %(uri_end)s # final URI char
"""
parts = ('initial_inline', start_string_prefix, '',
[('start', '', non_whitespace_after, # simple start-strings
@@ -657,10 +584,10 @@
embedded_uri=re.compile(
r"""
(
- [ \n]+ # spaces or beginning of line
+ (?:[ \n]+|^) # spaces or beginning of line/string
< # open bracket
%(non_whitespace_after)s
- ([^<>\0]+) # anything but angle brackets & nulls
+ ([^<>\x00]+) # anything but angle brackets & nulls
%(non_whitespace_before)s
> # close bracket w/o whitespace before
)
@@ -687,15 +614,15 @@
( # either:
(//?)? # hierarchical URI
%(uric)s* # URI characters
- %(urilast)s # final URI char
+ %(uri_end)s # final URI char
)
( # optional query
\?%(uric)s*
- %(urilast)s
+ %(uri_end)s
)?
( # optional fragment
\#%(uric)s*
- %(urilast)s
+ %(uri_end)s
)?
)
)
@@ -808,7 +735,6 @@
role = endmatch.group('suffix')[1:-1]
position = 'suffix'
escaped = endmatch.string[:endmatch.start(1)]
- text = unescape(escaped, 0)
rawsource = unescape(string[matchstart:textend], 1)
if rawsource[-1:] == '_':
if role:
@@ -819,28 +745,13 @@
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
return self.phrase_ref(string[:matchstart], string[textend:],
- rawsource, escaped, text)
+ rawsource, escaped, unescape(escaped))
else:
- try:
- return self.interpreted(
- string[:rolestart], string[textend:],
- rawsource, text, role, lineno)
- except UnknownInterpretedRoleError, detail:
- msg = self.reporter.error(
- 'Unknown interpreted text role "%s".' % role,
- line=lineno)
- text = unescape(string[rolestart:textend], 1)
- prb = self.problematic(text, text, msg)
- return (string[:rolestart], [prb], string[textend:],
- detail.args[0] + [msg])
- except InterpretedRoleNotImplementedError, detail:
- msg = self.reporter.error(
- 'Interpreted text role "%s" not implemented.' % role,
- line=lineno)
- text = unescape(string[rolestart:textend], 1)
- prb = self.problematic(text, text, msg)
- return (string[:rolestart], [prb], string[textend:],
- detail.args[0] + [msg])
+ rawsource = unescape(string[rolestart:textend], 1)
+ nodelist, messages = self.interpreted(rawsource, escaped, role,
+ lineno)
+ return (string[:rolestart], nodelist,
+ string[textend:], messages)
msg = self.reporter.warning(
'Inline interpreted text or phrase reference start-string '
'without end-string.', line=lineno)
@@ -859,10 +770,13 @@
target = nodes.target(match.group(1), refuri=uri)
else:
raise ApplicationError('problem with URI: %r' % uri_text)
+ if not text:
+ text = uri
else:
target = None
refname = normalize_name(text)
- reference = nodes.reference(rawsource, text)
+ reference = nodes.reference(rawsource, text,
+ name=whitespace_normalize_name(text))
node_list = [reference]
if rawsource[-2:] == '__':
if target:
@@ -873,7 +787,7 @@
else:
if target:
reference['refuri'] = uri
- target['name'] = refname
+ target['names'].append(refname)
self.document.note_external_target(target)
self.document.note_explicit_target(target, self.parent)
node_list.append(target)
@@ -889,51 +803,19 @@
else:
return uri
- def interpreted(self, before, after, rawsource, text, role, lineno):
- role_function, canonical, messages = self.get_role_function(role,
- lineno)
- if role_function:
- nodelist, messages2 = role_function(canonical, rawsource, text,
- lineno)
- messages.extend(messages2)
- return before, nodelist, after, messages
+ def interpreted(self, rawsource, text, role, lineno):
+ role_fn, messages = roles.role(role, self.language, lineno,
+ self.reporter)
+ if role_fn:
+ nodes, messages2 = role_fn(role, rawsource, text, lineno, self)
+ return nodes, messages + messages2
else:
- raise InterpretedRoleNotImplementedError(messages)
+ msg = self.reporter.error(
+ 'Unknown interpreted text role "%s".' % role,
+ line=lineno)
+ return ([self.problematic(rawsource, rawsource, msg)],
+ messages + [msg])
- def get_role_function(self, role, lineno):
- messages = []
- msg_text = []
- if role:
- name = role.lower()
- else:
- name = self.default_interpreted_role
- canonical = None
- try:
- canonical = self.language.roles[name]
- except AttributeError, error:
- msg_text.append('Problem retrieving role entry from language '
- 'module %r: %s.' % (self.language, error))
- except KeyError:
- msg_text.append('No role entry for "%s" in module "%s".'
- % (name, self.language.__name__))
- if not canonical:
- try:
- canonical = _fallback_language_module.roles[name]
- msg_text.append('Using English fallback for role "%s".'
- % name)
- except KeyError:
- msg_text.append('Trying "%s" as canonical role name.'
- % name)
- # Should be an English name, but just in case:
- canonical = name
- if msg_text:
- message = self.reporter.info('\n'.join(msg_text), line=lineno)
- messages.append(message)
- try:
- return self.interpreted_roles[canonical], canonical, messages
- except KeyError:
- raise UnknownInterpretedRoleError(messages)
-
def literal(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.literal, nodes.literal,
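As a rough illustration only (not part of this changeset): the dispatch above calls a role function as role_fn(role, rawsource, text, lineno, inliner) and expects a (nodes, messages) pair back. A custom role could be hooked into the new roles module along these lines; the role name and the register_canonical_role() helper are assumptions on my part:

    # Hypothetical sketch of a role function matching the call made in
    # Inliner.interpreted() above; all names here are illustrative only.
    from docutils import nodes
    from docutils.parsers.rst import roles

    def smallcaps_role(role, rawtext, text, lineno, inliner):
        # Wrap the interpreted text in an emphasis node; no system messages.
        return [nodes.emphasis(rawtext, text)], []

    roles.register_canonical_role('smallcaps', smallcaps_role)

Once registered, :smallcaps:`some text` in a source document would be routed through this function instead of producing the "Unknown interpreted text role" error.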
@@ -947,7 +829,7 @@
assert len(inlines) == 1
target = inlines[0]
name = normalize_name(target.astext())
- target['name'] = name
+ target['names'].append(name)
self.document.note_explicit_target(target, self.parent)
return before, inlines, remaining, sysmessages
@@ -1005,15 +887,16 @@
if refname:
refnode['refname'] = refname
self.document.note_footnote_ref(refnode)
- if self.document.settings.trim_footnote_reference_space:
+ if utils.get_trim_footnote_ref_space(self.document.settings):
before = before.rstrip()
return (before, [refnode], remaining, [])
def reference(self, match, lineno, anonymous=None):
referencename = match.group('refname')
refname = normalize_name(referencename)
- referencenode = nodes.reference(referencename + match.group('refend'),
- referencename)
+ referencenode = nodes.reference(
+ referencename + match.group('refend'), referencename,
+ name=whitespace_normalize_name(referencename))
if anonymous:
referencenode['anonymous'] = 1
self.document.note_anonymous_ref(referencenode)
@@ -1042,9 +925,7 @@
else: # not a valid scheme
raise MarkupMismatch
- pep_url_local = 'pep-%04d.html'
- pep_url_absolute = 'http://www.python.org/peps/pep-%04d.html'
- pep_url = pep_url_absolute
+ pep_url = 'pep-%04d.html'
def pep_reference(self, match, lineno):
text = match.group(0)
@@ -1054,17 +935,17 @@
pepnum = int(match.group('pepnum2'))
else:
raise MarkupMismatch
- ref = self.pep_url % pepnum
+ ref = self.document.settings.pep_base_url + self.pep_url % pepnum
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
- rfc_url = 'http://www.faqs.org/rfcs/rfc%d.html'
+ rfc_url = 'rfc%d.html'
def rfc_reference(self, match, lineno):
text = match.group(0)
if text.startswith('RFC'):
rfcnum = int(match.group('rfcnum'))
- ref = self.rfc_url % rfcnum
+ ref = self.document.settings.rfc_base_url + self.rfc_url % rfcnum
else:
raise MarkupMismatch
unescaped = unescape(text, 0)
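The hard-coded absolute PEP/RFC URLs are gone: the base now comes from the pep_base_url and rfc_base_url settings, and only the file name is built here. A minimal sketch of overriding those settings, assuming the generic publish_string() front end (the front-end call is not part of this diff):

    # Hedged example: enable PEP/RFC auto-references and point them at
    # explicit base URLs via settings overrides.
    from docutils.core import publish_string

    source = "See PEP 287 and RFC 2822 for details.\n"
    print publish_string(source, settings_overrides={
        'pep_references': 1,
        'rfc_references': 1,
        'pep_base_url': 'http://www.python.org/peps/',
        'rfc_base_url': 'http://www.faqs.org/rfcs/'})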
@@ -1102,45 +983,7 @@
'_': reference,
'__': anonymous_reference}
- def generic_interpreted_role(self, role, rawtext, text, lineno):
- try:
- role_class = self.generic_roles[role]
- except KeyError:
- msg = self.reporter.error('Unknown interpreted text role: "%s".'
- % role, line=lineno)
- prb = self.problematic(text, text, msg)
- return [prb], [msg]
- return [role_class(rawtext, text)], []
- def pep_reference_role(self, role, rawtext, text, lineno):
- try:
- pepnum = int(text)
- if pepnum < 0 or pepnum > 9999:
- raise ValueError
- except ValueError:
- msg = self.reporter.error(
- 'PEP number must be a number from 0 to 9999; "%s" is invalid.'
- % text, line=lineno)
- prb = self.problematic(text, text, msg)
- return [prb], [msg]
- ref = self.pep_url % pepnum
- return [nodes.reference(rawtext, 'PEP ' + text, refuri=ref)], []
-
- def rfc_reference_role(self, role, rawtext, text, lineno):
- try:
- rfcnum = int(text)
- if rfcnum <= 0:
- raise ValueError
- except ValueError:
- msg = self.reporter.error(
- 'RFC number must be a number greater than or equal to 1; '
- '"%s" is invalid.' % text, line=lineno)
- prb = self.problematic(text, text, msg)
- return [prb], [msg]
- ref = self.rfc_url % rfcnum
- return [nodes.reference(rawtext, 'RFC ' + text, refuri=ref)], []
-
-
class Body(RSTState):
"""
@@ -1193,11 +1036,13 @@
pats['alphanum'] = '[a-zA-Z0-9]'
pats['alphanumplus'] = '[a-zA-Z0-9_-]'
pats['enum'] = ('(%(arabic)s|%(loweralpha)s|%(upperalpha)s|%(lowerroman)s'
- '|%(upperroman)s)' % enum.sequencepats)
+ '|%(upperroman)s|#)' % enum.sequencepats)
pats['optname'] = '%(alphanum)s%(alphanumplus)s*' % pats
# @@@ Loosen up the pattern? Allow Unicode?
- pats['optarg'] = '%(alpha)s%(alphanumplus)s*' % pats
- pats['option'] = r'(--?|\+|/)%(optname)s([ =]%(optarg)s)?' % pats
+ pats['optarg'] = '(%(alpha)s%(alphanumplus)s*|<[^<>]+>)' % pats
+ pats['shortopt'] = r'(-|\+)%(alphanum)s( ?%(optarg)s)?' % pats
+ pats['longopt'] = r'(--|/)%(optname)s([ =]%(optarg)s)?' % pats
+ pats['option'] = r'(%(shortopt)s|%(longopt)s)' % pats
for format in enum.formats:
pats[format] = '(?P<%s>%s%s%s)' % (
@@ -1210,6 +1055,7 @@
'field_marker': r':[^: ]([^:]*[^: ])?:( +|$)',
'option_marker': r'%(option)s(, %(option)s)*( +| ?$)' % pats,
'doctest': r'>>>( +|$)',
+ 'line_block': r'\|( +|$)',
'grid_table_top': grid_table_top_pat,
'simple_table_top': simple_table_top_pat,
'explicit_markup': r'\.\.( +|$)',
@@ -1222,6 +1068,7 @@
'field_marker',
'option_marker',
'doctest',
+ 'line_block',
'grid_table_top',
'simple_table_top',
'explicit_markup',
@@ -1252,7 +1099,8 @@
blockquote += attribution
return blockquote, messages
- attribution_pattern = re.compile(r'--(?![-\n]) *(?=[^ \n])')
+ # u'\u2014' is an em-dash:
+ attribution_pattern = re.compile(ur'(---?(?!-)|\u2014) *(?=[^ \n])')
def check_attribution(self, indented, line_offset):
"""
@@ -1286,8 +1134,8 @@
break
if blank:
a_lines = indented[blank + 1:]
- a_lines.strip_indent(match.end(), end=1)
- a_lines.strip_indent(indent, start=1)
+ a_lines.trim_left(match.end(), end=1)
+ a_lines.trim_left(indent, start=1)
return (indented[:blank], a_lines, line_offset + blank + 1)
else:
return (indented, None, None)
@@ -1308,12 +1156,12 @@
i, blank_finish = self.list_item(match.end())
bulletlist += i
offset = self.state_machine.line_offset + 1 # next line
- newline_offset, blank_finish = self.nested_list_parse(
+ new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=bulletlist, initial_state='BulletList',
blank_finish=blank_finish)
- self.goto_line(newline_offset)
+ self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.unindent_warning('Bullet list')
return [], next_state, []
@@ -1332,18 +1180,20 @@
format, sequence, text, ordinal = self.parse_enumerator(match)
if not self.is_enumerated_list_item(ordinal, sequence, format):
raise statemachine.TransitionCorrection('text')
+ enumlist = nodes.enumerated_list()
+ self.parent += enumlist
+ if sequence == '#':
+ enumlist['enumtype'] = 'arabic'
+ else:
+ enumlist['enumtype'] = sequence
+ enumlist['prefix'] = self.enum.formatinfo[format].prefix
+ enumlist['suffix'] = self.enum.formatinfo[format].suffix
if ordinal != 1:
+ enumlist['start'] = ordinal
msg = self.reporter.info(
'Enumerated list start value not ordinal-1: "%s" (ordinal %s)'
% (text, ordinal), line=self.state_machine.abs_line_number())
self.parent += msg
- enumlist = nodes.enumerated_list()
- self.parent += enumlist
- enumlist['enumtype'] = sequence
- if ordinal != 1:
- enumlist['start'] = ordinal
- enumlist['prefix'] = self.enum.formatinfo[format].prefix
- enumlist['suffix'] = self.enum.formatinfo[format].suffix
listitem, blank_finish = self.list_item(match.end())
enumlist += listitem
offset = self.state_machine.line_offset + 1 # next line
@@ -1352,7 +1202,9 @@
input_offset=self.state_machine.abs_line_offset() + 1,
node=enumlist, initial_state='EnumeratedList',
blank_finish=blank_finish,
- extra_settings={'lastordinal': ordinal, 'format': format})
+ extra_settings={'lastordinal': ordinal,
+ 'format': format,
+ 'auto': sequence == '#'})
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Enumerated list')
@@ -1385,7 +1237,9 @@
raise ParserError('enumerator format not matched')
text = groupdict[format][self.enum.formatinfo[format].start
:self.enum.formatinfo[format].end]
- if expected_sequence:
+ if text == '#':
+ sequence = '#'
+ elif expected_sequence:
try:
if self.enum.sequenceregexps[expected_sequence].match(text):
sequence = expected_sequence
@@ -1402,10 +1256,13 @@
break
else: # shouldn't happen
raise ParserError('enumerator sequence not matched')
- try:
- ordinal = self.enum.converters[sequence](text)
- except roman.InvalidRomanNumeralError:
- ordinal = None
+ if sequence == '#':
+ ordinal = 1
+ else:
+ try:
+ ordinal = self.enum.converters[sequence](text)
+ except roman.InvalidRomanNumeralError:
+ ordinal = None
return format, sequence, text, ordinal
def is_enumerated_list_item(self, ordinal, sequence, format):
@@ -1413,7 +1270,7 @@
Check validity based on the ordinal value and the second line.
Return true iff the ordinal is valid and the second line is blank,
- indented, or starts with the next enumerator.
+ indented, or starts with the next enumerator or an auto-enumerator.
"""
if ordinal is None:
return None
@@ -1426,9 +1283,11 @@
self.state_machine.previous_line()
if not next_line[:1].strip(): # blank or indented
return 1
- next_enumerator = self.make_enumerator(ordinal + 1, sequence, format)
+ next_enumerator, auto_enumerator = self.make_enumerator(
+ ordinal + 1, sequence, format)
try:
- if next_line.startswith(next_enumerator):
+ if ( next_line.startswith(next_enumerator) or
+ next_line.startswith(auto_enumerator) ):
return 1
except TypeError:
pass
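For context, a short sketch of the "#" auto-enumerator this code now accepts; the publish_string() call is an assumed front end, not part of this diff:

    from docutils.core import publish_string

    source = ("#. first item, auto-numbered\n"
              "#. second item\n"
              "#. third item\n")
    print publish_string(source)   # default pseudo-XML writer

All three items end up in a single enumerated_list with enumtype "arabic".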
@@ -1436,11 +1295,14 @@
def make_enumerator(self, ordinal, sequence, format):
"""
- Construct and return an enumerated list item marker.
+ Construct and return the next enumerated list item marker, and an
+ auto-enumerator ("#" instead of the regular enumerator).
Return ``None`` for invalid (out of range) ordinals.
- """
- if sequence == 'arabic':
+ """ #"
+ if sequence == '#':
+ enumerator = '#'
+ elif sequence == 'arabic':
enumerator = str(ordinal)
else:
if sequence.endswith('alpha'):
@@ -1463,19 +1325,22 @@
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
formatinfo = self.enum.formatinfo[format]
- return formatinfo.prefix + enumerator + formatinfo.suffix + ' '
+ next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix
+ + ' ')
+ auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' '
+ return next_enumerator, auto_enumerator
def field_marker(self, match, context, next_state):
"""Field list item."""
- fieldlist = nodes.field_list()
- self.parent += fieldlist
+ field_list = nodes.field_list()
+ self.parent += field_list
field, blank_finish = self.field(match)
- fieldlist += field
+ field_list += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
- node=fieldlist, initial_state='FieldList',
+ node=field_list, initial_state='FieldList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
@@ -1487,14 +1352,15 @@
lineno = self.state_machine.abs_line_number()
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
- fieldnode = nodes.field()
- fieldnode.line = lineno
- fieldnode += nodes.field_name(name, name)
- fieldbody = nodes.field_body('\n'.join(indented))
- fieldnode += fieldbody
+ field_node = nodes.field()
+ field_node.line = lineno
+ name_nodes, name_messages = self.inline_text(name, lineno)
+ field_node += nodes.field_name(name, '', *name_nodes)
+ field_body = nodes.field_body('\n'.join(indented), *name_messages)
+ field_node += field_body
if indented:
- self.parse_field_body(indented, line_offset, fieldbody)
- return fieldnode, blank_finish
+ self.parse_field_body(indented, line_offset, field_body)
+ return field_node, blank_finish
def parse_field_marker(self, match):
"""Extract & return field name from a field marker match."""
@@ -1567,8 +1433,20 @@
delimiter = ' '
firstopt = tokens[0].split('=')
if len(firstopt) > 1:
+ # "--opt=value" form
tokens[:1] = firstopt
delimiter = '='
+ elif (len(tokens[0]) > 2
+ and ((tokens[0].startswith('-')
+ and not tokens[0].startswith('--'))
+ or tokens[0].startswith('+'))):
+ # "-ovalue" form
+ tokens[:1] = [tokens[0][:2], tokens[0][2:]]
+ delimiter = ''
+ if len(tokens) > 1 and (tokens[1].startswith('<')
+ and tokens[-1].endswith('>')):
+ # "-o <value1 value2>" form; join all values into one token
+ tokens[1:] = [' '.join(tokens[1:])]
if 0 < len(tokens) <= 2:
option = nodes.option(optionstring)
option += nodes.option_string(tokens[0], tokens[0])
@@ -1578,7 +1456,7 @@
optlist.append(option)
else:
raise MarkupError(
- 'wrong numer of option tokens (=%s), should be 1 or 2: '
+ 'wrong number of option tokens (=%s), should be 1 or 2: '
'"%s"' % (len(tokens), optionstring),
self.state_machine.abs_line_number() + 1)
return optlist
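For reference, the reworked option grammar above accepts an argument attached directly to a short option ("-ovalue") and angle-bracket placeholders. A quick sketch, assuming the publish_string() front end:

    from docutils.core import publish_string

    source = ("-o <file>      argument in angle brackets\n"
              "-ofile         argument attached to the short option\n"
              "--output=file  long option with an '=' delimiter\n")
    print publish_string(source)   # renders a single option_list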
@@ -1588,6 +1466,69 @@
self.parent += nodes.doctest_block(data, data)
return [], next_state, []
+ def line_block(self, match, context, next_state):
+ """First line of a line block."""
+ block = nodes.line_block()
+ self.parent += block
+ lineno = self.state_machine.abs_line_number()
+ line, messages, blank_finish = self.line_block_line(match, lineno)
+ block += line
+ self.parent += messages
+ if not blank_finish:
+ offset = self.state_machine.line_offset + 1 # next line
+ new_line_offset, blank_finish = self.nested_list_parse(
+ self.state_machine.input_lines[offset:],
+ input_offset=self.state_machine.abs_line_offset() + 1,
+ node=block, initial_state='LineBlock',
+ blank_finish=0)
+ self.goto_line(new_line_offset)
+ if not blank_finish:
+ self.parent += self.reporter.warning(
+ 'Line block ends without a blank line.',
+ line=(self.state_machine.abs_line_number() + 1))
+ if len(block):
+ if block[0].indent is None:
+ block[0].indent = 0
+ self.nest_line_block_lines(block)
+ return [], next_state, []
+
+ def line_block_line(self, match, lineno):
+ """Return one line element of a line_block."""
+ indented, indent, line_offset, blank_finish = \
+ self.state_machine.get_first_known_indented(match.end(),
+ until_blank=1)
+ text = u'\n'.join(indented)
+ text_nodes, messages = self.inline_text(text, lineno)
+ line = nodes.line(text, '', *text_nodes)
+ if match.string.rstrip() != '|': # not empty
+ line.indent = len(match.group(1)) - 1
+ return line, messages, blank_finish
+
+ def nest_line_block_lines(self, block):
+ for index in range(1, len(block)):
+ if block[index].indent is None:
+ block[index].indent = block[index - 1].indent
+ self.nest_line_block_segment(block)
+
+ def nest_line_block_segment(self, block):
+ indents = [item.indent for item in block]
+ least = min(indents)
+ new_items = []
+ new_block = nodes.line_block()
+ for item in block:
+ if item.indent > least:
+ new_block.append(item)
+ else:
+ if len(new_block):
+ self.nest_line_block_segment(new_block)
+ new_items.append(new_block)
+ new_block = nodes.line_block()
+ new_items.append(item)
+ if len(new_block):
+ self.nest_line_block_segment(new_block)
+ new_items.append(new_block)
+ block[:] = new_items
+
def grid_table_top(self, match, context, next_state):
"""Top border of a full table."""
return self.table_top(match, context, next_state,
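A small sketch of the new "|" line-block syntax handled above; deeper indentation is turned into nested line_block nodes by nest_line_block_lines()/nest_line_block_segment(). The publish_string() call is an assumed front end, not part of this diff:

    from docutils.core import publish_string

    source = ("| Lend us a couple of bob till Thursday.\n"
              "|     I'm absolutely skint.\n"
              "| But I'm expecting a postal order...\n")
    print publish_string(source)   # line_block with a nested line_block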
@@ -1624,7 +1565,8 @@
table = self.build_table(tabledata, tableline)
nodelist = [table] + messages
except tableparser.TableMarkupError, detail:
- nodelist = self.malformed_table(block, str(detail)) + messages
+ nodelist = self.malformed_table(
+ block, ' '.join(detail.args)) + messages
else:
nodelist = messages
return nodelist, blank_finish
@@ -1716,13 +1658,17 @@
line=lineno)
return [error]
- def build_table(self, tabledata, tableline):
- colspecs, headrows, bodyrows = tabledata
+ def build_table(self, tabledata, tableline, stub_columns=0):
+ colwidths, headrows, bodyrows = tabledata
table = nodes.table()
- tgroup = nodes.tgroup(cols=len(colspecs))
+ tgroup = nodes.tgroup(cols=len(colwidths))
table += tgroup
- for colspec in colspecs:
- tgroup += nodes.colspec(colwidth=colspec)
+ for colwidth in colwidths:
+ colspec = nodes.colspec(colwidth=colwidth)
+ if stub_columns:
+ colspec.attributes['stub'] = 1
+ stub_columns -= 1
+ tgroup += colspec
if headrows:
thead = nodes.thead()
tgroup += thead
@@ -1810,7 +1756,7 @@
name = name[1:] # autonumber label
footnote['auto'] = 1
if name:
- footnote['name'] = name
+ footnote['names'].append(name)
self.document.note_autofootnote(footnote)
elif name == '*': # auto-symbol
name = ''
@@ -1818,7 +1764,7 @@
self.document.note_symbol_footnote(footnote)
else: # manually numbered
footnote += nodes.label('', label)
- footnote['name'] = name
+ footnote['names'].append(name)
self.document.note_footnote(footnote)
if name:
self.document.note_explicit_target(footnote, footnote)
@@ -1837,7 +1783,7 @@
citation = nodes.citation('\n'.join(indented))
citation.line = lineno
citation += nodes.label('', label)
- citation['name'] = name
+ citation['names'].append(name)
self.document.note_citation(citation)
self.document.note_explicit_target(citation, citation)
if indented:
@@ -1865,42 +1811,54 @@
raise MarkupError('malformed hyperlink target.', lineno)
del block[:blockindex]
block[0] = (block[0] + ' ')[targetmatch.end()-len(escaped)-1:].strip()
+ target = self.make_target(block, blocktext, lineno,
+ targetmatch.group('name'))
+ return [target], blank_finish
+
+ def make_target(self, block, block_text, lineno, target_name):
+ target_type, data = self.parse_target(block, block_text, lineno)
+ if target_type == 'refname':
+ target = nodes.target(block_text, '', refname=normalize_name(data))
+ self.add_target(target_name, '', target, lineno)
+ self.document.note_indirect_target(target)
+ return target
+ elif target_type == 'refuri':
+ target = nodes.target(block_text, '')
+ self.add_target(target_name, data, target, lineno)
+ return target
+ else:
+ return data
+
+ def parse_target(self, block, block_text, lineno):
+ """
+ Determine the type of reference of a target.
+
+ :Return: A 2-tuple, one of:
+
+ - 'refname' and the indirect reference name
+ - 'refuri' and the URI
+ - 'malformed' and a system_message node
+ """
if block and block[-1].strip()[-1:] == '_': # possible indirect target
reference = ' '.join([line.strip() for line in block])
refname = self.is_reference(reference)
if refname:
- target = nodes.target(blocktext, '', refname=refname)
- target.line = lineno
- self.add_target(targetmatch.group('name'), '', target)
- self.document.note_indirect_target(target)
- return [target], blank_finish
- nodelist = []
- reference = ''.join([line.strip() for line in block])
- if reference.find(' ') != -1:
- warning = self.reporter.warning(
- 'Hyperlink target contains whitespace. Perhaps a footnote '
- 'was intended?',
- nodes.literal_block(blocktext, blocktext), line=lineno)
- nodelist.append(warning)
- else:
- unescaped = unescape(reference)
- target = nodes.target(blocktext, '')
- target.line = lineno
- self.add_target(targetmatch.group('name'), unescaped, target)
- nodelist.append(target)
- return nodelist, blank_finish
+ return 'refname', refname
+ reference = ''.join([''.join(line.split()) for line in block])
+ return 'refuri', unescape(reference)
def is_reference(self, reference):
match = self.explicit.patterns.reference.match(
- normalize_name(reference))
+ whitespace_normalize_name(reference))
if not match:
return None
return unescape(match.group('simple') or match.group('phrase'))
- def add_target(self, targetname, refuri, target):
+ def add_target(self, targetname, refuri, target, lineno):
+ target.line = lineno
if targetname:
name = normalize_name(unescape(targetname))
- target['name'] = name
+ target['names'].append(name)
if refuri:
uri = self.inliner.adjust_uri(refuri)
if uri:
@@ -1914,6 +1872,8 @@
else: # anonymous target
if refuri:
target['refuri'] = refuri
+ else:
+ self.document.note_internal_target(target)
target['anonymous'] = 1
self.document.note_anonymous_target(target)
@@ -1925,9 +1885,7 @@
strip_indent=0)
blocktext = (match.string[:match.end()] + '\n'.join(block))
block.disconnect()
- for i in range(len(block)):
- block[i] = escape2null(block[i])
- escaped = block[0].rstrip()
+ escaped = escape2null(block[0].rstrip())
blockindex = 0
while 1:
subdefmatch = pattern.match(escaped)
@@ -1935,12 +1893,12 @@
break
blockindex += 1
try:
- escaped = escaped + ' ' + block[blockindex].strip()
+ escaped = escaped + ' ' + escape2null(block[blockindex].strip())
except IndexError:
raise MarkupError('malformed substitution definition.',
lineno)
del block[:blockindex] # strip out the substitution marker
- block[0] = (block[0] + ' ')[subdefmatch.end()-len(escaped)-1:].strip()
+ block[0] = (block[0].strip() + ' ')[subdefmatch.end()-len(escaped)-1:-1]
if not block[0]:
del block[0]
offset += 1
@@ -1979,17 +1937,18 @@
return [msg], blank_finish
def directive(self, match, **option_presets):
+ """Returns a 2-tuple: list of nodes, and a "blank finish" boolean."""
type_name = match.group(1)
directive_function, messages = directives.directive(
type_name, self.memo.language, self.document)
self.parent += messages
if directive_function:
- return self.parse_directive(
+ return self.run_directive(
directive_function, match, type_name, option_presets)
else:
return self.unknown_directive(type_name)
- def parse_directive(self, directive_fn, match, type_name, option_presets):
+ def run_directive(self, directive_fn, match, type_name, option_presets):
"""
Parse a directive then run its directive function.
@@ -2011,6 +1970,31 @@
Returns a 2-tuple: list of nodes, and a "blank finish" boolean.
"""
+ lineno = self.state_machine.abs_line_number()
+ initial_line_offset = self.state_machine.line_offset
+ indented, indent, line_offset, blank_finish \
+ = self.state_machine.get_first_known_indented(match.end(),
+ strip_top=0)
+ block_text = '\n'.join(self.state_machine.input_lines[
+ initial_line_offset : self.state_machine.line_offset + 1])
+ try:
+ arguments, options, content, content_offset = (
+ self.parse_directive_block(indented, line_offset,
+ directive_fn, option_presets))
+ except MarkupError, detail:
+ error = self.reporter.error(
+ 'Error in "%s" directive:\n%s.' % (type_name,
+ ' '.join(detail.args)),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [error], blank_finish
+ result = directive_fn(type_name, arguments, options, content, lineno,
+ content_offset, block_text, self,
+ self.state_machine)
+ return (result,
+ blank_finish or self.state_machine.is_next_line_blank())
+
+ def parse_directive_block(self, indented, line_offset, directive_fn,
+ option_presets):
arguments = []
options = {}
argument_spec = getattr(directive_fn, 'arguments', None)
@@ -2018,13 +2002,6 @@
argument_spec = None
option_spec = getattr(directive_fn, 'options', None)
content_spec = getattr(directive_fn, 'content', None)
- lineno = self.state_machine.abs_line_number()
- initial_line_offset = self.state_machine.line_offset
- indented, indent, line_offset, blank_finish \
- = self.state_machine.get_first_known_indented(match.end(),
- strip_top=0)
- block_text = '\n'.join(self.state_machine.input_lines[
- initial_line_offset : self.state_machine.line_offset + 1])
if indented and not indented[0].strip():
indented.trim_start()
line_offset += 1
@@ -2046,24 +2023,18 @@
while content and not content[0].strip():
content.trim_start()
content_offset += 1
- try:
- if option_spec:
- options, arg_block = self.parse_directive_options(
- option_presets, option_spec, arg_block)
- if argument_spec:
- arguments = self.parse_directive_arguments(argument_spec,
- arg_block)
- if content and not content_spec:
- raise MarkupError('no content permitted')
- except MarkupError, detail:
- error = self.reporter.error(
- 'Error in "%s" directive:\n%s.' % (type_name, detail),
- nodes.literal_block(block_text, block_text), line=lineno)
- return [error], blank_finish
- result = directive_fn(
- type_name, arguments, options, content, lineno, content_offset,
- block_text, self, self.state_machine)
- return result, blank_finish or self.state_machine.is_next_line_blank()
+ if option_spec:
+ options, arg_block = self.parse_directive_options(
+ option_presets, option_spec, arg_block)
+ if arg_block and not argument_spec:
+ raise MarkupError('no arguments permitted; blank line '
+ 'required before content block')
+ if argument_spec:
+ arguments = self.parse_directive_arguments(
+ argument_spec, arg_block)
+ if content and not content_spec:
+ raise MarkupError('no content permitted')
+ return (arguments, options, content, content_offset)
def parse_directive_options(self, option_presets, option_spec, arg_block):
options = option_presets.copy()
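A rough sketch (names hypothetical, not part of this changeset) of the function-based directive interface that run_directive()/parse_directive_block() dispatch to: the function receives the parsed arguments, options and content and returns a list of nodes.

    from docutils import nodes
    from docutils.parsers.rst import directives

    def box_directive(name, arguments, options, content, lineno,
                      content_offset, block_text, state, state_machine):
        # Build a note-style admonition from the directive content.
        node = nodes.note(block_text)
        if content:
            state.nested_parse(content, content_offset, node)
        return [node]

    box_directive.content = 1   # content allowed; no argument/option spec

    directives.register_directive('box-note', box_directive)

Leaving the "arguments" and "options" attributes unset means parse_directive_block() treats both specs as absent, so any argument block triggers the "no arguments permitted" error added above.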
@@ -2124,9 +2095,9 @@
except KeyError, detail:
return 0, ('unknown option: "%s"' % detail.args[0])
except (ValueError, TypeError), detail:
- return 0, ('invalid option value: %s' % detail)
+ return 0, ('invalid option value: %s' % ' '.join(detail.args))
except utils.ExtensionOptionError, detail:
- return 0, ('invalid option data: %s' % detail)
+ return 0, ('invalid option data: %s' % ' '.join(detail.args))
if blank_finish:
return 1, options
else:
@@ -2180,13 +2151,13 @@
re.compile(r"""
\.\.[ ]+ # explicit markup start
_ # target indicator
- (?![ ]) # first char. not space
+ (?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(substitution_def,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\| # substitution indicator
- (?![ ]) # first char. not space
+ (?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(directive,
re.compile(r"""
@@ -2242,38 +2213,14 @@
return [], next_state, []
def anonymous_target(self, match):
+ lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
blocktext = match.string[:match.end()] + '\n'.join(block)
- if block and block[-1].strip()[-1:] == '_': # possible indirect target
- reference = escape2null(' '.join([line.strip()
- for line in block]))
- refname = self.is_reference(reference)
- if refname:
- target = nodes.target(blocktext, '', refname=refname,
- anonymous=1)
- self.document.note_anonymous_target(target)
- self.document.note_indirect_target(target)
- return [target], blank_finish
- nodelist = []
- reference = escape2null(''.join([line.strip() for line in block]))
- if reference.find(' ') != -1:
- lineno = self.state_machine.abs_line_number() - len(block) + 1
- warning = self.reporter.warning(
- 'Anonymous hyperlink target contains whitespace. Perhaps a '
- 'footnote was intended?',
- nodes.literal_block(blocktext, blocktext),
- line=lineno)
- nodelist.append(warning)
- else:
- target = nodes.target(blocktext, '', anonymous=1)
- if reference:
- unescaped = unescape(reference)
- target['refuri'] = unescaped
- self.document.note_anonymous_target(target)
- nodelist.append(target)
- return nodelist, blank_finish
+ block = [escape2null(line) for line in block]
+ target = self.make_target(block, blocktext, lineno, '')
+ return [target], blank_finish
def line(self, match, context, next_state):
"""Section title overline or transition marker."""
@@ -2317,7 +2264,7 @@
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
- fieldlist = nodes.field_list(CLASS='rfc2822')
+ fieldlist = nodes.field_list(classes=['rfc2822'])
self.parent += fieldlist
field, blank_finish = self.rfc2822_field(match)
fieldlist += field
@@ -2383,6 +2330,7 @@
field_marker = invalid_input
option_marker = invalid_input
doctest = invalid_input
+ line_block = invalid_input
grid_table_top = invalid_input
simple_table_top = invalid_input
explicit_markup = invalid_input
@@ -2423,12 +2371,15 @@
"""Enumerated list item."""
format, sequence, text, ordinal = self.parse_enumerator(
match, self.parent['enumtype'])
- if (sequence != self.parent['enumtype'] or
- format != self.format or
- ordinal != (self.lastordinal + 1) or
- not self.is_enumerated_list_item(ordinal, sequence, format)):
+ if ( format != self.format
+ or (sequence != '#' and (sequence != self.parent['enumtype']
+ or self.auto
+ or ordinal != (self.lastordinal + 1)))
+ or not self.is_enumerated_list_item(ordinal, sequence, format)):
# different enumeration: new list
self.invalid_input()
+ if sequence == '#':
+ self.auto = 1
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
@@ -2500,6 +2451,22 @@
lines = []
+class LineBlock(SpecializedBody):
+
+ """Second and subsequent lines of a line_block."""
+
+ blank = SpecializedBody.invalid_input
+
+ def line_block(self, match, context, next_state):
+ """New line of line block."""
+ lineno = self.state_machine.abs_line_number()
+ line, messages, blank_finish = self.line_block_line(match, lineno)
+ self.parent += line
+ self.parent.parent += messages
+ self.blank_finish = blank_finish
+ return [], next_state, []
+
+
class Explicit(SpecializedBody):
"""Second and subsequent explicit markup construct."""
@@ -2535,7 +2502,7 @@
def embedded_directive(self, match, context, next_state):
nodelist, blank_finish = self.directive(match,
- alt=self.parent['name'])
+ alt=self.parent['names'][0])
self.parent += nodelist
if not self.state_machine.at_eof():
self.blank_finish = blank_finish
@@ -2651,20 +2618,30 @@
"""Return a list of nodes."""
indented, indent, offset, blank_finish = \
self.state_machine.get_indented()
- nodelist = []
while indented and not indented[-1].strip():
indented.trim_end()
- if indented:
- data = '\n'.join(indented)
- nodelist.append(nodes.literal_block(data, data))
- if not blank_finish:
- nodelist.append(self.unindent_warning('Literal block'))
- else:
- nodelist.append(self.reporter.warning(
- 'Literal block expected; none found.',
- line=self.state_machine.abs_line_number()))
+ if not indented:
+ return self.quoted_literal_block()
+ data = '\n'.join(indented)
+ literal_block = nodes.literal_block(data, data)
+ literal_block.line = offset + 1
+ nodelist = [literal_block]
+ if not blank_finish:
+ nodelist.append(self.unindent_warning('Literal block'))
return nodelist
+ def quoted_literal_block(self):
+ abs_line_offset = self.state_machine.abs_line_offset()
+ offset = self.state_machine.line_offset
+ parent_node = nodes.Element()
+ new_abs_offset = self.nested_parse(
+ self.state_machine.input_lines[offset:],
+ input_offset=abs_line_offset, node=parent_node, match_titles=0,
+ state_machine_kwargs={'state_classes': (QuotedLiteralBlock,),
+ 'initial_state': 'QuotedLiteralBlock'})
+ self.goto_line(new_abs_offset)
+ return parent_node.children
+
def definition_list_item(self, termline):
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
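The new quoted_literal_block()/QuotedLiteralBlock machinery handles unindented literal blocks set off by a consistent quote character. A small sketch, assuming the publish_string() front end:

    from docutils.core import publish_string

    source = ("John Doe wrote::\n"
              "\n"
              ">> Great idea!\n"
              ">\n"
              "> Why didn't I think of that?\n"
              "\n"
              "You just did!\n")
    print publish_string(source)   # the '>' lines become one literal_block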
@@ -2678,13 +2655,15 @@
definitionlistitem += definition
if termline[0][-2:] == '::':
definition += self.reporter.info(
- 'Blank line missing before literal block? Interpreted as a '
- 'definition list item.', line=line_offset + 1)
+ 'Blank line missing before literal block (after the "::")? '
+ 'Interpreted as a definition list item.', line=line_offset+1)
self.nested_parse(indented, input_offset=line_offset, node=definition)
return definitionlistitem, blank_finish
+ classifier_delimiter = re.compile(' +: +')
+
def term(self, lines, lineno):
- """Return a definition_list's term and optional classifier."""
+ """Return a definition_list's term and optional classifiers."""
assert len(lines) == 1
text_nodes, messages = self.inline_text(lines[0], lineno)
term_node = nodes.term()
@@ -2692,17 +2671,17 @@
for i in range(len(text_nodes)):
node = text_nodes[i]
if isinstance(node, nodes.Text):
- parts = node.rawsource.split(' : ', 1)
+ parts = self.classifier_delimiter.split(node.rawsource)
if len(parts) == 1:
- term_node += node
+ node_list[-1] += node
else:
- term_node += nodes.Text(parts[0].rstrip())
- classifier_node = nodes.classifier('', parts[1])
- classifier_node += text_nodes[i+1:]
- node_list.append(classifier_node)
- break
+
+ node_list[-1] += nodes.Text(parts[0].rstrip())
+ for part in parts[1:]:
+ classifier_node = nodes.classifier('', part)
+ node_list.append(classifier_node)
else:
- term_node += node
+ node_list[-1] += node
return node_list, messages
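Since term() now splits on the classifier_delimiter pattern, a definition-list term may carry several classifiers. Sketch, assuming the publish_string() front end:

    from docutils.core import publish_string

    source = ("term : classifier one : classifier two\n"
              "    Definition of the term.\n")
    print publish_string(source)   # term followed by two classifier nodes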
@@ -2764,13 +2743,9 @@
self.state_correction(context)
if self.eofcheck: # ignore EOFError with sections
lineno = self.state_machine.abs_line_number() - 1
- transition = nodes.transition(context[0])
+ transition = nodes.transition(rawsource=context[0])
transition.line = lineno
self.parent += transition
- msg = self.reporter.error(
- 'Document or section may not end with a transition.',
- line=lineno)
- self.parent += msg
self.eofcheck = 1
return []
@@ -2780,19 +2755,8 @@
marker = context[0].strip()
if len(marker) < 4:
self.state_correction(context)
- transition = nodes.transition(marker)
+ transition = nodes.transition(rawsource=marker)
transition.line = lineno
- if len(self.parent) == 0:
- msg = self.reporter.error(
- 'Document or section may not begin with a transition.',
- line=lineno)
- self.parent += msg
- elif isinstance(self.parent[-1], nodes.transition):
- msg = self.reporter.error(
- 'At least one body element must separate transitions; '
- 'adjacent transitions not allowed.',
- line=lineno)
- self.parent += msg
self.parent += transition
return [], 'Body', []
@@ -2823,7 +2787,7 @@
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
- 'Missing underline for overline.',
+ 'Missing matching underline for section title overline.',
nodes.literal_block(source, source), line=lineno)
self.parent += msg
return [], 'Body', []
@@ -2881,33 +2845,78 @@
raise statemachine.StateCorrection('Body', 'text')
-state_classes = (Body, BulletList, DefinitionList, EnumeratedList, FieldList,
- OptionList, ExtensionOptions, Explicit, Text, Definition,
- Line, SubstitutionDef, RFC2822Body, RFC2822List)
-"""Standard set of State classes used to start `RSTStateMachine`."""
+class QuotedLiteralBlock(RSTState):
+ """
+ Nested parse handler for quoted (unindented) literal blocks.
-def escape2null(text):
- """Return a string with escape-backslashes converted to nulls."""
- parts = []
- start = 0
- while 1:
- found = text.find('\\', start)
- if found == -1:
- parts.append(text[start:])
- return ''.join(parts)
- parts.append(text[start:found])
- parts.append('\x00' + text[found+1:found+2])
- start = found + 2 # skip character after escape
+ Special-purpose. Not for inclusion in `state_classes`.
+ """
-def unescape(text, restore_backslashes=0):
- """
- Return a string with nulls removed or restored to backslashes.
- Backslash-escaped spaces are also removed.
- """
- if restore_backslashes:
- return text.replace('\x00', '\\')
- else:
- for sep in ['\x00 ', '\x00\n', '\x00']:
- text = ''.join(text.split(sep))
- return text
+ patterns = {'initial_quoted': r'(%(nonalphanum7bit)s)' % Body.pats,
+ 'text': r''}
+ initial_transitions = ('initial_quoted', 'text')
+
+ def __init__(self, state_machine, debug=0):
+ RSTState.__init__(self, state_machine, debug)
+ self.messages = []
+ self.initial_lineno = None
+
+ def blank(self, match, context, next_state):
+ if context:
+ raise EOFError
+ else:
+ return context, next_state, []
+
+ def eof(self, context):
+ if context:
+ text = '\n'.join(context)
+ literal_block = nodes.literal_block(text, text)
+ literal_block.line = self.initial_lineno
+ self.parent += literal_block
+ else:
+ self.parent += self.reporter.warning(
+ 'Literal block expected; none found.',
+ line=self.state_machine.abs_line_number())
+ self.state_machine.previous_line()
+ self.parent += self.messages
+ return []
+
+ def indent(self, match, context, next_state):
+ assert context, ('QuotedLiteralBlock.indent: context should not '
+ 'be empty!')
+ self.messages.append(
+ self.reporter.error('Unexpected indentation.',
+ line=self.state_machine.abs_line_number()))
+ self.state_machine.previous_line()
+ raise EOFError
+
+ def initial_quoted(self, match, context, next_state):
+ """Match arbitrary quote character on the first line only."""
+ self.remove_transition('initial_quoted')
+ quote = match.string[0]
+ pattern = re.compile(re.escape(quote))
+ # New transition matches consistent quotes only:
+ self.add_transition('quoted',
+ (pattern, self.quoted, self.__class__.__name__))
+ self.initial_lineno = self.state_machine.abs_line_number()
+ return [match.string], next_state, []
+
+ def quoted(self, match, context, next_state):
+ """Match consistent quotes on subsequent lines."""
+ context.append(match.string)
+ return context, next_state, []
+
+ def text(self, match, context, next_state):
+ if context:
+ self.messages.append(
+ self.reporter.error('Inconsistent literal block quoting.',
+ line=self.state_machine.abs_line_number()))
+ self.state_machine.previous_line()
+ raise EOFError
+
+
+state_classes = (Body, BulletList, DefinitionList, EnumeratedList, FieldList,
+ OptionList, LineBlock, ExtensionOptions, Explicit, Text,
+ Definition, Line, SubstitutionDef, RFC2822Body, RFC2822List)
+"""Standard set of State classes used to start `RSTStateMachine`."""
Modified: Zope3/trunk/src/docutils/parsers/rst/tableparser.py
===================================================================
--- Zope3/trunk/src/docutils/parsers/rst/tableparser.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/parsers/rst/tableparser.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 1574 $
+# Date: $Date: 2003-07-06 00:38:28 +0200 (Sun, 06 Jul 2003) $
# Copyright: This module has been placed in the public domain.
"""
@@ -131,7 +131,8 @@
head_body_separator_pat = re.compile(r'\+=[=+]+=\+ *$')
def setup(self, block):
- self.block = list(block) # make a copy; it may be modified
+ self.block = block[:] # make a copy; it may be modified
+ self.block.disconnect() # don't propagate changes to parent
self.bottom = len(block) - 1
self.right = len(block[0]) - 1
self.head_body_sep = None
@@ -165,7 +166,9 @@
update_dict_of_lists(self.rowseps, rowseps)
update_dict_of_lists(self.colseps, colseps)
self.mark_done(top, left, bottom, right)
- cellblock = self.get_cell_block(top, left, bottom, right)
+ cellblock = self.block.get_2D_block(top + 1, left + 1,
+ bottom, right)
+ cellblock.disconnect() # lines in cell can't sync with parent
self.cells.append((top, left, bottom, right, cellblock))
corners.extend([(top, right), (bottom, left)])
corners.sort()
@@ -188,19 +191,6 @@
return None
return 1
- def get_cell_block(self, top, left, bottom, right):
- """Given the corners, extract the text of a cell."""
- cellblock = []
- margin = right
- for lineno in range(top + 1, bottom):
- line = self.block[lineno][left + 1 : right].rstrip()
- cellblock.append(line)
- if line:
- margin = min(margin, len(line) - len(line.lstrip()))
- if 0 < margin < right:
- cellblock = [line[margin:] for line in cellblock]
- return cellblock
-
def scan_cell(self, top, left):
"""Starting at the top-left corner, start tracing out a cell."""
assert self.block[top][left] == '+'
@@ -278,7 +268,7 @@
def structure_from_cells(self):
"""
- From the data colledted by `scan_cell()`, convert to the final data
+ From the data collected by `scan_cell()`, convert to the final data
structure.
"""
rowseps = self.rowseps.keys() # list of row boundaries
@@ -371,7 +361,8 @@
span_pat = re.compile('-[ -]*$')
def setup(self, block):
- self.block = list(block) # make a copy; it will be modified
+ self.block = block[:] # make a copy; it will be modified
+ self.block.disconnect() # don't propagate changes to parent
# Convert top & bottom borders to column span underlines:
self.block[0] = self.block[0].replace('=', '-')
self.block[-1] = self.block[-1].replace('=', '-')
@@ -394,25 +385,26 @@
self.columns = self.parse_columns(self.block[0], 0)
self.border_end = self.columns[-1][1]
firststart, firstend = self.columns[0]
- block = self.block[1:]
- offset = 0
- # Container for accumulating text lines until a row is complete:
- rowlines = []
- while block:
- line = block.pop(0)
- offset += 1
+ offset = 1 # skip top border
+ start = 1
+ text_found = None
+ while offset < len(self.block):
+ line = self.block[offset]
if self.span_pat.match(line):
# Column span underline or border; row is complete.
- self.parse_row(rowlines, (line.rstrip(), offset))
- rowlines = []
+ self.parse_row(self.block[start:offset], start,
+ (line.rstrip(), offset))
+ start = offset + 1
+ text_found = None
elif line[firststart:firstend].strip():
# First column not blank, therefore it's a new row.
- if rowlines:
- self.parse_row(rowlines)
- rowlines = [(line.rstrip(), offset)]
- else:
- # Accumulate lines of incomplete row.
- rowlines.append((line.rstrip(), offset))
+ if text_found and offset != start:
+ self.parse_row(self.block[start:offset], start)
+ start = offset
+ text_found = 1
+ elif not text_found:
+ start = offset + 1
+ offset += 1
def parse_columns(self, line, offset):
"""
@@ -448,12 +440,12 @@
morecols += 1
except (AssertionError, IndexError):
raise TableMarkupError('Column span alignment problem at '
- 'line offset %s.' % offset)
- cells.append((0, morecols, offset, []))
+ 'line offset %s.' % (offset + 1))
+ cells.append([0, morecols, offset, []])
i += 1
return cells
- def parse_row(self, lines, spanline=None):
+ def parse_row(self, lines, start, spanline=None):
"""
Given the text `lines` of a row, parse it and append to `self.table`.
@@ -462,20 +454,30 @@
text from each line, and check for text in column margins. Finally,
adjust for insignificant whitespace.
"""
- while lines and not lines[-1][0]:
- lines.pop() # Remove blank trailing lines.
- if lines:
- offset = lines[0][1]
- elif spanline:
- offset = spanline[1]
- else:
+ if not (lines or spanline):
# No new row, just blank lines.
return
if spanline:
columns = self.parse_columns(*spanline)
+ span_offset = spanline[1]
else:
columns = self.columns[:]
- row = self.init_row(columns, offset)
+ span_offset = start
+ self.check_columns(lines, start, columns)
+ row = self.init_row(columns, start)
+ for i in range(len(columns)):
+ start, end = columns[i]
+ cellblock = lines.get_2D_block(0, start, len(lines), end)
+ cellblock.disconnect() # lines in cell can't sync with parent
+ row[i][3] = cellblock
+ self.table.append(row)
+
+ def check_columns(self, lines, first_line, columns):
+ """
+ Check for text in column margins and text overflow in the last column.
+ Raise TableMarkupError if anything but whitespace is in column margins.
+ Adjust the end value for the last column if there is text overflow.
+ """
# "Infinite" value for a dummy last column's beginning, used to
# check for text overflow:
columns.append((sys.maxint, None))
@@ -483,31 +485,21 @@
for i in range(len(columns) - 1):
start, end = columns[i]
nextstart = columns[i+1][0]
- block = []
- margin = sys.maxint
- for line, offset in lines:
+ offset = 0
+ for line in lines:
if i == lastcol and line[end:].strip():
text = line[start:].rstrip()
- columns[lastcol] = (start, start + len(text))
- self.adjust_last_column(start + len(text))
+ new_end = start + len(text)
+ columns[i] = (start, new_end)
+ main_start, main_end = self.columns[-1]
+ if new_end > main_end:
+ self.columns[-1] = (main_start, new_end)
elif line[end:nextstart].strip():
raise TableMarkupError('Text in column margin at line '
- 'offset %s.' % offset)
- else:
- text = line[start:end].rstrip()
- block.append(text)
- if text:
- margin = min(margin, len(text) - len(text.lstrip()))
- if 0 < margin < sys.maxint:
- block = [line[margin:] for line in block]
- row[i][3].extend(block)
- self.table.append(row)
+ 'offset %s.' % (first_line + offset))
+ offset += 1
+ columns.pop()
- def adjust_last_column(self, new_end):
- start, end = self.columns[-1]
- if new_end > end:
- self.columns[-1] = (start, new_end)
-
def structure_from_cells(self):
colspecs = [end - start for start, end in self.columns]
first_body_row = 0
Modified: Zope3/trunk/src/docutils/readers/__init__.py
===================================================================
--- Zope3/trunk/src/docutils/readers/__init__.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/readers/__init__.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Authors: David Goodger; Ueli Schlaepfer
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 1645 $
+# Date: $Date: 2003-08-27 22:50:43 +0200 (Wed, 27 Aug 2003) $
# Copyright: This module has been placed in the public domain.
"""
@@ -28,6 +28,7 @@
"""
component_type = 'reader'
+ config_section = 'readers'
def __init__(self, parser=None, parser_name='restructuredtext'):
"""
Modified: Zope3/trunk/src/docutils/readers/pep.py
===================================================================
--- Zope3/trunk/src/docutils/readers/pep.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/readers/pep.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3129 $
+# Date: $Date: 2005-03-26 17:21:28 +0100 (Sat, 26 Mar 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -16,15 +16,6 @@
from docutils.parsers import rst
-class Inliner(rst.states.Inliner):
-
- """
- Extend `rst.Inliner` for local PEP references.
- """
-
- pep_url = rst.states.Inliner.pep_url_local
-
-
class Reader(standalone.Reader):
supported = ('pep',)
@@ -36,10 +27,13 @@
'reStructuredText parser) are on by default.',
())
+ config_section = 'pep reader'
+ config_section_dependencies = ('readers', 'standalone reader')
+
default_transforms = (references.Substitutions,
+ references.PropagateTargets,
peps.Headers,
peps.Contents,
- references.ChainedTargets,
references.AnonymousHyperlinks,
references.IndirectHyperlinks,
peps.TargetNotes,
@@ -49,7 +43,7 @@
settings_default_overrides = {'pep_references': 1, 'rfc_references': 1}
- inliner_class = Inliner
+ inliner_class = rst.states.Inliner
def __init__(self, parser=None, parser_name=None):
"""`parser` should be ``None``."""
Modified: Zope3/trunk/src/docutils/readers/python/__init__.py
===================================================================
--- Zope3/trunk/src/docutils/readers/python/__init__.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/readers/python/__init__.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,11 +1,14 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3038 $
+# Date: $Date: 2005-03-14 17:16:57 +0100 (Mon, 14 Mar 2005) $
# Copyright: This module has been placed in the public domain.
"""
This package contains the Python Source Reader modules.
+
+It requires Python 2.2 or higher (`moduleparser` depends on the
+`compiler` and `tokenize` modules).
"""
__docformat__ = 'reStructuredText'
@@ -13,7 +16,117 @@
import sys
import docutils.readers
+from docutils.readers.python import moduleparser
+from docutils import parsers
+from docutils import nodes
+from docutils.readers.python import pynodes
+from docutils import readers
+class Reader(docutils.readers.Reader):
-class Reader(docutils.readers.Reader):
- pass
+ config_section = 'python reader'
+ config_section_dependencies = ('readers',)
+
+ default_parser = 'restructuredtext'
+
+ def parse(self):
+ """Parse `self.input` into a document tree."""
+ self.document = document = self.new_document()
+ module_section = moduleparser.parse_module(self.input,
+ self.source.source_path)
+ module_section.walk(DocformatVisitor(self.document))
+ visitor = DocstringFormattingVisitor(
+ document=document,
+ default_parser=self.default_parser)
+ module_section.walk(visitor)
+ self.document.append(module_section)
+
+
+class DocformatVisitor(nodes.SparseNodeVisitor):
+
+ """
+ This sets docformat attributes in a module. Wherever an assignment
+ to __docformat__ is found, we look for the enclosing scope -- a class,
+ a module, or a function -- and set the docformat attribute there.
+
+ We can't do this during the DocstringFormattingVisitor walking,
+ because __docformat__ may appear below a docstring in that format
+ (typically below the module docstring).
+ """
+
+ def visit_attribute(self, node):
+ assert isinstance(node[0], pynodes.object_name)
+ name = node[0][0].data
+ if name != '__docformat__':
+ return
+ value = None
+ for child in children:
+ if isinstance(child, pynodes.expression_value):
+ value = child[0].data
+ break
+ assert value.startswith("'") or value.startswith('"'), "__docformat__ must be assigned a string literal (not %s); line: %s" % (value, node['lineno'])
+ name = name[1:-1]
+ looking_in = node.parent
+ while not isinstance(looking_in, (pynodes.module_section,
+ pynodes.function_section,
+ pynodes.class_section)):
+ looking_in = looking_in.parent
+ looking_in['docformat'] = name
+
+
+class DocstringFormattingVisitor(nodes.SparseNodeVisitor):
+
+ def __init__(self, document, default_parser):
+ self.document = document
+ self.default_parser = default_parser
+ self.parsers = {}
+
+ def visit_docstring(self, node):
+ text = node[0].data
+ docformat = self.find_docformat(node)
+ del node[0]
+ node['docformat'] = docformat
+ parser = self.get_parser(docformat)
+ parser.parse(text, self.document)
+ for child in self.document.children:
+ node.append(child)
+ self.document.current_source = self.document.current_line = None
+ del self.document[:]
+
+ def get_parser(self, parser_name):
+ """
+ Get a parser based on its name. We reuse parsers during this
+ visitation, so parser instances are cached.
+ """
+ parser_name = parsers._parser_aliases.get(parser_name, parser_name)
+ if not self.parsers.has_key(parser_name):
+ cls = parsers.get_parser_class(parser_name)
+ self.parsers[parser_name] = cls()
+ return self.parsers[parser_name]
+
+ def find_docformat(self, node):
+ """
+ Find the __docformat__ closest to this node (i.e., look in the
+ class or module)
+ """
+ while node:
+ if node.get('docformat'):
+ return node['docformat']
+ node = node.parent
+ return self.default_parser
+
+
+if __name__ == '__main__':
+ try:
+ import locale
+ locale.setlocale(locale.LC_ALL, '')
+ except:
+ pass
+
+ from docutils.core import publish_cmdline, default_description
+
+ description = ('Generates pseudo-XML from Python modules '
+ '(for testing purposes). ' + default_description)
+
+ publish_cmdline(description=description,
+ reader=Reader())
Modified: Zope3/trunk/src/docutils/readers/python/moduleparser.py
===================================================================
--- Zope3/trunk/src/docutils/readers/python/moduleparser.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/readers/python/moduleparser.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,17 +1,16 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 2449 $
+# Date: $Date: 2004-07-25 03:45:27 +0200 (Sun, 25 Jul 2004) $
# Copyright: This module has been placed in the public domain.
"""
-Parser for Python modules.
+Parser for Python modules. Requires Python 2.2 or higher.
-The `parse_module()` function takes a module's text and file name, runs it
-through the module parser (using compiler.py and tokenize.py) and produces a
-"module documentation tree": a high-level AST full of nodes that are
-interesting from an auto-documentation standpoint. For example, given this
-module (x.py)::
+The `parse_module()` function takes a module's text and file name,
+runs it through the module parser (using compiler.py and tokenize.py)
+and produces a parse tree of the source code, using the nodes as found
+in pynodes.py. For example, given this module (x.py)::
# comment
@@ -50,69 +49,95 @@
The module parser will produce this module documentation tree::
- <Module filename="test data">
- <Comment lineno=1>
- comment
- <Docstring>
+ <module_section filename="test data">
+ <docstring>
Docstring
- <Docstring lineno="5">
+ <docstring lineno="5">
Additional docstring
- <Attribute lineno="7" name="__docformat__">
- <Expression lineno="7">
+ <attribute lineno="7">
+ <object_name>
+ __docformat__
+ <expression_value lineno="7">
'reStructuredText'
- <Attribute lineno="9" name="a">
- <Expression lineno="9">
+ <attribute lineno="9">
+ <object_name>
+ a
+ <expression_value lineno="9">
1
- <Docstring lineno="10">
+ <docstring lineno="10">
Attribute docstring
- <Class bases="Super" lineno="12" name="C">
- <Docstring lineno="12">
+ <class_section lineno="12">
+ <object_name>
+ C
+ <class_base>
+ Super
+ <docstring lineno="12">
C's docstring
- <Attribute lineno="16" name="class_attribute">
- <Expression lineno="16">
+ <attribute lineno="16">
+ <object_name>
+ class_attribute
+ <expression_value lineno="16">
1
- <Docstring lineno="17">
+ <docstring lineno="17">
class_attribute's docstring
- <Method lineno="19" name="__init__">
- <Docstring lineno="19">
+ <method_section lineno="19">
+ <object_name>
+ __init__
+ <docstring lineno="19">
__init__'s docstring
- <ParameterList lineno="19">
- <Parameter lineno="19" name="self">
- <Parameter lineno="19" name="text">
- <Default lineno="19">
+ <parameter_list lineno="19">
+ <parameter lineno="19">
+ <object_name>
+ self
+ <parameter lineno="19">
+ <object_name>
+ text
+ <parameter_default lineno="19">
None
- <Attribute lineno="22" name="self.instance_attribute">
- <Expression lineno="22">
+ <attribute lineno="22">
+ <object_name>
+ self.instance_attribute
+ <expression_value lineno="22">
(text * 7 + ' whaddyaknow')
- <Docstring lineno="24">
+ <docstring lineno="24">
instance_attribute's docstring
- <Function lineno="27" name="f">
- <Docstring lineno="27">
+ <function_section lineno="27">
+ <object_name>
+ f
+ <docstring lineno="27">
f's docstring
- <ParameterList lineno="27">
- <Parameter lineno="27" name="x">
- <Comment>
+ <parameter_list lineno="27">
+ <parameter lineno="27">
+ <object_name>
+ x
+ <comment>
# parameter x
- <Parameter lineno="27" name="y">
- <Default lineno="27">
+ <parameter lineno="27">
+ <object_name>
+ y
+ <parameter_default lineno="27">
a * 5
- <Comment>
+ <comment>
# parameter y
- <ExcessPositionalArguments lineno="27" name="args">
- <Comment>
+ <parameter excess_positional="1" lineno="27">
+ <object_name>
+ args
+ <comment>
# parameter args
- <Attribute lineno="33" name="f.function_attribute">
- <Expression lineno="33">
+ <attribute lineno="33">
+ <object_name>
+ f.function_attribute
+ <expression_value lineno="33">
1
- <Docstring lineno="34">
+ <docstring lineno="34">
f.function_attribute's docstring
(Comments are not implemented yet.)
compiler.parse() provides most of what's needed for this doctree, and
-"tokenize" can be used to get the rest. We can determine the line number from
-the compiler.parse() AST, and the TokenParser.rhs(lineno) method provides the
-rest.
+"tokenize" can be used to get the rest. We can determine the line
+number from the compiler.parse() AST, and the TokenParser.rhs(lineno)
+method provides the rest.
The Docutils Python reader component will transform this module doctree into a
Python-specific Docutils doctree, and then a `stylist transform`_ will
@@ -190,6 +215,8 @@
from compiler.consts import OP_ASSIGN
from compiler.visitor import ASTVisitor
from types import StringType, UnicodeType, TupleType
+from docutils.readers.python import pynodes
+from docutils.nodes import Text
def parse_module(module_text, filename):
@@ -200,168 +227,6 @@
compiler.walk(ast, visitor, walker=visitor)
return visitor.module
-
-class Node:
-
- """
- Base class for module documentation tree nodes.
- """
-
- def __init__(self, node):
- self.children = []
- """List of child nodes."""
-
- self.lineno = node.lineno
- """Line number of this node (or ``None``)."""
-
- def __str__(self, indent=' ', level=0):
- return ''.join(['%s%s\n' % (indent * level, repr(self))] +
- [child.__str__(indent, level+1)
- for child in self.children])
-
- def __repr__(self):
- parts = [self.__class__.__name__]
- for name, value in self.attlist():
- parts.append('%s="%s"' % (name, value))
- return '<%s>' % ' '.join(parts)
-
- def attlist(self, **atts):
- if self.lineno is not None:
- atts['lineno'] = self.lineno
- attlist = atts.items()
- attlist.sort()
- return attlist
-
- def append(self, node):
- self.children.append(node)
-
- def extend(self, node_list):
- self.children.extend(node_list)
-
-
-class TextNode(Node):
-
- def __init__(self, node, text):
- Node.__init__(self, node)
- self.text = trim_docstring(text)
-
- def __str__(self, indent=' ', level=0):
- prefix = indent * (level + 1)
- text = '\n'.join([prefix + line for line in self.text.splitlines()])
- return Node.__str__(self, indent, level) + text + '\n'
-
-
-class Module(Node):
-
- def __init__(self, node, filename):
- Node.__init__(self, node)
- self.filename = filename
-
- def attlist(self):
- return Node.attlist(self, filename=self.filename)
-
-
-class Docstring(TextNode): pass
-
-
-class Comment(TextNode): pass
-
-
-class Import(Node):
-
- def __init__(self, node, names, from_name=None):
- Node.__init__(self, node)
- self.names = names
- self.from_name = from_name
-
- def __str__(self, indent=' ', level=0):
- prefix = indent * (level + 1)
- lines = []
- for name, as in self.names:
- if as:
- lines.append('%s%s as %s' % (prefix, name, as))
- else:
- lines.append('%s%s' % (prefix, name))
- text = '\n'.join(lines)
- return Node.__str__(self, indent, level) + text + '\n'
-
- def attlist(self):
- if self.from_name:
- atts = {'from': self.from_name}
- else:
- atts = {}
- return Node.attlist(self, **atts)
-
-
-class Attribute(Node):
-
- def __init__(self, node, name):
- Node.__init__(self, node)
- self.name = name
-
- def attlist(self):
- return Node.attlist(self, name=self.name)
-
-
-class AttributeTuple(Node):
-
- def __init__(self, node, names):
- Node.__init__(self, node)
- self.names = names
-
- def attlist(self):
- return Node.attlist(self, names=' '.join(self.names))
-
-
-class Expression(TextNode):
-
- def __str__(self, indent=' ', level=0):
- prefix = indent * (level + 1)
- return '%s%s%s\n' % (Node.__str__(self, indent, level),
- prefix, self.text.encode('unicode-escape'))
-
-
-class Function(Attribute): pass
-
-
-class ParameterList(Node): pass
-
-
-class Parameter(Attribute): pass
-
-
-class ParameterTuple(AttributeTuple):
-
- def attlist(self):
- return Node.attlist(self, names=normalize_parameter_name(self.names))
-
-
-class ExcessPositionalArguments(Parameter): pass
-
-
-class ExcessKeywordArguments(Parameter): pass
-
-
-class Default(Expression): pass
-
-
-class Class(Node):
-
- def __init__(self, node, name, bases=None):
- Node.__init__(self, node)
- self.name = name
- self.bases = bases or []
-
- def attlist(self):
- atts = {'name': self.name}
- if self.bases:
- atts['bases'] = ' '.join(self.bases)
- return Node.attlist(self, **atts)
-
-
-class Method(Function): pass
-
-
class BaseVisitor(ASTVisitor):
def __init__(self, token_parser):
@@ -389,7 +254,7 @@
def visitConst(self, node):
if self.documentable:
if type(node.value) in (StringType, UnicodeType):
- self.documentable.append(Docstring(node, node.value))
+ self.documentable.append(make_docstring(node.value, node.lineno))
else:
self.documentable = None
@@ -418,25 +283,28 @@
self.module = None
def visitModule(self, node):
- self.module = module = Module(node, self.filename)
- if node.doc is not None:
- module.append(Docstring(node, node.doc))
+ self.module = module = pynodes.module_section()
+ module['filename'] = self.filename
+ append_docstring(module, node.doc, node.lineno)
self.context.append(module)
self.documentable = module
self.visit(node.node)
self.context.pop()
def visitImport(self, node):
- self.context[-1].append(Import(node, node.names))
+ self.context[-1] += make_import_group(names=node.names,
+ lineno=node.lineno)
self.documentable = None
def visitFrom(self, node):
self.context[-1].append(
- Import(node, node.names, from_name=node.modname))
+ make_import_group(names=node.names, from_name=node.modname,
+ lineno=node.lineno))
self.documentable = None
def visitFunction(self, node):
- visitor = FunctionVisitor(self.token_parser)
+ visitor = FunctionVisitor(self.token_parser,
+ function_class=pynodes.function_section)
compiler.walk(node, visitor, walker=visitor)
self.context[-1].append(visitor.function)
@@ -450,29 +318,32 @@
def __init__(self, token_parser):
BaseVisitor.__init__(self, token_parser)
- self.attributes = []
+ self.attributes = pynodes.class_attribute_section()
def visitAssign(self, node):
# Don't visit the expression itself, just the attribute nodes:
for child in node.nodes:
self.dispatch(child)
expression_text = self.token_parser.rhs(node.lineno)
- expression = Expression(node, expression_text)
+ expression = pynodes.expression_value()
+ expression.append(Text(expression_text))
for attribute in self.attributes:
attribute.append(expression)
def visitAssName(self, node):
- self.attributes.append(Attribute(node, node.name))
+ self.attributes.append(make_attribute(node.name,
+ lineno=node.lineno))
def visitAssTuple(self, node):
attributes = self.attributes
self.attributes = []
self.default_visit(node)
- names = [attribute.name for attribute in self.attributes]
- att_tuple = AttributeTuple(node, names)
- att_tuple.lineno = self.attributes[0].lineno
+ n = pynodes.attribute_tuple()
+ n.extend(self.attributes)
+ n['lineno'] = self.attributes[0]['lineno']
+ attributes.append(n)
self.attributes = attributes
- self.attributes.append(att_tuple)
+ #self.attributes.append(att_tuple)
def visitAssAttr(self, node):
self.default_visit(node, node.attrname)
@@ -481,23 +352,29 @@
self.default_visit(node, node.attrname + '.' + suffix)
def visitName(self, node, suffix):
- self.attributes.append(Attribute(node, node.name + '.' + suffix))
+ self.attributes.append(make_attribute(node.name + '.' + suffix,
+ lineno=node.lineno))
class FunctionVisitor(DocstringVisitor):
in_function = 0
- function_class = Function
+ def __init__(self, token_parser, function_class):
+ DocstringVisitor.__init__(self, token_parser)
+ self.function_class = function_class
+
def visitFunction(self, node):
if self.in_function:
self.documentable = None
# Don't bother with nested function definitions.
return
self.in_function = 1
- self.function = function = self.function_class(node, node.name)
- if node.doc is not None:
- function.append(Docstring(node, node.doc))
+ self.function = function = make_function_like_section(
+ name=node.name,
+ lineno=node.lineno,
+ doc=node.doc,
+ function_class=self.function_class)
self.context.append(function)
self.documentable = function
self.parse_parameter_list(node)
@@ -509,10 +386,11 @@
special = []
argnames = list(node.argnames)
if node.kwargs:
- special.append(ExcessKeywordArguments(node, argnames[-1]))
+ special.append(make_parameter(argnames[-1], excess_keyword=1))
argnames.pop()
if node.varargs:
- special.append(ExcessPositionalArguments(node, argnames[-1]))
+ special.append(make_parameter(argnames[-1],
+ excess_positional=1))
argnames.pop()
defaults = list(node.defaults)
defaults = [None] * (len(argnames) - len(defaults)) + defaults
@@ -521,17 +399,21 @@
#print >>sys.stderr, function_parameters
for argname, default in zip(argnames, defaults):
if type(argname) is TupleType:
- parameter = ParameterTuple(node, argname)
+ parameter = pynodes.parameter_tuple()
+ for tuplearg in argname:
+ parameter.append(make_parameter(tuplearg))
argname = normalize_parameter_name(argname)
else:
- parameter = Parameter(node, argname)
+ parameter = make_parameter(argname)
if default:
- parameter.append(Default(node, function_parameters[argname]))
+ n_default = pynodes.parameter_default()
+ n_default.append(Text(function_parameters[argname]))
+ parameter.append(n_default)
parameters.append(parameter)
if parameters or special:
special.reverse()
parameters.extend(special)
- parameter_list = ParameterList(node)
+ parameter_list = pynodes.parameter_list()
parameter_list.extend(parameters)
self.function.append(parameter_list)
@@ -552,9 +434,9 @@
self.in_class = 1
for base in node.bases:
self.visit(base)
- self.klass = klass = Class(node, node.name, self.bases)
- if node.doc is not None:
- klass.append(Docstring(node, node.doc))
+ self.klass = klass = make_class_section(node.name, self.bases,
+ doc=node.doc,
+ lineno=node.lineno)
self.context.append(klass)
self.documentable = klass
self.visit(node.code)
@@ -576,21 +458,19 @@
def visitFunction(self, node):
if node.name == '__init__':
- visitor = InitMethodVisitor(self.token_parser)
+ visitor = InitMethodVisitor(self.token_parser,
+ function_class=pynodes.method_section)
+ compiler.walk(node, visitor, walker=visitor)
else:
- visitor = MethodVisitor(self.token_parser)
- compiler.walk(node, visitor, walker=visitor)
+ visitor = FunctionVisitor(self.token_parser,
+ function_class=pynodes.method_section)
+ compiler.walk(node, visitor, walker=visitor)
self.context[-1].append(visitor.function)
-class MethodVisitor(FunctionVisitor):
+class InitMethodVisitor(FunctionVisitor, AssignmentVisitor): pass
- function_class = Method
-
-class InitMethodVisitor(MethodVisitor, AssignmentVisitor): pass
-
-
class TokenParser:
def __init__(self, text):
@@ -742,6 +622,85 @@
return parameters
+def make_docstring(doc, lineno):
+ n = pynodes.docstring()
+ if lineno:
+ # Really, only module docstrings don't have a line
+ # (@@: but maybe they should)
+ n['lineno'] = lineno
+ n.append(Text(doc))
+ return n
+
+def append_docstring(node, doc, lineno):
+ if doc:
+ node.append(make_docstring(doc, lineno))
+
+def make_class_section(name, bases, lineno, doc):
+ n = pynodes.class_section()
+ n['lineno'] = lineno
+ n.append(make_object_name(name))
+ for base in bases:
+ b = pynodes.class_base()
+ b.append(make_object_name(base))
+ n.append(b)
+ append_docstring(n, doc, lineno)
+ return n
+
+def make_object_name(name):
+ n = pynodes.object_name()
+ n.append(Text(name))
+ return n
+
+def make_function_like_section(name, lineno, doc, function_class):
+ n = function_class()
+ n['lineno'] = lineno
+ n.append(make_object_name(name))
+ append_docstring(n, doc, lineno)
+ return n
+
+def make_import_group(names, lineno, from_name=None):
+ n = pynodes.import_group()
+ n['lineno'] = lineno
+ if from_name:
+ n_from = pynodes.import_from()
+ n_from.append(Text(from_name))
+ n.append(n_from)
+ for name, alias in names:
+ n_name = pynodes.import_name()
+ n_name.append(Text(name))
+ if alias:
+ n_alias = pynodes.import_alias()
+ n_alias.append(Text(alias))
+ n_name.append(n_alias)
+ n.append(n_name)
+ return n
+
+def make_class_attribute(name, lineno):
+ n = pynodes.class_attribute()
+ n['lineno'] = lineno
+ n.append(Text(name))
+ return n
+
+def make_attribute(name, lineno):
+ n = pynodes.attribute()
+ n['lineno'] = lineno
+ n.append(make_object_name(name))
+ return n
+
+def make_parameter(name, excess_keyword=0, excess_positional=0):
+ """
+ excess_keyword and excess_positional must be either 1 or 0, and
+ not both of them can be 1.
+ """
+ n = pynodes.parameter()
+ n.append(make_object_name(name))
+ assert not excess_keyword or not excess_positional
+ if excess_keyword:
+ n['excess_keyword'] = 1
+ if excess_positional:
+ n['excess_positional'] = 1
+ return n
+
def trim_docstring(text):
"""
Trim indentation and blank lines from docstring text & return it.
@@ -780,3 +739,18 @@
return '(%s)' % ', '.join([normalize_parameter_name(n) for n in name])
else:
return name
+
+if __name__ == '__main__':
+ import sys
+ args = sys.argv[1:]
+ if args[0] == '-v':
+ filename = args[1]
+ module_text = open(filename).read()
+ ast = compiler.parse(module_text)
+ visitor = compiler.visitor.ExampleASTVisitor()
+ compiler.walk(ast, visitor, walker=visitor, verbose=1)
+ else:
+ filename = args[0]
+ content = open(filename).read()
+ print parse_module(content, filename).pformat()
+
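For reference, the updated parser can also be driven directly from Python instead of via the __main__ block above. A minimal sketch ('example.py' is a placeholder file name):

from docutils.readers.python.moduleparser import parse_module

source = open('example.py').read()
module = parse_module(source, 'example.py')
print module.pformat()   # pseudo-XML dump, like the doctree shown earlier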
Added: Zope3/trunk/src/docutils/readers/python/pynodes.py
===================================================================
--- Zope3/trunk/src/docutils/readers/python/pynodes.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/readers/python/pynodes.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,87 @@
+#! /usr/bin/env python
+
+"""
+:Author: David Goodger
+:Contact: goodger at users.sourceforge.net
+:Revision: $Revision: 1881 $
+:Date: $Date: 2004-03-24 00:21:11 +0100 (Wed, 24 Mar 2004) $
+:Copyright: This module has been placed in the public domain.
+
+"""
+
+from docutils import nodes
+from docutils.nodes import Element, TextElement, Structural, Inline, Part, \
+ Text
+import types
+
+# This is the parent class of all the other pynode classes:
+class PythonStructural(Structural): pass
+
+# =====================
+# Structural Elements
+# =====================
+
+class module_section(PythonStructural, Element): pass
+class class_section(PythonStructural, Element): pass
+class class_base(PythonStructural, Element): pass
+class method_section(PythonStructural, Element): pass
+class attribute(PythonStructural, Element): pass
+class function_section(PythonStructural, Element): pass
+class class_attribute_section(PythonStructural, Element): pass
+class class_attribute(PythonStructural, Element): pass
+class expression_value(PythonStructural, Element): pass
+class attribute(PythonStructural, Element): pass
+
+# Structural Support Elements
+# ---------------------------
+
+class parameter_list(PythonStructural, Element): pass
+class parameter_tuple(PythonStructural, Element): pass
+class parameter_default(PythonStructural, TextElement): pass
+class import_group(PythonStructural, TextElement): pass
+class import_from(PythonStructural, TextElement): pass
+class import_name(PythonStructural, TextElement): pass
+class import_alias(PythonStructural, TextElement): pass
+class docstring(PythonStructural, Element): pass
+
+# =================
+# Inline Elements
+# =================
+
+# These elements cannot become references until the second
+# pass. Initially, we'll use "reference" or "name".
+
+class object_name(PythonStructural, TextElement): pass
+class parameter_list(PythonStructural, TextElement): pass
+class parameter(PythonStructural, TextElement): pass
+class parameter_default(PythonStructural, TextElement): pass
+class class_attribute(PythonStructural, TextElement): pass
+class attribute_tuple(PythonStructural, TextElement): pass
+
+# =================
+# Unused Elements
+# =================
+
+# These were part of the model, and maybe should be in the future, but
+# aren't now.
+#class package_section(PythonStructural, Element): pass
+#class module_attribute_section(PythonStructural, Element): pass
+#class instance_attribute_section(PythonStructural, Element): pass
+#class module_attribute(PythonStructural, TextElement): pass
+#class instance_attribute(PythonStructural, TextElement): pass
+#class exception_class(PythonStructural, TextElement): pass
+#class warning_class(PythonStructural, TextElement): pass
+
+
+# Collect all the classes we've written above
+def install_node_class_names():
+ node_class_names = []
+ for name, var in globals().items():
+ if (type(var) is types.ClassType
+ and issubclass(var, PythonStructural) \
+ and name.lower() == name):
+ node_class_names.append(var.tagname or name)
+ # Register the new node names with GenericNodeVisitor and
+ # SpecificNodeVisitor:
+ nodes._add_node_class_names(node_class_names)
+install_node_class_names()
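Since these classes derive from the standard docutils Element/TextElement, a tree can be assembled with the usual node API. A minimal sketch, assuming only the docutils.nodes behaviour inherited above:

from docutils.readers.python import pynodes
from docutils.nodes import Text

func = pynodes.function_section()
func['lineno'] = 27
name = pynodes.object_name()
name.append(Text('f'))
func.append(name)
doc = pynodes.docstring()
doc.append(Text("f's docstring"))
func.append(doc)
print func.pformat()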
Modified: Zope3/trunk/src/docutils/readers/standalone.py
===================================================================
--- Zope3/trunk/src/docutils/readers/standalone.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/readers/standalone.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3353 $
+# Date: $Date: 2005-05-19 02:49:14 +0200 (Thu, 19 May 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -12,9 +12,8 @@
import sys
-from docutils import readers
+from docutils import frontend, readers
from docutils.transforms import frontmatter, references
-from docutils.parsers.rst import Parser
class Reader(readers.Reader):
@@ -32,16 +31,32 @@
'document title (and subsequent section title to document '
'subtitle promotion; enabled by default).',
['--no-doc-title'],
- {'dest': 'doctitle_xform', 'action': 'store_false', 'default': 1}),
+ {'dest': 'doctitle_xform', 'action': 'store_false', 'default': 1,
+ 'validator': frontend.validate_boolean}),
('Disable the bibliographic field list transform (enabled by '
'default).',
['--no-doc-info'],
- {'dest': 'docinfo_xform', 'action': 'store_false', 'default': 1}),))
+ {'dest': 'docinfo_xform', 'action': 'store_false', 'default': 1,
+ 'validator': frontend.validate_boolean}),
+ ('Activate the promotion of lone subsection titles to '
+ 'section subtitles (disabled by default).',
+ ['--section-subtitles'],
+ {'dest': 'sectsubtitle_xform', 'action': 'store_true', 'default': 0,
+ 'validator': frontend.validate_boolean}),
+ ('Deactivate the promotion of lone subsection titles.',
+ ['--no-section-subtitles'],
+ {'dest': 'sectsubtitle_xform', 'action': 'store_false',
+ 'validator': frontend.validate_boolean}),
+ ))
+ config_section = 'standalone reader'
+ config_section_dependencies = ('readers',)
+
default_transforms = (references.Substitutions,
+ references.PropagateTargets,
frontmatter.DocTitle,
+ frontmatter.SectionSubTitle,
frontmatter.DocInfo,
- references.ChainedTargets,
references.AnonymousHyperlinks,
references.IndirectHyperlinks,
references.Footnotes,
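The new --section-subtitles behaviour can also be switched on programmatically through settings_overrides. A hedged sketch using the standard docutils.core.publish_string API:

from docutils.core import publish_string

source = """\
Document Title
==============

Section
-------

Lone Subsection
~~~~~~~~~~~~~~~

Body text.
"""
print publish_string(source, writer_name='pseudoxml',
                     settings_overrides={'sectsubtitle_xform': 1})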
Modified: Zope3/trunk/src/docutils/statemachine.py
===================================================================
--- Zope3/trunk/src/docutils/statemachine.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/statemachine.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:04 $
+# Revision: $Revision: 2299 $
+# Date: $Date: 2004-06-17 23:46:50 +0200 (Thu, 17 Jun 2004) $
# Copyright: This module has been placed in the public domain.
"""
@@ -266,7 +266,8 @@
transitions = None
state = self.get_state(next_state)
except:
- self.error()
+ if self.debug:
+ self.error()
raise
self.observers = []
return results
@@ -343,6 +344,10 @@
finally:
self.notify_observers()
+ def get_source(self, line_offset):
+ """Return source of line at absolute line offset `line_offset`."""
+ return self.input_lines.source(line_offset - self.input_offset)
+
def abs_line_offset(self):
"""Return line offset of current line, from beginning of file."""
return self.line_offset + self.input_offset
@@ -1294,11 +1299,11 @@
"""A `ViewList` with string-specific methods."""
- def strip_indent(self, length, start=0, end=sys.maxint):
+ def trim_left(self, length, start=0, end=sys.maxint):
"""
- Strip `length` characters off the beginning of each item, in-place,
+ Trim `length` characters off the beginning of each item, in-place,
from index `start` to `end`. No whitespace-checking is done on the
- stripped text. Does not affect slice parent.
+ trimmed text. Does not affect slice parent.
"""
self.data[start:end] = [line[length:]
for line in self.data[start:end]]
@@ -1381,10 +1386,21 @@
if first_indent is not None and block:
block.data[0] = block.data[0][first_indent:]
if indent and strip_indent:
- block.strip_indent(indent, start=(first_indent is not None))
+ block.trim_left(indent, start=(first_indent is not None))
return block, indent or 0, blank_finish
+ def get_2D_block(self, top, left, bottom, right, strip_indent=1):
+ block = self[top:bottom]
+ indent = right
+ for i in range(len(block.data)):
+ block.data[i] = line = block.data[i][left:right].rstrip()
+ if line:
+ indent = min(indent, len(line) - len(line.lstrip()))
+ if strip_indent and 0 < indent < right:
+ block.data = [line[indent:] for line in block.data]
+ return block
+
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
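The new get_2D_block method cuts a rectangular region out of a list of lines. A minimal sketch (assumption: constructing a StringList from a bare list is enough for illustration; real callers also pass source information):

from docutils.statemachine import StringList

lines = StringList(['col A | col B',
                    'one   | two',
                    'three | four'])
right_column = lines.get_2D_block(top=1, left=8, bottom=3, right=13)
print right_column.data   # ['two', 'four']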
Modified: Zope3/trunk/src/docutils/transforms/__init__.py
===================================================================
--- Zope3/trunk/src/docutils/transforms/__init__.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/transforms/__init__.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3066 $
+# Date: $Date: 2005-03-21 18:33:42 +0100 (Mon, 21 Mar 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -60,6 +60,7 @@
document.settings.language_code)
"""Language module local to this document."""
+
def apply(self):
"""Override to apply the transform to the document tree."""
raise NotImplementedError('subclass must override this method')
@@ -76,7 +77,8 @@
default_transforms = (universal.Decorations,
universal.FinalChecks,
- universal.Messages)
+ universal.Messages,
+ universal.FilterMessages)
"""These transforms are applied to all document trees."""
def __init__(self, document):
@@ -84,6 +86,9 @@
"""List of transforms to apply. Each item is a 3-tuple:
``(priority string, transform class, pending node or None)``."""
+ self.unknown_reference_resolvers = []
+ """List of hook functions which assist in resolving references"""
+
self.document = document
"""The `nodes.document` object this Transformer is attached to."""
@@ -149,6 +154,15 @@
self.add_transforms(component.default_transforms)
self.components[component.component_type] = component
self.sorted = 0
+ # Setup all of the reference resolvers for this transformer. Each
+ # component of this transformer is able to register its own helper
+ # functions to help resolve references.
+ unknown_reference_resolvers = []
+ for i in components:
+ unknown_reference_resolvers.extend(i.unknown_reference_resolvers)
+ decorated_list = [(f.priority, f) for f in unknown_reference_resolvers]
+ decorated_list.sort()
+ self.unknown_reference_resolvers.extend([f[1] for f in decorated_list])
def apply_transforms(self):
"""Apply all of the stored transforms, in priority order."""
Modified: Zope3/trunk/src/docutils/transforms/components.py
===================================================================
--- Zope3/trunk/src/docutils/transforms/components.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/transforms/components.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 853 $
+# Date: $Date: 2002-10-24 02:51:10 +0200 (Thu, 24 Oct 2002) $
# Copyright: This module has been placed in the public domain.
"""
Modified: Zope3/trunk/src/docutils/transforms/frontmatter.py
===================================================================
--- Zope3/trunk/src/docutils/transforms/frontmatter.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/transforms/frontmatter.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,17 +1,19 @@
# Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3351 $
+# Date: $Date: 2005-05-19 00:27:52 +0200 (Thu, 19 May 2005) $
# Copyright: This module has been placed in the public domain.
"""
-Transforms related to the front matter of a document (information
-found before the main text):
+Transforms related to the front matter of a document or a section
+(information found before the main text):
- `DocTitle`: Used to transform a lone top level section's title to
the document title, and promote a remaining lone top-level section's
title to the document subtitle.
+- `SectionSubTitle`: Used to transform a lone subsection into a subtitle.
+
- `DocInfo`: Used to transform a bibliographic field list into docinfo
elements.
"""
@@ -23,9 +25,102 @@
from docutils.transforms import TransformError, Transform
-class DocTitle(Transform):
+class TitlePromoter(Transform):
"""
+ Abstract base class for DocTitle and SectionSubTitle transforms.
+ """
+
+ def promote_title(self, node):
+ """
+ Transform the following tree::
+
+ <node>
+ <section>
+ <title>
+ ...
+
+ into ::
+
+ <node>
+ <title>
+ ...
+
+ `node` is normally a document.
+ """
+ # `node` must not have a title yet.
+ assert not (len(node) and isinstance(node[0], nodes.title))
+ section, index = self.candidate_index(node)
+ if index is None:
+ return None
+ # Transfer the section's attributes to the node:
+ node.attributes.update(section.attributes)
+ # setup_child is called automatically for all nodes.
+ node[:] = (section[:1] # section title
+ + node[:index] # everything that was in the
+ # node before the section
+ + section[1:]) # everything that was in the section
+ assert isinstance(node[0], nodes.title)
+ return 1
+
+ def promote_subtitle(self, node):
+ """
+ Transform the following node tree::
+
+ <node>
+ <title>
+ <section>
+ <title>
+ ...
+
+ into ::
+
+ <node>
+ <title>
+ <subtitle>
+ ...
+ """
+ subsection, index = self.candidate_index(node)
+ if index is None:
+ return None
+ subtitle = nodes.subtitle()
+ # Transfer the subsection's attributes to the new subtitle:
+ # This causes trouble with list attributes! To do: Write a
+ # test case which catches direct access to the `attributes`
+ # dictionary and/or write a test case which shows problems in
+ # this particular case.
+ subtitle.attributes.update(subsection.attributes)
+ # We're losing the subtitle's attributes here! To do: Write a
+ # test case which shows this behavior.
+ # Transfer the contents of the subsection's title to the
+ # subtitle:
+ subtitle[:] = subsection[0][:]
+ node[:] = (node[:1] # title
+ + [subtitle]
+ # everything that was before the section:
+ + node[1:index]
+ # everything that was in the subsection:
+ + subsection[1:])
+ return 1
+
+ def candidate_index(self, node):
+ """
+ Find and return the promotion candidate and its index.
+
+ Return (None, None) if no valid candidate was found.
+ """
+ index = node.first_child_not_matching_class(
+ nodes.PreBibliographic)
+ if index is None or len(node) > (index + 1) or \
+ not isinstance(node[index], nodes.section):
+ return None, None
+ else:
+ return node[index], index
+
+
+class DocTitle(TitlePromoter):
+
+ """
In reStructuredText_, there is no way to specify a document title
and subtitle explicitly. Instead, we can supply the document title
(and possibly the subtitle as well) implicitly, and use this
@@ -50,7 +145,7 @@
Once parsed, it looks like this::
<document>
- <section name="top-level title">
+ <section names="top-level title">
<title>
Top-Level Title
<paragraph>
@@ -58,7 +153,7 @@
After running the DocTitle transform, we have::
- <document name="top-level title">
+ <document names="top-level title">
<title>
Top-Level Title
<paragraph>
@@ -85,10 +180,10 @@
After parsing and running the Section Promotion transform, the
result is::
- <document name="top-level title">
+ <document names="top-level title">
<title>
Top-Level Title
- <subtitle name="second-level title">
+ <subtitle names="second-level title">
Second-Level Title
<paragraph>
A paragraph.
@@ -107,56 +202,49 @@
def apply(self):
if not getattr(self.document.settings, 'doctitle_xform', 1):
return
- if self.promote_document_title():
- self.promote_document_subtitle()
+ if self.promote_title(self.document):
+ self.promote_subtitle(self.document)
- def promote_document_title(self):
- section, index = self.candidate_index()
- if index is None:
- return None
- document = self.document
- # Transfer the section's attributes to the document element (at root):
- document.attributes.update(section.attributes)
- document[:] = (section[:1] # section title
- + document[:index] # everything that was in the
- # document before the section
- + section[1:]) # everything that was in the section
- return 1
- def promote_document_subtitle(self):
- subsection, index = self.candidate_index()
- if index is None:
- return None
- subtitle = nodes.subtitle()
- # Transfer the subsection's attributes to the new subtitle:
- subtitle.attributes.update(subsection.attributes)
- # Transfer the contents of the subsection's title to the subtitle:
- subtitle[:] = subsection[0][:]
- document = self.document
- document[:] = (document[:1] # document title
- + [subtitle]
- # everything that was before the section:
- + document[1:index]
- # everything that was in the subsection:
- + subsection[1:])
- return 1
+class SectionSubTitle(TitlePromoter):
- def candidate_index(self):
- """
- Find and return the promotion candidate and its index.
+ """
+ This works like document subtitles, but for sections. For example, ::
- Return (None, None) if no valid candidate was found.
- """
- document = self.document
- index = document.first_child_not_matching_class(
- nodes.PreBibliographic)
- if index is None or len(document) > (index + 1) or \
- not isinstance(document[index], nodes.section):
- return None, None
- else:
- return document[index], index
+ <section>
+ <title>
+ Title
+ <section>
+ <title>
+ Subtitle
+ ...
+ is transformed into ::
+ <section>
+ <title>
+ Title
+ <subtitle>
+ Subtitle
+ ...
+
+ For details refer to the docstring of DocTitle.
+ """
+
+ default_priority = 350
+
+ def apply(self):
+ if not getattr(self.document.settings, 'sectsubtitle_xform', 1):
+ return
+ for section in self.document.traverse(lambda n:
+ isinstance(n, nodes.section)):
+ # On our way through the node tree, we are deleting
+ # sections, but we call self.promote_subtitle for those
+ # sections nonetheless. To do: Write a test case which
+ # shows the problem and discuss on Docutils-develop.
+ self.promote_subtitle(section)
+
+
class DocInfo(Transform):
"""
@@ -189,7 +277,7 @@
Status
<field_body>
<paragraph>
- $RCSfile: frontmatter.py,v $
+ $RCSfile$
...
After running the bibliographic field list transform, the
@@ -258,11 +346,10 @@
candidate = document[index]
if isinstance(candidate, nodes.field_list):
biblioindex = document.first_child_not_matching_class(
- nodes.Titular)
+ (nodes.Titular, nodes.Decorative))
nodelist = self.extract_bibliographic(candidate)
del document[index] # untransformed field list (candidate)
document[biblioindex:biblioindex] = nodelist
- return
def extract_bibliographic(self, field_list):
docinfo = nodes.docinfo()
@@ -294,7 +381,7 @@
raise TransformError
title = nodes.title(name, labels[canonical])
topics[canonical] = biblioclass(
- '', title, CLASS=canonical, *field[1].children)
+ '', title, classes=[canonical], *field[1].children)
else:
docinfo.append(biblioclass('', *field[1].children))
except TransformError:
@@ -334,10 +421,10 @@
return 1
rcs_keyword_substitutions = [
- (re.compile(r'\$' r'Date: (\d\d\d\d)/(\d\d)/(\d\d) [\d:]+ \$$',
+ (re.compile(r'\$' r'Date: (\d\d\d\d)/(\d\d)/(\d\d) [\d:]+ \$',
re.IGNORECASE), r'\1-\2-\3'),
- (re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'),
- (re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),]
+ (re.compile(r'\$' r'RCSfile: (.+),v \$', re.IGNORECASE), r'\1'),
+ (re.compile(r'\$[a-zA-Z]+: (.+) \$'), r'\1'),]
def extract_authors(self, field, name, docinfo):
try:
Modified: Zope3/trunk/src/docutils/transforms/misc.py
===================================================================
--- Zope3/trunk/src/docutils/transforms/misc.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/transforms/misc.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3155 $
+# Date: $Date: 2005-04-02 23:57:06 +0200 (Sat, 02 Apr 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -36,22 +36,29 @@
class ClassAttribute(Transform):
+ """
+ Move the "class" attribute specified in the "pending" node into the
+ immediately following non-comment element.
+ """
+
default_priority = 210
def apply(self):
pending = self.startnode
- class_value = pending.details['class']
parent = pending.parent
child = pending
while parent:
+ # Check for appropriate following siblings:
for index in range(parent.index(child) + 1, len(parent)):
element = parent[index]
- if isinstance(element, nodes.comment):
+ if (isinstance(element, nodes.Invisible) or
+ isinstance(element, nodes.system_message)):
continue
- element.set_class(class_value)
+ element['classes'] += pending.details['class']
pending.parent.remove(pending)
return
else:
+ # At end of section or container; apply to sibling
child = parent
parent = parent.parent
error = self.document.reporter.error(
Modified: Zope3/trunk/src/docutils/transforms/parts.py
===================================================================
--- Zope3/trunk/src/docutils/transforms/parts.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/transforms/parts.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Authors: David Goodger, Ueli Schlaepfer, Dmitry Jemerov
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3199 $
+# Date: $Date: 2005-04-09 03:32:29 +0200 (Sat, 09 Apr 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -33,19 +33,28 @@
def apply(self):
self.maxdepth = self.startnode.details.get('depth', sys.maxint)
+ self.startvalue = self.startnode.details.get('start', 1)
+ self.prefix = self.startnode.details.get('prefix', '')
+ self.suffix = self.startnode.details.get('suffix', '')
self.startnode.parent.remove(self.startnode)
- self.update_section_numbers(self.document)
+ if self.document.settings.sectnum_xform:
+ self.update_section_numbers(self.document)
def update_section_numbers(self, node, prefix=(), depth=0):
depth += 1
- sectnum = 1
+ if prefix:
+ sectnum = 1
+ else:
+ sectnum = self.startvalue
for child in node:
if isinstance(child, nodes.section):
numbers = prefix + (str(sectnum),)
title = child[0]
# Use for spacing:
generated = nodes.generated(
- '', '.'.join(numbers) + u'\u00a0' * 3, CLASS='sectnum')
+ '', (self.prefix + '.'.join(numbers) + self.suffix
+ + u'\u00a0' * 3),
+ classes=['sectnum'])
title.insert(0, generated)
title['auto'] = 1
if depth < self.maxdepth:
@@ -58,11 +67,11 @@
"""
This transform generates a table of contents from the entire document tree
or from a single branch. It locates "section" elements and builds them
- into a nested bullet list, which is placed within a "topic". A title is
- either explicitly specified, taken from the appropriate language module,
- or omitted (local table of contents). The depth may be specified.
- Two-way references between the table of contents and section titles are
- generated (requires Writer support).
+ into a nested bullet list, which is placed within a "topic" created by the
+ contents directive. A title is either explicitly specified, taken from
+ the appropriate language module, or omitted (local table of contents).
+ The depth may be specified. Two-way references between the table of
+ contents and section titles are generated (requires Writer support).
This transform requires a startnode, which contains generation
options and provides the location for the generated table of contents (the
@@ -72,41 +81,25 @@
default_priority = 720
def apply(self):
- topic = nodes.topic(CLASS='contents')
details = self.startnode.details
- if details.has_key('class'):
- topic.set_class(details['class'])
- title = details['title']
if details.has_key('local'):
- startnode = self.startnode.parent
- # @@@ generate an error if the startnode (directive) not at
- # section/document top-level? Drag it up until it is?
- while not isinstance(startnode, nodes.Structural):
+ startnode = self.startnode.parent.parent
+ while not (isinstance(startnode, nodes.section)
+ or isinstance(startnode, nodes.document)):
+ # find the ToC root: a direct ancestor of startnode
startnode = startnode.parent
else:
startnode = self.document
- if not title:
- title = nodes.title('', self.language.labels['contents'])
- if title:
- name = title.astext()
- topic += title
- else:
- name = self.language.labels['contents']
- name = nodes.fully_normalize_name(name)
- if not self.document.has_name(name):
- topic['name'] = name
- self.document.note_implicit_target(topic)
- self.toc_id = topic['id']
+ self.toc_id = self.startnode.parent['ids'][0]
if details.has_key('backlinks'):
self.backlinks = details['backlinks']
else:
self.backlinks = self.document.settings.toc_backlinks
contents = self.build_contents(startnode)
if len(contents):
- topic += contents
- self.startnode.parent.replace(self.startnode, topic)
+ self.startnode.parent.replace(self.startnode, contents)
else:
- self.startnode.parent.remove(self.startnode)
+ self.startnode.parent.parent.remove(self.startnode.parent)
def build_contents(self, node, level=0):
level += 1
@@ -123,15 +116,17 @@
title = section[0]
auto = title.get('auto') # May be set by SectNum.
entrytext = self.copy_and_filter(title)
- reference = nodes.reference('', '', refid=section['id'],
+ reference = nodes.reference('', '', refid=section['ids'][0],
*entrytext)
ref_id = self.document.set_id(reference)
entry = nodes.paragraph('', '', reference)
item = nodes.list_item('', entry)
- if self.backlinks == 'entry':
- title['refid'] = ref_id
- elif self.backlinks == 'top':
- title['refid'] = self.toc_id
+ if (self.backlinks in ('entry', 'top') and title.next_node(
+ lambda n: isinstance(n, nodes.reference)) is None):
+ if self.backlinks == 'entry':
+ title['refid'] = ref_id
+ elif self.backlinks == 'top':
+ title['refid'] = self.toc_id
if level < depth:
subsects = self.build_contents(section, level)
item += subsects
@@ -139,7 +134,7 @@
if entries:
contents = nodes.bullet_list('', *entries)
if auto:
- contents.set_class('auto-toc')
+ contents['classes'].append('auto-toc')
return contents
else:
return []
@@ -154,7 +149,7 @@
class ContentsFilter(nodes.TreeCopyVisitor):
def get_entry_text(self):
- return self.get_tree_copy().get_children()
+ return self.get_tree_copy().children
def visit_citation_reference(self, node):
raise nodes.SkipNode
Modified: Zope3/trunk/src/docutils/transforms/peps.py
===================================================================
--- Zope3/trunk/src/docutils/transforms/peps.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/transforms/peps.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3129 $
+# Date: $Date: 2005-03-26 17:21:28 +0100 (Sat, 26 Mar 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -19,7 +19,7 @@
import os
import re
import time
-from docutils import nodes, utils
+from docutils import nodes, utils, languages
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
from docutils.transforms import parts, references, misc
@@ -46,7 +46,7 @@
raise DataError('Document tree is empty.')
header = self.document[0]
if not isinstance(header, nodes.field_list) or \
- header.get('class') != 'rfc2822':
+ 'rfc2822' not in header['classes']:
raise DataError('Document does not begin with an RFC-2822 '
'header; it is not a PEP.')
pep = None
@@ -118,7 +118,9 @@
for refpep in re.split(',?\s+', body.astext()):
pepno = int(refpep)
newbody.append(nodes.reference(
- refpep, refpep, refuri=self.pep_url % pepno))
+ refpep, refpep,
+ refuri=(self.document.settings.pep_base_url
+ + self.pep_url % pepno)))
newbody.append(space)
para[:] = newbody[:-1] # drop trailing space
elif name == 'last-modified':
@@ -128,7 +130,7 @@
para[:] = [nodes.reference('', date, refuri=cvs_url)]
elif name == 'content-type':
pep_type = para.astext()
- uri = self.pep_url % 12
+ uri = self.document.settings.pep_base_url + self.pep_url % 12
para[:] = [nodes.reference('', pep_type, refuri=uri)]
elif name == 'version' and len(body):
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
@@ -137,15 +139,24 @@
class Contents(Transform):
"""
- Insert a table of contents transform placeholder into the document after
- the RFC 2822 header.
+ Insert an empty table of contents topic and a transform placeholder into
+ the document after the RFC 2822 header.
"""
default_priority = 380
def apply(self):
- pending = nodes.pending(parts.Contents, {'title': None})
- self.document.insert(1, pending)
+ language = languages.get_language(self.document.settings.language_code)
+ name = language.labels['contents']
+ title = nodes.title('', name)
+ topic = nodes.topic('', title, classes=['contents'])
+ name = nodes.fully_normalize_name(name)
+ if not self.document.has_name(name):
+ topic['names'].append(name)
+ self.document.note_implicit_target(topic)
+ pending = nodes.pending(parts.Contents)
+ topic += pending
+ self.document.insert(1, topic)
self.document.note_pending(pending)
@@ -233,7 +244,7 @@
node.parent.replace(node, mask_email(node))
def visit_field_list(self, node):
- if node.hasattr('class') and node['class'] == 'rfc2822':
+ if 'rfc2822' in node['classes']:
raise nodes.SkipNode
def visit_tgroup(self, node):
@@ -243,7 +254,7 @@
def visit_colspec(self, node):
self.entry += 1
if self.pep_table and self.entry == 2:
- node['class'] = 'num'
+ node['classes'].append('num')
def visit_row(self, node):
self.entry = 0
@@ -251,13 +262,14 @@
def visit_entry(self, node):
self.entry += 1
if self.pep_table and self.entry == 2 and len(node) == 1:
- node['class'] = 'num'
+ node['classes'].append('num')
p = node[0]
if isinstance(p, nodes.paragraph) and len(p) == 1:
text = p.astext()
try:
pep = int(text)
- ref = self.pep_url % pep
+ ref = (self.document.settings.pep_base_url
+ + self.pep_url % pep)
p[0] = nodes.reference(text, text, refuri=ref)
except ValueError:
pass
Modified: Zope3/trunk/src/docutils/transforms/references.py
===================================================================
--- Zope3/trunk/src/docutils/transforms/references.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/transforms/references.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3149 $
+# Date: $Date: 2005-03-30 22:51:06 +0200 (Wed, 30 Mar 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -16,89 +16,77 @@
from docutils.transforms import TransformError, Transform
-indices = xrange(sys.maxint)
+class PropagateTargets(Transform):
-
-class ChainedTargets(Transform):
-
"""
- Attributes "refuri" and "refname" are migrated from the final direct
- target up the chain of contiguous adjacent internal targets, using
- `ChainedTargetResolver`.
- """
+ Propagate empty internal targets to the next element.
- default_priority = 420
+ Given the following nodes::
- def apply(self):
- visitor = ChainedTargetResolver(self.document)
- self.document.walk(visitor)
+ <target ids="internal1" names="internal1">
+ <target anonymous="1" ids="id1">
+ <target ids="internal2" names="internal2">
+ <paragraph>
+ This is a test.
+ PropagateTargets propagates the ids and names of the internal
+ targets preceding the paragraph to the paragraph itself::
-class ChainedTargetResolver(nodes.SparseNodeVisitor):
-
+ <target refid="internal1">
+ <target anonymous="1" refid="id1">
+ <target refid="internal2">
+ <paragraph ids="internal2 id1 internal1" names="internal2 internal1">
+ This is a test.
"""
- Copy reference attributes up the length of a hyperlink target chain.
- "Chained targets" are multiple adjacent internal hyperlink targets which
- "point to" an external or indirect target. After the transform, all
- chained targets will effectively point to the same place.
+ default_priority = 260
- Given the following ``document`` as input::
+ def apply(self):
+ for target in self.document.internal_targets:
+ if not (len(target) == 0 and
+ not (target.attributes.has_key('refid') or
+ target.attributes.has_key('refuri') or
+ target.attributes.has_key('refname'))):
+ continue
+ next_node = target.next_node(ascend=1)
+ # Do not move names and ids into Invisibles (we'd lose the
+ # attributes) or different Targetables (e.g. footnotes).
+ if (next_node is not None and
+ ((not isinstance(next_node, nodes.Invisible) and
+ not isinstance(next_node, nodes.Targetable)) or
+ isinstance(next_node, nodes.target))):
+ next_node['ids'].extend(target['ids'])
+ next_node['names'].extend(target['names'])
+ # Set defaults for next_node.expect_referenced_by_name/id.
+ if not hasattr(next_node, 'expect_referenced_by_name'):
+ next_node.expect_referenced_by_name = {}
+ if not hasattr(next_node, 'expect_referenced_by_id'):
+ next_node.expect_referenced_by_id = {}
+ for id in target['ids']:
+ # Update IDs to node mapping.
+ self.document.ids[id] = next_node
+ # If next_node is referenced by id ``id``, this
+ # target shall be marked as referenced.
+ next_node.expect_referenced_by_id[id] = target
+ for name in target['names']:
+ next_node.expect_referenced_by_name[name] = target
+ # If there are any expect_referenced_by_... attributes
+ # in target set, copy them to next_node.
+ next_node.expect_referenced_by_name.update(
+ getattr(target, 'expect_referenced_by_name', {}))
+ next_node.expect_referenced_by_id.update(
+ getattr(target, 'expect_referenced_by_id', {}))
+ # Set refid to point to the first former ID of target
+ # which is now an ID of next_node.
+ target['refid'] = target['ids'][0]
+ # Clear ids and names; they have been moved to
+ # next_node.
+ target['ids'] = []
+ target['names'] = []
+ self.document.note_refid(target)
+ self.document.note_internal_target(next_node)
- <document>
- <target id="a" name="a">
- <target id="b" name="b">
- <target id="c" name="c" refuri="http://chained.external.targets">
- <target id="d" name="d">
- <paragraph>
- I'm known as "d".
- <target id="e" name="e">
- <target id="id1">
- <target id="f" name="f" refname="d">
- ``ChainedTargetResolver(document).walk()`` will transform the above into::
-
- <document>
- <target id="a" name="a" refuri="http://chained.external.targets">
- <target id="b" name="b" refuri="http://chained.external.targets">
- <target id="c" name="c" refuri="http://chained.external.targets">
- <target id="d" name="d">
- <paragraph>
- I'm known as "d".
- <target id="e" name="e" refname="d">
- <target id="id1" refname="d">
- <target id="f" name="f" refname="d">
- """
-
- def unknown_visit(self, node):
- pass
-
- def visit_target(self, node):
- if node.hasattr('refuri'):
- attname = 'refuri'
- call_if_named = self.document.note_external_target
- elif node.hasattr('refname'):
- attname = 'refname'
- call_if_named = self.document.note_indirect_target
- elif node.hasattr('refid'):
- attname = 'refid'
- call_if_named = None
- else:
- return
- attval = node[attname]
- index = node.parent.index(node)
- for i in range(index - 1, -1, -1):
- sibling = node.parent[i]
- if not isinstance(sibling, nodes.target) \
- or sibling.hasattr('refuri') \
- or sibling.hasattr('refname') \
- or sibling.hasattr('refid'):
- break
- sibling[attname] = attval
- if sibling.hasattr('name') and call_if_named:
- call_if_named(sibling)
-
-
class AnonymousHyperlinks(Transform):
"""
@@ -109,8 +97,8 @@
internal
<reference anonymous="1">
external
- <target anonymous="1" id="id1">
- <target anonymous="1" id="id2" refuri="http://external">
+ <target anonymous="1" ids="id1">
+ <target anonymous="1" ids="id2" refuri="http://external">
Corresponding references are linked via "refid" or resolved via "refuri"::
@@ -119,8 +107,8 @@
text
<reference anonymous="1" refuri="http://external">
external
- <target anonymous="1" id="id1">
- <target anonymous="1" id="id2" refuri="http://external">
+ <target anonymous="1" ids="id1">
+ <target anonymous="1" ids="id2" refuri="http://external">
"""
default_priority = 440
@@ -140,16 +128,28 @@
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.parent.replace(ref, prb)
+ for target in self.document.anonymous_targets:
+ # Assume that all anonymous targets have been
+ # referenced to avoid generating lots of
+ # system_messages.
+ target.referenced = 1
return
for ref, target in zip(self.document.anonymous_refs,
self.document.anonymous_targets):
- if target.hasattr('refuri'):
- ref['refuri'] = target['refuri']
- ref.resolved = 1
- else:
- ref['refid'] = target['id']
- self.document.note_refid(ref)
target.referenced = 1
+ while 1:
+ if target.hasattr('refuri'):
+ ref['refuri'] = target['refuri']
+ ref.resolved = 1
+ break
+ else:
+ if not target['ids']:
+ # Propagated target.
+ target = self.document.ids[target['refid']]
+ continue
+ ref['refid'] = target['ids'][0]
+ self.document.note_refid(ref)
+ break
class IndirectHyperlinks(Transform):
@@ -213,14 +213,24 @@
self.resolve_indirect_references(target)
def resolve_indirect_target(self, target):
- refname = target['refname']
- reftarget_id = self.document.nameids.get(refname)
- if not reftarget_id:
- self.nonexistent_indirect_target(target)
- return
+ refname = target.get('refname')
+ if refname is None:
+ reftarget_id = target['refid']
+ else:
+ reftarget_id = self.document.nameids.get(refname)
+ if not reftarget_id:
+ # Check the unknown_reference_resolvers
+ for resolver_function in \
+ self.document.transformer.unknown_reference_resolvers:
+ if resolver_function(target):
+ break
+ else:
+ self.nonexistent_indirect_target(target)
+ return
reftarget = self.document.ids[reftarget_id]
+ reftarget.note_referenced_by(id=reftarget_id)
if isinstance(reftarget, nodes.target) \
- and not reftarget.resolved and reftarget.hasattr('refname'):
+ and not reftarget.resolved and reftarget.hasattr('refname'):
if hasattr(target, 'multiply_indirect'):
#and target.multiply_indirect):
#del target.multiply_indirect
@@ -231,42 +241,49 @@
del target.multiply_indirect
if reftarget.hasattr('refuri'):
target['refuri'] = reftarget['refuri']
- if target.hasattr('name'):
+ if target['names']:
self.document.note_external_target(target)
+ if target.has_key('refid'):
+ del target['refid']
elif reftarget.hasattr('refid'):
target['refid'] = reftarget['refid']
self.document.note_refid(target)
else:
- try:
- target['refid'] = reftarget['id']
+ if reftarget['ids']:
+ target['refid'] = reftarget_id
self.document.note_refid(target)
- except KeyError:
+ else:
self.nonexistent_indirect_target(target)
return
- del target['refname']
+ if refname is not None:
+ del target['refname']
target.resolved = 1
- reftarget.referenced = 1
def nonexistent_indirect_target(self, target):
- self.indirect_target_error(target, 'which does not exist')
+ if self.document.nameids.has_key(target['refname']):
+ self.indirect_target_error(target, 'which is a duplicate, and '
+ 'cannot be used as a unique reference')
+ else:
+ self.indirect_target_error(target, 'which does not exist')
def circular_indirect_reference(self, target):
self.indirect_target_error(target, 'forming a circular reference')
def indirect_target_error(self, target, explanation):
naming = ''
- if target.hasattr('name'):
- naming = '"%s" ' % target['name']
- reflist = self.document.refnames.get(target['name'], [])
- else:
- reflist = self.document.refids.get(target['id'], [])
- naming += '(id="%s")' % target['id']
+ reflist = []
+ if target['names']:
+ naming = '"%s" ' % target['names'][0]
+ for name in target['names']:
+ reflist.extend(self.document.refnames.get(name, []))
+ for id in target['ids']:
+ reflist.extend(self.document.refids.get(id, []))
+ naming += '(id="%s")' % target['ids'][0]
msg = self.document.reporter.error(
'Indirect hyperlink target %s refers to target "%s", %s.'
- % (naming, target['refname'], explanation),
- base_node=target)
+ % (naming, target['refname'], explanation), base_node=target)
msgid = self.document.set_id(msg)
- for ref in reflist:
+ for ref in uniq(reflist):
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
@@ -286,43 +303,34 @@
else:
return
attval = target[attname]
- if target.hasattr('name'):
- name = target['name']
- try:
- reflist = self.document.refnames[name]
- except KeyError, instance:
- if target.referenced:
- return
- msg = self.document.reporter.info(
- 'Indirect hyperlink target "%s" is not referenced.'
- % name, base_node=target)
- target.referenced = 1
- return
- delatt = 'refname'
- else:
- id = target['id']
- try:
- reflist = self.document.refids[id]
- except KeyError, instance:
- if target.referenced:
- return
- msg = self.document.reporter.info(
- 'Indirect hyperlink target id="%s" is not referenced.'
- % id, base_node=target)
- target.referenced = 1
- return
- delatt = 'refid'
- for ref in reflist:
- if ref.resolved:
- continue
- del ref[delatt]
- ref[attname] = attval
- if not call_if_named or ref.hasattr('name'):
- call_method(ref)
- ref.resolved = 1
- if isinstance(ref, nodes.target):
- self.resolve_indirect_references(ref)
- target.referenced = 1
+ for name in target['names']:
+ reflist = self.document.refnames.get(name, [])
+ if reflist:
+ target.note_referenced_by(name=name)
+ for ref in reflist:
+ if ref.resolved:
+ continue
+ del ref['refname']
+ ref[attname] = attval
+ if not call_if_named or ref['names']:
+ call_method(ref)
+ ref.resolved = 1
+ if isinstance(ref, nodes.target):
+ self.resolve_indirect_references(ref)
+ for id in target['ids']:
+ reflist = self.document.refids.get(id, [])
+ if reflist:
+ target.note_referenced_by(id=id)
+ for ref in reflist:
+ if ref.resolved:
+ continue
+ del ref['refid']
+ ref[attname] = attval
+ if not call_if_named or ref['names']:
+ call_method(ref)
+ ref.resolved = 1
+ if isinstance(ref, nodes.target):
+ self.resolve_indirect_references(ref)
class ExternalTargets(Transform):
@@ -347,73 +355,59 @@
def apply(self):
for target in self.document.external_targets:
- if target.hasattr('refuri') and target.hasattr('name'):
- name = target['name']
+ if target.hasattr('refuri'):
refuri = target['refuri']
- try:
- reflist = self.document.refnames[name]
- except KeyError, instance:
- if target.referenced:
- continue
- msg = self.document.reporter.info(
- 'External hyperlink target "%s" is not referenced.'
- % name, base_node=target)
- target.referenced = 1
- continue
- for ref in reflist:
- if ref.resolved:
- continue
- del ref['refname']
- ref['refuri'] = refuri
- ref.resolved = 1
- target.referenced = 1
+ for name in target['names']:
+ reflist = self.document.refnames.get(name, [])
+ if reflist:
+ target.note_referenced_by(name=name)
+ for ref in reflist:
+ if ref.resolved:
+ continue
+ del ref['refname']
+ ref['refuri'] = refuri
+ ref.resolved = 1
class InternalTargets(Transform):
- """
- Given::
+ default_priority = 660
- <paragraph>
- <reference refname="direct internal">
- direct internal
- <target id="id1" name="direct internal">
+ def apply(self):
+ for target in self.document.internal_targets:
+ self.resolve_reference_ids(target)
- The "refname" attribute is replaced by "refid" linking to the target's
- "id"::
+ def resolve_reference_ids(self, target):
+ """
+ Given::
- <paragraph>
- <reference refid="id1">
- direct internal
- <target id="id1" name="direct internal">
- """
+ <paragraph>
+ <reference refname="direct internal">
+ direct internal
+ <target id="id1" name="direct internal">
- default_priority = 660
+ The "refname" attribute is replaced by "refid" linking to the target's
+ "id"::
- def apply(self):
- for target in self.document.internal_targets:
- if target.hasattr('refuri') or target.hasattr('refid') \
- or not target.hasattr('name'):
- continue
- name = target['name']
- refid = target['id']
- try:
- reflist = self.document.refnames[name]
- except KeyError, instance:
- if target.referenced:
- continue
- msg = self.document.reporter.info(
- 'Internal hyperlink target "%s" is not referenced.'
- % name, base_node=target)
- target.referenced = 1
- continue
+ <paragraph>
+ <reference refid="id1">
+ direct internal
+ <target id="id1" name="direct internal">
+ """
+ if target.hasattr('refuri') or target.hasattr('refid') \
+ or not target['names']:
+ return
+ for name in target['names']:
+ refid = self.document.nameids[name]
+ reflist = self.document.refnames.get(name, [])
+ if reflist:
+ target.note_referenced_by(name=name)
for ref in reflist:
if ref.resolved:
continue
del ref['refname']
ref['refid'] = refid
ref.resolved = 1
- target.referenced = 1
class Footnotes(Transform):
@@ -521,19 +515,17 @@
if not self.document.nameids.has_key(label):
break
footnote.insert(0, nodes.label('', label))
- if footnote.hasattr('dupname'):
- continue
- if footnote.hasattr('name'):
- name = footnote['name']
+ for name in footnote['names']:
for ref in self.document.footnote_refs.get(name, []):
ref += nodes.Text(label)
ref.delattr('refname')
- ref['refid'] = footnote['id']
- footnote.add_backref(ref['id'])
+ assert len(footnote['ids']) == len(ref['ids']) == 1
+ ref['refid'] = footnote['ids'][0]
+ footnote.add_backref(ref['ids'][0])
self.document.note_refid(ref)
ref.resolved = 1
- else:
- footnote['name'] = label
+ if not footnote['names'] and not footnote['dupnames']:
+ footnote['names'].append(label)
self.document.note_explicit_target(footnote, footnote)
self.autofootnote_labels.append(label)
return startnum
@@ -566,7 +558,8 @@
footnote = self.document.ids[id]
ref['refid'] = id
self.document.note_refid(ref)
- footnote.add_backref(ref['id'])
+ assert len(ref['ids']) == 1
+ footnote.add_backref(ref['ids'][0])
ref.resolved = 1
i += 1
@@ -601,9 +594,10 @@
ref.parent.replace(ref, prb)
break
footnote = self.document.symbol_footnotes[i]
- ref['refid'] = footnote['id']
+ assert len(footnote['ids']) == 1
+ ref['refid'] = footnote['ids'][0]
self.document.note_refid(ref)
- footnote.add_backref(ref['id'])
+ footnote.add_backref(ref['ids'][0])
i += 1
def resolve_footnotes_and_citations(self):
@@ -612,24 +606,26 @@
references.
"""
for footnote in self.document.footnotes:
- label = footnote['name']
- if self.document.footnote_refs.has_key(label):
- reflist = self.document.footnote_refs[label]
- self.resolve_references(footnote, reflist)
+ for label in footnote['names']:
+ if self.document.footnote_refs.has_key(label):
+ reflist = self.document.footnote_refs[label]
+ self.resolve_references(footnote, reflist)
for citation in self.document.citations:
- label = citation['name']
- if self.document.citation_refs.has_key(label):
- reflist = self.document.citation_refs[label]
- self.resolve_references(citation, reflist)
+ for label in citation['names']:
+ if self.document.citation_refs.has_key(label):
+ reflist = self.document.citation_refs[label]
+ self.resolve_references(citation, reflist)
def resolve_references(self, note, reflist):
- id = note['id']
+ assert len(note['ids']) == 1
+ id = note['ids'][0]
for ref in reflist:
if ref.resolved:
continue
ref.delattr('refname')
ref['refid'] = id
- note.add_backref(ref['id'])
+ assert len(ref['ids']) == 1
+ note.add_backref(ref['ids'][0])
ref.resolved = 1
note.resolved = 1
@@ -669,7 +665,9 @@
def apply(self):
defs = self.document.substitution_defs
normed = self.document.substitution_names
- for refname, refs in self.document.substitution_refs.items():
+ subreflist = self.document.substitution_refs.items()
+ subreflist.sort()
+ for refname, refs in subreflist:
for ref in refs:
key = None
if defs.has_key(refname):
@@ -689,7 +687,22 @@
msg.add_backref(prbid)
ref.parent.replace(ref, prb)
else:
- ref.parent.replace(ref, defs[key].get_children())
+ subdef = defs[key]
+ parent = ref.parent
+ index = parent.index(ref)
+ if (subdef.attributes.has_key('ltrim')
+ or subdef.attributes.has_key('trim')):
+ if index > 0 and isinstance(parent[index - 1],
+ nodes.Text):
+ parent.replace(parent[index - 1],
+ parent[index - 1].rstrip())
+ if (subdef.attributes.has_key('rtrim')
+ or subdef.attributes.has_key('trim')):
+ if (len(parent) > index + 1
+ and isinstance(parent[index + 1], nodes.Text)):
+ parent.replace(parent[index + 1],
+ parent[index + 1].lstrip())
+ parent.replace(ref, subdef.children)
self.document.substitution_refs = None # release replaced references
@@ -708,11 +721,12 @@
notes = {}
nodelist = []
for target in self.document.external_targets:
- name = target.get('name')
- if not name:
- print >>sys.stderr, 'no name on target: %r' % target
- continue
- refs = self.document.refnames.get(name, [])
+ names = target['names']
+ # Only named targets.
+ assert names
+ refs = []
+ for name in names:
+ refs.extend(self.document.refnames.get(name, []))
if not refs:
continue
footnote = self.make_target_footnote(target, refs, notes)
@@ -734,14 +748,16 @@
refuri = target['refuri']
if notes.has_key(refuri): # duplicate?
footnote = notes[refuri]
- footnote_name = footnote['name']
+ assert len(footnote['names']) == 1
+ footnote_name = footnote['names'][0]
else: # original
footnote = nodes.footnote()
footnote_id = self.document.set_id(footnote)
- # Use a colon; they can't be produced inside names by the parser:
- footnote_name = 'target_note: ' + footnote_id
+ # Use uppercase letters and a colon; they can't be
+ # produced inside names by the parser.
+ footnote_name = 'TARGET_NOTE: ' + footnote_id
footnote['auto'] = 1
- footnote['name'] = footnote_name
+ footnote['names'] = [footnote_name]
footnote_paragraph = nodes.paragraph()
footnote_paragraph += nodes.reference('', refuri, refuri=refuri)
footnote += footnote_paragraph
@@ -756,7 +772,15 @@
self.document.note_footnote_ref(refnode)
index = ref.parent.index(ref) + 1
reflist = [refnode]
- if not self.document.settings.trim_footnote_reference_space:
+ if not utils.get_trim_footnote_ref_space(self.document.settings):
reflist.insert(0, nodes.Text(' '))
ref.parent.insert(index, reflist)
return footnote
+
+
+def uniq(L):
+ r = []
+ for item in L:
+ if not item in r:
+ r.append(item)
+ return r
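
A quick sketch of what the new module-level uniq() helper above does (the
sample list is made up for illustration): duplicates are dropped and
first-seen order is preserved.

    from docutils.transforms.references import uniq

    names = ['intro', 'usage', 'intro', 'api', 'usage']
    assert uniq(names) == ['intro', 'usage', 'api']
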
Modified: Zope3/trunk/src/docutils/transforms/universal.py
===================================================================
--- Zope3/trunk/src/docutils/transforms/universal.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/transforms/universal.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 3186 $
+# Date: $Date: 2005-04-07 21:51:45 +0200 (Thu, 07 Apr 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -32,19 +32,16 @@
default_priority = 820
def apply(self):
- header = self.generate_header()
- footer = self.generate_footer()
- if header or footer:
- decoration = nodes.decoration()
- decoration += header
- decoration += footer
- document = self.document
- index = document.first_child_not_matching_class(
- nodes.PreDecorative)
- if index is None:
- document += decoration
- else:
- document[index:index] = [decoration]
+ header_nodes = self.generate_header()
+ if header_nodes:
+ decoration = self.document.get_decoration()
+ header = decoration.get_header()
+ header.extend(header_nodes)
+ footer_nodes = self.generate_footer()
+ if footer_nodes:
+ decoration = self.document.get_decoration()
+ footer = decoration.get_footer()
+ footer.extend(footer_nodes)
def generate_header(self):
return None
@@ -79,9 +76,7 @@
nodes.reference('', 'reStructuredText', refuri='http://'
'docutils.sourceforge.net/rst.html'),
nodes.Text(' source.\n')])
- footer = nodes.footer()
- footer += nodes.paragraph('', '', *text)
- return footer
+ return [nodes.paragraph('', '', *text)]
else:
return None
@@ -97,13 +92,13 @@
def apply(self):
unfiltered = self.document.transform_messages
- threshold = self.document.reporter['writer'].report_level
+ threshold = self.document.reporter.report_level
messages = []
for msg in unfiltered:
if msg['level'] >= threshold and not msg.parent:
messages.append(msg)
if messages:
- section = nodes.section(CLASS='system-messages')
+ section = nodes.section(classes=['system-messages'])
# @@@ get this from the language module?
section += nodes.title('', 'Docutils System Messages')
section += messages
@@ -111,6 +106,29 @@
self.document += section
+class FilterMessages(Transform):
+
+ """
+ Remove system messages below verbosity threshold.
+ """
+
+ default_priority = 870
+
+ def apply(self):
+ visitor = SystemMessageFilterVisitor(self.document)
+ self.document.walk(visitor)
+
+
+class SystemMessageFilterVisitor(nodes.SparseNodeVisitor):
+
+ def unknown_visit(self, node):
+ pass
+
+ def visit_system_message(self, node):
+ if node['level'] < self.document.reporter.report_level:
+ node.parent.remove(node)
+
+
class TestMessages(Transform):
"""
@@ -128,22 +146,45 @@
class FinalChecks(Transform):
"""
- Perform last-minute checks.
+ Perform last-minute checks and transforms.
- Check for dangling references (incl. footnote & citation).
+ - Check for illegal transitions, move transitions.
"""
default_priority = 840
def apply(self):
- visitor = FinalCheckVisitor(self.document)
+ visitor = FinalCheckVisitor(
+ self.document,
+ self.document.transformer.unknown_reference_resolvers)
self.document.walk(visitor)
if self.document.settings.expose_internals:
visitor = InternalAttributeExposer(self.document)
self.document.walk(visitor)
+ # *After* resolving all references, check for unreferenced
+ # targets:
+ for target in self.document.traverse():
+ if isinstance(target, nodes.target) and not target.referenced:
+ if target['names']:
+ naming = target['names'][0]
+ elif target['ids']:
+ naming = target['ids'][0]
+ else:
+ # Hack: Propagated targets always have their refid
+ # attribute set.
+ naming = target['refid']
+ self.document.reporter.info(
+ 'Hyperlink target "%s" is not referenced.'
+ % naming, base_node=target)
class FinalCheckVisitor(nodes.SparseNodeVisitor):
+
+ def __init__(self, document, unknown_reference_resolvers):
+ nodes.SparseNodeVisitor.__init__(self, document)
+ self.document = document
+ self.unknown_reference_resolvers = unknown_reference_resolvers
def unknown_visit(self, node):
pass
@@ -154,24 +195,99 @@
refname = node['refname']
id = self.document.nameids.get(refname)
if id is None:
- msg = self.document.reporter.error(
- 'Unknown target name: "%s".' % (node['refname']),
- base_node=node)
- msgid = self.document.set_id(msg)
- prb = nodes.problematic(
- node.rawsource, node.rawsource, refid=msgid)
- prbid = self.document.set_id(prb)
- msg.add_backref(prbid)
- node.parent.replace(node, prb)
+ for resolver_function in self.unknown_reference_resolvers:
+ if resolver_function(node):
+ break
+ else:
+ if self.document.nameids.has_key(refname):
+ msg = self.document.reporter.error(
+ 'Duplicate target name, cannot be used as a unique '
+ 'reference: "%s".' % (node['refname']), base_node=node)
+ else:
+ msg = self.document.reporter.error(
+ 'Unknown target name: "%s".' % (node['refname']),
+ base_node=node)
+ msgid = self.document.set_id(msg)
+ prb = nodes.problematic(
+ node.rawsource, node.rawsource, refid=msgid)
+ prbid = self.document.set_id(prb)
+ msg.add_backref(prbid)
+ node.parent.replace(node, prb)
else:
del node['refname']
node['refid'] = id
- self.document.ids[id].referenced = 1
+ self.document.ids[id].note_referenced_by(id=id)
node.resolved = 1
visit_footnote_reference = visit_citation_reference = visit_reference
+ def visit_transition(self, node):
+ """
+ Move transitions at the end of sections up the tree. Complain
+ on transitions after a title, at the beginning or end of the
+ document, and after another transition.
+ For example, transform this::
+
+            <section>
+                ...
+                <transition>
+            <section>
+                ...
+
+        into this::
+
+            <section>
+                ...
+            <transition>
+            <section>
+                ...
+ """
+ index = node.parent.index(node)
+ error = None
+ if (index == 0 or
+ isinstance(node.parent[0], nodes.title) and
+ (index == 1 or
+ isinstance(node.parent[1], nodes.subtitle) and
+ index == 2)):
+ assert (isinstance(node.parent, nodes.document) or
+ isinstance(node.parent, nodes.section))
+ error = self.document.reporter.error(
+ 'Document or section may not begin with a transition.',
+ line=node.line)
+ elif isinstance(node.parent[index - 1], nodes.transition):
+ error = self.document.reporter.error(
+ 'At least one body element must separate transitions; '
+ 'adjacent transitions are not allowed.', line=node.line)
+ if error:
+ # Insert before node and update index.
+ node.parent.insert(index, error)
+ index += 1
+ assert index < len(node.parent)
+ if index != len(node.parent) - 1:
+ # No need to move the node.
+ return
+ # Node behind which the transition is to be moved.
+ sibling = node
+ # While sibling is the last node of its parent.
+ while index == len(sibling.parent) - 1:
+ sibling = sibling.parent
+ # If sibling is the whole document (i.e. it has no parent).
+ if sibling.parent is None:
+ # Transition at the end of document. Do not move the
+ # transition up, and place an error behind.
+ error = self.document.reporter.error(
+ 'Document may not end with a transition.',
+ line=node.line)
+ node.parent.insert(node.parent.index(node) + 1, error)
+ return
+ index = sibling.parent.index(sibling)
+ # Remove the original transition node.
+ node.parent.remove(node)
+ # Insert the transition after the sibling.
+ sibling.parent.insert(index + 1, node)
+
+
class InternalAttributeExposer(nodes.GenericNodeVisitor):
def __init__(self, document):
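
A hedged sketch of the refactored decorations hook above: with this change a
subclass of the Decorations transform in universal.py only returns a list of
body nodes from generate_header()/generate_footer(), and the transform itself
attaches them via document.get_decoration(). The class name "DraftStamp" and
the header text are illustrative, not part of the commit.

    from docutils import nodes
    from docutils.transforms import universal

    class DraftStamp(universal.Decorations):
        # Emits a one-paragraph header; footer generation is inherited.
        def generate_header(self):
            return [nodes.paragraph('', 'DRAFT -- not for distribution')]
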
Modified: Zope3/trunk/src/docutils/urischemes.py
===================================================================
--- Zope3/trunk/src/docutils/urischemes.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/urischemes.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,8 @@
"""
`schemes` is a dictionary with lowercase URI addressing schemes as
keys and descriptions as values. It was compiled from the index at
-http://www.w3.org/Addressing/schemes.html (revised 2001-08-20).
+http://www.iana.org/assignments/uri-schemes (revised 2003-11-26)
+and an older list at http://www.w3.org/Addressing/schemes.html.
"""
# Many values are blank and should be filled in with useful descriptions.
@@ -26,10 +27,12 @@
'specialized to justify their own schemes'),
'fax': ('a connection to a terminal that can handle telefaxes '
'(facsimiles); RFC 2806'),
+ 'feed' : 'NetNewsWire feed',
'file': 'Host-specific file names',
'finger': '',
'freenet': '',
'ftp': 'File Transfer Protocol',
+ 'go': 'go; RFC3368',
'gopher': 'The Gopher Protocol',
'gsm-sms': ('Global System for Mobile Communications Short Message '
'Service'),
@@ -40,16 +43,19 @@
'hnews': 'an HTTP-tunneling variant of the NNTP news protocol',
'http': 'Hypertext Transfer Protocol',
'https': 'HTTP over SSL',
+ 'hydra': 'SubEthaEdit URI. See http://www.codingmonkeys.de/subethaedit.',
'iioploc': 'Internet Inter-ORB Protocol Location?',
'ilu': 'Inter-Language Unification',
+ 'im': 'Instant Messaging',
'imap': 'Internet Message Access Protocol',
'ior': 'CORBA interoperable object reference',
'ipp': 'Internet Printing Protocol',
'irc': 'Internet Relay Chat',
+ 'iseek' : 'See www.ambrosiasw.com; a little util for OS X.',
'jar': 'Java archive',
'javascript': ('JavaScript code; evaluates the expression after the '
'colon'),
- 'jdbc': '',
+ 'jdbc': 'JDBC connection URI.',
'ldap': 'Lightweight Directory Access Protocol',
'lifn': '',
'livescript': '',
@@ -62,6 +68,7 @@
'mocha': '',
'modem': ('a connection to a terminal that can handle incoming data '
'calls; RFC 2806'),
+ 'mupdate': 'Mailbox Update (MUPDATE) Protocol',
'news': 'USENET news',
'nfs': 'Network File System protocol',
'nntp': 'USENET news using NNTP access',
@@ -69,8 +76,10 @@
'phone': '',
'pop': 'Post Office Protocol',
'pop3': 'Post Office Protocol v3',
+ 'pres': 'Presence',
'printer': '',
'prospero': 'Prospero Directory Service',
+ 'rdar' : 'URLs found in Darwin source (http://www.opensource.apple.com/darwinsource/).',
'res': '',
'rtsp': 'real time streaming protocol',
'rvp': '',
@@ -80,8 +89,12 @@
'service': 'service location',
'shttp': 'secure hypertext transfer protocol',
'sip': 'Session Initiation Protocol',
- 'smb': '',
+ 'sips': 'secure session initiation protocol',
+ 'smb': 'SAMBA filesystems.',
'snews': 'For NNTP postings via SSL',
+ 'soap.beep': '',
+ 'soap.beeps': '',
+ 'ssh': 'Reference to interactive sessions via ssh.',
't120': 'real time data conferencing (audiographics)',
'tcp': '',
'tel': ('a connection to a terminal that handles normal voice '
@@ -90,6 +103,7 @@
'RFC 2806.'),
'telephone': 'telephone',
'telnet': 'Reference to interactive sessions',
+ 'tftp': 'Trivial File Transfer Protocol',
'tip': 'Transaction Internet Protocol',
'tn3270': 'Interactive 3270 emulation sessions',
'tv': '',
@@ -101,5 +115,8 @@
'wais': 'Wide Area Information Servers',
'whodp': '',
'whois++': 'Distributed directory service.',
+ 'x-man-page': 'Opens man page in Terminal.app on OS X (see macosxhints.com)',
+ 'xmlrpc.beep': '',
+ 'xmlrpc.beeps': '',
'z39.50r': 'Z39.50 Retrieval',
'z39.50s': 'Z39.50 Session',}
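
The scheme table above is an ordinary module-level dictionary, so the new
entries are available to lookups straight away; a tiny usage sketch (the keys
chosen are just examples):

    from docutils import urischemes

    print urischemes.schemes['tftp']          # 'Trivial File Transfer Protocol'
    print urischemes.schemes.has_key('sips')  # True after this update
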
Modified: Zope3/trunk/src/docutils/utils.py
===================================================================
--- Zope3/trunk/src/docutils/utils.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/utils.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:04 $
+# Revision: $Revision: 3253 $
+# Date: $Date: 2005-04-25 17:08:01 +0200 (Mon, 25 Apr 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -13,6 +13,7 @@
import sys
import os
import os.path
+import warnings
from types import StringType, UnicodeType
from docutils import ApplicationError, DataError
from docutils import frontend, nodes
@@ -20,10 +21,14 @@
class SystemMessage(ApplicationError):
- def __init__(self, system_message):
+ def __init__(self, system_message, level):
Exception.__init__(self, system_message.astext())
+ self.level = level
+class SystemMessagePropagation(ApplicationError): pass
+
+
class Reporter:
"""
@@ -35,37 +40,21 @@
There is typically one Reporter object per process. A Reporter object is
instantiated with thresholds for reporting (generating warnings) and
halting processing (raising exceptions), a switch to turn debug output on
- or off, and an I/O stream for warnings. These are stored in the default
- reporting category, '' (zero-length string).
+ or off, and an I/O stream for warnings. These are stored as instance
+ attributes.
- Multiple reporting categories [#]_ may be set, each with its own reporting
- and halting thresholds, debugging switch, and warning stream
- (collectively a `ConditionSet`). Categories are hierarchical dotted-name
- strings that look like attribute references: 'spam', 'spam.eggs',
- 'neeeow.wum.ping'. The 'spam' category is the ancestor of
- 'spam.bacon.eggs'. Unset categories inherit stored conditions from their
- closest ancestor category that has been set.
+ When a system message is generated, its level is compared to the stored
+ thresholds, and a warning or error is generated as appropriate. Debug
+ messages are produced iff the stored debug switch is on, independently of
+ other thresholds. Message output is sent to the stored warning stream if
+ not set to ''.
- When a system message is generated, the stored conditions from its
- category (or ancestor if unset) are retrieved. The system message level
- is compared to the thresholds stored in the category, and a warning or
- error is generated as appropriate. Debug messages are produced iff the
- stored debug switch is on. Message output is sent to the stored warning
- stream.
-
- The default category is '' (empty string). By convention, Writers should
- retrieve reporting conditions from the 'writer' category (which, unless
- explicitly set, defaults to the conditions of the default category).
-
The Reporter class also employs a modified form of the "Observer" pattern
[GoF95]_ to track system messages generated. The `attach_observer` method
should be called before parsing, with a bound method or function which
accepts system messages. The observer can be removed with
`detach_observer`, and another added in its place.
- .. [#] The concept of "categories" was inspired by the log4j project:
- http://jakarta.apache.org/log4j/.
-
.. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
1995.
@@ -77,10 +66,7 @@
def __init__(self, source, report_level, halt_level, stream=None,
debug=0, encoding='ascii', error_handler='replace'):
"""
- Initialize the `ConditionSet` forthe `Reporter`'s default category.
-
:Parameters:
-
- `source`: The path to or description of the source data.
- `report_level`: The level at or above which warning output will
be sent to `stream`.
@@ -88,18 +74,14 @@
exceptions will be raised, halting execution.
- `debug`: Show debug (level=0) system messages?
- `stream`: Where warning output is sent. Can be file-like (has a
- ``.write`` method), a string (file name, opened for writing), or
+ ``.write`` method), a string (file name, opened for writing),
+ '' (empty string, for discarding all stream messages) or
`None` (implies `sys.stderr`; default).
- `encoding`: The encoding for stderr output.
- `error_handler`: The error handler for stderr output encoding.
"""
self.source = source
"""The path to or description of the source data."""
-
- if stream is None:
- stream = sys.stderr
- elif type(stream) in (StringType, UnicodeType):
- raise NotImplementedError('This should open a file for writing.')
self.encoding = encoding
"""The character encoding for the stderr output."""
@@ -107,10 +89,30 @@
self.error_handler = error_handler
"""The character encoding error handler."""
- self.categories = {'': ConditionSet(debug, report_level, halt_level,
- stream)}
- """Mapping of category names to conditions. Default category is ''."""
+ self.debug_flag = debug
+ """Show debug (level=0) system messages?"""
+ self.report_level = report_level
+ """The level at or above which warning output will be sent
+ to `self.stream`."""
+
+ self.halt_level = halt_level
+ """The level at or above which `SystemMessage` exceptions
+ will be raised, halting execution."""
+
+ if stream is None:
+ stream = sys.stderr
+ elif type(stream) in (StringType, UnicodeType):
+ # Leave stream untouched if it's ''.
+ if stream != '':
+ if type(stream) == StringType:
+ stream = open(stream, 'w')
+ elif type(stream) == UnicodeType:
+ stream = open(stream.encode(), 'w')
+
+ self.stream = stream
+ """Where warning output is sent."""
+
self.observers = []
"""List of bound methods or functions to call with each system_message
created."""
@@ -120,24 +122,16 @@
def set_conditions(self, category, report_level, halt_level,
stream=None, debug=0):
+ warnings.warn('docutils.utils.Reporter.set_conditions deprecated; '
+ 'set attributes via configuration settings or directly',
+ DeprecationWarning, stacklevel=2)
+ self.report_level = report_level
+ self.halt_level = halt_level
if stream is None:
stream = sys.stderr
- self.categories[category] = ConditionSet(debug, report_level,
- halt_level, stream)
+ self.stream = stream
+ self.debug = debug
- def unset_conditions(self, category):
- if category and self.categories.has_key(category):
- del self.categories[category]
-
- __delitem__ = unset_conditions
-
- def get_conditions(self, category):
- while not self.categories.has_key(category):
- category = category[:category.rfind('.') + 1][:-1]
- return self.categories[category]
-
- __getitem__ = get_conditions
-
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes one
@@ -159,9 +153,6 @@
Raise an exception or generate a warning if appropriate.
"""
attributes = kwargs.copy()
- category = kwargs.get('category', '')
- if kwargs.has_key('category'):
- del attributes['category']
if kwargs.has_key('base_node'):
source, line = get_source_line(kwargs['base_node'])
del attributes['base_node']
@@ -173,16 +164,13 @@
msg = nodes.system_message(message, level=level,
type=self.levels[level],
*children, **attributes)
- debug, report_level, halt_level, stream = self[category].astuple()
- if level >= report_level or debug and level == 0:
+ if self.stream and (level >= self.report_level
+ or self.debug_flag and level == 0):
msgtext = msg.astext().encode(self.encoding, self.error_handler)
- if category:
- print >>stream, msgtext, '[%s]' % category
- else:
- print >>stream, msgtext
- if level >= halt_level:
- raise SystemMessage(msg)
- if level > 0 or debug:
+ print >>self.stream, msgtext
+ if level >= self.halt_level:
+ raise SystemMessage(msg, level)
+ if level > 0 or self.debug_flag:
self.notify_observers(msg)
self.max_level = max(level, self.max_level)
return msg
@@ -193,7 +181,8 @@
effect on the processing. Level-0 system messages are handled
separately from the others.
"""
- return self.system_message(0, *args, **kwargs)
+ if self.debug_flag:
+ return self.system_message(0, *args, **kwargs)
def info(self, *args, **kwargs):
"""
@@ -225,25 +214,6 @@
return self.system_message(4, *args, **kwargs)
-class ConditionSet:
-
- """
- A set of two thresholds (`report_level` & `halt_level`), a switch
- (`debug`), and an I/O stream (`stream`), corresponding to one `Reporter`
- category.
- """
-
- def __init__(self, debug, report_level, halt_level, stream):
- self.debug = debug
- self.report_level = report_level
- self.halt_level = halt_level
- self.stream = stream
-
- def astuple(self):
- return (self.debug, self.report_level, self.halt_level,
- self.stream)
-
-
class ExtensionOptionError(DataError): pass
class BadOptionError(ExtensionOptionError): pass
class BadOptionDataError(ExtensionOptionError): pass
@@ -264,6 +234,8 @@
- `KeyError` for unknown option names.
- `ValueError` for invalid option values (raised by the conversion
function).
+ - `TypeError` for invalid option value types (raised by conversion
+ function).
- `DuplicateOptionError` for duplicate options.
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
@@ -320,17 +292,21 @@
- `DuplicateOptionError` for duplicate options.
- `ValueError` for invalid option values (raised by conversion
function).
+ - `TypeError` for invalid option value types (raised by conversion
+ function).
"""
options = {}
for name, value in option_list:
- convertor = options_spec[name] # raises KeyError if unknown
+ convertor = options_spec[name] # raises KeyError if unknown
+ if convertor is None:
+ raise KeyError(name) # or if explicitly disabled
if options.has_key(name):
raise DuplicateOptionError('duplicate option "%s"' % name)
try:
options[name] = convertor(value)
except (ValueError, TypeError), detail:
raise detail.__class__('(option: "%s"; value: %r)\n%s'
- % (name, value, detail))
+ % (name, value, ' '.join(detail.args)))
return options
@@ -405,7 +381,7 @@
if len(paragraph) == 1 and isinstance(paragraph[0], nodes.Text):
textnode = paragraph[0]
for pattern, substitution in keyword_substitutions:
- match = pattern.match(textnode.data)
+ match = pattern.search(textnode.data)
if match:
textnode.data = pattern.sub(substitution, textnode.data)
return
@@ -434,6 +410,34 @@
parts = ['..'] * (len(source_parts) - 1) + target_parts
return '/'.join(parts)
+def get_stylesheet_reference(settings, relative_to=None):
+ """
+ Retrieve a stylesheet reference from the settings object.
+ """
+ if settings.stylesheet_path:
+ assert not settings.stylesheet, \
+ 'stylesheet and stylesheet_path are mutually exclusive.'
+ if relative_to == None:
+ relative_to = settings._destination
+ return relative_path(relative_to, settings.stylesheet_path)
+ else:
+ return settings.stylesheet
+
+def get_trim_footnote_ref_space(settings):
+ """
+ Return whether or not to trim footnote space.
+
+ If trim_footnote_reference_space is not None, return it.
+
+ If trim_footnote_reference_space is None, return False unless the
+ footnote reference style is 'superscript'.
+ """
+ if settings.trim_footnote_reference_space is None:
+ return hasattr(settings, 'footnote_references') and \
+ settings.footnote_references == 'superscript'
+ else:
+ return settings.trim_footnote_reference_space
+
def get_source_line(node):
"""
Return the "source" and "line" attributes from the `node` given or from
@@ -444,3 +448,92 @@
return node.source, node.line
node = node.parent
return None, None
+
+def escape2null(text):
+ """Return a string with escape-backslashes converted to nulls."""
+ parts = []
+ start = 0
+ while 1:
+ found = text.find('\\', start)
+ if found == -1:
+ parts.append(text[start:])
+ return ''.join(parts)
+ parts.append(text[start:found])
+ parts.append('\x00' + text[found+1:found+2])
+ start = found + 2 # skip character after escape
+
+def unescape(text, restore_backslashes=0):
+ """
+ Return a string with nulls removed or restored to backslashes.
+ Backslash-escaped spaces are also removed.
+ """
+ if restore_backslashes:
+ return text.replace('\x00', '\\')
+ else:
+ for sep in ['\x00 ', '\x00\n', '\x00']:
+ text = ''.join(text.split(sep))
+ return text
+
+
+class DependencyList:
+
+ """
+ List of dependencies, with file recording support.
+
+ Note that the output file is not automatically closed. You have
+ to explicitly call the close() method.
+ """
+
+ def __init__(self, output_file=None, dependencies=[]):
+ """
+ Initialize the dependency list, automatically setting the
+ output file to `output_file` (see `set_output()`) and adding
+ all supplied dependencies.
+ """
+ self.set_output(output_file)
+ for i in dependencies:
+ self.add(i)
+
+ def set_output(self, output_file):
+ """
+ Set the output file and clear the list of already added
+ dependencies.
+
+ `output_file` must be a string. The specified file is
+ immediately overwritten.
+
+ If output_file is '-', the output will be written to stdout.
+ If it is None, no file output is done when calling add().
+ """
+ self.list = []
+ if output_file == '-':
+ self.file = sys.stdout
+ elif output_file:
+ self.file = open(output_file, 'w')
+ else:
+ self.file = None
+
+ def add(self, filename):
+ """
+ If the dependency `filename` has not already been added,
+ append it to self.list and print it to self.file if self.file
+ is not None.
+ """
+ if not filename in self.list:
+ self.list.append(filename)
+ if self.file is not None:
+ print >>self.file, filename
+
+ def close(self):
+ """
+ Close the output file.
+ """
+ self.file.close()
+ self.file = None
+
+ def __repr__(self):
+ if self.file:
+ output_file = self.file.name
+ else:
+ output_file = None
+ return '%s(%r, %s)' % (self.__class__.__name__, output_file, self.list)
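
Two small usage sketches for the utilities added above (all file names and
sample strings are illustrative). First, the escape2null()/unescape() pair,
which round-trips backslash escapes through NUL markers:

    from docutils.utils import escape2null, unescape

    marked = escape2null(r'a\*b')                  # backslash becomes '\x00'
    assert unescape(marked) == 'a*b'               # escapes dropped
    assert unescape(marked, restore_backslashes=1) == r'a\*b'

Second, the DependencyList recorder; passing '-' sends each newly recorded
dependency to stdout, and duplicates are silently ignored:

    from docutils.utils import DependencyList

    deps = DependencyList('-')
    deps.add('default.css')
    deps.add('default.css')      # duplicate, not recorded again
    deps.add('logo.png')
    print deps.list              # ['default.css', 'logo.png']
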
Modified: Zope3/trunk/src/docutils/writers/__init__.py
===================================================================
--- Zope3/trunk/src/docutils/writers/__init__.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/writers/__init__.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Authors: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 2321 $
+# Date: $Date: 2004-06-20 14:28:08 +0200 (Sun, 20 Jun 2004) $
# Copyright: This module has been placed in the public domain.
"""
@@ -26,24 +26,45 @@
Each writer must support all standard node types listed in
`docutils.nodes.node_class_names`.
- Call `write()` to process a document.
+ The `write()` method is the main entry point.
"""
component_type = 'writer'
+ config_section = 'writers'
document = None
- """The document to write."""
+ """The document to write (Docutils doctree); set by `write`."""
+ output = None
+ """Final translated form of `document` (Unicode string);
+ set by `translate`."""
+
language = None
- """Language module for the document."""
+ """Language module for the document; set by `write`."""
destination = None
- """`docutils.io` IO object; where to write the document."""
+ """`docutils.io` Output object; where to write the document.
+ Set by `write`."""
def __init__(self):
- """Initialize the Writer instance."""
+ # Currently only used by HTML writer for output fragments:
+ self.parts = {}
+ """Mapping of document part names to fragments of `self.output`.
+ Values are Unicode strings; encoding is up to the client. The 'whole'
+ key should contain the entire document output.
+ """
+
def write(self, document, destination):
+ """
+ Process a document into its final form.
+
+ Translate `document` (a Docutils document tree) into the Writer's
+ native format, and write it out to its `destination` (a
+ `docutils.io.Output` subclass object).
+
+ Normally not overridden or extended in subclasses.
+ """
self.document = document
self.language = languages.get_language(
document.settings.language_code)
@@ -54,9 +75,10 @@
def translate(self):
"""
- Override to do final document tree translation.
+ Do final translation of `self.document` into `self.output` (Unicode
+ string). Called from `write`. Override in subclasses.
- This is usually done with a `docutils.nodes.NodeVisitor` subclass, in
+ Usually done with a `docutils.nodes.NodeVisitor` subclass, in
combination with a call to `docutils.nodes.Node.walk()` or
`docutils.nodes.Node.walkabout()`. The ``NodeVisitor`` subclass must
support all standard elements (listed in
@@ -65,7 +87,11 @@
"""
raise NotImplementedError('subclass must override this method')
+ def assemble_parts(self):
+ """Assemble the `self.parts` dictionary. Extend in subclasses."""
+ self.parts['whole'] = self.output
+
_writer_aliases = {
'html': 'html4css1',
'latex': 'latex2e',
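
A minimal sketch of the Writer contract documented above: translate() is the
one method a subclass must supply, setting self.output, which the base
assemble_parts() then exposes as parts['whole']. The class name
"PlainTextWriter" is made up for illustration.

    from docutils import writers

    class PlainTextWriter(writers.Writer):

        supported = ('plaintext',)
        config_section = 'plaintext writer'
        config_section_dependencies = ('writers',)

        def translate(self):
            # Trivial "translation": render the doctree as plain text.
            self.output = self.document.astext()
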
Modified: Zope3/trunk/src/docutils/writers/docutils_xml.py
===================================================================
--- Zope3/trunk/src/docutils/writers/docutils_xml.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/writers/docutils_xml.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Authors: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:06 $
+# Revision: $Revision: 2223 $
+# Date: $Date: 2004-06-05 21:32:15 +0200 (Sat, 05 Jun 2004) $
# Copyright: This module has been placed in the public domain.
"""
@@ -12,7 +12,7 @@
import docutils
-from docutils import writers
+from docutils import frontend, writers
class Writer(writers.Writer):
@@ -25,16 +25,23 @@
'Warning: the --newlines and --indents options may adversely affect '
'whitespace; use them only for reading convenience.',
(('Generate XML with newlines before and after tags.',
- ['--newlines'], {'action': 'store_true'}),
+ ['--newlines'],
+ {'action': 'store_true', 'validator': frontend.validate_boolean}),
('Generate XML with indents and newlines.',
- ['--indents'], {'action': 'store_true'}),
+ ['--indents'],
+ {'action': 'store_true', 'validator': frontend.validate_boolean}),
('Omit the XML declaration. Use with caution.',
- ['--no-xml-declaration'], {'dest': 'xml_declaration', 'default': 1,
- 'action': 'store_false'}),
+ ['--no-xml-declaration'],
+ {'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
+ 'validator': frontend.validate_boolean}),
('Omit the DOCTYPE declaration.',
- ['--no-doctype'], {'dest': 'doctype_declaration', 'default': 1,
- 'action': 'store_false'}),))
+ ['--no-doctype'],
+ {'dest': 'doctype_declaration', 'default': 1,
+ 'action': 'store_false', 'validator': frontend.validate_boolean}),))
+ config_section = 'docutils_xml writer'
+ config_section_dependencies = ('writers',)
+
output = None
"""Final translated form of `document`."""
@@ -43,7 +50,7 @@
doctype = (
'<!DOCTYPE document PUBLIC'
' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
- ' "http://docutils.sourceforge.net/spec/docutils.dtd">\n')
+ ' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
generator = '<!-- Generated by Docutils %s -->\n'
def translate(self):
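
With the config_section and boolean validators added above, the XML writer's
options can also be driven from code; a hedged sketch using the standard
publish_string() front end (the input text is arbitrary):

    from docutils.core import publish_string

    xml = publish_string('A *small* example.\n',
                         writer_name='docutils_xml',
                         settings_overrides={'indents': 1})
    print xml
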
Modified: Zope3/trunk/src/docutils/writers/html4css1.py
===================================================================
--- Zope3/trunk/src/docutils/writers/html4css1.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/writers/html4css1.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:07 $
+# Revision: $Revision: 3367 $
+# Date: $Date: 2005-05-26 02:44:13 +0200 (Thu, 26 May 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -22,8 +22,12 @@
import time
import re
from types import ListType
+try:
+ import Image # check for the Python Imaging Library
+except ImportError:
+ Image = None
import docutils
-from docutils import nodes, utils, writers, languages
+from docutils import frontend, nodes, utils, writers, languages
class Writer(writers.Writer):
@@ -35,30 +39,50 @@
'HTML-Specific Options',
None,
(('Specify a stylesheet URL, used verbatim. Default is '
- '"default.css". Overridden by --stylesheet-path.',
+ '"default.css". Overrides --stylesheet-path.',
['--stylesheet'],
- {'default': 'default.css', 'metavar': '<URL>'}),
+ {'default': 'default.css', 'metavar': '<URL>',
+ 'overrides': 'stylesheet_path'}),
('Specify a stylesheet file, relative to the current working '
'directory. The path is adjusted relative to the output HTML '
'file. Overrides --stylesheet.',
['--stylesheet-path'],
- {'metavar': '<file>'}),
+ {'metavar': '<file>', 'overrides': 'stylesheet'}),
('Link to the stylesheet in the output HTML file. This is the '
'default.',
['--link-stylesheet'],
- {'dest': 'embed_stylesheet', 'action': 'store_false'}),
+ {'dest': 'embed_stylesheet', 'action': 'store_false',
+ 'validator': frontend.validate_boolean}),
('Embed the stylesheet in the output HTML file. The stylesheet '
'file must be accessible during processing (--stylesheet-path is '
- 'recommended). The stylesheet is embedded inside a comment, so it '
- 'must not contain the text "--" (two hyphens). Default: link the '
- 'stylesheet, do not embed it.',
+ 'recommended). Default: link the stylesheet, do not embed it.',
['--embed-stylesheet'],
- {'action': 'store_true'}),
+ {'action': 'store_true', 'validator': frontend.validate_boolean}),
+ ('Specify the initial header level. Default is 1 for "<h1>". '
+ 'Does not affect document title & subtitle (see --no-doc-title).',
+ ['--initial-header-level'],
+ {'choices': '1 2 3 4 5 6'.split(), 'default': '1',
+ 'metavar': '<level>'}),
+ ('Specify the maximum width (in characters) for one-column field '
+ 'names. Longer field names will span an entire row of the table '
+ 'used to render the field list. Default is 14 characters. '
+ 'Use 0 for "no limit".',
+ ['--field-name-limit'],
+ {'default': 14, 'metavar': '<level>',
+ 'validator': frontend.validate_nonnegative_int}),
+ ('Specify the maximum width (in characters) for options in option '
+ 'lists. Longer options will span an entire row of the table used '
+ 'to render the option list. Default is 14 characters. '
+ 'Use 0 for "no limit".',
+ ['--option-limit'],
+ {'default': 14, 'metavar': '<level>',
+ 'validator': frontend.validate_nonnegative_int}),
('Format for footnote references: one of "superscript" or '
- '"brackets". Default is "superscript".',
+ '"brackets". Default is "brackets".',
['--footnote-references'],
- {'choices': ['superscript', 'brackets'], 'default': 'superscript',
- 'metavar': '<format>'}),
+ {'choices': ['superscript', 'brackets'], 'default': 'brackets',
+ 'metavar': '<format>',
+ 'overrides': 'trim_footnote_reference_space'}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
@@ -69,37 +93,48 @@
'items each contain one paragraph and/or one "simple" sublist '
'only). Default: enabled.',
['--compact-lists'],
- {'default': 1, 'action': 'store_true'}),
+ {'default': 1, 'action': 'store_true',
+ 'validator': frontend.validate_boolean}),
('Disable compact simple bullet and enumerated lists.',
['--no-compact-lists'],
{'dest': 'compact_lists', 'action': 'store_false'}),
('Omit the XML declaration. Use with caution.',
- ['--no-xml-declaration'], {'dest': 'xml_declaration', 'default': 1,
- 'action': 'store_false'}),))
+ ['--no-xml-declaration'],
+ {'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
+ 'validator': frontend.validate_boolean}),
+ ('Scramble email addresses to confuse harvesters. '
+ 'For example, "abc@example.org" will become '
+ '``<a href="mailto:%61%62%63%40...">abc at example dot org</a>``.',
+ ['--cloak-email-addresses'],
+ {'action': 'store_true', 'validator': frontend.validate_boolean}),))
relative_path_settings = ('stylesheet_path',)
- output = None
- """Final translated form of `document`."""
+ config_section = 'html4css1 writer'
+ config_section_dependencies = ('writers',)
def __init__(self):
writers.Writer.__init__(self)
self.translator_class = HTMLTranslator
def translate(self):
- visitor = self.translator_class(self.document)
+ self.visitor = visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.output = visitor.astext()
- self.head_prefix = visitor.head_prefix
- self.stylesheet = visitor.stylesheet
- self.head = visitor.head
- self.body_prefix = visitor.body_prefix
- self.body_pre_docinfo = visitor.body_pre_docinfo
- self.docinfo = visitor.docinfo
- self.body = visitor.body
- self.body_suffix = visitor.body_suffix
+ for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix',
+ 'body_pre_docinfo', 'docinfo', 'body', 'fragment',
+ 'body_suffix'):
+ setattr(self, attr, getattr(visitor, attr))
+ def assemble_parts(self):
+ writers.Writer.assemble_parts(self)
+ for part in ('title', 'subtitle', 'docinfo', 'body', 'header',
+ 'footer', 'meta', 'stylesheet', 'fragment',
+ 'html_prolog', 'html_head', 'html_title', 'html_subtitle',
+ 'html_body'):
+ self.parts[part] = ''.join(getattr(self.visitor, part))
+
class HTMLTranslator(nodes.NodeVisitor):
"""
@@ -137,27 +172,26 @@
- Regardless of the above, in definitions, table cells, field bodies,
option descriptions, and list items, mark the first child with
'class="first"' and the last child with 'class="last"'. The stylesheet
- sets the margins (top & bottom respecively) to 0 for these elements.
+ sets the margins (top & bottom respectively) to 0 for these elements.
The ``no_compact_lists`` setting (``--no-compact-lists`` command-line
option) disables list whitespace optimization.
"""
xml_declaration = '<?xml version="1.0" encoding="%s" ?>\n'
- doctype = ('<!DOCTYPE html'
+ doctype = ('<!DOCTYPE html'
' PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/'
'xhtml1-transitional.dtd">\n')
- html_head = ('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="%s" '
- 'lang="%s">\n<head>\n')
- content_type = ('<meta http-equiv="Content-Type" content="text/html; '
- 'charset=%s" />\n')
+ head_prefix_template = ('<html xmlns="http://www.w3.org/1999/xhtml"'
+ ' xml:lang="%s" lang="%s">\n<head>\n')
+ content_type = ('<meta http-equiv="Content-Type"'
+ ' content="text/html; charset=%s" />\n')
generator = ('<meta name="generator" content="Docutils %s: '
'http://docutils.sourceforge.net/" />\n')
stylesheet_link = '<link rel="stylesheet" href="%s" type="text/css" />\n'
- embedded_stylesheet = '<style type="text/css"><!--\n\n%s\n--></style>\n'
- named_tags = {'a': 1, 'applet': 1, 'form': 1, 'frame': 1, 'iframe': 1,
- 'img': 1, 'map': 1}
+ embedded_stylesheet = '<style type="text/css">\n\n%s\n</style>\n'
+ named_tags = ['a', 'applet', 'form', 'frame', 'iframe', 'img', 'map']
words_and_spaces = re.compile(r'\S+| +|\n')
def __init__(self, document):
@@ -165,49 +199,62 @@
self.settings = settings = document.settings
lcode = settings.language_code
self.language = languages.get_language(lcode)
- self.head_prefix = [
- self.doctype,
- self.html_head % (lcode, lcode),
- self.content_type % settings.output_encoding,
- self.generator % docutils.__version__]
+ self.meta = [self.content_type % settings.output_encoding,
+ self.generator % docutils.__version__]
+ self.head_prefix = []
+ self.html_prolog = []
if settings.xml_declaration:
- self.head_prefix.insert(0, self.xml_declaration
+ self.head_prefix.append(self.xml_declaration
% settings.output_encoding)
- self.head = []
+ # encoding not interpolated:
+ self.html_prolog.append(self.xml_declaration)
+ self.head_prefix.extend([self.doctype,
+ self.head_prefix_template % (lcode, lcode)])
+ self.html_prolog.append(self.doctype)
+ self.head = self.meta[:]
if settings.embed_stylesheet:
- stylesheet = self.get_stylesheet_reference(
+ stylesheet = utils.get_stylesheet_reference(settings,
os.path.join(os.getcwd(), 'dummy'))
+ settings.record_dependencies.add(stylesheet)
stylesheet_text = open(stylesheet).read()
self.stylesheet = [self.embedded_stylesheet % stylesheet_text]
else:
- stylesheet = self.get_stylesheet_reference()
+ stylesheet = utils.get_stylesheet_reference(settings)
if stylesheet:
- self.stylesheet = [self.stylesheet_link % stylesheet]
+ self.stylesheet = [self.stylesheet_link
+ % self.encode(stylesheet)]
else:
self.stylesheet = []
self.body_prefix = ['</head>\n<body>\n']
+ # document title, subtitle display
self.body_pre_docinfo = []
+ # author, date, etc.
self.docinfo = []
self.body = []
+ self.fragment = []
self.body_suffix = ['</body>\n</html>\n']
self.section_level = 0
+ self.initial_header_level = int(settings.initial_header_level)
+ # A heterogenous stack used in conjunction with the tree traversal.
+ # Make sure that the pops correspond to the pushes:
self.context = []
- self.topic_class = ''
+ self.topic_classes = []
self.colspecs = []
self.compact_p = 1
self.compact_simple = None
self.in_docinfo = None
self.in_sidebar = None
+ self.title = []
+ self.subtitle = []
+ self.header = []
+ self.footer = []
+ self.html_head = [self.content_type] # charset not interpolated
+ self.html_title = []
+ self.html_subtitle = []
+ self.html_body = []
+ self.in_document_title = 0
+ self.in_mailto = 0
- def get_stylesheet_reference(self, relative_to=None):
- settings = self.settings
- if settings.stylesheet_path:
- if relative_to == None:
- relative_to = settings._destination
- return utils.relative_path(relative_to, settings.stylesheet_path)
- else:
- return settings.stylesheet
-
def astext(self):
return ''.join(self.head_prefix + self.head
+ self.stylesheet + self.body_prefix
@@ -222,8 +269,24 @@
text = text.replace('"', "&quot;")
text = text.replace(">", "&gt;")
text = text.replace("@", "&#64;") # may thwart some address harvesters
+ # Replace the non-breaking space character with the HTML entity:
+ text = text.replace(u'\u00a0', "&nbsp;")
return text
+ def cloak_mailto(self, uri):
+ """Try to hide a mailto: URL from harvesters."""
+ addr = uri.split(':', 1)[1]
+ if '?' in addr:
+ addr, query = addr.split('?', 1)
+ query = '?' + query
+ else:
+ query = ''
+ escaped = ['%%%02X' % ord(c) for c in addr]
+ return 'mailto:%s%s' % (''.join(escaped), query)
+
+ def cloak_email(self, addr):
+ return addr.replace('@', ' at ').replace('.', ' dot ')
+
def attval(self, text,
whitespace=re.compile('[\n\r\t\v\f]')):
"""Cleanse, HTML encode, and return attribute value text."""
@@ -235,42 +298,56 @@
are extracted), tag name, and optional attributes.
"""
tagname = tagname.lower()
+ prefix = []
atts = {}
for (name, value) in attributes.items():
atts[name.lower()] = value
- for att in ('class',): # append to node attribute
- if node.has_key(att) or atts.has_key(att):
- atts[att] = \
- (node.get(att, '') + ' ' + atts.get(att, '')).strip()
- for att in ('id',): # node attribute overrides
- if node.has_key(att):
- atts[att] = node[att]
- if atts.has_key('id') and self.named_tags.has_key(tagname):
+ classes = node.get('classes', [])
+ if atts.has_key('class'):
+ classes.append(atts['class'])
+ if classes:
+ atts['class'] = ' '.join(classes)
+ assert not atts.has_key('id')
+ if node.get('ids'):
+ atts['id'] = node['ids'][0]
+ for id in node['ids'][1:]:
+ prefix.append('<span id="%s"></span>' % id)
+ if atts.has_key('id') and tagname in self.named_tags:
atts['name'] = atts['id'] # for compatibility with old browsers
attlist = atts.items()
attlist.sort()
parts = [tagname]
for name, value in attlist:
- if value is None: # boolean attribute
- # According to the HTML spec, ``<element boolean>`` is good,
- # ``<element boolean="boolean">`` is bad.
- # (But the XHTML (XML) spec says the opposite. <sigh>)
- parts.append(name.lower())
- elif isinstance(value, ListType):
- values = [str(v) for v in value]
+ # value=None was used for boolean attributes without
+ # value, but this isn't supported by XHTML.
+ assert value is not None
+ if isinstance(value, ListType):
+ values = [unicode(v) for v in value]
parts.append('%s="%s"' % (name.lower(),
self.attval(' '.join(values))))
else:
- parts.append('%s="%s"' % (name.lower(),
- self.attval(str(value))))
- return '<%s%s>%s' % (' '.join(parts), infix, suffix)
+ try:
+ uval = unicode(value)
+ except TypeError: # for Python 2.1 compatibility:
+ uval = unicode(str(value))
+ parts.append('%s="%s"' % (name.lower(), self.attval(uval)))
+ return ''.join(prefix) + '<%s%s>' % (' '.join(parts), infix) + suffix
def emptytag(self, node, tagname, suffix='\n', **attributes):
"""Construct and return an XML-compatible empty tag."""
return self.starttag(node, tagname, suffix, infix=' /', **attributes)
+ def set_first_last(self, node):
+ children = [n for n in node if not isinstance(n, nodes.Invisible)]
+ if children:
+ children[0]['classes'].append('first')
+ children[-1]['classes'].append('last')
+
def visit_Text(self, node):
- self.body.append(self.encode(node.astext()))
+ text = node.astext()
+ if self.in_mailto and self.settings.cloak_email_addresses:
+ text = self.cloak_email(text)
+ self.body.append(self.encode(text))
def depart_Text(self, node):
pass
@@ -301,8 +378,8 @@
self.body.append(self.starttag(node, 'div',
CLASS=(name or 'admonition')))
if name:
- self.body.append('<p class="admonition-title">'
- + self.language.labels[name] + '</p>\n')
+ node.insert(0, nodes.title(name, self.language.labels[name]))
+ self.set_first_last(node)
def depart_admonition(self, node=None):
self.body.append('</div>\n')
@@ -362,7 +439,7 @@
self.compact_p = None
self.compact_simple = (self.settings.compact_lists and
(self.compact_simple
- or self.topic_class == 'contents'
+ or self.topic_classes == ['contents']
or self.check_simple_list(node)))
if self.compact_simple and not old_compact_simple:
atts['class'] = 'simple'
@@ -385,10 +462,10 @@
self.depart_admonition()
def visit_citation(self, node):
- self.body.append(self.starttag(node, 'table', CLASS='citation',
+ self.body.append(self.starttag(node, 'table',
+ CLASS='docutils citation',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
- '<col />\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
@@ -398,13 +475,9 @@
'</tbody>\n</table>\n')
def visit_citation_reference(self, node):
- href = ''
- if node.has_key('refid'):
- href = '#' + node['refid']
- elif node.has_key('refname'):
- href = '#' + self.document.nameids[node['refname']]
- self.body.append(self.starttag(node, 'a', '[', href=href,
- CLASS='citation-reference'))
+ href = '#' + node['refid']
+ self.body.append(self.starttag(
+ node, 'a', '[', CLASS='citation-reference', href=href))
def depart_citation_reference(self, node):
self.body.append(']</a>')
@@ -418,6 +491,8 @@
def visit_colspec(self, node):
self.colspecs.append(node)
+ # "stubs" list is an attribute of the tgroup element:
+ node.parent.stubs.append(node.attributes.get('stub'))
def depart_colspec(self, node):
pass
@@ -439,6 +514,17 @@
# Content already processed:
raise nodes.SkipNode
+ def visit_compound(self, node):
+ self.body.append(self.starttag(node, 'div', CLASS='compound'))
+ if len(node) > 1:
+ node[0]['classes'].append('compound-first')
+ node[-1]['classes'].append('compound-last')
+ for child in node[1:-1]:
+ child['classes'].append('compound-middle')
+
+ def depart_compound(self, node):
+ self.body.append('</div>\n')
+
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact', meta=None)
@@ -472,15 +558,13 @@
def visit_definition(self, node):
self.body.append('</dt>\n')
self.body.append(self.starttag(node, 'dd', ''))
- if len(node):
- node[0].set_class('first')
- node[-1].set_class('last')
+ self.set_first_last(node)
def depart_definition(self, node):
self.body.append('</dd>\n')
def visit_definition_list(self, node):
- self.body.append(self.starttag(node, 'dl'))
+ self.body.append(self.starttag(node, 'dl', CLASS='docutils'))
def depart_definition_list(self, node):
self.body.append('</dl>\n')
@@ -493,16 +577,15 @@
def visit_description(self, node):
self.body.append(self.starttag(node, 'td', ''))
- if len(node):
- node[0].set_class('first')
- node[-1].set_class('last')
+ self.set_first_last(node)
def depart_description(self, node):
self.body.append('</td>')
def visit_docinfo(self, node):
self.context.append(len(self.body))
- self.body.append(self.starttag(node, 'table', CLASS='docinfo',
+ self.body.append(self.starttag(node, 'table',
+ CLASS='docinfo',
frame="void", rules="none"))
self.body.append('<col class="docinfo-name" />\n'
'<col class="docinfo-content" />\n'
@@ -513,22 +596,22 @@
self.body.append('</tbody>\n</table>\n')
self.in_docinfo = None
start = self.context.pop()
- self.body_pre_docinfo = self.body[:start]
self.docinfo = self.body[start:]
self.body = []
def visit_docinfo_item(self, node, name, meta=1):
if meta:
- self.head.append('<meta name="%s" content="%s" />\n'
- % (name, self.attval(node.astext())))
+ meta_tag = '<meta name="%s" content="%s" />\n' \
+ % (name, self.attval(node.astext()))
+ self.add_meta(meta_tag)
self.body.append(self.starttag(node, 'tr', ''))
self.body.append('<th class="docinfo-name">%s:</th>\n<td>'
% self.language.labels[name])
if len(node):
if isinstance(node[0], nodes.Element):
- node[0].set_class('first')
- if isinstance(node[0], nodes.Element):
- node[-1].set_class('last')
+ node[0]['classes'].append('first')
+ if isinstance(node[-1], nodes.Element):
+ node[-1]['classes'].append('last')
def depart_docinfo_item(self):
self.body.append('</td></tr>\n')
@@ -540,10 +623,20 @@
self.body.append('\n</pre>\n')
def visit_document(self, node):
- self.body.append(self.starttag(node, 'div', CLASS='document'))
+ # empty or untitled document?
+ if not len(node) or not isinstance(node[0], nodes.title):
+ # for XHTML conformance, modulo IE6 appeasement:
+ self.head.append('<title></title>\n')
def depart_document(self, node):
- self.body.append('</div>\n')
+ self.fragment.extend(self.body)
+ self.body_prefix.append(self.starttag(node, 'div', CLASS='document'))
+ self.body_suffix.insert(0, '</div>\n')
+ # skip content-type meta tag with interpolated charset value:
+ self.html_head.extend(self.head[1:])
+ self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo
+ + self.docinfo + self.body
+ + self.body_suffix[:-1])
def visit_emphasis(self, node):
self.body.append('<em>')
@@ -552,22 +645,29 @@
self.body.append('</em>')
def visit_entry(self, node):
+ atts = {'class': []}
if isinstance(node.parent.parent, nodes.thead):
+ atts['class'].append('head')
+ if node.parent.parent.parent.stubs[node.parent.column]:
+ # "stubs" list is an attribute of the tgroup element
+ atts['class'].append('stub')
+ if atts['class']:
tagname = 'th'
+ atts['class'] = ' '.join(atts['class'])
else:
tagname = 'td'
- atts = {}
+ del atts['class']
+ node.parent.column += 1
if node.has_key('morerows'):
atts['rowspan'] = node['morerows'] + 1
if node.has_key('morecols'):
atts['colspan'] = node['morecols'] + 1
+ node.parent.column += node['morecols']
self.body.append(self.starttag(node, tagname, '', **atts))
self.context.append('</%s>\n' % tagname.lower())
if len(node) == 0: # empty cell
self.body.append('&nbsp;')
- else:
- node[0].set_class('first')
- node[-1].set_class('last')
+ self.set_first_last(node)
def depart_entry(self, node):
self.body.append(self.context.pop())
@@ -590,7 +690,7 @@
self.compact_p = None
self.compact_simple = (self.settings.compact_lists and
(self.compact_simple
- or self.topic_class == 'contents'
+ or self.topic_classes == ['contents']
or self.check_simple_list(node)))
if self.compact_simple and not old_compact_simple:
atts['class'] = (atts.get('class', '') + ' simple').strip()
@@ -614,16 +714,15 @@
def visit_field_body(self, node):
self.body.append(self.starttag(node, 'td', '', CLASS='field-body'))
- if len(node):
- node[0].set_class('first')
- node[-1].set_class('last')
+ self.set_first_last(node)
def depart_field_body(self, node):
self.body.append('</td>\n')
def visit_field_list(self, node):
self.body.append(self.starttag(node, 'table', frame='void',
- rules='none', CLASS='field-list'))
+ rules='none',
+ CLASS='docutils field-list'))
self.body.append('<col class="field-name" />\n'
'<col class="field-body" />\n'
'<tbody valign="top">\n')
@@ -637,7 +736,8 @@
atts['class'] = 'docinfo-name'
else:
atts['class'] = 'field-name'
- if len(node.astext()) > 14:
+ if ( self.settings.field_name_limit
+ and len(node.astext()) > self.settings.field_name_limit):
atts['colspan'] = 2
self.context.append('</tr>\n<tr><td>&nbsp;</td>')
else:
@@ -652,6 +752,8 @@
atts = {'class': 'figure'}
if node.get('width'):
atts['style'] = 'width: %spx' % node['width']
+ if node.get('align'):
+ atts['align'] = node['align']
self.body.append(self.starttag(node, 'div', **atts))
def depart_figure(self, node):
@@ -662,14 +764,17 @@
def depart_footer(self, node):
start = self.context.pop()
- footer = (['<hr class="footer"/>\n',
- self.starttag(node, 'div', CLASS='footer')]
- + self.body[start:] + ['</div>\n'])
+ footer = [self.starttag(node, 'div', CLASS='footer'),
+ '<hr class="footer" />\n']
+ footer.extend(self.body[start:])
+ footer.append('\n</div>\n')
+ self.footer.extend(footer)
self.body_suffix[:0] = footer
del self.body[start:]
def visit_footnote(self, node):
- self.body.append(self.starttag(node, 'table', CLASS='footnote',
+ self.body.append(self.starttag(node, 'table',
+ CLASS='docutils footnote',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
@@ -677,47 +782,49 @@
self.footnote_backrefs(node)
def footnote_backrefs(self, node):
- if self.settings.footnote_backlinks and node.hasattr('backrefs'):
- backrefs = node['backrefs']
+ backlinks = []
+ backrefs = node['backrefs']
+ if self.settings.footnote_backlinks and backrefs:
if len(backrefs) == 1:
self.context.append('')
- self.context.append('<a class="fn-backref" href="#%s" '
- 'name="%s">' % (backrefs[0], node['id']))
+ self.context.append(
+ '<a class="fn-backref" href="#%s" name="%s">'
+ % (backrefs[0], node['ids'][0]))
else:
i = 1
- backlinks = []
for backref in backrefs:
backlinks.append('<a class="fn-backref" href="#%s">%s</a>'
% (backref, i))
i += 1
self.context.append('<em>(%s)</em> ' % ', '.join(backlinks))
- self.context.append('<a name="%s">' % node['id'])
+ self.context.append('<a name="%s">' % node['ids'][0])
else:
self.context.append('')
- self.context.append('<a name="%s">' % node['id'])
+ self.context.append('<a name="%s">' % node['ids'][0])
+ # If the node consists of more than just a label.
+ if len(node) > 1:
+ # If there are preceding backlinks, we do not set class
+ # 'first', because we need to retain the top-margin.
+ if not backlinks:
+ node[1]['classes'].append('first')
+ node[-1]['classes'].append('last')
def depart_footnote(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
def visit_footnote_reference(self, node):
- href = ''
- if node.has_key('refid'):
- href = '#' + node['refid']
- elif node.has_key('refname'):
- href = '#' + self.document.nameids[node['refname']]
+ href = '#' + node['refid']
format = self.settings.footnote_references
if format == 'brackets':
suffix = '['
self.context.append(']')
- elif format == 'superscript':
+ else:
+ assert format == 'superscript'
suffix = '<sup>'
self.context.append('</sup>')
- else: # shouldn't happen
- suffix = '???'
- self.content.append('???')
- self.body.append(self.starttag(node, 'a', suffix, href=href,
- CLASS='footnote-reference'))
+ self.body.append(self.starttag(node, 'a', suffix,
+ CLASS='footnote-reference', href=href))
def depart_footnote_reference(self, node):
self.body.append(self.context.pop() + '</a>')
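
Note that footnote_backrefs now reads node['ids'][0] (plural) and distinguishes a single backref, where the label itself becomes the link, from several, where an "(1, 2)" prefix is emitted. A small sketch of the markup pushed onto the context stack when backlinks are enabled:

    # Returns (prefix, label_anchor) roughly as the writer does; the ids
    # here are invented.
    def backref_markup(footnote_id, backrefs):
        if len(backrefs) == 1:
            return ('', '<a class="fn-backref" href="#%s" name="%s">'
                    % (backrefs[0], footnote_id))
        links = ['<a class="fn-backref" href="#%s">%s</a>' % (ref, i + 1)
                 for i, ref in enumerate(backrefs)]
        return ('<em>(%s)</em> ' % ', '.join(links),
                '<a name="%s">' % footnote_id)

    print(backref_markup('footnote-1', ['ref-a']))
    print(backref_markup('footnote-2', ['ref-a', 'ref-b']))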
@@ -733,9 +840,11 @@
def depart_header(self, node):
start = self.context.pop()
- self.body_prefix.append(self.starttag(node, 'div', CLASS='header'))
- self.body_prefix.extend(self.body[start:])
- self.body_prefix.append('<hr />\n</div>\n')
+ header = [self.starttag(node, 'div', CLASS='header')]
+ header.extend(self.body[start:])
+ header.append('\n<hr class="header"/>\n</div>\n')
+ self.body_prefix.extend(header)
+ self.header.extend(header)
del self.body[start:]
def visit_hint(self, node):
@@ -745,18 +854,50 @@
self.depart_admonition()
def visit_image(self, node):
- atts = node.attributes.copy()
+ atts = node.non_default_attributes()
+ if atts.has_key('classes'):
+ del atts['classes'] # prevent duplication with node attrs
atts['src'] = atts['uri']
del atts['uri']
+ if atts.has_key('scale'):
+ if Image and not (atts.has_key('width')
+ and atts.has_key('height')):
+ try:
+ im = Image.open(str(atts['src']))
+ except (IOError, # Source image can't be found or opened
+ UnicodeError): # PIL doesn't like Unicode paths.
+ pass
+ else:
+ if not atts.has_key('width'):
+ atts['width'] = im.size[0]
+ if not atts.has_key('height'):
+ atts['height'] = im.size[1]
+ del im
+ if atts.has_key('width'):
+ atts['width'] = int(round(atts['width']
+ * (float(atts['scale']) / 100)))
+ if atts.has_key('height'):
+ atts['height'] = int(round(atts['height']
+ * (float(atts['scale']) / 100)))
+ del atts['scale']
if not atts.has_key('alt'):
atts['alt'] = atts['src']
if isinstance(node.parent, nodes.TextElement):
self.context.append('')
else:
- self.body.append('<p>')
- self.context.append('</p>\n')
+ div_atts = self.image_div_atts(node)
+ self.body.append(self.starttag({}, 'div', '', **div_atts))
+ self.context.append('</div>\n')
self.body.append(self.emptytag(node, 'img', '', **atts))
+ def image_div_atts(self, image_node):
+ div_atts = {}
+ div_atts['class'] = ' '.join(['image'] + image_node['classes'])
+ if image_node.attributes.has_key('align'):
+ div_atts['align'] = self.attval(image_node.attributes['align'])
+ div_atts['class'] += ' align-%s' % div_atts['align']
+ return div_atts
+
def depart_image(self, node):
self.body.append(self.context.pop())
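
The new scale handling only kicks in when PIL can be imported: missing dimensions are read from the image file, then width and height are multiplied by scale/100. A hedged sketch of the arithmetic (the Image import is assumed to live near the top of the module; the path and numbers below are invented):

    try:
        from PIL import Image     # the module itself may use the old-style import
    except ImportError:
        Image = None

    def scaled_size(path, scale, width=None, height=None):
        if Image and (width is None or height is None):
            try:
                im = Image.open(path)
            except (IOError, UnicodeError):
                pass              # missing file or unicode path: leave as-is
            else:
                if width is None:
                    width = im.size[0]
                if height is None:
                    height = im.size[1]
        factor = float(scale) / 100
        if width is not None:
            width = int(round(width * factor))
        if height is not None:
            height = int(round(height * factor))
        return width, height

    print(scaled_size('logo.png', 50, width=640, height=480))   # (320, 240)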
@@ -785,23 +926,32 @@
def depart_legend(self, node):
self.body.append('</div>\n')
+ def visit_line(self, node):
+ self.body.append(self.starttag(node, 'div', suffix='', CLASS='line'))
+ if not len(node):
+ self.body.append('<br />')
+
+ def depart_line(self, node):
+ self.body.append('</div>\n')
+
def visit_line_block(self, node):
- self.body.append(self.starttag(node, 'pre', CLASS='line-block'))
+ self.body.append(self.starttag(node, 'div', CLASS='line-block'))
def depart_line_block(self, node):
- self.body.append('\n</pre>\n')
+ self.body.append('</div>\n')
def visit_list_item(self, node):
self.body.append(self.starttag(node, 'li', ''))
if len(node):
- node[0].set_class('first')
+ node[0]['classes'].append('first')
def depart_list_item(self, node):
self.body.append('</li>\n')
def visit_literal(self, node):
"""Process text to prevent tokens from wrapping."""
- self.body.append(self.starttag(node, 'tt', '', CLASS='literal'))
+ self.body.append(
+ self.starttag(node, 'tt', '', CLASS='docutils literal'))
text = node.astext()
for token in self.words_and_spaces.findall(text):
if token.strip():
@@ -825,11 +975,16 @@
self.body.append('\n</pre>\n')
def visit_meta(self, node):
- self.head.append(self.emptytag(node, 'meta', **node.attributes))
+ meta = self.emptytag(node, 'meta', **node.non_default_attributes())
+ self.add_meta(meta)
def depart_meta(self, node):
pass
+ def add_meta(self, tag):
+ self.meta.append(tag)
+ self.head.append(tag)
+
def visit_note(self, node):
self.visit_admonition(node, 'note')
@@ -839,8 +994,10 @@
def visit_option(self, node):
if self.context[-1]:
self.body.append(', ')
+ self.body.append(self.starttag(node, 'span', '', CLASS='option'))
def depart_option(self, node):
+ self.body.append('</span>')
self.context[-1] += 1
def visit_option_argument(self, node):
@@ -852,12 +1009,14 @@
def visit_option_group(self, node):
atts = {}
- if len(node.astext()) > 14:
+ if ( self.settings.option_limit
+ and len(node.astext()) > self.settings.option_limit):
atts['colspan'] = 2
self.context.append('</tr>\n<tr><td> </td>')
else:
self.context.append('')
- self.body.append(self.starttag(node, 'td', **atts))
+ self.body.append(
+ self.starttag(node, 'td', CLASS='option-group', **atts))
self.body.append('<kbd>')
self.context.append(0) # count number of options
@@ -868,7 +1027,7 @@
def visit_option_list(self, node):
self.body.append(
- self.starttag(node, 'table', CLASS='option-list',
+ self.starttag(node, 'table', CLASS='docutils option-list',
frame="void", rules="none"))
self.body.append('<col class="option" />\n'
'<col class="description" />\n'
@@ -884,10 +1043,10 @@
self.body.append('</tr>\n')
def visit_option_string(self, node):
- self.body.append(self.starttag(node, 'span', '', CLASS='option'))
+ pass
def depart_option_string(self, node):
- self.body.append('</span>')
+ pass
def visit_organization(self, node):
self.visit_docinfo_item(node, 'organization')
@@ -895,12 +1054,29 @@
def depart_organization(self, node):
self.depart_docinfo_item()
- def visit_paragraph(self, node):
- # Omit <p> tags if this is an only child and optimizable.
+ def should_be_compact_paragraph(self, node):
+ """
+ Determine if the <p> tags around paragraph ``node`` can be omitted.
+ """
+ if (isinstance(node.parent, nodes.document) or
+ isinstance(node.parent, nodes.compound)):
+ # Never compact paragraphs in document or compound.
+ return 0
+ for key, value in node.attlist():
+ if (node.is_not_default(key) and
+ not (key == 'classes' and value in
+ ([], ['first'], ['last'], ['first', 'last']))):
+ # Attribute which needs to survive.
+ return 0
if (self.compact_simple or
self.compact_p and (len(node.parent) == 1 or
len(node.parent) == 2 and
isinstance(node.parent[0], nodes.label))):
+ return 1
+ return 0
+
+ def visit_paragraph(self, node):
+ if self.should_be_compact_paragraph(node):
self.context.append('')
else:
self.body.append(self.starttag(node, 'p', ''))
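
The effect of should_be_compact_paragraph is easiest to see in the output: with the default settings a sole paragraph inside a simple list item loses its <p> wrapper, while ordinary paragraphs keep theirs. A quick check through the standard publisher interface:

    from docutils.core import publish_string

    source = ('A top-level paragraph keeps its <p> tags.\n'
              '\n'
              '- a one-paragraph item comes out as a bare <li>\n'
              '- so does this one\n')
    html = publish_string(source, writer_name='html',
                          settings_overrides={'output_encoding': 'utf-8'})
    print(html.decode('utf-8'))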
@@ -912,7 +1088,7 @@
def visit_problematic(self, node):
if node.hasattr('refid'):
self.body.append('<a href="#%s" name="%s">' % (node['refid'],
- node['id']))
+ node['ids'][0]))
self.context.append('</a>')
else:
self.context.append('')
@@ -923,23 +1099,42 @@
self.body.append(self.context.pop())
def visit_raw(self, node):
- if node.get('format') == 'html':
+ if 'html' in node.get('format', '').split():
+ t = isinstance(node.parent, nodes.TextElement) and 'span' or 'div'
+ if node['classes']:
+ self.body.append(self.starttag(node, t, suffix=''))
self.body.append(node.astext())
+ if node['classes']:
+ self.body.append('</%s>' % t)
# Keep non-HTML raw text out of output:
raise nodes.SkipNode
def visit_reference(self, node):
+ if isinstance(node.parent, nodes.TextElement):
+ self.context.append('')
+ else: # contains an image
+ assert len(node) == 1 and isinstance(node[0], nodes.image)
+ div_atts = self.image_div_atts(node[0])
+ div_atts['class'] += ' image-reference'
+ self.body.append(self.starttag({}, 'div', '', **div_atts))
+ self.context.append('</div>\n')
if node.has_key('refuri'):
href = node['refuri']
- elif node.has_key('refid'):
+ if ( self.settings.cloak_email_addresses
+ and href.startswith('mailto:')):
+ href = self.cloak_mailto(href)
+ self.in_mailto = 1
+ else:
+ assert node.has_key('refid'), \
+ 'References must have "refuri" or "refid" attribute.'
href = '#' + node['refid']
- elif node.has_key('refname'):
- href = '#' + self.document.nameids[node['refname']]
- self.body.append(self.starttag(node, 'a', '', href=href,
- CLASS='reference'))
+ self.body.append(self.starttag(node, 'a', '', CLASS='reference',
+ href=href))
def depart_reference(self, node):
self.body.append('</a>')
+ self.body.append(self.context.pop())
+ self.in_mailto = 0
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision', meta=None)
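
The --cloak-email-addresses support relies on a cloak_mailto helper that is not part of this hunk; a plausible sketch of what such cloaking amounts to (purely illustrative, the address is made up):

    def cloak_mailto(uri):
        """Try to hide a mailto: URI from trivial address harvesters."""
        # Encode the "@" so the address is not a literal match in the page source.
        return uri.replace('@', '%40')

    print(cloak_mailto('mailto:someone@example.org'))   # mailto:someone%40example.org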
@@ -949,6 +1144,7 @@
def visit_row(self, node):
self.body.append(self.starttag(node, 'tr', ''))
+ node.column = 0
def depart_row(self, node):
self.body.append('</tr>\n')
@@ -969,6 +1165,7 @@
def visit_sidebar(self, node):
self.body.append(self.starttag(node, 'div', CLASS='sidebar'))
+ self.set_first_last(node)
self.in_sidebar = 1
def depart_sidebar(self, node):
@@ -1005,12 +1202,25 @@
self.body.append(self.starttag(node, 'p', '',
CLASS='sidebar-subtitle'))
self.context.append('</p>\n')
- else:
+ elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h2', '', CLASS='subtitle'))
self.context.append('</h2>\n')
+ self.in_document_title = len(self.body)
+ elif isinstance(node.parent, nodes.section):
+ tag = 'h%s' % (self.section_level + self.initial_header_level - 1)
+ self.body.append(
+ self.starttag(node, tag, '', CLASS='section-subtitle') +
+ self.starttag({}, 'span', '', CLASS='section-subtitle'))
+ self.context.append('</span></%s>\n' % tag)
def depart_subtitle(self, node):
self.body.append(self.context.pop())
+ if self.in_document_title:
+ self.subtitle = self.body[self.in_document_title:-1]
+ self.in_document_title = 0
+ self.body_pre_docinfo.extend(self.body)
+ self.html_subtitle.extend(self.body)
+ del self.body[:]
def visit_superscript(self, node):
self.body.append(self.starttag(node, 'sup', ''))
@@ -1019,16 +1229,16 @@
self.body.append('</sup>')
def visit_system_message(self, node):
- if node['level'] < self.document.reporter['writer'].report_level:
+ if node['level'] < self.document.reporter.report_level:
# Level is too low to display:
raise nodes.SkipNode
self.body.append(self.starttag(node, 'div', CLASS='system-message'))
self.body.append('<p class="system-message-title">')
attr = {}
backref_text = ''
- if node.hasattr('id'):
- attr['name'] = node['id']
- if node.hasattr('backrefs'):
+ if node['ids']:
+ attr['name'] = node['ids'][0]
+ if len(node['backrefs']):
backrefs = node['backrefs']
if len(backrefs) == 1:
backref_text = ('; <em><a href="#%s">backlink</a></em>'
@@ -1050,7 +1260,8 @@
a_end = '</a>'
else:
a_start = a_end = ''
- self.body.append('System Message: %s%s/%s%s (<tt>%s</tt>%s)%s</p>\n'
+ self.body.append('System Message: %s%s/%s%s '
+ '(<tt class="docutils">%s</tt>%s)%s</p>\n'
% (a_start, node['type'], node['level'], a_end,
self.encode(node['source']), line, backref_text))
@@ -1059,8 +1270,7 @@
def visit_table(self, node):
self.body.append(
- self.starttag(node, 'table', CLASS="table",
- frame='border', rules='all'))
+ self.starttag(node, 'table', CLASS='docutils', border="1"))
def depart_table(self, node):
self.body.append('</table>\n')
@@ -1068,8 +1278,8 @@
def visit_target(self, node):
if not (node.has_key('refuri') or node.has_key('refid')
or node.has_key('refname')):
- self.body.append(self.starttag(node, 'a', '', CLASS='target'))
- self.context.append('</a>')
+ self.body.append(self.starttag(node, 'span', '', CLASS='target'))
+ self.context.append('</span>')
else:
self.context.append('')
@@ -1099,6 +1309,7 @@
self.body.append(self.starttag(node, 'colgroup'))
# Appended by thead or tbody:
self.context.append('</colgroup>\n')
+ node.stubs = []
def depart_tgroup(self, node):
pass
@@ -1122,45 +1333,65 @@
def visit_title(self, node):
"""Only 6 section levels are supported by HTML."""
check_id = 0
+ close_tag = '</p>\n'
if isinstance(node.parent, nodes.topic):
self.body.append(
- self.starttag(node, 'p', '', CLASS='topic-title'))
+ self.starttag(node, 'p', '', CLASS='topic-title first'))
check_id = 1
elif isinstance(node.parent, nodes.sidebar):
self.body.append(
self.starttag(node, 'p', '', CLASS='sidebar-title'))
check_id = 1
- elif isinstance(node.parent, nodes.admonition):
+ elif isinstance(node.parent, nodes.Admonition):
self.body.append(
self.starttag(node, 'p', '', CLASS='admonition-title'))
check_id = 1
+ elif isinstance(node.parent, nodes.table):
+ self.body.append(
+ self.starttag(node, 'caption', ''))
+ check_id = 1
+ close_tag = '</caption>\n'
elif self.section_level == 0:
+ assert node.parent is self.document
# document title
self.head.append('<title>%s</title>\n'
% self.encode(node.astext()))
self.body.append(self.starttag(node, 'h1', '', CLASS='title'))
self.context.append('</h1>\n')
+ self.in_document_title = len(self.body)
else:
+ assert isinstance(node.parent, nodes.section)
+ h_level = self.section_level + self.initial_header_level - 1
+ atts = {}
+ if (len(node.parent) >= 2 and
+ isinstance(node.parent[1], nodes.subtitle)):
+ atts['CLASS'] = 'with-subtitle'
self.body.append(
- self.starttag(node, 'h%s' % self.section_level, ''))
+ self.starttag(node, 'h%s' % h_level, '', **atts))
atts = {}
- if node.parent.hasattr('id'):
- atts['name'] = node.parent['id']
+ if node.parent['ids']:
+ atts['name'] = node.parent['ids'][0]
if node.hasattr('refid'):
atts['class'] = 'toc-backref'
atts['href'] = '#' + node['refid']
self.body.append(self.starttag({}, 'a', '', **atts))
- self.context.append('</a></h%s>\n' % (self.section_level))
+ self.context.append('</a></h%s>\n' % (h_level))
if check_id:
- if node.parent.hasattr('id'):
+ if node.parent['ids']:
self.body.append(
- self.starttag({}, 'a', '', name=node.parent['id']))
- self.context.append('</a></p>\n')
+ self.starttag({}, 'a', '', name=node.parent['ids'][0]))
+ self.context.append('</a>' + close_tag)
else:
- self.context.append('</p>\n')
+ self.context.append(close_tag)
def depart_title(self, node):
self.body.append(self.context.pop())
+ if self.in_document_title:
+ self.title = self.body[self.in_document_title:-1]
+ self.in_document_title = 0
+ self.body_pre_docinfo.extend(self.body)
+ self.html_title.extend(self.body)
+ del self.body[:]
def visit_title_reference(self, node):
self.body.append(self.starttag(node, 'cite', ''))
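
Section titles now pick their heading tag from the nesting depth plus the --initial-header-level setting (default 1), i.e. 'h%s' % (section_level + initial_header_level - 1). A tiny sketch, bearing in mind the docstring's point that HTML only defines h1 through h6:

    def heading_tag(section_level, initial_header_level=1):
        level = section_level + initial_header_level - 1
        # HTML stops at h6; deeper nesting falls outside what it defines.
        return 'h%d' % level

    print(heading_tag(1))                          # h1
    print(heading_tag(2, initial_header_level=2))  # h3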
@@ -1170,14 +1401,14 @@
def visit_topic(self, node):
self.body.append(self.starttag(node, 'div', CLASS='topic'))
- self.topic_class = node.get('class')
+ self.topic_classes = node['classes']
def depart_topic(self, node):
self.body.append('</div>\n')
- self.topic_class = ''
+ self.topic_classes = []
def visit_transition(self, node):
- self.body.append(self.emptytag(node, 'hr'))
+ self.body.append(self.emptytag(node, 'hr', CLASS='docutils'))
def depart_transition(self, node):
pass
@@ -1202,7 +1433,7 @@
class SimpleListChecker(nodes.GenericNodeVisitor):
"""
- Raise `nodes.SkipNode` if non-simple list item is encountered.
+ Raise `nodes.NodeFound` if non-simple list item is encountered.
Here "simple" means a list item containing nothing other than a single
paragraph, a simple list, or a paragraph followed by a simple list.
@@ -1219,7 +1450,7 @@
def visit_list_item(self, node):
children = []
- for child in node.get_children():
+ for child in node.children:
if not isinstance(child, nodes.Invisible):
children.append(child)
if (children and isinstance(children[0], nodes.paragraph)
@@ -1236,7 +1467,7 @@
def invisible_visit(self, node):
"""Invisible nodes should be ignored."""
- pass
+ raise nodes.SkipNode
visit_comment = invisible_visit
visit_substitution_definition = invisible_visit
Modified: Zope3/trunk/src/docutils/writers/latex2e.py
===================================================================
--- Zope3/trunk/src/docutils/writers/latex2e.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/writers/latex2e.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,8 +1,8 @@
"""
:Author: Engelbert Gruber
:Contact: grubert at users.sourceforge.net
-:Revision: $Revision: 1.1 $
-:Date: $Date: 2003/07/30 20:14:07 $
+:Revision: $Revision: 3367 $
+:Date: $Date: 2005-05-26 02:44:13 +0200 (Thu, 26 May 2005) $
:Copyright: This module has been placed in the public domain.
LaTeX2e document tree Writer.
@@ -10,8 +10,8 @@
__docformat__ = 'reStructuredText'
-# code contributions from several people included, thanks too all.
-# some named: David Abrahams, Julien Letessier, who is missing.
+# code contributions from several people included, thanks to all.
+# some named: David Abrahams, Julien Letessier, Lele Gaifax, and others.
#
# convention deactivate code by two # e.g. ##.
@@ -20,7 +20,7 @@
import re
import string
from types import ListType
-from docutils import writers, nodes, languages
+from docutils import frontend, nodes, languages, writers, utils
class Writer(writers.Writer):
@@ -33,51 +33,121 @@
(('Specify documentclass. Default is "article".',
['--documentclass'],
{'default': 'article', }),
+ ('Specify document options. Multiple options can be given, '
+ 'separated by commas. Default is "10pt,a4paper".',
+ ['--documentoptions'],
+ {'default': '10pt,a4paper', }),
+ ('Use LaTeX footnotes. LaTeX supports only numbered footnotes (does it?). '
+ 'Default: no, uses figures.',
+ ['--use-latex-footnotes'],
+ {'default': 0, 'action': 'store_true',
+ 'validator': frontend.validate_boolean}),
('Format for footnote references: one of "superscript" or '
- '"brackets". Default is "brackets".',
+ '"brackets". Default is "superscript".',
['--footnote-references'],
- {'choices': ['superscript', 'brackets'], 'default': 'brackets',
- 'metavar': '<format>'}),
+ {'choices': ['superscript', 'brackets'], 'default': 'superscript',
+ 'metavar': '<format>',
+ 'overrides': 'trim_footnote_reference_space'}),
+ ('Use LaTeX citations. '
+ 'Default: no, uses figures which might get mixed with images.',
+ ['--use-latex-citations'],
+ {'default': 0, 'action': 'store_true',
+ 'validator': frontend.validate_boolean}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
- ('Specify a stylesheet file. The file will be "input" by latex '
- 'in the document header. Default is "style.tex". '
- 'If this is set to "" disables input.'
- 'Overridden by --stylesheet-path.',
+ ('Specify a stylesheet file. The file will be "input" by latex in '
+ 'the document header. Default is no stylesheet (""). '
+ 'Overrides --stylesheet-path.',
['--stylesheet'],
- {'default': 'style.tex', 'metavar': '<file>'}),
+ {'default': '', 'metavar': '<file>',
+ 'overrides': 'stylesheet_path'}),
('Specify a stylesheet file, relative to the current working '
- 'directory.'
- 'Overrides --stylesheet.',
+ 'directory. Overrides --stylesheet.',
['--stylesheet-path'],
- {'metavar': '<file>'}),
- ('Link to the stylesheet in the output LaTeX file. This is the '
- 'default.',
- ['--link-stylesheet'],
- {'dest': 'embed_stylesheet', 'action': 'store_false'}),
- ('Embed the stylesheet in the output LaTeX file. The stylesheet '
- 'file must be accessible during processing (--stylesheet-path is '
- 'recommended).',
- ['--embed-stylesheet'],
- {'action': 'store_true'}),
- ('Table of contents by docutils (default) or latex. Latex(writer) '
+ {'metavar': '<file>', 'overrides': 'stylesheet'}),
+ ('Table of contents by docutils (default) or latex. Latex (writer) '
'supports only one ToC per document, but docutils does not write '
'pagenumbers.',
- ['--use-latex-toc'], {'default': 0}),
+ ['--use-latex-toc'],
+ {'default': 0, 'action': 'store_true',
+ 'validator': frontend.validate_boolean}),
+ ('Let LaTeX print author and date, do not show it in docutils '
+ 'document info.',
+ ['--use-latex-docinfo'],
+ {'default': 0, 'action': 'store_true',
+ 'validator': frontend.validate_boolean}),
('Color of any hyperlinks embedded in text '
'(default: "blue", "0" to disable).',
- ['--hyperlink-color'], {'default': 'blue'}),))
+ ['--hyperlink-color'], {'default': 'blue'}),
+ ('Enable compound enumerators for nested enumerated lists '
+ '(e.g. "1.2.a.ii"). Default: disabled.',
+ ['--compound-enumerators'],
+ {'default': None, 'action': 'store_true',
+ 'validator': frontend.validate_boolean}),
+ ('Disable compound enumerators for nested enumerated lists. This is '
+ 'the default.',
+ ['--no-compound-enumerators'],
+ {'action': 'store_false', 'dest': 'compound_enumerators'}),
+ ('Enable section ("." subsection ...) prefixes for compound '
+ 'enumerators. This has no effect without --compound-enumerators. '
+ 'Default: disabled.',
+ ['--section-prefix-for-enumerators'],
+ {'default': None, 'action': 'store_true',
+ 'validator': frontend.validate_boolean}),
+ ('Disable section prefixes for compound enumerators. '
+ 'This is the default.',
+ ['--no-section-prefix-for-enumerators'],
+ {'action': 'store_false', 'dest': 'section_prefix_for_enumerators'}),
+ ('Set the separator between section number and enumerator '
+ 'for compound enumerated lists. Default is "-".',
+ ['--section-enumerator-separator'],
+ {'default': '-', 'metavar': '<char>'}),
+ ('When possible, use verbatim for literal-blocks. '
+ 'Default is to always use the mbox environment.',
+ ['--use-verbatim-when-possible'],
+ {'default': 0, 'action': 'store_true',
+ 'validator': frontend.validate_boolean}),
+ ('Table style. "standard" with horizontal and vertical lines, '
+ '"booktabs" (LaTeX booktabs style) only horizontal lines '
+ 'above and below the table and below the header or "nolines". '
+ 'Default: "standard"',
+ ['--table-style'],
+ {'choices': ['standard', 'booktabs','nolines'], 'default': 'standard',
+ 'metavar': '<format>'}),
+ ('LaTeX graphicx package option. '
+ 'Possible values are "dvips", "pdftex". "auto" includes LaTeX code '
+ 'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. '
+ 'Default is no option.',
+ ['--graphicx-option'],
+ {'default': ''}),
+ ('LaTeX font encoding. '
+ 'Possible values are "T1", "OT1", "" or some other fontenc option. '
+ 'The font encoding influences available symbols, e.g. "<<" as one '
+ 'character. Default is "" which leads to package "ae" (a T1 '
+ 'emulation using CM fonts).',
+ ['--font-encoding'],
+ {'default': ''}),
+ ),)
settings_defaults = {'output_encoding': 'latin-1'}
+ relative_path_settings = ('stylesheet_path',)
+
+ config_section = 'latex2e writer'
+ config_section_dependencies = ('writers',)
+
output = None
"""Final translated form of `document`."""
+ def __init__(self):
+ writers.Writer.__init__(self)
+ self.translator_class = LaTeXTranslator
+
def translate(self):
- visitor = LaTeXTranslator(self.document)
+ visitor = self.translator_class(self.document)
self.document.walkabout(visitor)
self.output = visitor.astext()
self.head_prefix = visitor.head_prefix
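
All of the new switches are ordinary docutils settings, so they can also be passed programmatically as settings_overrides; the keys below are the dests of --use-latex-footnotes, --table-style and --font-encoding from the spec above (a usage sketch, not part of the checkin):

    from docutils.core import publish_string

    latex = publish_string(source='Testing the *latex2e* writer.\n',
                           writer_name='latex',
                           settings_overrides={'use_latex_footnotes': 1,
                                               'table_style': 'booktabs',
                                               'font_encoding': 'T1'})
    print(latex.decode('latin-1'))   # output_encoding defaults to latin-1 here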
@@ -93,14 +163,21 @@
* latex does not support multiple tocs in one document.
(might be no limitation except for docutils documentation)
-* width
+* width
* linewidth - width of a line in the local environment
* textwidth - the width of text on the page
Maybe always use linewidth ?
-"""
+ *Bug* inside a minipage (e.g. a Sidebar) the linewidth is
+ not changed; this needs a fix in docutils so that tables
+ are not too wide.
+
+ So we add locallinewidth, set it initially and
+ on entering a sidebar, and reset it on exit.
+"""
+
class Babel:
"""Language specifics for LaTeX."""
# country code by a.schlock.
@@ -155,13 +232,13 @@
# pdflatex does not produce double quotes for ngerman in tt.
self.double_quote_replacment = None
if re.search('^de',self.language):
- # maybe use: {\glqq} {\grqq}.
- self.quotes = ("\"`", "\"'")
+ #self.quotes = ("\"`", "\"'")
+ self.quotes = ('{\\glqq}', '{\\grqq}')
self.double_quote_replacment = "{\\dq}"
- else:
+ else:
self.quotes = ("``", "''")
self.quote_index = 0
-
+
def next_quote(self):
q = self.quotes[self.quote_index]
self.quote_index = (self.quote_index+1)%2
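
For ngerman the quote pair is now emitted with the babel commands directly. next_quote itself just alternates between the opening and closing form on every double quote it sees; a sketch:

    class QuoteCycler:
        def __init__(self, quotes=('``', "''")):
            self.quotes = quotes
            self.index = 0
        def next_quote(self):
            q = self.quotes[self.index]
            self.index = (self.index + 1) % 2
            return q

    cycler = QuoteCycler(('{\\glqq}', '{\\grqq}'))
    out = []
    for char in 'ein "Zitat" im Text':
        if char == '"':
            out.append(cycler.next_quote())
        else:
            out.append(char)
    print(''.join(out))   # ein {\glqq}Zitat{\grqq} im Text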
@@ -205,6 +282,18 @@
' \\renewcommand{\\makelabel}{\\optionlistlabel}}\n'
'}{\\end{list}}\n',
],
+ 'lineblock_environment' : [
+ '\\newlength{\\lineblockindentation}\n'
+ '\\setlength{\\lineblockindentation}{2.5em}\n'
+ '\\newenvironment{lineblock}[1]\n'
+ '{\\begin{list}{}\n'
+ ' {\\setlength{\\partopsep}{\\parskip}\n'
+ ' \\addtolength{\\partopsep}{\\baselineskip}\n'
+ ' \\topsep0pt\\itemsep0.15\\baselineskip\\parsep0pt\n'
+ ' \\leftmargin#1}\n'
+ ' \\raggedright}\n'
+ '{\\end{list}}\n'
+ ],
'footnote_floats' : [
'% begin: floats for footnotes tweaking.\n',
'\\setlength{\\floatsep}{0.5em}\n',
@@ -218,73 +307,317 @@
'\\setcounter{bottomnumber}{50}\n',
'% end floats for footnotes\n',
],
- 'some_commands' : [
+ 'some_commands' : [
'% some commands, that could be overwritten in the style file.\n'
'\\newcommand{\\rubric}[1]'
'{\\subsection*{~\\hfill {\\it #1} \\hfill ~}}\n'
+ '\\newcommand{\\titlereference}[1]{\\textsl{#1}}\n'
'% end of "some commands"\n',
- ]
+ ]
}
+class DocumentClass:
+ """Details of a LaTeX document class."""
+ # BUG: LaTeX has no deeper sections (actually paragraph is not a
+ # section either).
+ # BUG: No support for unknown document classes. Make 'article'
+ # default?
+ _class_sections = {
+ 'book': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
+ 'scrbook': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
+ 'report': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
+ 'scrreprt': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
+ 'article': ( 'section', 'subsection', 'subsubsection' ),
+ 'scrartcl': ( 'section', 'subsection', 'subsubsection' ),
+ }
+ _deepest_section = 'subsubsection'
+
+ def __init__(self, document_class):
+ self.document_class = document_class
+
+ def section(self, level):
+ """ Return the section name at the given level for the specific
+ document class.
+
+ Level is 1,2,3..., as level 0 is the title."""
+
+ sections = self._class_sections[self.document_class]
+ if level <= len(sections):
+ return sections[level-1]
+ else:
+ return self._deepest_section
+
+class Table:
+ """ Manage a table while traversing.
+ Maybe change to a mixin defining the visit/departs, but then
+ class Table internal variables are in the Translator.
+ """
+ def __init__(self,latex_type,table_style):
+ self._latex_type = latex_type
+ self._table_style = table_style
+ self._open = 0
+ # miscellaneous attributes
+ self._attrs = {}
+ self._col_width = []
+ self._rowspan = []
+
+ def open(self):
+ self._open = 1
+ self._col_specs = []
+ self.caption = None
+ self._attrs = {}
+ self._in_head = 0 # maybe context with search
+ def close(self):
+ self._open = 0
+ self._col_specs = None
+ self.caption = None
+ self._attrs = {}
+ def is_open(self):
+ return self._open
+ def used_packages(self):
+ if self._table_style == 'booktabs':
+ return '\\usepackage{booktabs}\n'
+ return ''
+ def get_latex_type(self):
+ return self._latex_type
+
+ def set(self,attr,value):
+ self._attrs[attr] = value
+ def get(self,attr):
+ if self._attrs.has_key(attr):
+ return self._attrs[attr]
+ return None
+ def get_vertical_bar(self):
+ if self._table_style == 'standard':
+ return '|'
+ return ''
+ # horizontal lines are drawn below a row, because we.
+ def get_opening(self):
+ return '\\begin{%s}[c]' % self._latex_type
+ def get_closing(self):
+ line = ""
+ if self._table_style == 'booktabs':
+ line = '\\bottomrule\n'
+ elif self._table_style == 'standard':
+ line = '\\hline\n'
+ return '%s\\end{%s}' % (line,self._latex_type)
+
+ def visit_colspec(self,node):
+ self._col_specs.append(node)
+
+ def get_colspecs(self):
+ """
+ Return column specification for longtable.
+
+ Assumes reST line length being 80 characters.
+ Table width is hairy.
+
+ === ===
+ ABC DEF
+ === ===
+
+ usually gets too narrow, therefore we add 1 (fiddle factor).
+ """
+ width = 80
+
+ total_width = 0.0
+ # first see if we get too wide.
+ for node in self._col_specs:
+ colwidth = float(node['colwidth']+1) / width
+ total_width += colwidth
+ self._col_width = []
+ self._rowspan = []
+ # do not make it full linewidth
+ factor = 0.93
+ if total_width > 1.0:
+ factor /= total_width
+ bar = self.get_vertical_bar()
+ latex_table_spec = ""
+ for node in self._col_specs:
+ colwidth = factor * float(node['colwidth']+1) / width
+ self._col_width.append(colwidth+0.005)
+ self._rowspan.append(0)
+ latex_table_spec += "%sp{%.2f\\locallinewidth}" % (bar,colwidth+0.005)
+ return latex_table_spec+bar
+
+ def get_column_width(self):
+ """ return columnwidth for current cell (not multicell)
+ """
+ return "%.2f\\locallinewidth" % self._col_width[self._cell_in_row-1]
+
+ def visit_thead(self):
+ self._in_thead = 1
+ if self._table_style == 'standard':
+ return ['\\hline\n']
+ elif self._table_style == 'booktabs':
+ return ['\\toprule\n']
+ return []
+ def depart_thead(self):
+ a = []
+ #if self._table_style == 'standard':
+ # a.append('\\hline\n')
+ if self._table_style == 'booktabs':
+ a.append('\\midrule\n')
+ a.append('\\endhead\n')
+ # for longtable one could add firsthead, foot and lastfoot
+ self._in_thead = 0
+ return a
+ def visit_row(self):
+ self._cell_in_row = 0
+ def depart_row(self):
+ res = [' \\\\\n']
+ self._cell_in_row = None # remove cell counter
+ for i in range(len(self._rowspan)):
+ if (self._rowspan[i]>0):
+ self._rowspan[i] -= 1
+
+ if self._table_style == 'standard':
+ rowspans = []
+ for i in range(len(self._rowspan)):
+ if (self._rowspan[i]<=0):
+ rowspans.append(i+1)
+ if len(rowspans)==len(self._rowspan):
+ res.append('\\hline\n')
+ else:
+ cline = ''
+ rowspans.reverse()
+ # TODO merge clines
+ while 1:
+ try:
+ c_start = rowspans.pop()
+ except:
+ break
+ cline += '\\cline{%d-%d}\n' % (c_start,c_start)
+ res.append(cline)
+ return res
+
+ def set_rowspan(self,cell,value):
+ try:
+ self._rowspan[cell] = value
+ except:
+ pass
+ def get_rowspan(self,cell):
+ try:
+ return self._rowspan[cell]
+ except:
+ return 0
+ def get_entry_number(self):
+ return self._cell_in_row
+ def visit_entry(self):
+ self._cell_in_row += 1
+
+
class LaTeXTranslator(nodes.NodeVisitor):
+
# When options are given to the documentclass, latex will pass them
- # to other packages, as done with babel.
+ # to other packages, as done with babel.
# Dummy settings might be taken from document settings
- d_options = '10pt' # papersize, fontsize
- d_paper = 'a4paper'
- d_margins = '2cm'
-
latex_head = '\\documentclass[%s]{%s}\n'
- encoding = '\\usepackage[latin1]{inputenc}\n'
+ encoding = '\\usepackage[%s]{inputenc}\n'
linking = '\\usepackage[colorlinks=%s,linkcolor=%s,urlcolor=%s]{hyperref}\n'
- geometry = '\\usepackage[%s,margin=%s,nohead]{geometry}\n'
stylesheet = '\\input{%s}\n'
# add a generated on day , machine by user using docutils version.
generator = '%% generator Docutils: http://docutils.sourceforge.net/\n'
# use latex tableofcontents or let docutils do it.
use_latex_toc = 0
- # table kind: if 0 tabularx (single page), 1 longtable
- # maybe should be decided on row count.
- use_longtable = 1
+
# TODO: use mixins for different implementations.
# list environment for option-list. else tabularx
use_optionlist_for_option_list = 1
# list environment for docinfo. else tabularx
use_optionlist_for_docinfo = 0 # NOT YET IN USE
+ # Use compound enumerations (1.A.1.)
+ compound_enumerators = 0
+
+ # If using compound enumerations, include section information.
+ section_prefix_for_enumerators = 0
+
+ # This is the character that separates the section ("." subsection ...)
+ # prefix from the regular list enumerator.
+ section_enumerator_separator = '-'
+
# default link color
hyperlink_color = "blue"
def __init__(self, document):
nodes.NodeVisitor.__init__(self, document)
self.settings = settings = document.settings
+ self.latex_encoding = self.to_latex_encoding(settings.output_encoding)
self.use_latex_toc = settings.use_latex_toc
+ self.use_latex_docinfo = settings.use_latex_docinfo
+ self.use_latex_footnotes = settings.use_latex_footnotes
+ self._use_latex_citations = settings.use_latex_citations
self.hyperlink_color = settings.hyperlink_color
+ self.compound_enumerators = settings.compound_enumerators
+ self.font_encoding = settings.font_encoding
+ self.section_prefix_for_enumerators = (
+ settings.section_prefix_for_enumerators)
+ self.section_enumerator_separator = (
+ settings.section_enumerator_separator.replace('_', '\\_'))
if self.hyperlink_color == '0':
self.hyperlink_color = 'black'
self.colorlinks = 'false'
else:
self.colorlinks = 'true'
-
+
# language: labels, bibliographic_fields, and author_separators.
# to allow writing labes for specific languages.
self.language = languages.get_language(settings.language_code)
self.babel = Babel(settings.language_code)
self.author_separator = self.language.author_separators[0]
+ self.d_options = self.settings.documentoptions
if self.babel.get_language():
self.d_options += ',%s' % \
self.babel.get_language()
+
+ self.d_class = DocumentClass(settings.documentclass)
+ # object for a table while processing.
+ self.active_table = Table('longtable',settings.table_style)
+
+ # HACK. Should have more sophisticated typearea handling.
+ if settings.documentclass.find('scr') == -1:
+ self.typearea = '\\usepackage[DIV12]{typearea}\n'
+ else:
+ if self.d_options.find('DIV') == -1 and self.d_options.find('BCOR') == -1:
+ self.typearea = '\\typearea{12}\n'
+ else:
+ self.typearea = ''
+
+ if self.font_encoding == 'OT1':
+ fontenc_header = ''
+ elif self.font_encoding == '':
+ fontenc_header = '\\usepackage{ae}\n\\usepackage{aeguill}\n'
+ else:
+ fontenc_header = '\\usepackage[%s]{fontenc}\n' % (self.font_encoding,)
+ input_encoding = self.encoding % self.latex_encoding
+ if self.settings.graphicx_option == '':
+ self.graphicx_package = '\\usepackage{graphicx}\n'
+ elif self.settings.graphicx_option.lower() == 'auto':
+ self.graphicx_package = '\n'.join(
+ ('%Check if we are compiling under latex or pdflatex',
+ '\\ifx\\pdftexversion\\undefined',
+ ' \\usepackage{graphicx}',
+ '\\else',
+ ' \\usepackage[pdftex]{graphicx}',
+ '\\fi\n'))
+ else:
+ self.graphicx_package = (
+ '\\usepackage[%s]{graphicx}\n' % self.settings.graphicx_option)
+
self.head_prefix = [
self.latex_head % (self.d_options,self.settings.documentclass),
'\\usepackage{babel}\n', # language is in documents settings.
+ fontenc_header,
'\\usepackage{shortvrb}\n', # allows verb in footnotes.
- self.encoding,
+ input_encoding,
# * tabularx: for docinfo, automatic width of columns, always on one page.
'\\usepackage{tabularx}\n',
'\\usepackage{longtable}\n',
+ self.active_table.used_packages(),
# possible other packages.
# * fancyhdr
# * ltxtable is a combination of tabularx and longtable (pagebreaks).
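
The Table class added above is also where the longtable column widths come from: reST column widths are taken relative to an assumed 80-character line, padded by one, and scaled down when the total would overflow. The arithmetic, reduced to a standalone sketch:

    def colspecs(colwidths, style='standard', assumed_line=80):
        total = 0.0
        for w in colwidths:
            total += float(w + 1) / assumed_line
        factor = 0.93               # do not use the full line width
        if total > 1.0:
            factor /= total
        if style == 'standard':
            bar = '|'
        else:
            bar = ''
        spec = ''
        for w in colwidths:
            width = factor * float(w + 1) / assumed_line + 0.005
            spec += '%sp{%.2f\\locallinewidth}' % (bar, width)
        return spec + bar

    print(colspecs([24, 56]))   # |p{0.29\locallinewidth}|p{0.65\locallinewidth}|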
@@ -292,28 +625,34 @@
#
# extra space between text in tables and the line above them
'\\setlength{\\extrarowheight}{2pt}\n',
- '\\usepackage{amsmath}\n', # what fore amsmath.
- '\\usepackage{graphicx}\n',
+ '\\usepackage{amsmath}\n', # what fore amsmath.
+ self.graphicx_package,
'\\usepackage{color}\n',
'\\usepackage{multirow}\n',
+ '\\usepackage{ifthen}\n', # before hyperref!
self.linking % (self.colorlinks, self.hyperlink_color, self.hyperlink_color),
- # geometry and fonts might go into style.tex.
- self.geometry % (self.d_paper, self.d_margins),
- #
+ self.typearea,
self.generator,
# latex lengths
'\\newlength{\\admonitionwidth}\n',
- '\\setlength{\\admonitionwidth}{0.9\\textwidth}\n'
+ '\\setlength{\\admonitionwidth}{0.9\\textwidth}\n'
# width for docinfo tablewidth
'\\newlength{\\docinfowidth}\n',
- '\\setlength{\\docinfowidth}{0.9\\textwidth}\n'
+ '\\setlength{\\docinfowidth}{0.9\\textwidth}\n'
+ # linewidth of current environment, so tables are not wider
+ # than the sidebar: using locallinewidth seems to defer evaluation
+ # of linewidth, this is fixing it.
+ '\\newlength{\\locallinewidth}\n',
+ # will be set later.
]
self.head_prefix.extend( latex_headings['optionlist_environment'] )
+ self.head_prefix.extend( latex_headings['lineblock_environment'] )
self.head_prefix.extend( latex_headings['footnote_floats'] )
self.head_prefix.extend( latex_headings['some_commands'] )
## stylesheet is last: so it might be possible to overwrite defaults.
- stylesheet = self.get_stylesheet_reference()
+ stylesheet = utils.get_stylesheet_reference(settings)
if stylesheet:
+ settings.record_dependencies.add(stylesheet)
self.head_prefix.append(self.stylesheet % (stylesheet))
if self.linking: # and maybe check for pdf
@@ -325,20 +664,24 @@
# NOTE: Latex wants a date and an author, rst puts this into
# docinfo, so normally we donot want latex author/date handling.
# latex article has its own handling of date and author, deactivate.
- self.latex_docinfo = 0
+ # So we always emit \title{...} \author{...} \date{...}, even if the
+ # "..." are empty strings.
self.head = [ ]
- if not self.latex_docinfo:
- self.head.extend( [ '\\author{}\n', '\\date{}\n' ] )
+ # separate title, so we can append the subtitle.
+ self.title = ''
+ # if use_latex_docinfo: collects lists of author/organization/contact/address lines
+ self.author_stack = []
+ self.date = ''
+
self.body_prefix = ['\\raggedbottom\n']
- # separate title, so we can appen subtitle.
- self.title = ""
self.body = []
self.body_suffix = ['\n']
self.section_level = 0
self.context = []
- self.topic_class = ''
+ self.topic_classes = []
# column specification for tables
- self.colspecs = []
+ self.table_caption = None
+
# Flags to encode
# ---------------
# verbatim: to tell encode not to encode.
@@ -352,31 +695,105 @@
# enumeration is done by list environment.
self._enum_cnt = 0
- # docinfo.
+
+ # Stack of section counters so that we don't have to use_latex_toc.
+ # This will grow and shrink as processing occurs.
+ # Initialized for potential first-level sections.
+ self._section_number = [0]
+
+ # The current stack of enumerations so that we can expand
+ # them into a compound enumeration
+ self._enumeration_counters = []
+
+ self._bibitems = []
+
+ # docinfo.
self.docinfo = None
# inside literal block: no quote mangling.
self.literal_block = 0
+ self.literal_block_stack = []
self.literal = 0
+ # true when encoding in math mode
+ self.mathmode = 0
- def get_stylesheet_reference(self):
- if self.settings.stylesheet_path:
- return self.settings.stylesheet_path
- else:
- return self.settings.stylesheet
+ def to_latex_encoding(self,docutils_encoding):
+ """
+ Translate docutils encoding name into latex's.
+ The default fallback is to remove "-" and "_" chars from docutils_encoding.
+
+ """
+ tr = { "iso-8859-1": "latin1", # west european
+ "iso-8859-2": "latin2", # east european
+ "iso-8859-3": "latin3", # esperanto, maltese
+ "iso-8859-4": "latin4", # north european,scandinavian, baltic
+ "iso-8859-5": "iso88595", # cyrillic (ISO)
+ "iso-8859-9": "latin5", # turkish
+ "iso-8859-15": "latin9", # latin9, update to latin1.
+ "mac_cyrillic": "maccyr", # cyrillic (on Mac)
+ "windows-1251": "cp1251", # cyrillic (on Windows)
+ "koi8-r": "koi8-r", # cyrillic (Russian)
+ "koi8-u": "koi8-u", # cyrillic (Ukrainian)
+ "windows-1250": "cp1250", #
+ "windows-1252": "cp1252", #
+ "us-ascii": "ascii", # ASCII (US)
+ # unmatched encodings
+ #"": "applemac",
+ #"": "ansinew", # windows 3.1 ansi
+ #"": "ascii", # ASCII encoding for the range 32--127.
+ #"": "cp437", # dos latine us
+ #"": "cp850", # dos latin 1
+ #"": "cp852", # dos latin 2
+ #"": "decmulti",
+ #"": "latin10",
+ #"iso-8859-6": "" # arabic
+ #"iso-8859-7": "" # greek
+ #"iso-8859-8": "" # hebrew
+ #"iso-8859-10": "" # latin6, more complete iso-8859-4
+ }
+ if tr.has_key(docutils_encoding.lower()):
+ return tr[docutils_encoding.lower()]
+ return docutils_encoding.translate(string.maketrans("",""),"_-").lower()
+
def language_label(self, docutil_label):
return self.language.labels[docutil_label]
+ latex_equivalents = {
+ u'\u00A0' : '~',
+ u'\u2013' : '{--}',
+ u'\u2014' : '{---}',
+ u'\u2018' : '`',
+ u'\u2019' : '\'',
+ u'\u201A' : ',',
+ u'\u201C' : '``',
+ u'\u201D' : '\'\'',
+ u'\u201E' : ',,',
+ u'\u2020' : '{\\dag}',
+ u'\u2021' : '{\\ddag}',
+ u'\u2026' : '{\\dots}',
+ u'\u2122' : '{\\texttrademark}',
+ u'\u21d4' : '{$\\Leftrightarrow$}',
+ }
+
+ def unicode_to_latex(self,text):
+ # see LaTeX codec
+ # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
+ # Only some special characters are translated; for documents with
+ # many utf-8 chars one should use the LaTeX unicode package.
+ for uchar in self.latex_equivalents.keys():
+ text = text.replace(uchar,self.latex_equivalents[uchar])
+ return text
+
def encode(self, text):
"""
Encode special characters in `text` & return.
# $ % & ~ _ ^ \ { }
Escaping with a backslash does not help with backslashes, ~ and ^.
- < > are only available in math-mode (really ?)
+ < > are only available in math-mode or tt font. (really ?)
$ starts math- mode.
AND quotes:
-
+
"""
if self.verbatim:
return text
@@ -395,34 +812,67 @@
# then dollar
text = text.replace("$", '{\\$}')
- # then all that needs math mode
- text = text.replace("<", '{$<$}')
- text = text.replace(">", '{$>$}')
+ if not ( self.literal_block or self.literal or self.mathmode ):
+ # the vertical bar: in mathmode |,\vert or \mid
+ # in textmode \textbar
+ text = text.replace("|", '{\\textbar}')
+ text = text.replace("<", '{\\textless}')
+ text = text.replace(">", '{\\textgreater}')
# then
text = text.replace("&", '{\\&}')
- text = text.replace("_", '{\\_}')
# the ^:
# * verb|^| does not work in mbox.
# * mathmode has wedge. hat{~} would also work.
- text = text.replace("^", '{\\ensuremath{^\\wedge}}')
+ # text = text.replace("^", '{\\ensuremath{^\\wedge}}')
+ text = text.replace("^", '{\\textasciicircum}')
text = text.replace("%", '{\\%}')
text = text.replace("#", '{\\#}')
- text = text.replace("~", '{\\~{ }}')
+ text = text.replace("~", '{\\textasciitilde}')
+ # Separate compound characters, e.g. "--" to "-{}-". (The
+ # actual separation is done later; see below.)
+ separate_chars = '-'
if self.literal_block or self.literal:
+ # In monospace-font, we also separate ",,", "``" and "''"
+ # and some other characters which can't occur in
+ # non-literal text.
+ separate_chars += ',`\'"<>'
# pdflatex does not produce doublequotes for ngerman.
text = self.babel.double_quotes_in_tt(text)
+ if self.font_encoding == 'OT1':
+ # We're using OT1 font-encoding and have to replace
+ # underscore by underlined blank, because this has
+ # correct width.
+ text = text.replace('_', '{\\underline{ }}')
+ # And the tt-backslash doesn't work in OT1, so we use
+ # a mirrored slash.
+ text = text.replace('\\textbackslash', '\\reflectbox{/}')
+ else:
+ text = text.replace('_', '{\\_}')
else:
text = self.babel.quote_quotes(text)
- if self.insert_newline:
- # HACK: insert a blank before the newline, to avoid
+ text = text.replace("_", '{\\_}')
+ for char in separate_chars * 2:
+ # Do it twice ("* 2") because otherwise we would replace
+ # "---" by "-{}--".
+ text = text.replace(char + char, char + '{}' + char)
+ if self.insert_newline or self.literal_block:
+ # Insert a blank before the newline, to avoid
# ! LaTeX Error: There's no line here to end.
text = text.replace("\n", '~\\\\\n')
elif self.mbox_newline:
- text = text.replace("\n", '}\\\\\n\\mbox{')
+ if self.literal_block:
+ closings = "}" * len(self.literal_block_stack)
+ openings = "".join(self.literal_block_stack)
+ else:
+ closings = ""
+ openings = ""
+ text = text.replace("\n", "%s}\\\\\n\\mbox{%s" % (closings,openings))
+ # lines starting with "[" give errors.
+ text = text.replace('[', '{[}')
if self.insert_none_breaking_blanks:
text = text.replace(' ', '~')
- # unicode !!!
- text = text.replace(u'\u2020', '{$\\dagger$}')
+ if self.latex_encoding != 'utf8':
+ text = self.unicode_to_latex(text)
return text
def attval(self, text,
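
Two of the new escaping rules are easy to miss in the diff: runs of "-" are split with empty groups so LaTeX does not collapse them into dashes, and when the output encoding is not utf8 a small table of unicode characters is mapped to LaTeX macros. Both reduced to plain string munging:

    def separate_dashes(text):
        for dummy in range(2):      # twice, so "---" becomes "-{}-{}-"
            text = text.replace('--', '-{}-')
        return text

    latex_equivalents = {u'\u2013': '{--}', u'\u2014': '{---}',
                         u'\u2026': '{\\dots}'}

    def unicode_to_latex(text):
        for uchar, macro in latex_equivalents.items():
            text = text.replace(uchar, macro)
        return text

    print(separate_dashes('--option and ---'))        # -{}-option and -{}-{}-
    print(unicode_to_latex(u'wait\u2026 \u2013 done'))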
@@ -431,15 +881,19 @@
return self.encode(whitespace.sub(' ', text))
def astext(self):
- if self.pdfinfo:
+ if self.pdfinfo is not None:
if self.pdfauthor:
self.pdfinfo.append('pdfauthor={%s}' % self.pdfauthor)
+ if self.pdfinfo:
pdfinfo = '\\hypersetup{\n' + ',\n'.join(self.pdfinfo) + '\n}\n'
else:
pdfinfo = ''
- title = '\\title{%s}\n' % self.title
- return ''.join(self.head_prefix + [title]
- + self.head + [pdfinfo]
+ head = '\\title{%s}\n\\author{%s}\n\\date{%s}\n' % \
+ (self.title,
+ ' \\and\n'.join(['~\\\\\n'.join(author_lines)
+ for author_lines in self.author_stack]),
+ self.date)
+ return ''.join(self.head_prefix + [head] + self.head + [pdfinfo]
+ self.body_prefix + self.body + self.body_suffix)
def visit_Text(self, node):
@@ -454,14 +908,15 @@
def depart_address(self, node):
self.depart_docinfo_item(node)
- def visit_admonition(self, node, name):
+ def visit_admonition(self, node, name=''):
self.body.append('\\begin{center}\\begin{sffamily}\n')
self.body.append('\\fbox{\\parbox{\\admonitionwidth}{\n')
- self.body.append('\\textbf{\\large '+ self.language.labels[name] + '}\n');
+ if name:
+ self.body.append('\\textbf{\\large '+ self.language.labels[name] + '}\n');
self.body.append('\\vspace{2mm}\n')
- def depart_admonition(self):
+ def depart_admonition(self, node=None):
self.body.append('}}\n') # end parbox fbox
self.body.append('\\end{sffamily}\n\\end{center}\n');
@@ -478,12 +933,10 @@
self.depart_docinfo_item(node)
def visit_authors(self, node):
- # ignore. visit_author is called for each one
- # self.visit_docinfo_item(node, 'author')
+ # not used: visit_author is called anyway for each author.
pass
def depart_authors(self, node):
- # self.depart_docinfo_item(node)
pass
def visit_block_quote(self, node):
@@ -493,17 +946,37 @@
self.body.append( '\\end{quote}\n')
def visit_bullet_list(self, node):
- if not self.use_latex_toc and self.topic_class == 'contents':
- self.body.append( '\\begin{list}{}{}\n' )
+ if self.topic_classes == ['contents']:
+ if not self.use_latex_toc:
+ self.body.append( '\\begin{list}{}{}\n' )
else:
self.body.append( '\\begin{itemize}\n' )
def depart_bullet_list(self, node):
- if not self.use_latex_toc and self.topic_class == 'contents':
- self.body.append( '\\end{list}\n' )
+ if self.topic_classes == ['contents']:
+ if not self.use_latex_toc:
+ self.body.append( '\\end{list}\n' )
else:
self.body.append( '\\end{itemize}\n' )
+ # Imperfect superscript/subscript handling: mathmode italicizes
+ # all letters by default.
+ def visit_superscript(self, node):
+ self.body.append('$^{')
+ self.mathmode = 1
+
+ def depart_superscript(self, node):
+ self.body.append('}$')
+ self.mathmode = 0
+
+ def visit_subscript(self, node):
+ self.body.append('$_{')
+ self.mathmode = 1
+
+ def depart_subscript(self, node):
+ self.body.append('}$')
+ self.mathmode = 0
+
def visit_caption(self, node):
self.body.append( '\\caption{' )
@@ -516,29 +989,47 @@
def depart_caution(self, node):
self.depart_admonition()
+ def visit_title_reference(self, node):
+ self.body.append( '\\titlereference{' )
+
+ def depart_title_reference(self, node):
+ self.body.append( '}' )
+
def visit_citation(self, node):
- self.visit_footnote(node)
+ # TODO maybe use cite bibitems
+ if self._use_latex_citations:
+ self.context.append(len(self.body))
+ else:
+ self.body.append('\\begin{figure}[b]')
+ for id in node['ids']:
+ self.body.append('\\hypertarget{%s}' % id)
def depart_citation(self, node):
- self.depart_footnote(node)
+ if self._use_latex_citations:
+ size = self.context.pop()
+ label = self.body[size]
+ text = ''.join(self.body[size+1:])
+ del self.body[size:]
+ self._bibitems.append([label, text])
+ else:
+ self.body.append('\\end{figure}\n')
- def visit_title_reference(self, node):
- # BUG title-references are what?
- pass
-
- def depart_title_reference(self, node):
- pass
-
def visit_citation_reference(self, node):
- href = ''
- if node.has_key('refid'):
- href = node['refid']
- elif node.has_key('refname'):
- href = self.document.nameids[node['refname']]
- self.body.append('[\\hyperlink{%s}{' % href)
+ if self._use_latex_citations:
+ self.body.append('\\cite{')
+ else:
+ href = ''
+ if node.has_key('refid'):
+ href = node['refid']
+ elif node.has_key('refname'):
+ href = self.document.nameids[node['refname']]
+ self.body.append('[\\hyperlink{%s}{' % href)
def depart_citation_reference(self, node):
- self.body.append('}]')
+ if self._use_latex_citations:
+ self.body.append('}')
+ else:
+ self.body.append('}]')
def visit_classifier(self, node):
self.body.append( '(\\textbf{' )
@@ -547,20 +1038,22 @@
self.body.append( '})\n' )
def visit_colspec(self, node):
- if self.use_longtable:
- self.colspecs.append(node)
- else:
- self.context[-1] += 1
+ self.active_table.visit_colspec(node)
def depart_colspec(self, node):
pass
- def visit_comment(self, node,
- sub=re.compile('\n').sub):
- """Escape end of line by a ne comment start in comment text."""
- self.body.append('%% %s \n' % sub('\n% ', node.astext()))
+ def visit_comment(self, node):
+ # Escape end of line by a new comment start in comment text.
+ self.body.append('%% %s \n' % node.astext().replace('\n', '\n% '))
raise nodes.SkipNode
+ def visit_compound(self, node):
+ pass
+
+ def depart_compound(self, node):
+ pass
+
def visit_contact(self, node):
self.visit_docinfo_item(node, 'contact')
@@ -613,7 +1106,7 @@
def visit_description(self, node):
if self.use_optionlist_for_option_list:
self.body.append( ' ' )
- else:
+ else:
self.body.append( ' & ' )
def depart_description(self, node):
@@ -631,31 +1124,38 @@
self.body = self.docinfo + self.body
# clear docinfo, so field names are no longer appended.
self.docinfo = None
- if self.use_latex_toc:
- self.body.append('\\tableofcontents\n\n\\bigskip\n')
def visit_docinfo_item(self, node, name):
- if not self.latex_docinfo:
- self.docinfo.append('\\textbf{%s}: &\n\t' % self.language_label(name))
if name == 'author':
if not self.pdfinfo == None:
if not self.pdfauthor:
self.pdfauthor = self.attval(node.astext())
else:
self.pdfauthor += self.author_separator + self.attval(node.astext())
- if self.latex_docinfo:
- self.head.append('\\author{%s}\n' % self.attval(node.astext()))
+ if self.use_latex_docinfo:
+ if name in ('author', 'organization', 'contact', 'address'):
+ # We attach these to the last author. If any of them precedes
+ # the first author, put them in a separate "author" group (for
+ # no better semantics).
+ if name == 'author' or not self.author_stack:
+ self.author_stack.append([])
+ if name == 'address': # newlines are meaningful
+ self.insert_newline = 1
+ text = self.encode(node.astext())
+ self.insert_newline = 0
+ else:
+ text = self.attval(node.astext())
+ self.author_stack[-1].append(text)
raise nodes.SkipNode
- elif name == 'date':
- if self.latex_docinfo:
- self.head.append('\\date{%s}\n' % self.attval(node.astext()))
+ elif name == 'date':
+ self.date = self.attval(node.astext())
raise nodes.SkipNode
+ self.docinfo.append('\\textbf{%s}: &\n\t' % self.language_label(name))
if name == 'address':
- # BUG will fail if latex_docinfo is set.
- self.insert_newline = 1
+ self.insert_newline = 1
self.docinfo.append('{\\raggedright\n')
self.context.append(' } \\\\\n')
- else:
+ else:
self.context.append(' \\\\\n')
self.context.append(self.docinfo)
self.context.append(len(self.body))
@@ -680,45 +1180,74 @@
def visit_document(self, node):
self.body_prefix.append('\\begin{document}\n')
- self.body_prefix.append('\\maketitle\n\n')
- # alternative use titlepage environment.
- # \begin{titlepage}
+ # titled document?
+ if self.use_latex_docinfo or len(node) and isinstance(node[0], nodes.title):
+ self.body_prefix.append('\\maketitle\n\n')
+ # alternative use titlepage environment.
+ # \begin{titlepage}
+ self.body.append('\n\\setlength{\\locallinewidth}{\\linewidth}\n')
def depart_document(self, node):
+ # TODO: the insertion point of the bibliography should not be automatic.
+ if self._use_latex_citations and len(self._bibitems)>0:
+ widest_label = ""
+ for bi in self._bibitems:
+ if len(widest_label)<len(bi[0]):
+ widest_label = bi[0]
+ self.body.append('\n\\begin{thebibliography}{%s}\n'%widest_label)
+ for bi in self._bibitems:
+ self.body.append('\\bibitem[%s]{%s}{%s}\n' % (bi[0], bi[0], bi[1]))
+ self.body.append('\\end{thebibliography}\n')
+
self.body_suffix.append('\\end{document}\n')
def visit_emphasis(self, node):
self.body.append('\\emph{')
+ self.literal_block_stack.append('\\emph{')
def depart_emphasis(self, node):
self.body.append('}')
+ self.literal_block_stack.pop()
def visit_entry(self, node):
+ self.active_table.visit_entry()
# cell separation
- column_one = 1
- if self.context[-1] > 0:
- column_one = 0
- if not column_one:
+ if self.active_table.get_entry_number() == 1:
+ # if the firstrow is a multirow, this actually is the second row.
+ # this gets hairy if rowspans follow each other.
+ if self.active_table.get_rowspan(0):
+ count = 0
+ while self.active_table.get_rowspan(count):
+ count += 1
+ self.body.append(' & ')
+ self.active_table.visit_entry() # increment cell count
+ else:
self.body.append(' & ')
# multi{row,column}
+ # IN WORK BUG TODO HACK continues here
+ # multirow in LaTeX simply will enlarge the cell over several rows
+ # (the following n if n is positive, the former if negative).
if node.has_key('morerows') and node.has_key('morecols'):
- raise NotImplementedError('LaTeX can\'t handle cells that'
- 'span multiple rows *and* columns, sorry.')
- atts = {}
+ raise NotImplementedError('Cells that '
+ 'span multiple rows *and* columns are not supported, sorry.')
if node.has_key('morerows'):
count = node['morerows'] + 1
- self.body.append('\\multirow{%d}*{' % count)
+ self.active_table.set_rowspan(self.active_table.get_entry_number()-1,count)
+ self.body.append('\\multirow{%d}{%s}{' % \
+ (count,self.active_table.get_column_width()))
self.context.append('}')
+ # BUG following rows must have empty cells.
elif node.has_key('morecols'):
# the vertical bar before column is missing if it is the first column.
# the one after always.
- if column_one:
- bar = '|'
+ if self.active_table.get_entry_number() == 1:
+ bar1 = self.active_table.get_vertical_bar()
else:
- bar = ''
+ bar1 = ''
count = node['morecols'] + 1
- self.body.append('\\multicolumn{%d}{%sl|}{' % (count, bar))
+ self.body.append('\\multicolumn{%d}{%sl%s}{' % \
+ (count, bar1, self.active_table.get_vertical_bar()))
self.context.append('}')
else:
self.context.append('')
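
When --use-latex-citations is active, depart_citation collects (label, text) pairs in _bibitems and depart_document (above) turns them into a thebibliography environment sized by the widest label. A sketch with made-up items:

    def make_bibliography(bibitems):
        if not bibitems:
            return ''
        widest = ''
        for label, text in bibitems:
            if len(label) > len(widest):
                widest = label
        out = ['\\begin{thebibliography}{%s}' % widest]
        for label, text in bibitems:
            out.append('\\bibitem[%s]{%s}{%s}' % (label, label, text))
        out.append('\\end{thebibliography}')
        return '\n'.join(out)

    print(make_bibliography([('GRU05', 'A made-up first entry.'),
                             ('DOC', 'A made-up second entry.')]))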
@@ -733,8 +1262,17 @@
def depart_entry(self, node):
self.body.append(self.context.pop()) # header / not header
self.body.append(self.context.pop()) # multirow/column
- self.context[-1] += 1
+ # if following row is spanned from above.
+ if self.active_table.get_rowspan(self.active_table.get_entry_number()):
+ self.body.append(' & ')
+ self.active_table.visit_entry() # increment cell count
+ def visit_row(self, node):
+ self.active_table.visit_row()
+
+ def depart_row(self, node):
+ self.body.extend(self.active_table.depart_row())
+
def visit_enumerated_list(self, node):
# We create our own enumeration list environment.
# This allows to set the style and starting value
@@ -743,7 +1281,7 @@
enum_style = {'arabic':'arabic',
'loweralpha':'alph',
- 'upperalpha':'Alph',
+ 'upperalpha':'Alph',
'lowerroman':'roman',
'upperroman':'Roman' }
enum_suffix = ""
@@ -752,13 +1290,22 @@
enum_prefix = ""
if node.has_key('prefix'):
enum_prefix = node['prefix']
-
+ if self.compound_enumerators:
+ pref = ""
+ if self.section_prefix_for_enumerators and self.section_level:
+ for i in range(self.section_level):
+ pref += '%d.' % self._section_number[i]
+ pref = pref[:-1] + self.section_enumerator_separator
+ enum_prefix += pref
+ for counter in self._enumeration_counters:
+ enum_prefix += counter + '.'
enum_type = "arabic"
if node.has_key('enumtype'):
enum_type = node['enumtype']
if enum_style.has_key(enum_type):
enum_type = enum_style[enum_type]
counter_name = "listcnt%d" % self._enum_cnt;
+ self._enumeration_counters.append("\\%s{%s}" % (enum_type,counter_name))
self.body.append('\\newcounter{%s}\n' % counter_name)
self.body.append('\\begin{list}{%s\\%s{%s}%s}\n' % \
(enum_prefix,enum_type,counter_name,enum_suffix))
@@ -774,6 +1321,7 @@
def depart_enumerated_list(self, node):
self.body.append('\\end{list}\n')
+ self._enumeration_counters.pop()
def visit_error(self, node):
self.visit_admonition(node, 'error')
@@ -798,7 +1346,7 @@
def visit_field_body(self, node):
# BUG: by attaching as text we lose references.
if self.docinfo:
- self.docinfo.append('%s \\\\\n' % node.astext())
+ self.docinfo.append('%s \\\\\n' % self.encode(node.astext()))
raise nodes.SkipNode
# BUG: what happens if not docinfo
@@ -818,7 +1366,7 @@
def visit_field_name(self, node):
# BUG this duplicates docinfo_item
if self.docinfo:
- self.docinfo.append('\\textbf{%s}: &\n\t' % node.astext())
+ self.docinfo.append('\\textbf{%s}: &\n\t' % self.encode(node.astext()))
raise nodes.SkipNode
else:
self.body.append('\\item [')
@@ -828,10 +1376,10 @@
self.body.append(':]')
def visit_figure(self, node):
- self.body.append( '\\begin{figure}\n' )
+ self.body.append( '\\begin{figure}[htbp]\\begin{center}\n' )
def depart_figure(self, node):
- self.body.append( '\\end{figure}\n' )
+ self.body.append( '\\end{center}\\end{figure}\n' )
def visit_footer(self, node):
self.context.append(len(self.body))
@@ -844,14 +1392,26 @@
del self.body[start:]
def visit_footnote(self, node):
- notename = node['id']
- self.body.append('\\begin{figure}[b]')
- self.body.append('\\hypertarget{%s}' % notename)
+ if self.use_latex_footnotes:
+ num,text = node.astext().split(None,1)
+ num = self.encode(num.strip())
+ self.body.append('\\footnotetext['+num+']')
+ self.body.append('{')
+ else:
+ self.body.append('\\begin{figure}[b]')
+ for id in node['ids']:
+ self.body.append('\\hypertarget{%s}' % id)
def depart_footnote(self, node):
- self.body.append('\\end{figure}\n')
+ if self.use_latex_footnotes:
+ self.body.append('}\n')
+ else:
+ self.body.append('\\end{figure}\n')
def visit_footnote_reference(self, node):
+ if self.use_latex_footnotes:
+ self.body.append("\\footnotemark["+self.encode(node.astext())+"]")
+ raise nodes.SkipNode
href = ''
if node.has_key('refid'):
href = node['refid']
@@ -869,8 +1429,31 @@
self.body.append('%s\\hyperlink{%s}{' % (suffix,href))
def depart_footnote_reference(self, node):
+ if self.use_latex_footnotes:
+ return
self.body.append('}%s' % self.context.pop())
+ # footnote/citation label
+ def label_delim(self, node, bracket, superscript):
+ if isinstance(node.parent, nodes.footnote):
+ if self.use_latex_footnotes:
+ raise nodes.SkipNode
+ if self.settings.footnote_references == 'brackets':
+ self.body.append(bracket)
+ else:
+ self.body.append(superscript)
+ else:
+ assert isinstance(node.parent, nodes.citation)
+ if not self._use_latex_citations:
+ self.body.append(bracket)
+
+ def visit_label(self, node):
+ self.label_delim(node, '[', '$^{')
+
+ def depart_label(self, node):
+ self.label_delim(node, ']', '}$')
+
+ # elements generated by the framework e.g. section numbers.
def visit_generated(self, node):
pass
@@ -894,11 +1477,40 @@
self.depart_admonition()
def visit_image(self, node):
- atts = node.attributes.copy()
- href = atts['uri']
- ##self.body.append('\\begin{center}\n')
- self.body.append('\n\\includegraphics{%s}\n' % href)
- ##self.body.append('\\end{center}\n')
+ attrs = node.attributes
+ # Add image URI to dependency list, assuming that it's
+ # referring to a local file.
+ self.settings.record_dependencies.add(attrs['uri'])
+ pre = [] # in reverse order
+ post = ['\\includegraphics{%s}' % attrs['uri']]
+ inline = isinstance(node.parent, nodes.TextElement)
+ if attrs.has_key('scale'):
+ # Could also be done with ``scale`` option to
+ # ``\includegraphics``; doing it this way for consistency.
+ pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,))
+ post.append('}')
+ if attrs.has_key('align'):
+ align_prepost = {
+ # By default latex aligns the top of an image.
+ (1, 'top'): ('', ''),
+ (1, 'middle'): ('\\raisebox{-0.5\\height}{', '}'),
+ (1, 'bottom'): ('\\raisebox{-\\height}{', '}'),
+ (0, 'center'): ('{\\hfill', '\\hfill}'),
+ # These 2 don't exactly do the right thing. The image should
+ # be floated alongside the paragraph. See
+ # http://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG
+ (0, 'left'): ('{', '\\hfill}'),
+ (0, 'right'): ('{\\hfill', '}'),}
+ try:
+ pre.append(align_prepost[inline, attrs['align']][0])
+ post.append(align_prepost[inline, attrs['align']][1])
+ except KeyError:
+ pass # XXX complain here?
+ if not inline:
+ pre.append('\n')
+ post.append('\n')
+ pre.reverse()
+ self.body.extend(pre + post)
def depart_image(self, node):
pass
@@ -917,45 +1529,32 @@
def depart_interpreted(self, node):
self.depart_literal(node)
- def visit_label(self, node):
- # footnote/citation label
- self.body.append('[')
-
- def depart_label(self, node):
- self.body.append(']')
-
def visit_legend(self, node):
self.body.append('{\\small ')
def depart_legend(self, node):
self.body.append('}')
+ def visit_line(self, node):
+ self.body.append('\item[] ')
+
+ def depart_line(self, node):
+ self.body.append('\n')
+
def visit_line_block(self, node):
- """line-block:
- * whitespace (including linebreaks) is significant
- * inline markup is supported.
- * serif typeface
- """
- self.body.append('\\begin{flushleft}\n')
- self.insert_none_breaking_blanks = 1
- self.line_block_without_mbox = 1
- if self.line_block_without_mbox:
- self.insert_newline = 1
+ if isinstance(node.parent, nodes.line_block):
+ self.body.append('\\item[] \n'
+ '\\begin{lineblock}{\\lineblockindentation}\n')
else:
- self.mbox_newline = 1
- self.body.append('\\mbox{')
+ self.body.append('\n\\begin{lineblock}{0em}\n')
def depart_line_block(self, node):
- if self.line_block_without_mbox:
- self.insert_newline = 0
- else:
- self.body.append('}')
- self.mbox_newline = 0
- self.insert_none_breaking_blanks = 0
- self.body.append('\n\\end{flushleft}\n')
+ self.body.append('\\end{lineblock}\n')
def visit_list_item(self, node):
- self.body.append('\\item ')
+ # Append "{}" in case the next character is "[", which would break
+ # LaTeX's list environment (no numbering and the "[" is not printed).
+ self.body.append('\\item {} ')
def depart_list_item(self, node):
self.body.append('\n')
@@ -970,31 +1569,54 @@
def visit_literal_block(self, node):
"""
- .. parsed-literal::
+ Render a literal-block.
+
+ Literal blocks are used for "::"-prefixed literal-indented
+ blocks of text, where the inline markup is not recognized,
+ but are also the product of the parsed-literal directive,
+ where the markup is respected.
"""
- # typically in a typewriter/monospaced typeface.
- # care must be taken with the text, because inline markup is recognized.
- #
- # possibilities:
- # * verbatim: is no possibility, as inline markup does not work.
- # * obey..: is from julien and never worked for me (grubert).
- self.use_for_literal_block = "mbox"
- self.literal_block = 1
- if (self.use_for_literal_block == "mbox"):
- self.mbox_newline = 1
+ # In both cases, we want to use a typewriter/monospaced typeface.
+ # For "real" literal-blocks, we can use \verbatim, while for all
+ # the others we must use \mbox.
+ #
+ # We can distinguish between the two kinds by the number of
+ # children that compose this node: if it is composed of a
+ # single element, it surely is either a real one or a
+ # parsed-literal that does not contain any markup.
+ #
+ if (self.settings.use_verbatim_when_possible and (len(node) == 1)
+ # in case of a parsed-literal containing just a "**bold**" word:
+ and isinstance(node[0], nodes.Text)):
+ self.verbatim = 1
+ self.body.append('\\begin{quote}\\begin{verbatim}\n')
+ else:
+ self.literal_block = 1
self.insert_none_breaking_blanks = 1
- self.body.append('\\begin{ttfamily}\\begin{flushleft}\n\\mbox{')
- else:
- self.body.append('{\\obeylines\\obeyspaces\\ttfamily\n')
+ if self.active_table.is_open():
+ self.body.append('\n{\\ttfamily \\raggedright \\noindent\n')
+ else:
+ # no quote inside tables, to avoid vertical space between
+ # table border and literal block.
+ # BUG: fails if normal text precedes the literal block.
+ self.body.append('\\begin{quote}')
+ self.body.append('{\\ttfamily \\raggedright \\noindent\n')
+ # * obey..: is from julien and never worked for me (grubert).
+ # self.body.append('{\\obeylines\\obeyspaces\\ttfamily\n')
def depart_literal_block(self, node):
- if (self.use_for_literal_block == "mbox"):
- self.body.append('}\n\\end{flushleft}\\end{ttfamily}\n')
+ if self.verbatim:
+ self.body.append('\n\\end{verbatim}\\end{quote}\n')
+ self.verbatim = 0
+ else:
+ if self.active_table.is_open():
+ self.body.append('\n}\n')
+ else:
+ self.body.append('\n')
+ self.body.append('}\\end{quote}\n')
self.insert_none_breaking_blanks = 0
- self.mbox_newline = 0
- else:
- self.body.append('}\n')
- self.literal_block = 0
+ self.literal_block = 0
+ # obey end: self.body.append('}\n')
def visit_meta(self, node):
self.body.append('[visit_meta]\n')
@@ -1030,14 +1652,13 @@
if self.use_optionlist_for_option_list:
self.body.append('\\item [')
else:
- atts = {}
if len(node.astext()) > 14:
self.body.append('\\multicolumn{2}{l}{')
self.context.append('} \\\\\n ')
else:
self.context.append('')
self.body.append('\\texttt{')
- # flag for first option
+ # flag for first option
self.context.append(0)
def depart_option_group(self, node):
@@ -1086,14 +1707,16 @@
self.depart_docinfo_item(node)
def visit_paragraph(self, node):
- if not self.topic_class == 'contents':
+ index = node.parent.index(node)
+ if not (self.topic_classes == ['contents'] or
+ (isinstance(node.parent, nodes.compound) and
+ index > 0 and
+ not isinstance(node.parent[index - 1], nodes.paragraph) and
+ not isinstance(node.parent[index - 1], nodes.compound))):
self.body.append('\n')
def depart_paragraph(self, node):
- if self.topic_class == 'contents':
- self.body.append('\n')
- else:
- self.body.append('\n')
+ self.body.append('\n')
def visit_problematic(self, node):
self.body.append('{\\color{red}\\bfseries{}')
@@ -1102,24 +1725,26 @@
self.body.append('}')
def visit_raw(self, node):
- if node.has_key('format') and node['format'].lower() == 'latex':
+ if 'latex' in node.get('format', '').split():
self.body.append(node.astext())
raise nodes.SkipNode
def visit_reference(self, node):
- # for pdflatex hyperrefs might be supported
+ # BUG: hash_char "#" is troublesome in LaTeX.
+ # mbox and other environments do not like the '#'.
+ hash_char = '\\#'
if node.has_key('refuri'):
- href = node['refuri']
+ href = node['refuri'].replace('#',hash_char)
elif node.has_key('refid'):
- href = '#' + node['refid']
+ href = hash_char + node['refid']
elif node.has_key('refname'):
- href = '#' + self.document.nameids[node['refname']]
- ##self.body.append('[visit_reference]')
+ href = hash_char + self.document.nameids[node['refname']]
+ else:
+ raise AssertionError('Unknown reference.')
self.body.append('\\href{%s}{' % href)
def depart_reference(self, node):
self.body.append('}')
- ##self.body.append('[depart_reference]')
def visit_revision(self, node):
self.visit_docinfo_item(node, 'revision')
@@ -1127,27 +1752,28 @@
def depart_revision(self, node):
self.depart_docinfo_item(node)
- def visit_row(self, node):
- self.context.append(0)
-
- def depart_row(self, node):
- self.context.pop() # remove cell counter
- self.body.append(' \\\\ \\hline\n')
-
def visit_section(self, node):
self.section_level += 1
+ # Initialize counter for potential subsections:
+ self._section_number.append(0)
+ # Counter for this section's level (initialized by parent section):
+ self._section_number[self.section_level - 1] += 1
def depart_section(self, node):
+ # Remove counter for potential subsections:
+ self._section_number.pop()
self.section_level -= 1
def visit_sidebar(self, node):
- # BUG: this is just a hack to make sidebars render something
+ # BUG: this is just a hack to make sidebars render something
+ self.body.append('\n\\setlength{\\locallinewidth}{0.9\\admonitionwidth}\n')
self.body.append('\\begin{center}\\begin{sffamily}\n')
self.body.append('\\fbox{\\colorbox[gray]{0.80}{\\parbox{\\admonitionwidth}{\n')
def depart_sidebar(self, node):
self.body.append('}}}\n') # end parbox colorbox fbox
self.body.append('\\end{sffamily}\n\\end{center}\n');
+ self.body.append('\n\\setlength{\\locallinewidth}{\\linewidth}\n')
attribution_formats = {'dash': ('---', ''),
@@ -1173,9 +1799,11 @@
def visit_strong(self, node):
self.body.append('\\textbf{')
+ self.literal_block_stack.append('\\textbf{')
def depart_strong(self, node):
self.body.append('}')
+ self.literal_block_stack.pop()
def visit_substitution_definition(self, node):
raise nodes.SkipNode
@@ -1187,79 +1815,42 @@
if isinstance(node.parent, nodes.sidebar):
self.body.append('~\\\\\n\\textbf{')
self.context.append('}\n\\smallskip\n')
- else:
+ elif isinstance(node.parent, nodes.document):
self.title = self.title + \
- '\\\\\n\\large{%s}\n' % self.encode(node.astext())
+ '\\\\\n\\large{%s}\n' % self.encode(node.astext())
raise nodes.SkipNode
+ elif isinstance(node.parent, nodes.section):
+ self.body.append('\\textbf{')
+ self.context.append('}\\vspace{0.2cm}\n\n\\noindent ')
def depart_subtitle(self, node):
- if isinstance(node.parent, nodes.sidebar):
- self.body.append(self.context.pop())
+ self.body.append(self.context.pop())
def visit_system_message(self, node):
- if node['level'] < self.document.reporter['writer'].report_level:
+ if node['level'] < self.document.reporter.report_level:
raise nodes.SkipNode
-
def depart_system_message(self, node):
self.body.append('\n')
- def get_colspecs(self):
- """
- Return column specification for longtable.
-
- Assumes reST line length being 80 characters.
- """
- width = 80
-
- total_width = 0.0
- # first see if we get too wide.
- for node in self.colspecs:
- colwidth = float(node['colwidth']) / width
- total_width += colwidth
- # donot make it full linewidth
- factor = 0.93
- if total_width > 1.0:
- factor /= total_width
-
- latex_table_spec = ""
- for node in self.colspecs:
- colwidth = factor * float(node['colwidth']) / width
- latex_table_spec += "|p{%.2f\\linewidth}" % colwidth
- self.colspecs = []
- return latex_table_spec+"|"
-
def visit_table(self, node):
- if self.use_longtable:
- self.body.append('\n\\begin{longtable}[c]')
- else:
- self.body.append('\n\\begin{tabularx}{\\linewidth}')
- self.context.append('table_sentinel') # sentinel
- self.context.append(0) # column counter
+ if self.active_table.is_open():
+ print 'nested tables are not supported'
+ raise AssertionError
+ self.active_table.open()
+ self.body.append('\n' + self.active_table.get_opening())
def depart_table(self, node):
- if self.use_longtable:
- self.body.append('\\end{longtable}\n')
- else:
- self.body.append('\\end{tabularx}\n')
- sentinel = self.context.pop()
- if sentinel != 'table_sentinel':
- print 'context:', self.context + [sentinel]
- raise AssertionError
+ self.body.append(self.active_table.get_closing() + '\n')
+ self.active_table.close()
- def table_preamble(self):
- if self.use_longtable:
- self.body.append('{%s}\n' % self.get_colspecs())
- else:
- if self.context[-1] != 'table_sentinel':
- self.body.append('{%s}' % ('|X' * self.context.pop() + '|'))
- self.body.append('\n\\hline')
-
def visit_target(self, node):
+ # BUG: why not (refuri or refid or refname) means not footnote ?
if not (node.has_key('refuri') or node.has_key('refid')
or node.has_key('refname')):
- self.body.append('\\hypertarget{%s}{' % node['name'])
- self.context.append('}')
+ for id in node['ids']:
+ self.body.append('\\hypertarget{%s}{' % id)
+ self.context.append('}' * len(node['ids']))
else:
self.context.append('')
@@ -1269,20 +1860,19 @@
def visit_tbody(self, node):
# BUG write preamble if not yet done (colspecs not [])
# for tables without heads.
- if self.colspecs:
+ if not self.active_table.get('preamble written'):
self.visit_thead(None)
- self.depart_thead(None)
- self.body.append('%[visit_tbody]\n')
+ # self.depart_thead(None)
def depart_tbody(self, node):
- self.body.append('%[depart_tbody]\n')
+ pass
def visit_term(self, node):
- self.body.append('\\item[')
+ self.body.append('\\item[{')
def depart_term(self, node):
# definition list term.
- self.body.append(':]\n')
+ self.body.append('}] ')
def visit_tgroup(self, node):
#self.body.append(self.starttag(node, 'colgroup'))
@@ -1293,29 +1883,27 @@
pass
def visit_thead(self, node):
- # number_of_columns will be zero after get_colspecs.
- # BUG ! push onto context for depart to pop it.
- number_of_columns = len(self.colspecs)
- self.table_preamble()
- #BUG longtable needs firstpage and lastfooter too.
- self.body.append('\\hline\n')
+ self.body.append('{%s}\n' % self.active_table.get_colspecs())
+ if self.active_table.caption:
+ self.body.append('\\caption{%s}\\\\\n' % self.active_table.caption)
+ self.active_table.set('preamble written',1)
+ # TODO longtable supports firsthead and lastfoot too.
+ self.body.extend(self.active_table.visit_thead())
def depart_thead(self, node):
- if self.use_longtable:
- # the table header written should be on every page
- # => \endhead
- self.body.append('\\endhead\n')
- # and the firsthead => \endfirsthead
- # BUG i want a "continued from previous page" on every not
- # firsthead, but then we need the header twice.
- #
- # there is a \endfoot and \endlastfoot too.
- # but we need the number of columns to
- # self.body.append('\\multicolumn{%d}{c}{"..."}\n' % number_of_columns)
- # self.body.append('\\hline\n\\endfoot\n')
- # self.body.append('\\hline\n')
- # self.body.append('\\endlastfoot\n')
-
+ # the table header should be written on every page
+ # => \endhead
+ self.body.extend(self.active_table.depart_thead())
+ # and the firsthead => \endfirsthead
+ # BUG: i want a "continued from previous page" on every page but the
+ # first, but then we need the header twice.
+ #
+ # there is a \endfoot and \endlastfoot too.
+ # but we need the number of columns to
+ # self.body.append('\\multicolumn{%d}{c}{"..."}\n' % number_of_columns)
+ # self.body.append('\\hline\n\\endfoot\n')
+ # self.body.append('\\hline\n')
+ # self.body.append('\\endlastfoot\n')
def visit_tip(self, node):
self.visit_admonition(node, 'tip')
@@ -1323,20 +1911,47 @@
def depart_tip(self, node):
self.depart_admonition()
+ def bookmark(self, node):
+ """Append latex href and pdfbookmarks for titles.
+ """
+ if node.parent['ids']:
+ for id in node.parent['ids']:
+ self.body.append('\\hypertarget{%s}{}\n' % id)
+ if not self.use_latex_toc:
+ # BUG level depends on style. pdflatex allows level 0 to 3
+ # The ToC would be the only entry on level 0, so i choose to decrement the rest.
+ # "Table of contents" bookmark to see the ToC. To avoid this
+ # we set all zeroes to one.
+ l = self.section_level
+ if l>0:
+ l = l-1
+ # pdftex does not like "_" subscripts in titles
+ text = self.encode(node.astext())
+ for id in node.parent['ids']:
+ self.body.append('\\pdfbookmark[%d]{%s}{%s}\n' % \
+ (l, text, id))
+
def visit_title(self, node):
"""Only 3 section levels are supported by LaTeX article (AFAIR)."""
+
if isinstance(node.parent, nodes.topic):
# section titles before the table of contents.
- if node.parent.hasattr('id'):
- self.body.append('\\hypertarget{%s}{}' % node.parent['id'])
+ self.bookmark(node)
# BUG: latex chokes on center environment with "perhaps a missing item".
# so we use hfill.
- self.body.append('\\subsection*{~\\hfill ')
+ self.body.append('\\subsubsection*{~\\hfill ')
# the closing brace for subsection.
self.context.append('\\hfill ~}\n')
- elif isinstance(node.parent, nodes.sidebar):
+ # TODO: for admonition titles before the first section
+ # either specify every possible node or ... ?
+ elif isinstance(node.parent, nodes.sidebar) \
+ or isinstance(node.parent, nodes.admonition):
self.body.append('\\textbf{\\large ')
self.context.append('}\n\\smallskip\n')
+ elif isinstance(node.parent, nodes.table):
+ # caption must be written after column spec
+ self.active_table.caption = self.encode(node.astext())
+ raise nodes.SkipNode
elif self.section_level == 0:
# document title
self.title = self.encode(node.astext())
@@ -1347,54 +1962,39 @@
self.body.append('\n\n')
self.body.append('%' + '_' * 75)
self.body.append('\n\n')
- if node.parent.hasattr('id'):
- self.body.append('\\hypertarget{%s}{}\n' % node.parent['id'])
- # section_level 0 is title and handled above.
- # BUG: latex has no deeper sections (actually paragrah is no section either).
+ self.bookmark(node)
+
if self.use_latex_toc:
section_star = ""
else:
section_star = "*"
- if (self.section_level<=3): # 1,2,3
- self.body.append('\\%ssection%s{' % ('sub'*(self.section_level-1),section_star))
- elif (self.section_level==4):
- #self.body.append('\\paragraph*{')
- self.body.append('\\subsubsection%s{' % (section_star))
- else:
- #self.body.append('\\subparagraph*{')
- self.body.append('\\subsubsection%s{' % (section_star))
- # BUG: self.body.append( '\\label{%s}\n' % name)
+
+ section_name = self.d_class.section(self.section_level)
+ self.body.append('\\%s%s{' % (section_name, section_star))
+
self.context.append('}\n')
def depart_title(self, node):
self.body.append(self.context.pop())
- if isinstance(node.parent, nodes.sidebar):
- return
- # BUG level depends on style.
- elif node.parent.hasattr('id') and not self.use_latex_toc:
- # pdflatex allows level 0 to 3
- # ToC would be the only on level 0 so i choose to decrement the rest.
- # "Table of contents" bookmark to see the ToC. To avoid this
- # we set all zeroes to one.
- l = self.section_level
- if l>0:
- l = l-1
- self.body.append('\\pdfbookmark[%d]{%s}{%s}\n' % \
- (l,node.astext(),node.parent['id']))
def visit_topic(self, node):
- self.topic_class = node.get('class')
- if self.use_latex_toc:
- self.topic_class = ''
+ self.topic_classes = node['classes']
+ if 'contents' in node['classes'] and self.use_latex_toc:
+ self.body.append('\\tableofcontents\n\n\\bigskip\n')
+ self.topic_classes = []
raise nodes.SkipNode
+ def visit_inline(self, node): # titlereference
+ self.body.append( '\\docutilsrole%s{' % node.get('class'))
+
+ def depart_inline(self, node):
+ self.body.append( '}' )
+
def depart_topic(self, node):
- self.topic_class = ''
+ self.topic_classes = []
self.body.append('\n')
def visit_rubric(self, node):
-# self.body.append('\\hfill {\\color{red}\\bfseries{}')
-# self.context.append('} \\hfill ~\n')
self.body.append('\\rubric{')
self.context.append('}\n')
@@ -1408,7 +2008,6 @@
self.body.append('\n\n')
def depart_transition(self, node):
- #self.body.append('[depart_transition]')
pass
def visit_version(self, node):
@@ -1429,5 +2028,5 @@
# def unknown_visit(self, node):
# def default_visit(self, node):
-
+
# vim: set ts=4 et ai :
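(Aside, not part of the checkin: the updated writer is normally driven through the docutils publisher; a minimal sketch, assuming docutils' publish_string helper and the standard 'latex' writer name.)
>>> from docutils.core import publish_string
>>> latex = publish_string('Hello, *world*!\n', writer_name='latex')
>>> '\\begin{document}' in latex
True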
Added: Zope3/trunk/src/docutils/writers/newlatex2e.py
===================================================================
--- Zope3/trunk/src/docutils/writers/newlatex2e.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/writers/newlatex2e.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,759 @@
+"""
+:Author: Felix Wiemann
+:Contact: Felix_Wiemann at ososo.de
+:Revision: $Revision: 3376 $
+:Date: $Date: 2005-05-26 23:33:24 +0200 (Thu, 26 May 2005) $
+:Copyright: This module has been placed in the public domain.
+
+LaTeX2e document tree Writer.
+"""
+
+# Thanks to Engelbert Gruber and various contributors for the original
+# LaTeX writer, some code and many ideas of which have been used for
+# this writer.
+
+__docformat__ = 'reStructuredText'
+
+
+from __future__ import nested_scopes
+
+import re
+import string
+import os.path
+from types import ListType
+
+import docutils
+from docutils import nodes, writers, utils
+
+
+class Writer(writers.Writer):
+
+ supported = ('newlatex', 'newlatex2e')
+ """Formats this writer supports."""
+
+ settings_spec = (
+ 'LaTeX-Specific Options',
+ 'The LaTeX "--output-encoding" default is "latin-1:strict". '
+ 'Note that this LaTeX writer is still EXPERIMENTAL.',
+ (('Specify a stylesheet file. The path is used verbatim to include '
+ 'the file. Overrides --stylesheet-path.',
+ ['--stylesheet'],
+ {'default': '', 'metavar': '<file>',
+ 'overrides': 'stylesheet_path'}),
+ ('Specify a stylesheet file, relative to the current working '
+ 'directory. Overrides --stylesheet.',
+ ['--stylesheet-path'],
+ {'metavar': '<file>', 'overrides': 'stylesheet'}),
+ ('Specify a user stylesheet file. See --stylesheet.',
+ ['--user-stylesheet'],
+ {'default': '', 'metavar': '<file>',
+ 'overrides': 'user_stylesheet_path'}),
+ ('Specify a user stylesheet file. See --stylesheet-path.',
+ ['--user-stylesheet-path'],
+ {'metavar': '<file>', 'overrides': 'user_stylesheet'})
+ ),)
+
+ settings_defaults = {'output_encoding': 'latin-1',
+ 'trim_footnote_reference_space': 1,
+ # Currently unsupported:
+ 'docinfo_xform': 0,
+ # During development:
+ 'traceback': 1}
+
+ relative_path_settings = ('stylesheet_path',)
+
+ config_section = 'newlatex2e writer'
+ config_section_dependencies = ('writers',)
+
+ output = None
+ """Final translated form of `document`."""
+
+ def __init__(self):
+ writers.Writer.__init__(self)
+ self.translator_class = LaTeXTranslator
+
+ def translate(self):
+ visitor = self.translator_class(self.document)
+ self.document.walkabout(visitor)
+ assert not visitor.context, 'context not empty: %s' % visitor.context
+ self.output = visitor.astext()
+ self.head = visitor.header
+ self.body = visitor.body
+
+
+class Babel:
+ """Language specifics for LaTeX."""
+ # country code by a.schlock.
+ # partly manually converted from iso and babel stuff, dialects and some
+ _ISO639_TO_BABEL = {
+ 'no': 'norsk', # added by hand ( forget about nynorsk?)
+ 'gd': 'scottish', # added by hand
+ 'hu': 'magyar', # added by hand
+ 'pt': 'portuguese',# added by hand
+ 'sl': 'slovenian',
+ 'af': 'afrikaans',
+ 'bg': 'bulgarian',
+ 'br': 'breton',
+ 'ca': 'catalan',
+ 'cs': 'czech',
+ 'cy': 'welsh',
+ 'da': 'danish',
+ 'fr': 'french',
+ # french, francais, canadien, acadian
+ 'de': 'ngerman', # rather than german
+ # ngerman, naustrian, german, germanb, austrian
+ 'el': 'greek',
+ 'en': 'english',
+ # english, USenglish, american, UKenglish, british, canadian
+ 'eo': 'esperanto',
+ 'es': 'spanish',
+ 'et': 'estonian',
+ 'eu': 'basque',
+ 'fi': 'finnish',
+ 'ga': 'irish',
+ 'gl': 'galician',
+ 'he': 'hebrew',
+ 'hr': 'croatian',
+ 'hu': 'hungarian',
+ 'is': 'icelandic',
+ 'it': 'italian',
+ 'la': 'latin',
+ 'nl': 'dutch',
+ 'pl': 'polish',
+ 'pt': 'portuguese',
+ 'ro': 'romanian',
+ 'ru': 'russian',
+ 'sk': 'slovak',
+ 'sr': 'serbian',
+ 'sv': 'swedish',
+ 'tr': 'turkish',
+ 'uk': 'ukrainian'
+ }
+
+ def __init__(self, lang):
+ self.language = lang
+
+ def get_language(self):
+ if self._ISO639_TO_BABEL.has_key(self.language):
+ return self._ISO639_TO_BABEL[self.language]
+ else:
+ # Support dialects.
+ l = self.language.split("_")[0]
+ if self._ISO639_TO_BABEL.has_key(l):
+ return self._ISO639_TO_BABEL[l]
+ return None
+
+
+class LaTeXException(Exception):
+ """
+ Exception base class for exceptions which influence the
+ automatic generation of LaTeX code.
+ """
+
+
+class SkipAttrParentLaTeX(LaTeXException):
+ """
+ Do not generate \Dattr and \renewcommand{\Dparent}{...} for this
+ node.
+
+ To be raised from before_... methods.
+ """
+
+
+class SkipParentLaTeX(LaTeXException):
+ """
+ Do not generate \renewcommand{\DNparent}{...} for this node.
+
+ To be raised from before_... methods.
+ """
+
+
+class LaTeXTranslator(nodes.SparseNodeVisitor):
+
+ # Start with left double quote.
+ left_quote = 1
+
+ def __init__(self, document):
+ nodes.NodeVisitor.__init__(self, document)
+ self.settings = document.settings
+ self.header = []
+ self.body = []
+ self.context = []
+ self.stylesheet_path = utils.get_stylesheet_reference(
+ self.settings, os.path.join(os.getcwd(), 'dummy'))
+ if self.stylesheet_path:
+ self.settings.record_dependencies.add(self.stylesheet_path)
+ # This ugly hack will be cleaned up when refactoring the
+ # stylesheet mess.
+ self.settings.stylesheet = self.settings.user_stylesheet
+ self.settings.stylesheet_path = self.settings.user_stylesheet_path
+ self.user_stylesheet_path = utils.get_stylesheet_reference(
+ self.settings, os.path.join(os.getcwd(), 'dummy'))
+ if self.user_stylesheet_path:
+ self.settings.record_dependencies.add(self.user_stylesheet_path)
+ self.write_header()
+ for key, value in self.character_map.items():
+ self.character_map[key] = '{%s}' % value
+
+ def write_header(self):
+ a = self.header.append
+ a('%% Generated by Docutils %s <http://docutils.sourceforge.net>.\n'
+ % docutils.__version__)
+ if self.user_stylesheet_path:
+ a('% User stylesheet:')
+ a(r'\input{%s}' % self.user_stylesheet_path)
+ a('% Docutils stylesheet:')
+ a(r'\input{%s}' % self.stylesheet_path)
+ a('')
+ a('% Definitions for Docutils Nodes:')
+ for node_name in nodes.node_class_names:
+ a(r'\providecommand{\DN%s}[1]{#1}' % node_name.replace('_', ''))
+ a('')
+ a('% Auxiliary definitions:')
+ a(r'\providecommand{\Dsetattr}[2]{}')
+ a(r'\providecommand{\Dparent}{} % variable')
+ a(r'\providecommand{\Dattr}[5]{#5}')
+ a(r'\providecommand{\Dattrlen}{} % variable')
+ a(r'\providecommand{\Dtitleastext}{x}')
+ a(r'\providecommand{\Dsinglebackref}{} % variable')
+ a(r'\providecommand{\Dmultiplebackrefs}{} % variable')
+ a('\n\n')
+
+ def to_latex_encoding(self,docutils_encoding):
+ """
+ Translate docutils encoding name into latex's.
+
+ The default fallback method is to remove "-" and "_" chars from
+ docutils_encoding.
+ """
+ tr = { "iso-8859-1": "latin1", # west european
+ "iso-8859-2": "latin2", # east european
+ "iso-8859-3": "latin3", # esperanto, maltese
+ "iso-8859-4": "latin4", # north european,scandinavian, baltic
+ "iso-8859-5": "iso88595", # cyrillic (ISO)
+ "iso-8859-9": "latin5", # turkish
+ "iso-8859-15": "latin9", # latin9, update to latin1.
+ "mac_cyrillic": "maccyr", # cyrillic (on Mac)
+ "windows-1251": "cp1251", # cyrillic (on Windows)
+ "koi8-r": "koi8-r", # cyrillic (Russian)
+ "koi8-u": "koi8-u", # cyrillic (Ukrainian)
+ "windows-1250": "cp1250", #
+ "windows-1252": "cp1252", #
+ "us-ascii": "ascii", # ASCII (US)
+ # unmatched encodings
+ #"": "applemac",
+ #"": "ansinew", # windows 3.1 ansi
+ #"": "ascii", # ASCII encoding for the range 32--127.
+ #"": "cp437", # dos latine us
+ #"": "cp850", # dos latin 1
+ #"": "cp852", # dos latin 2
+ #"": "decmulti",
+ #"": "latin10",
+ #"iso-8859-6": "" # arabic
+ #"iso-8859-7": "" # greek
+ #"iso-8859-8": "" # hebrew
+ #"iso-8859-10": "" # latin6, more complete iso-8859-4
+ }
+ if tr.has_key(docutils_encoding.lower()):
+ return tr[docutils_encoding.lower()]
+ return docutils_encoding.translate(string.maketrans("",""),"_-").lower()
+
+ def language_label(self, docutil_label):
+ return self.language.labels[docutil_label]
+
+ # To do: Use unimap.py from TeXML instead. Have to deal with
+ # legal cruft first, because it's LGPL.
+ character_map_string = r"""
+ \ \textbackslash
+ { \{
+ } \}
+ $ \$
+ & \&
+ % \%
+ # \#
+ [ [
+ ] ]
+ - -
+ ` `
+ ' '
+ , ,
+ " "
+ | \textbar
+ < \textless
+ > \textgreater
+ ^ \textasciicircum
+ ~ \textasciitilde
+ _ \Dtextunderscore
+ """
+
+ #special_map = {'\n': ' ', '\r': ' ', '\t': ' ', '\v': ' ', '\f': ' '}
+
+ unicode_map = {
+ u'\u00A0': '~',
+ u'\u2009': '{\\,}',
+ u'\u2013': '{--}',
+ u'\u2014': '{---}',
+ u'\u2018': '`',
+ u'\u2019': '\'',
+ u'\u201A': ',',
+ u'\u201C': '``',
+ u'\u201D': "''",
+ u'\u201E': ',,',
+ u'\u2020': '{\\dag}',
+ u'\u2021': '{\\ddag}',
+ u'\u2026': '{\\dots}',
+ u'\u2122': '{\\texttrademark}',
+ u'\u21d4': '{$\\Leftrightarrow$}',
+ }
+
+ character_map = {}
+ for pair in character_map_string.strip().split('\n'):
+ char, replacement = pair.split()
+ character_map[char] = replacement
+ character_map.update(unicode_map)
+ #character_map.update(special_map)
+
+ def encode(self, text, attval=0):
+ """
+ Encode special characters in ``text`` and return it.
+
+ If attval is true, preserve as much as possible verbatim (used in
+ attribute value encoding).
+ """
+ if not attval:
+ get = self.character_map.get
+ else:
+ # According to
+ # <http://www-h.eng.cam.ac.uk/help/tpl/textprocessing/teTeX/latex/latex2e-html/ltx-164.html>,
+ # the following characters are special: # $ % & ~ _ ^ \ { }
+ # These work without special treatment in macro parameters:
+ # $, &, ~, _, ^
+ get = {'#': '\\#',
+ '%': '\\%',
+ # We cannot do anything about backslashes.
+ '\\': '',
+ '{': '\\{',
+ '}': '\\}',
+ # The quotation mark may be redefined by babel.
+ '"': '"{}',
+ }.get
+ text = ''.join([get(c, c) for c in text])
+ if (self.literal_block or self.inline_literal) and not attval:
+ # NB: We can have inline literals within literal blocks.
+ # Shrink '\r\n'.
+ text = text.replace('\r\n', '\n')
+ # Convert space. If "{ }~~~~~" is wrapped (at the
+ # brace-enclosed space "{ }"), the following non-breaking
+ # spaces ("~~~~") do *not* wind up at the beginning of the
+ # next line. Also note that, for some not-so-obvious
+ # reason, no hyphenation is done if the breaking space ("{
+ # }") comes *after* the non-breaking spaces.
+ if self.literal_block:
+ # Replace newlines with LaTeX line breaks.
+ text = text.replace('\n', '\mbox{}\\\\')
+ firstspace = '~'
+ else:
+ firstspace = '{ }'
+ text = re.sub(r'\s+', lambda m: firstspace +
+ '~' * (len(m.group()) - 1), text)
+ # Protect hyphens; if we don't, line breaks will be
+ # possible at the hyphens and even the \textnhtt macro
+ # from the hyphenat package won't change that.
+ text = text.replace('-', r'\mbox{-}')
+ text = text.replace("'", r'{\Dtextliteralsinglequote}')
+ return text
+ else:
+ if not attval:
+ # Replace space with single protected space.
+ text = re.sub(r'\s+', '{ }', text)
+ # Replace double quotes with macro calls.
+ L = []
+ for part in text.split('"'):
+ if L:
+ # Insert quote.
+ L.append(self.left_quote and r'\Dtextleftdblquote' or
+ r'\Dtextrightdblquote')
+ self.left_quote = not self.left_quote
+ L.append(part)
+ return ''.join(L)
+ else:
+ return text
+
+ def astext(self):
+ return '\n'.join(self.header) + (''.join(self.body))
+
+ def append(self, text, newline='%\n'):
+ """
+ Append text, stripping newlines, producing nice LaTeX code.
+ """
+ lines = [' ' * self.indentation_level + line + newline
+ for line in text.splitlines(0)]
+ self.body.append(''.join(lines))
+
+ def visit_Text(self, node):
+ self.append(self.encode(node.astext()))
+
+ def depart_Text(self, node):
+ pass
+
+ def before_title(self, node):
+ self.append(r'\renewcommand{\Dtitleastext}{%s}'
+ % self.encode(node.astext()))
+ self.append(r'\renewcommand{\Dhassubtitle}{%s}'
+ % ((len(node.parent) > 2 and
+ isinstance(node.parent[1], nodes.subtitle))
+ and 'true' or 'false'))
+
+ literal_block = 0
+
+ def visit_literal_block(self, node):
+ self.literal_block = 1
+
+ def depart_literal_block(self, node):
+ self.literal_block = 0
+
+ visit_doctest_block = visit_literal_block
+ depart_doctest_block = depart_literal_block
+
+ inline_literal = 0
+
+ def visit_literal(self, node):
+ self.inline_literal += 1
+
+ def depart_literal(self, node):
+ self.inline_literal -= 1
+
+ def visit_comment(self, node):
+ self.append('\n'.join(['% ' + line for line
+ in node.astext().splitlines(0)]), newline='\n')
+ raise nodes.SkipChildren
+
+ bullet_list_level = 0
+
+ def visit_bullet_list(self, node):
+ self.append(r'\Dsetbullet{\labelitem%s}' %
+ ['i', 'ii', 'iii', 'iv'][min(self.bullet_list_level, 3)])
+ self.bullet_list_level += 1
+
+ def depart_bullet_list(self, node):
+ self.bullet_list_level -= 1
+
+ enum_styles = {'arabic': 'arabic', 'loweralpha': 'alph', 'upperalpha':
+ 'Alph', 'lowerroman': 'roman', 'upperroman': 'Roman'}
+
+ enum_counter = 0
+
+ def visit_enumerated_list(self, node):
+ # We create our own enumeration list environment. This allows
+ # to set the style and starting value and unlimited nesting.
+ # Maybe this can be moved to the stylesheet?
+ self.enum_counter += 1
+ enum_prefix = self.encode(node['prefix'])
+ enum_suffix = self.encode(node['suffix'])
+ enum_type = '\\' + self.enum_styles.get(node['enumtype'], r'arabic')
+ start = node.get('start', 1) - 1
+ counter = 'Denumcounter%d' % self.enum_counter
+ self.append(r'\Dmakeenumeratedlist{%s}{%s}{%s}{%s}{%s}{'
+ % (enum_prefix, enum_type, enum_suffix, counter, start))
+ # for Emacs: }
+
+ def depart_enumerated_list(self, node):
+ self.append('}') # for Emacs: {
+
+ def before_list_item(self, node):
+ # XXX needs cleanup.
+ if (len(node) and (isinstance(node[-1], nodes.TextElement) or
+ isinstance(node[-1], nodes.Text)) and
+ node.parent.index(node) == len(node.parent) - 1):
+ node['lastitem'] = 'true'
+
+ before_line = before_list_item
+
+ def before_raw(self, node):
+ if 'latex' in node.get('format', '').split():
+ # We're inserting the text in before_raw and thus outside
+ # of \DN... and \Dattr in order to make grouping with
+ # curly brackets work.
+ self.append(node.astext())
+ raise nodes.SkipChildren
+
+ def process_backlinks(self, node, type):
+ self.append(r'\renewcommand{\Dsinglebackref}{}')
+ self.append(r'\renewcommand{\Dmultiplebackrefs}{}')
+ if len(node['backrefs']) > 1:
+ refs = []
+ for i in range(len(node['backrefs'])):
+ refs.append(r'\Dmulti%sbacklink{%s}{%s}'
+ % (type, node['backrefs'][i], i + 1))
+ self.append(r'\renewcommand{\Dmultiplebackrefs}{(%s){ }}'
+ % ', '.join(refs))
+ elif len(node['backrefs']) == 1:
+ self.append(r'\renewcommand{\Dsinglebackref}{%s}'
+ % node['backrefs'][0])
+
+ def visit_footnote(self, node):
+ self.process_backlinks(node, 'footnote')
+
+ def visit_citation(self, node):
+ self.process_backlinks(node, 'citation')
+
+ def before_table(self, node):
+ # A table contains exactly one tgroup. See before_tgroup.
+ pass
+
+ def before_tgroup(self, node):
+ widths = []
+ total_width = 0
+ for i in range(int(node['cols'])):
+ assert isinstance(node[i], nodes.colspec)
+ widths.append(int(node[i]['colwidth']) + 1)
+ total_width += widths[-1]
+ del node[:len(widths)]
+ tablespec = '|'
+ for w in widths:
+ # 0.93 is probably wrong in many cases. XXX Find a
+ # solution which works *always*.
+ tablespec += r'p{%s\linewidth}|' % (0.93 * w /
+ max(total_width, 60))
+ self.append(r'\Dmaketable{%s}{' % tablespec)
+ self.context.append('}')
+ raise SkipAttrParentLaTeX
+
+ def depart_tgroup(self, node):
+ self.append(self.context.pop())
+
+ def before_row(self, node):
+ raise SkipAttrParentLaTeX
+
+ def before_thead(self, node):
+ raise SkipAttrParentLaTeX
+
+ def before_tbody(self, node):
+ raise SkipAttrParentLaTeX
+
+ def is_simply_entry(self, node):
+ return (len(node) == 1 and isinstance(node[0], nodes.paragraph) or
+ len(node) == 0)
+
+ def before_entry(self, node):
+ is_leftmost = 0
+ if node.hasattr('morerows'):
+ self.document.reporter.severe('Rowspans are not supported.')
+ # Todo: Add empty cells below rowspanning cell and issue
+ # warning instead of severe.
+ if node.hasattr('morecols'):
+ # The author got a headache trying to implement
+ # multicolumn support.
+ if not self.is_simply_entry(node):
+ self.document.reporter.severe(
+ 'Colspanning table cells may only contain one paragraph.')
+ # Todo: Same as above.
+ # The number of columns this entry spans (as an integer).
+ colspan = int(node['morecols']) + 1
+ del node['morecols']
+ else:
+ colspan = 1
+ # Macro to call.
+ macro_name = r'\Dcolspan'
+ if node.parent.index(node) == 0:
+ # Leftmost column.
+ macro_name += 'left'
+ is_leftmost = 1
+ if colspan > 1:
+ self.append('%s{%s}{' % (macro_name, colspan))
+ self.context.append('}')
+ else:
+ # Do not add a multicolumn with colspan 1 because we need
+ # at least one non-multicolumn cell per column to get the
+ # desired column widths, and we can only do colspans with
+ # cells consisting of only one paragraph.
+ if not is_leftmost:
+ self.append(r'\Dsubsequententry{')
+ self.context.append('}')
+ else:
+ self.context.append('')
+ if isinstance(node.parent.parent, nodes.thead):
+ node['tableheaderentry'] = 'true'
+
+ # Don't add \renewcommand{\Dparent}{...} because there must
+ # not be any non-expandable commands in front of \multicolumn.
+ raise SkipParentLaTeX
+
+ def depart_entry(self, node):
+ self.append(self.context.pop())
+
+ def before_substitution_definition(self, node):
+ raise nodes.SkipNode
+
+ indentation_level = 0
+
+ def node_name(self, node):
+ return node.__class__.__name__.replace('_', '')
+
+ def propagate_attributes(self, node):
+ # Propagate attributes using \Dattr macros.
+ node_name = self.node_name(node)
+ attlist = []
+ if isinstance(node, nodes.Element):
+ attlist = node.attlist()
+ numatts = 0
+ pass_contents = self.pass_contents(node)
+ for key, value in attlist:
+ if isinstance(value, ListType):
+ self.append(r'\renewcommand{\Dattrlen}{%s}' % len(value))
+ for i in range(len(value)):
+ self.append(r'\Dattr{%s}{%s}{%s}{%s}{' %
+ (i+1, key, self.encode(value[i], attval=1),
+ node_name))
+ if not pass_contents:
+ self.append('}')
+ numatts += len(value)
+ else:
+ self.append(r'\Dattr{}{%s}{%s}{%s}{' %
+ (key, self.encode(unicode(value), attval=1),
+ node_name))
+ if not pass_contents:
+ self.append('}')
+ numatts += 1
+ if pass_contents:
+ self.context.append('}' * numatts) # for Emacs: {
+ else:
+ self.context.append('')
+
+ def visit_docinfo(self, node):
+ raise NotImplementedError('Docinfo not yet implemented.')
+
+ def visit_document(self, node):
+ document = node
+ # Move IDs into TextElements. This won't work for images.
+ # Need to review this.
+ for node in document.traverse(lambda n: isinstance(n, nodes.Element)):
+ if node.has_key('ids') and not isinstance(node,
+ nodes.TextElement):
+ next_text_element = node.next_node(
+ lambda n: isinstance(n, nodes.TextElement))
+ if next_text_element:
+ next_text_element['ids'].extend(node['ids'])
+ node['ids'] = []
+
+ def pass_contents(self, node):
+ r"""
+ Return true if the node contents should be passed in
+ parameters of \DN... and \Dattr.
+ """
+ return not isinstance(node, (nodes.document, nodes.section))
+
+ def dispatch_visit(self, node):
+ skip_attr = skip_parent = 0
+ # TreePruningException to be propagated.
+ tree_pruning_exception = None
+ if hasattr(self, 'before_' + node.__class__.__name__):
+ try:
+ getattr(self, 'before_' + node.__class__.__name__)(node)
+ except SkipParentLaTeX:
+ skip_parent = 1
+ except SkipAttrParentLaTeX:
+ skip_attr = 1
+ skip_parent = 1
+ except nodes.SkipNode:
+ raise
+ except (nodes.SkipChildren, nodes.SkipSiblings), instance:
+ tree_pruning_exception = instance
+ except nodes.SkipDeparture:
+ raise NotImplementedError(
+ 'SkipDeparture not usable in LaTeX writer')
+
+ if not isinstance(node, nodes.Text):
+ node_name = self.node_name(node)
+ # attribute_deleters will be appended to self.context.
+ attribute_deleters = []
+ if not skip_parent and not isinstance(node, nodes.document):
+ self.append(r'\renewcommand{\Dparent}{%s}'
+ % self.node_name(node.parent))
+ for name, value in node.attlist():
+ # @@@ Evaluate if this is really needed and refactor.
+ if not isinstance(value, ListType) and not ':' in name:
+ macro = r'\DcurrentN%sA%s' % (node_name, name)
+ self.append(r'\def%s{%s}' % (
+ macro, self.encode(unicode(value), attval=1)))
+ attribute_deleters.append(r'\let%s=\relax' % macro)
+ self.context.append('\n'.join(attribute_deleters))
+ if self.pass_contents(node):
+ self.append(r'\DN%s{' % node_name)
+ self.context.append('}')
+ else:
+ self.append(r'\Dvisit%s' % node_name)
+ self.context.append(r'\Ddepart%s' % node_name)
+ self.indentation_level += 1
+ if not skip_attr:
+ self.propagate_attributes(node)
+ else:
+ self.context.append('')
+
+ if (isinstance(node, nodes.TextElement) and
+ not isinstance(node.parent, nodes.TextElement)):
+ # Reset current quote to left.
+ self.left_quote = 1
+
+ # Call visit_... method.
+ try:
+ nodes.SparseNodeVisitor.dispatch_visit(self, node)
+ except LaTeXException:
+ raise NotImplementedError(
+ 'visit_... methods must not raise LaTeXExceptions')
+
+ if tree_pruning_exception:
+ # Propagate TreePruningException raised in before_... method.
+ raise tree_pruning_exception
+
+ def is_invisible(self, node):
+ # Return true if node is invisible or moved away in the LaTeX
+ # rendering.
+ return (isinstance(node, nodes.Invisible) or
+ isinstance(node, nodes.footnote) or
+ isinstance(node, nodes.citation) or
+ # We never know what's inside raw nodes, and often
+ # they *are* invisible. So let's have the user take
+ # care of them.
+ isinstance(node, nodes.raw) or
+ # Horizontally aligned image or figure.
+ node.get('align', None) in ('left', 'center', 'right'))
+
+ def needs_space(self, node):
+ # Return true if node is a visible block-level element.
+ return ((isinstance(node, nodes.Body) or
+ isinstance(node, nodes.topic) or
+ #isinstance(node, nodes.rubric) or
+ isinstance(node, nodes.transition) or
+ isinstance(node, nodes.caption) or
+ isinstance(node, nodes.legend)) and
+ not (self.is_invisible(node) or
+ isinstance(node.parent, nodes.TextElement)))
+
+ def dispatch_departure(self, node):
+ # Call departure method.
+ nodes.SparseNodeVisitor.dispatch_departure(self, node)
+
+ if not isinstance(node, nodes.Text):
+ # Close attribute and node handler call (\DN...{...}).
+ self.indentation_level -= 1
+ self.append(self.context.pop() + self.context.pop())
+ # Delete \Dcurrent... attribute macros.
+ self.append(self.context.pop())
+ # Insert space.
+ if self.needs_space(node):
+ # Next sibling.
+ next_node = node.next_node(
+ ascend=0, siblings=1, descend=0,
+ condition=lambda n: not self.is_invisible(n))
+ if self.needs_space(next_node):
+ # Insert space.
+ if isinstance(next_node, nodes.paragraph):
+ if isinstance(node, nodes.paragraph):
+ # Space between paragraphs.
+ self.append(r'\Dparagraphspace')
+ else:
+ # Space in front of a paragraph.
+ self.append(r'\Dauxiliaryparspace')
+ else:
+ # Space in front of something else than a paragraph.
+ self.append(r'\Dauxiliaryspace')
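(Aside, not part of the checkin: the Babel helper above maps ISO 639 codes, including dialect forms such as 'pt_br', onto babel language names; a small illustrative sketch of its behaviour as defined in the table above.)
>>> from docutils.writers.newlatex2e import Babel
>>> Babel('de').get_language()
'ngerman'
>>> Babel('pt_br').get_language()
'portuguese'
>>> print Babel('xx').get_language()
None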
Added: Zope3/trunk/src/docutils/writers/null.py
===================================================================
--- Zope3/trunk/src/docutils/writers/null.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/writers/null.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -0,0 +1,23 @@
+# Author: David Goodger
+# Contact: goodger at python.org
+# Revision: $Revision: 3246 $
+# Date: $Date: 2005-04-23 21:23:21 +0200 (Sat, 23 Apr 2005) $
+# Copyright: This module has been placed in the public domain.
+
+"""
+A do-nothing Writer.
+"""
+
+from docutils import writers
+
+
+class Writer(writers.Writer):
+
+ supported = ('null',)
+ """Formats this writer supports."""
+
+ config_section = 'null writer'
+ config_section_dependencies = ('writers',)
+
+ def translate(self):
+ pass
Modified: Zope3/trunk/src/docutils/writers/pep_html.py
===================================================================
--- Zope3/trunk/src/docutils/writers/pep_html.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/writers/pep_html.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Author: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:07 $
+# Revision: $Revision: 3129 $
+# Date: $Date: 2005-03-26 17:21:28 +0100 (Sat, 26 Mar 2005) $
# Copyright: This module has been placed in the public domain.
"""
@@ -13,7 +13,7 @@
import sys
import docutils
-from docutils import nodes, frontend, utils
+from docutils import frontend, nodes, utils
from docutils.writers import html4css1
@@ -21,20 +21,9 @@
settings_spec = html4css1.Writer.settings_spec + (
'PEP/HTML-Specific Options',
- 'The HTML --footnote-references option is set to "brackets" by '
- 'default.',
- (('Specify a PEP stylesheet URL, used verbatim. Default is '
- '--stylesheet\'s value. If given, --pep-stylesheet overrides '
- '--stylesheet.',
- ['--pep-stylesheet'],
- {'metavar': '<URL>'}),
- ('Specify a PEP stylesheet file, relative to the current working '
- 'directory. The path is adjusted relative to the output HTML '
- 'file. Overrides --pep-stylesheet and --stylesheet-path.',
- ['--pep-stylesheet-path'],
- {'metavar': '<path>'}),
- ('Specify a template file. Default is "pep-html-template".',
- ['--pep-template'],
+ None,
+ (('Specify a template file. Default is "pep-html-template".',
+ ['--template'],
{'default': 'pep-html-template', 'metavar': '<file>'}),
('Python\'s home URL. Default is ".." (parent directory).',
['--python-home'],
@@ -42,14 +31,16 @@
('Home URL prefix for PEPs. Default is "." (current directory).',
['--pep-home'],
{'default': '.', 'metavar': '<URL>'}),
- # Workaround for SourceForge's broken Python
- # (``import random`` causes a segfault).
+ # For testing.
(frontend.SUPPRESS_HELP,
- ['--no-random'], {'action': 'store_true'}),))
+ ['--no-random'],
+ {'action': 'store_true', 'validator': frontend.validate_boolean}),))
- settings_default_overrides = {'footnote_references': 'brackets'}
+ relative_path_settings = (html4css1.Writer.relative_path_settings
+ + ('template',))
- relative_path_settings = ('pep_stylesheet_path', 'pep_template')
+ config_section = 'pep_html writer'
+ config_section_dependencies = ('writers', 'html4css1 writer')
def __init__(self):
html4css1.Writer.__init__(self)
@@ -58,7 +49,7 @@
def translate(self):
html4css1.Writer.translate(self)
settings = self.document.settings
- template = open(settings.pep_template).read()
+ template = open(settings.template).read()
# Substitutions dict for template:
subs = {}
subs['encoding'] = settings.output_encoding
@@ -82,7 +73,7 @@
subs['banner'] = random.randrange(64)
try:
subs['pepnum'] = '%04i' % int(pepnum)
- except:
+ except ValueError:
subs['pepnum'] = pepnum
subs['title'] = header[1][1].astext()
subs['body'] = ''.join(
@@ -93,21 +84,7 @@
class HTMLTranslator(html4css1.HTMLTranslator):
- def get_stylesheet_reference(self, relative_to=None):
- settings = self.settings
- if relative_to == None:
- relative_to = settings._destination
- if settings.pep_stylesheet_path:
- return utils.relative_path(relative_to,
- settings.pep_stylesheet_path)
- elif settings.pep_stylesheet:
- return settings.pep_stylesheet
- elif settings._stylesheet_path:
- return utils.relative_path(relative_to, settings.stylesheet_path)
- else:
- return settings.stylesheet
-
def depart_field_list(self, node):
html4css1.HTMLTranslator.depart_field_list(self, node)
- if node.get('class') == 'rfc2822':
+ if 'rfc2822' in node['classes']:
self.body.append('<hr />\n')
Modified: Zope3/trunk/src/docutils/writers/pseudoxml.py
===================================================================
--- Zope3/trunk/src/docutils/writers/pseudoxml.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/docutils/writers/pseudoxml.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -1,7 +1,7 @@
# Authors: David Goodger
# Contact: goodger at users.sourceforge.net
-# Revision: $Revision: 1.1 $
-# Date: $Date: 2003/07/30 20:14:07 $
+# Revision: $Revision: 1645 $
+# Date: $Date: 2003-08-27 22:50:43 +0200 (Wed, 27 Aug 2003) $
# Copyright: This module has been placed in the public domain.
"""
@@ -19,6 +19,9 @@
supported = ('pprint', 'pformat', 'pseudoxml')
"""Formats this writer supports."""
+ config_section = 'pseudoxml writer'
+ config_section_dependencies = ('writers',)
+
output = None
"""Final translated form of `document`."""
Modified: Zope3/trunk/src/zope/app/apidoc/browser/apidoc.css
===================================================================
--- Zope3/trunk/src/zope/app/apidoc/browser/apidoc.css 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/zope/app/apidoc/browser/apidoc.css 2005-07-14 12:25:18 UTC (rev 33317)
@@ -142,7 +142,11 @@
margin: 0.5em 2em;
}
+div.documentation a.reference img {
+ border: none;
+}
+
div.inline-documentation h1 {
font-size: 120%;
padding: 0;
@@ -171,6 +175,10 @@
margin: 0 0.5em 0 0;
}
+div.inline-documentation a.reference img {
+ border: none;
+}
+
/* Styles for ZCML markup */
code.zcml a {
Modified: Zope3/trunk/src/zope/app/apidoc/codemodule/browser/README.txt
===================================================================
--- Zope3/trunk/src/zope/app/apidoc/codemodule/browser/README.txt 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/zope/app/apidoc/codemodule/browser/README.txt 2005-07-14 12:25:18 UTC (rev 33317)
@@ -34,11 +34,9 @@
Get the doc string of the module formatted in STX or ReST.
>>> print details.getDoc().strip()
- <div class="document">
<p>Code Documentation Module</p>
<p>This module is able to take a dotted name of any class and display
documentation for it.</p>
- </div>
`getEntries(columns=True)`
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -165,7 +163,7 @@
Get all methods of this class.
>>> pprint(details.getMethods()[-2:])
- [{'doc': u'<div class="document">\nSetup module and class tree.</div>\n',
+ [{'doc': u'<p>Setup module and class tree.</p>\n',
'interface': None,
'name': 'setup',
'read_perm': None,
@@ -184,8 +182,7 @@
Get the doc string of the class STX formatted.
>>> print details.getDoc()[:-1]
- <div class="document">
- Represent the code browser documentation root</div>
+ <p>Represent the code browser documentation root</p>
Function Details
@@ -207,7 +204,7 @@
Get the doc string of the function in a rendered format.
>>> details.getDocString()
- u'<div class="document">\nThis is the foo function.</div>\n'
+ u'<p>This is the foo function.</p>\n'
`getAttributes()`
~~~~~~~~~~~~~~~~~
@@ -240,7 +237,7 @@
Render the file content to HTML.
- >>> print details.renderedContent()[54:102]
+ >>> print details.renderedContent()[:48]
<h1 class="title">Code Documentation Module</h1>
Modified: Zope3/trunk/src/zope/app/apidoc/component.txt
===================================================================
--- Zope3/trunk/src/zope/app/apidoc/component.txt 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/zope/app/apidoc/component.txt 2005-07-14 12:25:18 UTC (rev 33317)
@@ -278,7 +278,7 @@
>>> pprint(component.getFactoryInfoDictionary(
... component.getFactories(IFooBar).next()))
- {'description': u'<div class="document">\nMy Foo Bar</div>\n',
+ {'description': u'<p>My Foo Bar</p>\n',
'name': 'MyFooBar',
'title': 'MyFooBar',
'url': '__builtin__/MyFooBar'}
Modified: Zope3/trunk/src/zope/app/apidoc/ifacemodule/browser.txt
===================================================================
--- Zope3/trunk/src/zope/app/apidoc/ifacemodule/browser.txt 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/zope/app/apidoc/ifacemodule/browser.txt 2005-07-14 12:25:18 UTC (rev 33317)
@@ -184,8 +184,8 @@
Return the main documentation string of the interface.
- >>> details.getDoc()[:55]
- u'<div class="document">\n<p>This is the Foo interface</p>'
+ >>> details.getDoc()[:32]
+ u'<p>This is the Foo interface</p>'
`getBases()`
@@ -227,9 +227,9 @@
Return a list of attributes in the order they were specified.
>>> pprint(details.getAttributes())
- [{'doc': u'<div class="document">\nThis is bar.</div>\n',
+ [{'doc': u'<p>This is bar.</p>\n',
'name': 'bar'},
- {'doc': u'<div class="document">\nThis is foo.</div>\n',
+ {'doc': u'<p>This is foo.</p>\n',
'name': 'foo'}]
@@ -239,10 +239,10 @@
Return a list of methods in the order they were specified.
>>> pprint(details.getMethods())
- [{'doc': u'<div class="document">\nThis is blah.</div>\n',
+ [{'doc': u'<p>This is blah.</p>\n',
'name': 'blah',
'signature': '()'},
- {'doc': u'<div class="document">\nThis is get.</div>\n',
+ {'doc': u'<p>This is get.</p>\n',
'name': 'get',
'signature': '(key, default=None)'}]
@@ -258,7 +258,7 @@
[{'class': {'name': 'TextLine',
'path': 'zope/schema/_bootstrapfields/TextLine'},
'default': "u'Foo'",
- 'description': u'<div class="document">\nTitle</div>\n',
+ 'description': u'<p>Title</p>\n',
'iface': {'id': 'zope.schema.interfaces.ITextLine',
'name': 'ITextLine'},
'name': 'title',
@@ -268,7 +268,7 @@
{'class': {'name': 'Text',
'path': 'zope/schema/_bootstrapfields/Text'},
'default': "u'Foo.'",
- 'description': u'<div class="document">\nDesc</div>\n',
+ 'description': u'<p>Desc</p>\n',
'iface': {'id': 'zope.schema.interfaces.IText',
'name': 'IText'},
'name': 'description',
Modified: Zope3/trunk/src/zope/app/apidoc/interface.txt
===================================================================
--- Zope3/trunk/src/zope/app/apidoc/interface.txt 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/zope/app/apidoc/interface.txt 2005-07-14 12:25:18 UTC (rev 33317)
@@ -215,7 +215,7 @@
attribute:
>>> pprint(interface.getAttributeInfoDictionary(IFoo['baz']))
- {'doc': u'<div class="document">\nThis is the baz attribute</div>\n',
+ {'doc': u'<p>This is the baz attribute</p>\n',
'name': 'baz'}
@@ -226,7 +226,7 @@
>>> pprint(interface.getMethodInfoDictionary(IFoo['blah'])) #doc
{'doc':
- u'<div class="document">\nThis is the <cite>blah</cite> method.</div>\n',
+ u'<p>This is the <cite>blah</cite> method.</p>\n',
'name': 'blah',
'signature': '(one, two, three=None, *args, **kwargs)'}
@@ -240,7 +240,7 @@
{'class': {'name': 'TextLine',
'path': 'zope/schema/_bootstrapfields/TextLine'},
'default': "u'My Bar'",
- 'description': u'<div class="document">\nThe Bar</div>\n',
+ 'description': u'<p>The Bar</p>\n',
'iface': {'id': 'zope.schema.interfaces.ITextLine',
'name': 'ITextLine'},
'name': 'bar',
Modified: Zope3/trunk/src/zope/app/apidoc/utilities.txt
===================================================================
--- Zope3/trunk/src/zope/app/apidoc/utilities.txt 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/zope/app/apidoc/utilities.txt 2005-07-14 12:25:18 UTC (rev 33317)
@@ -564,11 +564,11 @@
You specify the format as follows:
>>> utilities.renderText('Hello!\n', format='zope.source.rest')
- u'<div class="document">\nHello!</div>\n'
+ u'<p>Hello!</p>\n'
Note that the format string must be a valid source factory id; if the factory
id is not a match, 'zope.source.stx' is used. Thus, specifying the module is
often safer (if available):
>>> utilities.renderText('Hello!\n', module=apidoc)
- u'<div class="document">\nHello!</div>\n'
\ No newline at end of file
+ u'<p>Hello!</p>\n'
\ No newline at end of file
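(The doctest updates above all record the same behavioural change: with docutils 0.3.9 the renderer returns a bare HTML fragment such as u'<p>Hello!</p>\n' instead of wrapping everything in a '<div class="document">'. Below is a minimal stand-alone sketch, not part of this checkin, of where that fragment comes from, assuming stock docutils 0.3.9 and the same publish_string() call as the refactored rest.py further down; BodyOnlyTranslator is a hypothetical name standing in for the simplified ZopeTranslator.

    from docutils import core
    from docutils.writers.html4css1 import Writer, HTMLTranslator

    class BodyOnlyTranslator(HTMLTranslator):
        # Hypothetical stand-in for ZopeTranslator: join only the body
        # parts collected by the HTML visitor, dropping the <html>/<head>
        # frame and the outer <div class="document"> wrapper.
        def astext(self):
            return u"".join(self.body_pre_docinfo + self.docinfo + self.body)

    writer = Writer()                      # stock html4css1 writer
    writer.translator_class = BodyOnlyTranslator
    html = core.publish_string(
        u'Hello!\n', writer=writer,
        settings_overrides={'input_encoding': 'unicode',
                            'output_encoding': 'unicode'})
    # html should now be u'<p>Hello!</p>\n', matching the updated doctests.
)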
Modified: Zope3/trunk/src/zope/app/generations/browser/managerdetails.py
===================================================================
--- Zope3/trunk/src/zope/app/generations/browser/managerdetails.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/zope/app/generations/browser/managerdetails.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -54,12 +54,8 @@
>>> import pprint
>>> pp = pprint.PrettyPrinter(width=76)
>>> pp.pprint(info)
- [{'from': 0,
- 'info': u'<div class="document">\nEvolver 1</div>\n',
- 'to': 1},
- {'from': 1,
- 'info': u'<div class="document">\nEvolver 2</div>\n',
- 'to': 2},
+ [{'info': u'<p>Evolver 1</p>\n', 'to': 1, 'from': 0},
+ {'info': u'<p>Evolver 2</p>\n', 'to': 2, 'from': 1},
{'info': '', 'to': 3, 'from': 2}]
We'd better clean up:
Modified: Zope3/trunk/src/zope/app/renderer/rest.py
===================================================================
--- Zope3/trunk/src/zope/app/renderer/rest.py 2005-07-14 07:41:49 UTC (rev 33316)
+++ Zope3/trunk/src/zope/app/renderer/rest.py 2005-07-14 12:25:18 UTC (rev 33317)
@@ -18,7 +18,7 @@
import docutils.core, docutils.io
from docutils import nodes, writers
from docutils.writers.html4css1 import HTMLTranslator
-from docutils.writers.html4css1 import Writer as HTMLWriter
+from docutils.writers.html4css1 import Writer
from zope.interface import implements
from zope.app.publisher.browser import BrowserView
@@ -35,60 +35,15 @@
ReStructuredTextSourceFactory = SourceFactory(IReStructuredTextSource)
-class Writer(writers.Writer):
- """
- A custom docutils writer that will ultimately give us
- only a body, utilizing the docutils framework.
- """
- supported = ('html',)
- """ Formats this writer supports."""
-
- settings_spec = (
- 'Zope 3 Specific Options',
- None,
- (('Specify base section (i.e. if 3, a top-level section '
- 'would be written as H3, 2nd level H4, etc...). Default is 3.',
- ['--base-section'],
- {'choices': ['1','2','3','4'],
- 'default': '3',
- 'metavar': '<NUMBER>'}),) + HTMLWriter.settings_spec[2]
- )
-
- relative_path_settings = ('stylesheet_path',)
-
- output = None
-
- def __init__(self):
- writers.Writer.__init__(self)
- self.translator_class = ZopeTranslator
-
- def translate(self):
- visitor = self.translator_class(self.document)
- self.document.walkabout(visitor)
- self.output = visitor.astext()
- self.stylesheet = visitor.stylesheet
- self.body = visitor.body
-
-
class ZopeTranslator(HTMLTranslator):
"""
The ZopeTranslator extends the base HTML processor for reST. It
augments reST by:
- - Starting headers at level 3 (this does not apply to the title
- header, which occurs if a reST header element appears as the first
- element in a document). This generally allows reST HTML code to
- fit in an existing site.
-
- Outputs *only* the 'body' parts of the document tree, using the
internal docutils structure.
"""
- def __init__(self, document):
- document.settings.embed_stylesheet = 0
- document.settings.base_section = int(document.settings.base_section)
-
- HTMLTranslator.__init__(self, document)
-
+
def astext(self):
"""
This is where we join the document parts that we want in
@@ -98,22 +53,7 @@
body = self.body_pre_docinfo + self.docinfo + self.body
return u"".join(body)
- def visit_title(self, node):
- """
- Handles the base section settings (ie - starting the
- document with header level 3)
- """
- if isinstance(node.parent, nodes.topic):
- HTMLTranslator.visit_title(self, node)
- elif self.section_level == 0:
- HTMLTranslator.visit_title(self, node)
- else:
- # offset section level to account for ``base_section``.
- self.section_level += (self.settings.base_section - 1)
- HTMLTranslator.visit_title(self, node)
- self.section_level -= (self.settings.base_section - 1)
-
class ReStructuredTextToHTMLRenderer(BrowserView):
r"""An Adapter to convert from Restructured Text to HTML.
@@ -129,13 +69,11 @@
... ''')
>>> renderer = ReStructuredTextToHTMLRenderer(source, TestRequest())
>>> print renderer.render().strip()
- <div class="document">
<p>This is source.</p>
<div class="section" id="header-3">
<h3><a name="header-3">Header 3</a></h3>
<p>This is more source.</p>
</div>
- </div>
"""
@@ -149,19 +87,20 @@
>>> renderer = ReStructuredTextToHTMLRenderer(u'b\xc3h', None)
>>> renderer.render()
- u'<div class="document">\nb\xc3h</div>\n'
+ u'<p>b\xc3h</p>\n'
"""
settings_overrides = {
- 'footnote_references': 'brackets',
'report_level': 1,
'halt_level': 6,
- 'stylesheet': 'zope3.css',
'input_encoding': 'unicode',
'output_encoding': 'unicode',
+ 'initial_header_level': 3
}
+ writer = Writer()
+ writer.translator_class = ZopeTranslator
html = docutils.core.publish_string(
self.context,
- writer=Writer(), # Our custom writer
+ writer=writer,
settings_overrides=settings_overrides,
)
return html
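(The other half of the simplification is the header offset: the removed --base-section option and visit_title() override are replaced by docutils' own 'initial_header_level' setting, so a top-level reST section still renders as <h3>, as the render() doctest above shows. A rough stand-alone check of that equivalence, not part of this checkin and assuming stock docutils 0.3.9; the exact anchor markup around the heading may differ slightly from the doctest.

    import docutils.core
    from docutils.writers.html4css1 import Writer

    source = u"This is source.\n\nHeader 3\n--------\n\nThis is more source.\n"
    html = docutils.core.publish_string(
        source, writer=Writer(),
        settings_overrides={'input_encoding': 'unicode',
                            'output_encoding': 'unicode',
                            'initial_header_level': 3})
    # The section title should come out as an <h3> heading rather than the
    # default <h1>, which is what the old base_section machinery achieved
    # by bumping section_level inside visit_title().
    assert '<h3>' in html
)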