[Zope3-checkins] SVN: zope.testing/branches/regebro-doctest-patching/src/zope/testing/ Checkpoint.

Lennart Regebro regebro at gmail.com
Mon Apr 19 15:33:03 EDT 2010


Log message for revision 111141:
  Checkpoint.

Changed:
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/doctest/__init__.py
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/doctest.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-arguments.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-colors.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-coverage.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-edge-cases.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-errors.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-knit.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-layers.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-leaks.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-progress.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-repeat.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-simple.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-subunit.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-test-selection.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-verbose.txt
  U   zope.testing/branches/regebro-doctest-patching/src/zope/testing/tests.py

-=-
Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/doctest/__init__.py
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/doctest/__init__.py	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/doctest/__init__.py	2010-04-19 19:33:03 UTC (rev 111141)
@@ -1,52 +1,3 @@
-# Module doctest.
-# Released to the public domain 16-Jan-2001, by Tim Peters (tim at python.org).
-# Major enhancements and refactoring by:
-#     Jim Fulton
-#     Edward Loper
-
-# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
-
-r"""Module doctest -- a framework for running examples in docstrings.
-
-In simplest use, end each module M to be tested with:
-
-def _test():
-    import doctest
-    doctest.testmod()
-
-if __name__ == "__main__":
-    _test()
-
-Then running the module as a script will cause the examples in the
-docstrings to get executed and verified:
-
-python M.py
-
-This won't display anything unless an example fails, in which case the
-failing example(s) and the cause(s) of the failure(s) are printed to stdout
-(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
-line of output is "Test failed.".
-
-Run it with the -v switch instead:
-
-python M.py -v
-
-and a detailed report of all examples tried is printed to stdout, along
-with assorted summaries at the end.
-
-You can force verbose mode by passing "verbose=True" to testmod, or prohibit
-it by passing "verbose=False".  In either of those cases, sys.argv is not
-examined by testmod.
-
-There are a variety of other ways to run doctests, including integration
-with the unittest framework, and support for running non-Python text
-files containing doctests.  There are also many ways to override parts
-of doctest's default behaviors.  See the Library Reference Manual for
-details.
-"""
-
-__docformat__ = 'reStructuredText en'
-
 __all__ = [
     # 0, Option Flags
     'register_optionflag',
@@ -54,6 +5,7 @@
     'DONT_ACCEPT_BLANKLINE',
     'NORMALIZE_WHITESPACE',
     'ELLIPSIS',
+    'SKIP',
     'IGNORE_EXCEPTION_DETAIL',
     'COMPARISON_FLAGS',
     'REPORT_UDIFF',
@@ -62,7 +14,6 @@
     'REPORT_ONLY_FIRST_FAILURE',
     'REPORTING_FLAGS',
     # 1. Utility Functions
-    'is_private',
     # 2. Example & DocTest
     'Example',
     'DocTest',
@@ -93,2872 +44,40 @@
     'debug',
 ]
 
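(Note: SKIP, newly re-exported above, is the standard doctest option
flag that tells the runner to parse an example but never execute it.
A minimal sketch of its use in a docstring:

    >>> os.remove('precious-file')  # doctest: +SKIP

The example is never run, so it can document dangerous or
environment-specific code without failing the test suite.)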
-import __future__
-
-import doctest
-import sys, traceback, inspect, linecache, os, re
-import unittest, difflib, pdb, tempfile
 import warnings
-from doctest import DocTestFailure, UnexpectedException
-from StringIO import StringIO
-from zope.testing.exceptions import DocTestFailureException
-
-# Don't whine about the deprecated is_private function in this
-# module's tests.
-warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
-                        __name__, 0)
-
 # Tell people to use the standard library module instead.
 warnings.warn('zope.testing.doctest is deprecated in favour of '
               'the Python standard library doctest module', DeprecationWarning,
                stacklevel=2)
 
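For client code, the deprecation above usually means a one-line
migration; a hypothetical before/after sketch (zope-specific extras
such as INTERPRET_FOOTNOTES have no standard library equivalent):

    # Before (deprecated):
    #     from zope.testing import doctest
    # After:
    import doctest

    if __name__ == '__main__':
        doctest.testmod()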
-class UnusedFootnoteWarning(Warning):
-    """Warn about a footnote that is defined, but never referenced."""
-
-real_pdb_set_trace = pdb.set_trace
-
-# There are 4 basic classes:
-#  - Example: a <source, want> pair, plus an intra-docstring line number.
-#  - DocTest: a collection of examples, parsed from a docstring, plus
-#    info about where the docstring came from (name, filename, lineno).
-#  - DocTestFinder: extracts DocTests from a given object's docstring and
-#    its contained objects' docstrings.
-#  - DocTestRunner: runs DocTest cases, and accumulates statistics.
-#
-# So the basic picture is:
-#
-#                             list of:
-# +------+                   +---------+                   +-------+
-# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
-# +------+                   +---------+                   +-------+
-#                            | Example |
-#                            |   ...   |
-#                            | Example |
-#                            +---------+
-
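The diagram above summarizes the pipeline that the rest of this
(removed) module implemented; a minimal runnable sketch of the same
flow against the standard library doctest module:

    import doctest

    def square(x):
        """
        >>> square(3)
        9
        """
        return x * x

    finder = doctest.DocTestFinder()
    runner = doctest.DocTestRunner(verbose=False)
    for test in finder.find(square):   # object -> list of DocTests
        runner.run(test)               # DocTest -> accumulated results
    print runner.summarize()           # aggregated (failures, tries)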
-# Option constants.
-
-from doctest import register_optionflag, \
-    OPTIONFLAGS_BY_NAME, \
-    DONT_ACCEPT_TRUE_FOR_1, \
-    DONT_ACCEPT_BLANKLINE, \
-    NORMALIZE_WHITESPACE, \
-    ELLIPSIS, \
-    IGNORE_EXCEPTION_DETAIL, \
-    COMPARISON_FLAGS, \
-    REPORT_UDIFF, \
-    REPORT_CDIFF, \
-    REPORT_NDIFF, \
-    REPORT_ONLY_FIRST_FAILURE, \
-    REPORTING_FLAGS
-
-INTERPRET_FOOTNOTES = register_optionflag('INTERPRET_FOOTNOTES')
-
-# Special string markers for use in `want` strings:
-BLANKLINE_MARKER = '<BLANKLINE>'
-ELLIPSIS_MARKER = '...'
-
-######################################################################
-## Table of Contents
-######################################################################
-#  1. Utility Functions
-#  2. Example & DocTest -- store test cases
-#  3. DocTest Parser -- extracts examples from strings
-#  4. DocTest Finder -- extracts test cases from objects
-#  5. DocTest Runner -- runs test cases
-#  6. Test Functions -- convenient wrappers for testing
-#  7. Tester Class -- for backwards compatibility
-#  8. Unittest Support
-#  9. Debugging Support
-# 10. Example Usage
-
-######################################################################
-## 1. Utility Functions
-######################################################################
-
-def is_private(prefix, base):
-    """prefix, base -> true iff name prefix + "." + base is "private".
-
-    Prefix may be an empty string, and base does not contain a period.
-    Prefix is ignored (although functions you write conforming to this
-    protocol may make use of it).
-    Return true iff base begins with an (at least one) underscore, but
-    does not both begin and end with (at least) two underscores.
-
-    >>> is_private("a.b", "my_func")
-    False
-    >>> is_private("____", "_my_func")
-    True
-    >>> is_private("someclass", "__init__")
-    False
-    >>> is_private("sometypo", "__init_")
-    True
-    >>> is_private("x.y.z", "_")
-    True
-    >>> is_private("_x.y.z", "__")
-    False
-    >>> is_private("", "")  # senseless but consistent
-    False
-    """
-    warnings.warn("is_private is deprecated; it wasn't useful; "
-                  "examine DocTestFinder.find() lists instead",
-                  DeprecationWarning, stacklevel=2)
-    return base[:1] == "_" and not base[:2] == "__" == base[-2:]
-
-def _extract_future_flags(globs):
-    """
-    Return the compiler-flags associated with the future features that
-    have been imported into the given namespace (globs).
-    """
-    flags = 0
-    for fname in __future__.all_feature_names:
-        feature = globs.get(fname, None)
-        if feature is getattr(__future__, fname):
-            flags |= feature.compiler_flag
-    return flags
-
-def _normalize_module(module, depth=2):
-    """
-    Return the module specified by `module`.  In particular:
-      - If `module` is a module, then return module.
-      - If `module` is a string, then import and return the
-        module with that name.
-      - If `module` is None, then return the calling module.
-        The calling module is assumed to be the module of
-        the stack frame at the given depth in the call stack.
-    """
-    if inspect.ismodule(module):
-        return module
-    elif isinstance(module, (str, unicode)):
-        return __import__(module, globals(), locals(), ["*"])
-    elif module is None:
-        return sys.modules[sys._getframe(depth).f_globals['__name__']]
-    else:
-        raise TypeError("Expected a module, string, or None")
-
-def _indent(s, indent=4):
-    """
-    Add the given number of space characters to the beginning of
-    every non-blank line in `s`, and return the result.
-    """
-    # This regexp matches the start of non-blank lines:
-    return re.sub('(?m)^(?!$)', indent*' ', s)
-
-def _exception_traceback(exc_info):
-    """
-    Return a string containing a traceback message for the given
-    exc_info tuple (as returned by sys.exc_info()).
-    """
-    # Get a traceback message.
-    excout = StringIO()
-    exc_type, exc_val, exc_tb = exc_info
-    traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
-    return excout.getvalue()
-
-# Override some StringIO methods.
-class _SpoofOut(StringIO):
-    def getvalue(self):
-        result = StringIO.getvalue(self)
-        # If anything at all was written, make sure there's a trailing
-        # newline.  There's no way for the expected output to indicate
-        # that a trailing newline is missing.
-        if result and not result.endswith("\n"):
-            result += "\n"
-        # Prevent softspace from screwing up the next test case, in
-        # case they used print with a trailing comma in an example.
-        if hasattr(self, "softspace"):
-            del self.softspace
-        return result
-
+# Patch to fix an error that makes subsequent tests fail after a test
+# has produced unicode output.
+import doctest
+_org_SpoofOut = doctest._SpoofOut
+class _patched_SpoofOut(_org_SpoofOut):
     def truncate(self,   size=None):
-        StringIO.truncate(self, size)
-        if hasattr(self, "softspace"):
-            del self.softspace
+        _org_SpoofOut.truncate(self, size)
+        if not self.buf:
+            self.buf = ''
+doctest._SpoofOut = _patched_SpoofOut
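
To see what this patch guards against, here is a minimal sketch
(Python 2, StringIO-based buffer assumed) of the failure mode: after
a test writes unicode, truncate() can leave the internal buffer as
u'' rather than '', and a later test that emits non-ASCII byte output
then fails when the buffer is joined:

    from StringIO import StringIO

    out = StringIO()
    out.write(u'unicode output from one test\n')
    out.truncate(0)              # buffer may now be u'' instead of ''
    out.write('caf\xc3\xa9\n')   # non-ASCII bytes from the next test
    try:
        out.getvalue()           # joins byte output into a unicode buffer
    except UnicodeDecodeError:
        print 'the leftover unicode buffer broke an unrelated test'

The patched truncate() resets an emptied buffer to a byte string, so
each test starts from a clean str buffer.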
 
-    def write(self, value):
-        if isinstance(value, unicode):
-            value = value.encode('utf8')
-        StringIO.write(self, value)
-
-# Worst-case linear-time ellipsis matching.
-def _ellipsis_match(want, got):
-    """
-    Essentially the only subtle case:
-    >>> _ellipsis_match('aa...aa', 'aaa')
-    False
-    """
-    if ELLIPSIS_MARKER not in want:
-        return want == got
-
-    # Find "the real" strings.
-    ws = want.split(ELLIPSIS_MARKER)
-    assert len(ws) >= 2
-
-    # Deal with exact matches possibly needed at one or both ends.
-    startpos, endpos = 0, len(got)
-    w = ws[0]
-    if w:   # starts with exact match
-        if got.startswith(w):
-            startpos = len(w)
-            del ws[0]
-        else:
-            return False
-    w = ws[-1]
-    if w:   # ends with exact match
-        if got.endswith(w):
-            endpos -= len(w)
-            del ws[-1]
-        else:
-            return False
-
-    if startpos > endpos:
-        # Exact end matches required more characters than we have, as in
-        # _ellipsis_match('aa...aa', 'aaa')
-        return False
-
-    # For the rest, we only need to find the leftmost non-overlapping
-    # match for each piece.  If there's no overall match that way alone,
-    # there's no overall match period.
-    for w in ws:
-        # w may be '' at times, if there are consecutive ellipses, or
-        # due to an ellipsis at the start or end of `want`.  That's OK.
-        # Search for an empty string succeeds, and doesn't change startpos.
-        startpos = got.find(w, startpos, endpos)
-        if startpos < 0:
-            return False
-        startpos += len(w)
-
-    return True
-
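As a usage note, the matcher above (removed here in favour of the
stdlib implementation) is what backs the ELLIPSIS option flag; a
small doctest sketch:

    >>> print list(range(20))  # doctest: +ELLIPSIS
    [0, 1, ..., 19]

The literal prefix '[0, 1, ' and suffix ', 19]' must match exactly;
each '...' absorbs the text in between, and the docstring example
above shows why matches anchored at the two ends must not overlap.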
-def _comment_line(line):
-    "Return a commented form of the given line"
-    line = line.rstrip()
-    if line:
-        return '# '+line
-    else:
-        return '#'
-
-class _OutputRedirectingPdb(pdb.Pdb):
-    """
-    A specialized version of the python debugger that redirects stdout
-    to a given stream when interacting with the user.  Stdout is *not*
-    redirected when traced code is executed.
-    """
-    def __init__(self, out):
-        self.__out = out
-        self.__debugger_used = False
-        try:
-            pdb.Pdb.__init__(self, stdin=sys.stdin, stdout=out)
-        except TypeError:
-            pdb.Pdb.__init__(self)
-        # enable readline
-        self.use_rawinput = 1
-
-    def set_trace(self):
-        self.__debugger_used = True
-        pdb.Pdb.set_trace(self)
-
-    def set_continue(self):
-        # Calling set_continue unconditionally would break unit test coverage
-        # reporting, as Bdb.set_continue calls sys.settrace(None).
-        if self.__debugger_used:
-            pdb.Pdb.set_continue(self)
-
-    def trace_dispatch(self, *args):
-        # Redirect stdout to the given stream.
-        save_stdout = sys.stdout
-        sys.stdout = self.__out
-        # Call Pdb's trace dispatch method.
-        result = pdb.Pdb.trace_dispatch(self, *args)
-        # Restore stdout.
-        sys.stdout = save_stdout
-        return result
-
-# [XX] Normalize with respect to os.path.pardir?
-def _module_relative_path(module, path):
-    if not inspect.ismodule(module):
-        raise TypeError('Expected a module: %r' % module)
-    if path.startswith('/'):
-        raise ValueError('Module-relative files may not have absolute paths')
-
-    # Find the base directory for the path.
-    if hasattr(module, '__file__'):
-        # A normal module/package
-        basedir = os.path.split(module.__file__)[0]
-    elif module.__name__ == '__main__':
-        # An interactive session.
-        if len(sys.argv)>0 and sys.argv[0] != '':
-            basedir = os.path.split(sys.argv[0])[0]
-        else:
-            basedir = os.curdir
-    else:
-        # A module w/o __file__ (this includes builtins)
-        raise ValueError("Can't resolve paths relative to the module " +
-                         module + " (it has no __file__)")
-
-    # Combine the base directory and the path.
-    return os.path.join(basedir, *(path.split('/')))
-
-######################################################################
-## 2. Example & DocTest
-######################################################################
-## - An "example" is a <source, want> pair, where "source" is a
-##   fragment of source code, and "want" is the expected output for
-##   "source."  The Example class also includes information about
-##   where the example was extracted from.
-##
-## - A "doctest" is a collection of examples, typically extracted from
-##   a string (such as an object's docstring).  The DocTest class also
-##   includes information about where the string was extracted from.
-
-class Example:
-    """
-    A single doctest example, consisting of source code and expected
-    output.  `Example` defines the following attributes:
-
-      - source: A single Python statement, always ending with a newline.
-        The constructor adds a newline if needed.
-
-      - want: The expected output from running the source code (either
-        from stdout, or a traceback in case of exception).  `want` ends
-        with a newline unless it's empty, in which case it's an empty
-        string.  The constructor adds a newline if needed.
-
-      - exc_msg: The exception message generated by the example, if
-        the example is expected to generate an exception; or `None` if
-        it is not expected to generate an exception.  This exception
-        message is compared against the return value of
-        `traceback.format_exception_only()`.  `exc_msg` ends with a
-        newline unless it's `None`.  The constructor adds a newline
-        if needed.
-
-      - lineno: The line number within the DocTest string containing
-        this Example where the Example begins.  This line number is
-        zero-based, with respect to the beginning of the DocTest.
-
-      - indent: The example's indentation in the DocTest string.
-        I.e., the number of space characters that precede the
-        example's first prompt.
-
-      - options: A dictionary mapping from option flags to True or
-        False, which is used to override default options for this
-        example.  Any option flags not contained in this dictionary
-        are left at their default value (as specified by the
-        DocTestRunner's optionflags).  By default, no options are set.
-    """
-    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
-                 options=None):
-        # Normalize inputs.
-        if not source.endswith('\n'):
-            source += '\n'
-        if want and not want.endswith('\n'):
-            want += '\n'
-        if exc_msg is not None and not exc_msg.endswith('\n'):
-            exc_msg += '\n'
-        # Store properties.
-        self.source = source
-        self.want = want
-        self.lineno = lineno
-        self.indent = indent
-        if options is None: options = {}
-        self.options = options
-        self.exc_msg = exc_msg
-
-class DocTest:
-    """
-    A collection of doctest examples that should be run in a single
-    namespace.  Each `DocTest` defines the following attributes:
-
-      - examples: the list of examples.
-
-      - globs: The namespace (aka globals) that the examples should
-        be run in.
-
-      - name: A name identifying the DocTest (typically, the name of
-        the object whose docstring this DocTest was extracted from).
-
-      - filename: The name of the file that this DocTest was extracted
-        from, or `None` if the filename is unknown.
-
-      - lineno: The line number within filename where this DocTest
-        begins, or `None` if the line number is unavailable.  This
-        line number is zero-based, with respect to the beginning of
-        the file.
-
-      - docstring: The string that the examples were extracted from,
-        or `None` if the string is unavailable.
-    """
-    def __init__(self, examples, globs, name, filename, lineno, docstring):
-        """
-        Create a new DocTest containing the given examples.  The
-        DocTest's globals are initialized with a copy of `globs`.
-        """
-        assert not isinstance(examples, basestring), \
-               "DocTest no longer accepts str; use DocTestParser instead"
-        self.examples = examples
-        self.docstring = docstring
-        self.globs = globs.copy()
-        self.name = name
-        self.filename = filename
-        self.lineno = lineno
-
-    def __repr__(self):
-        if len(self.examples) == 0:
-            examples = 'no examples'
-        elif len(self.examples) == 1:
-            examples = '1 example'
-        else:
-            examples = '%d examples' % len(self.examples)
-        return ('<DocTest %s from %s:%s (%s)>' %
-                (self.name, self.filename, self.lineno, examples))
-
-
-    # This lets us sort tests by name:
-    def __cmp__(self, other):
-        if not isinstance(other, DocTest):
-            return -1
-        return cmp((self.name, self.filename, self.lineno, id(self)),
-                   (other.name, other.filename, other.lineno, id(other)))
-
-######################################################################
-## 3. DocTestParser
-######################################################################
-
-class DocTestParser:
-    """
-    A class used to parse strings containing doctest examples.
-    """
-    # This regular expression is used to find doctest examples in a
-    # string.  It defines three groups: `source` is the source code
-    # (including leading indentation and prompts); `indent` is the
-    # indentation of the first (PS1) line of the source code; and
-    # `want` is the expected output (including leading indentation).
-    _EXAMPLE_RE = re.compile(r'''
-        # Source consists of a PS1 line followed by zero or more PS2 lines.
-        (?P<source>
-            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
-            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
-        \n?
-        # Want consists of any non-blank lines that do not start with PS1.
-        (?P<want> (?:(?![ ]*$)    # Not a blank line
-                     (?![ ]*>>>)  # Not a line starting with PS1
-                     .*$\n?       # But any other line
-                  )*)
-        ''', re.MULTILINE | re.VERBOSE)
-
-    # A regular expression for handling `want` strings that contain
-    # expected exceptions.  It divides `want` into three pieces:
-    #    - the traceback header line (`hdr`)
-    #    - the traceback stack (`stack`)
-    #    - the exception message (`msg`), as generated by
-    #      traceback.format_exception_only()
-    # `msg` may have multiple lines.  We assume/require that the
-    # exception message is the first non-indented line starting with a word
-    # character following the traceback header line.
-    _EXCEPTION_RE = re.compile(r"""
-        # Grab the traceback header.  Different versions of Python have
-        # said different things on the first traceback line.
-        ^(?P<hdr> Traceback\ \(
-            (?: most\ recent\ call\ last
-            |   innermost\ last
-            ) \) :
-        )
-        \s* $                # toss trailing whitespace on the header.
-        (?P<stack> .*?)      # don't blink: absorb stuff until...
-        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
-        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
-
-    # A callable returning a true value iff its argument is a blank line
-    # or contains a single comment.
-    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
-
-    # Find footnote references.
-    _FOOTNOTE_REFERENCE_RE = re.compile(r'\[([^\]]+)]_')
-
-    # Find footnote definitions.
-    _FOOTNOTE_DEFINITION_RE = re.compile(
-        r'^\.\.\s*\[\s*([^\]]+)\s*\].*$', re.MULTILINE)
-
-    # End of footnote regex.   Just looks for any unindented line.
-    _FOOTNOTE_END_RE = re.compile(r'^\S+', re.MULTILINE)
-
-    def parse(self, string, name='<string>', optionflags=0):
-        """
-        Divide the given string into examples and intervening text,
-        and return them as a list of alternating Examples and strings.
-        Line numbers for the Examples are 0-based.  The optional
-        argument `name` is a name identifying this string, and is only
-        used for error messages.
-        """
-        string = string.expandtabs()
-        # If all lines begin with the same indentation, then strip it.
-        min_indent = self._min_indent(string)
-        if min_indent > 0:
-            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
-
-        output = []
-        charno, lineno = 0, 0
-        # Find all doctest examples in the string:
-        for m in self._EXAMPLE_RE.finditer(string):
-            # Add the pre-example text to `output`.
-            output.append(string[charno:m.start()])
-            # Update lineno (lines before this example)
-            lineno += string.count('\n', charno, m.start())
-            # Extract info from the regexp match.
-            (source, options, want, exc_msg) = \
-                     self._parse_example(m, name, lineno)
-            # Create an Example, and add it to the list.
-            if not self._IS_BLANK_OR_COMMENT(source):
-                output.append( Example(source, want, exc_msg,
-                                    lineno=lineno,
-                                    indent=min_indent+len(m.group('indent')),
-                                    options=options) )
-            # Update lineno (lines inside this example)
-            lineno += string.count('\n', m.start(), m.end())
-            # Update charno.
-            charno = m.end()
-        # Add any remaining post-example text to `output`.
-        output.append(string[charno:])
-
-        if optionflags & INTERPRET_FOOTNOTES:
-            footnotes = {}
-            in_footnote = False
-            # collect all the footnotes
-            for x in output:
-                if in_footnote:
-                    footnote.append(x)
-                    # we're collecting prose and examples for a footnote
-                    if isinstance(x, Example):
-                        x._footnote_name = name
-                    elif self._FOOTNOTE_END_RE.search(x):
-                        # this looks like prose that ends a footnote
-                        in_footnote = False
-                        footnotes[name] = footnote
-                        del name
-                        del footnote
-
-                if not in_footnote:
-                    if not isinstance(x, Example):
-                        matches = list(
-                            self._FOOTNOTE_DEFINITION_RE.finditer(x))
-
-                        if matches:
-                            # all but the last one don't have any code
-                            # note: we intentionally reuse the "leaked" value
-                            # of match below
-                            for match in matches:
-                                footnotes[match.group(1)] = []
-
-                            in_footnote = True
-                            name = match.group(1)
-                            footnote = []
-
-            # if we were still collecting a footnote when the loop ended,
-            # stash it away so it's not lost
-            if in_footnote:
-                footnotes[name] = footnote
-
-            # inject each footnote into the point(s) at which it is referenced
-            new_output = []
-            defined_footnotes = []
-            used_footnotes = []
-            for x in output:
-                if isinstance(x, Example):
-                    # we don't want to execute footnotes where they're defined
-                    if hasattr(x, '_footnote_name'):
-                        defined_footnotes.append(x._footnote_name)
-                        continue
-                else:
-                    m = None
-                    for m in self._FOOTNOTE_REFERENCE_RE.finditer(x):
-                        name = m.group(1)
-                        if name not in footnotes:
-                            raise KeyError(
-                                'A footnote was referred to, but never'
-                                ' defined: %r' % name)
-
-                        new_output.append(x)
-                        new_output.extend(footnotes[name])
-                        used_footnotes.append(name)
-                    if m is not None:
-                        continue
-
-                new_output.append(x)
-            output = new_output
-
-            # make sure that all of the footnotes found were actually used
-            unused_footnotes = set(defined_footnotes) - set(used_footnotes)
-            for x in unused_footnotes:
-                warnings.warn('a footnote was defined, but never used: %r' % x,
-                              UnusedFootnoteWarning)
-
-        return output
-
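For context, the INTERPRET_FOOTNOTES branch above injects the
examples defined in a reST footnote at the point(s) where that
footnote is referenced; a hypothetical doctest text using it:

    Set things up [setup]_ and then use them:

        >>> x + 1
        2

    .. [setup] Examples in this footnote run at the reference point:

        >>> x = 1

With the flag enabled, '>>> x = 1' executes where '[setup]_' is
referenced, before '>>> x + 1'; a footnote that is defined but never
referenced triggers UnusedFootnoteWarning.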
-    def get_doctest(self, string, globs, name, filename, lineno,
-                    optionflags=0):
-        """
-        Extract all doctest examples from the given string, and
-        collect them into a `DocTest` object.
-
-        `globs`, `name`, `filename`, and `lineno` are attributes for
-        the new `DocTest` object.  See the documentation for `DocTest`
-        for more information.
-        """
-        return DocTest(self.get_examples(string, name, optionflags), globs,
-                       name, filename, lineno, string)
-
-    def get_examples(self, string, name='<string>', optionflags=0):
-        """
-        Extract all doctest examples from the given string, and return
-        them as a list of `Example` objects.  Line numbers are
-        0-based, because it's most common in doctests that nothing
-        interesting appears on the same line as the opening triple-quote,
-        and so the first interesting line is called \"line 1\" then.
-
-        The optional argument `name` is a name identifying this
-        string, and is only used for error messages.
-        """
-        return [x for x in self.parse(string, name, optionflags)
-                if isinstance(x, Example)]
-
-    def _parse_example(self, m, name, lineno):
-        """
-        Given a regular expression match from `_EXAMPLE_RE` (`m`),
-        return a pair `(source, want)`, where `source` is the matched
-        example's source code (with prompts and indentation stripped);
-        and `want` is the example's expected output (with indentation
-        stripped).
-
-        `name` is the string's name, and `lineno` is the line number
-        where the example starts; both are used for error messages.
-        """
-        # Get the example's indentation level.
-        indent = len(m.group('indent'))
-
-        # Divide source into lines; check that they're properly
-        # indented; and then strip their indentation & prompts.
-        source_lines = m.group('source').split('\n')
-        self._check_prompt_blank(source_lines, indent, name, lineno)
-        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
-        source = '\n'.join([sl[indent+4:] for sl in source_lines])
-
-        # Divide want into lines; check that it's properly indented; and
-        # then strip the indentation.  Spaces before the last newline should
-        # be preserved, so plain rstrip() isn't good enough.
-        want = m.group('want')
-        want_lines = want.split('\n')
-        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
-            del want_lines[-1]  # forget final newline & spaces after it
-        self._check_prefix(want_lines, ' '*indent, name,
-                           lineno + len(source_lines))
-        want = '\n'.join([wl[indent:] for wl in want_lines])
-
-        # If `want` contains a traceback message, then extract it.
-        m = self._EXCEPTION_RE.match(want)
-        if m:
-            exc_msg = m.group('msg')
-        else:
-            exc_msg = None
-
-        # Extract options from the source.
-        options = self._find_options(source, name, lineno)
-
-        return source, options, want, exc_msg
-
-    # This regular expression looks for option directives in the
-    # source code of an example.  Option directives are comments
-    # starting with "doctest:".  Warning: this may give false
-    # positives for string-literals that contain the string
-    # "#doctest:".  Eliminating these false positives would require
-    # actually parsing the string; but we limit them by ignoring any
-    # line containing "#doctest:" that is *followed* by a quote mark.
-    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
-                                      re.MULTILINE)
-
-    def _find_options(self, source, name, lineno):
-        """
-        Return a dictionary containing option overrides extracted from
-        option directives in the given source string.
-
-        `name` is the string's name, and `lineno` is the line number
-        where the example starts; both are used for error messages.
-        """
-        options = {}
-        # (note: with the current regexp, this will match at most once:)
-        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
-            option_strings = m.group(1).replace(',', ' ').split()
-            for option in option_strings:
-                if (option[0] not in '+-' or
-                    option[1:] not in OPTIONFLAGS_BY_NAME):
-                    raise ValueError('line %r of the doctest for %s '
-                                     'has an invalid option: %r' %
-                                     (lineno+1, name, option))
-                flag = OPTIONFLAGS_BY_NAME[option[1:]]
-                options[flag] = (option[0] == '+')
-        if options and self._IS_BLANK_OR_COMMENT(source):
-            raise ValueError('line %r of the doctest for %s has an option '
-                             'directive on a line with no example: %r' %
-                             (lineno, name, source))
-        return options
-
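To make the directive syntax concrete: flag names take a '+' or '-'
prefix and may be comma- or space-separated, as in this sketch:

    >>> print 'a    b'  # doctest: +NORMALIZE_WHITESPACE, -ELLIPSIS
    a b

An unknown flag name, or a directive on a line containing no example,
raises the ValueError shown above.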
-    # This regular expression finds the indentation of every non-blank
-    # line in a string.
-    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
-
-    def _min_indent(self, s):
-        "Return the minimum indentation of any non-blank line in `s`"
-        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
-        if len(indents) > 0:
-            return min(indents)
-        else:
-            return 0
-
-    def _check_prompt_blank(self, lines, indent, name, lineno):
-        """
-        Given the lines of a source string (including prompts and
-        leading indentation), check to make sure that every prompt is
-        followed by a space character.  If any line is not followed by
-        a space character, then raise ValueError.
-        """
-        for i, line in enumerate(lines):
-            if len(line) >= indent+4 and line[indent+3] != ' ':
-                raise ValueError('line %r of the docstring for %s '
-                                 'lacks blank after %s: %r' %
-                                 (lineno+i+1, name,
-                                  line[indent:indent+3], line))
-
-    def _check_prefix(self, lines, prefix, name, lineno):
-        """
-        Check that every line in the given list starts with the given
-        prefix; if any line does not, then raise a ValueError.
-        """
-        for i, line in enumerate(lines):
-            if line and not line.startswith(prefix):
-                raise ValueError('line %r of the docstring for %s has '
-                                 'inconsistent leading whitespace: %r' %
-                                 (lineno+i+1, name, line))
-
-
-######################################################################
-## 4. DocTest Finder
-######################################################################
-
-class DocTestFinder:
-    """
-    A class used to extract the DocTests that are relevant to a given
-    object, from its docstring and the docstrings of its contained
-    objects.  Doctests can currently be extracted from the following
-    object types: modules, functions, classes, methods, staticmethods,
-    classmethods, and properties.
-    """
-
-    def __init__(self, verbose=False, parser=DocTestParser(),
-                 recurse=True, _namefilter=None, exclude_empty=True):
-        """
-        Create a new doctest finder.
-
-        The optional argument `parser` specifies a class or
-        function that should be used to create new DocTest objects (or
-        objects that implement the same interface as DocTest).  The
-        signature for this factory function should match the signature
-        of the DocTest constructor.
-
-        If the optional argument `recurse` is false, then `find` will
-        only examine the given object, and not any contained objects.
-
-        If the optional argument `exclude_empty` is false, then `find`
-        will include tests for objects with empty docstrings.
-        """
-        self._parser = parser
-        self._verbose = verbose
-        self._recurse = recurse
-        self._exclude_empty = exclude_empty
-        # _namefilter is undocumented, and exists only for temporary backward-
-        # compatibility support of testmod's deprecated isprivate mess.
-        self._namefilter = _namefilter
-
-    def find(self, obj, name=None, module=None, globs=None,
-             extraglobs=None, optionflags=0):
-        """
-        Return a list of the DocTests that are defined by the given
-        object's docstring, or by any of its contained objects'
-        docstrings.
-
-        The optional parameter `module` is the module that contains
-        the given object.  If the module is not specified or is None, then
-        the test finder will attempt to automatically determine the
-        correct module.  The object's module is used:
-
-            - As a default namespace, if `globs` is not specified.
-            - To prevent the DocTestFinder from extracting DocTests
-              from objects that are imported from other modules.
-            - To find the name of the file containing the object.
-            - To help find the line number of the object within its
-              file.
-
-        Contained objects whose module does not match `module` are ignored.
-
-        If `module` is False, no attempt to find the module will be made.
-        This is obscure, of use mostly in tests:  if `module` is False, or
-        is None but cannot be found automatically, then all objects are
-        considered to belong to the (non-existent) module, so all contained
-        objects will (recursively) be searched for doctests.
-
-        The globals for each DocTest is formed by combining `globs`
-        and `extraglobs` (bindings in `extraglobs` override bindings
-        in `globs`).  A new copy of the globals dictionary is created
-        for each DocTest.  If `globs` is not specified, then it
-        defaults to the module's `__dict__`, if specified, or {}
-        otherwise.  If `extraglobs` is not specified, then it defaults
-        to {}.
-
-        """
-        # If name was not specified, then extract it from the object.
-        if name is None:
-            name = getattr(obj, '__name__', None)
-            if name is None:
-                raise ValueError("DocTestFinder.find: name must be given "
-                        "when obj.__name__ doesn't exist: %r" %
-                                 (type(obj),))
-
-        # Find the module that contains the given object (if obj is
-        # a module, then module=obj).  Note: this may fail, in which
-        # case module will be None.
-        if module is False:
-            module = None
-        elif module is None:
-            module = inspect.getmodule(obj)
-
-        # Read the module's source code.  This is used by
-        # DocTestFinder._find_lineno to find the line number for a
-        # given object's docstring.
-        try:
-            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
-            source_lines = linecache.getlines(file)
-            if not source_lines:
-                source_lines = None
-        except TypeError:
-            source_lines = None
-
-        # Initialize globals, and merge in extraglobs.
-        if globs is None:
-            if module is None:
-                globs = {}
-            else:
-                globs = module.__dict__.copy()
-        else:
-            globs = globs.copy()
-        if extraglobs is not None:
-            globs.update(extraglobs)
-
-        # Recursively explore `obj`, extracting DocTests.
-        tests = []
-        self._find(tests, obj, name, module, source_lines, globs, {},
-                   optionflags=optionflags)
-        return tests
-
-    def _filter(self, obj, prefix, base):
-        """
-        Return true if the given object should not be examined.
-        """
-        return (self._namefilter is not None and
-                self._namefilter(prefix, base))
-
-    def _from_module(self, module, object):
-        """
-        Return true if the given object is defined in the given
-        module.
-        """
-        if module is None:
-            return True
-        elif inspect.isfunction(object):
-            return module.__dict__ is object.func_globals
-        elif inspect.isclass(object):
-            return module.__name__ == object.__module__
-        elif inspect.getmodule(object) is not None:
-            return module is inspect.getmodule(object)
-        elif hasattr(object, '__module__'):
-            return module.__name__ == object.__module__
-        elif isinstance(object, property):
-            return True # [XX] no way to be sure.
-        else:
-            raise ValueError("object must be a class or function")
-
-    def _find(self, tests, obj, name, module, source_lines, globs, seen,
-              optionflags):
-        """
-        Find tests for the given object and any contained objects, and
-        add them to `tests`.
-        """
-        if self._verbose:
-            print 'Finding tests in %s' % name
-
-        # If we've already processed this object, then ignore it.
-        if id(obj) in seen:
-            return
-        seen[id(obj)] = 1
-
-        # Find a test for this object, and add it to the list of tests.
-        test = self._get_test(obj, name, module, globs, source_lines,
-                              optionflags)
-        if test is not None:
-            tests.append(test)
-
-        # Look for tests in a module's contained objects.
-        if inspect.ismodule(obj) and self._recurse:
-            for valname, val in obj.__dict__.items():
-                # Check if this contained object should be ignored.
-                if self._filter(val, name, valname):
-                    continue
-                valname = '%s.%s' % (name, valname)
-                # Recurse to functions & classes.
-                if ((inspect.isfunction(val) or inspect.isclass(val)) and
-                    self._from_module(module, val)):
-                    self._find(tests, val, valname, module, source_lines,
-                               globs, seen, optionflags)
-
-        # Look for tests in a module's __test__ dictionary.
-        if inspect.ismodule(obj) and self._recurse:
-            for valname, val in getattr(obj, '__test__', {}).items():
-                if not isinstance(valname, basestring):
-                    raise ValueError("DocTestFinder.find: __test__ keys "
-                                     "must be strings: %r" %
-                                     (type(valname),))
-                if not (inspect.isfunction(val) or inspect.isclass(val) or
-                        inspect.ismethod(val) or inspect.ismodule(val) or
-                        isinstance(val, basestring)):
-                    raise ValueError("DocTestFinder.find: __test__ values "
-                                     "must be strings, functions, methods, "
-                                     "classes, or modules: %r" %
-                                     (type(val),))
-                valname = '%s.__test__.%s' % (name, valname)
-                self._find(tests, val, valname, module, source_lines,
-                           globs, seen, optionflags)
-
-        # Look for tests in a class's contained objects.
-        if inspect.isclass(obj) and self._recurse:
-            for valname, val in obj.__dict__.items():
-                # Check if this contained object should be ignored.
-                if self._filter(val, name, valname):
-                    continue
-                # Special handling for staticmethod/classmethod.
-                if isinstance(val, staticmethod):
-                    val = getattr(obj, valname)
-                if isinstance(val, classmethod):
-                    val = getattr(obj, valname).im_func
-
-                # Recurse to methods, properties, and nested classes.
-                if ((inspect.isfunction(val) or inspect.isclass(val) or
-                      isinstance(val, property)) and
-                      self._from_module(module, val)):
-                    valname = '%s.%s' % (name, valname)
-                    self._find(tests, val, valname, module, source_lines,
-                               globs, seen, optionflags)
-
-    def _get_test(self, obj, name, module, globs, source_lines, optionflags):
-        """
-        Return a DocTest for the given object, if it defines a docstring;
-        otherwise, return None.
-        """
-        # Extract the object's docstring.  If it doesn't have one,
-        # then return None (no test for this object).
-        if isinstance(obj, basestring):
-            docstring = obj
-        else:
-            try:
-                if obj.__doc__ is None:
-                    docstring = ''
-                else:
-                    docstring = obj.__doc__
-                    if not isinstance(docstring, basestring):
-                        docstring = str(docstring)
-            except (TypeError, AttributeError):
-                docstring = ''
-
-        # Find the docstring's location in the file.
-        lineno = self._find_lineno(obj, source_lines)
-
-        # Don't bother if the docstring is empty.
-        if self._exclude_empty and not docstring:
-            return None
-
-        # Return a DocTest for this object.
-        if module is None:
-            filename = None
-        else:
-            filename = getattr(module, '__file__', module.__name__)
-            if filename[-4:] in (".pyc", ".pyo"):
-                filename = filename[:-1]
-        return self._parser.get_doctest(docstring, globs, name,
-                                        filename, lineno, optionflags)
-
-    def _find_lineno(self, obj, source_lines):
-        """
-        Return a line number of the given object's docstring.  Note:
-        this method assumes that the object has a docstring.
-        """
-        lineno = None
-
-        # Find the line number for modules.
-        if inspect.ismodule(obj):
-            lineno = 0
-
-        # Find the line number for classes.
-        # Note: this could be fooled if a class is defined multiple
-        # times in a single file.
-        if inspect.isclass(obj):
-            if source_lines is None:
-                return None
-            pat = re.compile(r'^\s*class\s*%s\b' %
-                             getattr(obj, '__name__', '-'))
-            for i, line in enumerate(source_lines):
-                if pat.match(line):
-                    lineno = i
-                    break
-
-        # Find the line number for functions & methods.
-        if inspect.ismethod(obj): obj = obj.im_func
-        if inspect.isfunction(obj): obj = obj.func_code
-        if inspect.istraceback(obj): obj = obj.tb_frame
-        if inspect.isframe(obj): obj = obj.f_code
-        if inspect.iscode(obj):
-            lineno = getattr(obj, 'co_firstlineno', None)-1
-
-        # Find the line number where the docstring starts.  Assume
-        # that it's the first line that begins with a quote mark.
-        # Note: this could be fooled by a multiline function
-        # signature, where a continuation line begins with a quote
-        # mark.
-        if lineno is not None:
-            if source_lines is None:
-                return lineno+1
-            pat = re.compile('(^|.*:)\s*\w*("|\')')
-            for lineno in range(lineno, len(source_lines)):
-                if pat.match(source_lines[lineno]):
-                    return lineno
-
-        # We couldn't find the line number.
-        return None
-
-######################################################################
-## 5. DocTest Runner
-######################################################################
-
-class DocTestRunner:
-    """
-    A class used to run DocTest test cases, and accumulate statistics.
-    The `run` method is used to process a single DocTest case.  It
-    returns a tuple `(f, t)`, where `t` is the number of test cases
-    tried, and `f` is the number of test cases that failed.
-
-        >>> tests = DocTestFinder().find(_TestClass)
-        >>> runner = DocTestRunner(verbose=False)
-        >>> for test in tests:
-        ...     print runner.run(test)
-        (0, 2)
-        (0, 1)
-        (0, 2)
-        (0, 2)
-
-    The `summarize` method prints a summary of all the test cases that
-    have been run by the runner, and returns an aggregated `(f, t)`
-    tuple:
-
-        >>> runner.summarize(verbose=1)
-        4 items passed all tests:
-           2 tests in _TestClass
-           2 tests in _TestClass.__init__
-           2 tests in _TestClass.get
-           1 tests in _TestClass.square
-        7 tests in 4 items.
-        7 passed and 0 failed.
-        Test passed.
-        (0, 7)
-
-    The aggregated number of tried examples and failed examples is
-    also available via the `tries` and `failures` attributes:
-
-        >>> runner.tries
-        7
-        >>> runner.failures
-        0
-
-    The comparison between expected outputs and actual outputs is done
-    by an `OutputChecker`.  This comparison may be customized with a
-    number of option flags; see the documentation for `testmod` for
-    more information.  If the option flags are insufficient, then the
-    comparison may also be customized by passing a subclass of
-    `OutputChecker` to the constructor.
-
-    The test runner's display output can be controlled in two ways.
-    First, an output function (`out`) can be passed to
-    `DocTestRunner.run`; this function will be called with strings that
-    should be displayed.  It defaults to `sys.stdout.write`.  If
-    capturing the output is not sufficient, then the display output
-    can be also customized by subclassing DocTestRunner, and
-    overriding the methods `report_start`, `report_success`,
-    `report_unexpected_exception`, and `report_failure`.
-    """
-    # This divider string is used to separate failure messages, and to
-    # separate sections of the summary.
-    DIVIDER = "*" * 70
-
-    def __init__(self, checker=None, verbose=None, optionflags=0):
-        """
-        Create a new test runner.
-
-        Optional keyword arg `checker` is the `OutputChecker` that
-        should be used to compare the expected outputs and actual
-        outputs of doctest examples.
-
-        Optional keyword arg 'verbose' prints lots of stuff if true,
-        only failures if false; by default, it's true iff '-v' is in
-        sys.argv.
-
-        Optional argument `optionflags` can be used to control how the
-        test runner compares expected output to actual output, and how
-        it displays failures.  See the documentation for `testmod` for
-        more information.
-        """
-        self._checker = checker or OutputChecker()
-        if verbose is None:
-            verbose = '-v' in sys.argv
-        self._verbose = verbose
-        self.optionflags = optionflags
-        self.original_optionflags = optionflags
-
-        # Keep track of the examples we've run.
-        self.tries = 0
-        self.failures = 0
-        self._name2ft = {}
-
-        # Create a fake output target for capturing doctest output.
-        self._fakeout = _SpoofOut()
-
-    #/////////////////////////////////////////////////////////////////
-    # Reporting methods
-    #/////////////////////////////////////////////////////////////////
-
-    def report_start(self, out, test, example):
-        """
-        Report that the test runner is about to process the given
-        example.  (Only displays a message if verbose=True)
-        """
-        if self._verbose:
-            if example.want:
-                out('Trying:\n' + _indent(example.source) +
-                    'Expecting:\n' + _indent(example.want))
-            else:
-                out('Trying:\n' + _indent(example.source) +
-                    'Expecting nothing\n')
-
-    def report_success(self, out, test, example, got):
-        """
-        Report that the given example ran successfully.  (Only
-        displays a message if verbose=True)
-        """
-        if self._verbose:
-            out("ok\n")
-
-    def report_failure(self, out, test, example, got):
-        """
-        Report that the given example failed.
-        """
-        out(self._failure_header(test, example) +
-            self._checker.output_difference(example, got, self.optionflags))
-
-    def report_unexpected_exception(self, out, test, example, exc_info):
-        """
-        Report that the given example raised an unexpected exception.
-        """
-        out(self._failure_header(test, example) +
-            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
-
-    def _failure_header(self, test, example):
-        out = [self.DIVIDER]
-        if test.filename:
-            if test.lineno is not None and example.lineno is not None:
-                lineno = test.lineno + example.lineno + 1
-            else:
-                lineno = '?'
-            out.append('File "%s", line %s, in %s' %
-                       (test.filename, lineno, test.name))
-        else:
-            out.append('Line %s, in %s' % (example.lineno+1, test.name))
-        out.append('Failed example:')
-        source = example.source
-        out.append(_indent(source))
-        return '\n'.join(out)
-
-    #/////////////////////////////////////////////////////////////////
-    # DocTest Running
-    #/////////////////////////////////////////////////////////////////
-
-    def __run(self, test, compileflags, out):
-        """
-        Run the examples in `test`.  Write the outcome of each example
-        with one of the `DocTestRunner.report_*` methods, using the
-        writer function `out`.  `compileflags` is the set of compiler
-        flags that should be used to execute examples.  Return a tuple
-        `(f, t)`, where `t` is the number of examples tried, and `f`
-        is the number of examples that failed.  The examples are run
-        in the namespace `test.globs`.
-        """
-        # Keep track of the number of failures and tries.
-        failures = tries = 0
-
-        # Save the option flags (since option directives can be used
-        # to modify them).
-        original_optionflags = self.optionflags
-
-        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
-
-        check = self._checker.check_output
-
-        # Process each example.
-        for examplenum, example in enumerate(test.examples):
-
-            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
-            # reporting after the first failure.
-            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
-                     failures > 0)
-
-            # Merge in the example's options.
-            self.optionflags = original_optionflags
-            if example.options:
-                for (optionflag, val) in example.options.items():
-                    if val:
-                        self.optionflags |= optionflag
-                    else:
-                        self.optionflags &= ~optionflag
-
-            # Record that we started this example.
-            tries += 1
-            if not quiet:
-                self.report_start(out, test, example)
-
-            # Use a special filename for compile(), so we can retrieve
-            # the source code during interactive debugging (see
-            # __patched_linecache_getlines).
-            # Line number counting starts with 0 so we add one to get
-            # the real line number.
-            filename = '<doctest %s[line %d, example %d]>' % (
-                test.name, example.lineno+1, examplenum)
-
-            # Run the example in the given context (globs), and record
-            # any exception that gets raised.  (But don't intercept
-            # keyboard interrupts.)
-            try:
-                # Don't blink!  This is where the user's code gets run.
-                exec compile(example.source, filename, "single",
-                             compileflags, 1) in test.globs
-                self.debugger.set_continue() # ==== Example Finished ====
-                exception = None
-            except KeyboardInterrupt:
-                raise
-            except:
-                exception = sys.exc_info()
-                self.debugger.set_continue() # ==== Example Finished ====
-            got = self._fakeout.getvalue()  # the actual output
-            self._fakeout.truncate(0)
-            outcome = FAILURE   # guilty until proved innocent or insane
-
-            # If the example executed without raising any exceptions,
-            # verify its output.
-            if exception is None:
-                if check(example.want, got, self.optionflags):
-                    outcome = SUCCESS
-
-            # The example raised an exception:  check if it was expected.
-            else:
-                exc_info = sys.exc_info()
-                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
-                if not quiet:
-                    got += _exception_traceback(exc_info)
-
-                # If `example.exc_msg` is None, then we weren't expecting
-                # an exception.
-                if example.exc_msg is None:
-                    outcome = BOOM
-
-                # We expected an exception:  see whether it matches.
-                elif check(example.exc_msg, exc_msg, self.optionflags):
-                    outcome = SUCCESS
-
-                # Another chance if they didn't care about the detail.
-                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
-                    m1 = re.match(r'[^:]*:', example.exc_msg)
-                    m2 = re.match(r'[^:]*:', exc_msg)
-                    if m1 and m2 and check(m1.group(0), m2.group(0),
-                                           self.optionflags):
-                        outcome = SUCCESS
-
-            # Report the outcome.
-            if outcome is SUCCESS:
-                if not quiet:
-                    self.report_success(out, test, example, got)
-            elif outcome is FAILURE:
-                if not quiet:
-                    self.report_failure(out, test, example, got)
-                failures += 1
-            elif outcome is BOOM:
-                if not quiet:
-                    self.report_unexpected_exception(out, test, example,
-                                                     exc_info)
-                failures += 1
-            else:
-                assert False, ("unknown outcome", outcome)
-
-        # Restore the option flags (in case they were modified)
-        self.optionflags = original_optionflags
-
-        # Record and return the number of failures and tries.
-        self.__record_outcome(test, failures, tries)
-        return failures, tries
-
-    def __record_outcome(self, test, f, t):
-        """
-        Record the fact that the given DocTest (`test`) generated `f`
-        failures out of `t` tried examples.
-        """
-        f2, t2 = self._name2ft.get(test.name, (0,0))
-        self._name2ft[test.name] = (f+f2, t+t2)
-        self.failures += f
-        self.tries += t
-
-    __LINECACHE_FILENAME_RE = re.compile(
-        r'<doctest (?P<name>[\w\.]+)\[line \d+, example (?P<examplenum>\d+)\]>$'
-        )
-
-    def __patched_linecache_getlines(self, filename, module_globals=None):
-        m = self.__LINECACHE_FILENAME_RE.match(filename)
-        if m and m.group('name') == self.test.name:
-            example = self.test.examples[int(m.group('examplenum'))]
-            return example.source.splitlines(True)
-        else:
-            if module_globals is None:
-                return self.save_linecache_getlines(filename)
-            else:
-                return self.save_linecache_getlines(filename, module_globals)
-
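The linecache patch above is a small but general trick: pdb and traceback fetch source through linecache.getlines, so code that executes dynamically compiled snippets can make them debuggable by answering for its own synthetic filenames and deferring to the saved original otherwise. A minimal standalone sketch of the same pattern (the dictionary and filenames are hypothetical):

    import linecache

    SOURCES = {'<snippet 0>': 'x = 1\nraise ValueError(x)\n'}

    _orig_getlines = linecache.getlines
    def _patched_getlines(filename, module_globals=None):
        # Serve source for our synthetic filenames; fall back to the
        # original implementation for real files on disk.
        if filename in SOURCES:
            return SOURCES[filename].splitlines(True)
        return _orig_getlines(filename, module_globals)
    linecache.getlines = _patched_getlines
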
-    def run(self, test, compileflags=None, out=None, clear_globs=True):
-        """
-        Run the examples in `test`, and display the results using the
-        writer function `out`.
-
-        The examples are run in the namespace `test.globs`.  If
-        `clear_globs` is true (the default), then this namespace will
-        be cleared after the test runs, to help with garbage
-        collection.  If you would like to examine the namespace after
-        the test completes, then use `clear_globs=False`.
-
-        `compileflags` gives the set of flags that should be used by
-        the Python compiler when running the examples.  If not
-        specified, then it will default to the set of future-import
-        flags that apply to `globs`.
-
-        The output of each example is checked using
-        `DocTestRunner.check_output`, and the results are formatted by
-        the `DocTestRunner.report_*` methods.
-        """
-        self.test = test
-
-        if compileflags is None:
-            compileflags = _extract_future_flags(test.globs)
-
-        save_stdout = sys.stdout
-        if out is None:
-            out = save_stdout.write
-        sys.stdout = self._fakeout
-
-        # Patch pdb.set_trace to restore sys.stdout during interactive
-        # debugging (so it's not still redirected to self._fakeout).
-        # Note that the interactive output will go to *our*
-        # save_stdout, even if that's not the real sys.stdout; this
-        # allows us to write test cases for the set_trace behavior.
-        save_set_trace = pdb.set_trace
-        self.debugger = _OutputRedirectingPdb(save_stdout)
-        self.debugger.reset()
-        pdb.set_trace = self.debugger.set_trace
-
-        # Patch linecache.getlines, so we can see the example's source
-        # when we're inside the debugger.
-        self.save_linecache_getlines = linecache.getlines
-        linecache.getlines = self.__patched_linecache_getlines
-
-        try:
-            return self.__run(test, compileflags, out)
-        finally:
-            sys.stdout = save_stdout
-            pdb.set_trace = save_set_trace
-            linecache.getlines = self.save_linecache_getlines
-            if clear_globs:
-                test.globs.clear()
-
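As the docstring notes, passing clear_globs=False keeps the execution namespace around after run() returns, which is handy when poking at a test interactively. A minimal sketch, using the names this module exports:

    from zope.testing import doctest

    test = doctest.DocTestParser().get_doctest(
        '>>> x = 6 * 7\n>>> x\n42\n', {}, 'demo', 'demo.txt', 0)
    runner = doctest.DocTestRunner(verbose=False)
    runner.run(test, clear_globs=False)   # returns (failures, tries)
    print test.globs['x']                 # namespace survives: prints 42
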
-    #/////////////////////////////////////////////////////////////////
-    # Summarization
-    #/////////////////////////////////////////////////////////////////
-    def summarize(self, verbose=None):
-        """
-        Print a summary of all the test cases that have been run by
-        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
-        the total number of failed examples, and `t` is the total
-        number of tried examples.
-
-        The optional `verbose` argument controls how detailed the
-        summary is.  If the verbosity is not specified, then the
-        DocTestRunner's verbosity is used.
-        """
-        if verbose is None:
-            verbose = self._verbose
-        notests = []
-        passed = []
-        failed = []
-        totalt = totalf = 0
-        for x in self._name2ft.items():
-            name, (f, t) = x
-            assert f <= t
-            totalt += t
-            totalf += f
-            if t == 0:
-                notests.append(name)
-            elif f == 0:
-                passed.append( (name, t) )
-            else:
-                failed.append(x)
-        if verbose:
-            if notests:
-                print len(notests), "items had no tests:"
-                notests.sort()
-                for thing in notests:
-                    print "   ", thing
-            if passed:
-                print len(passed), "items passed all tests:"
-                passed.sort()
-                for thing, count in passed:
-                    print " %3d tests in %s" % (count, thing)
-        if failed:
-            print self.DIVIDER
-            print len(failed), "items had failures:"
-            failed.sort()
-            for thing, (f, t) in failed:
-                print " %3d of %3d in %s" % (f, t, thing)
-        if verbose:
-            print totalt, "tests in", len(self._name2ft), "items."
-            print totalt - totalf, "passed and", totalf, "failed."
-        if totalf:
-            print "***Test Failed***", totalf, "failures."
-        elif verbose:
-            print "Test passed."
-        return totalf, totalt
-
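One runner can execute many tests; summarize() then reports the per-name tallies it accumulated. A short sketch:

    from zope.testing import doctest

    runner = doctest.DocTestRunner(verbose=False)
    parser = doctest.DocTestParser()
    for name, src in [('ok', '>>> 1 + 1\n2\n'), ('bad', '>>> 1 + 1\n3\n')]:
        runner.run(parser.get_doctest(src, {}, name, name + '.txt', 0))
    failed, tried = runner.summarize()    # prints the failure summary
    print (failed, tried)                 # (1, 2)
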
-    #/////////////////////////////////////////////////////////////////
-    # Backward compatibility cruft to maintain doctest.master.
-    #/////////////////////////////////////////////////////////////////
-    def merge(self, other):
-        d = self._name2ft
-        for name, (f, t) in other._name2ft.items():
-            if name in d:
-                print "*** DocTestRunner.merge: '" + name + "' in both" \
-                    " testers; summing outcomes."
-                f2, t2 = d[name]
-                f = f + f2
-                t = t + t2
-            d[name] = f, t
-
-class OutputChecker:
-    """
-    A class used to check whether the actual output from a doctest
-    example matches the expected output.  `OutputChecker` defines two
-    methods: `check_output`, which compares a given pair of outputs,
-    and returns true if they match; and `output_difference`, which
-    returns a string describing the differences between two outputs.
-    """
-    def check_output(self, want, got, optionflags):
-        """
-        Return True iff the actual output from an example (`got`)
-        matches the expected output (`want`).  These strings are
-        always considered to match if they are identical; but
-        depending on what option flags the test runner is using,
-        several non-exact match types are also possible.  See the
-        documentation for `DocTestRunner` for more information about
-        option flags.
-        """
-        # Handle the common case first, for efficiency:
-        # if they're string-identical, always return true.
-        if got == want:
-            return True
-
-        # The values True and False replaced 1 and 0 as the return
-        # value for boolean comparisons in Python 2.3.
-        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
-            if (got,want) == ("True\n", "1\n"):
-                return True
-            if (got,want) == ("False\n", "0\n"):
-                return True
-
-        # <BLANKLINE> can be used as a special sequence to signify a
-        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
-        if not (optionflags & DONT_ACCEPT_BLANKLINE):
-            # Replace <BLANKLINE> in want with a blank line.
-            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
-                          '', want)
-            # If a line in got contains only spaces, then remove the
-            # spaces.
-            got = re.sub('(?m)^\s*?$', '', got)
-            if got == want:
-                return True
-
-        # This flag causes doctest to ignore any differences in the
-        # contents of whitespace strings.  Note that this can be used
-        # in conjunction with the ELLIPSIS flag.
-        if optionflags & NORMALIZE_WHITESPACE:
-            got = ' '.join(got.split())
-            want = ' '.join(want.split())
-            if got == want:
-                return True
-
-        # The ELLIPSIS flag says to let the sequence "..." in `want`
-        # match any substring in `got`.
-        if optionflags & ELLIPSIS:
-            if _ellipsis_match(want, got):
-                return True
-
-        # We didn't find any match; return false.
-        return False
-
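Each flag relaxes the comparison independently, so the checker can be exercised on its own; a few illustrative calls (argument order is want, got, optionflags):

    from zope.testing import doctest

    checker = doctest.OutputChecker()
    checker.check_output('1\n', 'True\n', 0)                  # True (bool-for-1)
    checker.check_output('[0, ..., 9]\n',
                         '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n',
                         doctest.ELLIPSIS)                    # True
    checker.check_output('a  b\n', 'a\nb\n',
                         doctest.NORMALIZE_WHITESPACE)        # True
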
-    # Should we do a fancy diff?
-    def _do_a_fancy_diff(self, want, got, optionflags):
-        # Not unless they asked for a fancy diff.
-        if not optionflags & (REPORT_UDIFF |
-                              REPORT_CDIFF |
-                              REPORT_NDIFF):
-            return False
-
-        # If expected output uses ellipsis, a meaningful fancy diff is
-        # too hard ... or maybe not.  In two real-life failures Tim saw,
-        # a diff was a major help anyway, so this is commented out.
-        # [todo] _ellipsis_match() knows which pieces do and don't match,
-        # and could be the basis for a kick-ass diff in this case.
-        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
-        ##    return False
-
-        # ndiff does intraline difference marking, so can be useful even
-        # for 1-line differences.
-        if optionflags & REPORT_NDIFF:
-            return True
-
-        # The other diff types need at least a few lines to be helpful.
-        return want.count('\n') > 2 and got.count('\n') > 2
-
-    def output_difference(self, example, got, optionflags):
-        """
-        Return a string describing the differences between the
-        expected output for a given example (`example`) and the actual
-        output (`got`).  `optionflags` is the set of option flags used
-        to compare `want` and `got`.
-        """
-        want = example.want
-        # If <BLANKLINE>s are being used, then replace blank lines
-        # with <BLANKLINE> in the actual output string.
-        if not (optionflags & DONT_ACCEPT_BLANKLINE):
-            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
-
-        # Check if we should use diff.
-        if self._do_a_fancy_diff(want, got, optionflags):
-            # Split want & got into lines.
-            want_lines = want.splitlines(True)  # True == keep line ends
-            got_lines = got.splitlines(True)
-            # Use difflib to find their differences.
-            if optionflags & REPORT_UDIFF:
-                diff = difflib.unified_diff(want_lines, got_lines, n=2)
-                diff = list(diff)[2:] # strip the diff header
-                kind = 'unified diff with -expected +actual'
-            elif optionflags & REPORT_CDIFF:
-                diff = difflib.context_diff(want_lines, got_lines, n=2)
-                diff = list(diff)[2:] # strip the diff header
-                kind = 'context diff with expected followed by actual'
-            elif optionflags & REPORT_NDIFF:
-                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
-                diff = list(engine.compare(want_lines, got_lines))
-                kind = 'ndiff with -expected +actual'
-            else:
-                assert 0, 'Bad diff option'
-            # Remove trailing whitespace on diff output.
-            diff = [line.rstrip() + '\n' for line in diff]
-            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
-
-        # If we're not using diff, then simply list the expected
-        # output followed by the actual output.
-        if want and got:
-            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
-        elif want:
-            return 'Expected:\n%sGot nothing\n' % _indent(want)
-        elif got:
-            return 'Expected nothing\nGot:\n%s' % _indent(got)
-        else:
-            return 'Expected nothing\nGot nothing\n'
-
-
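Since the fancy-diff heuristics above require several lines of output (except for ndiff), output_difference() falls back to the plain Expected/Got listing for short outputs. A sketch of the diff path, using names this module exports:

    from zope.testing import doctest

    checker = doctest.OutputChecker()
    example = doctest.Example('print seq\n', 'a\nb\nc\nd\n')
    print checker.output_difference(example, 'a\nB\nc\nd\n',
                                    doctest.REPORT_UDIFF)
    # -> "Differences (unified diff with -expected +actual):" followed by
    #    a diff in which "-b" / "+B" mark the changed line.
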
-class DebugRunner(DocTestRunner):
-    r"""Run doc tests but raise an exception as soon as there is a failure.
-
-       If an unexpected exception occurs, an UnexpectedException is raised.
-       It contains the test, the example, and the original exception:
-
-         >>> runner = DebugRunner(verbose=False)
-         >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
-         ...                                    {}, 'foo', 'foo.py', 0)
-         >>> try:
-         ...     runner.run(test)
-         ... except UnexpectedException, failure:
-         ...     pass
-
-         >>> failure.test is test
-         True
-
-         >>> failure.example.want
-         '42\n'
-
-         >>> exc_info = failure.exc_info
-         >>> raise exc_info[0], exc_info[1], exc_info[2]
-         Traceback (most recent call last):
-         ...
-         KeyError
-
-       We wrap the original exception to give the calling application
-       access to the test and example information.
-
-       If the output doesn't match, then a DocTestFailure is raised:
-
-         >>> test = DocTestParser().get_doctest('''
-         ...      >>> x = 1
-         ...      >>> x
-         ...      2
-         ...      ''', {}, 'foo', 'foo.py', 0)
-
-         >>> try:
-         ...    runner.run(test)
-         ... except DocTestFailure, failure:
-         ...    pass
-
-       DocTestFailure objects provide access to the test:
-
-         >>> failure.test is test
-         True
-
-       As well as to the example:
-
-         >>> failure.example.want
-         '2\n'
-
-       and the actual output:
-
-         >>> failure.got
-         '1\n'
-
-       If a failure or error occurs, the globals are left intact:
-
-         >>> del test.globs['__builtins__']
-         >>> test.globs
-         {'x': 1}
-
-         >>> test = DocTestParser().get_doctest('''
-         ...      >>> x = 2
-         ...      >>> raise KeyError
-         ...      ''', {}, 'foo', 'foo.py', 0)
-
-         >>> runner.run(test)
-         Traceback (most recent call last):
-         ...
-         UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
-
-         >>> del test.globs['__builtins__']
-         >>> test.globs
-         {'x': 2}
-
-       But the globals are cleared if there is no error:
-
-         >>> test = DocTestParser().get_doctest('''
-         ...      >>> x = 2
-         ...      ''', {}, 'foo', 'foo.py', 0)
-
-         >>> runner.run(test)
-         (0, 1)
-
-         >>> test.globs
-         {}
-
-       """
-
-    def run(self, test, compileflags=None, out=None, clear_globs=True):
-        r = DocTestRunner.run(self, test, compileflags, out, False)
-        if clear_globs:
-            test.globs.clear()
-        return r
-
-    def report_unexpected_exception(self, out, test, example, exc_info):
-        raise UnexpectedException(test, example, exc_info)
-
-    def report_failure(self, out, test, example, got):
-        raise DocTestFailure(test, example, got)
-
-######################################################################
-## 6. Test Functions
-######################################################################
-# These should be backwards compatible.
-
-# For backward compatibility, a global instance of a DocTestRunner
-# class, updated by testmod.
-master = None
-
-def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
-            report=True, optionflags=0, extraglobs=None,
-            raise_on_error=False, exclude_empty=False):
-    """m=None, name=None, globs=None, verbose=None, isprivate=None,
-       report=True, optionflags=0, extraglobs=None, raise_on_error=False,
-       exclude_empty=False
-
-    Test examples in docstrings in functions and classes reachable
-    from module m (or the current module if m is not supplied), starting
-    with m.__doc__.  Unless isprivate is specified, private names
-    are not skipped.
-
-    Also test examples reachable from dict m.__test__ if it exists and is
-    not None.  m.__test__ maps names to functions, classes and strings;
-    function and class docstrings are tested even if the name is private;
-    strings are tested directly, as if they were docstrings.
-
-    Return (#failures, #tests).
-
-    See doctest.__doc__ for an overview.
-
-    Optional keyword arg "name" gives the name of the module; by default
-    use m.__name__.
-
-    Optional keyword arg "globs" gives a dict to be used as the globals
-    when executing examples; by default, use m.__dict__.  A copy of this
-    dict is actually used for each docstring, so that each docstring's
-    examples start with a clean slate.
-
-    Optional keyword arg "extraglobs" gives a dictionary that should be
-    merged into the globals that are used to execute examples.  By
-    default, no extra globals are used.  This is new in 2.4.
-
-    Optional keyword arg "verbose" prints lots of stuff if true, prints
-    only failures if false; by default, it's true iff "-v" is in sys.argv.
-
-    Optional keyword arg "report" prints a summary at the end when true,
-    else prints nothing at the end.  In verbose mode, the summary is
-    detailed, else very brief (in fact, empty if all tests passed).
-
-    Optional keyword arg "optionflags" or's together module constants,
-    and defaults to 0.  This is new in 2.3.  Possible values (see the
-    docs for details):
-
-        DONT_ACCEPT_TRUE_FOR_1
-        DONT_ACCEPT_BLANKLINE
-        NORMALIZE_WHITESPACE
-        ELLIPSIS
-        IGNORE_EXCEPTION_DETAIL
-        REPORT_UDIFF
-        REPORT_CDIFF
-        REPORT_NDIFF
-        REPORT_ONLY_FIRST_FAILURE
-
-    Optional keyword arg "raise_on_error" raises an exception on the
-    first unexpected exception or failure. This allows failures to be
-    post-mortem debugged.
-
-    Deprecated in Python 2.4:
-    Optional keyword arg "isprivate" specifies a function used to
-    determine whether a name is private.  The default function
-    treats all functions as public.  Optionally, "isprivate" can be
-    set to doctest.is_private to skip over functions marked as private
-    using the underscore naming convention; see its docs for details.
-
-    Advanced tomfoolery:  testmod runs methods of a local instance of
-    class doctest.Tester, then merges the results into (or creates)
-    global Tester instance doctest.master.  Methods of doctest.master
-    can be called directly too, if you want to do something unusual.
-    Passing report=0 to testmod is especially useful then, to delay
-    displaying a summary.  Invoke doctest.master.summarize(verbose)
-    when you're done fiddling.
-    """
-    global master
-
-    if isprivate is not None:
-        warnings.warn("the isprivate argument is deprecated; "
-                      "examine DocTestFinder.find() lists instead",
-                      DeprecationWarning)
-
-    # If no module was given, then use __main__.
-    if m is None:
-        # DWA - m will still be None if this wasn't invoked from the command
-        # line, in which case the following TypeError is about as good an error
-        # as we should expect
-        m = sys.modules.get('__main__')
-
-    # Check that we were actually given a module.
-    if not inspect.ismodule(m):
-        raise TypeError("testmod: module required; %r" % (m,))
-
-    # If no name was given, then use the module's name.
-    if name is None:
-        name = m.__name__
-
-    # Find, parse, and run all tests in the given module.
-    finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
-
-    if raise_on_error:
-        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
-    else:
-        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
-
-    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
-        runner.run(test)
-
-    if report:
-        runner.summarize()
-
-    if master is None:
-        master = runner
-    else:
-        master.merge(runner)
-
-    return runner.failures, runner.tries
-
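Since the option flags are independent bit masks, they combine with "|". A sketch of a typical invocation (with no module argument, the calling __main__ module is tested):

    from zope.testing import doctest

    flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    failures, tried = doctest.testmod(optionflags=flags, verbose=False)
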
-def testfile(filename, module_relative=True, name=None, package=None,
-             globs=None, verbose=None, report=True, optionflags=0,
-             extraglobs=None, raise_on_error=False, parser=DocTestParser(),
-             encoding=None):
-    """
-    Test examples in the given file.  Return (#failures, #tests).
-
-    Optional keyword arg "module_relative" specifies how filenames
-    should be interpreted:
-
-      - If "module_relative" is True (the default), then "filename"
-         specifies a module-relative path.  By default, this path is
-         relative to the calling module's directory; but if the
-         "package" argument is specified, then it is relative to that
-         package.  To ensure os-independence, "filename" should use
-         "/" characters to separate path segments, and should not
-         be an absolute path (i.e., it may not begin with "/").
-
-      - If "module_relative" is False, then "filename" specifies an
-        os-specific path.  The path may be absolute or relative (to
-        the current working directory).
-
-    Optional keyword arg "name" gives the name of the test; by default
-    use the file's basename.
-
-    Optional keyword argument "package" is a Python package or the
-    name of a Python package whose directory should be used as the
-    base directory for a module relative filename.  If no package is
-    specified, then the calling module's directory is used as the base
-    directory for module relative filenames.  It is an error to
-    specify "package" if "module_relative" is False.
-
-    Optional keyword arg "globs" gives a dict to be used as the globals
-    when executing examples; by default, use {}.  A copy of this dict
-    is actually used for each docstring, so that each docstring's
-    examples start with a clean slate.
-
-    Optional keyword arg "extraglobs" gives a dictionary that should be
-    merged into the globals that are used to execute examples.  By
-    default, no extra globals are used.
-
-    Optional keyword arg "verbose" prints lots of stuff if true, prints
-    only failures if false; by default, it's true iff "-v" is in sys.argv.
-
-    Optional keyword arg "report" prints a summary at the end when true,
-    else prints nothing at the end.  In verbose mode, the summary is
-    detailed, else very brief (in fact, empty if all tests passed).
-
-    Optional keyword arg "optionflags" or's together module constants,
-    and defaults to 0.  Possible values (see the docs for details):
-
-        DONT_ACCEPT_TRUE_FOR_1
-        DONT_ACCEPT_BLANKLINE
-        NORMALIZE_WHITESPACE
-        ELLIPSIS
-        IGNORE_EXCEPTION_DETAIL
-        REPORT_UDIFF
-        REPORT_CDIFF
-        REPORT_NDIFF
-        REPORT_ONLY_FIRST_FAILURE
-
-    Optional keyword arg "raise_on_error" raises an exception on the
-    first unexpected exception or failure. This allows failures to be
-    post-mortem debugged.
-
-    Optional keyword arg "parser" specifies a DocTestParser (or
-    subclass) that should be used to extract tests from the file.
-
-    Optional keyword arg "encoding" specifies an encoding that should
-    be used to convert the file to unicode.
-
-    Advanced tomfoolery:  testfile runs methods of a local instance of
-    class doctest.Tester, then merges the results into (or creates)
-    global Tester instance doctest.master.  Methods of doctest.master
-    can be called directly too, if you want to do something unusual.
-    Passing report=0 to testfile is especially useful then, to delay
-    displaying a summary.  Invoke doctest.master.summarize(verbose)
-    when you're done fiddling.
-    """
-    global master
-
-    if package and not module_relative:
-        raise ValueError("Package may only be specified for module-"
-                         "relative paths.")
-
-    # Relativize the path
+# Patch to fix tests that have mixed line endings:
+import os
+def _patched_load_testfile(filename, package, module_relative):
     if module_relative:
-        package = _normalize_module(package)
-        filename = _module_relative_path(package, filename)
+        package = doctest._normalize_module(package, 3)
+        filename = doctest._module_relative_path(package, filename)
+        if hasattr(package, '__loader__'):
+            if hasattr(package.__loader__, 'get_data'):
+                file_contents = package.__loader__.get_data(filename)
+                # get_data() opens files in binary mode ('rb'), so we must
+                # apply the newline conversion universal-newlines mode would.
+                return file_contents.replace(os.linesep, '\n'), filename
+    return open(filename, 'U').read(), filename
+doctest._load_testfile = _patched_load_testfile
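Note the asymmetry the patch works around: get_data() returns raw bytes, while the 'U' (universal newlines) mode of open() maps every line-ending convention to '\n'. The replace() above translates the current platform's os.linesep, which is the common case for loader-provided data. Roughly, assuming a CRLF-encoded file:

    import os

    raw = 'line one\r\nline two\r\n'        # as get_data() might return it
    print raw.replace(os.linesep, '\n')     # no-op unless os.linesep == '\r\n'
    print raw.replace('\r\n', '\n')         # what open(name, 'U').read() yields
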
 
-    # If no name was given, then use the file's name.
-    if name is None:
-        name = os.path.basename(filename)
+# Use a special exception for the test runner:
+from zope.testing.exceptions import DocTestFailureException
+doctest.DocTestCase.failureException = DocTestFailureException
 
-    # Assemble the globals.
-    if globs is None:
-        globs = {}
-    else:
-        globs = globs.copy()
-    if extraglobs is not None:
-        globs.update(extraglobs)
-
-    if raise_on_error:
-        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
-    else:
-        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
-
-    # Read the file, convert it to a test, and run it.
-    s = open(filename, 'U').read()
-    if encoding is None:
-        encoding = pep263_encoding(s)
-    if encoding is not None:
-        s = s.decode(encoding)
-    test = parser.get_doctest(s, globs, name, filename, 0)
-    runner.run(test)
-
-    if report:
-        runner.summarize()
-
-    if master is None:
-        master = runner
-    else:
-        master.merge(runner)
-
-    return runner.failures, runner.tries
-
-pep263_re_search = re.compile("coding[:=]\s*([-\w.]+)").search
-def pep263_encoding(s):
-    """Try to find the encoding of a string by looking for a pep263 coding.
-    """
-    for line in s.split('\n')[:2]:
-        r = pep263_re_search(line)
-        if r:
-            return r.group(1)
-
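PEP 263 allows the coding declaration on either of the first two lines, which is why only s.split('\n')[:2] is scanned. For instance:

    src = '#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nx = 1\n'
    print pep263_encoding(src)    # prints: utf-8
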
-
-
-def run_docstring_examples(f, globs, verbose=False, name="NoName",
-                           compileflags=None, optionflags=0):
-    """
-    Test examples in the given object's docstring (`f`), using `globs`
-    as globals.  Optional argument `name` is used in failure messages.
-    If the optional argument `verbose` is true, then generate output
-    even if there are no failures.
-
-    `compileflags` gives the set of flags that should be used by the
-    Python compiler when running the examples.  If not specified, then
-    it will default to the set of future-import flags that apply to
-    `globs`.
-
-    Optional keyword arg `optionflags` specifies options for the
-    testing and output.  See the documentation for `testmod` for more
-    information.
-    """
-    # Find, parse, and run all tests in the given module.
-    finder = DocTestFinder(verbose=verbose, recurse=False)
-    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
-    for test in finder.find(f, name, globs=globs):
-        runner.run(test, compileflags=compileflags)
-
-######################################################################
-## 7. Tester
-######################################################################
-# This is provided only for backwards compatibility.  It's not
-# actually used in any way.
-
-class Tester:
-    def __init__(self, mod=None, globs=None, verbose=None,
-                 isprivate=None, optionflags=0):
-
-        warnings.warn("class Tester is deprecated; "
-                      "use class doctest.DocTestRunner instead",
-                      DeprecationWarning, stacklevel=2)
-        if mod is None and globs is None:
-            raise TypeError("Tester.__init__: must specify mod or globs")
-        if mod is not None and not inspect.ismodule(mod):
-            raise TypeError("Tester.__init__: mod must be a module; %r" %
-                            (mod,))
-        if globs is None:
-            globs = mod.__dict__
-        self.globs = globs
-
-        self.verbose = verbose
-        self.isprivate = isprivate
-        self.optionflags = optionflags
-        self.testfinder = DocTestFinder(_namefilter=isprivate)
-        self.testrunner = DocTestRunner(verbose=verbose,
-                                        optionflags=optionflags)
-
-    def runstring(self, s, name):
-        test = DocTestParser().get_doctest(s, self.globs, name, None, None,
-                                           self.optionflags)
-        if self.verbose:
-            print "Running string", name
-        (f,t) = self.testrunner.run(test)
-        if self.verbose:
-            print f, "of", t, "examples failed in string", name
-        return (f,t)
-
-    def rundoc(self, object, name=None, module=None):
-        f = t = 0
-        tests = self.testfinder.find(object, name, module=module,
-                                     globs=self.globs)
-        for test in tests:
-            (f2, t2) = self.testrunner.run(test)
-            (f,t) = (f+f2, t+t2)
-        return (f,t)
-
-    def rundict(self, d, name, module=None):
-        import new
-        m = new.module(name)
-        m.__dict__.update(d)
-        if module is None:
-            module = False
-        return self.rundoc(m, name, module)
-
-    def run__test__(self, d, name):
-        import new
-        m = new.module(name)
-        m.__test__ = d
-        return self.rundoc(m, name)
-
-    def summarize(self, verbose=None):
-        return self.testrunner.summarize(verbose)
-
-    def merge(self, other):
-        self.testrunner.merge(other.testrunner)
-
-######################################################################
-## 8. Unittest Support
-######################################################################
-
-from doctest import set_unittest_reportflags
-
-_para_re = re.compile(r'\s*\n\s*\n\s*')
-def _unittest_count(docstring):
-    words = 0
-    count = 0
-    for p in _para_re.split(docstring):
-        p = p.strip()
-        if not p:
-            continue
-        if p.startswith('>>> '):
-            if words:
-                count += 1
-                words = 0
-        else:
-            words = 1
-
-    return count or 1
-
-
-class DocTestCase(unittest.TestCase):
-
-    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
-                 checker=None):
-
-        unittest.TestCase.__init__(self)
-        self._dt_optionflags = optionflags
-        self._dt_checker = checker
-        self._dt_test = test
-        self._dt_globs = test.globs.copy()
-        self._dt_setUp = setUp
-        self._dt_tearDown = tearDown
-
-        self._dt_count = _unittest_count(test.docstring)
-
-    def countTestCases(self):
-        return self._dt_count
-
-    def setUp(self):
-        test = self._dt_test
-
-        if self._dt_setUp is not None:
-            self._dt_setUp(test)
-
-    def tearDown(self):
-        test = self._dt_test
-
-        if self._dt_tearDown is not None:
-            self._dt_tearDown(test)
-
-        # restore the original globs
-        test.globs.clear()
-        test.globs.update(self._dt_globs)
-
-    failureException = DocTestFailureException
-
-    def runTest(self):
-        test = self._dt_test
-        old = sys.stdout
-        new = StringIO()
-        optionflags = self._dt_optionflags
-
-        if not (optionflags & REPORTING_FLAGS):
-            # The option flags don't include any reporting flags,
-            # so add the default reporting flags
-            optionflags |= doctest._unittest_reportflags
-
-        if doctest._unittest_reportflags & REPORT_ONLY_FIRST_FAILURE:
-            optionflags |= REPORT_ONLY_FIRST_FAILURE
-
-        runner = DocTestRunner(optionflags=optionflags,
-                               checker=self._dt_checker, verbose=False)
-        def write(value):
-            if isinstance(value, unicode):
-                value = value.encode('utf8')
-            new.write(value)
-
-        try:
-            runner.DIVIDER = "-"*70
-            failures, tries = runner.run(
-                test, out=write, clear_globs=False)
-        finally:
-            sys.stdout = old
-
-        if failures:
-            raise self.failureException(self.format_failure(new.getvalue()))
-
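Because runTest() only merges in doctest._unittest_reportflags when a case carries no reporting bits of its own, the standard library's set_unittest_reportflags() acts as a process-wide default for unittest-driven doctests. A sketch:

    import doctest

    # Cases built without explicit reporting flags now show unified diffs:
    previous = doctest.set_unittest_reportflags(doctest.REPORT_UDIFF)
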
-    def format_failure(self, err):
-        test = self._dt_test
-        if test.lineno is None:
-            lineno = 'unknown line number'
-        else:
-            lineno = '%s' % test.lineno
-        lname = '.'.join(test.name.split('.')[-1:])
-        return ('Failed doctest test for %s\n'
-                '  File "%s", line %s, in %s\n\n%s'
-                % (test.name, test.filename, lineno, lname, err)
-                )
-
-    def debug(self):
-        r"""Run the test case without results and without catching exceptions
-
-           The unit test framework includes a debug method on test cases
-           and test suites to support post-mortem debugging.  The test code
-           is run in such a way that errors are not caught.  This way a
-           caller can catch the errors and initiate post-mortem debugging.
-
-           The DocTestCase provides a debug method that raises
-           UnexpectedException errors if there is an unexpected
-           exception:
-
-             >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
-             ...                {}, 'foo', 'foo.py', 0)
-             >>> case = DocTestCase(test)
-             >>> try:
-             ...     case.debug()
-             ... except UnexpectedException, failure:
-             ...     pass
-
-           The UnexpectedException contains the test, the example, and
-           the original exception:
-
-             >>> failure.test is test
-             True
-
-             >>> failure.example.want
-             '42\n'
-
-             >>> exc_info = failure.exc_info
-             >>> raise exc_info[0], exc_info[1], exc_info[2]
-             Traceback (most recent call last):
-             ...
-             KeyError
-
-           If the output doesn't match, then a DocTestFailure is raised:
-
-             >>> test = DocTestParser().get_doctest('''
-             ...      >>> x = 1
-             ...      >>> x
-             ...      2
-             ...      ''', {}, 'foo', 'foo.py', 0)
-             >>> case = DocTestCase(test)
-
-             >>> try:
-             ...    case.debug()
-             ... except DocTestFailure, failure:
-             ...    pass
-
-           DocTestFailure objects provide access to the test:
-
-             >>> failure.test is test
-             True
-
-           As well as to the example:
-
-             >>> failure.example.want
-             '2\n'
-
-           and the actual output:
-
-             >>> failure.got
-             '1\n'
-
-           """
-
-        self.setUp()
-        runner = DebugRunner(optionflags=self._dt_optionflags,
-                             checker=self._dt_checker, verbose=False)
-        runner.run(self._dt_test, clear_globs=False)
-        self.tearDown()
-
-    def id(self):
-        return self._dt_test.name
-
-    def __repr__(self):
-        name = self._dt_test.name.split('.')
-        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
-
-    __str__ = __repr__
-
-    def shortDescription(self):
-        return "Doctest: " + self._dt_test.name
-
-def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
-                 **options):
-    """
-    Convert doctest tests for a module to a unittest test suite.
-
-    This converts each documentation string in a module that
-    contains doctest tests to a unittest test case.  If any of the
-    tests in a doc string fail, then the test case fails.  An exception
-    is raised showing the name of the file containing the test and a
-    (sometimes approximate) line number.
-
-    The `module` argument provides the module to be tested.  The argument
-    can be either a module or a module name.
-
-    If no argument is given, the calling module is used.
-
-    A number of options may be provided as keyword arguments:
-
-    setUp
-      A set-up function.  This is called before running the
-      tests in each file. The setUp function will be passed a DocTest
-      object.  The setUp function can access the test globals as the
-      globs attribute of the test passed.
-
-    tearDown
-      A tear-down function.  This is called after running the
-      tests in each file.  The tearDown function will be passed a DocTest
-      object.  The tearDown function can access the test globals as the
-      globs attribute of the test passed.
-
-    globs
-      A dictionary containing initial global variables for the tests.
-
-    optionflags
-       A set of doctest option flags expressed as an integer.
-    """
-
-    if test_finder is None:
-        test_finder = DocTestFinder()
-
-    module = _normalize_module(module)
-    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs,
-                             optionflags=options.get('optionflags', 0))
-    if globs is None:
-        globs = module.__dict__
-    if not tests:
-        # Why do we want to do this? Because it reveals a bug that might
-        # otherwise be hidden.
-        raise ValueError(module, "has no tests")
-
-    tests.sort()
-    suite = unittest.TestSuite()
-    for test in tests:
-        if len(test.examples) == 0:
-            continue
-        if not test.filename:
-            filename = module.__file__
-            if filename[-4:] in (".pyc", ".pyo"):
-                filename = filename[:-1]
-            test.filename = filename
-        suite.addTest(DocTestCase(test, **options))
-
-    return suite
-
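Typical usage wires the suite into the unittest protocol. A minimal sketch, with "mypackage.calc" standing in for any module whose docstrings hold doctests:

    import unittest
    from zope.testing import doctest

    def test_suite():
        return doctest.DocTestSuite('mypackage.calc',
                                    optionflags=doctest.ELLIPSIS)

    if __name__ == '__main__':
        unittest.main(defaultTest='test_suite')
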
-class DocFileCase(DocTestCase):
-
-    def id(self):
-        return '_'.join(self._dt_test.name.split('.'))
-
-    def __repr__(self):
-        return self._dt_test.filename
-    __str__ = __repr__
-
-    def format_failure(self, err):
-        return ('Failed doctest test for %s\n  File "%s", line 0\n\n%s'
-                % (self._dt_test.name, self._dt_test.filename, err)
-                )
-
-def DocFileTest(path, module_relative=True, package=None,
-                globs=None, parser=DocTestParser(),
-                encoding=None, **options):
-    if globs is None:
-        globs = {}
-    else:
-        globs = globs.copy()
-
-    if package and not module_relative:
-        raise ValueError("Package may only be specified for module-"
-                         "relative paths.")
-
-    # Relativize the path.
-    if module_relative:
-        package = _normalize_module(package)
-        path = _module_relative_path(package, path)
-    if "__file__" not in globs:
-        globs["__file__"] = path
-
-    # Find the file and read it.
-    name = os.path.basename(path)
-    doc = open(path, 'U').read()
-
-    # If an encoding is specified, use it to convert the file to unicode
-    if encoding is None:
-        encoding = pep263_encoding(doc)
-    if encoding is not None:
-        doc = doc.decode(encoding)
-
-    optionflags = options.get('optionflags', 0)
-    # Convert it to a test, and wrap it in a DocFileCase.
-    test = parser.get_doctest(doc, globs, name, path, 0, optionflags)
-    return DocFileCase(test, **options)
-
-def DocFileSuite(*paths, **kw):
-    """A unittest suite for one or more doctest files.
-
-    The path to each doctest file is given as a string; the
-    interpretation of that string depends on the keyword argument
-    "module_relative".
-
-    A number of options may be provided as keyword arguments:
-
-    module_relative
-      If "module_relative" is True, then the given file paths are
-      interpreted as os-independent module-relative paths.  By
-      default, these paths are relative to the calling module's
-      directory; but if the "package" argument is specified, then
-      they are relative to that package.  To ensure os-independence,
-      "filename" should use "/" characters to separate path
-      segments, and may not be an absolute path (i.e., it may not
-      begin with "/").
-
-      If "module_relative" is False, then the given file paths are
-      interpreted as os-specific paths.  These paths may be absolute
-      or relative (to the current working directory).
-
-    package
-      A Python package or the name of a Python package whose directory
-      should be used as the base directory for module relative paths.
-      If "package" is not specified, then the calling module's
-      directory is used as the base directory for module relative
-      filenames.  It is an error to specify "package" if
-      "module_relative" is False.
-
-    setUp
-      A set-up function.  This is called before running the
-      tests in each file. The setUp function will be passed a DocTest
-      object.  The setUp function can access the test globals as the
-      globs attribute of the test passed.
-
-    tearDown
-      A tear-down function.  This is called after running the
-      tests in each file.  The tearDown function will be passed a DocTest
-      object.  The tearDown function can access the test globals as the
-      globs attribute of the test passed.
-
-    globs
-      A dictionary containing initial global variables for the tests.
-
-    optionflags
-      A set of doctest option flags expressed as an integer.
-
-    parser
-      A DocTestParser (or subclass) that should be used to extract
-      tests from the files.
-
-    encoding
-      An encoding that will be used to convert the files to unicode.
-    """
-    suite = unittest.TestSuite()
-
-    # We do this here so that _normalize_module is called at the right
-    # level.  If it were called in DocFileTest, then this function
-    # would be the caller and we might guess the package incorrectly.
-    if kw.get('module_relative', True):
-        kw['package'] = _normalize_module(kw.get('package'))
-
-    for path in paths:
-        suite.addTest(DocFileTest(path, **kw))
-
-    return suite
-
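The setUp/tearDown hooks receive the DocTest object, so fixtures are usually injected through test.globs. A sketch, with README.txt and the fixture value as stand-ins:

    import unittest
    from zope.testing import doctest

    def setUp(test):
        test.globs['app'] = object()   # stand-in for a real fixture

    def test_suite():
        # README.txt is resolved relative to the calling module's package.
        return doctest.DocFileSuite('README.txt', setUp=setUp,
                                    optionflags=doctest.NORMALIZE_WHITESPACE)
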
-######################################################################
-## 9. Debugging Support
-######################################################################
-
-def script_from_examples(s):
-    r"""Extract script from text with examples.
-
-       Converts text with examples to a Python script.  Example input is
-       converted to regular code.  Example output and all other words
-       are converted to comments:
-
-       >>> text = '''
-       ...       Here are examples of simple math.
-       ...
-       ...           Python has super accurate integer addition
-       ...
-       ...           >>> 2 + 2
-       ...           5
-       ...
-       ...           And very friendly error messages:
-       ...
-       ...           >>> 1/0
-       ...           To Infinity
-       ...           And
-       ...           Beyond
-       ...
-       ...           You can use logic if you want:
-       ...
-       ...           >>> if 0:
-       ...           ...    blah
-       ...           ...    blah
-       ...           ...
-       ...
-       ...           Ho hum
-       ...           '''
-
-       >>> print script_from_examples(text)
-       # Here are examples of simple math.
-       #
-       #     Python has super accurate integer addition
-       #
-       2 + 2
-       # Expected:
-       ## 5
-       #
-       #     And very friendly error messages:
-       #
-       1/0
-       # Expected:
-       ## To Infinity
-       ## And
-       ## Beyond
-       #
-       #     You can use logic if you want:
-       #
-       if 0:
-          blah
-          blah
-       #
-       #     Ho hum
-       """
-    output = []
-    for piece in DocTestParser().parse(s):
-        if isinstance(piece, Example):
-            # Add the example's source code (strip trailing NL)
-            output.append(piece.source[:-1])
-            # Add the expected output:
-            want = piece.want
-            if want:
-                output.append('# Expected:')
-                output += ['## '+l for l in want.split('\n')[:-1]]
-        else:
-            # Add non-example text.
-            output += [_comment_line(l)
-                       for l in piece.split('\n')[:-1]]
-
-    # Trim junk on both ends.
-    while output and output[-1] == '#':
-        output.pop()
-    while output and output[0] == '#':
-        output.pop(0)
-    # Combine the output, and return it.
-    return '\n'.join(output)
-
-def testsource(module, name):
-    """Extract the test sources from a doctest docstring as a script.
-
-    Provide the module (or dotted name of the module) containing the
-    test to be debugged and the name (within the module) of the object
-    with the doc string with tests to be debugged.
-    """
-    module = _normalize_module(module)
-    tests = DocTestFinder().find(module)
-    test = [t for t in tests if t.name == name]
-    if not test:
-        raise ValueError(name, "not found in tests")
-    test = test[0]
-    testsrc = script_from_examples(test.docstring)
-    return testsrc
-
-def debug_src(src, pm=False, globs=None):
-    """Debug a single doctest docstring, in argument `src`'"""
-    testsrc = script_from_examples(src)
-    debug_script(testsrc, pm, globs)
-
-def debug_script(src, pm=False, globs=None):
-    "Debug a test script.  `src` is the script, as a string."
-    import pdb
-
-    # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
-    # docs say, a file so created cannot be opened by name a second time
-    # on modern Windows boxes, and execfile() needs to open it.
-    srcfilename = tempfile.mktemp(".py", "doctestdebug")
-    f = open(srcfilename, 'w')
-    f.write(src)
-    f.close()
-
-    try:
-        if globs:
-            globs = globs.copy()
-        else:
-            globs = {}
-
-        if pm:
-            try:
-                execfile(srcfilename, globs, globs)
-            except:
-                print sys.exc_info()[1]
-                pdb.post_mortem(sys.exc_info()[2])
-        else:
-            # Note that %r is vital here.  '%s' instead can, e.g., cause
-            # backslashes to get treated as metacharacters on Windows.
-            pdb.run("execfile(%r)" % srcfilename, globs, globs)
-
-    finally:
-        os.remove(srcfilename)
-
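debug_src() composes script_from_examples() and debug_script(), so a failing snippet can be dropped straight into the debugger; this is interactive by nature. A sketch:

    from zope.testing import doctest

    src = '''
    >>> x = 1
    >>> 1 / (x - x)
    0
    '''
    doctest.debug_src(src, pm=True)   # post-mortem pdb at the ZeroDivisionError
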
-def debug(module, name, pm=False):
-    """Debug a single doctest docstring.
-
-    Provide the module (or dotted name of the module) containing the
-    test to be debugged and the name (within the module) of the object
-    with the docstring with tests to be debugged.
-    """
-    module = _normalize_module(module)
-    testsrc = testsource(module, name)
-    debug_script(testsrc, pm, module.__dict__)
-
-######################################################################
-## 10. Example Usage
-######################################################################
-class _TestClass:
-    """
-    A pointless class, for sanity-checking of docstring testing.
-
-    Methods:
-        square()
-        get()
-
-    >>> _TestClass(13).get() + _TestClass(-12).get()
-    1
-    >>> hex(_TestClass(13).square().get())
-    '0xa9'
-    """
-
-    def __init__(self, val):
-        """val -> _TestClass object with associated value val.
-
-        >>> t = _TestClass(123)
-        >>> print t.get()
-        123
-        """
-
-        self.val = val
-
-    def square(self):
-        """square() -> square TestClass's associated value
-
-        >>> _TestClass(13).square().get()
-        169
-        """
-
-        self.val = self.val ** 2
-        return self
-
-    def get(self):
-        """get() -> return TestClass's associated value.
-
-        >>> x = _TestClass(-42)
-        >>> print x.get()
-        -42
-        """
-
-        return self.val
-
-__test__ = {"_TestClass": _TestClass,
-            "string": r"""
-                      Example of a string object, searched as-is.
-                      >>> x = 1; y = 2
-                      >>> x + y, x * y
-                      (3, 2)
-                      """,
-
-            "bool-int equivalence": r"""
-                                    In 2.2, boolean expressions displayed
-                                    0 or 1.  By default, we still accept
-                                    them.  This can be disabled by passing
-                                    DONT_ACCEPT_TRUE_FOR_1 to the new
-                                    optionflags argument.
-                                    >>> 4 == 4
-                                    1
-                                    >>> 4 == 4
-                                    True
-                                    >>> 4 > 4
-                                    0
-                                    >>> 4 > 4
-                                    False
-                                    """,
-
-            "blank lines": r"""
-                Blank lines can be marked with <BLANKLINE>:
-                    >>> print 'foo\n\nbar\n'
-                    foo
-                    <BLANKLINE>
-                    bar
-                    <BLANKLINE>
-            """,
-
-            "ellipsis": r"""
-                If the ellipsis flag is used, then '...' can be used to
-                elide substrings in the desired output:
-                    >>> print range(1000) #doctest: +ELLIPSIS
-                    [0, 1, 2, ..., 999]
-            """,
-
-            "whitespace normalization": r"""
-                If the whitespace normalization flag is used, then
-                differences in whitespace are ignored.
-                    >>> print range(30) #doctest: +NORMALIZE_WHITESPACE
-                    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
-                     15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
-                     27, 28, 29]
-            """,
-           }
-
-def _test_footnotes():
-    '''
-    Footnotes
-    =========
-
-    If the INTERPRET_FOOTNOTES flag is passed as part of optionflags, then
-    footnotes will be looked up and their code injected at each point of
-    reference.  For example:
-
-        >>> counter = 0
-
-    Here is some text that references a footnote [1]_
-
-        >>> counter
-        1
-
-    .. [1] and here we increment ``counter``
-        >>> counter += 1
-
-    Footnotes can also be referenced after they are defined: [1]_
-
-        >>> counter
-        2
-
-    Footnotes can also be "citations", which just means that the value in
-    the brackets is alphanumeric: [citation]_
-
-        >>> print from_citation
-        hi
-
-    .. [citation] this is a citation.
-        >>> from_citation = 'hi'
-
-    Footnotes can contain more than one example: [multi example]_
-
-        >>> print one
-        1
-
-        >>> print two
-        2
-
-        >>> print three
-        3
-
-    .. [multi example] Here's a footnote with multiple examples:
-
-        >>> one = 1
-
-        and now another (note indentation to make this part of the footnote):
-
-        >>> two = 2
-
-        and a third:
-
-        >>> three = 3
-
-
-    More than one footnote can be referenced at a time [1]_ [2]_
-
-        >>> counter
-        6
-
-    .. [2] let's multiply ``counter`` by two
-        >>> counter *= 2
-
-
-    Parsing Details
-    ---------------
-
-    If the INTERPRET_FOOTNOTES optionflag isn't set, footnotes are ignored.
-
-    >>> doctest = """
-    ... This is a doctest. [#1]_
-    ...
-    ...     >>> print var
-    ...
-    ... .. [#1] a footnote
-    ...     Here we set up the variable
-    ...
-    ...     >>> var = 1
-    ... """
-
-    >>> print_structure(doctest)
-    Prose| This is a doctest. [#1]_
-    Code | print var
-    Prose| .. [#1] a footnote
-    Code | var = 1
-    Prose|
-
-    If INTERPRET_FOOTNOTES is set, footnotes are also copied to the point at
-    which they are referenced.
-
-    >>> print_structure(doctest, optionflags=INTERPRET_FOOTNOTES)
-    Prose| This is a doctest. [#1]_
-    Code | var = 1
-    Prose|
-    Code | print var
-    Prose| .. [#1] a footnote
-    Prose|
-
-    >>> print_structure("""
-    ... Footnotes can have code that starts with no prose. [#quick code]_
-    ...
-    ... .. [#quick code]
-    ...     >>> print 'this is some code'
-    ...     this is some code
-    ... """, optionflags=INTERPRET_FOOTNOTES)
-    Prose| Footnotes can have code that starts with no prose. [#quick code]_
-    Code | print 'this is some code'
-    Prose|
-    Prose|
-
-    >>> print_structure("""
-    ... Footnotes can be back-to-back [#first]_ [#second]_
-    ... .. [#first]
-    ... .. [#second]
-    ...     >>> 1+1
-    ...     2
-    ... """, optionflags=INTERPRET_FOOTNOTES)
-    Prose| Footnotes can be back-to-back [#first]_ [#second]_
-    Prose| Footnotes can be back-to-back [#first]_ [#second]_
-    Code | 1+1
-    Prose|
-    Prose|
-
-    >>> print_structure("""
-    ... .. [#no code] Footnotes can also be defined with no code.
-    ... """, optionflags=INTERPRET_FOOTNOTES)
-    Prose| .. [#no code] Footnotes can also be defined with no code.
-
-    If there are multiple footnotes with no code, then one with code, they are
-    parsed correctly.
-
-    >>> print_structure("""
-    ... I'd like some code to go here [#some code]_
-    ... .. [#no code 1] Footnotes can also be defined with no code.
-    ... .. [#no code 2] Footnotes can also be defined with no code.
-    ... .. [#no code 3] Footnotes can also be defined with no code.
-    ... .. [#some code]
-    ...     >>> print 'hi'
-    ...     hi
-    ... """, optionflags=INTERPRET_FOOTNOTES)
-    Prose| I'd like some code to go here [#some code]_
-    Code | print 'hi'
-    Prose|
-    Prose|
-
-    The non-autonumbered flavor of labels works too.
-
-    >>> print_structure("""
-    ... Here is some text. [foo]_
-    ... .. [foo]
-    ...     >>> print 'hi'
-    ...     hi
-    ... """, optionflags=INTERPRET_FOOTNOTES)
-    Prose| Here is some text. [foo]_
-    Code | print 'hi'
-    Prose|
-    Prose|
-    '''
-
-
-def print_structure(doctest, optionflags=0):
-    def preview(s):
-        first_line = s.strip().split('\n')[0]
-        MAX_LENGTH = 70
-        if len(first_line) <= MAX_LENGTH:
-            return first_line
-
-        return '%s...' % first_line[:MAX_LENGTH].strip()
-
-    parser = DocTestParser()
-    for x in parser.parse(doctest, optionflags=optionflags):
-        if isinstance(x, Example):
-            result = 'Code | ' + preview(x.source)
-        else:
-            result = 'Prose| ' + preview(x)
-
-        print result.strip()
-
-
-def _test():
-    r = unittest.TextTestRunner()
-    r.run(DocTestSuite(optionflags=INTERPRET_FOOTNOTES))
-
-if __name__ == "__main__":
-    _test()
-
-# TODO:
-# - make tracebacks show where the footnote was referenced
-# - teach script_from_examples and testsource about INTERPRET_FOOTNOTES
-# - update comments (including docstring for testfile)
+from doctest import *
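
The whole forked implementation above, including the footnote support
documented in the deleted docstring, is replaced by a single star-import,
so zope.testing.doctest now re-exports the standard library's doctest.
Fork-only names such as INTERPRET_FOOTNOTES are therefore no longer
defined, which is why the DocTestSuite registration is commented out in
tests.py at the end of this diff. A minimal illustration of the effect on
a stock Python (the probes are illustrative, not part of the commit):

    import doctest

    # Everything `from doctest import *` re-exports comes from the
    # stdlib module's __all__: testmod, testfile, DocTestSuite, etc.
    print('testfile' in doctest.__all__)            # True

    # Fork-only extras are gone on a stock interpreter:
    print(hasattr(doctest, 'INTERPRET_FOOTNOTES'))  # False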

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/doctest.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/doctest.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/doctest.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -21,7 +21,8 @@
 Let's now run it as a doctest:
 
   >>> from zope.testing import doctest
-  >>> doctest.testfile(fn, False)
+  >>> a, b = doctest.testfile(fn, False) # Python 2.6 and later return a named tuple here.
+  >>> a, b
   (0, 1)
 
 It worked. Let's also try the test file suite:
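
For reference, the unpacking above is needed because, starting with
Python 2.6, the standard library's testfile() and testmod() return a
TestResults named tuple rather than a plain (failed, attempted) pair, so
an expected output of (0, 1) would no longer match the repr. Unpacking
first normalizes both forms. A minimal sketch (the file name is
hypothetical):

    import doctest

    # Python 2.6+: TestResults(failed=0, attempted=1); the old forked
    # doctest returned a plain tuple (0, 1).
    result = doctest.testfile('sample.txt', module_relative=False)

    # Unpacking works for both shapes, so a doctest can compare values:
    failed, attempted = result
    print((failed, attempted))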

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-arguments.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-arguments.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-arguments.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -18,7 +18,7 @@
       Set up samplelayers.Layer1 in N.NNN seconds.
       Set up samplelayers.Layer11 in N.NNN seconds.
       Set up samplelayers.Layer111 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Tearing down left over layers:
       Tear down samplelayers.Layer111 in N.NNN seconds.
       Tear down samplelayers.Layerx in N.NNN seconds.

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-colors.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-colors.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-colors.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -118,7 +118,7 @@
     {red}    Traceback (most recent call last):{normal}
     {red}      File ".../doctest/__init__.py", line 1356, in __run{normal}
     {red}        compileflags, 1) in test.globs{normal}
-    {red}      File "<doctest sample2.sampletests_e.eek[line 2, example 0]>", line 1, in ?{normal}
+    {red}      File "<doctest sample2.sampletests_e.eek[0]>", line 1, in ?{normal}
     {red}        f(){normal}
     {red}      File "testrunner-ex/sample2/sampletests_e.py", line 19, in f{normal}
     {red}        g(){normal}
@@ -156,9 +156,9 @@
     {red}    Traceback (most recent call last):{normal}
     {red}      File ".../doctest/__init__.py", line 1356, in __run{normal}
     {red}        compileflags, 1) in test.globs{normal}
-    {red}      File "<doctest e.txt[line 4, example 1]>", line 1, in ?{normal}
+    {red}      File "<doctest e.txt[1]>", line 1, in ?{normal}
     {red}        f(){normal}
-    {red}      File "<doctest e.txt[line 1, example 0]>", line 2, in f{normal}
+    {red}      File "<doctest e.txt[0]>", line 2, in f{normal}
     {red}        return x{normal}
     {red}    NameError: global name 'x' is not defined{normal}
     <BLANKLINE>
@@ -335,7 +335,7 @@
       Set up samplelayers.Layer1 in 0.000 seconds.
       Set up samplelayers.Layer12 in 0.000 seconds.
       Set up samplelayers.Layer122 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.007 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.007 seconds.
     Tearing down left over layers:
       Tear down samplelayers.Layer122 in 0.000 seconds.
       Tear down samplelayers.Layer12 in 0.000 seconds.
@@ -348,7 +348,7 @@
       Set up samplelayers.Layer1 in 0.000 seconds.
       Set up samplelayers.Layer12 in 0.000 seconds.
       Set up samplelayers.Layer122 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.007 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.007 seconds.
     Tearing down left over layers:
       Tear down samplelayers.Layer122 in 0.000 seconds.
       Tear down samplelayers.Layer12 in 0.000 seconds.
@@ -369,7 +369,7 @@
       Set up samplelayers.Layer1 in 0.000 seconds.
       Set up samplelayers.Layer12 in 0.000 seconds.
       Set up samplelayers.Layer122 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.007 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.007 seconds.
     Tearing down left over layers:
       Tear down samplelayers.Layer122 in 0.000 seconds.
       Tear down samplelayers.Layer12 in 0.000 seconds.
@@ -412,7 +412,7 @@
       Set up samplelayers.Layer1 in 0.000 seconds.
       Set up samplelayers.Layer12 in 0.000 seconds.
       Set up samplelayers.Layer122 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.007 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.007 seconds.
     Tearing down left over layers:
       Tear down samplelayers.Layer122 in 0.000 seconds.
       Tear down samplelayers.Layer12 in 0.000 seconds.
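
The traceback changes in this file (and the matching ones in
testrunner-errors.txt and testrunner-subunit.txt below) all have the same
cause: the forked doctest named each example's pseudo-file
"<doctest NAME[line L, example N]>", while the standard library uses only
the example index. A small runnable illustration (function and test name
are made up for the example):

    import doctest

    def sample():
        """
        >>> undefined_name
        """

    # The printed report's traceback points at the example's
    # pseudo-file, which the stdlib names by index alone:
    #   File "<doctest sample[0]>", line 1, in <module>
    doctest.run_docstring_examples(sample, {}, name='sample')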

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-coverage.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-coverage.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-coverage.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -21,34 +21,34 @@
       Ran 9 tests with 0 failures and 0 errors in 0.000 seconds.
     Running samplelayers.Layer11 tests:
       Set up samplelayers.Layer11 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.125 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.125 seconds.
     Running samplelayers.Layer111 tests:
       Set up samplelayers.Layerx in 0.000 seconds.
       Set up samplelayers.Layer111 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.125 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.125 seconds.
     Running samplelayers.Layer112 tests:
       Tear down samplelayers.Layer111 in 0.000 seconds.
       Set up samplelayers.Layer112 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.125 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.125 seconds.
     Running samplelayers.Layer12 tests:
       Tear down samplelayers.Layer112 in 0.000 seconds.
       Tear down samplelayers.Layerx in 0.000 seconds.
       Tear down samplelayers.Layer11 in 0.000 seconds.
       Set up samplelayers.Layer12 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.140 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.140 seconds.
     Running samplelayers.Layer121 tests:
       Set up samplelayers.Layer121 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.125 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.125 seconds.
     Running samplelayers.Layer122 tests:
       Tear down samplelayers.Layer121 in 0.000 seconds.
       Set up samplelayers.Layer122 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.125 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.125 seconds.
     Running zope.testing.testrunner.layer.UnitTests tests:
       Tear down samplelayers.Layer122 in 0.000 seconds.
       Tear down samplelayers.Layer12 in 0.000 seconds.
       Tear down samplelayers.Layer1 in 0.000 seconds.
       Set up zope.testing.testrunner.layer.UnitTests in 0.000 seconds.
-      Ran 192 tests with 0 failures and 0 errors in 0.687 seconds.
+      Ran 156 tests with 0 failures and 0 errors in 0.687 seconds.
     Tearing down left over layers:
       Tear down zope.testing.testrunner.layer.UnitTests in 0.000 seconds.
     lines   cov%   module   (path)
@@ -62,7 +62,7 @@
        74   100%   sampletests.test122   (testrunner-ex/sampletests/test122.py)
        48   100%   sampletests.test_one   (testrunner-ex/sampletests/test_one.py)
       112    95%   sampletestsf   (testrunner-ex/sampletestsf.py)
-    Total: 405 tests, 0 failures, 0 errors in 0.630 seconds.
+    Total: 321 tests, 0 failures, 0 errors in 0.630 seconds.
     False
 
 The directory specified with the --coverage option will have been created and

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-edge-cases.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-edge-cases.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-edge-cases.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -45,10 +45,10 @@
       Tear down samplelayers.Layer12 in N.NNN seconds.
       Tear down samplelayers.Layer1 in N.NNN seconds.
       Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
-      Ran 192 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 156 tests with 0 failures and 0 errors in N.NNN seconds.
     Tearing down left over layers:
       Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
-    Total: 405 tests, 0 failures, 0 errors in N.NNN seconds.
+    Total: 321 tests, 0 failures, 0 errors in N.NNN seconds.
     False
 
 Bug #251759: The test runner's protection against descending into non-package
@@ -111,18 +111,20 @@
     ...             ' -t set_trace4').split()
     >>> try: testrunner.run_internal(defaults)
     ... finally: sys.stdin = real_stdin
-    Running zope.testing.testrunner.layer.UnitTests tests:...
-    --Return--
-    > doctest.py(351)set_trace()->None
-    -> pdb.Pdb.set_trace(self)
+    Running zope.testing.testrunner.layer.UnitTests tests:
+      Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
+    > testrunner-ex/sample3/sampletests_d.py(NNN)f()
+    -> y = x
     (Pdb) n
-    > testrunner-ex/sample3/sampletests_d.py(42)f()
+    --Return--
+    > ...->None
     -> y = x
     (Pdb) p x
     1
     (Pdb) c
-      Ran 1 tests with 0 failures and 0 errors in 0.002 seconds.
-    ...
+      Ran 1 tests with 0 failures and 0 errors in N.NNN seconds.
+    Tearing down left over layers:
+      Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
     False
 
 Using pdb in a docstring-based doctest
@@ -132,18 +134,20 @@
     ...             ' -t set_trace3').split()
     >>> try: testrunner.run_internal(defaults)
     ... finally: sys.stdin = real_stdin
-    Running zope.testing.testrunner.layer.UnitTests tests:...
-    --Return--
-    > doctest.py(351)set_trace()->None
-    -> pdb.Pdb.set_trace(self)
+    Running zope.testing.testrunner.layer.UnitTests tests:
+      Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
+    > <doctest sample3.sampletests_d.set_trace3[1]>(3)?()
+    -> y = x
     (Pdb) n
-    > <doctest sample3.sampletests_d.set_trace3[line 3, example 1]>(3)...()
+    --Return--
+    > ...->None
     -> y = x
     (Pdb) p x
     1
     (Pdb) c
-      Ran 1 tests with 0 failures and 0 errors in 0.002 seconds.
-    ...
+      Ran 1 tests with 0 failures and 0 errors in N.NNN seconds.
+    Tearing down left over layers:
+      Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
     False
 
 Using pdb.set_trace in a doc file:
@@ -154,21 +158,22 @@
     ...             ' -t set_trace5').split()
     >>> try: testrunner.run_internal(defaults)
     ... finally: sys.stdin = real_stdin
-    Running zope.testing.testrunner.layer.UnitTests tests:...
-    --Return--
-    > doctest.py(351)set_trace()->None
-    -> pdb.Pdb.set_trace(self)
+    Running zope.testing.testrunner.layer.UnitTests tests:
+      Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
+    > <doctest set_trace5.txt[1]>(3)?()
+    -> y = x
     (Pdb) n
-    > <doctest set_trace5.txt[line 2, example 1]>(3)...()
+    --Return--
+    > ...->None
     -> y = x
     (Pdb) p x
     1
     (Pdb) c
-      Ran 1 tests with 0 failures and 0 errors in 0.002 seconds.
-    ...
+      Ran 1 tests with 0 failures and 0 errors in N.NNN seconds.
+    Tearing down left over layers:
+      Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
     False
 
-
 Using pdb.set_trace in a function called by a doctest in a doc file:
 
 
@@ -177,18 +182,20 @@
     ...             ' -t set_trace6').split()
     >>> try: testrunner.run_internal(defaults)
     ... finally: sys.stdin = real_stdin
-    Running zope.testing.testrunner.layer.UnitTests tests:...
-    --Return--
-    > doctest.py(351)set_trace()->None
-    -> pdb.Pdb.set_trace(self)
+    Running zope.testing.testrunner.layer.UnitTests tests:
+      Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
+    > testrunner-ex/sample3/sampletests_d.py(NNN)f()
+    -> y = x
     (Pdb) n
-    > testrunner-ex/sample3/sampletests_d.py(42)f()
+    --Return--
+    > ...->None
     -> y = x
     (Pdb) p x
     1
     (Pdb) c
-      Ran 1 tests with 0 failures and 0 errors in 0.002 seconds.
-    ...
+      Ran 1 tests with 0 failures and 0 errors in N.NNN seconds.
+    Tearing down left over layers:
+      Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
     False
 
 Post-mortem debugging function called from ordinary test:
@@ -235,21 +242,11 @@
     <BLANKLINE>
     Error in test post_mortem3 (sample3.sampletests_d)
     Traceback (most recent call last):
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in debug
-        runner.run(self._dt_test, clear_globs=False)
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in run
-        r = DocTestRunner.run(self, test, compileflags, out, False)
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in run
-        return self.__run(test, compileflags, out)
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in __run
-        exc_info)
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in report_unexpected_exception
-        raise UnexpectedException(test, example, exc_info)
-    UnexpectedException: testrunner-ex/sample3/sampletests_d.py:61 (2 examples)>
+    UnexpectedException: testrunner-ex/sample3/sampletests_d.py:NNN (2 examples)>
     <BLANKLINE>
     exceptions.ValueError:
     <BLANKLINE>
-    > <doctest sample3.sampletests_d.post_mortem3[line 3, example 1]>(1)...()
+    > <doctest sample3.sampletests_d.post_mortem3[1]>(1)?()
     (Pdb) p x
     1
     (Pdb) c
@@ -269,16 +266,6 @@
     <BLANKLINE>
     Error in test post_mortem4 (sample3.sampletests_d)
     Traceback (most recent call last):
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in debug
-        runner.run(self._dt_test, clear_globs=False)
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in run
-        r = DocTestRunner.run(self, test, compileflags, out, False)
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in run
-        return self.__run(test, compileflags, out)
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in __run
-        exc_info)
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in report_unexpected_exception
-        raise UnexpectedException(test, example, exc_info)
     UnexpectedException: testrunner-ex/sample3/sampletests_d.py:NNN (1 example)>
     <BLANKLINE>
     exceptions.ValueError:
@@ -304,27 +291,18 @@
     <BLANKLINE>
     Error testrunner-ex/sample3/post_mortem5.txt
     Traceback (most recent call last):
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in debug
-        runner.run(self._dt_test, clear_globs=False)
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in run
-        r = DocTestRunner.run(self, test, compileflags, out, False)
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in run
-        return self.__run(test, compileflags, out)
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in __run
-        exc_info)
-      File ".../zope/testing/doctest/__init__.py", Line NNN, in report_unexpected_exception
-        raise UnexpectedException(test, example, exc_info)
     UnexpectedException: testrunner-ex/sample3/post_mortem5.txt:0 (2 examples)>
     <BLANKLINE>
     exceptions.ValueError:
     <BLANKLINE>
-    > <doctest post_mortem5.txt[line 2, example 1]>(1)...()
+    > <doctest post_mortem5.txt[1]>(1)?()
     (Pdb) p x
     1
     (Pdb) c
     True
 
 
+
 Post-mortem debugging function called from file-based doctest:
 
     >>> sys.stdin = Input('p x\nc')
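
The reshuffled pdb sessions above follow from the same delegation: the
forked doctest installed its own set_trace wrapper, so the debugger's
first stop was the wrapper's return event ("--Return-- >
doctest.py(351)set_trace()->None") and 'n' then stepped into the test
frame; with the wrapper gone, the first stop is the line under test and
'n' runs on to the frame's return. A rough sketch of why the first
prompt moves (the wrapper shape is reconstructed from the old expected
output, not quoted from either implementation):

    import pdb
    import sys

    def wrapper_style_set_trace():
        # Starting the tracer on *this* frame means the next event the
        # debugger sees is this function returning: "--Return--" first.
        pdb.Pdb().set_trace(sys._getframe())

    def stdlib_style_set_trace():
        # pdb.set_trace() hands the *caller's* frame to the debugger,
        # so it stops directly at the line under test.
        pdb.set_trace()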

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-errors.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-errors.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-errors.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -44,7 +44,7 @@
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest sample2.sampletests_e.eek[line 2, example 0]>", line 1, in ?
+          File "<doctest sample2.sampletests_e.eek[0]>", line 1, in ?
             f()
           File "testrunner-ex/sample2/sampletests_e.py", line 19, in f
             g()
@@ -80,9 +80,9 @@
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest e.txt[line 4, example 1]>", line 1, in ?
+          File "<doctest e.txt[1]>", line 1, in ?
             f()
-          File "<doctest e.txt[line 1, example 0]>", line 2, in f
+          File "<doctest e.txt[0]>", line 2, in f
             return x
         NameError: global name 'x' is not defined
     <BLANKLINE>
@@ -96,10 +96,10 @@
         raise self.failureException, \
     AssertionError: 1 != 0
     <BLANKLINE>
-      Ran 200 tests with 3 failures and 1 errors in N.NNN seconds.
+      Ran 164 tests with 3 failures and 1 errors in N.NNN seconds.
     Tearing down left over layers:
       Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
-    Total: 413 tests, 3 failures, 1 errors in N.NNN seconds.
+    Total: 329 tests, 3 failures, 1 errors in N.NNN seconds.
     True
 
 We see that we get an error report and a traceback for the failing
@@ -131,7 +131,7 @@
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest sample2.sampletests_e.eek[line 2, example 0]>", line 1, in ?
+          File "<doctest sample2.sampletests_e.eek[0]>", line 1, in ?
             f()
           File "testrunner-ex/sample2/sampletests_e.py", line 19, in f
             g()
@@ -168,9 +168,9 @@
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest e.txt[line 4, example 1]>", line 1, in ?
+          File "<doctest e.txt[1]>", line 1, in ?
             f()
-          File "<doctest e.txt[line 1, example 0]>", line 2, in f
+          File "<doctest e.txt[0]>", line 2, in f
             return x
         NameError: global name 'x' is not defined
     <BLANKLINE>
@@ -186,7 +186,7 @@
     <BLANKLINE>
     ................................................................................................
     <BLANKLINE>
-      Ran 200 tests with 3 failures and 1 errors in 0.040 seconds.
+      Ran 164 tests with 3 failures and 1 errors in 0.040 seconds.
     ...
     <BLANKLINE>
     Tests with errors:
@@ -207,22 +207,21 @@
     Running zope.testing.testrunner.layer.UnitTests tests:
       Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
       Running:
-        1/56 (1.8%)
+        1/47 (2.1%)
     <BLANKLINE>
     Failure in test eek (sample2.sampletests_e)
     Failed doctest test for sample2.sampletests_e.eek
       File "testrunner-ex/sample2/sampletests_e.py", line 28, in eek
     <BLANKLINE>
     ----------------------------------------------------------------------
-    File "testrunner-ex/sample2/sampletests_e.py", line 30,
-           in sample2.sampletests_e.eek
+    File "testrunner-ex/sample2/sampletests_e.py", line 30, in sample2.sampletests_e.eek
     Failed example:
         f()
     Exception raised:
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest sample2.sampletests_e.eek[line 2, example 0]>", line 1, in ?
+          File "<doctest sample2.sampletests_e.eek[0]>", line 1, in ?
             f()
           File "testrunner-ex/sample2/sampletests_e.py", line 19, in f
             g()
@@ -231,11 +230,11 @@
            - __traceback_info__: I don't know what Y should be.
         NameError: global name 'y' is not defined
     <BLANKLINE>
-        2/56 (3.6%)##r##
+        2/47 (4.3%)##r##
                    ##r##
-        3/56 (5.4%)##r##
+        3/47 (6.4%)##r##
                    ##r##
-        4/56 (7.1%)
+        4/47 (8.5%)
     <BLANKLINE>
     Error in test test3 (sample2.sampletests_e.Test)
     Traceback (most recent call last):
@@ -248,11 +247,11 @@
        - __traceback_info__: I don't know what Y should be.
     NameError: global name 'y' is not defined
     <BLANKLINE>
-        5/56 (8.9%)##r##
+        5/47 (8.9%)##r##
                    ##r##
-        6/56 (10.7%)##r##
+        6/47 (10.7%)##r##
                     ##r##
-        7/56 (12.5%)
+        7/47 (12.5%)
     <BLANKLINE>
     Failure in test testrunner-ex/sample2/e.txt
     Failed doctest test for e.txt
@@ -266,13 +265,13 @@
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest e.txt[line 4, example 1]>", line 1, in ?
+          File "<doctest e.txt[1]>", line 1, in ?
             f()
-          File "<doctest e.txt[line 1, example 0]>", line 2, in f
+          File "<doctest e.txt[0]>", line 2, in f
             return x
         NameError: global name 'x' is not defined
     <BLANKLINE>
-        8/56 (14.3%)
+        8/47 (14.3%)
     <BLANKLINE>
     Failure in test test (sample2.sampletests_f.Test)
     Traceback (most recent call last):
@@ -282,83 +281,83 @@
         raise self.failureException, \
     AssertionError: 1 != 0
     <BLANKLINE>
-        9/56 (16.1%)##r##
+        9/47 (16.1%)##r##
                     ##r##
-        10/56 (17.9%)##r##
+        10/47 (17.9%)##r##
                      ##r##
-        11/56 (19.6%)##r##
+        11/47 (19.6%)##r##
                      ##r##
-        12/56 (21.4%)##r##
+        12/47 (21.4%)##r##
                      ##r##
-        13/56 (23.2%)##r##
+        13/47 (23.2%)##r##
                      ##r##
-        14/56 (25.0%)##r##
+        14/47 (25.0%)##r##
                      ##r##
-        15/56 (26.8%)##r##
+        15/47 (26.8%)##r##
                      ##r##
-        16/56 (28.6%)##r##
+        16/47 (28.6%)##r##
                      ##r##
-        17/56 (30.4%)##r##
+        17/47 (30.4%)##r##
                      ##r##
-        18/56 (32.1%)##r##
+        18/47 (32.1%)##r##
                      ##r##
-        19/56 (33.9%)##r##
+        19/47 (33.9%)##r##
                      ##r##
-        20/56 (35.7%)##r##
+        20/47 (35.7%)##r##
                      ##r##
-        24/56 (42.9%)##r##
+        24/47 (42.9%)##r##
                      ##r##
-        25/56 (44.6%)##r##
+        25/47 (44.6%)##r##
                      ##r##
-        26/56 (46.4%)##r##
+        26/47 (46.4%)##r##
                      ##r##
-        27/56 (48.2%)##r##
+        27/47 (48.2%)##r##
                      ##r##
-        28/56 (50.0%)##r##
+        28/47 (50.0%)##r##
                      ##r##
-        29/56 (51.8%)##r##
+        29/47 (51.8%)##r##
                      ##r##
-        30/56 (53.6%)##r##
+        30/47 (53.6%)##r##
                      ##r##
-        31/56 (55.4%)##r##
+        31/47 (55.4%)##r##
                      ##r##
-        32/56 (57.1%)##r##
+        32/47 (57.1%)##r##
                      ##r##
-        33/56 (58.9%)##r##
+        33/47 (58.9%)##r##
                      ##r##
-        34/56 (60.7%)##r##
+        34/47 (60.7%)##r##
                      ##r##
-        35/56 (62.5%)##r##
+        35/47 (62.5%)##r##
                      ##r##
-        36/56 (64.3%)##r##
+        36/47 (64.3%)##r##
                      ##r##
-        40/56 (71.4%)##r##
+        40/47 (71.4%)##r##
                      ##r##
-        41/56 (73.2%)##r##
+        41/47 (73.2%)##r##
                      ##r##
-        42/56 (75.0%)##r##
+        42/47 (75.0%)##r##
                      ##r##
-        43/56 (76.8%)##r##
+        43/47 (76.8%)##r##
                      ##r##
-        44/56 (78.6%)##r##
+        44/47 (78.6%)##r##
                      ##r##
-        45/56 (80.4%)##r##
+        45/47 (80.4%)##r##
                      ##r##
-        46/56 (82.1%)##r##
+        46/47 (82.1%)##r##
                      ##r##
-        47/56 (83.9%)##r##
+        47/47 (83.9%)##r##
                      ##r##
-        48/56 (85.7%)##r##
+        48/47 (85.7%)##r##
                      ##r##
-        49/56 (87.5%)##r##
+        49/47 (87.5%)##r##
                      ##r##
-        50/56 (89.3%)##r##
+        50/47 (89.3%)##r##
                      ##r##
-        51/56 (91.1%)##r##
+        51/47 (91.1%)##r##
                      ##r##
-        52/56 (92.9%)##r##
+        52/47 (92.9%)##r##
                      ##r##
-        56/56 (100.0%)##r##
+        56/47 (100.0%)##r##
                       ##r##
       Ran 56 tests with 3 failures and 1 errors in 0.054 seconds.
     Tearing down left over layers:
@@ -394,7 +393,7 @@
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest sample2.sampletests_1.eek[line 2, example 0]>", line 1, in ?
+          File "<doctest sample2.sampletests_1.eek[0]>", line 1, in ?
             x = y
         NameError: name 'y' is not defined
     ----------------------------------------------------------------------
@@ -406,7 +405,7 @@
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest sample2.sampletests_1.eek[line 4, example 1]>", line 1, in ?
+          File "<doctest sample2.sampletests_1.eek[1]>", line 1, in ?
             x
         NameError: name 'x' is not defined
     ----------------------------------------------------------------------
@@ -418,7 +417,7 @@
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest sample2.sampletests_1.eek[line 7, example 2]>", line 1, in ?
+          File "<doctest sample2.sampletests_1.eek[2]>", line 1, in ?
             z = x + 1
         NameError: name 'x' is not defined
     <BLANKLINE>
@@ -451,7 +450,7 @@
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest sample2.sampletests_1.eek[line 2, example 0]>", line 1, in ?
+          File "<doctest sample2.sampletests_1.eek[0]>", line 1, in ?
             x = y
         NameError: name 'y' is not defined
     <BLANKLINE>
@@ -484,7 +483,7 @@
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest sample2.sampletests_1.eek[line 2, example 0]>", line 1, in ?
+          File "<doctest sample2.sampletests_1.eek[0]>", line 1, in ?
             x = y
         NameError: name 'y' is not defined
     <BLANKLINE>
@@ -520,7 +519,7 @@
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest sample2.sampletests_1.eek[line 2, example 0]>", line 1, in ?
+          File "<doctest sample2.sampletests_1.eek[0]>", line 1, in ?
             x = y
         NameError: name 'y' is not defined
     ----------------------------------------------------------------------
@@ -532,7 +531,7 @@
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest sample2.sampletests_1.eek[line 4, example 1]>", line 1, in ?
+          File "<doctest sample2.sampletests_1.eek[1]>", line 1, in ?
             x
         NameError: name 'x' is not defined
     ----------------------------------------------------------------------
@@ -544,7 +543,7 @@
         Traceback (most recent call last):
           File ".../doctest/__init__.py", line 1256, in __run
             compileflags, 1) in test.globs
-          File "<doctest sample2.sampletests_1.eek[line 7, example 2]>", line 1, in ?
+          File "<doctest sample2.sampletests_1.eek[2]>", line 1, in ?
             z = x + 1
         NameError: name 'x' is not defined
     <BLANKLINE>
@@ -763,28 +762,28 @@
       Ran 9 tests with 0 failures and 0 errors in 0.000 seconds.
     Running samplelayers.Layer11 tests:
       Set up samplelayers.Layer11 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.007 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.007 seconds.
     Running samplelayers.Layer111 tests:
       Set up samplelayers.Layerx in 0.000 seconds.
       Set up samplelayers.Layer111 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.007 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.007 seconds.
     Running samplelayers.Layer112 tests:
       Tear down samplelayers.Layer111 in 0.000 seconds.
       Set up samplelayers.Layer112 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.007 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.007 seconds.
     Running samplelayers.Layer12 tests:
       Tear down samplelayers.Layer112 in 0.000 seconds.
       Tear down samplelayers.Layerx in 0.000 seconds.
       Tear down samplelayers.Layer11 in 0.000 seconds.
       Set up samplelayers.Layer12 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.007 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.007 seconds.
     Running samplelayers.Layer121 tests:
       Set up samplelayers.Layer121 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.007 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.007 seconds.
     Running samplelayers.Layer122 tests:
       Tear down samplelayers.Layer121 in 0.000 seconds.
       Set up samplelayers.Layer122 in 0.000 seconds.
-      Ran 34 tests with 0 failures and 0 errors in 0.006 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.006 seconds.
     Tearing down left over layers:
       Tear down samplelayers.Layer122 in 0.000 seconds.
       Tear down samplelayers.Layer12 in 0.000 seconds.
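
One wrinkle in the progress expectations above: this checkpoint only
partially updated them. The denominators became 47 and the percentages
were recomputed through 4/47, but from 5/47 onward the old 56-based
percentages remain, numerators above 47 are leftovers from the 56-test
run, and the summary line still reads "Ran 56 tests".
(testrunner-progress.txt below shows the same pattern: its 34/34
counters are untouched while the summary lines now say 26.) For
instance:

    # 5/47 should read 10.6%, but the expectation above still says
    # 8.9% -- the percentage for 5/56:
    print(round(5 / 47.0 * 100, 1))   # 10.6
    print(round(5 / 56.0 * 100, 1))   # 8.9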

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-knit.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-knit.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-knit.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -57,7 +57,7 @@
         testrunner-ex/sampletests/../sampletestsl.txt
         test_extra_test_in_products (sample4.products.sampletests.Test)
         test_another_test_in_products (sample4.products.more.sampletests.Test)
-      Ran 36 tests with 0 failures and 0 errors in 0.008 seconds.
+      Ran 28 tests with 0 failures and 0 errors in 0.008 seconds.
     Tearing down left over layers:
       Tear down samplelayers.Layer111 in 0.000 seconds.
       Tear down samplelayers.Layerx in 0.000 seconds.

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-layers.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-layers.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-layers.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -18,17 +18,17 @@
       Set up samplelayers.Layer1 in N.NNN seconds.
       Set up samplelayers.Layer11 in N.NNN seconds.
       Set up samplelayers.Layer112 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running zope.testing.testrunner.layer.UnitTests tests:
       Tear down samplelayers.Layer112 in N.NNN seconds.
       Tear down samplelayers.Layerx in N.NNN seconds.
       Tear down samplelayers.Layer11 in N.NNN seconds.
       Tear down samplelayers.Layer1 in N.NNN seconds.
       Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
-      Ran 192 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 156 tests with 0 failures and 0 errors in N.NNN seconds.
     Tearing down left over layers:
       Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
-    Total: 226 tests, 0 failures, 0 errors in N.NNN seconds.
+    Total: 182 tests, 0 failures, 0 errors in N.NNN seconds.
     False
 
 
@@ -38,7 +38,7 @@
     >>> testrunner.run_internal(defaults)
     Running zope.testing.testrunner.layer.UnitTests tests:
       Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
-      Ran 192 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 156 tests with 0 failures and 0 errors in N.NNN seconds.
     Tearing down left over layers:
       Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
     False
@@ -53,28 +53,28 @@
       Ran 9 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer11 tests:
       Set up samplelayers.Layer11 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer111 tests:
       Set up samplelayers.Layerx in N.NNN seconds.
       Set up samplelayers.Layer111 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer112 tests:
       Tear down samplelayers.Layer111 in N.NNN seconds.
       Set up samplelayers.Layer112 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer12 tests:
       Tear down samplelayers.Layer112 in N.NNN seconds.
       Tear down samplelayers.Layerx in N.NNN seconds.
       Tear down samplelayers.Layer11 in N.NNN seconds.
       Set up samplelayers.Layer12 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer121 tests:
       Set up samplelayers.Layer121 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer122 tests:
       Tear down samplelayers.Layer121 in N.NNN seconds.
       Set up samplelayers.Layer122 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Tearing down left over layers:
       Tear down samplelayers.Layer122 in N.NNN seconds.
       Tear down samplelayers.Layer12 in N.NNN seconds.
@@ -91,37 +91,37 @@
       Ran 9 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer11 tests:
       Set up samplelayers.Layer11 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer111 tests:
       Set up samplelayers.Layerx in N.NNN seconds.
       Set up samplelayers.Layer111 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer112 tests:
       Tear down samplelayers.Layer111 in N.NNN seconds.
       Set up samplelayers.Layer112 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer12 tests:
       Tear down samplelayers.Layer112 in N.NNN seconds.
       Tear down samplelayers.Layerx in N.NNN seconds.
       Tear down samplelayers.Layer11 in N.NNN seconds.
       Set up samplelayers.Layer12 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer121 tests:
       Set up samplelayers.Layer121 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer122 tests:
       Tear down samplelayers.Layer121 in N.NNN seconds.
       Set up samplelayers.Layer122 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running zope.testing.testrunner.layer.UnitTests tests:
       Tear down samplelayers.Layer122 in N.NNN seconds.
       Tear down samplelayers.Layer12 in N.NNN seconds.
       Tear down samplelayers.Layer1 in N.NNN seconds.
       Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
-      Ran 192 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 156 tests with 0 failures and 0 errors in N.NNN seconds.
     Tearing down left over layers:
       Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
-    Total: 405 tests, 0 failures, 0 errors in N.NNN seconds.
+    Total: 321 tests, 0 failures, 0 errors in N.NNN seconds.
     False
 
 It is possible to force the layers to run in subprocesses and parallelize them.
@@ -135,41 +135,41 @@
       Running in a subprocess.
       Set up samplelayers.Layer1 in N.NNN seconds.
       Set up samplelayers.Layer11 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer111 tests:
       Running in a subprocess.
       Set up samplelayers.Layerx in N.NNN seconds.
       Set up samplelayers.Layer1 in N.NNN seconds.
       Set up samplelayers.Layer11 in N.NNN seconds.
       Set up samplelayers.Layer111 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer112 tests:
       Running in a subprocess.
       Set up samplelayers.Layerx in N.NNN seconds.
       Set up samplelayers.Layer1 in N.NNN seconds.
       Set up samplelayers.Layer11 in N.NNN seconds.
       Set up samplelayers.Layer112 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer12 tests:
       Running in a subprocess.
       Set up samplelayers.Layer1 in N.NNN seconds.
       Set up samplelayers.Layer12 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer121 tests:
       Running in a subprocess.
       Set up samplelayers.Layer1 in N.NNN seconds.
       Set up samplelayers.Layer12 in N.NNN seconds.
       Set up samplelayers.Layer121 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer122 tests:
       Running in a subprocess.
       Set up samplelayers.Layer1 in N.NNN seconds.
       Set up samplelayers.Layer12 in N.NNN seconds.
       Set up samplelayers.Layer122 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running zope.testing.testrunner.layer.UnitTests tests:
       Running in a subprocess.
       Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
-      Ran 192 tests with 0 failures and 0 errors in N.NNN seconds.
-    Total: 405 tests, 0 failures, 0 errors in N.NNN seconds.
+      Ran 156 tests with 0 failures and 0 errors in N.NNN seconds.
+    Total: 321 tests, 0 failures, 0 errors in N.NNN seconds.
     False
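
Taken together, the updated summary counts are internally consistent:
each of the six sample layers drops from 34 to 26 tests, the unit-test
layer from 192 to 156, and that accounts exactly for the new totals. A
quick check, grounded in the numbers above:

    # 6 layers x 8 fewer tests each, plus 36 fewer unit tests,
    # equals the drop in the overall total (405 -> 321).
    print(6 * (34 - 26) + (192 - 156) == 405 - 321)   # True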

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-leaks.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-leaks.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-leaks.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -21,29 +21,29 @@
       Set up samplelayers.Layer1 in 0.000 seconds.
       Set up samplelayers.Layer11 in 0.000 seconds.
     Iteration 1
-      Ran 34 tests with 0 failures and 0 errors in 0.013 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.013 seconds.
     Iteration 2
-      Ran 34 tests with 0 failures and 0 errors in 0.012 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.012 seconds.
       sys refcount=100401   change=0     
     Iteration 3
-      Ran 34 tests with 0 failures and 0 errors in 0.012 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.012 seconds.
       sys refcount=100401   change=0     
     Iteration 4
-      Ran 34 tests with 0 failures and 0 errors in 0.013 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.013 seconds.
       sys refcount=100401   change=0     
     Running samplelayers.Layer12 tests:
       Tear down samplelayers.Layer11 in 0.000 seconds.
       Set up samplelayers.Layer12 in 0.000 seconds.
     Iteration 1
-      Ran 34 tests with 0 failures and 0 errors in 0.013 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.013 seconds.
     Iteration 2
-      Ran 34 tests with 0 failures and 0 errors in 0.012 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.012 seconds.
       sys refcount=100411   change=0     
     Iteration 3
-      Ran 34 tests with 0 failures and 0 errors in 0.012 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.012 seconds.
       sys refcount=100411   change=0     
     Iteration 4
-      Ran 34 tests with 0 failures and 0 errors in 0.012 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.012 seconds.
       sys refcount=100411   change=0     
     Tearing down left over layers:
       Tear down samplelayers.Layer12 in 0.000 seconds.

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-progress.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-progress.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-progress.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -73,7 +73,7 @@
                      ##r##
         34/34 (100.0%)##r##
                       ##r##
-      Ran 34 tests with 0 failures and 0 errors in 0.008 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.008 seconds.
     Tearing down left over layers:
       Tear down samplelayers.Layer122 in 0.000 seconds.
       Tear down samplelayers.Layer12 in 0.000 seconds.
@@ -147,7 +147,7 @@
                                                ##r##
     34/34 (100.0%) ... pe/testing/testrunner-ex/sampletests/../sampletestsl.txt##r##
                                                                                ##r##
-  Ran 34 tests with 0 failures and 0 errors in 0.008 seconds.
+  Ran 26 tests with 0 failures and 0 errors in 0.008 seconds.
 Tearing down left over layers:
   Tear down samplelayers.Layer122 in 0.000 seconds.
   Tear down samplelayers.Layer12 in 0.000 seconds.
@@ -223,7 +223,7 @@
                                                    ##r##
         34/34 (100.0%) ... r-ex/sampletests/../sampletestsl.txt##r##
                                                                ##r##
-      Ran 34 tests with 0 failures and 0 errors in 0.008 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.008 seconds.
     Tearing down left over layers:
       Tear down samplelayers.Layer122 in 0.000 seconds.
       Tear down samplelayers.Layer12 in 0.000 seconds.

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-repeat.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-repeat.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-repeat.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -21,11 +21,11 @@
       Set up samplelayers.Layer11 in 0.000 seconds.
       Set up samplelayers.Layer112 in 0.000 seconds.
     Iteration 1
-      Ran 34 tests with 0 failures and 0 errors in 0.010 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.010 seconds.
     Iteration 2
-      Ran 34 tests with 0 failures and 0 errors in 0.010 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.010 seconds.
     Iteration 3
-      Ran 34 tests with 0 failures and 0 errors in 0.010 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.010 seconds.
     Running zope.testing.testrunner.layer.UnitTests tests:
       Tear down samplelayers.Layer112 in N.NNN seconds.
       Tear down samplelayers.Layerx in N.NNN seconds.
@@ -33,14 +33,14 @@
       Tear down samplelayers.Layer1 in N.NNN seconds.
       Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
     Iteration 1
-      Ran 192 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 156 tests with 0 failures and 0 errors in N.NNN seconds.
     Iteration 2
-      Ran 192 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 156 tests with 0 failures and 0 errors in N.NNN seconds.
     Iteration 3
-      Ran 192 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 156 tests with 0 failures and 0 errors in N.NNN seconds.
     Tearing down left over layers:
       Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
-    Total: 226 tests, 0 failures, 0 errors in N.NNN seconds.
+    Total: 182 tests, 0 failures, 0 errors in N.NNN seconds.
     False
 
 The tests are repeated by layer.  Layers are set up and torn down only

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-simple.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-simple.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-simple.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -52,37 +52,37 @@
       Ran 9 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer11 tests:
       Set up samplelayers.Layer11 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer111 tests:
       Set up samplelayers.Layerx in N.NNN seconds.
       Set up samplelayers.Layer111 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer112 tests:
       Tear down samplelayers.Layer111 in N.NNN seconds.
       Set up samplelayers.Layer112 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer12 tests:
       Tear down samplelayers.Layer112 in N.NNN seconds.
       Tear down samplelayers.Layerx in N.NNN seconds.
       Tear down samplelayers.Layer11 in N.NNN seconds.
       Set up samplelayers.Layer12 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer121 tests:
       Set up samplelayers.Layer121 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running samplelayers.Layer122 tests:
       Tear down samplelayers.Layer121 in N.NNN seconds.
       Set up samplelayers.Layer122 in N.NNN seconds.
-      Ran 34 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 26 tests with 0 failures and 0 errors in N.NNN seconds.
     Running zope.testing.testrunner.layer.UnitTests tests:
       Tear down samplelayers.Layer122 in N.NNN seconds.
       Tear down samplelayers.Layer12 in N.NNN seconds.
       Tear down samplelayers.Layer1 in N.NNN seconds.
       Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
-      Ran 192 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 156 tests with 0 failures and 0 errors in N.NNN seconds.
     Tearing down left over layers:
       Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
-    Total: 405 tests, 0 failures, 0 errors in N.NNN seconds.
+    Total: 321 tests, 0 failures, 0 errors in N.NNN seconds.
     False
 
 we see the normal testrunner output, which summarizes the tests run for

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-subunit.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-subunit.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-subunit.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -246,7 +246,7 @@
         Traceback (most recent call last):
           File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/doctest/__init__.py", line 1355, in __run
             compileflags, 1) in test.globs
-          File "<doctest sample2.sampletests_e.eek[line 2, example 0]>", line 1, in <module>
+          File "<doctest sample2.sampletests_e.eek[0]>", line 1, in <module>
             f()
           File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_e.py", line 19, in f
             g()
@@ -303,9 +303,9 @@
         Traceback (most recent call last):
           File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/doctest/__init__.py", line 1355, in __run
             compileflags, 1) in test.globs
-          File "<doctest e.txt[line 4, example 1]>", line 1, in <module>
+          File "<doctest e.txt[1]>", line 1, in <module>
             f()
-          File "<doctest e.txt[line 1, example 0]>", line 2, in f
+          File "<doctest e.txt[0]>", line 2, in f
             return x
         NameError: global name 'x' is not defined
     0\r

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-test-selection.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-test-selection.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-test-selection.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -444,7 +444,7 @@
      test_y0 (sampletests.test_one.TestA)
      test_y1 (sampletests.test_one.TestB)
      test_y0 (sampletests.test_one)
-      Ran 36 tests with 0 failures and 0 errors in N.NNN seconds.
+      Ran 28 tests with 0 failures and 0 errors in N.NNN seconds.
     Tearing down left over layers:
       Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
     False

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-verbose.txt
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-verbose.txt	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/testrunner/testrunner-verbose.txt	2010-04-19 19:33:03 UTC (rev 111141)
@@ -23,7 +23,7 @@
       Set up samplelayers.Layer122 in 0.000 seconds.
       Running:
         ..................................
-      Ran 34 tests with 0 failures and 0 errors in 0.007 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.007 seconds.
     Tearing down left over layers:
       Tear down samplelayers.Layer122 in 0.000 seconds.
       Tear down samplelayers.Layer12 in 0.000 seconds.
@@ -40,7 +40,7 @@
       Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
       Running:
     ................................................................................................................................................................................................
-      Ran 192 tests with 0 failures and 0 errors in 0.035 seconds.
+      Ran 156 tests with 0 failures and 0 errors in 0.035 seconds.
     Tearing down left over layers:
       Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
     False
@@ -82,7 +82,7 @@
         test_y0 (sampletests.test122)
         test_z1 (sampletests.test122)
         testrunner-ex/sampletests/../sampletestsl.txt
-      Ran 34 tests with 0 failures and 0 errors in 0.009 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.009 seconds.
     Tearing down left over layers:
       Tear down samplelayers.Layer122 in 0.000 seconds.
       Tear down samplelayers.Layer12 in 0.000 seconds.
@@ -126,7 +126,7 @@
         test_y0 (sampletests.test122) (0.001 s)
         test_z1 (sampletests.test122) (0.001 s)
         testrunner-ex/sampletests/../sampletestsl.txt (0.001 s)
-      Ran 34 tests with 0 failures and 0 errors in 0.009 seconds.
+      Ran 26 tests with 0 failures and 0 errors in 0.009 seconds.
     Tearing down left over layers:
       Tear down samplelayers.Layer122 in 0.000 seconds.
       Tear down samplelayers.Layer12 in 0.000 seconds.
@@ -148,7 +148,7 @@
     >>> testrunner.run_internal(defaults)
     Running zope.testing.testrunner.layer.UnitTests tests:
       Set up zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
-      Ran 192 tests with 0 failures and 0 errors in 0.034 seconds.
+      Ran 156 tests with 0 failures and 0 errors in 0.034 seconds.
     Tearing down left over layers:
       Tear down zope.testing.testrunner.layer.UnitTests in N.NNN seconds.
     False

Modified: zope.testing/branches/regebro-doctest-patching/src/zope/testing/tests.py
===================================================================
--- zope.testing/branches/regebro-doctest-patching/src/zope/testing/tests.py	2010-04-19 19:32:13 UTC (rev 111140)
+++ zope.testing/branches/regebro-doctest-patching/src/zope/testing/tests.py	2010-04-19 19:33:03 UTC (rev 111141)
@@ -43,5 +43,5 @@
                 (re.compile('No module named zope.testing.unlikelymodulename'),
                  'No module named unlikelymodulename')])),
         doctest.DocFileSuite('setupstack.txt'),
-        doctest.DocTestSuite(doctest, optionflags=doctest.INTERPRET_FOOTNOTES),
+        #doctest.DocTestSuite(doctest, optionflags=doctest.INTERPRET_FOOTNOTES),
         ))
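
The registration is commented out rather than deleted because
INTERPRET_FOOTNOTES is a fork-only flag that disappears once the package
delegates to the stdlib (see the first hunk). If the footnote tests
should keep running wherever the flag exists, a guarded registration
would be one option; this is a sketch of an alternative, not what the
checkpoint does:

    import unittest
    from zope.testing import doctest

    def test_suite():
        suites = [doctest.DocFileSuite('setupstack.txt')]
        # Only register the footnote doctests when the doctest in use
        # actually defines the fork-only flag.
        if hasattr(doctest, 'INTERPRET_FOOTNOTES'):
            suites.append(doctest.DocTestSuite(
                doctest, optionflags=doctest.INTERPRET_FOOTNOTES))
        return unittest.TestSuite(suites)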


