[Zope3-checkins] SVN: zope.testing/trunk/ - Added support for conditionally using a subunit-based output
Sidnei da Silva
sidnei.da.silva at gmail.com
Fri Mar 12 09:47:01 EST 2010
Log message for revision 109927:
- Added support for conditionally using a subunit-based output
formatter if subunit and testtools are available. Patch contributed
by Jonathan Lange.
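
For reference, requesting the new formatter from the test runner looks roughly
like this (a minimal sketch mirroring the doctests added below; 'defaults'
stands in for whatever testrunner defaults are already set up):

    >>> import sys
    >>> from zope.testing import testrunner
    >>> sys.argv = 'test --subunit'.split()
    >>> _ = testrunner.run_internal(defaults)
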
Changed:
U zope.testing/trunk/CHANGES.txt
U zope.testing/trunk/src/zope/testing/testrunner/formatter.py
U zope.testing/trunk/src/zope/testing/testrunner/options.py
A zope.testing/trunk/src/zope/testing/testrunner/testrunner-subunit-err.txt
A zope.testing/trunk/src/zope/testing/testrunner/testrunner-subunit-leaks.txt
A zope.testing/trunk/src/zope/testing/testrunner/testrunner-subunit.txt
U zope.testing/trunk/src/zope/testing/testrunner/tests.py
-=-
Modified: zope.testing/trunk/CHANGES.txt
===================================================================
--- zope.testing/trunk/CHANGES.txt 2010-03-12 14:38:03 UTC (rev 109926)
+++ zope.testing/trunk/CHANGES.txt 2010-03-12 14:47:00 UTC (rev 109927)
@@ -10,6 +10,11 @@
- Fix testrunner-leaks.txt to use the run_internal helper, so that
sys.exit() isn't triggered during the test run.
+- Added support for using a subunit-based output formatter, on request,
+ when subunit and testtools are available. Patch contributed by
+ Jonathan Lange.
+
+
3.8.6 (2009-12-23)
==================
Modified: zope.testing/trunk/src/zope/testing/testrunner/formatter.py
===================================================================
--- zope.testing/trunk/src/zope/testing/testrunner/formatter.py 2010-03-12 14:38:03 UTC (rev 109926)
+++ zope.testing/trunk/src/zope/testing/testrunner/formatter.py 2010-03-12 14:47:00 UTC (rev 109927)
@@ -17,10 +17,14 @@
"""
import doctest
+import os
import sys
import re
+import tempfile
import traceback
+from datetime import datetime, timedelta
+
from zope.testing.exceptions import DocTestFailureException
doctest_template = """
@@ -647,3 +651,405 @@
else:
print self.colorize('exception', line)
print
+
+
+class FakeTest(object):
+ """A fake test object that only has an id."""
+
+ failureException = None
+
+ def __init__(self, test_id):
+ self._id = test_id
+
+ def id(self):
+ return self._id
+
+
+# Conditional imports: we don't want zope.testing to have a hard dependency on
+# subunit.
+try:
+ import subunit
+ from subunit.iso8601 import Utc
+except ImportError:
+ subunit = None
+
+
+# testtools is a hard dependency of subunit itself; we guard it separately
+# only to give a more specific error message.
+try:
+ from testtools import content
+except ImportError:
+ content = None
+
+
+class SubunitOutputFormatter(object):
+ """A subunit output formatter.
+
+ This output formatter generates subunit compatible output (see
+ https://launchpad.net/subunit). Subunit output is essentially a stream of
+ results of unit tests. In this formatter, non-test events (such as layer
+ set up) are encoded as specially tagged tests and summary-generating
+ methods (such as modules_with_import_problems) deliberately do nothing.
+
+ In particular, for a layer 'foo', the fake tests related to layer set up
+ and tear down are tagged with 'zope:layer' and are called 'foo:setUp' and
+ 'foo:tearDown'. Any tests within layer 'foo' are tagged with
+ 'zope:layer:foo'.
+
+ Note that all tags specific to this formatter begin with 'zope:'.
+ """
+
+ # subunit output is designed for computers, so displaying a progress bar
+ # isn't helpful.
+ progress = False
+ verbose = property(lambda self: self.options.verbose)
+
+ TAG_INFO_SUBOPTIMAL = 'zope:info_suboptimal'
+ TAG_ERROR_WITH_BANNER = 'zope:error_with_banner'
+ TAG_LAYER = 'zope:layer'
+ TAG_IMPORT_ERROR = 'zope:import_error'
+ TAG_PROFILER_STATS = 'zope:profiler_stats'
+ TAG_GARBAGE = 'zope:garbage'
+ TAG_THREADS = 'zope:threads'
+ TAG_REFCOUNTS = 'zope:refcounts'
+
+ def __init__(self, options):
+ if subunit is None:
+ raise Exception("Requires subunit 0.0.5 or better")
+ if content is None:
+ raise Exception("Requires testtools 0.9.2 or better")
+ self.options = options
+ self._stream = sys.stdout
+ self._subunit = subunit.TestProtocolClient(self._stream)
+ # Used to track the last layer that was set up or torn down. Either
+ # None or (layer_name, last_touched_time).
+ self._last_layer = None
+ self.UTC = Utc()
+ # Content types used in the output.
+ self.TRACEBACK_CONTENT_TYPE = content.ContentType(
+ 'text', 'x-traceback', dict(language='python', charset='utf8'))
+ self.PROFILE_CONTENT_TYPE = content.ContentType(
+ 'application', 'x-binary-profile')
+ self.PLAIN_TEXT = content.ContentType(
+ 'text', 'plain', {'charset': 'utf8'})
+
+ @property
+ def _time_tests(self):
+ return self.verbose > 2
+
+ def _emit_timestamp(self, now=None):
+ """Emit a timestamp to the subunit stream.
+
+ If 'now' is not specified, use the current time on the system clock.
+ """
+ if now is None:
+ now = datetime.now(self.UTC)
+ self._subunit.time(now)
+ return now
+
+ def _emit_tag(self, tag):
+ self._stream.write('tags: %s\n' % (tag,))
+
+ def _stop_tag(self, tag):
+ self._stream.write('tags: -%s\n' % (tag,))
+
+ def _emit_fake_test(self, message, tag, details=None):
+ """Emit a successful fake test to the subunit stream.
+
+ Use this to print tagged informative messages.
+ """
+ test = FakeTest(message)
+ self._subunit.startTest(test)
+ self._emit_tag(tag)
+ self._subunit.addSuccess(test, details)
+
+ def _emit_error(self, error_id, tag, exc_info):
+ """Emit an error to the subunit stream.
+
+ Use this to pass on information about errors that occur outside of
+ tests.
+ """
+ test = FakeTest(error_id)
+ self._subunit.startTest(test)
+ self._emit_tag(tag)
+ self._subunit.addError(test, exc_info)
+
+ def info(self, message):
+ """Print an informative message, but only if verbose."""
+ # info() output is not relevant to actual test results. It only says
+ # things like "Running tests" or "Tearing down left over layers",
+ # things that are communicated already by the subunit stream. Just
+ # suppress the info() output.
+ pass
+
+ def info_suboptimal(self, message):
+ """Print an informative message about losing some of the features.
+
+ For example, when you run some tests in a subprocess, you lose the
+ ability to use the debugger.
+ """
+ # Used _only_ to indicate running in a subprocess.
+ self._emit_fake_test(message.strip(), self.TAG_INFO_SUBOPTIMAL)
+
+ def error(self, message):
+ """Report an error."""
+ # XXX: Mostly used for user errors, sometimes used for errors in the
+ # test framework, sometimes used to record layer setUp failure (!!!).
+ # How should this be encoded?
+ raise NotImplementedError(self.error)
+
+ def error_with_banner(self, message):
+ """Report an error with a big ASCII banner."""
+ # Either "Could not communicate with subprocess"
+ # Or "Can't post-mortem debug when running a layer as a subprocess!"
+ self._emit_fake_test(message, self.TAG_ERROR_WITH_BANNER)
+
+ def _enter_layer(self, layer_name):
+ """Signal in the subunit stream that we are entering a layer."""
+ self._emit_tag('zope:layer:%s' % (layer_name,))
+
+ def _exit_layer(self, layer_name):
+ """Signal in the subunit stream that we are leaving a layer."""
+ self._stop_tag('zope:layer:%s' % (layer_name,))
+
+ def start_set_up(self, layer_name):
+ """Report that we're setting up a layer.
+
+ We do this by emitting a fake test named '$LAYER_NAME:setUp', tagged
+ with 'zope:layer'.
+ """
+ now = self._emit_timestamp()
+ self._subunit.startTest(FakeTest('%s:setUp' % (layer_name,)))
+ self._emit_tag(self.TAG_LAYER)
+ self._last_layer = (layer_name, now)
+
+ def stop_set_up(self, seconds):
+ layer_name, start_time = self._last_layer
+ self._last_layer = None
+ self._emit_timestamp(start_time + timedelta(seconds=seconds))
+ self._subunit.addSuccess(FakeTest('%s:setUp' % (layer_name,)))
+ self._enter_layer(layer_name)
+
+ def start_tear_down(self, layer_name):
+ """Report that we're tearing down a layer.
+
+ We do this by stopping the 'zope:layer:$LAYER_NAME' tag and emitting a
+ fake test named '$LAYER_NAME:tearDown', tagged with 'zope:layer'.
+ """
+ self._exit_layer(layer_name)
+ now = self._emit_timestamp()
+ self._subunit.startTest(FakeTest('%s:tearDown' % (layer_name,)))
+ self._emit_tag(self.TAG_LAYER)
+ self._last_layer = (layer_name, now)
+
+ def stop_tear_down(self, seconds):
+ layer_name, start_time = self._last_layer
+ self._last_layer = None
+ self._emit_timestamp(start_time + timedelta(seconds=seconds))
+ self._subunit.addSuccess(FakeTest('%s:tearDown' % (layer_name,)))
+
+ def tear_down_not_supported(self):
+ """Report that we could not tear down a layer.
+
+ Should be called right after start_tear_down().
+ """
+ layer_name, start_time = self._last_layer
+ self._last_layer = None
+ self._emit_timestamp(datetime.now(self.UTC))
+ self._subunit.addSkip(
+ FakeTest('%s:tearDown' % (layer_name,)), "tearDown not supported")
+
+ def summary(self, n_tests, n_failures, n_errors, n_seconds):
+ """Print out a summary.
+
+ Since subunit is a streaming protocol, it has no need for a
+ summary. When the stream is finished, other tools can generate a
+ summary if so desired.
+ """
+ pass
+
+ def start_test(self, test, tests_run, total_tests):
+ """Report that we're about to run a test.
+
+ The next output operation should be test_success(), test_error(), or
+ test_failure().
+ """
+ if self._time_tests:
+ self._emit_timestamp()
+ # Note that this always emits newlines, so it will function as well as
+ # other start_test implementations if we are running in a subprocess.
+ self._subunit.startTest(test)
+
+ def stop_test(self, test):
+ """Clean up the output state after a test."""
+ self._subunit.stopTest(test)
+ self._stream.flush()
+
+ def stop_tests(self):
+ """Clean up the output state after a collection of tests."""
+ self._stream.flush()
+
+ def test_success(self, test, seconds):
+ if self._time_tests:
+ self._emit_timestamp()
+ self._subunit.addSuccess(test)
+
+ def import_errors(self, import_errors):
+ """Report test-module import errors (if any)."""
+ if not import_errors:
+ return
+ for error in import_errors:
+ self._emit_error(
+ error.module, self.TAG_IMPORT_ERROR, error.exc_info)
+
+ def modules_with_import_problems(self, import_errors):
+ """Report names of modules with import problems (if any)."""
+ # This is simply a summary method, and subunit output doesn't benefit
+ # from summaries.
+ pass
+
+ def _exc_info_to_details(self, exc_info):
+ """Translate 'exc_info' into a details dictionary usable with subunit.
+ """
+ # In an ideal world, we'd use the pre-bundled 'TracebackContent' class
+ # from testtools. However, 'OutputFormatter' contains special logic to
+ # handle errors from doctests, so we have to use that and manually
+ # create an object equivalent to an instance of 'TracebackContent'.
+ formatter = OutputFormatter(None)
+ traceback = formatter.format_traceback(exc_info)
+ return {
+ 'traceback': content.Content(
+ self.TRACEBACK_CONTENT_TYPE, lambda: [traceback.encode('utf8')])}
+
+ def test_error(self, test, seconds, exc_info):
+ """Report that an error occurred while running a test.
+
+ Should be called right after start_test().
+
+ The next output operation should be stop_test().
+ """
+ if self._time_tests:
+ self._emit_timestamp()
+ details = self._exc_info_to_details(exc_info)
+ self._subunit.addError(test, details=details)
+
+ def test_failure(self, test, seconds, exc_info):
+ """Report that a test failed.
+
+ Should be called right after start_test().
+
+ The next output operation should be stop_test().
+ """
+ if self._time_tests:
+ self._emit_timestamp()
+ details = self._exc_info_to_details(exc_info)
+ self._subunit.addFailure(test, details=details)
+
+ def profiler_stats(self, stats):
+ """Report profiler stats."""
+ fd, filename = tempfile.mkstemp()
+ os.close(fd)  # avoid leaking the descriptor; the file is reopened by name
+ try:
+ stats.dump_stats(filename)
+ profile_content = content.Content(
+ self.PROFILE_CONTENT_TYPE, open(filename).read)
+ details = {'profiler-stats': profile_content}
+ finally:
+ os.unlink(filename)
+ # Name the test 'zope:profiler_stats' just like its tag.
+ self._emit_fake_test(
+ self.TAG_PROFILER_STATS, self.TAG_PROFILER_STATS, details)
+
+ def tests_with_errors(self, errors):
+ """Report tests with errors.
+
+ Simply not supported by the subunit formatter. Fancy summary output
+ doesn't make sense.
+ """
+ pass
+
+ def tests_with_failures(self, failures):
+ """Report tests with failures.
+
+ Simply not supported by the subunit formatter. Fancy summary output
+ doesn't make sense.
+ """
+ pass
+
+ def totals(self, n_tests, n_failures, n_errors, n_seconds):
+ """Summarize the results of all layers.
+
+ Simply not supported by the subunit formatter. Fancy summary output
+ doesn't make sense.
+ """
+ pass
+
+ def list_of_tests(self, tests, layer_name):
+ """Report a list of test names."""
+ self._enter_layer(layer_name)
+ for test in tests:
+ self._subunit.startTest(test)
+ self._subunit.addSuccess(test)
+ self._exit_layer(layer_name)
+
+ def _get_text_details(self, name, text):
+ """Get a details dictionary that just has some plain text."""
+ return {
+ name: content.Content(
+ self.PLAIN_TEXT, lambda: [text.encode('utf8')])}
+
+ def garbage(self, garbage):
+ """Report garbage generated by tests."""
+ # XXX: Really, 'garbage', 'profiler_stats' and the 'refcounts' twins
+ # ought to add extra details to a fake test that represents the
+ # summary information for the whole suite. However, there's no event
+ # on output formatters for "everything is really finished, honest". --
+ # jml, 2010-02-14
+ details = self._get_text_details('garbage', unicode(garbage))
+ self._emit_fake_test(
+ self.TAG_GARBAGE, self.TAG_GARBAGE, details)
+
+ def test_garbage(self, test, garbage):
+ """Report garbage generated by a test.
+
+ Encoded in the subunit stream as a test error. Clients can filter out
+ these tests based on the tag if they don't think garbage should fail
+ the test run.
+ """
+ # XXX: Perhaps 'test_garbage' and 'test_threads' ought to be within
+ # the output for the actual test, appended as details to whatever
+ # result the test gets. Not an option with the present API, as there's
+ # no event for "no more output for this test". -- jml, 2010-02-14
+ self._subunit.startTest(test)
+ self._emit_tag(self.TAG_GARBAGE)
+ self._subunit.addError(
+ test, details=self._get_text_details('garbage', unicode(garbage)))
+
+ def test_threads(self, test, new_threads):
+ """Report threads left behind by a test.
+
+ Encoded in the subunit stream as a test error. Clients can filter out
+ these tests based on the tag if they don't think left-over threads
+ should fail the test run.
+ """
+ self._subunit.startTest(test)
+ self._emit_tag(self.TAG_THREADS)
+ self._subunit.addError(
+ test, details=self._get_text_details('threads', unicode(new_threads)))
+
+ def refcounts(self, rc, prev):
+ """Report a change in reference counts."""
+ details = self._get_text_details('sys-refcounts', str(rc))
+ details.update(
+ self._get_text_details('changes', str(rc - prev)))
+ # XXX: Emit the details dict as JSON?
+ self._emit_fake_test(
+ self.TAG_REFCOUNTS, self.TAG_REFCOUNTS, details)
+
+ def detailed_refcounts(self, track, rc, prev):
+ """Report a change in reference counts, with extra detail."""
+ details = self._get_text_details('sys-refcounts', str(rc))
+ details.update(
+ self._get_text_details('changes', str(rc - prev)))
+ details.update(
+ self._get_text_details('track', str(track.delta)))
+
+ self._emit_fake_test(
+ self.TAG_REFCOUNTS, self.TAG_REFCOUNTS, details)
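
For context, the stream written by this formatter can be read back with
subunit's own parser. A minimal sketch (not part of this change; it assumes
the subunit Python API of this era, in which TestProtocolServer parses a
stream into a standard unittest TestResult):

    import sys
    import unittest

    import subunit

    # Parse a captured subunit stream (e.g. the output of a --subunit test
    # run) from stdin into a plain unittest.TestResult.
    result = unittest.TestResult()
    parser = subunit.TestProtocolServer(result)
    parser.readFrom(sys.stdin)
    print '%d run, %d failures, %d errors' % (
        result.testsRun, len(result.failures), len(result.errors))
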
Modified: zope.testing/trunk/src/zope/testing/testrunner/options.py
===================================================================
--- zope.testing/trunk/src/zope/testing/testrunner/options.py 2010-03-12 14:38:03 UTC (rev 109926)
+++ zope.testing/trunk/src/zope/testing/testrunner/options.py 2010-03-12 14:47:00 UTC (rev 109927)
@@ -22,7 +22,11 @@
import sys
from zope.testing.testrunner.profiling import available_profilers
-from zope.testing.testrunner.formatter import OutputFormatter, ColorfulOutputFormatter
+from zope.testing.testrunner.formatter import (
+ OutputFormatter,
+ ColorfulOutputFormatter,
+ SubunitOutputFormatter,
+ )
from zope.testing.testrunner.formatter import terminal_has_colors
@@ -192,6 +196,12 @@
""")
reporting.add_option(
+ '--subunit', action="store_true", dest='subunit',
+ help="""\
+Use subunit output. Will not be colorized.
+""")
+
+reporting.add_option(
'--slow-test', type='float', dest='slow_test_threshold', metavar='N',
help="""\
With -c and -vvv, highlight tests that take longer than N seconds (default:
@@ -531,7 +541,19 @@
options, positional = parser.parse_args(args[1:], defaults)
options.original_testrunner_args = args
- if options.color:
+ if options.subunit:
+ try:
+ import subunit
+ except ImportError:
+ print """\
+ Subunit is not installed. Please install Subunit
+ to generate subunit output.
+ """
+ options.fail = True
+ return options
+ else:
+ options.output = SubunitOutputFormatter(options)
+ elif options.color:
options.output = ColorfulOutputFormatter(options)
options.output.slow_test_threshold = options.slow_test_threshold
else:
Added: zope.testing/trunk/src/zope/testing/testrunner/testrunner-subunit-err.txt
===================================================================
--- zope.testing/trunk/src/zope/testing/testrunner/testrunner-subunit-err.txt (rev 0)
+++ zope.testing/trunk/src/zope/testing/testrunner/testrunner-subunit-err.txt 2010-03-12 14:47:00 UTC (rev 109927)
@@ -0,0 +1,20 @@
+Using subunit output without subunit installed
+==============================================
+
+To use the --subunit reporting option, you must have subunit installed. If you
+do not, you will get an error message:
+
+ >>> import os.path, sys
+ >>> directory_with_tests = os.path.join(this_directory, 'testrunner-ex')
+ >>> defaults = [
+ ... '--path', directory_with_tests,
+ ... '--tests-pattern', '^sampletestsf?$',
+ ... ]
+
+ >>> from zope.testing import testrunner
+
+ >>> sys.argv = 'test --subunit'.split()
+ >>> _ = testrunner.run_internal(defaults)
+ Subunit is not installed. Please install Subunit
+ to generate subunit output.
+ <BLANKLINE>
Added: zope.testing/trunk/src/zope/testing/testrunner/testrunner-subunit-leaks.txt
===================================================================
--- zope.testing/trunk/src/zope/testing/testrunner/testrunner-subunit-leaks.txt (rev 0)
+++ zope.testing/trunk/src/zope/testing/testrunner/testrunner-subunit-leaks.txt 2010-03-12 14:47:00 UTC (rev 109927)
@@ -0,0 +1,107 @@
+Debugging Memory Leaks with subunit output
+==========================================
+
+The --report-refcounts (-r) option can be used with the --repeat (-N)
+option to detect and diagnose memory leaks. To use this option, you
+must configure Python with the --with-pydebug option. (On Unix, pass
+this option to configure and then build Python.)
+
+For more detailed documentation, see testrunner-leaks.txt.
+
+ >>> import os.path, sys
+ >>> directory_with_tests = os.path.join(this_directory, 'testrunner-ex')
+ >>> defaults = [
+ ... '--path', directory_with_tests,
+ ... '--tests-pattern', '^sampletestsf?$',
+ ... ]
+
+ >>> from zope.testing import testrunner
+
+Each layer is repeated the requested number of times. For each
+iteration after the first, the system refcount and the change in system
+refcount are shown. The system refcount is the total of all refcounts in
+the system. When a refcount on any object changes, the system
+refcount changes by the same amount. Tests that don't leak show
+zero change in system refcount.
+
+Let's look at an example test that leaks:
+
+ >>> sys.argv = 'test --subunit --tests-pattern leak -N2 -r'.split()
+ >>> _ = testrunner.run_internal(defaults)
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: zope.testing.testrunner.layer.UnitTests:setUp
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: zope.testing.testrunner.layer.UnitTests:setUp
+ tags: zope:layer:zope.testing.testrunner.layer.UnitTests
+ test: leak.TestSomething.testleak
+ successful: leak.TestSomething.testleak
+ test: leak.TestSomething.testleak
+ successful: leak.TestSomething.testleak
+ test: zope:refcounts
+ tags: zope:refcounts
+ successful: zope:refcounts [ multipart
+ Content-Type: text/plain;charset=utf8
+ ...
+ ...\r
+ <BLANKLINE>
+ ...\r
+ <BLANKLINE>
+ Content-Type: text/plain;charset=utf8
+ ...
+ ...\r
+ <BLANKLINE>
+ ...\r
+ <BLANKLINE>
+ ]
+ tags: -zope:layer:zope.testing.testrunner.layer.UnitTests
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: zope.testing.testrunner.layer.UnitTests:tearDown
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: zope.testing.testrunner.layer.UnitTests:tearDown
+
+Here we see that the system refcount is increasing. If we specify a
+verbosity greater than one, we can get details broken out by object
+type (or class):
+
+ >>> sys.argv = 'test --subunit --tests-pattern leak -N2 -v -r'.split()
+ >>> _ = testrunner.run_internal(defaults)
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: zope.testing.testrunner.layer.UnitTests:setUp
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: zope.testing.testrunner.layer.UnitTests:setUp
+ tags: zope:layer:zope.testing.testrunner.layer.UnitTests
+ test: leak.TestSomething.testleak
+ successful: leak.TestSomething.testleak
+ test: leak.TestSomething.testleak
+ successful: leak.TestSomething.testleak
+ test: zope:refcounts
+ tags: zope:refcounts
+ successful: zope:refcounts [ multipart
+ Content-Type: text/plain;charset=utf8
+ ...
+ ...\r
+ <BLANKLINE>
+ ...\r
+ <BLANKLINE>
+ Content-Type: text/plain;charset=utf8
+ ...
+ ...\r
+ <BLANKLINE>
+ ...\r
+ <BLANKLINE>
+ Content-Type: text/plain;charset=utf8
+ ...
+ ...\r
+ <BLANKLINE>
+ ...
+ <BLANKLINE>
+ ]
+ tags: -zope:layer:zope.testing.testrunner.layer.UnitTests
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: zope.testing.testrunner.layer.UnitTests:tearDown
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: zope.testing.testrunner.layer.UnitTests:tearDown
Added: zope.testing/trunk/src/zope/testing/testrunner/testrunner-subunit.txt
===================================================================
--- zope.testing/trunk/src/zope/testing/testrunner/testrunner-subunit.txt (rev 0)
+++ zope.testing/trunk/src/zope/testing/testrunner/testrunner-subunit.txt 2010-03-12 14:47:00 UTC (rev 109927)
@@ -0,0 +1,678 @@
+Subunit Output
+==============
+
+Subunit is a streaming protocol for interchanging test results. More
+information can be found at https://launchpad.net/subunit.
+
+ >>> import os.path, sys
+ >>> directory_with_tests = os.path.join(this_directory, 'testrunner-ex')
+ >>> defaults = [
+ ... '--path', directory_with_tests,
+ ... '--tests-pattern', '^sampletestsf?$',
+ ... ]
+
+ >>> from zope.testing import testrunner
+
+
+Basic output
+------------
+
+Subunit output is line-based, with a 'test:' line for the start of each test
+and a 'successful:' line for each successful test.
+
+Zope layer set up and tear down events are represented as tests tagged with
+'zope:layer'. This allows them to be distinguished from actual tests, provides
+a place for the layer timing information in the subunit stream and allows us
+to include error information if necessary.
+
+Once the layer is set up, all future tests are tagged with
+'zope:layer:LAYER_NAME'.
+
+ >>> sys.argv = 'test --layer 122 --subunit -t TestNotMuch'.split()
+ >>> testrunner.run_internal(defaults)
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer1:setUp
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: samplelayers.Layer1:setUp
+ tags: zope:layer:samplelayers.Layer1
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer12:setUp
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: samplelayers.Layer12:setUp
+ tags: zope:layer:samplelayers.Layer12
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer122:setUp
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: samplelayers.Layer122:setUp
+ tags: zope:layer:samplelayers.Layer122
+ test: sample1.sampletests.test122.TestNotMuch.test_1
+ successful: sample1.sampletests.test122.TestNotMuch.test_1
+ test: sample1.sampletests.test122.TestNotMuch.test_2
+ successful: sample1.sampletests.test122.TestNotMuch.test_2
+ test: sample1.sampletests.test122.TestNotMuch.test_3
+ successful: sample1.sampletests.test122.TestNotMuch.test_3
+ test: sampletests.test122.TestNotMuch.test_1
+ successful: sampletests.test122.TestNotMuch.test_1
+ test: sampletests.test122.TestNotMuch.test_2
+ successful: sampletests.test122.TestNotMuch.test_2
+ test: sampletests.test122.TestNotMuch.test_3
+ successful: sampletests.test122.TestNotMuch.test_3
+ tags: -zope:layer:samplelayers.Layer122
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer122:tearDown
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: samplelayers.Layer122:tearDown
+ tags: -zope:layer:samplelayers.Layer12
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer12:tearDown
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: samplelayers.Layer12:tearDown
+ tags: -zope:layer:samplelayers.Layer1
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer1:tearDown
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: samplelayers.Layer1:tearDown
+ False
+
+
+Timing tests
+------------
+
+When verbosity is high enough, the subunit stream includes timing information
+for the actual tests, as well as for the layers.
+
+ >>> sys.argv = 'test --layer 122 -vvv --subunit -t TestNotMuch'.split()
+ >>> testrunner.run_internal(defaults)
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer1:setUp
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: samplelayers.Layer1:setUp
+ tags: zope:layer:samplelayers.Layer1
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer12:setUp
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: samplelayers.Layer12:setUp
+ tags: zope:layer:samplelayers.Layer12
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer122:setUp
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: samplelayers.Layer122:setUp
+ tags: zope:layer:samplelayers.Layer122
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sample1.sampletests.test122.TestNotMuch.test_1
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: sample1.sampletests.test122.TestNotMuch.test_1
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sample1.sampletests.test122.TestNotMuch.test_2
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: sample1.sampletests.test122.TestNotMuch.test_2
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sample1.sampletests.test122.TestNotMuch.test_3
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: sample1.sampletests.test122.TestNotMuch.test_3
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sampletests.test122.TestNotMuch.test_1
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: sampletests.test122.TestNotMuch.test_1
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sampletests.test122.TestNotMuch.test_2
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: sampletests.test122.TestNotMuch.test_2
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sampletests.test122.TestNotMuch.test_3
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: sampletests.test122.TestNotMuch.test_3
+ tags: -zope:layer:samplelayers.Layer122
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer122:tearDown
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: samplelayers.Layer122:tearDown
+ tags: -zope:layer:samplelayers.Layer12
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer12:tearDown
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: samplelayers.Layer12:tearDown
+ tags: -zope:layer:samplelayers.Layer1
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer1:tearDown
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: samplelayers.Layer1:tearDown
+ False
+
+
+Listing tests
+-------------
+
+A subunit stream is a stream of test results, more or less, so the most
+natural way of listing tests in subunit is to simply emit successful test
+results without actually running the tests.
+
+Note that in this stream, we don't emit fake tests for layer set up and
+tear down, because no layers are actually set up or torn down when listing.
+
+We also don't include the dependent layers in the stream (in this case Layer1
+and Layer12), since they are not provided to the reporter.
+
+ >>> sys.argv = (
+ ... 'test --layer 122 --list-tests --subunit -t TestNotMuch').split()
+ >>> testrunner.run_internal(defaults)
+ tags: zope:layer:samplelayers.Layer122
+ test: sample1.sampletests.test122.TestNotMuch.test_1
+ successful: sample1.sampletests.test122.TestNotMuch.test_1
+ test: sample1.sampletests.test122.TestNotMuch.test_2
+ successful: sample1.sampletests.test122.TestNotMuch.test_2
+ test: sample1.sampletests.test122.TestNotMuch.test_3
+ successful: sample1.sampletests.test122.TestNotMuch.test_3
+ test: sampletests.test122.TestNotMuch.test_1
+ successful: sampletests.test122.TestNotMuch.test_1
+ test: sampletests.test122.TestNotMuch.test_2
+ successful: sampletests.test122.TestNotMuch.test_2
+ test: sampletests.test122.TestNotMuch.test_3
+ successful: sampletests.test122.TestNotMuch.test_3
+ tags: -zope:layer:samplelayers.Layer122
+ False
+
+
+Profiling tests
+---------------
+
+Test suites often cover a lot of code, and the performance of test suites
+themselves is often a critical part of the development process. Thus, it's
+good to be able to profile a test run.
+
+ >>> sys.argv = (
+ ... 'test --layer 122 --profile=cProfile --subunit '
+ ... '-t TestNotMuch').split()
+ >>> testrunner.run_internal(defaults)
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer1:setUp
+ ...
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: samplelayers.Layer1:tearDown
+ test: zope:profiler_stats
+ tags: zope:profiler_stats
+ successful: zope:profiler_stats [ multipart
+ Content-Type: application/x-binary-profile
+ profiler-stats
+ ...\r
+ <BLANKLINE>
+ ...
+ <BLANKLINE>
+ ]
+ False
+
+
+Errors
+------
+
+Errors are recorded in the subunit stream as MIME-encoded chunks of text.
+
+ >>> sys.argv = [
+ ... 'test', '--subunit', '--tests-pattern', '^sampletests_e$',
+ ... ]
+ >>> testrunner.run_internal(defaults)
+ time: 2010-02-05 15:27:05.113541Z
+ test: zope.testing.testrunner.layer.UnitTests:setUp
+ tags: zope:layer
+ time: 2010-02-05 15:27:05.113545Z
+ successful: zope.testing.testrunner.layer.UnitTests:setUp
+ tags: zope:layer:zope.testing.testrunner.layer.UnitTests
+ test: sample2.sampletests_e.eek
+ failure: sample2.sampletests_e.eek [ multipart
+ Content-Type: text/x-traceback;charset=utf8,language=python
+ traceback
+ 4B6\r
+ <BLANKLINE>
+ Failed doctest test for sample2.sampletests_e.eek
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_e.py", line 29, in eek
+ <BLANKLINE>
+ ----------------------------------------------------------------------
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_e.py", line 31, in sample2.sampletests_e.eek
+ Failed example:
+ f()
+ Exception raised:
+ Traceback (most recent call last):
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/doctest/__init__.py", line 1355, in __run
+ compileflags, 1) in test.globs
+ File "<doctest sample2.sampletests_e.eek[line 2, example 0]>", line 1, in <module>
+ f()
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_e.py", line 19, in f
+ g()
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_e.py", line 25, in g
+ x = y + 1
+ - __traceback_info__: I don't know what Y should be.
+ NameError: global name 'y' is not defined
+ 0\r
+ <BLANKLINE>
+ ]
+ test: sample2.sampletests_e.Test.test1
+ successful: sample2.sampletests_e.Test.test1
+ test: sample2.sampletests_e.Test.test2
+ successful: sample2.sampletests_e.Test.test2
+ test: sample2.sampletests_e.Test.test3
+ error: sample2.sampletests_e.Test.test3 [ multipart
+ Content-Type: text/x-traceback;charset=utf8,language=python
+ traceback
+ 29F\r
+ <BLANKLINE>
+ Traceback (most recent call last):
+ File "/usr/lib/python2.6/unittest.py", line 279, in run
+ testMethod()
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_e.py", line 44, in test3
+ f()
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_e.py", line 19, in f
+ g()
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample2/sampletests_e.py", line 25, in g
+ x = y + 1
+ - __traceback_info__: I don't know what Y should be.
+ NameError: global name 'y' is not defined
+ 0\r
+ <BLANKLINE>
+ ]
+ test: sample2.sampletests_e.Test.test4
+ successful: sample2.sampletests_e.Test.test4
+ test: sample2.sampletests_e.Test.test5
+ successful: sample2.sampletests_e.Test.test5
+ test: e_txt
+ failure: e_txt [ multipart
+ Content-Type: text/x-traceback;charset=utf8,language=python
+ traceback
+ 329\r
+ <BLANKLINE>
+ Failed doctest test for e.txt
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample2/e.txt", line 0
+ <BLANKLINE>
+ ----------------------------------------------------------------------
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample2/e.txt", line 4, in e.txt
+ Failed example:
+ f()
+ Exception raised:
+ Traceback (most recent call last):
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/doctest/__init__.py", line 1355, in __run
+ compileflags, 1) in test.globs
+ File "<doctest e.txt[line 4, example 1]>", line 1, in <module>
+ f()
+ File "<doctest e.txt[line 1, example 0]>", line 2, in f
+ return x
+ NameError: global name 'x' is not defined
+ 0\r
+ <BLANKLINE>
+ ]
+ tags: -zope:layer:zope.testing.testrunner.layer.UnitTests
+ time: 2010-02-05 15:27:05.147082Z
+ test: zope.testing.testrunner.layer.UnitTests:tearDown
+ tags: zope:layer
+ time: 2010-02-05 15:27:05.147088Z
+ successful: zope.testing.testrunner.layer.UnitTests:tearDown
+ True
+
+
+Layers that can't be torn down
+------------------------------
+
+A layer can have a tearDown method that raises NotImplementedError. If this is
+the case and there are no remaining tests to run, the subunit stream will say
+that the layer skipped its tearDown.
+
+ >>> import os.path, sys
+ >>> directory_with_tests = os.path.join(this_directory, 'testrunner-ex')
+ >>> from zope.testing import testrunner
+ >>> defaults = [
+ ... '--subunit',
+ ... '--path', directory_with_tests,
+ ... '--tests-pattern', '^sampletestsf?$',
+ ... ]
+
+ >>> sys.argv = 'test -ssample2 --tests-pattern sampletests_ntd$'.split()
+ >>> testrunner.run_internal(defaults)
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sample2.sampletests_ntd.Layer:setUp
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: sample2.sampletests_ntd.Layer:setUp
+ tags: zope:layer:sample2.sampletests_ntd.Layer
+ test: sample2.sampletests_ntd.TestSomething.test_something
+ successful: sample2.sampletests_ntd.TestSomething.test_something
+ tags: -zope:layer:sample2.sampletests_ntd.Layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sample2.sampletests_ntd.Layer:tearDown
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ skip: sample2.sampletests_ntd.Layer:tearDown [
+ tearDown not supported
+ ]
+ False
+
+
+Module import errors
+--------------------
+
+We report module import errors too. They get encoded as tests with errors. The
+name of the test is the module that could not be imported, and the test's
+result is an error containing the traceback. These "tests" are tagged with
+'zope:import_error'.
+
+Let's create a module with some bad syntax:
+
+ >>> badsyntax_path = os.path.join(directory_with_tests,
+ ... "sample2", "sampletests_i.py")
+ >>> f = open(badsyntax_path, "w")
+ >>> print >> f, "importx unittest" # syntax error
+ >>> f.close()
+
+And then run the tests:
+
+ >>> sys.argv = (
+ ... 'test --subunit --tests-pattern ^sampletests(f|_i)?$ --layer 1 '
+ ... ).split()
+ >>> testrunner.run_internal(defaults)
+ test: sample2.sampletests_i
+ tags: zope:import_error
+ error: sample2.sampletests_i [
+ Traceback (most recent call last):
+ testrunner-ex/sample2/sampletests_i.py", line 1
+ importx unittest
+ ^
+ SyntaxError: invalid syntax
+ ]
+ test: sample2.sample21.sampletests_i
+ tags: zope:import_error
+ error: sample2.sample21.sampletests_i [
+ Traceback (most recent call last):
+ testrunner-ex/sample2/sample21/sampletests_i.py", Line NNN, in ?
+ import zope.testing.huh
+ ImportError: No module named huh
+ ]
+ test: sample2.sample23.sampletests_i
+ tags: zope:import_error
+ error: sample2.sample23.sampletests_i [
+ Traceback (most recent call last):
+ testrunner-ex/sample2/sample23/sampletests_i.py", Line NNN, in ?
+ class Test(unittest.TestCase):
+ testrunner-ex/sample2/sample23/sampletests_i.py", Line NNN, in Test
+ raise TypeError('eek')
+ TypeError: eek
+ ]
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: samplelayers.Layer1:setUp
+ tags: zope:layer
+ ...
+ True
+
+Of course, because we care deeply about test isolation, we're going to have to
+delete the module with bad syntax now, lest it contaminate other tests or even
+future test runs.
+
+ >>> os.unlink(badsyntax_path)
+
+
+Tests in subprocesses
+---------------------
+
+If the tearDown method raises NotImplementedError and there are remaining
+layers to run, the test runner will restart itself as a new process,
+resuming tests where it left off:
+
+ >>> sys.argv = [testrunner_script, '--tests-pattern', 'sampletests_ntd$']
+ >>> testrunner.run_internal(defaults)
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sample1.sampletests_ntd.Layer:setUp
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: sample1.sampletests_ntd.Layer:setUp
+ tags: zope:layer:sample1.sampletests_ntd.Layer
+ test: sample1.sampletests_ntd.TestSomething.test_something
+ successful: sample1.sampletests_ntd.TestSomething.test_something
+ tags: -zope:layer:sample1.sampletests_ntd.Layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sample1.sampletests_ntd.Layer:tearDown
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ skip: sample1.sampletests_ntd.Layer:tearDown [
+ tearDown not supported
+ ]
+ test: Running in a subprocess.
+ tags: zope:info_suboptimal
+ successful: Running in a subprocess.
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sample2.sampletests_ntd.Layer:setUp
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: sample2.sampletests_ntd.Layer:setUp
+ tags: zope:layer:sample2.sampletests_ntd.Layer
+ test: sample2.sampletests_ntd.TestSomething.test_something
+ successful: sample2.sampletests_ntd.TestSomething.test_something
+ tags: -zope:layer:sample2.sampletests_ntd.Layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sample2.sampletests_ntd.Layer:tearDown
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ skip: sample2.sampletests_ntd.Layer:tearDown [
+ tearDown not supported
+ ]
+ test: Running in a subprocess.
+ tags: zope:info_suboptimal
+ successful: Running in a subprocess.
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sample3.sampletests_ntd.Layer:setUp
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ successful: sample3.sampletests_ntd.Layer:setUp
+ tags: zope:layer:sample3.sampletests_ntd.Layer
+ test: sample3.sampletests_ntd.TestSomething.test_error1
+ error: sample3.sampletests_ntd.TestSomething.test_error1 [ multipart
+ Content-Type: text/x-traceback;charset=utf8,language=python
+ traceback
+ 14F\r
+ <BLANKLINE>
+ Traceback (most recent call last):
+ testrunner-ex/sample3/sampletests_ntd.py", Line NNN, in test_error1
+ raise TypeError("Can we see errors")
+ TypeError: Can we see errors
+ 0\r
+ <BLANKLINE>
+ ]
+ test: sample3.sampletests_ntd.TestSomething.test_error2
+ error: sample3.sampletests_ntd.TestSomething.test_error2 [ multipart
+ Content-Type: text/x-traceback;charset=utf8,language=python
+ traceback
+ 13F\r
+ <BLANKLINE>
+ Traceback (most recent call last):
+ testrunner-ex/sample3/sampletests_ntd.py", Line NNN, in test_error2
+ raise TypeError("I hope so")
+ TypeError: I hope so
+ 0\r
+ <BLANKLINE>
+ ]
+ test: sample3.sampletests_ntd.TestSomething.test_fail1
+ failure: sample3.sampletests_ntd.TestSomething.test_fail1 [ multipart
+ Content-Type: text/x-traceback;charset=utf8,language=python
+ traceback
+ 1AA\r
+ <BLANKLINE>
+ Traceback (most recent call last):
+ testrunner-ex/sample3/sampletests_ntd.py", Line NNN, in test_fail1
+ self.assertEqual(1, 2)
+ AssertionError: 1 != 2
+ 0\r
+ <BLANKLINE>
+ ]
+ test: sample3.sampletests_ntd.TestSomething.test_fail2
+ failure: sample3.sampletests_ntd.TestSomething.test_fail2 [ multipart
+ Content-Type: text/x-traceback;charset=utf8,language=python
+ traceback
+ 1AA\r
+ <BLANKLINE>
+ Traceback (most recent call last):
+ testrunner-ex/sample3/sampletests_ntd.py", Line NNN, in test_fail2
+ self.assertEqual(1, 3)
+ AssertionError: 1 != 3
+ 0\r
+ <BLANKLINE>
+ ]
+ test: sample3.sampletests_ntd.TestSomething.test_something
+ successful: sample3.sampletests_ntd.TestSomething.test_something
+ test: sample3.sampletests_ntd.TestSomething.test_something_else
+ successful: sample3.sampletests_ntd.TestSomething.test_something_else
+ tags: -zope:layer:sample3.sampletests_ntd.Layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ test: sample3.sampletests_ntd.Layer:tearDown
+ tags: zope:layer
+ time: YYYY-MM-DD HH:MM:SS.mmmmmmZ
+ skip: sample3.sampletests_ntd.Layer:tearDown [
+ tearDown not supported
+ ]
+ True
+
+Note that debugging doesn't work when running tests in a subprocess:
+
+ >>> sys.argv = [testrunner_script, '--tests-pattern', 'sampletests_ntd$',
+ ... '-D', ]
+ >>> testrunner.run_internal(defaults)
+ time: 2010-02-10 22:41:25.279692Z
+ test: sample1.sampletests_ntd.Layer:setUp
+ tags: zope:layer
+ time: 2010-02-10 22:41:25.279695Z
+ successful: sample1.sampletests_ntd.Layer:setUp
+ tags: zope:layer:sample1.sampletests_ntd.Layer
+ test: sample1.sampletests_ntd.TestSomething.test_something
+ successful: sample1.sampletests_ntd.TestSomething.test_something
+ tags: -zope:layer:sample1.sampletests_ntd.Layer
+ time: 2010-02-10 22:41:25.310078Z
+ test: sample1.sampletests_ntd.Layer:tearDown
+ tags: zope:layer
+ time: 2010-02-10 22:41:25.310171Z
+ skip: sample1.sampletests_ntd.Layer:tearDown [
+ tearDown not supported
+ ]
+ test: Running in a subprocess.
+ tags: zope:info_suboptimal
+ successful: Running in a subprocess.
+ time: 2010-02-10 22:41:25.753076Z
+ test: sample2.sampletests_ntd.Layer:setUp
+ tags: zope:layer
+ time: 2010-02-10 22:41:25.753079Z
+ successful: sample2.sampletests_ntd.Layer:setUp
+ tags: zope:layer:sample2.sampletests_ntd.Layer
+ test: sample2.sampletests_ntd.TestSomething.test_something
+ successful: sample2.sampletests_ntd.TestSomething.test_something
+ tags: -zope:layer:sample2.sampletests_ntd.Layer
+ time: 2010-02-10 22:41:25.779256Z
+ test: sample2.sampletests_ntd.Layer:tearDown
+ tags: zope:layer
+ time: 2010-02-10 22:41:25.779326Z
+ skip: sample2.sampletests_ntd.Layer:tearDown [
+ tearDown not supported
+ ]
+ test: Running in a subprocess.
+ tags: zope:info_suboptimal
+ successful: Running in a subprocess.
+ time: 2010-02-10 22:41:26.310296Z
+ test: sample3.sampletests_ntd.Layer:setUp
+ tags: zope:layer
+ time: 2010-02-10 22:41:26.310299Z
+ successful: sample3.sampletests_ntd.Layer:setUp
+ tags: zope:layer:sample3.sampletests_ntd.Layer
+ test: sample3.sampletests_ntd.TestSomething.test_error1
+ error: sample3.sampletests_ntd.TestSomething.test_error1 [ multipart
+ Content-Type: text/x-traceback;charset=utf8,language=python
+ traceback
+ 16A\r
+ <BLANKLINE>
+ Traceback (most recent call last):
+ File "/usr/lib/python2.6/unittest.py", line 305, in debug
+ getattr(self, self._testMethodName)()
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample3/sampletests_ntd.py", line 42, in test_error1
+ raise TypeError("Can we see errors")
+ TypeError: Can we see errors
+ 0\r
+ <BLANKLINE>
+ ]
+ test: Can't post-mortem debug when running a layer as a subprocess!
+ tags: zope:error_with_banner
+ successful: Can't post-mortem debug when running a layer as a subprocess!
+ test: sample3.sampletests_ntd.TestSomething.test_error2
+ error: sample3.sampletests_ntd.TestSomething.test_error2 [ multipart
+ Content-Type: text/x-traceback;charset=utf8,language=python
+ traceback
+ 15A\r
+ <BLANKLINE>
+ Traceback (most recent call last):
+ File "/usr/lib/python2.6/unittest.py", line 305, in debug
+ getattr(self, self._testMethodName)()
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample3/sampletests_ntd.py", line 45, in test_error2
+ raise TypeError("I hope so")
+ TypeError: I hope so
+ 0\r
+ <BLANKLINE>
+ ]
+ test: Can't post-mortem debug when running a layer as a subprocess!
+ tags: zope:error_with_banner
+ successful: Can't post-mortem debug when running a layer as a subprocess!
+ test: sample3.sampletests_ntd.TestSomething.test_fail1
+ error: sample3.sampletests_ntd.TestSomething.test_fail1 [ multipart
+ Content-Type: text/x-traceback;charset=utf8,language=python
+ traceback
+ 1C5\r
+ <BLANKLINE>
+ Traceback (most recent call last):
+ File "/usr/lib/python2.6/unittest.py", line 305, in debug
+ getattr(self, self._testMethodName)()
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample3/sampletests_ntd.py", line 48, in test_fail1
+ self.assertEqual(1, 2)
+ File "/usr/lib/python2.6/unittest.py", line 350, in failUnlessEqual
+ (msg or '%r != %r' % (first, second))
+ AssertionError: 1 != 2
+ 0\r
+ <BLANKLINE>
+ ]
+ test: Can't post-mortem debug when running a layer as a subprocess!
+ tags: zope:error_with_banner
+ successful: Can't post-mortem debug when running a layer as a subprocess!
+ test: sample3.sampletests_ntd.TestSomething.test_fail2
+ error: sample3.sampletests_ntd.TestSomething.test_fail2 [ multipart
+ Content-Type: text/x-traceback;charset=utf8,language=python
+ traceback
+ 1C5\r
+ <BLANKLINE>
+ Traceback (most recent call last):
+ File "/usr/lib/python2.6/unittest.py", line 305, in debug
+ getattr(self, self._testMethodName)()
+ File "/home/jml/src/zope.testing/subunit-output-formatter/src/zope/testing/testrunner/testrunner-ex/sample3/sampletests_ntd.py", line 51, in test_fail2
+ self.assertEqual(1, 3)
+ File "/usr/lib/python2.6/unittest.py", line 350, in failUnlessEqual
+ (msg or '%r != %r' % (first, second))
+ AssertionError: 1 != 3
+ 0\r
+ <BLANKLINE>
+ ]
+ test: Can't post-mortem debug when running a layer as a subprocess!
+ tags: zope:error_with_banner
+ successful: Can't post-mortem debug when running a layer as a subprocess!
+ test: sample3.sampletests_ntd.TestSomething.test_something
+ successful: sample3.sampletests_ntd.TestSomething.test_something
+ test: sample3.sampletests_ntd.TestSomething.test_something_else
+ successful: sample3.sampletests_ntd.TestSomething.test_something_else
+ tags: -zope:layer:sample3.sampletests_ntd.Layer
+ time: 2010-02-10 22:41:26.340878Z
+ test: sample3.sampletests_ntd.Layer:tearDown
+ tags: zope:layer
+ time: 2010-02-10 22:41:26.340945Z
+ skip: sample3.sampletests_ntd.Layer:tearDown [
+ tearDown not supported
+ ]
+ True
Modified: zope.testing/trunk/src/zope/testing/testrunner/tests.py
===================================================================
--- zope.testing/trunk/src/zope/testing/testrunner/tests.py 2010-03-12 14:38:03 UTC (rev 109926)
+++ zope.testing/trunk/src/zope/testing/testrunner/tests.py 2010-03-12 14:47:00 UTC (rev 109927)
@@ -103,6 +103,8 @@
(re.compile(r'\d+[.]\d\d\d seconds'), 'N.NNN seconds'),
(re.compile(r'\d+[.]\d\d\d s'), 'N.NNN s'),
(re.compile(r'\d+[.]\d\d\d{'), 'N.NNN{'),
+ (re.compile(r'\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\.\d+'),
+ 'YYYY-MM-DD HH:MM:SS.mmmmmm'),
(re.compile('( |")[^\n]+testrunner-ex'), r'\1testrunner-ex'),
(re.compile('( |")[^\n]+testrunner.py'), r'\1testrunner.py'),
(re.compile(r'> [^\n]*(doc|unit)test[.]py\(\d+\)'),
@@ -251,4 +253,29 @@
checker=checker,
)
)
+
+ try:
+ import subunit
+ except ImportError:
+ suites.append(
+ doctest.DocFileSuite(
+ 'testrunner-subunit-err.txt',
+ setUp=setUp, tearDown=tearDown,
+ optionflags=doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE,
+ checker=checker))
+ else:
+ suites.append(
+ doctest.DocFileSuite(
+ 'testrunner-subunit.txt',
+ setUp=setUp, tearDown=tearDown,
+ optionflags=doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE,
+ checker=checker))
+ if hasattr(sys, 'gettotalrefcount'):
+ suites.append(
+ doctest.DocFileSuite(
+ 'testrunner-subunit-leaks.txt',
+ setUp=setUp, tearDown=tearDown,
+ optionflags=doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE,
+ checker=checker))
+
return unittest.TestSuite(suites)