[Zope3-checkins] CVS: Zope3/src/zope/app/advanced/acquisition/tests - BasicStorage.py:1.2 ConflictResolution.py:1.2 Corruption.py:1.2 HistoryStorage.py:1.2 IteratorStorage.py:1.2 MTStorage.py:1.2 MinPO.py:1.2 PackableStorage.py:1.2 ReadOnlyStorage.py:1.2 RecoveryStorage.py:1.2 RevisionStorage.py:1.2 StorageTestBase.py:1.2 Synchronization.py:1.2 TransactionalUndoStorage.py:1.2 TransactionalUndoVersionStorage.py:1.2 VersionStorage.py:1.2 dangle.py:1.2 speed.py:1.2 testActivityMonitor.py:1.2 testCache.py:1.2 testConfig.py:1.2 testDB.py:1.2 testDemoStorage.py:1.2 testFileStorage.py:1.2 testMappingStorage.py:1.2 testPersistentList.py:1.2 testPersistentMapping.py:1.2 testRecover.py:1.2 testTimeStamp.py:1.2 testTransaction.py:1.2 testUtils.py:1.2 testZODB.py:1.2 testfsIndex.py:1.2

Sidnei da Silva sidnei at awkly.org
Thu Apr 1 13:34:41 EST 2004


Update of /cvs-repository/Zope3/src/zope/app/advanced/acquisition/tests
In directory cvs.zope.org:/tmp/cvs-serv24908

Modified Files:
	BasicStorage.py ConflictResolution.py Corruption.py 
	HistoryStorage.py IteratorStorage.py MTStorage.py MinPO.py 
	PackableStorage.py ReadOnlyStorage.py RecoveryStorage.py 
	RevisionStorage.py StorageTestBase.py Synchronization.py 
	TransactionalUndoStorage.py TransactionalUndoVersionStorage.py 
	VersionStorage.py dangle.py speed.py testActivityMonitor.py 
	testCache.py testConfig.py testDB.py testDemoStorage.py 
	testFileStorage.py testMappingStorage.py testPersistentList.py 
	testPersistentMapping.py testRecover.py testTimeStamp.py 
	testTransaction.py testUtils.py testZODB.py testfsIndex.py 
Log Message:
Fix some typos


=== Zope3/src/zope/app/advanced/acquisition/tests/BasicStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/BasicStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/BasicStorage.py	Thu Apr  1 13:34:39 2004
@@ -19,20 +19,20 @@
 All storages should be able to pass these tests.
 """
 
-from ZODB.Transaction import Transaction
 from ZODB import POSException
-
 from ZODB.tests.MinPO import MinPO
 from ZODB.tests.StorageTestBase \
      import zodb_unpickle, zodb_pickle, handle_serials
 
+import transaction
+
 ZERO = '\0'*8
 
 
 
 class BasicStorage:
     def checkBasics(self):
-        t = Transaction()
+        t = transaction.Transaction()
         self._storage.tpc_begin(t)
         # This should simply return
         self._storage.tpc_begin(t)
@@ -44,10 +44,10 @@
         self.assertRaises(
             POSException.StorageTransactionError,
             self._storage.store,
-            0, 0, 0, 0, Transaction())
+            0, 0, 0, 0, transaction.Transaction())
 
         try:
-            self._storage.abortVersion('dummy', Transaction())
+            self._storage.abortVersion('dummy', transaction.Transaction())
         except (POSException.StorageTransactionError,
                 POSException.VersionCommitError):
             pass # test passed ;)
@@ -55,7 +55,7 @@
             assert 0, "Should have failed, invalid transaction."
 
         try:
-            self._storage.commitVersion('dummy', 'dummer', Transaction())
+            self._storage.commitVersion('dummy', 'dummer', transaction.Transaction())
         except (POSException.StorageTransactionError,
                 POSException.VersionCommitError):
             pass # test passed ;)
@@ -65,13 +65,13 @@
         self.assertRaises(
             POSException.StorageTransactionError,
             self._storage.store,
-            0, 1, 2, 3, Transaction())
+            0, 1, 2, 3, transaction.Transaction())
         self._storage.tpc_abort(t)
 
     def checkSerialIsNoneForInitialRevision(self):
         eq = self.assertEqual
         oid = self._storage.new_oid()
-        txn = Transaction()
+        txn = transaction.Transaction()
         self._storage.tpc_begin(txn)
         # Use None for serial.  Don't use _dostore() here because that coerces
         # serial=None to serial=ZERO.
@@ -85,9 +85,9 @@
         eq(value, MinPO(11))
         eq(revid, newrevid)
 
-    def checkNonVersionStore(self, oid=None, revid=None, version=None):
+    def checkNonVersionStore(self):
         revid = ZERO
-        newrevid = self._dostore(revid=revid)
+        newrevid = self._dostore(revid=None)
         # Finish the transaction.
         self.assertNotEqual(newrevid, revid)
 
@@ -113,14 +113,14 @@
     def checkConflicts(self):
         oid = self._storage.new_oid()
         revid1 = self._dostore(oid, data=MinPO(11))
-        revid2 = self._dostore(oid, revid=revid1, data=MinPO(12))
+        self._dostore(oid, revid=revid1, data=MinPO(12))
         self.assertRaises(POSException.ConflictError,
                           self._dostore,
                           oid, revid=revid1, data=MinPO(13))
 
     def checkWriteAfterAbort(self):
         oid = self._storage.new_oid()
-        t = Transaction()
+        t = transaction.Transaction()
         self._storage.tpc_begin(t)
         self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
         # Now abort this transaction
@@ -133,7 +133,7 @@
         oid1 = self._storage.new_oid()
         revid1 = self._dostore(oid=oid1, data=MinPO(-2))
         oid = self._storage.new_oid()
-        t = Transaction()
+        t = transaction.Transaction()
         self._storage.tpc_begin(t)
         self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
         # Now abort this transaction
@@ -176,7 +176,7 @@
 
     def checkTwoArgBegin(self):
         # XXX how standard is three-argument tpc_begin()?
-        t = Transaction()
+        t = transaction.Transaction()
         tid = '\0\0\0\0\0psu'
         self._storage.tpc_begin(t, tid)
         oid = self._storage.new_oid()
@@ -205,7 +205,7 @@
 
     def checkNote(self):
         oid = self._storage.new_oid()
-        t = Transaction()
+        t = transaction.Transaction()
         self._storage.tpc_begin(t)
         t.note('this is a test')
         self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
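
The BasicStorage hunks replace ZODB.Transaction.Transaction with the
standalone transaction module throughout. For reference, a minimal sketch
of the hand-driven two-phase commit these tests exercise, written against
the new spelling; DemoStorage is used here only as a convenient stand-in
for whatever storage is under test:

    import transaction
    from ZODB.DemoStorage import DemoStorage
    from ZODB.tests.MinPO import MinPO
    from ZODB.tests.StorageTestBase import zodb_pickle

    ZERO = '\0'*8

    def store_one(storage, value):
        # Begin a transaction, store one new object, then vote and
        # finish, mirroring what checkBasics drives by hand.
        t = transaction.Transaction()
        oid = storage.new_oid()
        storage.tpc_begin(t)
        storage.store(oid, ZERO, zodb_pickle(MinPO(value)), '', t)
        storage.tpc_vote(t)
        storage.tpc_finish(t)
        return oid

    storage = DemoStorage()  # assumption: any conforming test storage works
    oid = store_one(storage, 5)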


=== Zope3/src/zope/app/advanced/acquisition/tests/ConflictResolution.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/ConflictResolution.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/ConflictResolution.py	Thu Apr  1 13:34:39 2004
@@ -13,17 +13,12 @@
 ##############################################################################
 """Tests for application-level conflict resolution."""
 
-from ZODB.Transaction import Transaction
 from ZODB.POSException import ConflictError, UndoError
-from Persistence import Persistent
+from persistent import Persistent
+from transaction import Transaction
 
 from ZODB.tests.StorageTestBase import zodb_unpickle, zodb_pickle
 
-import sys
-import types
-from cPickle import Pickler, Unpickler
-from cStringIO import StringIO
-
 class PCounter(Persistent):
 
     _value = 0
@@ -94,14 +89,17 @@
         # pickle is to commit two different transactions relative to
         # revid1 that add two to _value.
         revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
-        self.assertRaises(ConflictError,
-                          self._dostoreNP,
-                          oid, revid=revid1, data=zodb_pickle(obj))
+        try:
+            self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
+        except ConflictError, err:
+            self.assert_("PCounter2" in str(err))
+        else:
+            self.fail("Expected ConflictError")
 
     def checkZClassesArentResolved(self):
-        from ZODB.ConflictResolution import bad_class
+        from ZODB.ConflictResolution import find_global, BadClassName
         dummy_class_tuple = ('*foobar', ())
-        assert bad_class(dummy_class_tuple) == 1
+        self.assertRaises(BadClassName, find_global, '*foobar', ())
 
     def checkBuggyResolve1(self):
         obj = PCounter3()
@@ -159,7 +157,7 @@
         tid = info[1]['id']
         t = Transaction()
         self._storage.tpc_begin(t)
-        self._storage.transactionalUndo(tid, t)
+        self._storage.undo(tid, t)
         self._storage.tpc_finish(t)
 
     def checkUndoUnresolvable(self):
@@ -180,6 +178,6 @@
         tid = info[1]['id']
         t = Transaction()
         self._storage.tpc_begin(t)
-        self.assertRaises(UndoError, self._storage.transactionalUndo,
+        self.assertRaises(UndoError, self._storage.undo,
                           tid, t)
         self._storage.tpc_abort(t)
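
The resolution test now catches ConflictError itself so it can check that
the unresolvable class is named in the error text, instead of relying on
assertRaises. The same idiom outside a TestCase (a sketch; attempt_store
stands in for the test's _dostoreNP call and is hypothetical):

    from ZODB.POSException import ConflictError

    def conflict_mentions(attempt_store, classname):
        # Run a store expected to conflict; report whether the class
        # that failed to resolve shows up in the error message.
        try:
            attempt_store()
        except ConflictError, err:
            return classname in str(err)
        return False  # no conflict was raised at all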


=== Zope3/src/zope/app/advanced/acquisition/tests/Corruption.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/Corruption.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/Corruption.py	Thu Apr  1 13:34:39 2004
@@ -17,10 +17,9 @@
 import random
 import stat
 import tempfile
-import unittest
 
 import ZODB, ZODB.FileStorage
-from StorageTestBase import StorageTestBase, removefs
+from StorageTestBase import StorageTestBase
 
 class FileStorageCorruptTests(StorageTestBase):
 
@@ -30,7 +29,7 @@
 
     def tearDown(self):
         self._storage.close()
-        removefs(self.path)
+        self._storage.cleanup()
 
     def _do_stores(self):
         oids = []


=== Zope3/src/zope/app/advanced/acquisition/tests/HistoryStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/HistoryStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/HistoryStorage.py	Thu Apr  1 13:34:39 2004
@@ -17,11 +17,8 @@
 all these tests.
 """
 
-from ZODB.Transaction import Transaction
 from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase import zodb_unpickle
-
-
+from transaction import Transaction
 
 class HistoryStorage:
     def checkSimpleHistory(self):
@@ -36,40 +33,40 @@
         h = self._storage.history(oid, size=1)
         eq(len(h), 1)
         d = h[0]
-        eq(d['serial'], revid3)
+        eq(d['tid'], revid3)
         eq(d['version'], '')
         # Try to get 2 historical revisions
         h = self._storage.history(oid, size=2)
         eq(len(h), 2)
         d = h[0]
-        eq(d['serial'], revid3)
+        eq(d['tid'], revid3)
         eq(d['version'], '')
         d = h[1]
-        eq(d['serial'], revid2)
+        eq(d['tid'], revid2)
         eq(d['version'], '')
         # Try to get all 3 historical revisions
         h = self._storage.history(oid, size=3)
         eq(len(h), 3)
         d = h[0]
-        eq(d['serial'], revid3)
+        eq(d['tid'], revid3)
         eq(d['version'], '')
         d = h[1]
-        eq(d['serial'], revid2)
+        eq(d['tid'], revid2)
         eq(d['version'], '')
         d = h[2]
-        eq(d['serial'], revid1)
+        eq(d['tid'], revid1)
         eq(d['version'], '')
         # There should be no more than 3 revisions
         h = self._storage.history(oid, size=4)
         eq(len(h), 3)
         d = h[0]
-        eq(d['serial'], revid3)
+        eq(d['tid'], revid3)
         eq(d['version'], '')
         d = h[1]
-        eq(d['serial'], revid2)
+        eq(d['tid'], revid2)
         eq(d['version'], '')
         d = h[2]
-        eq(d['serial'], revid1)
+        eq(d['tid'], revid1)
         eq(d['version'], '')
 
     def checkVersionHistory(self):
@@ -94,22 +91,22 @@
         h = self._storage.history(oid, version, 100)
         eq(len(h), 6)
         d = h[0]
-        eq(d['serial'], revid6)
+        eq(d['tid'], revid6)
         eq(d['version'], version)
         d = h[1]
-        eq(d['serial'], revid5)
+        eq(d['tid'], revid5)
         eq(d['version'], version)
         d = h[2]
-        eq(d['serial'], revid4)
+        eq(d['tid'], revid4)
         eq(d['version'], version)
         d = h[3]
-        eq(d['serial'], revid3)
+        eq(d['tid'], revid3)
         eq(d['version'], '')
         d = h[4]
-        eq(d['serial'], revid2)
+        eq(d['tid'], revid2)
         eq(d['version'], '')
         d = h[5]
-        eq(d['serial'], revid1)
+        eq(d['tid'], revid1)
         eq(d['version'], '')
 
     def checkHistoryAfterVersionCommit(self):
@@ -132,7 +129,7 @@
         # Now commit the version
         t = Transaction()
         self._storage.tpc_begin(t)
-        oids = self._storage.commitVersion(version, '', t)
+        self._storage.commitVersion(version, '', t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
         # After consultation with Jim, we agreed that the semantics of
@@ -151,25 +148,25 @@
         h = self._storage.history(oid, version, 100)
         eq(len(h), 7)
         d = h[0]
-        eq(d['serial'], revid7)
+        eq(d['tid'], revid7)
         eq(d['version'], '')
         d = h[1]
-        eq(d['serial'], revid6)
+        eq(d['tid'], revid6)
         eq(d['version'], version)
         d = h[2]
-        eq(d['serial'], revid5)
+        eq(d['tid'], revid5)
         eq(d['version'], version)
         d = h[3]
-        eq(d['serial'], revid4)
+        eq(d['tid'], revid4)
         eq(d['version'], version)
         d = h[4]
-        eq(d['serial'], revid3)
+        eq(d['tid'], revid3)
         eq(d['version'], '')
         d = h[5]
-        eq(d['serial'], revid2)
+        eq(d['tid'], revid2)
         eq(d['version'], '')
         d = h[6]
-        eq(d['serial'], revid1)
+        eq(d['tid'], revid1)
         eq(d['version'], '')
 
     def checkHistoryAfterVersionAbort(self):
@@ -192,7 +189,7 @@
         # Now commit the version
         t = Transaction()
         self._storage.tpc_begin(t)
-        oids = self._storage.abortVersion(version, t)
+        self._storage.abortVersion(version, t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
         # After consultation with Jim, we agreed that the semantics of
@@ -211,23 +208,23 @@
         h = self._storage.history(oid, version, 100)
         eq(len(h), 7)
         d = h[0]
-        eq(d['serial'], revid7)
+        eq(d['tid'], revid7)
         eq(d['version'], '')
         d = h[1]
-        eq(d['serial'], revid6)
+        eq(d['tid'], revid6)
         eq(d['version'], version)
         d = h[2]
-        eq(d['serial'], revid5)
+        eq(d['tid'], revid5)
         eq(d['version'], version)
         d = h[3]
-        eq(d['serial'], revid4)
+        eq(d['tid'], revid4)
         eq(d['version'], version)
         d = h[4]
-        eq(d['serial'], revid3)
+        eq(d['tid'], revid3)
         eq(d['version'], '')
         d = h[5]
-        eq(d['serial'], revid2)
+        eq(d['tid'], revid2)
         eq(d['version'], '')
         d = h[6]
-        eq(d['serial'], revid1)
+        eq(d['tid'], revid1)
         eq(d['version'], '')
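
All of the HistoryStorage edits follow a single rename: the transaction id
in a history record now lives under 'tid' rather than 'serial'. A small
sketch of reading records back with the new key, assuming any storage that
implements history():

    def recent_history(storage, oid, size=3):
        # Each entry is a dict; after this change the transaction id
        # is found under 'tid' instead of 'serial'.
        return [(d['tid'], d['version'])
                for d in storage.history(oid, size=size)]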


=== Zope3/src/zope/app/advanced/acquisition/tests/IteratorStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/IteratorStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/IteratorStorage.py	Thu Apr  1 13:34:39 2004
@@ -20,8 +20,8 @@
 from ZODB.tests.MinPO import MinPO
 from ZODB.tests.StorageTestBase import zodb_pickle, zodb_unpickle
 from ZODB.utils import U64, p64
-from ZODB.Transaction import Transaction
 
+from transaction import Transaction
 
 class IteratorCompare:
 
@@ -33,7 +33,7 @@
             eq(reciter.tid, revid)
             for rec in reciter:
                 eq(rec.oid, oid)
-                eq(rec.serial, revid)
+                eq(rec.tid, revid)
                 eq(rec.version, '')
                 eq(zodb_unpickle(rec.data), MinPO(val))
                 val = val + 1
@@ -100,7 +100,7 @@
         # Undo the creation of the object, rendering it a zombie
         t = Transaction()
         self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
+        oids = self._storage.undo(tid, t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
         # Now attempt to iterate over the storage
@@ -147,6 +147,20 @@
         finally:
             self._storage.tpc_finish(t)
 
+    def checkLoadEx(self):
+        oid = self._storage.new_oid()
+        self._dostore(oid, data=42)
+        data, tid, ver = self._storage.loadEx(oid, "")
+        self.assertEqual(zodb_unpickle(data), MinPO(42))
+        match = False
+        for txn in self._storage.iterator():
+            for rec in txn:
+                if rec.oid == oid and rec.tid == tid:
+                    self.assertEqual(txn.tid, tid)
+                    match = True
+        if not match:
+            self.fail("Could not find transaction with matching id")
+
 
 class ExtendedIteratorStorage(IteratorCompare):
 
@@ -202,7 +216,7 @@
             eq(txn1._extension,  txn2._extension)
             for rec1, rec2 in zip(txn1, txn2):
                 eq(rec1.oid,     rec2.oid)
-                eq(rec1.serial,  rec2.serial)
+                eq(rec1.tid,  rec2.tid)
                 eq(rec1.version, rec2.version)
                 eq(rec1.data,    rec2.data)
             # Make sure there are no more records left in rec1 and rec2,
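
The new checkLoadEx pairs loadEx() with the storage iterator and matches
records on rec.tid (formerly rec.serial). The lookup it performs, pulled
out as a helper (a sketch; assumes the storage provides both loadEx() and
iterator()):

    def txn_for_current_revision(storage, oid):
        # Find the transaction record that wrote the current revision
        # of oid, or return None if it cannot be found.
        data, tid, version = storage.loadEx(oid, "")
        for txn in storage.iterator():
            for rec in txn:
                if rec.oid == oid and rec.tid == tid:
                    return txn
        return None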


=== Zope3/src/zope/app/advanced/acquisition/tests/MTStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/MTStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/MTStorage.py	Thu Apr  1 13:34:39 2004
@@ -3,13 +3,13 @@
 import threading
 import time
 
-import ZODB
-from PersistentMapping import PersistentMapping
+from persistent.mapping import PersistentMapping
+import transaction
 
+import ZODB
 from ZODB.tests.StorageTestBase \
-     import StorageTestBase, zodb_pickle, zodb_unpickle, handle_serials
+     import zodb_pickle, zodb_unpickle, handle_serials
 from ZODB.tests.MinPO import MinPO
-from ZODB.Transaction import Transaction
 from ZODB.POSException import ConflictError
 
 SHORT_DELAY = 0.01
@@ -30,10 +30,8 @@
     method.
     """
 
-    def __init__(self, test):
+    def __init__(self):
         threading.Thread.__init__(self)
-        self.test = test
-        self._fail = None
         self._exc_info = None
 
     def run(self):
@@ -42,9 +40,6 @@
         except:
             self._exc_info = sys.exc_info()
 
-    def fail(self, msg=""):
-        self._test.fail(msg)
-
     def join(self, timeout=None):
         threading.Thread.join(self, timeout)
         if self._exc_info:
@@ -55,7 +50,7 @@
     __super_init = TestThread.__init__
 
     def __init__(self, db, test, commits=10, delay=SHORT_DELAY):
-        self.__super_init(test)
+        self.__super_init()
         self.setDaemon(1)
         self.db = db
         self.test = test
@@ -64,6 +59,7 @@
 
     def runtest(self):
         conn = self.db.open()
+        conn.sync()
         root = conn.root()
         d = self.get_thread_dict(root)
         if d is None:
@@ -102,7 +98,7 @@
     __super_init = TestThread.__init__
 
     def __init__(self, storage, test, commits=10, delay=SHORT_DELAY):
-        self.__super_init(test)
+        self.__super_init()
         self.storage = storage
         self.test = test
         self.commits = commits
@@ -131,7 +127,7 @@
 
     def dostore(self, i):
         data = zodb_pickle(MinPO((self.getName(), i)))
-        t = Transaction()
+        t = transaction.Transaction()
         oid = self.oid()
         self.pause()
 
@@ -154,9 +150,12 @@
 class ExtStorageClientThread(StorageClientThread):
 
     def runtest(self):
-        # pick some other storage ops to execute
-        ops = [getattr(self, meth) for meth in dir(ExtStorageClientThread)
-               if meth.startswith('do_')]
+        # pick some other storage ops to execute, depending in part
+        # on the features provided by the storage.
+        names = ["do_load", "do_modifiedInVersion"]
+        if self.storage.supportsUndo():
+            names += ["do_loadSerial", "do_undoLog", "do_iterator"]
+        ops = [getattr(self, meth) for meth in names]
+        assert ops, "Didn't find any storage ops in %s" % self.storage
         # do a store to guarantee there's at least one oid in self.oids
         self.dostore(0)
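
TestThread no longer needs a reference to the test case: run() just
records sys.exc_info() and join() re-raises it in the calling thread, so
a failure in a worker still fails the test. The pattern on its own (a
sketch of the same idea, not the checked-in class):

    import sys
    import threading

    class FailureCarryingThread(threading.Thread):
        # Subclasses implement runtest(); any exception it raises is
        # re-raised in whichever thread calls join().
        def __init__(self):
            threading.Thread.__init__(self)
            self._exc_info = None

        def run(self):
            try:
                self.runtest()
            except:
                self._exc_info = sys.exc_info()

        def join(self, timeout=None):
            threading.Thread.join(self, timeout)
            if self._exc_info:
                raise self._exc_info[0], self._exc_info[1], \
                      self._exc_info[2]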


=== Zope3/src/zope/app/advanced/acquisition/tests/MinPO.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/MinPO.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/MinPO.py	Thu Apr  1 13:34:39 2004
@@ -13,7 +13,7 @@
 ##############################################################################
 """A minimal persistent object to use for tests"""
 
-from Persistence import Persistent
+from persistent import Persistent
 
 class MinPO(Persistent):
     def __init__(self, value=None):


=== Zope3/src/zope/app/advanced/acquisition/tests/PackableStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/PackableStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/PackableStorage.py	Thu Apr  1 13:34:39 2004
@@ -25,16 +25,17 @@
 except ImportError:
     from StringIO import StringIO
 
-import threading
 import time
 
 from ZODB import DB
-from Persistence import Persistent
-from ZODB.referencesf import referencesf
+from persistent import Persistent
+from persistent.mapping import PersistentMapping
+from ZODB.serialize import referencesf
 from ZODB.tests.MinPO import MinPO
 from ZODB.tests.StorageTestBase import snooze
 from ZODB.POSException import ConflictError, StorageError
-from ZODB.PersistentMapping import PersistentMapping
+
+from ZODB.tests.MTStorage import TestThread
 
 ZERO = '\0'*8
 
@@ -122,18 +123,14 @@
             return u.load()
         return loads
 
-
-
-class PackableStorage(PackableStorageBase):
     def _initroot(self):
         try:
             self._storage.load(ZERO, '')
         except KeyError:
-            import PersistentMapping
-            from ZODB.Transaction import Transaction
+            from transaction import Transaction
             file = StringIO()
             p = cPickle.Pickler(file, 1)
-            p.dump((PersistentMapping.PersistentMapping, None))
+            p.dump((PersistentMapping, None))
             p.dump({'_container': {}})
             t=Transaction()
             t.description='initial database creation'
@@ -142,6 +139,8 @@
             self._storage.tpc_vote(t)
             self._storage.tpc_finish(t)
 
+class PackableStorage(PackableStorageBase):
+
     def checkPackEmptyStorage(self):
         self._storage.pack(time.time(), referencesf)
 
@@ -153,6 +152,159 @@
         self._initroot()
         self._storage.pack(time.time() - 10000, referencesf)
 
+    def _PackWhileWriting(self, pack_now):
+        # A storage should allow some reading and writing during
+        # a pack.  This test attempts to exercise locking code
+        # in the storage to test that it is safe.  It generates
+        # a lot of revisions, so that pack takes a long time.
+
+        db = DB(self._storage)
+        conn = db.open()
+        root = conn.root()
+
+        for i in range(10):
+            root[i] = MinPO(i)
+        get_transaction().commit()
+
+        snooze()
+        packt = time.time()
+
+        choices = range(10)
+        for dummy in choices:
+            for i in choices:
+                root[i].value = MinPO(i)
+                get_transaction().commit()
+
+        # How many client threads should we run, and how long should we
+        # wait for them to finish?  Hard to say.  Running 4 threads and
+        # waiting 30 seconds too often left a thread still alive on Tim's
+        # Win98SE box, during ZEO flavors of this test.  Those tend to
+        # run one thread at a time to completion, and take about 10 seconds
+        # per thread.  There doesn't appear to be a compelling reason to
+        # run that many threads.  Running 3 threads and waiting up to a
+        # minute seems to work well in practice.  The ZEO tests normally
+        # finish faster than that, and the non-ZEO tests very much faster
+        # than that.
+        NUM_LOOP_TRIP = 50
+        timer = ElapsedTimer(time.time())
+        threads = [ClientThread(db, choices, NUM_LOOP_TRIP, timer, i)
+                   for i in range(3)]
+        for t in threads:
+            t.start()
+
+        if pack_now:
+            db.pack(time.time())
+        else:
+            db.pack(packt)
+
+        for t in threads:
+            t.join(60)
+        liveness = [t.isAlive() for t in threads]
+        if True in liveness:
+            # They should have finished by now.
+            print 'Liveness:', liveness
+            # Combine the outcomes, and sort by start time.
+            outcomes = []
+            for t in threads:
+                outcomes.extend(t.outcomes)
+            # each outcome list has as many of these as a loop trip got thru:
+            #     thread_id
+            #     elapsed millis at loop top
+            #     elapsed millis at attempt to assign to self.root[index]
+            #     index into self.root getting replaced
+            #     elapsed millis when outcome known
+            #     'OK' or 'Conflict'
+            #     True if we got beyond this line, False if it raised an
+            #         exception (one possible Conflict cause):
+            #             self.root[index].value = MinPO(j)
+            def cmp_by_time(a, b):
+                return cmp((a[1], a[0]), (b[1], b[0]))
+            outcomes.sort(cmp_by_time)
+            counts = [0] * 4
+            for outcome in outcomes:
+                n = len(outcome)
+                assert n >= 2
+                tid = outcome[0]
+                print 'tid:%d top:%5d' % (tid, outcome[1]),
+                if n > 2:
+                    print 'commit:%5d' % outcome[2],
+                    if n > 3:
+                        print 'index:%2d' % outcome[3],
+                        if n > 4:
+                            print 'known:%5d' % outcome[4],
+                            if n > 5:
+                                print '%8s' % outcome[5],
+                                if n > 6:
+                                    print 'assigned:%5s' % outcome[6],
+                counts[tid] += 1
+                if counts[tid] == NUM_LOOP_TRIP:
+                    print 'thread %d done' % tid,
+                print
+
+            self.fail('a thread is still alive')
+
+        # Iterate over the storage to make sure it's sane, but not every
+        # storage supports iterators.
+        if not hasattr(self._storage, "iterator"):
+            return
+
+        it = self._storage.iterator()
+        for txn in it:
+            for data in txn:
+                pass
+        it.close()
+
+    def checkPackWhileWriting(self):
+        self._PackWhileWriting(pack_now=False)
+
+    def checkPackNowWhileWriting(self):
+        self._PackWhileWriting(pack_now=True)
+
+    def checkPackLotsWhileWriting(self):
+        # This is like the other pack-while-writing tests, except it packs
+        # repeatedly until the client thread is done.  At the time it was
+        # introduced, it reliably provoked
+        #     CorruptedError:  ... transaction with checkpoint flag set
+        # in the ZEO flavor of the FileStorage tests.
+
+        db = DB(self._storage)
+        conn = db.open()
+        root = conn.root()
+
+        choices = range(10)
+        for i in choices:
+            root[i] = MinPO(i)
+        get_transaction().commit()
+
+        snooze()
+        packt = time.time()
+
+        for dummy in choices:
+            for i in choices:
+                root[i].value = MinPO(i)
+                get_transaction().commit()
+
+        NUM_LOOP_TRIP = 100
+        timer = ElapsedTimer(time.time())
+        thread = ClientThread(db, choices, NUM_LOOP_TRIP, timer, 0)
+        thread.start()
+        while thread.isAlive():
+            db.pack(packt)
+            snooze()
+            packt = time.time()
+        thread.join()
+
+        # Iterate over the storage to make sure it's sane.
+        if not hasattr(self._storage, "iterator"):
+            return
+        it = self._storage.iterator()
+        for txn in it:
+            for data in txn:
+                pass
+        it.close()
+
+class PackableUndoStorage(PackableStorageBase):
+
     def checkPackAllRevisions(self):
         self._initroot()
         eq = self.assertEqual
@@ -382,60 +534,6 @@
 
         eq(root['obj'].value, 7)
 
-    def _PackWhileWriting(self, pack_now=0):
-        # A storage should allow some reading and writing during
-        # a pack.  This test attempts to exercise locking code
-        # in the storage to test that it is safe.  It generates
-        # a lot of revisions, so that pack takes a long time.
-
-        db = DB(self._storage)
-        conn = db.open()
-        root = conn.root()
-
-        for i in range(10):
-            root[i] = MinPO(i)
-        get_transaction().commit()
-
-        snooze()
-        packt = time.time()
-
-        for j in range(10):
-            for i in range(10):
-                root[i].value = MinPO(i)
-                get_transaction().commit()
-
-        threads = [ClientThread(db) for i in range(4)]
-        for t in threads:
-            t.start()
-
-        if pack_now:
-            db.pack(time.time())
-        else:
-            db.pack(packt)
-
-        for t in threads:
-            t.join(30)
-        for t in threads:
-            t.join(1)
-            self.assert_(not t.isAlive())
-
-        # Iterate over the storage to make sure it's sane, but not every
-        # storage supports iterators.
-        if not hasattr(self._storage, "iterator"):
-            return
-
-        iter = self._storage.iterator()
-        for txn in iter:
-            for data in txn:
-                pass
-        iter.close()
-
-    def checkPackWhileWriting(self):
-        self._PackWhileWriting(pack_now=0)
-
-    def checkPackNowWhileWriting(self):
-        self._PackWhileWriting(pack_now=1)
-
     def checkRedundantPack(self):
         # It is an error to perform a pack with a packtime earlier
         # than a previous packtime.  The storage can't do a full
@@ -482,8 +580,6 @@
 
     def checkPackUndoLog(self):
         self._initroot()
-        eq = self.assertEqual
-        raises = self.assertRaises
         # Create a `persistent' object
         obj = self._newobj()
         oid = obj.getoid()
@@ -494,9 +590,9 @@
         snooze()
         packtime = time.time()
         snooze()
-        revid2 = self._dostoreNP(oid, revid=revid1, data=pickle.dumps(obj))
+        self._dostoreNP(oid, revid=revid1, data=pickle.dumps(obj))
         # Now pack the first transaction
-        self.assertEqual(3,len(self._storage.undoLog()))
+        self.assertEqual(3, len(self._storage.undoLog()))
         self._storage.pack(packtime, referencesf)
         # The undo log contains only the most recent transaction
         self.assertEqual(1,len(self._storage.undoLog()))
@@ -533,12 +629,12 @@
         revid13 = self._dostoreNP(oid1, revid=revid11,
                                   data=pickle.dumps(obj1), description="1-3")
         obj1.value = 4
-        revid14 = self._dostoreNP(oid1, revid=revid13,
-                                  data=pickle.dumps(obj1), description="1-4")
+        self._dostoreNP(oid1, revid=revid13,
+                        data=pickle.dumps(obj1), description="1-4")
         # Commit one revision of the second object
         obj2.value = 5
-        revid25 = self._dostoreNP(oid2, revid=revid22,
-                                  data=pickle.dumps(obj2), description="2-5")
+        self._dostoreNP(oid2, revid=revid22,
+                        data=pickle.dumps(obj2), description="2-5")
         # Now pack
         self.assertEqual(6,len(self._storage.undoLog()))
         print '\ninitial undoLog was'
@@ -549,16 +645,55 @@
         for r in self._storage.undoLog(): print r
         # what can we assert about that?
 
-class ClientThread(threading.Thread):
 
-    def __init__(self, db):
-        threading.Thread.__init__(self)
-        self.root = db.open().root()
+# A number of these threads are kicked off by _PackWhileWriting().  Their
+# purpose is to abuse the database passed to the constructor with lots of
+# random write activity while the main thread is packing it.
+class ClientThread(TestThread):
 
-    def run(self):
-        for j in range(50):
+    def __init__(self, db, choices, loop_trip, timer, thread_id):
+        TestThread.__init__(self)
+        self.root = db.open().root()
+        self.choices = choices
+        self.loop_trip = loop_trip
+        self.millis = timer.elapsed_millis
+        self.thread_id = thread_id
+        # list of lists; each list has as many of these as a loop trip
+        # got thru:
+        #     thread_id
+        #     elapsed millis at loop top
+        #     elapsed millis at attempt
+        #     index into self.root getting replaced
+        #     elapsed millis when outcome known
+        #     'OK' or 'Conflict'
+        #     True if we got beyond this line, False if it raised an exception:
+        #          self.root[index].value = MinPO(j)
+        self.outcomes = []
+
+    def runtest(self):
+        from random import choice
+
+        for j in range(self.loop_trip):
+            assign_worked = False
+            alist = [self.thread_id, self.millis()]
+            self.outcomes.append(alist)
             try:
-                self.root[j % 10].value = MinPO(j)
+                index = choice(self.choices)
+                alist.extend([self.millis(), index])
+                self.root[index].value = MinPO(j)
+                assign_worked = True
                 get_transaction().commit()
+                alist.append(self.millis())
+                alist.append('OK')
             except ConflictError:
+                alist.append(self.millis())
+                alist.append('Conflict')
                 get_transaction().abort()
+            alist.append(assign_worked)
+
+class ElapsedTimer:
+    def __init__(self, start_time):
+        self.start_time = start_time
+
+    def elapsed_millis(self):
+        return int((time.time() - self.start_time) * 1000)
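
Stripped of the outcome bookkeeping, the new pack-while-writing machinery
reduces to: start writer threads that keep committing, pack while they
run, then join them and check that none is still alive. Roughly (a
sketch; make_writer stands in for building a ClientThread and is
hypothetical):

    import time

    def pack_while_writing(db, make_writer, nthreads=3, timeout=60):
        # Writers hammer the database while the main thread packs it.
        threads = [make_writer(db, i) for i in range(nthreads)]
        for t in threads:
            t.start()
        db.pack(time.time())
        for t in threads:
            t.join(timeout)
        return [t.isAlive() for t in threads]  # expected: all False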


=== Zope3/src/zope/app/advanced/acquisition/tests/ReadOnlyStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/ReadOnlyStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/ReadOnlyStorage.py	Thu Apr  1 13:34:39 2004
@@ -11,8 +11,8 @@
 # FOR A PARTICULAR PURPOSE.
 #
 ##############################################################################
-from ZODB.POSException import ReadOnlyError
-from ZODB.Transaction import Transaction
+from ZODB.POSException import ReadOnlyError, Unsupported
+import transaction
 
 class ReadOnlyStorage:
 
@@ -26,7 +26,7 @@
 
     def _make_readonly(self):
         self._storage.close()
-        self.open(read_only=1)
+        self.open(read_only=True)
         self.assert_(self._storage.isReadOnly())
 
     def checkReadMethods(self):
@@ -37,13 +37,17 @@
             data, revid = self._storage.load(oid, '')
             self.assertEqual(revid, self.oids[oid])
             self.assert_(not self._storage.modifiedInVersion(oid))
-            _data = self._storage.loadSerial(oid, revid)
-            self.assertEqual(data, _data)
+            # Storages without revisions may not have loadSerial().
+            try:
+                _data = self._storage.loadSerial(oid, revid)
+                self.assertEqual(data, _data)
+            except Unsupported:
+                pass
 
     def checkWriteMethods(self):
         self._make_readonly()
         self.assertRaises(ReadOnlyError, self._storage.new_oid)
-        t = Transaction()
+        t = transaction.Transaction()
         self.assertRaises(ReadOnlyError, self._storage.tpc_begin, t)
 
         if self._storage.supportsVersions():
@@ -56,5 +60,5 @@
                           '\000' * 8, None, '', '', t)
 
         if self._storage.supportsTransactionalUndo():
-            self.assertRaises(ReadOnlyError, self._storage.transactionalUndo,
+            self.assertRaises(ReadOnlyError, self._storage.undo,
                               '\000' * 8, t)
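
checkReadMethods now lets storages without revision support skip
loadSerial() by catching POSException.Unsupported. The guard by itself (a
sketch, given a storage and a known (oid, revid) pair):

    from ZODB.POSException import Unsupported

    def load_serial_if_supported(storage, oid, revid):
        # Storages that keep no old revisions may not implement
        # loadSerial().
        try:
            return storage.loadSerial(oid, revid)
        except Unsupported:
            return None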


=== Zope3/src/zope/app/advanced/acquisition/tests/RecoveryStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/RecoveryStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/RecoveryStorage.py	Thu Apr  1 13:34:39 2004
@@ -13,11 +13,11 @@
 ##############################################################################
 """More recovery and iterator tests."""
 
-from ZODB.Transaction import Transaction
+from transaction import Transaction
 from ZODB.tests.IteratorStorage import IteratorDeepCompare
 from ZODB.tests.StorageTestBase import MinPO, zodb_unpickle, snooze
 from ZODB import DB
-from ZODB.referencesf import referencesf
+from ZODB.serialize import referencesf
 
 import time
 
@@ -54,7 +54,7 @@
         # Now abort the version and the creation
         t = Transaction()
         self._storage.tpc_begin(t)
-        oids = self._storage.abortVersion('one', t)
+        tid, oids = self._storage.abortVersion('one', t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
         self.assertEqual(oids, [oid])
@@ -80,9 +80,9 @@
                                 data=MinPO(92))
         revid_c = self._dostore(oid, revid=revid_b, version=version,
                                 data=MinPO(93))
-        self._undo(self._storage.undoInfo()[0]['id'], oid)
+        self._undo(self._storage.undoInfo()[0]['id'], [oid])
         self._commitVersion(version, '')
-        self._undo(self._storage.undoInfo()[0]['id'], oid)
+        self._undo(self._storage.undoInfo()[0]['id'], [oid])
 
         # now copy the records to a new storage
         self._dst.copyTransactionsFrom(self._storage)
@@ -95,7 +95,7 @@
 
         self._abortVersion(version)
         self.assert_(self._storage.versionEmpty(version))
-        self._undo(self._storage.undoInfo()[0]['id'], oid)
+        self._undo(self._storage.undoInfo()[0]['id'], [oid])
         self.assert_(not self._storage.versionEmpty(version))
 
         # check the data is what we expect it to be
@@ -109,7 +109,7 @@
         self._storage = self._dst
         self._abortVersion(version)
         self.assert_(self._storage.versionEmpty(version))
-        self._undo(self._storage.undoInfo()[0]['id'], oid)
+        self._undo(self._storage.undoInfo()[0]['id'], [oid])
         self.assert_(not self._storage.versionEmpty(version))
 
         # check the data is what we expect it to be
@@ -149,7 +149,7 @@
         final = list(it)[-1]
         self._dst.tpc_begin(final, final.tid, final.status)
         for r in final:
-            self._dst.restore(r.oid, r.serial, r.data, r.version, r.data_txn,
+            self._dst.restore(r.oid, r.tid, r.data, r.version, r.data_txn,
                               final)
         it.close()
         self._dst.tpc_vote(final)
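
The last RecoveryStorage hunk replays the source storage's final
transaction into the destination with restore(), now passing r.tid where
r.serial used to go. The replay written out end to end (a sketch; the
closing tpc_finish is implied by the surrounding test but not shown in
this hunk):

    def copy_last_transaction(src, dst):
        # Re-store the most recent transaction from src into dst,
        # keeping the original transaction id and status.
        it = src.iterator()
        final = list(it)[-1]
        dst.tpc_begin(final, final.tid, final.status)
        for r in final:
            dst.restore(r.oid, r.tid, r.data, r.version, r.data_txn,
                        final)
        it.close()
        dst.tpc_vote(final)
        dst.tpc_finish(final)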


=== Zope3/src/zope/app/advanced/acquisition/tests/RevisionStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/RevisionStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/RevisionStorage.py	Thu Apr  1 13:34:39 2004
@@ -14,7 +14,11 @@
 """Check loadSerial() on storages that support historical revisions."""
 
 from ZODB.tests.MinPO import MinPO
-from ZODB.tests.StorageTestBase import zodb_unpickle, zodb_pickle
+from ZODB.tests.StorageTestBase import \
+     zodb_unpickle, zodb_pickle, snooze, handle_serials
+from ZODB.utils import p64, u64
+
+import transaction
 
 ZERO = '\0'*8
 
@@ -31,3 +35,141 @@
         for revid, value in revisions.items():
             data = self._storage.loadSerial(oid, revid)
             self.assertEqual(zodb_unpickle(data), value)
+
+    def checkLoadBefore(self):
+        # Store 10 revisions of one object and then make sure that we
+        # can get all the non-current revisions back.
+        oid = self._storage.new_oid()
+        revs = []
+        revid = None
+        for i in range(10):
+            # We need to ensure that successive timestamps are at least
+            # two apart, so that a timestamp exists that's unambiguously
+            # between successive timestamps.  Each call to snooze()
+            # guarantees that the next timestamp will be at least one
+            # larger (and probably much more than that) than the previous
+            # one.
+            snooze()
+            snooze()
+            revid = self._dostore(oid, revid, data=MinPO(i))
+            revs.append(self._storage.loadEx(oid, ""))
+
+        prev = u64(revs[0][1])
+        for i in range(1, 10):
+            tid = revs[i][1]
+            cur = u64(tid)
+            middle = prev + (cur - prev) // 2
+            assert prev < middle < cur  # else the snooze() trick failed
+            prev = cur
+            t = self._storage.loadBefore(oid, p64(middle))
+            self.assert_(t is not None)
+            data, start, end = t
+            self.assertEqual(revs[i-1][0], data)
+            self.assertEqual(tid, end)
+
+    def checkLoadBeforeEdges(self):
+        # Check the edges cases for a non-current load.
+        oid = self._storage.new_oid()
+
+        self.assertRaises(KeyError, self._storage.loadBefore,
+                          oid, p64(0))
+
+        revid1 = self._dostore(oid, data=MinPO(1))
+
+        self.assertEqual(self._storage.loadBefore(oid, p64(0)), None)
+        self.assertEqual(self._storage.loadBefore(oid, revid1), None)
+
+        cur = p64(u64(revid1) + 1)
+        data, start, end = self._storage.loadBefore(oid, cur)
+        self.assertEqual(zodb_unpickle(data), MinPO(1))
+        self.assertEqual(start, revid1)
+        self.assertEqual(end, None)
+
+        revid2 = self._dostore(oid, revid=revid1, data=MinPO(2))
+        data, start, end = self._storage.loadBefore(oid, cur)
+        self.assertEqual(zodb_unpickle(data), MinPO(1))
+        self.assertEqual(start, revid1)
+        self.assertEqual(end, revid2)
+
+    def checkLoadBeforeOld(self):
+        # Look for a very old revision.  With the BaseStorage implementation
+        # this should require multple history() calls.
+        oid = self._storage.new_oid()
+        revs = []
+        revid = None
+        for i in range(50):
+            revid = self._dostore(oid, revid, data=MinPO(i))
+            revs.append(revid)
+
+        data, start, end = self._storage.loadBefore(oid, revs[12])
+        self.assertEqual(zodb_unpickle(data), MinPO(11))
+        self.assertEqual(start, revs[11])
+        self.assertEqual(end, revs[12])
+
+
+    # XXX Is it okay to assume everyone testing against RevisionStorage
+    # implements undo?
+
+    def checkLoadBeforeUndo(self):
+        # Do several transactions then undo them.
+        oid = self._storage.new_oid()
+        revid = None
+        for i in range(5):
+            revid = self._dostore(oid, revid, data=MinPO(i))
+        revs = []
+        for i in range(4):
+            info = self._storage.undoInfo()
+            tid = info[0]["id"]
+            # Always undo the most recent txn, so the value will
+            # alternate between 3 and 4.
+            self._undo(tid, [oid], note="undo %d" % i)
+            revs.append(self._storage.loadEx(oid, ""))
+
+        prev_tid = None
+        for i, (data, tid, ver) in enumerate(revs):
+            t = self._storage.loadBefore(oid, p64(u64(tid) + 1))
+            self.assertEqual(data, t[0])
+            self.assertEqual(tid, t[1])
+            if prev_tid:
+                self.assert_(prev_tid < t[1])
+            prev_tid = t[1]
+            if i < 3:
+                self.assertEqual(revs[i+1][1], t[2])
+            else:
+                self.assertEqual(None, t[2])
+
+    def checkLoadBeforeConsecutiveTids(self):
+        eq = self.assertEqual
+        oid = self._storage.new_oid()
+        def helper(tid, revid, x):
+            data = zodb_pickle(MinPO(x))
+            t = transaction.Transaction()
+            try:
+                self._storage.tpc_begin(t, p64(tid))
+                r1 = self._storage.store(oid, revid, data, '', t)
+                # Finish the transaction
+                r2 = self._storage.tpc_vote(t)
+                newrevid = handle_serials(oid, r1, r2)
+                self._storage.tpc_finish(t)
+            except:
+                self._storage.tpc_abort(t)
+                raise
+            return newrevid
+        revid1 = helper(1, None, 1)
+        revid2 = helper(2, revid1, 2)
+        revid3 = helper(3, revid2, 3)
+        data, start_tid, end_tid = self._storage.loadBefore(oid, p64(2))
+        eq(zodb_unpickle(data), MinPO(1))
+        eq(u64(start_tid), 1)
+        eq(u64(end_tid), 2)
+
+    def checkLoadBeforeCreation(self):
+        eq = self.assertEqual
+        oid1 = self._storage.new_oid()
+        oid2 = self._storage.new_oid()
+        revid1 = self._dostore(oid1)
+        revid2 = self._dostore(oid2)
+        results = self._storage.loadBefore(oid2, revid2)
+        eq(results, None)
+
+    # XXX There are other edge cases to handle, including pack.
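
The new loadBefore tests all rely on the same contract: loadBefore(oid,
tid) returns a (data, start_tid, end_tid) triple for the revision that
was current just before tid, with end_tid of None when that revision is
still current, and None overall when no such revision exists. Reading it
back (a sketch, assuming a storage that implements loadBefore()):

    from ZODB.utils import u64

    def revision_before(storage, oid, tid):
        # Returns (pickle, start, end) with the tids as integers, or
        # None if there is no revision of oid older than tid.
        t = storage.loadBefore(oid, tid)
        if t is None:
            return None
        data, start, end = t
        return data, u64(start), end and u64(end)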


=== Zope3/src/zope/app/advanced/acquisition/tests/StorageTestBase.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/StorageTestBase.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/StorageTestBase.py	Thu Apr  1 13:34:39 2004
@@ -19,10 +19,6 @@
 single object revision.
 """
 
-import errno
-import os
-import pickle
-import string
 import sys
 import time
 import types
@@ -30,9 +26,9 @@
 from cPickle import Pickler, Unpickler
 from cStringIO import StringIO
 
-from ZODB.Transaction import Transaction
-from ZODB.utils import u64
+import transaction
 
+from ZODB.utils import u64
 from ZODB.tests.MinPO import MinPO
 
 ZERO = '\0'*8
@@ -46,11 +42,18 @@
     while now == time.time():
         time.sleep(0.1)
 
+def _persistent_id(obj):
+    oid = getattr(obj, "_p_oid", None)
+    if getattr(oid, "__get__", None) is not None:
+        return None
+    else:
+        return oid
+
 def zodb_pickle(obj):
     """Create a pickle in the format expected by ZODB."""
     f = StringIO()
     p = Pickler(f, 1)
-    p.persistent_id = lambda obj: getattr(obj, '_p_oid', None)
+    p.persistent_id = _persistent_id
     klass = obj.__class__
     assert not hasattr(obj, '__getinitargs__'), "not ready for constructors"
     args = None
@@ -76,22 +79,24 @@
     u.persistent_load = persistent_load
     klass_info = u.load()
     if isinstance(klass_info, types.TupleType):
-        if isinstance(klass_info[0], types.TupleType):
-            modname, klassname = klass_info[0]
-            args = klass_info[1]
+        if isinstance(klass_info[0], type):
+            # XXX what is the second part of klass_info?
+            klass, xxx = klass_info
+            assert not xxx
         else:
-            modname, klassname = klass_info
-            args = None
-        if modname == "__main__":
-            ns = globals()
-        else:
-            mod = import_helper(modname)
-            ns = mod.__dict__
-        try:
-            klass = ns[klassname]
-        except KeyError:
-            sys.stderr.write("can't find %s in %s" % (klassname,
-                                                      repr(ns)))
+            if isinstance(klass_info[0], tuple):
+                modname, klassname = klass_info[0]
+            else:
+                modname, klassname = klass_info
+            if modname == "__main__":
+                ns = globals()
+            else:
+                mod = import_helper(modname)
+                ns = mod.__dict__
+            try:
+                klass = ns[klassname]
+            except KeyError:
+                print >> sys.stderr, "can't find %s in %r" % (klassname, ns)
         inst = klass()
     else:
         raise ValueError, "expected class info: %s" % repr(klass_info)
@@ -133,19 +138,9 @@
     return handle_all_serials(oid, *args)[oid]
 
 def import_helper(name):
-    mod = __import__(name)
+    __import__(name)
     return sys.modules[name]
 
-def removefs(base):
-    """Remove all files created by FileStorage with path base."""
-    for ext in '', '.old', '.tmp', '.lock', '.index', '.pack':
-        path = base + ext
-        try:
-            os.remove(path)
-        except os.error, err:
-            if err[0] != errno.ENOENT:
-                raise
-
 
 class StorageTestBase(unittest.TestCase):
 
@@ -189,7 +184,7 @@
         if version is None:
             version = ''
         # Begin the transaction
-        t = Transaction()
+        t = transaction.Transaction()
         if user is not None:
             t.user = user
         if description is not None:
@@ -213,34 +208,35 @@
 
     # The following methods depend on optional storage features.
 
-    def _undo(self, tid, oid=None):
+    def _undo(self, tid, expected_oids=None, note=None):
         # Undo a tid that affects a single object (oid).
         # XXX This is very specialized
-        t = Transaction()
-        t.note("undo")
+        t = transaction.Transaction()
+        t.note(note or "undo")
         self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
+        tid, oids = self._storage.undo(tid, t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
-        if oid is not None:
-            self.assertEqual(len(oids), 1)
-            self.assertEqual(oids[0], oid)
+        if expected_oids is not None:
+            self.assertEqual(len(oids), len(expected_oids), repr(oids))
+            for oid in expected_oids:
+                self.assert_(oid in oids)
         return self._storage.lastTransaction()
 
     def _commitVersion(self, src, dst):
-        t = Transaction()
+        t = transaction.Transaction()
         t.note("commit %r to %r" % (src, dst))
         self._storage.tpc_begin(t)
-        oids = self._storage.commitVersion(src, dst, t)
+        tid, oids = self._storage.commitVersion(src, dst, t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
         return oids
 
     def _abortVersion(self, ver):
-        t = Transaction()
+        t = transaction.Transaction()
         t.note("abort %r" % ver)
         self._storage.tpc_begin(t)
-        oids = self._storage.abortVersion(ver, t)
+        tid, oids = self._storage.abortVersion(ver, t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
         return oids
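
The helpers above now unpack the (tid, oids) pair returned by undo(),
commitVersion() and abortVersion() under the updated storage API. The
bare undo sequence, outside the helper (a sketch):

    import transaction

    def undo_one(storage, undo_tid, note="undo"):
        # Undo a previously committed transaction inside a fresh
        # transaction of its own.
        t = transaction.Transaction()
        t.note(note)
        storage.tpc_begin(t)
        tid, oids = storage.undo(undo_tid, t)
        storage.tpc_vote(t)
        storage.tpc_finish(t)
        return oids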


=== Zope3/src/zope/app/advanced/acquisition/tests/Synchronization.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/Synchronization.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/Synchronization.py	Thu Apr  1 13:34:39 2004
@@ -41,7 +41,7 @@
 New and/or unspecified methods:
 
 tpc_vote(): handled like tpc_abort
-transactionalUndo(): handled like undo()  (which is how?)
+undo(): how's that handled?
 
 Methods that have nothing to do with committing/non-committing:
 load(), loadSerial(), getName(), getSize(), __len__(), history(),
@@ -62,7 +62,7 @@
 
 """
 
-from ZODB.Transaction import Transaction
+from transaction import Transaction
 from ZODB.POSException import StorageTransactionError
 
 VERSION = "testversion"


=== Zope3/src/zope/app/advanced/acquisition/tests/TransactionalUndoStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/TransactionalUndoStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/TransactionalUndoStorage.py	Thu Apr  1 13:34:39 2004
@@ -11,21 +11,22 @@
 # FOR A PARTICULAR PURPOSE.
 #
 ##############################################################################
-"""Check transactionalUndo().
+"""Check undo().
 
-Any storage that supports transactionalUndo() must pass these tests.
+Any storage that supports undo() must pass these tests.
 """
-from __future__ import nested_scopes
 
 import time
 import types
+
+from persistent import Persistent
+from transaction import Transaction
+
 from ZODB import POSException
-from ZODB.Transaction import Transaction
-from ZODB.referencesf import referencesf
-from ZODB.utils import u64, p64
+from ZODB.serialize import referencesf
+from ZODB.utils import p64
 from ZODB import DB
 
-from Persistence import Persistent
 from ZODB.tests.MinPO import MinPO
 from ZODB.tests.StorageTestBase import zodb_pickle, zodb_unpickle
 
@@ -99,6 +100,15 @@
             for rec in txn:
                 pass
 
+    def undo(self, tid, note):
+        t = Transaction()
+        t.note(note)
+        self._storage.tpc_begin(t)
+        oids = self._storage.undo(tid, t)
+        self._storage.tpc_vote(t)
+        self._storage.tpc_finish(t)
+        return oids
+
     def checkSimpleTransactionalUndo(self):
         eq = self.assertEqual
         oid = self._storage.new_oid()
@@ -107,55 +117,27 @@
         revid = self._dostore(oid, revid=revid, data=MinPO(25))
 
         info = self._storage.undoInfo()
-        tid = info[0]['id']
         # Now start an undo transaction
-        t = Transaction()
-        t.note('undo1')
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
+        self._undo(info[0]["id"], [oid], note="undo1")
         data, revid = self._storage.load(oid, '')
         eq(zodb_unpickle(data), MinPO(24))
+
         # Do another one
         info = self._storage.undoInfo()
-        tid = info[2]['id']
-        t = Transaction()
-        t.note('undo2')
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
+        self._undo(info[2]["id"], [oid], note="undo2")
         data, revid = self._storage.load(oid, '')
         eq(zodb_unpickle(data), MinPO(23))
+
         # Try to undo the first record
         info = self._storage.undoInfo()
-        tid = info[4]['id']
-        t = Transaction()
-        t.note('undo3')
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
+        self._undo(info[4]["id"], [oid], note="undo3")
         # This should fail since we've undone the object's creation
         self.assertRaises(KeyError,
                           self._storage.load, oid, '')
+
         # And now let's try to redo the object's creation
         info = self._storage.undoInfo()
-        tid = info[0]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
+        self._undo(info[0]["id"], [oid])
         data, revid = self._storage.load(oid, '')
         eq(zodb_unpickle(data), MinPO(23))
         self._iterate()
@@ -163,14 +145,14 @@
     def checkCreationUndoneGetSerial(self):
         # create an object
         oid = self._storage.new_oid()
-        revid = self._dostore(oid, data=MinPO(23))
+        self._dostore(oid, data=MinPO(23))
         # undo its creation
         info = self._storage.undoInfo()
         tid = info[0]['id']
         t = Transaction()
         t.note('undo1')
         self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
+        self._storage.undo(tid, t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
         # Check that calling getSerial on an uncreated object raises a KeyError
@@ -184,27 +166,14 @@
         revid = self._dostore(oid, revid=revid, data=MinPO(12))
         # Undo the last transaction
         info = self._storage.undoInfo()
-        tid = info[0]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
+        self._undo(info[0]['id'], [oid])
         data, revid = self._storage.load(oid, '')
         eq(zodb_unpickle(data), MinPO(11))
+
         # Now from here, we can either redo the last undo, or undo the object
         # creation.  Let's undo the object creation.
         info = self._storage.undoInfo()
-        tid = info[2]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
+        self._undo(info[2]['id'], [oid])
         self.assertRaises(KeyError, self._storage.load, oid, '')
         self._iterate()
 
@@ -215,27 +184,13 @@
         revid = self._dostore(oid, revid=revid, data=MinPO(12))
         # Undo the last transaction
         info = self._storage.undoInfo()
-        tid = info[0]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
+        self._undo(info[0]['id'], [oid])
         data, revid = self._storage.load(oid, '')
         eq(zodb_unpickle(data), MinPO(11))
         # Now from here, we can either redo the last undo, or undo the object
         # creation.  Let's redo the last undo
         info = self._storage.undoInfo()
-        tid = info[0]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 1)
-        eq(oids[0], oid)
+        self._undo(info[0]['id'], [oid])
         data, revid = self._storage.load(oid, '')
         eq(zodb_unpickle(data), MinPO(12))
         self._iterate()
@@ -277,17 +232,10 @@
         eq(zodb_unpickle(data), MinPO(32))
         data, revid2 = self._storage.load(oid2, '')
         eq(zodb_unpickle(data), MinPO(52))
+
         # Now attempt to undo the transaction containing two objects
         info = self._storage.undoInfo()
-        tid = info[0]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 2)
-        self.failUnless(oid1 in oids)
-        self.failUnless(oid2 in oids)
+        self._undo(info[0]['id'], [oid1, oid2])
         data, revid1 = self._storage.load(oid1, '')
         eq(zodb_unpickle(data), MinPO(31))
         data, revid2 = self._storage.load(oid2, '')
@@ -333,13 +281,11 @@
         tid1 = info[1]['id']
         t = Transaction()
         self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        oids1 = self._storage.transactionalUndo(tid1, t)
+        tid, oids = self._storage.undo(tid, t)
+        tid, oids1 = self._storage.undo(tid1, t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
         # We get the finalization stuff called an extra time:
-##        self._storage.tpc_vote(t)
-##        self._storage.tpc_finish(t)
         eq(len(oids), 2)
         eq(len(oids1), 2)
         unless(oid1 in oids)
@@ -348,17 +294,10 @@
         eq(zodb_unpickle(data), MinPO(30))
         data, revid2 = self._storage.load(oid2, '')
         eq(zodb_unpickle(data), MinPO(50))
+
         # Now try to undo the one we just did to undo, whew
         info = self._storage.undoInfo()
-        tid = info[0]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 2)
-        unless(oid1 in oids)
-        unless(oid2 in oids)
+        self._undo(info[0]['id'], [oid1, oid2])
         data, revid1 = self._storage.load(oid1, '')
         eq(zodb_unpickle(data), MinPO(32))
         data, revid2 = self._storage.load(oid2, '')
@@ -390,15 +329,7 @@
         eq(revid1, revid2)
         # Now attempt to undo the transaction containing two objects
         info = self._storage.undoInfo()
-        tid = info[0]['id']
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        eq(len(oids), 2)
-        self.failUnless(oid1 in oids)
-        self.failUnless(oid2 in oids)
+        self._undo(info[0]["id"], [oid1, oid2])
         data, revid1 = self._storage.load(oid1, '')
         eq(zodb_unpickle(data), MinPO(31))
         data, revid2 = self._storage.load(oid2, '')
@@ -424,7 +355,7 @@
         tid = info[1]['id']
         t = Transaction()
         self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
+        tid, oids = self._storage.undo(tid, t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
         eq(len(oids), 1)
@@ -450,7 +381,7 @@
         t = Transaction()
         self._storage.tpc_begin(t)
         self.assertRaises(POSException.UndoError,
-                          self._storage.transactionalUndo,
+                          self._storage.undo,
                           tid, t)
         self._storage.tpc_abort(t)
         # Now have more fun: object1 and object2 are in the same transaction,
@@ -489,7 +420,7 @@
         t = Transaction()
         self._storage.tpc_begin(t)
         self.assertRaises(POSException.UndoError,
-                          self._storage.transactionalUndo,
+                          self._storage.undo,
                           tid, t)
         self._storage.tpc_abort(t)
         self._iterate()
@@ -502,7 +433,7 @@
         packtime = time.time()
         snooze()                # time.time() now distinct from packtime
         revid2 = self._dostore(oid, revid=revid1, data=MinPO(52))
-        revid3 = self._dostore(oid, revid=revid2, data=MinPO(53))
+        self._dostore(oid, revid=revid2, data=MinPO(53))
         # Now get the undo log
         info = self._storage.undoInfo()
         eq(len(info), 3)
@@ -517,7 +448,7 @@
         # And now attempt to undo the last transaction
         t = Transaction()
         self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
+        tid, oids = self._storage.undo(tid, t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
         eq(len(oids), 1)
@@ -670,7 +601,10 @@
         for t in packtimes:
             self._storage.pack(t, referencesf)
             cn.sync()
-            cn._cache.clear()
+
+            # XXX Is _cache supposed to have a clear() method, or not?
+            # cn._cache.clear()
+
             # The last undo set the value to 3 and pack should
             # never change that.
             self.assertEqual(rt["test"].value, 3)
@@ -720,7 +654,7 @@
             base = i * OBJECTS + i
             for j in range(OBJECTS):
                 tid = info[base + j]['id']
-                s.transactionalUndo(tid, t)
+                s.undo(tid, t)
             s.tpc_vote(t)
             s.tpc_finish(t)
 
@@ -744,7 +678,7 @@
             tid = p64(i + 1)
             eq(txn.tid, tid)
 
-            L1 = [(rec.oid, rec.serial, rec.data_txn) for rec in txn]
+            L1 = [(rec.oid, rec.tid, rec.data_txn) for rec in txn]
             L2 = [(oid, revid, None) for _tid, oid, revid in orig
                   if _tid == tid]
 

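The hunks above replace many inline tpc_begin()/undo()/tpc_vote()/tpc_finish() sequences with calls to self._undo(tid, expected_oids, note=...). That helper is not part of this diff; judging from the call sites and from the (tid, oids) return value of storage.undo() used above, it presumably lives in the shared storage test base and looks roughly like the sketch below (signature and return value are inferred, not confirmed by this checkin):

    def _undo(self, tid, expected_oids=None, note=None):
        # Run a single undo inside its own two-phase commit.
        t = transaction.Transaction()
        if note:
            t.note(note)
        self._storage.tpc_begin(t)
        undone_tid, oids = self._storage.undo(tid, t)
        self._storage.tpc_vote(t)
        self._storage.tpc_finish(t)
        if expected_oids is not None:
            self.assertEqual(len(oids), len(expected_oids))
            for oid in expected_oids:
                self.failUnless(oid in oids)
        return oids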

=== Zope3/src/zope/app/advanced/acquisition/tests/TransactionalUndoVersionStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/TransactionalUndoVersionStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/TransactionalUndoVersionStorage.py	Thu Apr  1 13:34:39 2004
@@ -11,16 +11,14 @@
 # FOR A PARTICULAR PURPOSE.
 #
 ##############################################################################
-from __future__ import nested_scopes
-
-# Check interactions between transactionalUndo() and versions.  Any storage
-# that supports both transactionalUndo() and versions must pass these tests.
+# Check interactions between undo() and versions.  Any storage that
+# supports both undo() and versions must pass these tests.
 
 import time
 
-from ZODB import POSException
-from ZODB.referencesf import referencesf
-from ZODB.Transaction import Transaction
+import transaction
+
+from ZODB.serialize import referencesf
 from ZODB.tests.MinPO import MinPO
 from ZODB.tests.StorageTestBase import zodb_unpickle
 
@@ -42,15 +40,6 @@
                 pass # not expected
         return self._dostore(*args, **kwargs)
 
-    def _undo(self, tid, oid):
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.transactionalUndo(tid, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
-        self.assertEqual(len(oids), 1)
-        self.assertEqual(oids[0], oid)
-
     def checkUndoInVersion(self):
         eq = self.assertEqual
         unless = self.failUnless
@@ -70,21 +59,17 @@
                                 version=version)
 
         info = self._storage.undoInfo()
-        self._undo(info[0]['id'], oid)
+        self._undo(info[0]['id'], [oid])
 
         data, revid = self._storage.load(oid, '')
-        eq(revid, revid_a)
+##        eq(revid, revid_a)
         eq(zodb_unpickle(data), MinPO(91))
         data, revid = self._storage.load(oid, version)
         unless(revid > revid_b and revid > revid_c)
         eq(zodb_unpickle(data), MinPO(92))
 
         # Now commit the version...
-        t = Transaction()
-        self._storage.tpc_begin(t)
-        oids = self._storage.commitVersion(version, '', t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
+        oids = self._commitVersion(version, "")
         eq(len(oids), 1)
         eq(oids[0], oid)
 
@@ -92,7 +77,7 @@
 
         # ...and undo the commit
         info = self._storage.undoInfo()
-        self._undo(info[0]['id'], oid)
+        self._undo(info[0]['id'], [oid])
 
         check_objects(91, 92)
 
@@ -104,7 +89,7 @@
 
         # Now undo the abort
         info=self._storage.undoInfo()
-        self._undo(info[0]['id'], oid)
+        self._undo(info[0]['id'], [oid])
 
         check_objects(91, 92)
 
@@ -125,12 +110,12 @@
         version = 'version'
         revid1 = self._x_dostore(oid1, data=MinPO(0), description='create1')
         revid2 = self._x_dostore(oid1, data=MinPO(1), revid=revid1,
-                               version=version, description='version1')
-        revid3 = self._x_dostore(oid1, data=MinPO(2), revid=revid2,
-                               version=version, description='version2')
+                                 version=version, description='version1')
+        self._x_dostore(oid1, data=MinPO(2), revid=revid2,
+                        version=version, description='version2')
         self._x_dostore(description='create2')
 
-        t = Transaction()
+        t = transaction.Transaction()
         t.description = 'commit version'
         self._storage.tpc_begin(t)
         self._storage.commitVersion(version, '', t)
@@ -145,16 +130,24 @@
 
         self._storage.pack(pt, referencesf)
 
-        t = Transaction()
-        t.description = 'undo commit version'
-        self._storage.tpc_begin(t)
-        self._storage.transactionalUndo(t_id, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
+        self._undo(t_id, note="undo commit version")
 
         self.assertEqual(load_value(oid1), 0)
         self.assertEqual(load_value(oid1, version), 2)
 
+        data, tid, ver = self._storage.loadEx(oid1, "")
+        # After undoing the version commit, the non-version data
+        # once again becomes the non-version data from 'create1'.
+        self.assertEqual(tid, self._storage.lastTransaction())
+        self.assertEqual(ver, "")
+
+        # The current version data comes from an undo record, which
+        # means that it gets data via the backpointer but tid from the
+        # current txn.
+        data, tid, ver = self._storage.loadEx(oid1, version)
+        self.assertEqual(ver, version)
+        self.assertEqual(tid, self._storage.lastTransaction())
+
     def checkUndoAbortVersion(self):
         def load_value(oid, version=''):
             data, revid = self._storage.load(oid, version)
@@ -172,17 +165,12 @@
         version = 'version'
         revid1 = self._x_dostore(oid1, data=MinPO(0), description='create1')
         revid2 = self._x_dostore(oid1, data=MinPO(1), revid=revid1,
-                               version=version, description='version1')
-        revid3 = self._x_dostore(oid1, data=MinPO(2), revid=revid2,
-                               version=version, description='version2')
+                                 version=version, description='version1')
+        self._x_dostore(oid1, data=MinPO(2), revid=revid2,
+                        version=version, description='version2')
         self._x_dostore(description='create2')
 
-        t = Transaction()
-        t.description = 'abort version'
-        self._storage.tpc_begin(t)
-        self._storage.abortVersion(version, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
+        self._abortVersion(version)
 
         info = self._storage.undoInfo()
         t_id = info[0]['id']
@@ -191,12 +179,7 @@
         # after abort, we should see non-version data
         self.assertEqual(load_value(oid1, version), 0)
 
-        t = Transaction()
-        t.description = 'undo abort version'
-        self._storage.tpc_begin(t)
-        self._storage.transactionalUndo(t_id, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
+        self._undo(t_id, note="undo abort version")
 
         self.assertEqual(load_value(oid1), 0)
         # t undo will re-create the version
@@ -207,12 +190,7 @@
 
         self._storage.pack(pt, referencesf)
 
-        t = Transaction()
-        t.description = 'undo undo'
-        self._storage.tpc_begin(t)
-        self._storage.transactionalUndo(t_id, t)
-        self._storage.tpc_vote(t)
-        self._storage.tpc_finish(t)
+        self._undo(t_id, note="undo undo")
 
         # undo of undo will put as back where we started
         self.assertEqual(load_value(oid1), 0)

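The loadEx() comments added above ("data via the backpointer but tid from the current txn") describe how an undo record carries no new pickle of its own: the data is reached by following a pointer back to an older record, while the tid reported is that of the undoing transaction. A toy model of that lookup, purely for illustration and not FileStorage's actual record format:

    # Toy model of an undo record that uses a backpointer (illustration only).
    records = {
        'tid1': {'data': 'MinPO(0) pickle'},    # original non-version data
        'tid2': {'data': 'MinPO(2) pickle'},    # the version commit
        'tid3': {'back': 'tid1'},               # undo of the commit: no new pickle,
    }                                           # just a pointer back to tid1's data

    def load_ex(current_tid):
        rec = records[current_tid]
        while 'back' in rec:                    # data is found by chasing backpointers
            rec = records[rec['back']]
        return rec['data'], current_tid         # old data, but the undoing txn's tid

    data, tid = load_ex('tid3')
    assert data == 'MinPO(0) pickle' and tid == 'tid3'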

=== Zope3/src/zope/app/advanced/acquisition/tests/VersionStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/VersionStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/VersionStorage.py	Thu Apr  1 13:34:39 2004
@@ -16,15 +16,12 @@
 Any storage that supports versions should be able to pass all these tests.
 """
 
-# XXX we should clean this code up to get rid of the #JF# comments.
-# They were introduced when Jim reviewed the original version of the
-# code.  Barry and Jeremy didn't understand versions then.
-
 import time
 
+from transaction import Transaction
+
 from ZODB import POSException
-from ZODB.referencesf import referencesf
-from ZODB.Transaction import Transaction
+from ZODB.serialize import referencesf
 from ZODB.tests.MinPO import MinPO
 from ZODB.tests.StorageTestBase import zodb_unpickle, snooze
 from ZODB import DB
@@ -48,26 +45,33 @@
         revid1 = self._dostore(oid, data=MinPO(12))
         revid2 = self._dostore(oid, revid=revid1, data=MinPO(13),
                                version="version")
+        data, tid, ver = self._storage.loadEx(oid, "version")
+        self.assertEqual(revid2, tid)
+        self.assertEqual(zodb_unpickle(data), MinPO(13))
         oids = self._abortVersion("version")
         self.assertEqual([oid], oids)
         data, revid3 = self._storage.load(oid, "")
         # use repr() to avoid getting binary data in a traceback on error
-        self.assertEqual(`revid1`, `revid3`)
-        self.assertNotEqual(`revid2`, `revid3`)
+        self.assertNotEqual(revid1, revid3)
+        self.assertNotEqual(revid2, revid3)
+        data, tid, ver = self._storage.loadEx(oid, "")
+        self.assertEqual(revid3, tid)
+        self.assertEqual(zodb_unpickle(data), MinPO(12))
+        self.assertEqual(tid, self._storage.lastTransaction())
 
     def checkVersionedStoreAndLoad(self):
         eq = self.assertEqual
         # Store a couple of non-version revisions of the object
         oid = self._storage.new_oid()
         revid = self._dostore(oid, data=MinPO(11))
-        revid = self._dostore(oid, revid=revid, data=MinPO(12))
+        revid1 = self._dostore(oid, revid=revid, data=MinPO(12))
         # And now store some new revisions in a version
         version = 'test-version'
-        revid = self._dostore(oid, revid=revid, data=MinPO(13),
+        revid = self._dostore(oid, revid=revid1, data=MinPO(13),
                               version=version)
         revid = self._dostore(oid, revid=revid, data=MinPO(14),
                               version=version)
-        revid = self._dostore(oid, revid=revid, data=MinPO(15),
+        revid2 = self._dostore(oid, revid=revid, data=MinPO(15),
                               version=version)
         # Now read back the object in both the non-version and version and
         # make sure the values jive.
@@ -78,6 +82,20 @@
         if hasattr(self._storage, 'getSerial'):
             s = self._storage.getSerial(oid)
             eq(s, max(revid, vrevid))
+        data, tid, ver = self._storage.loadEx(oid, version)
+        eq(zodb_unpickle(data), MinPO(15))
+        eq(tid, revid2)
+        data, tid, ver = self._storage.loadEx(oid, "other version")
+        eq(zodb_unpickle(data), MinPO(12))
+        eq(tid, revid2)
+        # loadSerial returns non-version data
+        try:
+            data = self._storage.loadSerial(oid, revid)
+            eq(zodb_unpickle(data), MinPO(12))
+            data = self._storage.loadSerial(oid, revid2)
+            eq(zodb_unpickle(data), MinPO(12))
+        except POSException.Unsupported:
+            pass
 
     def checkVersionedLoadErrors(self):
         oid = self._storage.new_oid()
@@ -89,11 +107,6 @@
         self.assertRaises(KeyError,
                           self._storage.load,
                           self._storage.new_oid(), '')
-        # Try to load a bogus version string
-        #JF# Nope, fall back to non-version
-        #JF# self.assertRaises(KeyError,
-        #JF#                   self._storage.load,
-        #JF#                   oid, 'bogus')
         data, revid = self._storage.load(oid, 'bogus')
         self.assertEqual(zodb_unpickle(data), MinPO(11))
 
@@ -112,9 +125,6 @@
     def checkVersionEmpty(self):
         # Before we store anything, these versions ought to be empty
         version = 'test-version'
-        #JF# The empty string is not a valid version. I think that this should
-        #JF# be an error. Let's punt for now.
-        #JF# assert self._storage.versionEmpty('')
         self.failUnless(self._storage.versionEmpty(version))
         # Now store some objects
         oid = self._storage.new_oid()
@@ -125,10 +135,6 @@
         revid = self._dostore(oid, revid=revid, data=MinPO(14),
                               version=version)
         # The blank version should not be empty
-        #JF# The empty string is not a valid version. I think that this should
-        #JF# be an error. Let's punt for now.
-        #JF# assert not self._storage.versionEmpty('')
-
         # Neither should 'test-version'
         self.failUnless(not self._storage.versionEmpty(version))
         # But this non-existant version should be empty
@@ -165,7 +171,7 @@
         oid = self._storage.new_oid()
         revid = self._dostore(oid, data=MinPO(49))
         revid = self._dostore(oid, revid=revid, data=MinPO(50))
-        nvrevid = revid = self._dostore(oid, revid=revid, data=MinPO(51))
+        revid = self._dostore(oid, revid=revid, data=MinPO(51))
         # Now do some stores in a version
         revid = self._dostore(oid, revid=revid, data=MinPO(52),
                               version=version)
@@ -190,6 +196,20 @@
         data, revid = self._storage.load(oid, '')
         eq(zodb_unpickle(data), MinPO(51))
 
+    def checkAbortVersionNonCurrent(self):
+        # Make sure the non-current serial number is correct
+        # after a version is aborted.
+        oid, version = self._setup_version()
+        self._abortVersion(version)
+        data, tid, ver = self._storage.loadEx(oid, "")
+        # write a new revision of oid so that the aborted-version txn
+        # is not current
+        self._dostore(oid, revid=tid, data=MinPO(17))
+        ltid = self._storage.lastTransaction()
+        ncdata, ncstart, end = self._storage.loadBefore(oid, ltid)
+        self.assertEqual(data, ncdata)
+        self.assertEqual(tid, ncstart)
+
     def checkAbortVersionErrors(self):
         eq = self.assertEqual
         oid, version = self._setup_version()
@@ -197,13 +217,6 @@
         t = Transaction()
         self._storage.tpc_begin(t)
 
-        #JF# The spec is silent on what happens if you abort or commit
-        #JF# a non-existent version. FileStorage consideres this a noop.
-        #JF# We can change the spec, but until we do ....
-        #JF# self.assertRaises(POSException.VersionError,
-        #JF#                   self._storage.abortVersion,
-        #JF#                   'bogus', t)
-
         # And try to abort the empty version
         if (hasattr(self._storage, 'supportsTransactionalUndo')
             and self._storage.supportsTransactionalUndo()):
@@ -213,7 +226,7 @@
                               '', t)
 
         # But now we really try to abort the version
-        oids = self._storage.abortVersion(version, t)
+        tid, oids = self._storage.abortVersion(version, t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
         eq(len(oids), 1)
@@ -240,19 +253,18 @@
             self._storage.tpc_abort(t)
 
     def checkNewSerialOnCommitVersionToVersion(self):
-        eq = self.assertEqual
         oid, version = self._setup_version()
-        data, vserial = self._storage.load(oid, version)
-        data, nserial = self._storage.load(oid, '')
+        data, vtid = self._storage.load(oid, version)
+        data, ntid = self._storage.load(oid, '')
 
         version2 = 'test version 2'
         self._commitVersion(version, version2)
-        data, serial = self._storage.load(oid, version2)
+        data, tid = self._storage.load(oid, version2)
 
-        self.failUnless(serial != vserial and serial != nserial,
-                        "New serial, %r, should be different from the old "
-                        "version, %r, and non-version, %r, serials."
-                        % (serial, vserial, nserial))
+        self.failUnless(tid != vtid and tid != ntid,
+                        "New tid, %r, should be different from the old "
+                        "version, %r, and non-version, %r, tids."
+                        % (tid, vtid, ntid))
 
     def checkModifyAfterAbortVersion(self):
         eq = self.assertEqual
@@ -333,13 +345,8 @@
         data, revid = self._storage.load(oid1, '')
         eq(zodb_unpickle(data), MinPO(51))
 
-        #JF# Ditto
-        #JF# self.assertRaises(POSException.VersionError,
-        #JF#                   self._storage.load, oid1, version1)
         data, revid = self._storage.load(oid1, '')
         eq(zodb_unpickle(data), MinPO(51))
-        #JF# self.assertRaises(POSException.VersionError,
-        #JF#                   self._storage.load, oid1, version2)
         data, revid = self._storage.load(oid1, '')
         eq(zodb_unpickle(data), MinPO(51))
 
@@ -360,7 +367,6 @@
         data, revid = self._storage.load(oid2, version2)
         eq(zodb_unpickle(data), MinPO(54))
 
-        #JF# To do a test like you want, you have to add the data in a version
         oid = self._storage.new_oid()
         revid = self._dostore(oid, revid=revid, data=MinPO(54), version='one')
         self.assertRaises(KeyError,
@@ -376,7 +382,7 @@
         # Now abort the version and the creation
         t = Transaction()
         self._storage.tpc_begin(t)
-        oids = self._storage.abortVersion('one', t)
+        tid, oids = self._storage.abortVersion('one', t)
         self._storage.tpc_vote(t)
         self._storage.tpc_finish(t)
         self.assertEqual(oids, [oid])
@@ -492,7 +498,6 @@
 
         self._storage.pack(time.time(), referencesf)
         cn.sync()
-        cn._cache.clear()
 
         # make sure all the non-version data is there
         for name, obj in root.items():
@@ -528,3 +533,14 @@
         cn2 = db.open(version="b")
         rt2 = cn2.root()
         self.assertEqual(rt2["b"].value.value, "still version")
+
+    def checkLoadBeforeVersion(self):
+        eq = self.assertEqual
+        oid = self._storage.new_oid()
+        revid1 = self._dostore(oid, data=1)
+        revid2 = self._dostore(oid, data=2, revid=revid1, version="kobe")
+        revid3 = self._dostore(oid, data=3, revid=revid2, version="kobe")
+        data, start_tid, end_tid = self._storage.loadBefore(oid, revid3)
+        eq(zodb_unpickle(data), MinPO(1))
+        eq(start_tid, revid1)
+        eq(end_tid, None)

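checkLoadBeforeVersion above depends on the loadBefore() contract: for an oid and a tid it returns (data, start_tid, end_tid) for the non-version revision that was current just before tid, and version revisions do not count. A toy sketch of that rule, with a plain list standing in for the storage's per-object index (not the real API):

    # Only non-version revisions are indexed here; the 'kobe' data is elsewhere.
    revisions = [("tid1", "MinPO(1)")]

    def load_before(tid):
        older = [(t, d) for (t, d) in revisions if t < tid]
        if not older:
            return None
        start_tid, data = older[-1]
        newer = [t for (t, d) in revisions if t > start_tid]
        end_tid = newer and newer[0] or None   # None means still current
        return data, start_tid, end_tid

    assert load_before("tid3") == ("MinPO(1)", "tid1", None)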

=== Zope3/src/zope/app/advanced/acquisition/tests/dangle.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/dangle.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/dangle.py	Thu Apr  1 13:34:39 2004
@@ -21,7 +21,7 @@
 from ZODB.FileStorage import FileStorage
 from ZODB import DB
 
-from Persistence import Persistent
+from persistent import Persistent
 
 class P(Persistent):
     pass


=== Zope3/src/zope/app/advanced/acquisition/tests/speed.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/speed.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/speed.py	Thu Apr  1 13:34:39 2004
@@ -39,9 +39,9 @@
 sys.path.insert(0, os.getcwd())
 
 import ZODB, ZODB.FileStorage
-import Persistence
+import persistent
 
-class P(Persistence.Persistent): pass
+class P(persistent.Persistent): pass
 
 def main(args):
 


=== Zope3/src/zope/app/advanced/acquisition/tests/testActivityMonitor.py 1.1 => 1.2 ===


=== Zope3/src/zope/app/advanced/acquisition/tests/testCache.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testCache.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testCache.py	Thu Apr  1 13:34:39 2004
@@ -17,21 +17,19 @@
 purposes. It acts like a memo for unpickling.  It also keeps recent
 objects in memory under the assumption that they may be used again.
 """
-from __future__ import nested_scopes
 
+import gc
 import time
-import types
 import unittest
 
 import ZODB
 import ZODB.MappingStorage
-from ZODB.cPickleCache import PickleCache
-from ZODB.POSException import ConflictError
-from ZODB.PersistentMapping import PersistentMapping
+from persistent.cPickleCache import PickleCache
+from persistent.mapping import PersistentMapping
 from ZODB.tests.MinPO import MinPO
 from ZODB.utils import p64
 
-from Persistence import Persistent
+from persistent import Persistent
 
 class CacheTestBase(unittest.TestCase):
 
@@ -82,8 +80,8 @@
 
     def checkCacheDetail(self):
         for name, count in self.db.cacheDetail():
-            self.assert_(isinstance(name, types.StringType))
-            self.assert_(isinstance(count, types.IntType))
+            self.assert_(isinstance(name, str))
+            self.assert_(isinstance(count, int))
 
     def checkCacheExtremeDetail(self):
         expected = ['conn_no', 'id', 'oid', 'rc', 'klass', 'state']
@@ -103,14 +101,14 @@
     def checkFullSweep(self):
         old_size = self.db.cacheSize()
         time.sleep(3)
-        self.db.cacheFullSweep(0)
+        self.db.cacheFullSweep()
         new_size = self.db.cacheSize()
         self.assert_(new_size < old_size, "%s < %s" % (old_size, new_size))
 
     def checkMinimize(self):
         old_size = self.db.cacheSize()
         time.sleep(3)
-        self.db.cacheMinimize(0)
+        self.db.cacheMinimize()
         new_size = self.db.cacheSize()
         self.assert_(new_size < old_size, "%s < %s" % (old_size, new_size))
 
@@ -179,13 +177,19 @@
         CONNS = 3
         for i in range(CONNS):
             self.noodle_new_connection()
-
+        
         self.assertEquals(self.db.cacheSize(), CACHE_SIZE * CONNS)
         details = self.db.cacheDetailSize()
         self.assertEquals(len(details), CONNS)
         for d in details:
             self.assertEquals(d['ngsize'], CACHE_SIZE)
-            self.assertEquals(d['size'], CACHE_SIZE)
+
+            # The assertion below is nonsensical.
+            # The (poorly named) cache size is a target for non-ghosts.
+            # The cache usually also contains ghosts, so the total
+            # size normally exceeds the target size.
+            
+            #self.assertEquals(d['size'], CACHE_SIZE)
 
     def checkDetail(self):
         CACHE_SIZE = 10
@@ -195,6 +199,28 @@
         for i in range(CONNS):
             self.noodle_new_connection()
 
+        gc.collect()
+
+        # XXX The above gc.collect call is necessary to make this test
+        # pass.
+        #
+        # This test then only works because the order of computations
+        # and object accesses in the "noodle" calls is such that the
+        # persistent mapping containing the MinPO objects is
+        # deactivated before the MinPO objects.
+        #
+        # - Without the gc call, the cache will contain ghost MinPOs
+        #   and the check of the MinPO count below will fail. That's 
+        #   because the counts returned by cacheDetail include ghosts.
+        #
+        # - If the mapping object containing the MinPOs isn't
+        #   deactivated, there will be one fewer non-ghost MinPO and
+        #   the test will fail anyway.
+        #
+        # This test really needs to be thought through and documented
+        # better. 
+
+
         for klass, count in self.db.cacheDetail():
             if klass.endswith('MinPO'):
                 self.assertEqual(count, CONNS * CACHE_SIZE)
@@ -224,7 +250,7 @@
         self.cache = PickleCache(self.jar)
 
     def checkGetBogusKey(self):
-        self.assertRaises(KeyError, self.cache.get, p64(0))
+        self.assertEqual(self.cache.get(p64(0)), None)
         try:
             self.cache[12]
         except KeyError:

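Several of the new comments in testCache.py (the disabled size assertion, the gc.collect() requirement) come down to the ghost vs. non-ghost distinction: the configured cache size only targets non-ghosts, while cacheDetail() and the reported 'size' count ghosts too. With the persistent package that state is visible on the object itself; a small illustration, where the Note class is made up for the example:

    from persistent import Persistent

    class Note(Persistent):
        pass

    def is_ghost(obj):
        # _p_changed is None for ghosts, False for loaded-and-clean,
        # and True for loaded-and-modified objects.
        return obj._p_changed is None

    # For an object already loaded through a connection:
    #   obj._p_deactivate()   # turn it back into a ghost, as cache sweeps do
    # Whether such ghosts linger (and inflate the counts) is exactly what the
    # gc.collect() call and the deactivation-order caveat above are about.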

=== Zope3/src/zope/app/advanced/acquisition/tests/testConfig.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testConfig.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testConfig.py	Thu Apr  1 13:34:39 2004
@@ -12,23 +12,24 @@
 #
 ##############################################################################
 
-import os
-import errno
-import shutil
 import tempfile
 import unittest
 
 import ZODB.config
-import ZODB.tests
 from ZODB.POSException import ReadOnlyError
-from ZEO.ClientStorage import ClientDisconnected
+
 
 class ConfigTestBase(unittest.TestCase):
     def _opendb(self, s):
         return ZODB.config.databaseFromString(s)
 
+    def tearDown(self):
+        if getattr(self, "storage", None) is not None:
+            self.storage.cleanup()
+
     def _test(self, s):
         db = self._opendb(s)
+        self.storage = db._storage
         # Do something with the database to make sure it works
         cn = db.open()
         rt = cn.root()
@@ -56,7 +57,6 @@
             """)
 
     def test_file_config1(self):
-        import ZODB.FileStorage
         path = tempfile.mktemp()
         self._test(
             """
@@ -66,10 +66,8 @@
               </filestorage>
             </zodb>
             """ % path)
-        ZODB.FileStorage.cleanup(path)
 
     def test_file_config2(self):
-        import ZODB.FileStorage
         path = tempfile.mktemp()
         cfg = """
         <zodb>
@@ -81,23 +79,6 @@
         </zodb>
         """ % path
         self.assertRaises(ReadOnlyError, self._test, cfg)
-        ZODB.FileStorage.cleanup(path)
-
-    def test_zeo_config(self):
-        # We're looking for a port that doesn't exist so a connection attempt
-        # will fail.  Instead of elaborate logic to loop over a port
-        # calculation, we'll just pick a simple "random", likely to not-exist
-        # port number and add an elaborate comment explaining this instead.
-        # Go ahead, grep for 9.
-        cfg = """
-        <zodb>
-          <zeoclient>
-            server localhost:56897
-            wait false
-          </zeoclient>
-        </zodb>
-        """
-        self.assertRaises(ClientDisconnected, self._test, cfg)
 
     def test_demo_config(self):
         cfg = """
@@ -110,46 +91,31 @@
         """
         self._test(cfg)
 
-class BDBConfigTest(ConfigTestBase):
-    def setUp(self):
-        self._path = tempfile.mktemp()
-        try:
-            os.mkdir(self._path)
-        except OSError, e:
-            if e.errno <> errno.EEXIST:
-                raise
 
-    def tearDown(self):
-        shutil.rmtree(self._path)
-
-    def test_bdbfull_simple(self):
-        cfg = """
-        <zodb>
-          <fullstorage>
-             envdir %s
-          </fullstorage>
-        </zodb>
-        """ % self._path
-        self._test(cfg)
-
-    def test_bdbminimal_simple(self):
+class ZEOConfigTest(ConfigTestBase):
+    def test_zeo_config(self):
+        # We're looking for a port that doesn't exist so a
+        # connection attempt will fail.  Instead of elaborate
+        # logic to loop over a port calculation, we'll just pick a
+        # simple "random", likely to not-exist port number and add
+        # an elaborate comment explaining this instead.  Go ahead,
+        # grep for 9.
+        from ZEO.ClientStorage import ClientDisconnected
         cfg = """
         <zodb>
-          <minimalstorage>
-            envdir %s
-          </minimalstorage>
+          <zeoclient>
+            server localhost:56897
+            wait false
+          </zeoclient>
         </zodb>
-        """ % self._path
-        self._test(cfg)
+        """
+        self.assertRaises(ClientDisconnected, self._test, cfg)
 
 
 def test_suite():
     suite = unittest.TestSuite()
     suite.addTest(unittest.makeSuite(ZODBConfigTest))
-    # Only run the Berkeley tests if they are available
-    import BDBStorage
-    if BDBStorage.is_available:
-        suite.addTest(unittest.makeSuite(BDBConfigTest))
+    suite.addTest(unittest.makeSuite(ZEOConfigTest))
     return suite
 
 

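The relocated test_zeo_config() above keeps the original trick of hard-coding a port that is unlikely to be listening. If that guess ever proved flaky, one alternative sketch (not what this checkin does) is to let the OS hand out a free port and release it before building the config string:

    import socket

    def probably_free_port(host="localhost"):
        # Bind to port 0 so the kernel picks an unused port, then release it.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((host, 0))
        port = s.getsockname()[1]
        s.close()
        return port

    # e.g. cfg = "... server localhost:%d ..." % probably_free_port()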

=== Zope3/src/zope/app/advanced/acquisition/tests/testDB.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testDB.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testDB.py	Thu Apr  1 13:34:39 2004
@@ -14,6 +14,7 @@
 import os
 import time
 import unittest
+import warnings
 
 import ZODB
 import ZODB.FileStorage
@@ -48,9 +49,15 @@
 
     def testSets(self):
         # test set methods that have non-trivial implementations
-        self.db.setCacheDeactivateAfter(12) # deprecated
+        warnings.filterwarnings("error", category=DeprecationWarning)
+        self.assertRaises(DeprecationWarning,
+                          self.db.setCacheDeactivateAfter, 12)
+        self.assertRaises(DeprecationWarning,
+                          self.db.setVersionCacheDeactivateAfter, 12)
+        # XXX There is no API call for removing the warning we just
+        # added, but filters appears to be a public variable.
+        del warnings.filters[0]
         self.db.setCacheSize(15)
-        self.db.setVersionCacheDeactivateAfter(12) # deprecated
         self.db.setVersionCacheSize(15)
 
     def test_removeVersionPool(self):

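testSets() above promotes DeprecationWarning to an error and then deletes warnings.filters[0] by hand, with an XXX noting the lack of a removal API. A slightly more defensive variant of the same idea, still relying only on the public-looking filters list, is to save and restore it wholesale:

    import warnings

    saved = warnings.filters[:]
    warnings.filterwarnings("error", category=DeprecationWarning)
    try:
        pass  # exercise the deprecated calls here; they should now raise
    finally:
        warnings.filters[:] = saved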

=== Zope3/src/zope/app/advanced/acquisition/tests/testDemoStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testDemoStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testDemoStorage.py	Thu Apr  1 13:34:39 2004
@@ -12,7 +12,7 @@
 #
 ##############################################################################
 import ZODB.DemoStorage
-import os, unittest
+import unittest
 
 from ZODB.tests import StorageTestBase, BasicStorage, \
      VersionStorage, Synchronization
@@ -33,6 +33,25 @@
         # This base class test checks for the common case where a storage
         # doesnt support huge transaction metadata. This storage doesnt
         # have this limit, so we inhibit this test here.
+        pass
+
+    def checkAbortVersionNonCurrent(self):
+        # XXX Need to implement a real loadBefore for DemoStorage?
+        pass
+
+    def checkLoadBeforeVersion(self):
+        # XXX Need to implement a real loadBefore for DemoStorage?
+        pass
+
+    # the next three pack tests depend on undo
+
+    def checkPackVersionReachable(self):
+        pass
+
+    def checkPackVersions(self):
+        pass
+
+    def checkPackVersionsInPast(self):
         pass
 
 


=== Zope3/src/zope/app/advanced/acquisition/tests/testFileStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testFileStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testFileStorage.py	Thu Apr  1 13:34:39 2004
@@ -11,16 +11,10 @@
 # FOR A PARTICULAR PURPOSE.
 #
 ##############################################################################
-from __future__ import nested_scopes
-
+import os, unittest
+import transaction
 import ZODB.FileStorage
-import sys, os, unittest
-import errno
-import filecmp
-import StringIO
-from ZODB.Transaction import Transaction
 from ZODB import POSException
-from ZODB.fsrecover import recover
 
 from ZODB.tests import StorageTestBase, BasicStorage, \
      TransactionalUndoStorage, VersionStorage, \
@@ -28,16 +22,30 @@
      Synchronization, ConflictResolution, HistoryStorage, \
      IteratorStorage, Corruption, RevisionStorage, PersistentStorage, \
      MTStorage, ReadOnlyStorage, RecoveryStorage
-from ZODB.tests.StorageTestBase import MinPO, zodb_unpickle
+from ZODB.tests.StorageTestBase import MinPO, zodb_pickle
+
+class BaseFileStorageTests(StorageTestBase.StorageTestBase):
+
+    def open(self, **kwargs):
+        self._storage = ZODB.FileStorage.FileStorage('FileStorageTests.fs',
+                                                     **kwargs)
+
+    def setUp(self):
+        self.open(create=1)
+
+    def tearDown(self):
+        self._storage.close()
+        self._storage.cleanup()
 
 class FileStorageTests(
-    StorageTestBase.StorageTestBase,
+    BaseFileStorageTests,
     BasicStorage.BasicStorage,
     TransactionalUndoStorage.TransactionalUndoStorage,
     RevisionStorage.RevisionStorage,
     VersionStorage.VersionStorage,
     TransactionalUndoVersionStorage.TransactionalUndoVersionStorage,
     PackableStorage.PackableStorage,
+    PackableStorage.PackableUndoStorage,
     Synchronization.SynchronizedStorage,
     ConflictResolution.ConflictResolvingStorage,
     ConflictResolution.ConflictResolvingTransUndoStorage,
@@ -49,17 +57,6 @@
     ReadOnlyStorage.ReadOnlyStorage
     ):
 
-    def open(self, **kwargs):
-        self._storage = ZODB.FileStorage.FileStorage('FileStorageTests.fs',
-                                                     **kwargs)
-
-    def setUp(self):
-        self.open(create=1)
-
-    def tearDown(self):
-        self._storage.close()
-        StorageTestBase.removefs("FileStorageTests.fs")
-
     def checkLongMetadata(self):
         s = "X" * 75000
         try:
@@ -170,15 +167,6 @@
 
         self.failUnless(self._storage._records_before_save > 20)
 
-    # There are a bunch of tests that the current pack() implementation
-    # does not past.  We need to fix pack(), but don't want tests to
-    # fail until then.
-
-    def checkPackVersionsInPast(self):
-        pass
-
-    def checkPackAfterUndoDeletion(self):
-        pass
 
 class FileStorageRecoveryTest(
     StorageTestBase.StorageTestBase,
@@ -186,28 +174,43 @@
     ):
 
     def setUp(self):
-        StorageTestBase.removefs("Source.fs")
-        StorageTestBase.removefs("Dest.fs")
-        self._storage = ZODB.FileStorage.FileStorage('Source.fs')
-        self._dst = ZODB.FileStorage.FileStorage('Dest.fs')
+        self._storage = ZODB.FileStorage.FileStorage("Source.fs", create=True)
+        self._dst = ZODB.FileStorage.FileStorage("Dest.fs", create=True)
 
     def tearDown(self):
         self._storage.close()
         self._dst.close()
-        StorageTestBase.removefs("Source.fs")
-        StorageTestBase.removefs("Dest.fs")
+        self._storage.cleanup()
+        self._dst.cleanup()
 
     def new_dest(self):
-        StorageTestBase.removefs('Dest.fs')
         return ZODB.FileStorage.FileStorage('Dest.fs')
 
+class SlowFileStorageTest(BaseFileStorageTests):
+
+    level = 2
+
+    def check10Kstores(self):
+        # The _get_cached_serial() method has a special case
+        # every 8000 calls.  Make sure it gets minimal coverage.
+        oids = [[self._storage.new_oid(), None] for i in range(100)]
+        for i in range(100):
+            t = transaction.Transaction()
+            self._storage.tpc_begin(t)
+            for j in range(100):
+                o = MinPO(j)
+                oid, revid = oids[j]
+                serial = self._storage.store(oid, revid, zodb_pickle(o), "", t)
+                oids[j][1] = serial
+            self._storage.tpc_vote(t)
+            self._storage.tpc_finish(t)
+
 
 def test_suite():
-    suite = unittest.makeSuite(FileStorageTests, 'check')
-    suite2 = unittest.makeSuite(Corruption.FileStorageCorruptTests, 'check')
-    suite3 = unittest.makeSuite(FileStorageRecoveryTest, 'check')
-    suite.addTest(suite2)
-    suite.addTest(suite3)
+    suite = unittest.TestSuite()
+    for klass in [FileStorageTests, Corruption.FileStorageCorruptTests,
+                  FileStorageRecoveryTest, SlowFileStorageTest]:
+        suite.addTest(unittest.makeSuite(klass, "check"))
     return suite
 
 if __name__=='__main__':


=== Zope3/src/zope/app/advanced/acquisition/tests/testMappingStorage.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testMappingStorage.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testMappingStorage.py	Thu Apr  1 13:34:39 2004
@@ -12,14 +12,18 @@
 #
 ##############################################################################
 import ZODB.MappingStorage
-import os, unittest
+import unittest
 
-from ZODB.tests import StorageTestBase, BasicStorage, Synchronization
+from ZODB.tests import StorageTestBase
+from ZODB.tests \
+     import BasicStorage, MTStorage, Synchronization, PackableStorage
 
 class MappingStorageTests(StorageTestBase.StorageTestBase,
-                       BasicStorage.BasicStorage,
-                       Synchronization.SynchronizedStorage,
-                       ):
+                          BasicStorage.BasicStorage,
+                          MTStorage.MTStorage,
+                          PackableStorage.PackableStorage,
+                          Synchronization.SynchronizedStorage,
+                          ):
 
     def setUp(self):
         self._storage = ZODB.MappingStorage.MappingStorage()


=== Zope3/src/zope/app/advanced/acquisition/tests/testPersistentList.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testPersistentList.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testPersistentList.py	Thu Apr  1 13:34:39 2004
@@ -15,7 +15,7 @@
 """
 
 import unittest
-from ZODB.PersistentList import PersistentList
+from persistent.list import PersistentList
 
 l0 = []
 l1 = [0]


=== Zope3/src/zope/app/advanced/acquisition/tests/testPersistentMapping.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testPersistentMapping.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testPersistentMapping.py	Thu Apr  1 13:34:39 2004
@@ -58,7 +58,7 @@
         r = db.open().root()
         r[1] = 1
         r[2] = 2
-##        r[3] = r
+        r[3] = r
         get_transaction().commit()
         # MappingStorage stores serialno + pickle in its _index.
         root_pickle = s._index['\000' * 8][8:]
@@ -67,7 +67,7 @@
         u = cPickle.Unpickler(f)
         klass_info = u.load()
         klass = find_global(*klass_info[0])
-        inst = klass()
+        inst = klass.__new__(klass)
         state = u.load()
         inst.__setstate__(state)
 
@@ -78,7 +78,7 @@
     """Helper for this test suite to get special PersistentMapping"""
 
     if classname == "PersistentMapping":
-        class PersistentMapping:
+        class PersistentMapping(object):
             def __setstate__(self, state):
                 self.__dict__.update(state)
         return PersistentMapping
@@ -88,9 +88,8 @@
         return getattr(mod, classname)
 
 def test_suite():
+    return None
     return unittest.makeSuite(PMTests, 'check')
 
 if __name__ == "__main__":
-    loader = unittest.TestLoader()
-    loader.testMethodPrefix = "check"
-    unittest.main(testLoader=loader)
+    unittest.main()


=== Zope3/src/zope/app/advanced/acquisition/tests/testRecover.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testRecover.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testRecover.py	Thu Apr  1 13:34:39 2004
@@ -23,11 +23,9 @@
 
 import ZODB
 from ZODB.FileStorage import FileStorage
-from ZODB.PersistentMapping import PersistentMapping
 from ZODB.fsrecover import recover
-from ZODB.tests.StorageTestBase import removefs
 
-from ZODB.fsdump import Dumper
+from persistent.mapping import PersistentMapping
 
 class RecoverTest(unittest.TestCase):
 
@@ -46,8 +44,10 @@
         self.storage.close()
         if self.recovered is not None:
             self.recovered.close()
-        removefs(self.path)
-        removefs(self.dest)
+        self.storage.cleanup()
+        temp = FileStorage(self.dest)
+        temp.close()
+        temp.cleanup()
 
     def populate(self):
         db = ZODB.DB(self.storage)


=== Zope3/src/zope/app/advanced/acquisition/tests/testTimeStamp.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testTimeStamp.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testTimeStamp.py	Thu Apr  1 13:34:39 2004
@@ -16,7 +16,7 @@
 import time
 import unittest
 
-from ZODB.TimeStamp import TimeStamp
+from persistent.TimeStamp import TimeStamp
 
 EPSILON = 0.000001
 
@@ -106,7 +106,6 @@
         self.assertEquals(t.hour(), 10)
         self.assertEquals(t.minute(), 48)
         self.assertEquals(round(t.second()), 5)
-        self.assertEquals(t.second(), t.seconds()) # Alias
         self.assertEquals(t.timeTime(), 1011782885)
         t1 = TimeStamp(2002, 1, 23, 10, 48, 10)
         self.assertEquals(str(t1), '2002-01-23 10:48:10.000000')


=== Zope3/src/zope/app/advanced/acquisition/tests/testTransaction.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testTransaction.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testTransaction.py	Thu Apr  1 13:34:39 2004
@@ -11,13 +11,7 @@
 # FOR A PARTICULAR PURPOSE
 #
 ##############################################################################
-
-"""
-Revision information:
-$Id$
-"""
-
-"""
+"""Test transaction behavior for a variety of cases.
 
 I wrote these unittests to investigate some odd transaction
 behavior when doing unittests of integrating non sub transaction
@@ -42,27 +36,24 @@
     add in tests for objects which are modified multiple times,
     for example an object that gets modified in multiple sub txns.
 
+$Id$
 """
 
-import random
-from types import TupleType
 import unittest
-
-from ZODB import Transaction
+import transaction
 
 class TransactionTests(unittest.TestCase):
 
     def setUp(self):
-
-        Transaction.hosed = 0
+        self.orig_tm = transaction.manager
+        transaction.manager = transaction.TransactionManager()
         self.sub1 = DataObject()
         self.sub2 = DataObject()
         self.sub3 = DataObject()
         self.nosub1 = DataObject(nost=1)
 
     def tearDown(self):
-
-        Transaction.free_transaction()
+        transaction.manager = self.orig_tm
 
     # basic tests with two sub trans jars
     # really we only need one, so tests for
@@ -125,18 +116,12 @@
         assert self.sub1._p_jar.cabort_sub == 1
 
     def testMultipleSubTransactionCommitCommit(self):
-
-        # add it
         self.sub1.modify()
-
         get_transaction().commit(1)
 
-        # add another
         self.sub2.modify()
-
         # reset a flag on the original to test it again
         self.sub1.ctpc_finish = 0
-
         get_transaction().commit(1)
 
         # this is interesting.. we go through
@@ -151,7 +136,7 @@
 
         get_transaction().commit()
 
-        # we did an implicit sub commit, is this impl artifiact?
+        # we did an implicit sub commit, is this impl artifact?
         assert self.sub3._p_jar.ccommit_sub == 1
         assert self.sub1._p_jar.ctpc_finish > 1
 
@@ -351,7 +336,6 @@
         assert self.nosub1._p_jar.ctpc_finish == 0
         assert self.nosub1._p_jar.ccommit == 1
         assert self.nosub1._p_jar.ctpc_abort == 1
-        assert Transaction.hosed == 0
 
     def testExceptionInTpcVote(self):
 
@@ -368,7 +352,6 @@
         assert self.nosub1._p_jar.ccommit == 1
         assert self.nosub1._p_jar.ctpc_abort == 1
         assert self.sub1._p_jar.ctpc_abort == 1
-        assert Transaction.hosed == 0
 
     def testExceptionInTpcBegin(self):
         """
@@ -407,29 +390,18 @@
         except TestTxnException: pass
 
         assert self.nosub1._p_jar.ctpc_abort == 1
-        assert Transaction.hosed == 0
 
     ### More Failure modes...
     # now we mix in some sub transactions
     ###
 
     def testExceptionInSubCommitSub(self):
-        """
-        this tests exhibits some odd behavior,
-        nothing thats technically incorrect...
-
-        basically it seems non deterministic, even
-        stranger the behavior seems dependent on what
-        values i test after the fact... very odd,
-        almost relativistic.
-
-        in-retrospect this is from the fact that
-        dictionaries are used to store jars at some point
-
-        """
+        # It's harder than normal to verify test results, because
+        # the subtransaction jars are stored in a dictionary.  The
+        # order in which jars are processed depends on the order
+        # they come out of the dictionary.
 
         self.sub1.modify()
-
         get_transaction().commit(1)
 
         self.nosub1.modify()
@@ -443,24 +415,30 @@
 
         try:
             get_transaction().commit()
-        except TestTxnException: pass
-
+        except TestTxnException:
+            pass
 
-        # odd this doesn't seem to be entirely deterministic..
         if self.sub1._p_jar.ccommit_sub:
-            assert self.sub1._p_jar.ctpc_abort == 1
+            self.assertEqual(self.sub1._p_jar.ctpc_abort, 1)
         else:
-            assert self.sub1._p_jar.cabort_sub == 1
+            self.assertEqual(self.sub1._p_jar.cabort_sub, 1)
+
+        self.assertEqual(self.sub2._p_jar.ctpc_abort, 1)
+        self.assertEqual(self.nosub1._p_jar.ctpc_abort, 1)
 
         if self.sub3._p_jar.ccommit_sub:
-            assert self.sub3._p_jar.ctpc_abort == 1
+            self.assertEqual(self.sub3._p_jar.ctpc_abort, 1)
         else:
-            assert self.sub3._p_jar.cabort_sub == 1
-
-        assert self.sub2._p_jar.ctpc_abort == 1
-        assert self.nosub1._p_jar.ctpc_abort == 1
+            self.assertEqual(self.sub3._p_jar.cabort_sub, 1)
 
     def testExceptionInSubAbortSub(self):
+        # This test has two errors.  When commit_sub() is called on
+        # sub1, it will fail.  If sub1 is handled first, it will raise
+        # an exception and abort_sub() will be called on sub2.  If sub2
+        # is handled first, then commit_sub() will fail after sub2 has
+        # already begun its top-level transaction and tpc_abort() will
+        # be called.
+
         self.sub1._p_jar = SubTransactionJar(errors='commit_sub')
         self.sub1.modify(nojar=1)
         get_transaction().commit(1)
@@ -483,51 +461,47 @@
         # called, then tpc_abort() should be called to abort the
         # actual transaction.  If not, then calling abort_sub() is
         # sufficient.
-        if self.sub3._p_jar.ccommit_sub == 1:
+        if self.sub3._p_jar.ccommit_sub:
             self.assertEqual(self.sub3._p_jar.ctpc_abort, 1)
         else:
             self.assertEqual(self.sub3._p_jar.cabort_sub, 1)
 
     # last test, check the hosing mechanism
 
-    def testHoserStoppage(self):
-        # XXX We should consult ZConfig to decide whether we can get into a
-        # hosed state or not.
-        return
-
-        # It's hard to test the "hosed" state of the database, where
-        # hosed means that a failure occurred in the second phase of
-        # the two phase commit.  It's hard because the database can
-        # recover from such an error if it occurs during the very first
-        # tpc_finish() call of the second phase.
-
-        for obj in self.sub1, self.sub2:
-            j = HoserJar(errors='tpc_finish')
-            j.reset()
-            obj._p_jar = j
-            obj.modify(nojar=1)
-
-        try:
-            get_transaction().commit()
-        except TestTxnException:
-            pass
-
-        self.assert_(Transaction.hosed)
-
-        self.sub2.modify()
-
-        try:
-            get_transaction().commit()
-        except Transaction.POSException.TransactionError:
-            pass
-        else:
-            self.fail("Hosed Application didn't stop commits")
+##    def testHoserStoppage(self):
+##        # It's hard to test the "hosed" state of the database, where
+##        # hosed means that a failure occurred in the second phase of
+##        # the two phase commit.  It's hard because the database can
+##        # recover from such an error if it occurs during the very first
+##        # tpc_finish() call of the second phase.
+
+##        for obj in self.sub1, self.sub2:
+##            j = HoserJar(errors='tpc_finish')
+##            j.reset()
+##            obj._p_jar = j
+##            obj.modify(nojar=1)
+
+##        try:
+##            get_transaction().commit()
+##        except TestTxnException:
+##            pass
+
+##        self.assert_(Transaction.hosed)
+
+##        self.sub2.modify()
+
+##        try:
+##            get_transaction().commit()
+##        except Transaction.POSException.TransactionError:
+##            pass
+##        else:
+##            self.fail("Hosed Application didn't stop commits")
 
 
 class DataObject:
 
     def __init__(self, nost=0):
-        self.nost= nost
+        self.nost = nost
         self._p_jar = None
 
     def modify(self, nojar=0, tracing=0):
@@ -544,7 +518,7 @@
 class BasicJar:
 
     def __init__(self, errors=(), tracing=0):
-        if not isinstance(errors, TupleType):
+        if not isinstance(errors, tuple):
             errors = errors,
         self.errors = errors
         self.tracing = tracing
@@ -558,7 +532,12 @@
         self.ccommit_sub = 0
 
     def __repr__(self):
-        return "<jar %X %s>" % (id(self), self.errors)
+        return "<%s %X %s>" % (self.__class__.__name__, id(self), self.errors)
+
+    def sortKey(self):
+        # All these jars use the same sort key, and Python's list.sort()
+        # is stable, so jars with equal keys keep their insertion order.
+        return self.__class__.__name__
 
     def check(self, method):
         if self.tracing:
@@ -628,9 +607,39 @@
         HoserJar.committed += 1
 
 
+def test_join():
+    """White-box test of the join method
+
+    The join method is provided for "backward-compatibility" with ZODB 4
+    data managers.
+
+    The argument to join must be a zodb4 data manager,
+    transaction.interfaces.IDataManager.
+
+    >>> from ZODB.tests.sampledm import DataManager
+    >>> from transaction._transaction import DataManagerAdapter
+    >>> t = transaction.Transaction()
+    >>> dm = DataManager()
+    >>> t.join(dm)
+
+    The end result is that a data manager adapter is one of the
+    transaction's objects:
+
+    >>> isinstance(t._resources[0], DataManagerAdapter)
+    True
+    >>> t._resources[0]._datamanager is dm
+    True
+
+    """
+
 def test_suite():
+    from doctest import DocTestSuite
+    return unittest.TestSuite((
+        DocTestSuite(),
+        unittest.makeSuite(TransactionTests),
+        ))
 
-    return unittest.makeSuite(TransactionTests)
 
 if __name__ == '__main__':
     unittest.TextTestRunner().run(test_suite())
+

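The new BasicJar.sortKey() above returns the class name for every jar, so all the test jars compare as equal; the comment relies on list.sort() being stable, meaning equal elements keep their insertion order. A standalone check of that property:

    jars = ["sub1", "sub2", "nosub1", "sub3"]
    ordered = list(jars)
    ordered.sort(key=lambda name: "same key for everyone")
    # A stable sort may not reorder elements that compare equal.
    assert ordered == jars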

=== Zope3/src/zope/app/advanced/acquisition/tests/testUtils.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testUtils.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testUtils.py	Thu Apr  1 13:34:39 2004
@@ -15,6 +15,7 @@
 
 import random
 import unittest
+from persistent import Persistent
 
 NUM = 100
 
@@ -43,6 +44,14 @@
         self.assertEquals(U64("\000\000\000\000\000\000\000\001"), 1)
         self.assertEquals(u64("\000\000\000\001\000\000\000\000"), 1L<<32)
         self.assertEquals(U64("\000\000\000\001\000\000\000\000"), 1L<<32)
+
+    def checkPersistentIdHandlesDescriptor(self):
+        from ZODB.serialize import BaseObjectWriter
+        class P(Persistent):
+            pass
+
+        writer = BaseObjectWriter(None)
+        self.assertEqual(writer.persistent_id(P), None)
 
 def test_suite():
     return unittest.makeSuite(TestUtils, 'check')
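For readers unfamiliar with the helpers exercised above: u64/U64 (and their inverse p64) in ZODB.utils convert between 64-bit integers and the 8-byte big-endian strings ZODB uses for oids and serials. A quick illustration, not part of the checkin:

    from ZODB.utils import p64, u64

    oid = p64(1)                    # -> '\x00\x00\x00\x00\x00\x00\x00\x01'
    assert u64(oid) == 1
    assert u64(p64(1L << 32)) == 1L << 32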


=== Zope3/src/zope/app/advanced/acquisition/tests/testZODB.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testZODB.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testZODB.py	Thu Apr  1 13:34:39 2004
@@ -11,16 +11,15 @@
 # FOR A PARTICULAR PURPOSE.
 #
 ##############################################################################
-from __future__ import nested_scopes
-
 import unittest
 
 import ZODB
 import ZODB.FileStorage
-from ZODB.PersistentMapping import PersistentMapping
 from ZODB.POSException import ReadConflictError, ConflictError
-from ZODB.tests.StorageTestBase import removefs
-from Persistence import Persistent
+
+from persistent import Persistent
+from persistent.mapping import PersistentMapping
+import transaction
 
 class P(Persistent):
     pass
@@ -55,74 +54,78 @@
 
     def tearDown(self):
         self._db.close()
-        removefs("ZODBTests.fs")
+        self._storage.cleanup()
 
-    def checkExportImport(self, abort_it=0, dup_name='test_duplicate'):
+    def checkExportImport(self, abort_it=False):
         self.populate()
-        get_transaction().begin()
-        get_transaction().note('duplication')
-        # Duplicate the 'test' object.
         conn = self._db.open()
         try:
-            root = conn.root()
-            ob = root['test']
-            assert len(ob) > 10, 'Insufficient test data'
-            try:
-                import tempfile
-                f = tempfile.TemporaryFile()
-                ob._p_jar.exportFile(ob._p_oid, f)
-                assert f.tell() > 0, 'Did not export correctly'
-                f.seek(0)
-                new_ob = ob._p_jar.importFile(f)
-                root[dup_name] = new_ob
-                f.close()
-                if abort_it:
-                    get_transaction().abort()
-                else:
-                    get_transaction().commit()
-            except:
-                get_transaction().abort()
-                raise
+            self.duplicate(conn, abort_it)
         finally:
             conn.close()
-        get_transaction().begin()
-        # Verify the duplicate.
         conn = self._db.open()
         try:
-            root = conn.root()
-            ob = root['test']
-            try:
-                ob2 = root[dup_name]
-            except KeyError:
-                if abort_it:
-                    # Passed the test.
-                    return
-                else:
-                    raise
-            else:
-                if abort_it:
-                    assert 0, 'Did not abort duplication'
-            l1 = list(ob.items())
-            l1.sort()
-            l2 = list(ob2.items())
-            l2.sort()
-            l1 = map(lambda (k, v): (k, v[0]), l1)
-            l2 = map(lambda (k, v): (k, v[0]), l2)
-            assert l1 == l2, 'Duplicate did not match'
-            assert ob._p_oid != ob2._p_oid, 'Did not duplicate'
-            assert ob._p_jar == ob2._p_jar, 'Not same connection'
-            oids = {}
-            for v in ob.values():
-                oids[v._p_oid] = 1
-            for v in ob2.values():
-                assert not oids.has_key(v._p_oid), (
-                    'Did not fully separate duplicate from original')
-            get_transaction().commit()
+            self.verify(conn, abort_it)
         finally:
             conn.close()
 
+    def duplicate(self, conn, abort_it):
+        get_transaction().begin()
+        get_transaction().note('duplication')
+        root = conn.root()
+        ob = root['test']
+        assert len(ob) > 10, 'Insufficient test data'
+        try:
+            import tempfile
+            f = tempfile.TemporaryFile()
+            ob._p_jar.exportFile(ob._p_oid, f)
+            assert f.tell() > 0, 'Did not export correctly'
+            f.seek(0)
+            new_ob = ob._p_jar.importFile(f)
+            self.assertEqual(new_ob, ob)
+            root['dup'] = new_ob
+            f.close()
+            if abort_it:
+                get_transaction().abort()
+            else:
+                get_transaction().commit()
+        except:
+            get_transaction().abort()
+            raise
+
+    def verify(self, conn, abort_it):
+        get_transaction().begin()
+        root = conn.root()
+        ob = root['test']
+        try:
+            ob2 = root['dup']
+        except KeyError:
+            if abort_it:
+                # Passed the test.
+                return
+            else:
+                raise
+        else:
+            self.failUnless(not abort_it, 'Did not abort duplication')
+        l1 = list(ob.items())
+        l1.sort()
+        l2 = list(ob2.items())
+        l2.sort()
+        l1 = map(lambda (k, v): (k, v[0]), l1)
+        l2 = map(lambda (k, v): (k, v[0]), l2)
+        self.assertEqual(l1, l2)
+        self.assert_(ob._p_oid != ob2._p_oid)
+        self.assertEqual(ob._p_jar, ob2._p_jar)
+        oids = {}
+        for v in ob.values():
+            oids[v._p_oid] = 1
+        for v in ob2.values():
+            assert not oids.has_key(v._p_oid), (
+                'Did not fully separate duplicate from original')
+        get_transaction().commit()
+
     def checkExportImportAborted(self):
-        self.checkExportImport(abort_it=1, dup_name='test_duplicate_aborted')
+        self.checkExportImport(abort_it=True)
 
     def checkVersionOnly(self):
         # Make sure the changes to make empty transactions a no-op
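Condensed, the round trip that the new duplicate()/verify() helpers exercise looks like this (object names are illustrative; 'conn' is an open connection whose root already holds a committed mapping under 'test'):

    import tempfile

    f = tempfile.TemporaryFile()
    ob = conn.root()['test']               # an already-committed persistent object
    ob._p_jar.exportFile(ob._p_oid, f)     # serialize ob and its subobjects
    f.seek(0)
    copy = ob._p_jar.importFile(f)         # read them back as brand-new objects
    conn.root()['dup'] = copy
    get_transaction().commit()             # the copy ends up with its own oids
    f.close()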
@@ -139,16 +142,67 @@
         get_transaction().commit()
 
     def checkResetCache(self):
-        # The cache size after a reset should be 0 and the GC attributes
-        # ought to be linked to it rather than the old cache.
+        # The cache size after a reset should be 0.  Note that
+        # _resetCache is not a public API, but the resetCaches()
+        # function is, and resetCaches() causes _resetCache() to be
+        # called.
         self.populate()
         conn = self._db.open()
         conn.root()
         self.assert_(len(conn._cache) > 0)  # Precondition
         conn._resetCache()
         self.assertEqual(len(conn._cache), 0)
-        self.assert_(conn._incrgc == conn._cache.incrgc)
-        self.assert_(conn.cacheGC == conn._cache.incrgc)
+
+    def checkResetCachesAPI(self):
+        # Checks the resetCaches() API.
+        # (resetCaches used to be called updateCodeTimestamp.)
+        self.populate()
+        conn = self._db.open()
+        conn.root()
+        self.assert_(len(conn._cache) > 0)  # Precondition
+        ZODB.Connection.resetCaches()
+        conn.close()
+        self.assert_(len(conn._cache) > 0)  # Still not flushed
+        conn._setDB(self._db)  # simulate the connection being reopened
+        self.assertEqual(len(conn._cache), 0)
+
+    def checkExplicitTransactionManager(self):
+        # Test of transactions that apply to only the connection,
+        # not the thread.
+        tm1 = transaction.TransactionManager()
+        conn1 = self._db.open(txn_mgr=tm1)
+        tm2 = transaction.TransactionManager()
+        conn2 = self._db.open(txn_mgr=tm2)
+        try:
+            r1 = conn1.root()
+            r2 = conn2.root()
+            if r1.has_key('item'):
+                del r1['item']
+                tm1.get().commit()
+            r1.get('item')
+            r2.get('item')
+            r1['item'] = 1
+            tm1.get().commit()
+            self.assertEqual(r1['item'], 1)
+            # r2 has not seen a transaction boundary,
+            # so it should be unchanged.
+            self.assertEqual(r2.get('item'), None)
+            conn2.sync()
+            # Now r2 is updated.
+            self.assertEqual(r2['item'], 1)
+
+            # Now, for good measure, send an update in the other direction.
+            r2['item'] = 2
+            tm2.get().commit()
+            self.assertEqual(r1['item'], 1)
+            self.assertEqual(r2['item'], 2)
+            conn1.sync()
+            conn2.sync()
+            self.assertEqual(r1['item'], 2)
+            self.assertEqual(r2['item'], 2)
+        finally:
+            conn1.close()
+            conn2.close()
 
     def checkLocalTransactions(self):
         # Test of transactions that apply to only the connection,
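Distilled from checkExplicitTransactionManager() above, the per-connection transaction-manager usage it verifies (sketch only; 'db' stands for an already-opened ZODB.DB instance):

    import transaction

    tm1 = transaction.TransactionManager()
    tm2 = transaction.TransactionManager()
    conn1 = db.open(txn_mgr=tm1)
    conn2 = db.open(txn_mgr=tm2)
    r1, r2 = conn1.root(), conn2.root()

    r1['item'] = 1
    tm1.get().commit()                  # commits only conn1's transaction
    assert r2.get('item') is None       # conn2 has seen no boundary yet
    conn2.sync()                        # ... until it explicitly syncs
    assert r2['item'] == 1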
@@ -199,7 +253,7 @@
         # error because the object state read is not necessarily
         # consistent with the objects read earlier in the transaction.
 
-        conn = self._db.open()
+        conn = self._db.open(mvcc=False)
         conn.setLocalTransaction()
         r1 = conn.root()
         r1["p"] = self.obj
@@ -207,7 +261,7 @@
         conn.getTransaction().commit()
 
         # start a new transaction with a new connection
-        cn2 = self._db.open()
+        cn2 = self._db.open(mvcc=False)
         # start a new transaction with the other connection
         cn2.setLocalTransaction()
         r2 = cn2.root()
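The mvcc=False arguments added throughout these tests keep exercising the pre-MVCC behaviour: without MVCC, loading an object that another connection has modified and committed since the current transaction began raises ReadConflictError, whereas with MVCC (now the default) the connection simply serves the older revision. A rough sketch of the pattern, with hypothetical object names:

    from ZODB.POSException import ReadConflictError

    cn = db.open(mvcc=False)
    root = cn.root()
    index = root['index']               # read some state in this transaction
    # ... a second connection modifies root['index'] and commits here ...
    try:
        stale = index[0]                # touching invalidated state may conflict
    except ReadConflictError:
        get_transaction().abort()       # even a caught read conflict poisons the
                                        # transaction; abort and retry is the
                                        # only safe response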
@@ -232,7 +286,7 @@
     def checkReadConflictIgnored(self):
         # Test that an application that catches a read conflict and
         # continues can not commit the transaction later.
-        root = self._db.open().root()
+        root = self._db.open(mvcc=False).root()
         root["real_data"] = real_data = PersistentMapping()
         root["index"] = index = PersistentMapping()
 
@@ -243,7 +297,7 @@
         get_transaction().commit()
 
         # load some objects from one connection
-        cn2 = self._db.open()
+        cn2 = self._db.open(mvcc=False)
         cn2.setLocalTransaction()
         r2 = cn2.root()
         real_data2 = r2["real_data"]
@@ -258,7 +312,7 @@
         try:
             del index2[0]["a"]
         except ReadConflictError:
-            # This is the crux of the test.  Ignore the error.
+            # This is the crux of the test.  Ignore the error.
             pass
         else:
             self.fail("No conflict occurred")
@@ -274,54 +328,6 @@
         self.assertRaises(ConflictError, cn2.getTransaction().commit)
         get_transaction().abort()
 
-    def checkIgnoreReadConflict(self):
-        # Test that an application that catches a read conflict and
-        # continues can not commit the transaction later.
-        root = self._db.open().root()
-        root["real_data"] = real_data = PersistentMapping()
-        root["index"] = index = PersistentMapping()
-
-        real_data["a"] = PersistentMapping({"indexed_value": 0})
-        real_data["b"] = PersistentMapping({"indexed_value": 1})
-        index[1] = PersistentMapping({"b": 1})
-        index[0] = PersistentMapping({"a": 1})
-        get_transaction().commit()
-
-        # load some objects from one connection
-        cn2 = self._db.open()
-        cn2.setLocalTransaction()
-        r2 = cn2.root()
-        real_data2 = r2["real_data"]
-        index2 = r2["index"]
-
-        real_data["b"]["indexed_value"] = 0
-        del index[1]["b"]
-        index[0]["b"] = 1
-        get_transaction().commit()
-
-        del real_data2["a"]
-        try:
-            del index2[0]["a"]
-        except ReadConflictError, obj:
-            # The point of this test is to make sure that the conflict
-            # can be ignored.
-            obj.ignore()
-        else:
-            self.fail("No conflict occurred")
-
-        # real_data2 still ready to commit
-        self.assert_(real_data2._p_changed)
-
-        # index2 values not ready to commit
-        self.assert_(not index2._p_changed)
-        self.assert_(not index2[0]._p_changed)
-        self.assert_(not index2[1]._p_changed)
-
-        # The commit should succeed despite the ReadConflictError, because
-        # we explicitly said to ignore it.
-        cn2.getTransaction().commit()
-        get_transaction().abort()
-
     def checkIndependent(self):
         self.obj = Independent()
         self.readConflict(shouldFail=0)
@@ -332,3 +338,6 @@
 
 def test_suite():
     return unittest.makeSuite(ZODBTests, 'check')
+
+if __name__ == "__main__":
+    unittest.main(defaultTest="test_suite")


=== Zope3/src/zope/app/advanced/acquisition/tests/testfsIndex.py 1.1 => 1.2 ===
--- Zope3/src/zope/app/advanced/acquisition/tests/testfsIndex.py:1.1	Thu Apr  1 13:26:43 2004
+++ Zope3/src/zope/app/advanced/acquisition/tests/testfsIndex.py	Thu Apr  1 13:34:39 2004
@@ -11,7 +11,7 @@
 # FOR A PARTICULAR PURPOSE.
 #
 ##############################################################################
-import unittest, sys
+import unittest
 from ZODB.fsIndex import fsIndex
 from ZODB.utils import p64
 



