[Zodb-checkins] CVS: ZODB3/BDBStorage/tests - BerkeleyTestBase.py:1.7.4.2 test_autopack.py:1.6.2.4 test_create.py:1.11.6.3 test_storage_api.py:1.23.2.3 test_whitebox.py:1.4.2.2
Barry Warsaw
barry@wooz.org
Mon, 27 Jan 2003 18:19:08 -0500
Update of /cvs-repository/ZODB3/BDBStorage/tests
In directory cvs.zope.org:/tmp/cvs-serv9345/BDBStorage/tests
Modified Files:
Tag: ZODB3-3_1-branch
BerkeleyTestBase.py test_autopack.py test_create.py
test_storage_api.py test_whitebox.py
Log Message:
Backport Berkeley storage fixes from the 3.2 trunk.
=== ZODB3/BDBStorage/tests/BerkeleyTestBase.py 1.7.4.1 => 1.7.4.2 ===
--- ZODB3/BDBStorage/tests/BerkeleyTestBase.py:1.7.4.1 Tue Jan 7 14:40:44 2003
+++ ZODB3/BDBStorage/tests/BerkeleyTestBase.py Mon Jan 27 18:19:02 2003
@@ -1,6 +1,6 @@
##############################################################################
#
-# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
+# Copyright (c) 2001 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
@@ -17,6 +17,7 @@
import os
import errno
+import shutil
from BDBStorage.BerkeleyBase import BerkeleyConfig
from ZODB.tests.StorageTestBase import StorageTestBase
@@ -26,40 +27,49 @@
class BerkeleyTestBase(StorageTestBase):
- def _zap_dbhome(self, dir):
- # If the tests exited with any uncommitted objects, they'll blow up
- # subsequent tests because the next transaction commit will try to
- # commit those object. But they're tied to closed databases, so
- # that's broken. Aborting the transaction now saves us the headache.
- try:
- for file in os.listdir(dir):
- os.unlink(os.path.join(dir, file))
- os.removedirs(dir)
- except OSError, e:
- if e.errno <> errno.ENOENT:
- raise
-
- def _mk_dbhome(self, dir):
+ def _config(self):
# Checkpointing just slows the tests down because we have to wait for
# the thread to properly shutdown. This can take up to 10 seconds, so
# for the purposes of the test suite we shut off this thread.
config = BerkeleyConfig()
config.interval = 0
+ return config
+
+ def _envdir(self):
+ return DBHOME
+
+ def open(self):
+ self._storage = self.ConcreteStorage(
+ self._envdir(), config=self._config())
+
+ def _zap_dbhome(self, dir=None):
+ if dir is None:
+ dir = self._envdir()
+ # XXX Before Python 2.3, shutil.rmtree() doesn't ignore a missing
+ # directory even when ignore_errors (the second argument) is True.
+ try:
+ shutil.rmtree(dir, True)
+ except OSError, e:
+ if e.errno <> errno.ENOENT: raise
+
+ def _mk_dbhome(self, dir=None):
+ if dir is None:
+ dir = self._envdir()
os.mkdir(dir)
try:
- return self.ConcreteStorage(dir, config=config)
+ return self.ConcreteStorage(dir, config=self._config())
except:
- self._zap_dbhome(dir)
+ self._zap_dbhome()
raise
def setUp(self):
StorageTestBase.setUp(self)
- self._zap_dbhome(DBHOME)
- self._storage = self._mk_dbhome(DBHOME)
+ self._zap_dbhome()
+ self.open()
def tearDown(self):
StorageTestBase.tearDown(self)
- self._zap_dbhome(DBHOME)
+ self._zap_dbhome()
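
For reference: the BerkeleyTestBase refactoring above replaces the hard-wired
DBHOME handling with three overridable hooks: _config() builds the
BerkeleyConfig, _envdir() names the environment directory, and open() creates
the concrete storage, so subclasses can customize setup without duplicating
setUp()/tearDown(). A minimal sketch of a subclass using the new hooks, with
the class name and directory chosen only for illustration and the imports
assuming the package layout shown in the file headers (the config attributes
mirror the ones exercised in test_autopack.py below):

    from BDBStorage.BerkeleyBase import BerkeleyConfig
    from BDBStorage.BDBFullStorage import BDBFullStorage
    from BDBStorage.tests.BerkeleyTestBase import BerkeleyTestBase

    class ExampleFullTest(BerkeleyTestBase):
        ConcreteStorage = BDBFullStorage

        def _config(self):
            # Autopack every second, keep two seconds of history, and skip
            # classic packs -- the same knobs TestAutopackBase tweaks.
            config = BerkeleyConfig()
            config.frequency = 1
            config.packtime = 2
            config.classicpack = 0
            return config

        def _envdir(self):
            # Give this test its own Berkeley environment directory instead
            # of the shared DBHOME default.
            return 'test-example-db'

The inherited setUp() then zaps any stale environment and calls open(), and
tearDown() zaps it again, exactly as in the base class above.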
=== ZODB3/BDBStorage/tests/test_autopack.py 1.6.2.3 => 1.6.2.4 ===
--- ZODB3/BDBStorage/tests/test_autopack.py:1.6.2.3 Wed Jan 22 11:59:41 2003
+++ ZODB3/BDBStorage/tests/test_autopack.py Mon Jan 27 18:19:02 2003
@@ -12,8 +12,6 @@
#
##############################################################################
-from __future__ import nested_scopes
-
import os
import time
import unittest
@@ -55,9 +53,9 @@
class TestAutopackBase(BerkeleyTestBase):
def _config(self):
config = BerkeleyConfig()
- # Autopack every 3 seconds, 6 seconds into the past, no classic packs
- config.frequency = 3
- config.packtime = 6
+ # Autopack every 1 second, 2 seconds into the past, no classic packs
+ config.frequency = 1
+ config.packtime = 2
config.classicpack = 0
return config
@@ -66,7 +64,7 @@
# BAW: this uses a non-public interface
packtime = storage._autopacker._nextcheck
while packtime == storage._autopacker._nextcheck:
- time.sleep(1)
+ time.sleep(0.1)
def _mk_dbhome(self, dir):
# Create the storage
@@ -100,8 +98,8 @@
unless(storage.loadSerial(oid, revid1))
unless(storage.loadSerial(oid, revid2))
unless(storage.loadSerial(oid, revid3))
- # Should be enough time for the revisions to get packed away
- time.sleep(10)
+ # Two more autopacks ought to be enough to pack away old revisions
+ self._wait_for_next_autopack()
self._wait_for_next_autopack()
# The first two revisions should now be gone, but the third should
# still exist because it's the current revision, and we haven't done a
@@ -117,9 +115,10 @@
def _config(self):
config = BerkeleyConfig()
- # Autopack every 3 seconds, 6 seconds into the past, no classic packs
- config.frequency = 3
- config.packtime = 6
+ # Autopack every 1 second, 2 seconds into the past, classic packing
+ # every time.
+ config.frequency = 1
+ config.packtime = 2
config.classicpack = 1
return config
@@ -141,8 +140,8 @@
unless(storage.loadSerial(oid, revid1))
unless(storage.loadSerial(oid, revid2))
unless(storage.loadSerial(oid, revid3))
- # Should be enough time for the revisions to get packed away
- time.sleep(10)
+ # Two more autopacks ought to be enough to pack away old revisions
+ self._wait_for_next_autopack()
self._wait_for_next_autopack()
# The first two revisions should now be gone, but the third should
# still exist because it's the current revision, and we haven't done a
@@ -282,6 +281,113 @@
# clean up any outstanding transactions
get_transaction().abort()
+ def _getPackThread(self, storage):
+ raise NotImplementedError
+
+ def testRaceCondition(self):
+ unless = self.failUnless
+ storage = self._storage
+ db = DB(storage)
+ conn = db.open()
+ root = conn.root()
+ # Start by storing a root reachable object.
+ obj1 = C()
+ obj1.value = 888
+ root.obj1 = obj1
+ txn = get_transaction()
+ txn.note('root -> obj1')
+ txn.commit()
+ # Now, start a transaction, store an object, but don't yet complete
+ # the transaction. This will ensure that the second object has a tid
+ # < packtime, but it won't be root reachable yet.
+ obj2 = C()
+ t = Transaction()
+ storage.tpc_begin(t)
+ obj2sn = storage.store('\0'*7 + '\2', ZERO, zodb_pickle(obj2), '', t)
+ # Now, acquire the condvar lock and start a thread that will do a
+ # pack, up to the _sweep call. Wait for the _mark() call to
+ # complete.
+ now = time.time()
+ while now == time.time():
+ time.sleep(0.1)
+ self._cv.acquire()
+ packthread = self._getPackThread(storage)
+ packthread.start()
+ self._cv.wait()
+ # Now that the _mark() has finished, complete the transaction, which
+ # links the object to root.
+ root.obj2 = obj2
+ rootsn = storage.getSerial(ZERO)
+ rootsn = storage.store(ZERO, rootsn, zodb_pickle(root), '', t)
+ storage.tpc_vote(t)
+ storage.tpc_finish(t)
+ # And notify the pack thread that it can do the sweep and collect
+ self._cv.notify()
+ self._cv.wait()
+ # We're done with the condvar and the thread
+ self._cv.release()
+ packthread.join()
+ # Now make sure that all the interesting objects are still available
+ rootsn = storage.getSerial(ZERO)
+ obj1sn = storage.getSerial('\0'*7 + '\1')
+ obj2sn = storage.getSerial('\0'*7 + '\2')
+ # obj1 revision was written before the second revision of the root
+ unless(obj1sn < rootsn)
+ unless(rootsn == obj2sn)
+ unless(obj1sn < obj2sn)
+
+ def testEarlierRaceCondition(self):
+ unless = self.failUnless
+ storage = self._storage
+ db = DB(storage)
+ conn = db.open()
+ root = conn.root()
+ # Start by storing a root reachable object.
+ obj1 = C()
+ obj1.value = 888
+ root.obj1 = obj1
+ txn = get_transaction()
+ txn.note('root -> obj1')
+ txn.commit()
+ # Now, start a transaction, store an object, but don't yet complete
+ # the transaction. This will ensure that the second object has a tid
+ # < packtime, but it won't be root reachable yet.
+ obj2 = C()
+ t = Transaction()
+ storage.tpc_begin(t)
+ # Now, acquire the condvar lock and start a thread that will do a
+ # pack, up to the _sweep call. Wait for the _mark() call to
+ # complete.
+ now = time.time()
+ while now == time.time():
+ time.sleep(0.1)
+ self._cv.acquire()
+ packthread = self._getPackThread(storage)
+ packthread.start()
+ self._cv.wait()
+ obj2sn = storage.store('\0'*7 + '\2', ZERO, zodb_pickle(obj2), '', t)
+ # Now that the _mark() has finished, complete the transaction, which
+ # links the object to root.
+ root.obj2 = obj2
+ rootsn = storage.getSerial(ZERO)
+ rootsn = storage.store(ZERO, rootsn, zodb_pickle(root), '', t)
+ storage.tpc_vote(t)
+ storage.tpc_finish(t)
+ # And notify the pack thread that it can do the sweep and collect
+ self._cv.notify()
+ self._cv.wait()
+ # We're done with the condvar and the thread
+ self._cv.release()
+ packthread.join()
+ # Now make sure that all the interesting objects are still available
+ rootsn = storage.getSerial(ZERO)
+ obj1sn = storage.getSerial('\0'*7 + '\1')
+ obj2sn = storage.getSerial('\0'*7 + '\2')
+ # obj1 revision was written before the second revision of the root
+ unless(obj1sn < rootsn)
+ unless(rootsn == obj2sn)
+ unless(obj1sn < obj2sn)
+
# Subclass which does ugly things to _dopack so we can actually test the race
@@ -362,57 +468,8 @@
class TestFullClassicPackRaceCondition(RaceConditionBase):
ConcreteStorage = SynchronizedFullStorage
- def testRaceCondition(self):
- unless = self.failUnless
- storage = self._storage
- db = DB(storage)
- conn = db.open()
- root = conn.root()
- # Start by storing a root reachable object.
- obj1 = C()
- obj1.value = 888
- root.obj1 = obj1
- txn = get_transaction()
- txn.note('root -> obj1')
- txn.commit()
- # Now, start a transaction, store an object, but don't yet complete
- # the transaction. This will ensure that the second object has a tid
- # < packtime, but it won't be root reachable yet.
- obj2 = C()
- t = Transaction()
- storage.tpc_begin(t)
- obj2sn = storage.store('\0'*7 + '\2', ZERO, zodb_pickle(obj2), '', t)
- # Now, acquire the condvar lock and start a thread that will do a
- # pack, up to the _sweep call. Wait for the _mark() call to
- # complete.
- now = time.time()
- while now == time.time():
- time.sleep(0.5)
- self._cv.acquire()
- packthread = FullPackThread(storage)
- packthread.start()
- self._cv.wait()
- # Now that the _mark() has finished, complete the transaction, which
- # links the object to root.
- root.obj2 = obj2
- rootsn = storage.getSerial(ZERO)
- rootsn = storage.store(ZERO, rootsn, zodb_pickle(root), '', t)
- storage.tpc_vote(t)
- storage.tpc_finish(t)
- # And notify the pack thread that it can do the sweep and collect
- self._cv.notify()
- self._cv.wait()
- # We're done with the condvar and the thread
- self._cv.release()
- packthread.join()
- # Now make sure that all the interesting objects are still available
- rootsn = storage.getSerial(ZERO)
- obj1sn = storage.getSerial('\0'*7 + '\1')
- obj2sn = storage.getSerial('\0'*7 + '\2')
- # obj1 revision was written before the second revision of the root
- unless(obj1sn < rootsn)
- unless(rootsn == obj2sn)
- unless(obj1sn < obj2sn)
+ def _getPackThread(self, storage):
+ return FullPackThread(storage)
@@ -469,57 +526,8 @@
class TestMinimalClassicPackRaceCondition(RaceConditionBase):
ConcreteStorage = SynchronizedMinimalStorage
- def testRaceCondition(self):
- unless = self.failUnless
- storage = self._storage
- db = DB(storage)
- conn = db.open()
- root = conn.root()
- # Start by storing a root reachable object.
- obj1 = C()
- obj1.value = 888
- root.obj1 = obj1
- txn = get_transaction()
- txn.note('root -> obj1')
- txn.commit()
- # Now, start a transaction, store an object, but don't yet complete
- # the transaction. This will ensure that the second object has a tid
- # < packtime, but it won't be root reachable yet.
- obj2 = C()
- t = Transaction()
- storage.tpc_begin(t)
- obj2sn = storage.store('\0'*7 + '\2', ZERO, zodb_pickle(obj2), '', t)
- # Now, acquire the condvar lock and start a thread that will do a
- # pack, up to the _sweep call. Wait for the _mark() call to
- # complete.
- now = time.time()
- while now == time.time():
- time.sleep(0.5)
- self._cv.acquire()
- packthread = MinimalPackThread(storage)
- packthread.start()
- self._cv.wait()
- # Now that the _mark() has finished, complete the transaction, which
- # links the object to root.
- root.obj2 = obj2
- rootsn = storage.getSerial(ZERO)
- rootsn = storage.store(ZERO, rootsn, zodb_pickle(root), '', t)
- storage.tpc_vote(t)
- storage.tpc_finish(t)
- # And notify the pack thread that it can do the sweep and collect
- self._cv.notify()
- self._cv.wait()
- # We're done with the condvar and the thread
- self._cv.release()
- packthread.join()
- # Now make sure that all the interesting objects are still available
- rootsn = storage.getSerial(ZERO)
- obj1sn = storage.getSerial('\0'*7 + '\1')
- obj2sn = storage.getSerial('\0'*7 + '\2')
- # obj1 revision was written before the second revision of the root
- unless(obj1sn < rootsn)
- unless(rootsn == obj2sn)
- unless(obj1sn < obj2sn)
+ def _getPackThread(self, storage):
+ return MinimalPackThread(storage)
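
The two race-condition tests that were previously duplicated verbatim for the
Full and Minimal storages now live once in RaceConditionBase, and each
concrete test class only supplies its pack thread via _getPackThread(). The
synchronization these tests depend on is a single condition variable shared
with the pack thread: the pack thread announces that its mark phase is done,
waits while the test commits a concurrent transaction, then runs its sweep.
Below is a self-contained sketch of just that handshake, with illustrative
stand-in names; the real FullPackThread/MinimalPackThread pause inside the
synchronized storage's _dopack between the _mark() and _sweep() calls:

    import threading

    events = []

    class ExamplePackThread(threading.Thread):
        # Stand-in for FullPackThread/MinimalPackThread: finish the "mark"
        # phase, pause until the test thread has committed, then "sweep".
        def __init__(self, cv):
            threading.Thread.__init__(self)
            self._cv = cv

        def run(self):
            self._cv.acquire()
            events.append('mark')      # phase 1 done
            self._cv.notify()          # wake the waiting test thread
            self._cv.wait()            # block until the concurrent commit
            events.append('sweep')     # phase 2 done
            self._cv.notify()
            self._cv.release()

    cv = threading.Condition()
    cv.acquire()
    packthread = ExamplePackThread(cv)
    packthread.start()
    cv.wait()                  # returns once the mark phase is finished
    events.append('commit')    # the concurrent transaction commits here
    cv.notify()                # let the pack thread run its sweep
    cv.wait()                  # wait for the sweep to complete
    cv.release()
    packthread.join()
    assert events == ['mark', 'commit', 'sweep']

Because the test thread acquires the condition variable before starting the
worker, the worker's first notify() cannot be lost: it can only fire once the
test thread is already waiting, which is what lets the commit land reliably
between the two pack phases.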
=== ZODB3/BDBStorage/tests/test_create.py 1.11.6.2 => 1.11.6.3 ===
--- ZODB3/BDBStorage/tests/test_create.py:1.11.6.2 Tue Jan 21 17:38:54 2003
+++ ZODB3/BDBStorage/tests/test_create.py Mon Jan 27 18:19:02 2003
@@ -1,6 +1,6 @@
##############################################################################
#
-# Copyright (c) 2001 Zope Corporation and Contributors.
+# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
@@ -74,6 +74,8 @@
class FullOpenCloseTest(BerkeleyTestBase.FullTestBase):
+ level = 2
+
def _mk_dbhome(self, dir):
config = BerkeleyConfig()
config.interval = 10
@@ -91,8 +93,8 @@
class OpenRecoveryTest(BerkeleyTestBase.FullTestBase):
- def _mk_dbhome(self, dir):
- self._dir = dir
+ def open(self):
+ self._storage = None
def testOpenWithBogusConfig(self):
class C: pass
@@ -100,9 +102,10 @@
# This instance won't have the necessary attributes, so the creation
# will fail. We want to be sure that everything gets cleaned up
# enough to fix that and create a proper storage.
- self.assertRaises(AttributeError, BDBFullStorage, self._dir, config=c)
+ dir = self._envdir()
+ self.assertRaises(AttributeError, BDBFullStorage, dir, config=c)
c = BerkeleyConfig()
- s = BDBFullStorage(self._dir, config=c)
+ s = BDBFullStorage(dir, config=c)
s.close()
=== ZODB3/BDBStorage/tests/test_storage_api.py 1.23.2.2 => 1.23.2.3 ===
--- ZODB3/BDBStorage/tests/test_storage_api.py:1.23.2.2 Tue Jan 21 17:41:10 2003
+++ ZODB3/BDBStorage/tests/test_storage_api.py Mon Jan 27 18:19:02 2003
@@ -29,8 +29,7 @@
from ZODB.tests.PackableStorage import PackableStorage
from ZODB.tests.HistoryStorage import HistoryStorage
from ZODB.tests.IteratorStorage import IteratorStorage, ExtendedIteratorStorage
-# XXX The refactored RecoveryStorage test is only available on the trunk.
-#from ZODB.tests.RecoveryStorage import RecoveryStorage
+from ZODB.tests.RecoveryStorage import RecoveryStorage
from ZODB.tests import ConflictResolution
@@ -59,20 +58,20 @@
DST_DBHOME = 'test-dst'
-##class FullRecoveryTest(BerkeleyTestBase.FullTestBase,
-## RecoveryStorage):
-## def setUp(self):
-## BerkeleyTestBase.FullTestBase.setUp(self)
-## self._zap_dbhome(DST_DBHOME)
-## self._dst = self._mk_dbhome(DST_DBHOME)
-
-## def tearDown(self):
-## BerkeleyTestBase.FullTestBase.tearDown(self)
-## self._zap_dbhome(DST_DBHOME)
-
-## def new_dest(self):
-## self._zap_dbhome(DST_DBHOME)
-## return self._mk_dbhome(DST_DBHOME)
+class FullRecoveryTest(BerkeleyTestBase.FullTestBase,
+ RecoveryStorage):
+ def setUp(self):
+ BerkeleyTestBase.FullTestBase.setUp(self)
+ self._zap_dbhome(DST_DBHOME)
+ self._dst = self._mk_dbhome(DST_DBHOME)
+
+ def tearDown(self):
+ BerkeleyTestBase.FullTestBase.tearDown(self)
+ self._zap_dbhome(DST_DBHOME)
+
+ def new_dest(self):
+ self._zap_dbhome(DST_DBHOME)
+ return self._mk_dbhome(DST_DBHOME)
@@ -80,7 +79,7 @@
suite = unittest.TestSuite()
if BDBStorage.is_available:
suite.addTest(unittest.makeSuite(FullTest, 'check'))
-## suite.addTest(unittest.makeSuite(FullRecoveryTest, 'check'))
+ suite.addTest(unittest.makeSuite(FullRecoveryTest, 'check'))
suite.addTest(unittest.makeSuite(MinimalTest, 'check'))
return suite
=== ZODB3/BDBStorage/tests/test_whitebox.py 1.4.2.1 => 1.4.2.2 ===
--- ZODB3/BDBStorage/tests/test_whitebox.py:1.4.2.1 Tue Jan 7 14:36:22 2003
+++ ZODB3/BDBStorage/tests/test_whitebox.py Mon Jan 27 18:19:02 2003
@@ -85,6 +85,14 @@
# stored object isn't referenced by any other objects.
eq(len(self._storage._refcounts.keys()), 0)
+ def checkStorageVersionAfterCreation(self):
+ from BDBStorage.BDBMinimalStorage import BDBMINIMAL_SCHEMA_VERSION
+ eq = self.assertEqual
+ eq(self._storage._info['version'], BDBMINIMAL_SCHEMA_VERSION)
+ self._storage.close()
+ self.open()
+ eq(self._storage._info['version'], BDBMINIMAL_SCHEMA_VERSION)
+
class WhiteboxHighLevelMinimal(ZODBTestBase):
@@ -174,6 +182,19 @@
+class WhiteboxLowLevelFull(BerkeleyTestBase):
+ ConcreteStorage = BDBFullStorage
+
+ def checkStorageVersionAfterCreation(self):
+ from BDBStorage.BDBFullStorage import BDBFULL_SCHEMA_VERSION
+ eq = self.assertEqual
+ eq(self._storage._info['version'], BDBFULL_SCHEMA_VERSION)
+ self._storage.close()
+ self.open()
+ eq(self._storage._info['version'], BDBFULL_SCHEMA_VERSION)
+
+
+
class WhiteboxHighLevelFull(ZODBTestBase):
ConcreteStorage = BDBFullStorage
@@ -229,6 +250,7 @@
if BDBStorage.is_available:
suite.addTest(unittest.makeSuite(WhiteboxLowLevelMinimal, 'check'))
suite.addTest(unittest.makeSuite(WhiteboxHighLevelMinimal, 'check'))
+ suite.addTest(unittest.makeSuite(WhiteboxLowLevelFull, 'check'))
suite.addTest(unittest.makeSuite(WhiteboxHighLevelFull, 'check'))
return suite