[Zodb-checkins] SVN: ZODB/trunk/s Refactored most of the blob-storage tests to be usable with different
Jim Fulton
jim at zope.com
Fri Dec 5 15:09:26 EST 2008
Log message for revision 93695:
Refactored most of the blob-storage tests to be usable with different
blob-storage implementations.
Changed:
U ZODB/trunk/setup.py
U ZODB/trunk/src/ZODB/tests/blob_connection.txt
U ZODB/trunk/src/ZODB/tests/blob_importexport.txt
U ZODB/trunk/src/ZODB/tests/blob_transaction.txt
U ZODB/trunk/src/ZODB/tests/testblob.py
-=-
Modified: ZODB/trunk/setup.py
===================================================================
--- ZODB/trunk/setup.py 2008-12-05 20:09:23 UTC (rev 93694)
+++ ZODB/trunk/setup.py 2008-12-05 20:09:25 UTC (rev 93695)
@@ -52,6 +52,7 @@
zeopasswd = ZEO.zeopasswd:main
mkzeoinst = ZEO.mkzeoinst:main
zeoctl = ZEO.zeoctl:main
+ remove-old-zeo-cached-blobs = ZEO.ClientStorage:check_blob_size_script
"""
scripts = []
Modified: ZODB/trunk/src/ZODB/tests/blob_connection.txt
===================================================================
--- ZODB/trunk/src/ZODB/tests/blob_connection.txt 2008-12-05 20:09:23 UTC (rev 93694)
+++ ZODB/trunk/src/ZODB/tests/blob_connection.txt 2008-12-05 20:09:25 UTC (rev 93695)
@@ -1,17 +1,3 @@
-##############################################################################
-#
-# Copyright (c) 2005 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
Connection support for Blobs tests
==================================
@@ -30,13 +16,8 @@
FileStorage rather than MappingStorage here because we will want ``loadBefore``
for one of our examples.)
- >>> import ZODB.FileStorage
- >>> from ZODB.blob import BlobStorage
+ >>> blob_storage = create_storage()
>>> from ZODB.DB import DB
- >>> base_storage = ZODB.FileStorage.FileStorage(
- ... 'BlobTests.fs', create=True)
- >>> blob_dir = 'blobs'
- >>> blob_storage = BlobStorage(blob_dir, base_storage)
>>> database = DB(blob_storage)
Putting a Blob into a Connection works like every other object:
Modified: ZODB/trunk/src/ZODB/tests/blob_importexport.txt
===================================================================
--- ZODB/trunk/src/ZODB/tests/blob_importexport.txt 2008-12-05 20:09:23 UTC (rev 93694)
+++ ZODB/trunk/src/ZODB/tests/blob_importexport.txt 2008-12-05 20:09:25 UTC (rev 93695)
@@ -17,25 +17,13 @@
Set up:
- >>> from ZODB.FileStorage import FileStorage
- >>> from ZODB.blob import Blob, BlobStorage
- >>> from ZODB.DB import DB
+ >>> import ZODB.blob, transaction
>>> from persistent.mapping import PersistentMapping
- >>> import shutil
- >>> import transaction
- >>> storagefile1 = 'Data.fs.1'
- >>> blob_dir1 = 'blobs1'
- >>> storagefile2 = 'Data.fs.2'
- >>> blob_dir2 = 'blobs2'
We need a database with an undoing blob supporting storage:
- >>> base_storage1 = FileStorage(storagefile1)
- >>> blob_storage1 = BlobStorage(blob_dir1, base_storage1)
- >>> base_storage2 = FileStorage(storagefile2)
- >>> blob_storage2 = BlobStorage(blob_dir2, base_storage2)
- >>> database1 = DB(blob_storage1)
- >>> database2 = DB(blob_storage2)
+ >>> database1 = ZODB.DB(create_storage('1'))
+ >>> database2 = ZODB.DB(create_storage('2'))
Create our root object for database1:
@@ -46,12 +34,11 @@
>>> import time, os
>>> nothing = transaction.begin()
- >>> tid = blob_storage1._tid
>>> data1 = 'x'*100000
- >>> blob1 = Blob()
+ >>> blob1 = ZODB.blob.Blob()
>>> blob1.open('w').write(data1)
>>> data2 = 'y'*100000
- >>> blob2 = Blob()
+ >>> blob2 = ZODB.blob.Blob()
>>> blob2.open('w').write(data2)
>>> d = PersistentMapping({'blob1':blob1, 'blob2':blob2})
>>> root1['blobdata'] = d
@@ -85,17 +72,7 @@
True
>>> transaction.get().abort()
-Clean up our blob directory:
+.. cleanup
- >>> base_storage1.close()
- >>> base_storage2.close()
- >>> import ZODB.blob
- >>> ZODB.blob.remove_committed_dir(blob_dir1)
- >>> ZODB.blob.remove_committed_dir(blob_dir2)
- >>> os.unlink(exportfile)
- >>> os.unlink(storagefile1)
- >>> os.unlink(storagefile1+".index")
- >>> os.unlink(storagefile1+".tmp")
- >>> os.unlink(storagefile2)
- >>> os.unlink(storagefile2+".index")
- >>> os.unlink(storagefile2+".tmp")
+ >>> database1.close()
+ >>> database2.close()
Modified: ZODB/trunk/src/ZODB/tests/blob_transaction.txt
===================================================================
--- ZODB/trunk/src/ZODB/tests/blob_transaction.txt 2008-12-05 20:09:23 UTC (rev 93694)
+++ ZODB/trunk/src/ZODB/tests/blob_transaction.txt 2008-12-05 20:09:25 UTC (rev 93695)
@@ -17,20 +17,16 @@
We need a database with a blob supporting storage::
- >>> from ZODB.MappingStorage import MappingStorage
- >>> from ZODB.blob import Blob, BlobStorage
- >>> from ZODB.DB import DB
- >>> import transaction
- >>> base_storage = MappingStorage("test")
+ >>> import ZODB.blob, transaction
>>> blob_dir = 'blobs'
- >>> blob_storage = BlobStorage(blob_dir, base_storage)
- >>> database = DB(blob_storage)
+ >>> blob_storage = create_storage(blob_dir=blob_dir)
+ >>> database = ZODB.DB(blob_storage)
>>> connection1 = database.open()
>>> root1 = connection1.root()
Putting a Blob into a Connection works like any other Persistent object::
- >>> blob1 = Blob()
+ >>> blob1 = ZODB.blob.Blob()
>>> blob1.open('w').write('this is blob 1')
>>> root1['blob1'] = blob1
>>> 'blob1' in root1
@@ -130,7 +126,7 @@
We can open more than one blob object during the course of a single
transaction::
- >>> blob2 = Blob()
+ >>> blob2 = ZODB.blob.Blob()
>>> blob2.open('w').write('this is blob 3')
>>> root2['blob2'] = blob2
>>> transaction.commit()
@@ -189,16 +185,6 @@
>>> root4['blob1'].open('r').read()
'this is blob 1woot!this is from connection 3'
-BlobStorages implementation of getSize() does not include the blob data and
-only returns what the underlying storages do. (We need to ensure the last
-number to be an int, otherwise it will be a long on 32-bit platforms and an
-int on 64-bit)::
-
- >>> underlying_size = base_storage.getSize()
- >>> blob_size = blob_storage.getSize()
- >>> int(blob_size - underlying_size)
- 0
-
You can't commit a transaction while blob files are open:
>>> f = root3['blob1'].open('w')
@@ -227,7 +213,7 @@
>>> connection5 = database.open()
>>> root5 = connection5.root()
- >>> blob = Blob()
+ >>> blob = ZODB.blob.Blob()
>>> blob_fh = blob.open("w")
>>> blob_fh.write("I'm a happy blob.")
>>> blob_fh.close()
@@ -297,7 +283,7 @@
>>> connection6 = database.open()
>>> root6 = connection6.root()
- >>> blob = Blob()
+ >>> blob = ZODB.blob.Blob()
>>> blob_fh = blob.open("w")
>>> blob_fh.write("I'm a happy blob.")
>>> blob_fh.close()
@@ -330,7 +316,7 @@
An exception is raised if we call committed on a blob that has
uncommitted changes:
- >>> blob = Blob()
+ >>> blob = ZODB.blob.Blob()
>>> blob.committed()
Traceback (most recent call last):
...
@@ -375,55 +361,55 @@
...
IOError: ...
-tpc_abort with dirty data
--------------------------
+tpc_abort
+---------
-When `tpc_abort` is called during the first commit phase we need to be able to
-clean up dirty files:
+If a transaction is aborted in the middle of 2-phase commit, any data
+stored are discarded.
- >>> class DummyBaseStorage(object):
- ... def tpc_abort(self):
- ... pass
- >>> base_storage = DummyBaseStorage()
- >>> blob_dir2 = 'blobs2'
- >>> blob_storage2 = BlobStorage(blob_dir2, base_storage)
- >>> committed_blob_dir = blob_storage2.fshelper.getPathForOID(0)
- >>> os.makedirs(committed_blob_dir)
- >>> committed_blob_file = blob_storage2.fshelper.getBlobFilename(0, 0)
- >>> open(os.path.join(committed_blob_file), 'w').write('foo')
- >>> os.path.exists(committed_blob_file)
- True
+ >>> olddata, oldserial = blob_storage.load(blob._p_oid, '')
+ >>> t = transaction.get()
+ >>> blob_storage.tpc_begin(t)
+ >>> open('blobfile', 'w').write('This data should go away')
+ >>> s1 = blob_storage.storeBlob(blob._p_oid, oldserial, olddata, 'blobfile',
+ ... '', t)
+ >>> new_oid = blob_storage.new_oid()
+ >>> open('blobfile2', 'w').write('This data should go away too')
+ >>> s2 = blob_storage.storeBlob(new_oid, '\0'*8, olddata, 'blobfile2',
+ ... '', t)
+ >>> blob_storage.tpc_abort(t)
-Now, telling the storage that Blob 0 and Blob 1 (both with serial 0) are dirty
-will: remove the committed file for Blob 0 and ignore the fact that Blob 1 is
-set to dirty but doesn't actually have an existing file:
+Now, the serial for the existing blob should be the same:
- >>> blob_storage2.dirty_oids = [(0, 0), (1, 0)]
- >>> blob_storage2.tpc_abort()
- >>> os.path.exists(committed_blob_file)
- False
+ >>> blob_storage.load(blob._p_oid, '') == (olddata, oldserial)
+ True
+And we shouldn't be able to read the data that we saved:
-Note: This is a counter measure against regression of bug #126007.
+ >>> blob_storage.loadBlob(blob._p_oid, s1)
+ Traceback (most recent call last):
+ ...
+ POSKeyError: 'No blob file'
-`getSize` iterates over the existing blob files in the blob directory and adds
-up their size. The blob directory sometimes contains temporary files that the
-getSize function needs to ignore:
+Of course the old data should be unaffected:
- >>> garbage_file = os.path.join(blob_dir, 'garbage')
- >>> open(garbage_file, 'w').write('garbage')
- >>> int(blob_storage.getSize())
- 2673
+ >>> open(blob_storage.loadBlob(blob._p_oid, oldserial)).read()
+ "I'm a happy blob."
-Note: This is a counter measer against regression of bug #12991.
+Similarly, the new object wasn't added to the storage:
-Teardown
---------
+ >>> blob_storage.load(new_oid, '')
+ Traceback (most recent call last):
+ ...
+ POSKeyError: 0x06
-We don't need the storage directory and databases anymore::
+ >>> blob_storage.loadBlob(blob._p_oid, s2)
+ Traceback (most recent call last):
+ ...
+ POSKeyError: 'No blob file'
+.. clean up
+
>>> tm1.abort()
>>> tm2.abort()
>>> database.close()
- >>> rmtree(blob_dir)
- >>> rmtree(blob_dir2)
Modified: ZODB/trunk/src/ZODB/tests/testblob.py
===================================================================
--- ZODB/trunk/src/ZODB/tests/testblob.py 2008-12-05 20:09:23 UTC (rev 93694)
+++ ZODB/trunk/src/ZODB/tests/testblob.py 2008-12-05 20:09:25 UTC (rev 93695)
@@ -39,10 +39,10 @@
import ZODB.blob
import ZODB.interfaces
import ZODB.tests.IteratorStorage
+import ZODB.tests.StorageTestBase
import ZODB.tests.util
import zope.testing.renormalizing
-
def new_time():
"""Create a _new_ time stamp.
@@ -95,18 +95,21 @@
</zodb>
""")
+class BlobTestBase(ZODB.tests.StorageTestBase.StorageTestBase):
-class BlobCloneTests(ZODB.tests.util.TestCase):
+ def setUp(self):
+ ZODB.tests.StorageTestBase.StorageTestBase.setUp(self)
+ self._storage = self.create_storage()
+class BlobCloneTests(BlobTestBase):
+
def testDeepCopyCanInvalidate(self):
"""
Tests regression for invalidation problems related to missing
readers and writers values in cloned objects (see
http://mail.zope.org/pipermail/zodb-dev/2008-August/012054.html)
"""
- base_storage = FileStorage('Data.fs')
- blob_storage = BlobStorage('blobs', base_storage)
- database = DB(blob_storage)
+ database = DB(self._storage)
connection = database.open()
root = connection.root()
transaction.begin()
@@ -129,12 +132,10 @@
database.close()
-class BlobUndoTests(ZODB.tests.util.TestCase):
+class BlobUndoTests(BlobTestBase):
def testUndoWithoutPreviousVersion(self):
- base_storage = FileStorage('Data.fs')
- blob_storage = BlobStorage('blobs', base_storage)
- database = DB(blob_storage)
+ database = DB(self._storage)
connection = database.open()
root = connection.root()
transaction.begin()
@@ -149,9 +150,7 @@
database.close()
def testUndo(self):
- base_storage = FileStorage('Data.fs')
- blob_storage = BlobStorage('blobs', base_storage)
- database = DB(blob_storage)
+ database = DB(self._storage)
connection = database.open()
root = connection.root()
transaction.begin()
@@ -173,9 +172,7 @@
database.close()
def testUndoAfterConsumption(self):
- base_storage = FileStorage('Data.fs')
- blob_storage = BlobStorage('blobs', base_storage)
- database = DB(blob_storage)
+ database = DB(self._storage)
connection = database.open()
root = connection.root()
transaction.begin()
@@ -199,9 +196,7 @@
database.close()
def testRedo(self):
- base_storage = FileStorage('Data.fs')
- blob_storage = BlobStorage('bobs', base_storage)
- database = DB(blob_storage)
+ database = DB(self._storage)
connection = database.open()
root = connection.root()
blob = Blob()
@@ -221,8 +216,6 @@
self.assertEqual(blob.open('r').read(), 'this is state 1')
- serial = base64.encodestring(blob_storage._tid)
-
database.undo(database.undoLog(0, 1)[0]['id'])
transaction.commit()
@@ -231,9 +224,7 @@
database.close()
def testRedoOfCreation(self):
- base_storage = FileStorage('Data.fs')
- blob_storage = BlobStorage('blobs', base_storage)
- database = DB(blob_storage)
+ database = DB(self._storage)
connection = database.open()
root = connection.root()
blob = Blob()
@@ -256,20 +247,16 @@
database.close()
-class RecoveryBlobStorage(ZODB.tests.util.TestCase,
+class RecoveryBlobStorage(BlobTestBase,
ZODB.tests.IteratorStorage.IteratorDeepCompare):
def setUp(self):
- ZODB.tests.util.TestCase.setUp(self)
- self._storage = BlobStorage(
- 'src_blobs', ZODB.FileStorage.FileStorage("Source.fs", create=True))
- self._dst = BlobStorage(
- 'dest_blobs', ZODB.FileStorage.FileStorage("Dest.fs", create=True))
+ BlobTestBase.setUp(self)
+ self._dst = self.create_storage('dest')
def tearDown(self):
- self._storage.close()
self._dst.close()
- ZODB.tests.util.TestCase.tearDown(self)
+ BlobTestBase.tearDown(self)
# Requires a setUp() that creates a self._dst destination storage
def testSimpleBlobRecovery(self):
@@ -299,7 +286,6 @@
def gc_blob_removes_uncommitted_data():
"""
- >>> from ZODB.blob import Blob
>>> blob = Blob()
>>> blob.open('w').write('x')
>>> fname = blob._p_blob_uncommitted
@@ -323,19 +309,14 @@
>>> os_rename = os.rename
>>> os.rename = fail
- >>> import logging, sys
+ >>> import logging
>>> logger = logging.getLogger('ZODB.blob.copied')
>>> handler = logging.StreamHandler(sys.stdout)
>>> logger.propagate = False
>>> logger.setLevel(logging.DEBUG)
>>> logger.addHandler(handler)
- >>> import transaction
- >>> from ZODB.MappingStorage import MappingStorage
- >>> from ZODB.blob import BlobStorage
- >>> from ZODB.DB import DB
- >>> base_storage = MappingStorage("test")
- >>> blob_storage = BlobStorage('blobs', base_storage)
+ >>> blob_storage = create_storage()
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
@@ -378,13 +359,10 @@
temporary directory that is ignored while packing.
>>> import transaction
- >>> from ZODB.MappingStorage import MappingStorage
- >>> from ZODB.blob import BlobStorage
>>> from ZODB.DB import DB
>>> from ZODB.serialize import referencesf
- >>> base_storage = MappingStorage("test")
- >>> blob_storage = BlobStorage('blobs', base_storage)
+ >>> blob_storage = create_storage()
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
@@ -409,14 +387,9 @@
blob_directory and confused our packing strategy. We now use a separate
temporary directory that is ignored while packing.
- >>> import transaction
- >>> from ZODB.FileStorage.FileStorage import FileStorage
- >>> from ZODB.blob import BlobStorage
- >>> from ZODB.DB import DB
>>> from ZODB.serialize import referencesf
- >>> base_storage = FileStorage('Data.fs')
- >>> blob_storage = BlobStorage('blobs', base_storage)
+ >>> blob_storage = create_storage()
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
@@ -438,13 +411,8 @@
This is a test for secure creation and verification of secure settings of
blob directories.
- >>> from ZODB.FileStorage.FileStorage import FileStorage
- >>> from ZODB.blob import BlobStorage
- >>> import os.path
+ >>> blob_storage = create_storage(blob_dir='blobs')
- >>> base_storage = FileStorage('Data.fs')
- >>> blob_storage = BlobStorage('blobs', base_storage)
-
Two directories are created:
>>> os.path.isdir('blobs')
@@ -493,14 +461,7 @@
First, let's setup a regular database and store a blob:
- >>> import transaction
- >>> from ZODB.FileStorage.FileStorage import FileStorage
- >>> from ZODB.blob import BlobStorage
- >>> from ZODB.DB import DB
- >>> from ZODB.serialize import referencesf
-
- >>> base_storage = FileStorage('Data.fs')
- >>> blob_storage = BlobStorage('blobs', base_storage)
+ >>> blob_storage = create_storage()
>>> database = DB(blob_storage)
>>> connection = database.open()
>>> root = connection.root()
@@ -533,15 +494,14 @@
def is_blob_record():
r"""
- >>> fs = FileStorage('Data.fs')
- >>> bs = ZODB.blob.BlobStorage('blobs', fs)
+ >>> bs = create_storage()
>>> db = DB(bs)
>>> conn = db.open()
>>> conn.root()['blob'] = ZODB.blob.Blob()
>>> transaction.commit()
- >>> ZODB.blob.is_blob_record(fs.load(ZODB.utils.p64(0), '')[0])
+ >>> ZODB.blob.is_blob_record(bs.load(ZODB.utils.p64(0), '')[0])
False
- >>> ZODB.blob.is_blob_record(fs.load(ZODB.utils.p64(1), '')[0])
+ >>> ZODB.blob.is_blob_record(bs.load(ZODB.utils.p64(1), '')[0])
True
An invalid pickle yields a false value:
@@ -558,8 +518,7 @@
def do_not_depend_on_cwd():
"""
- >>> from ZODB.MappingStorage import MappingStorage
- >>> bs = ZODB.blob.BlobStorage('blobs', MappingStorage())
+ >>> bs = create_storage()
>>> here = os.getcwd()
>>> os.mkdir('evil')
>>> os.chdir('evil')
@@ -578,12 +537,67 @@
ZODB.tests.util.setUp(test)
test.globs['rmtree'] = zope.testing.setupstack.rmtree
+
+def setUpBlobAdaptedFileStorage(test):
+ setUp(test)
+
+ def create_storage(name='data', blob_dir=None):
+ if blob_dir is None:
+ blob_dir = '%s.bobs' % name
+ return ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name))
+
+ test.globs['create_storage'] = create_storage
+
+def storage_reusable_suite(prefix, factory):
+ """Return a test suite for a generic IBlobStorage.
+
+ Pass a factory taking a name and a blob directory name.
+ """
+
+ def setup(test):
+ setUp(test)
+ def create_storage(name='data', blob_dir=None):
+ if blob_dir is None:
+ blob_dir = '%s.bobs' % name
+ return factory(name, blob_dir)
+
+ test.globs['create_storage'] = create_storage
+
+ suite = unittest.TestSuite()
+ suite.addTest(doctest.DocFileSuite(
+ "blob_connection.txt", "blob_importexport.txt",
+ "blob_transaction.txt",
+ setUp=setup, tearDown=zope.testing.setupstack.tearDown,
+ optionflags=doctest.ELLIPSIS,
+ ))
+ suite.addTest(doctest.DocTestSuite(
+ setUp=setup, tearDown=zope.testing.setupstack.tearDown,
+ checker = zope.testing.renormalizing.RENormalizing([
+ (re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
+ (re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
+ ]),
+ ))
+
+ def create_storage(self, name='data', blob_dir=None):
+ if blob_dir is None:
+ blob_dir = '%s.bobs' % name
+ return factory(name, blob_dir)
+
+ for class_ in (BlobCloneTests, BlobUndoTests, RecoveryBlobStorage):
+ new_class = class_.__class__(
+ prefix+class_.__name__, (class_, ),
+ dict(create_storage=create_storage),
+ )
+ suite.addTest(unittest.makeSuite(new_class))
+
+ return suite
+
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ZODBBlobConfigTest))
suite.addTest(doctest.DocFileSuite(
- "blob_basic.txt", "blob_connection.txt", "blob_transaction.txt",
- "blob_packing.txt", "blob_importexport.txt", "blob_consume.txt",
+ "blob_basic.txt",
+ "blob_packing.txt", "blob_consume.txt",
"blob_tempdir.txt",
setUp=setUp,
tearDown=zope.testing.setupstack.tearDown,
@@ -600,17 +614,11 @@
(re.compile(r'\S+/((old|bushy|lawn)/\S+/foo[23456]?)'), r'\1'),
]),
))
- suite.addTest(doctest.DocTestSuite(
- setUp=setUp,
- tearDown=zope.testing.setupstack.tearDown,
- checker = zope.testing.renormalizing.RENormalizing([
- (re.compile(r'\%(sep)s\%(sep)s' % dict(sep=os.path.sep)), '/'),
- (re.compile(r'\%(sep)s' % dict(sep=os.path.sep)), '/'),
- ]),
+ suite.addTest(storage_reusable_suite(
+ 'BlobAdaptedFileStorage',
+ lambda name, blob_dir:
+ ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name))
))
- suite.addTest(unittest.makeSuite(BlobCloneTests))
- suite.addTest(unittest.makeSuite(BlobUndoTests))
- suite.addTest(unittest.makeSuite(RecoveryBlobStorage))
return suite
More information about the Zodb-checkins
mailing list