[Zodb-checkins] SVN: ZODB/trunk/src/Z Split blob packing tests into generic reusable and non-generic tests.
Jim Fulton
jim at zope.com
Tue Dec 9 16:23:22 EST 2008
Log message for revision 93829:
Split blob packing tests into generic reusable and non-generic tests.
Changed:
U ZODB/trunk/src/ZEO/tests/testZEO.py
U ZODB/trunk/src/ZODB/tests/blob_packing.txt
A ZODB/trunk/src/ZODB/tests/blobstorage_packing.txt
U ZODB/trunk/src/ZODB/tests/testblob.py
-=-
Modified: ZODB/trunk/src/ZEO/tests/testZEO.py
===================================================================
--- ZODB/trunk/src/ZEO/tests/testZEO.py 2008-12-09 20:23:14 UTC (rev 93828)
+++ ZODB/trunk/src/ZEO/tests/testZEO.py 2008-12-09 21:23:22 UTC (rev 93829)
@@ -1233,11 +1233,9 @@
suite.addTest(sub)
suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
- 'ClientStorageNonSharedBlobs', ServerManagingClientStorage,
- test_blob_storage_recovery=False))
+ 'ClientStorageNonSharedBlobs', ServerManagingClientStorage))
suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
- 'ClientStorageSharedBlobs', create_storage_shared,
- test_blob_storage_recovery=False))
+ 'ClientStorageSharedBlobs', create_storage_shared))
return suite
Modified: ZODB/trunk/src/ZODB/tests/blob_packing.txt
===================================================================
--- ZODB/trunk/src/ZODB/tests/blob_packing.txt 2008-12-09 20:23:14 UTC (rev 93828)
+++ ZODB/trunk/src/ZODB/tests/blob_packing.txt 2008-12-09 21:23:22 UTC (rev 93829)
@@ -1,31 +1,13 @@
-##############################################################################
-#
-# Copyright (c) 2005 Zope Corporation and Contributors.
-# All Rights Reserved.
-#
-# This software is subject to the provisions of the Zope Public License,
-# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-# FOR A PARTICULAR PURPOSE.
-#
-##############################################################################
-
Packing support for blob data
=============================
Set up:
- >>> from ZODB.FileStorage import FileStorage
- >>> from ZODB.MappingStorage import MappingStorage
>>> from ZODB.serialize import referencesf
- >>> from ZODB.blob import Blob, BlobStorage
+ >>> from ZODB.blob import Blob
>>> from ZODB import utils
>>> from ZODB.DB import DB
>>> import transaction
- >>> storagefile = 'Data.fs'
- >>> blob_dir = 'blobs'
A helper method to assure a unique timestamp across multiple platforms:
@@ -36,8 +18,7 @@
We need a database with an undoing blob supporting storage:
- >>> base_storage = FileStorage(storagefile)
- >>> blob_storage = BlobStorage(blob_dir, base_storage)
+ >>> blob_storage = create_storage()
>>> database = DB(blob_storage)
Create our root object:
@@ -62,25 +43,25 @@
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 1')
>>> transaction.commit()
- >>> tids.append(blob_storage._tid)
+ >>> tids.append(blob._p_serial)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 2')
>>> transaction.commit()
- >>> tids.append(blob_storage._tid)
+ >>> tids.append(blob._p_serial)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 3')
>>> transaction.commit()
- >>> tids.append(blob_storage._tid)
+ >>> tids.append(blob._p_serial)
>>> nothing = transaction.begin()
>>> times.append(new_time())
>>> root['blob'].open('w').write('this is blob data 4')
>>> transaction.commit()
- >>> tids.append(blob_storage._tid)
+ >>> tids.append(blob._p_serial)
>>> oid = root['blob']._p_oid
>>> fns = [ blob_storage.fshelper.getBlobFilename(oid, x) for x in tids ]
@@ -144,128 +125,4 @@
Clean up our blob directory and database:
- >>> rmtree(blob_dir)
- >>> base_storage.close()
- >>> os.unlink(storagefile)
- >>> os.unlink(storagefile+".index")
- >>> os.unlink(storagefile+".tmp")
- >>> os.unlink(storagefile+".old")
-
-NON-UNDOING
-===========
-
-We need an database with a NON-undoing blob supporting storage:
-
- >>> base_storage = MappingStorage('storage')
- >>> blob_storage = BlobStorage(blob_dir, base_storage)
- >>> database = DB(blob_storage)
-
-Create our root object:
-
- >>> connection1 = database.open()
- >>> root = connection1.root()
-
-Put some revisions of a blob object in our database and on the filesystem:
-
- >>> import time, os
- >>> tids = []
- >>> times = []
- >>> nothing = transaction.begin()
- >>> times.append(new_time())
- >>> blob = Blob()
- >>> blob.open('w').write('this is blob data 0')
- >>> root['blob'] = blob
- >>> transaction.commit()
- >>> tids.append(blob_storage._tid)
-
- >>> nothing = transaction.begin()
- >>> times.append(new_time())
- >>> root['blob'].open('w').write('this is blob data 1')
- >>> transaction.commit()
- >>> tids.append(blob_storage._tid)
-
- >>> nothing = transaction.begin()
- >>> times.append(new_time())
- >>> root['blob'].open('w').write('this is blob data 2')
- >>> transaction.commit()
- >>> tids.append(blob_storage._tid)
-
- >>> nothing = transaction.begin()
- >>> times.append(new_time())
- >>> root['blob'].open('w').write('this is blob data 3')
- >>> transaction.commit()
- >>> tids.append(blob_storage._tid)
-
- >>> nothing = transaction.begin()
- >>> times.append(new_time())
- >>> root['blob'].open('w').write('this is blob data 4')
- >>> transaction.commit()
- >>> tids.append(blob_storage._tid)
-
- >>> oid = root['blob']._p_oid
- >>> fns = [ blob_storage.fshelper.getBlobFilename(oid, x) for x in tids ]
- >>> [ os.path.exists(x) for x in fns ]
- [True, True, True, True, True]
-
-Get our blob filenames for this oid.
-
- >>> fns = [ blob_storage.fshelper.getBlobFilename(oid, x) for x in tids ]
-
-Do a pack to the slightly before the first revision was written:
-
- >>> packtime = times[0]
- >>> blob_storage.pack(packtime, referencesf)
- >>> [ os.path.exists(x) for x in fns ]
- [False, False, False, False, True]
-
-Do a pack to now:
-
- >>> packtime = new_time()
- >>> blob_storage.pack(packtime, referencesf)
- >>> [ os.path.exists(x) for x in fns ]
- [False, False, False, False, True]
-
-Delete the object and do a pack, it should get rid of the most current
-revision as well as the entire directory:
-
- >>> nothing = transaction.begin()
- >>> del root['blob']
- >>> transaction.commit()
- >>> packtime = new_time()
- >>> blob_storage.pack(packtime, referencesf)
- >>> [ os.path.exists(x) for x in fns ]
- [False, False, False, False, False]
- >>> os.path.exists(os.path.split(fns[0])[0])
- False
-
-Avoiding parallel packs
-=======================
-
-Blob packing (similar to FileStorage) can only be run once at a time. For
-this, a flag (_blobs_pack_is_in_progress) is set. If the pack method is called
-while this flag is set, it will refuse to perform another pack, until the flag
-is reset:
-
- >>> blob_storage._blobs_pack_is_in_progress
- False
- >>> blob_storage._blobs_pack_is_in_progress = True
- >>> blob_storage.pack(packtime, referencesf)
- Traceback (most recent call last):
- BlobStorageError: Already packing
- >>> blob_storage._blobs_pack_is_in_progress = False
- >>> blob_storage.pack(packtime, referencesf)
-
-We can also see, that the flag is set during the pack, by leveraging the
-knowledge that the underlying storage's pack method is also called:
-
- >>> def dummy_pack(time, ref):
- ... print "_blobs_pack_is_in_progress =",
- ... print blob_storage._blobs_pack_is_in_progress
- ... return base_pack(time, ref)
- >>> base_pack = base_storage.pack
- >>> base_storage.pack = dummy_pack
- >>> blob_storage.pack(packtime, referencesf)
- _blobs_pack_is_in_progress = True
- >>> blob_storage._blobs_pack_is_in_progress
- False
- >>> base_storage.pack = base_pack
+ >>> blob_storage.close()
Copied: ZODB/trunk/src/ZODB/tests/blobstorage_packing.txt (from rev 93828, ZODB/trunk/src/ZODB/tests/blob_packing.txt)
===================================================================
--- ZODB/trunk/src/ZODB/tests/blobstorage_packing.txt (rev 0)
+++ ZODB/trunk/src/ZODB/tests/blobstorage_packing.txt 2008-12-09 21:23:22 UTC (rev 93829)
@@ -0,0 +1,155 @@
+##############################################################################
+#
+# Copyright (c) 2005 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+
+Packing support for blob data
+=============================
+
+Set up:
+
+ >>> from ZODB.MappingStorage import MappingStorage
+ >>> from ZODB.serialize import referencesf
+ >>> from ZODB.blob import Blob, BlobStorage
+ >>> from ZODB import utils
+ >>> from ZODB.DB import DB
+ >>> import transaction
+ >>> storagefile = 'Data.fs'
+ >>> blob_dir = 'blobs'
+
+A helper method to assure a unique timestamp across multiple platforms:
+
+ >>> from ZODB.tests.testblob import new_time
+
+UNDOING
+=======
+
+See blob_packing.txt.
+
+NON-UNDOING
+===========
+
+We need an database with a NON-undoing blob supporting storage:
+
+ >>> base_storage = MappingStorage('storage')
+ >>> blob_storage = BlobStorage(blob_dir, base_storage)
+ >>> database = DB(blob_storage)
+
+Create our root object:
+
+ >>> connection1 = database.open()
+ >>> root = connection1.root()
+
+Put some revisions of a blob object in our database and on the filesystem:
+
+ >>> import time, os
+ >>> tids = []
+ >>> times = []
+ >>> nothing = transaction.begin()
+ >>> times.append(new_time())
+ >>> blob = Blob()
+ >>> blob.open('w').write('this is blob data 0')
+ >>> root['blob'] = blob
+ >>> transaction.commit()
+ >>> tids.append(blob_storage._tid)
+
+ >>> nothing = transaction.begin()
+ >>> times.append(new_time())
+ >>> root['blob'].open('w').write('this is blob data 1')
+ >>> transaction.commit()
+ >>> tids.append(blob_storage._tid)
+
+ >>> nothing = transaction.begin()
+ >>> times.append(new_time())
+ >>> root['blob'].open('w').write('this is blob data 2')
+ >>> transaction.commit()
+ >>> tids.append(blob_storage._tid)
+
+ >>> nothing = transaction.begin()
+ >>> times.append(new_time())
+ >>> root['blob'].open('w').write('this is blob data 3')
+ >>> transaction.commit()
+ >>> tids.append(blob_storage._tid)
+
+ >>> nothing = transaction.begin()
+ >>> times.append(new_time())
+ >>> root['blob'].open('w').write('this is blob data 4')
+ >>> transaction.commit()
+ >>> tids.append(blob_storage._tid)
+
+ >>> oid = root['blob']._p_oid
+ >>> fns = [ blob_storage.fshelper.getBlobFilename(oid, x) for x in tids ]
+ >>> [ os.path.exists(x) for x in fns ]
+ [True, True, True, True, True]
+
+Get our blob filenames for this oid.
+
+ >>> fns = [ blob_storage.fshelper.getBlobFilename(oid, x) for x in tids ]
+
+Do a pack to the slightly before the first revision was written:
+
+ >>> packtime = times[0]
+ >>> blob_storage.pack(packtime, referencesf)
+ >>> [ os.path.exists(x) for x in fns ]
+ [False, False, False, False, True]
+
+Do a pack to now:
+
+ >>> packtime = new_time()
+ >>> blob_storage.pack(packtime, referencesf)
+ >>> [ os.path.exists(x) for x in fns ]
+ [False, False, False, False, True]
+
+Delete the object and do a pack, it should get rid of the most current
+revision as well as the entire directory:
+
+ >>> nothing = transaction.begin()
+ >>> del root['blob']
+ >>> transaction.commit()
+ >>> packtime = new_time()
+ >>> blob_storage.pack(packtime, referencesf)
+ >>> [ os.path.exists(x) for x in fns ]
+ [False, False, False, False, False]
+ >>> os.path.exists(os.path.split(fns[0])[0])
+ False
+
+Avoiding parallel packs
+=======================
+
+Blob packing (similar to FileStorage) can only be run once at a time. For
+this, a flag (_blobs_pack_is_in_progress) is set. If the pack method is called
+while this flag is set, it will refuse to perform another pack, until the flag
+is reset:
+
+ >>> blob_storage._blobs_pack_is_in_progress
+ False
+ >>> blob_storage._blobs_pack_is_in_progress = True
+ >>> blob_storage.pack(packtime, referencesf)
+ Traceback (most recent call last):
+ BlobStorageError: Already packing
+ >>> blob_storage._blobs_pack_is_in_progress = False
+ >>> blob_storage.pack(packtime, referencesf)
+
+We can also see, that the flag is set during the pack, by leveraging the
+knowledge that the underlying storage's pack method is also called:
+
+ >>> def dummy_pack(time, ref):
+ ... print "_blobs_pack_is_in_progress =",
+ ... print blob_storage._blobs_pack_is_in_progress
+ ... return base_pack(time, ref)
+ >>> base_pack = base_storage.pack
+ >>> base_storage.pack = dummy_pack
+ >>> blob_storage.pack(packtime, referencesf)
+ _blobs_pack_is_in_progress = True
+ >>> blob_storage._blobs_pack_is_in_progress
+ False
+ >>> base_storage.pack = base_pack
Modified: ZODB/trunk/src/ZODB/tests/testblob.py
===================================================================
--- ZODB/trunk/src/ZODB/tests/testblob.py 2008-12-09 20:23:14 UTC (rev 93828)
+++ ZODB/trunk/src/ZODB/tests/testblob.py 2008-12-09 21:23:22 UTC (rev 93829)
@@ -552,7 +552,8 @@
test.globs['create_storage'] = create_storage
def storage_reusable_suite(prefix, factory,
- test_blob_storage_recovery=True,
+ test_blob_storage_recovery=False,
+ test_packing=False,
):
"""Return a test suite for a generic IBlobStorage.
@@ -575,6 +576,11 @@
setUp=setup, tearDown=zope.testing.setupstack.tearDown,
optionflags=doctest.ELLIPSIS,
))
+ if test_packing:
+ suite.addTest(doctest.DocFileSuite(
+ "blob_packing.txt",
+ setUp=setup, tearDown=zope.testing.setupstack.tearDown,
+ ))
suite.addTest(doctest.DocTestSuite(
setUp=setup, tearDown=zope.testing.setupstack.tearDown,
checker = zope.testing.renormalizing.RENormalizing([
@@ -608,9 +614,8 @@
suite.addTest(unittest.makeSuite(ZODBBlobConfigTest))
suite.addTest(unittest.makeSuite(BlobCloneTests))
suite.addTest(doctest.DocFileSuite(
- "blob_basic.txt",
- "blob_packing.txt", "blob_consume.txt",
- "blob_tempdir.txt",
+ "blob_basic.txt", "blob_consume.txt", "blob_tempdir.txt",
+ "blobstorage_packing.txt",
setUp=setUp,
tearDown=zope.testing.setupstack.tearDown,
optionflags=doctest.ELLIPSIS,
@@ -629,7 +634,9 @@
suite.addTest(storage_reusable_suite(
'BlobAdaptedFileStorage',
lambda name, blob_dir:
- ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name))
+ ZODB.blob.BlobStorage(blob_dir, FileStorage('%s.fs' % name)),
+ test_blob_storage_recovery=True,
+ test_packing=True,
))
return suite
More information about the Zodb-checkins
mailing list