[Zodb-checkins] SVN: ZODB/trunk/src/ Fixed a bug in object-cache size accounting. New objects weren't counted properly.
Jim Fulton
jim at zope.com
Tue Jun 9 18:55:51 EDT 2009
Log message for revision 100773:
Fixed a bug in object-cache size accounting. New objects weren't
counted properly.
Changed:
U ZODB/trunk/src/CHANGES.txt
U ZODB/trunk/src/ZODB/Connection.py
U ZODB/trunk/src/ZODB/tests/testCache.py
-=-
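
The fix concerns the object cache's byte-size accounting. As a minimal
pure-Python sketch of the accounting contract (assumed semantics for
illustration only; the real implementation lives in
persistent.cPickleCache), update_object_size_estimation adjusts a
running byte total by the delta between an object's previously recorded
and new pickle sizes:

    # Sketch of the size-accounting contract, not the cPickleCache code.
    class SketchCache(object):

        def __init__(self):
            self.total_estimated_size = 0  # what byte-based limiting reads
            self._sizes = {}               # oid -> last recorded size

        def update_object_size_estimation(self, oid, new_size):
            # Adjust the running total by the size delta for this oid.
            old_size = self._sizes.get(oid, 0)
            self.total_estimated_size += new_size - old_size
            self._sizes[oid] = new_size

    cache = SketchCache()
    cache.update_object_size_estimation('oid1', 128)
    cache.update_object_size_estimation('oid1', 96)
    print(cache.total_estimated_size)  # 96
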
Modified: ZODB/trunk/src/CHANGES.txt
===================================================================
--- ZODB/trunk/src/CHANGES.txt 2009-06-09 22:13:35 UTC (rev 100772)
+++ ZODB/trunk/src/CHANGES.txt 2009-06-09 22:55:51 UTC (rev 100773)
@@ -8,6 +8,10 @@
Bugs Fixed
----------
+- Sizes of new objects weren't added to the object cache size
+ estimation, causing the object-cache size limiting feature to let
+ the cache grow too large when many objects were added.
+
- Deleted records weren't removed when packing file storages.
- Fixed intermittent failures in the MVCCMappingStorage tests.
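
In context, the symptom described in the entry above shows up with the
byte-based cache limit. A hedged demonstration (the cache_size_bytes
knob and the numbers here are assumptions in the ZODB 3.9 style; P is
the small persistent helper class also used by the new test below):

    import transaction
    import ZODB
    import ZODB.MappingStorage
    import ZODB.tests.util

    # Open a database whose per-connection cache has a 10000-byte budget.
    db = ZODB.DB(ZODB.MappingStorage.MappingStorage(),
                 cache_size_bytes=10000)
    conn = db.open()
    for i in range(100):
        conn.root()[i] = ZODB.tests.util.P('x' * 500)
    transaction.commit()

    # Before this fix the freshly stored objects were missing from
    # total_estimated_size, so garbage collection saw a total far below
    # the 10000-byte budget and the cache could grow unbounded.
    conn.cacheGC()
    print(conn._cache.total_estimated_size)
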
Modified: ZODB/trunk/src/ZODB/Connection.py
===================================================================
--- ZODB/trunk/src/ZODB/Connection.py 2009-06-09 22:13:35 UTC (rev 100772)
+++ ZODB/trunk/src/ZODB/Connection.py 2009-06-09 22:55:51 UTC (rev 100773)
@@ -656,10 +656,7 @@
obj._p_invalidate()
else:
s = self._storage.store(oid, serial, p, '', transaction)
- self._cache.update_object_size_estimation(oid,
- len(p)
- )
- obj._p_estimated_size = len(p)
+
self._store_count += 1
# Put the object in the cache before handling the
# response, just in case the response contains the
@@ -674,6 +671,9 @@
else:
raise
+ self._cache.update_object_size_estimation(oid, len(p))
+ obj._p_estimated_size = len(p)
+
self._handle_serial(s, oid)
def _handle_serial(self, store_return, oid=None, change=1):
@@ -901,9 +901,7 @@
self._reader.setGhostState(obj, p)
obj._p_serial = serial
- self._cache.update_object_size_estimation(obj._p_oid,
- len(p)
- )
+ self._cache.update_object_size_estimation(obj._p_oid, len(p))
obj._p_estimated_size = len(p)
# Blob support
@@ -1162,9 +1160,7 @@
data, serial = src.load(oid, src)
obj = self._cache.get(oid, None)
if obj is not None:
- self._cache.update_object_size_estimation(obj._p_oid,
- len(data)
- )
+ self._cache.update_object_size_estimation(obj._p_oid, len(data))
obj._p_estimated_size = len(data)
if isinstance(self._reader.getGhost(data), Blob):
blobfilename = src.loadBlob(oid, serial)
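
The essential change in Connection._store_objects is ordering: for a
brand-new object the oid is not in the cache until the later
self._cache[oid] = obj assignment runs, so a size update issued right
after store() had nothing to attach the size to. A toy model of that
behavior (the ignore-unknown-oids semantics is an assumption inferred
from this fix, not taken from the cPickleCache source):

    class TinyCache(dict):
        total_estimated_size = 0

        def update_object_size_estimation(self, oid, new_size):
            if oid not in self:
                return  # unknown oid: the update is silently ignored
            old = self[oid]._p_estimated_size
            self.total_estimated_size += new_size - old

    class Obj(object):
        _p_estimated_size = 0

    cache = TinyCache()

    # Old order: update before the cache assignment, a no-op for new oids.
    a = Obj()
    cache.update_object_size_estimation('a', 100)
    a._p_estimated_size = 100
    cache['a'] = a
    print(cache.total_estimated_size)  # 0: the new object went uncounted

    # New order (this commit): put the object in the cache first.
    b = Obj()
    cache['b'] = b
    cache.update_object_size_estimation('b', 100)
    b._p_estimated_size = 100
    print(cache.total_estimated_size)  # 100
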
Modified: ZODB/trunk/src/ZODB/tests/testCache.py
===================================================================
--- ZODB/trunk/src/ZODB/tests/testCache.py 2009-06-09 22:13:35 UTC (rev 100772)
+++ ZODB/trunk/src/ZODB/tests/testCache.py 2009-06-09 22:55:51 UTC (rev 100773)
@@ -18,22 +18,21 @@
objects in memory under the assumption that they may be used again.
"""
+from persistent.cPickleCache import PickleCache
+from persistent import Persistent
+from persistent.mapping import PersistentMapping
+from ZODB.tests.MinPO import MinPO
+from ZODB.utils import p64
+from zope.testing import doctest
import gc
+import threading
import time
+import transaction
import unittest
-import threading
-
-from persistent.cPickleCache import PickleCache
-from persistent.mapping import PersistentMapping
-import transaction
import ZODB
import ZODB.MappingStorage
-from ZODB.tests.MinPO import MinPO
import ZODB.tests.util
-from ZODB.utils import p64
-from persistent import Persistent
-
class CacheTestBase(ZODB.tests.util.TestCase):
def setUp(self):
@@ -418,8 +417,64 @@
else:
self.fail("two objects with the same oid should have failed")
+def check_basic_cache_size_estimation():
+ """Make sure the basic accounting is correct:
+
+ >>> import ZODB.MappingStorage
+ >>> db = ZODB.MappingStorage.DB()
+ >>> conn = db.open()
+
+The cache is empty initially:
+
+ >>> conn._cache.total_estimated_size
+ 0
+
+We force the root to be loaded and the cache grows:
+
+ >>> getattr(conn.root, 'z', None)
+ >>> conn._cache.total_estimated_size
+ 128
+
+We add some data and the cache grows:
+
+ >>> conn.root.z = ZODB.tests.util.P('x'*100)
+ >>> import transaction
+ >>> transaction.commit()
+ >>> conn._cache.total_estimated_size
+ 320
+
+Loading the objects in another connection gets the same sizes:
+
+ >>> conn2 = db.open()
+ >>> conn2._cache.total_estimated_size
+ 0
+ >>> getattr(conn2.root, 'x', None)
+ >>> conn2._cache.total_estimated_size
+ 128
+ >>> _ = conn2.root.z.name
+ >>> conn2._cache.total_estimated_size
+ 320
+
+If we deactivate, the size goes down:
+
+ >>> conn2.root.z._p_deactivate()
+ >>> conn2._cache.total_estimated_size
+ 128
+
+Loading data directly, rather than through traversal, updates the cache
+size correctly:
+
+ >>> conn3 = db.open()
+ >>> _ = conn3.get(conn2.root.z._p_oid).name
+ >>> conn3._cache.total_estimated_size
+ 192
+
+ """
+
+
def test_suite():
s = unittest.makeSuite(DBMethods, 'check')
s.addTest(unittest.makeSuite(LRUCacheTests, 'check'))
s.addTest(unittest.makeSuite(CacheErrors, 'check'))
+ s.addTest(doctest.DocTestSuite())
return s
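
For reference, the updated suite, including the new
check_basic_cache_size_estimation doctest, can be run on its own from a
checkout with ZODB importable:

    import unittest
    from ZODB.tests import testCache

    # Exercises DBMethods, LRUCacheTests, CacheErrors and the doctest.
    unittest.TextTestRunner(verbosity=2).run(testCache.test_suite())
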