[Zope3-checkins] CVS: Zope3/src/zodb/storage - bdbfull.py:1.30

Barry Warsaw <barry@zope.com>
Tue, 29 Jul 2003 18:20:05 -0400


Update of /cvs-repository/Zope3/src/zodb/storage
In directory cvs.zope.org:/tmp/cvs-serv7178/src/zodb/storage

Modified Files:
	bdbfull.py 
Log Message:
Forward-port some fixes from the ZODB 3.1 branch.

_make_autopacker(), _Autopack.__init__(): Remove the unused lastpacktime variable and argument.

store(): Raise ReadOnlyError when the storage's internal read-only flag is set.

_loadSerialEx(): Cosmetic consistency; inline the revid computation and document the returned object references.

getSerial(): Raise KeyError for uncreated objects.

_TransactionsIterator.iterator(): Added.
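
For context on that last item: iterator() returning self lets a manually
created _TransactionsIterator stand in for the `other' argument of
BaseStorage.copyTransactionsFrom(), which iterates over other.iterator().
A minimal sketch of the intended usage, assuming the usual
iterator(start=None, stop=None) signature on the storage; the storage
paths here are made up:

    # Hypothetical sketch: copy transactions between two BerkeleyDB full
    # storages.  The paths are illustrative only.
    from zodb.storage.bdbfull import BDBFullStorage

    src = BDBFullStorage('/var/zodb/source')
    dst = BDBFullStorage('/var/zodb/destination')

    # Build the iterator by hand (optionally bounding it with start/stop
    # tids), then hand it straight to copyTransactionsFrom(); the
    # iterator's new iterator() method simply returns itself.
    it = src.iterator()
    dst.copyTransactionsFrom(it)

    src.close()
    dst.close()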



=== Zope3/src/zodb/storage/bdbfull.py 1.29 => 1.30 ===
--- Zope3/src/zodb/storage/bdbfull.py:1.29	Tue Jul 29 11:18:07 2003
+++ Zope3/src/zodb/storage/bdbfull.py	Tue Jul 29 18:19:59 2003
@@ -302,11 +302,8 @@
 
     def _make_autopacker(self, event):
         config = self._config
-        lastpacktime = u64(self._last_packtime())
-        return _Autopack(
-            self, event,
-            config.frequency, config.packtime, config.gcpack,
-            lastpacktime)
+        return _Autopack(self, event,
+                         config.frequency, config.packtime, config.gcpack)
 
     def _doabort(self, txn, tid):
         # First clean up the oid indexed (or oid+tid indexed) tables.
@@ -553,6 +550,8 @@
 
     def store(self, oid, serial, data, refs, version, transaction):
         # Lock and transaction wrapper
+        if self._is_read_only:
+            raise POSException.ReadOnlyError()
         if transaction is not self._transaction:
             raise StorageTransactionError(self, transaction)
         self._lock_acquire()
@@ -1007,17 +1006,16 @@
         return serial, tid
 
     def _loadSerialEx(self, oid, serial):
-        revid = oid+serial
         # Just like loadSerial, except that it returns the pickle data, the
-        # version this object revision is living in, and a backpointer.  The
-        # backpointer is None if the lrevid for this metadata record is the
-        # same as the tid.  If not, we have a pointer to previously existing
-        # data, so we return that.
+        # version this object revision is living in, a backpointer, and the
+        # object references.  The backpointer is None if the lrevid for this
+        # metadata record is the same as the tid.  If not, we have a pointer
+        # to previously existing data, so we return that.
         self._lock_acquire()
         try:
             # Get the pointer to the pickle for the given serial number.  Let
             # KeyErrors percolate up.
-            metadata = self._metadata[revid]
+            metadata = self._metadata[oid+serial]
             vid, ign, lrevid = unpack('>8s8s8s', metadata[:24])
             if vid == ZERO:
                 version = ''
@@ -1052,6 +1050,10 @@
         self._lock_acquire()
         try:
             serial, tid = self._getSerialAndTid(oid)
+            # See if the object has been uncreated
+            lrevid = unpack('>8s', self._metadata[oid+tid][16:24])[0]
+            if lrevid == DNE:
+                raise KeyError
             return serial
         finally:
             self._lock_release()
@@ -1120,9 +1122,9 @@
             vid, nvrevid = unpack('>8s8s', self._metadata[oid+tid][:16])
             return oid, vid+nvrevid+DNE+ctid, None, None
         elif target_prevrevid == ZERO or last_prevrevid == ZERO:
-            # The object's revision is in its initial creation state but
-            # we're asking for an undo of something other than the initial
-            # creation state.  No, no.
+            # The object's revision is in its initial creation state but we're
+            # asking for an undo of something other than the initial creation
+            # state.  No, no.
             raise UndoError, 'Undoing mismatched zombification'
         last_lrevid     = self._metadata[oid+last_prevrevid][16:24]
         target_metadata = self._metadata[oid+target_prevrevid]
@@ -1298,9 +1300,32 @@
             last = abs(last)
         return self._withlock(self._doundolog, first, last, filter)
 
-    #
     # Packing
     #
+    # There are two types of pack operations, the classic pack and the
+    # autopack.  Autopack's primary job is to periodically delete non-current
+    # object revisions.  It runs in a thread and has an `autopack time' which
+    # is essentially just a time in the past at which to autopack to.  For
+    # example, you might set up autopack to run once per hour, packing away
+    # all revisions that are older than 4 hours.  Autopack can also be
+    # configured to periodically do a classic pack.
+    #
+    # Classic pack is like autopack -- it packs away old revisions -- but it
+    # also does a mark and sweep through all the known objects, looking for
+    # those that are not root reachable as of the pack time.  Such objects are
+    # also packed away even if they have current revisions in the packable
+    # transactions, because it means that there is no undo operation that can
+    # restore the object's reachability.  Remember that you cannot undo
+    # previous to the latest pack time.
+    #
+    # Both packing strategies do reference counting, and the refcounts are
+    # sums of the refcounts of all revisions, so if an object's refcount goes
+    # to zero, all its object revisions can safely be packed away.
+    #
+    # We try to interleave BerkeleyDB transactions and non-pack-lock
+    # acquisition as granularly as possible so that packing doesn't block
+    # other operations for too long.  But remember we don't use Berkeley locks
+    # so we have to be careful about our application level locks.
 
     def pack(self, t, gc=True):
         """Perform a pack on the storage.
@@ -1578,7 +1603,7 @@
         # is always in the root set.  Second, any object revision after the
         # pack time that has a back pointer (lrevid) to before the pack time
         # serves as another root because some future undo could then revive
-        # any referenced objects.
+        # any referenced objects.  The root set ends up in the oidqueue.
         try:
             zerorev = self._findrev(ZERO, packtid, txn)
         except KeyError:
@@ -1626,6 +1651,8 @@
         # Quick exit for empty storages
         if not self._serials:
             return
+        # Start with the root set, iterating over all reachable objects until
+        # we've traversed the entire object tree.
         self._rootset(packtid, txn)
         rec = self._oidqueue.consume(txn)
         while rec:
@@ -1758,6 +1785,13 @@
         self._first = True
         self._iters = []
 
+    # This allows us to pass an iterator as the `other' argument to
+    # copyTransactionsFrom() in BaseStorage.  The advantage here is that we
+    # can create the iterator manually, e.g. setting start and stop, and then
+    # just let copyTransactionsFrom() do its thing.
+    def iterator(self):
+        return self
+
     def next(self):
         """Return the ith item in the sequence of transaction data.
 
@@ -1899,9 +1933,7 @@
 class _Autopack(_WorkThread):
     NAME = 'autopacking'
 
-    def __init__(self, storage, event,
-                 frequency, packtime, gcpack,
-                 lastpacktime):
+    def __init__(self, storage, event, frequency, packtime, gcpack):
         _WorkThread.__init__(self, storage, event, frequency)
         self._packtime = packtime
         self._gcpack = gcpack
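
As a postscript to the packing commentary added above: the classic pack
entry point is the pack(t, gc=True) method whose signature appears in the
diff.  Here is a hedged sketch of the four-hour example from the new
comments; the storage path is hypothetical:

    import time
    from zodb.storage.bdbfull import BDBFullStorage

    storage = BDBFullStorage('/var/zodb/main')

    # Pack away all non-current object revisions older than four hours.
    # With gc=True this is a classic pack: it also marks and sweeps away
    # objects that are not root-reachable as of the pack time.  Remember
    # that transactions before the pack time can no longer be undone.
    storage.pack(time.time() - 4 * 60 * 60, gc=True)

    storage.close()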