[Zodb-checkins] CVS: ZODB3/bsddb3Storage/bsddb3Storage - Minimal.py:1.12.4.2
Barry Warsaw
barry@wooz.org
Wed, 11 Sep 2002 14:10:32 -0400
Update of /cvs-repository/ZODB3/bsddb3Storage/bsddb3Storage
In directory cvs.zope.org:/tmp/cvs-serv17864/bsddb3Storage/bsddb3Storage
Modified Files:
Tag: bdb-nolocks
Minimal.py
Log Message:
_doabort(): Simplify cursor management. This leaves a tiny window of
opportunity to leak cursors, but I don't think it'll be a problem in
practice (if it was we could wrap the first close in a try/finally).
_docommit(): Same, but also rewrite the oids loop so that we don't
need to instantiate the table's keys as an in-memory list. The
trade-off is that we lose a bit of locality of reference because we're
stepping through two tables simultaneously, but hopefully a big enough
Berkeley cachesize will smooth that out.
_update_refcounts(): Rewritten to interface better with _docommit().
Also, much simplification.
_vote(), _finish(): Move the twiddle of the pendings table from ABORT
to COMMIT, from the vote to the finish. Semantically, just because
we're voting doesn't mean we agree to commit our transaction.
_update(): New helper function.
=== ZODB3/bsddb3Storage/bsddb3Storage/Minimal.py 1.12.4.1 => 1.12.4.2 ===
--- ZODB3/bsddb3Storage/bsddb3Storage/Minimal.py:1.12.4.1 Mon Sep 9 18:52:51 2002
+++ ZODB3/bsddb3Storage/bsddb3Storage/Minimal.py Wed Sep 11 14:10:32 2002
@@ -116,89 +116,96 @@
self._docheckpoint()
def _doabort(self, tid, txn):
- c = self._serials.cursor(txn=txn)
+ co = cs = None
try:
- # Go through the oids loop twice to maintain locality of reference
- oids = self._oids.keys()
- # Remove from the serials table all entries with key oid where the
- # serial is tid.
- for oid in oids:
+ co = self._oids.cursor(txn=txn)
+ cs = self._serials.cursor(txn=txn)
+ rec = co.first()
+ while rec:
+ oid = rec[0]
+ rec = co.next()
try:
- rec = c.set_both(oid, tid)
+ cs.set_both(oid, tid)
except db.DBNotFoundError:
pass
else:
- c.delete()
- # Remove from the pickles table all entries with key oid+tid
- for oid in oids:
+ cs.delete()
+ # And delete the pickle table entry for this revision.
self._pickles.delete(oid+tid, txn=txn)
finally:
- c.close()
+ # There's a small window of opportunity for leaking a cursor
+ # here, if co.close() were to fail. In practice this shouldn't happen.
+ if co: co.close()
+ if cs: cs.close()
def _docommit(self, tid, txn):
- c = self._serials.cursor(txn=txn)
- try:
- # Go through the oids loop twice to maintain locality of reference
- oids = self._oids.keys()
- # Remove from the serials table all entries with key oid where the
- # serial is not tid. These are the old revisions of the object.
- for oid in oids:
- rec = c.set(oid)
- while rec:
- key, val = rec
- if val <> tid:
- c.delete()
- rec = c.next_dup()
- # Update refcounts and collect garbage recursively
- self._update_refcounts(oids, txn)
- finally:
- c.close()
-
- def _update_refcounts(self, oids, txn):
deltas = {}
- # Go through all the objects touched by this transaction. For each
- # new revision, increment the refcount of all referenced objects. For
- # each old revision, decrement the refcount of all referenced objects.
- # Actually, we'll keep a bunch of delta so we only need to touch the
- # refcounts table once for performance.
- c = self._pickles.cursor(txn=txn)
+ co = cs = None
try:
- tid = self._serial
- for oid in oids:
- rec = c.set_range(oid)
- while rec:
- key, val = rec
- if key[:8] <> oid:
+ co = self._oids.cursor(txn=txn)
+ cs = self._serials.cursor(txn=txn)
+ rec = co.first()
+ while rec:
+ oid = rec[0]
+ rec = co.next()
+ # Remove from the serials table all entries with key oid where
+ # the serial is not tid. These are the old revisions of the
+ # object. At the same time, we want to collect the oids of
+ # the objects referred to by this revision's pickle, so that
+ # later we can decref those reference counts.
+ srec = cs.set(oid)
+ while srec:
+ soid, stid = srec
+ if soid <> oid:
break
- if key[8:] == tid:
- # This is the new pickle
- delta = 1
- else:
- # This is the old pickle. We can also delete the old
- # pickle data now.
- delta = -1
- c.delete()
- refdoids = []
- referencesf(val, refdoids)
- for rcoids in refdoids:
- deltas[rcoids] = deltas.get(rcoids, 0) + delta
- rec = c.next()
- # Now update the reference counts for all the referenced objects
- # and keep track of any with a refcount that goes to zero. For
- # those, we'll recursively decrement their refcounts.
- nextoids = []
- for oid, delta in deltas.items():
- rc = U64(self._refcounts.get(oid, ZERO)) + delta
- assert rc >= 0
- if rc == 0:
- nextoids.append(oid)
- self._refcounts.delete(oid, txn=txn)
- else:
- self._refcounts.put(oid, p64(rc), txn=txn)
- if nextoids:
- self._update_refcounts(nextoids, txn)
- finally:
- c.close()
+ if stid <> tid:
+ cs.delete()
+ data = self._pickles.get(oid+stid, txn=txn)
+ assert data is not None
+ _update(deltas, data, -1)
+ self._pickles.delete(oid+stid, txn=txn)
+ srec = cs.next_dup()
+ # Now add incref deltas for all objects referenced by the new
+ # revision of this object.
+ data = self._pickles.get(oid+tid, txn=txn)
+ assert data is not None
+ _update(deltas, data, 1)
+ finally:
+ # There's a small window of opportunity for leaking a cursor
+ # here, if co.close() were to fail. In practice this shouldn't happen.
+ if co: co.close()
+ if cs: cs.close()
+ # Now, to finish up, we need to apply the refcount deltas to the
+ # refcounts table, and do recursive collection of all refcount == 0
+ # objects.
+ if deltas:
+ self._update_refcounts(deltas, txn)
+
+ def _update_refcounts(self, deltas, txn):
+ newdeltas = {}
+ for oid, delta in deltas.items():
+ rc = U64(self._refcounts.get(oid, ZERO, txn=txn)) + delta
+ assert rc >= 0
+ if rc == 0:
+ # The reference count for this object has just gone to zero,
+ # so we can safely remove all traces of it from the serials,
+ # pickles and refcounts table. Note that before we remove its
+ # pickle, we need to decref all the objects referenced by it.
+ current = self._getCurrentSerial(oid)
+ data = self._pickles.get(oid+current, txn=txn)
+ _update(newdeltas, data, -1)
+ # And delete the serials, pickle and refcount entries. At
+ # this point, I believe we should have just one serial entry.
+ self._serials.delete(oid, txn=txn)
+ assert self._serials.get(oid, txn=txn) is None
+ self._refcounts.delete(oid, txn=txn)
+ self._pickles.delete(oid+current, txn=txn)
+ else:
+ self._refcounts.put(oid, p64(rc), txn=txn)
+ # Now we need to recurse for any objects referenced by pickles just
+ # deleted in this round.
+ if newdeltas:
+ self._update_refcounts(newdeltas, txn)
def _begin(self, tid, u, d, e):
# When a transaction begins, we set the pending flag to ABORT,
@@ -270,21 +277,24 @@
self._oids.put(oid, PRESENT, txn=txn)
except:
txn.abort()
+ self._docheckpoint()
raise
else:
txn.commit()
+ self._docheckpoint()
finally:
self._lock_release()
# Return the new serial number for the object
return newserial
def _vote(self):
+ pass
+
+ def _finish(self, tid, u, d, e):
# Twiddle the pending flag to COMMIT now since after the vote call, we
# promise that the changes will be committed, no matter what. The
# recovery process will check this.
self._pending[self._serial] = COMMIT
-
- def _finish(self, tid, u, d, e):
self._do(self._docommit, self._serial)
def _abort(self):
@@ -320,3 +330,16 @@
# So BaseStorage.getSerial just works. Note that this storage doesn't
# support versions.
return ''
+
+
+
+def _update(deltas, data, incdec):
+ refdoids = []
+ referencesf(data, refdoids)
+ for oid in refdoids:
+ rc = deltas.get(oid, 0) + incdec
+ if rc == 0:
+ # Save space in the dict by zapping zeroes
+ del deltas[oid]
+ else:
+ deltas[oid] = rc