[Zodb-checkins] CVS: ZODB4/src/zodb/storage - bdbminimal.py:1.11.4.1
Barry Warsaw
barry@wooz.org
Mon, 10 Feb 2003 18:09:45 -0500
Update of /cvs-repository/ZODB4/src/zodb/storage
In directory cvs.zope.org:/tmp/cvs-serv4308/src/zodb/storage
Modified Files:
Tag: opaque-pickles-branch
bdbminimal.py
Log Message:
The start of opaque pickles (from the p.o.v. of the storages). This
will eventually allow us to pass compressed pickles to the storage if
we want.
The approach basically changes store() so that the data argument is a
2-tuple of the pickle and the list of oids referenced in the pickle.
This is the first step in the changes, but currently, only Berkeley
storages natively store the refs included in the store() API call.
Changes here include:
- We don't need findrefs() here since store() will be handed the list
of oid references.
- Bump the schema version to BM02 to reflect the addition of the
referents table.
- _doabort(), _docommit(): Cleanup and use the referents table to
properly adjust the object refcounts.
- _dostore(): Split the data 2-tuple arg into the data and refs, and
update the referents table, but only if there actually are oids in
that list.
- _update_refcounts(), _mark(), _collect_objs(): Get rid of pickle
sniffing for object references. Use the stored information in the
referents table instead.
- Get ZERO from zodb.interfaces, and use MAXTID for DNE.
=== ZODB4/src/zodb/storage/bdbminimal.py 1.11 => 1.11.4.1 ===
--- ZODB4/src/zodb/storage/bdbminimal.py:1.11 Wed Feb 5 18:28:32 2003
+++ ZODB4/src/zodb/storage/bdbminimal.py Mon Feb 10 18:09:44 2003
@@ -17,17 +17,18 @@
$Revision$
"""
+from zodb.interfaces import ZERO
from zodb.storage.interfaces import *
from zodb.utils import p64, u64
-from zodb.serialize import findrefs
from zodb.conflict import ConflictResolvingStorage, ResolvedSerial
-from zodb.storage.base import db, ZERO, BerkeleyBase, PackStop, _WorkThread
+from zodb.storage.base import db, BerkeleyBase, PackStop, _WorkThread
ABORT = 'A'
COMMIT = 'C'
PRESENT = 'X'
+EMPTYSTRING = ''
-BDBMINIMAL_SCHEMA_VERSION = 'BM01'
+BDBMINIMAL_SCHEMA_VERSION = 'BM02'
@@ -78,6 +79,11 @@
# reference count is updated during the _finish() call. When it
# goes to zero, the object is automatically deleted.
#
+ # referents -- {oid+tid -> oid+oid+...}
+ # For each revision of the object, these are the oids of the
+ # objects referred to in the data record, as a list of 8-byte
+ # oids, concatenated together.
+ #
# oids -- [oid]
# This is a list of oids of objects that are modified in the
# current uncommitted transaction.
@@ -149,8 +155,11 @@
pass
else:
cs.delete()
- # And delete the pickle table entry for this revision.
- self._pickles.delete(oid+tid, txn=txn)
+ # Clean up revision-indexed tables
+ revid = oid+tid
+ self._pickles.delete(revid, txn=txn)
+ if self._referents.has_key(revid):
+ self._referents.delete(revid, txn=txn)
finally:
# There's a small window of opportunity for leaking a cursor here,
# if co.close() were to fail. In practice this shouldn't happen.
@@ -185,19 +194,22 @@
if soid <> oid:
break
if stid <> tid:
+ revid = oid+stid
# This is the previous revision of the object, so
# decref its referents and clean up its pickles.
cs.delete()
- data = self._pickles.get(oid+stid, txn=txn)
- assert data is not None
- self._update(deltas, data, -1)
- self._pickles.delete(oid+stid, txn=txn)
+ referents = self._referents.get(revid, txn=txn)
+ if referents:
+ self._update(deltas, referents, -1)
+ self._pickles.delete(revid, txn=txn)
+ if self._referents.has_key(revid):
+ self._referents.delete(revid, txn=txn)
srec = cs.next_dup()
# Now add incref deltas for all objects referenced by the new
# revision of this object.
- data = self._pickles.get(oid+tid, txn=txn)
- assert data is not None
- self._update(deltas, data, 1)
+ referents = self._referents.get(oid+tid, txn=txn)
+ if referents:
+ self._update(deltas, referents, 1)
finally:
# There's a small window of opportunity for leaking a cursor here,
# if co.close() were to fail. In practice this shouldn't happen.
@@ -231,8 +243,9 @@
# pickles and refcounts table. Note that before we remove its
# pickle, we need to decref all the objects referenced by it.
current = self._getCurrentSerial(oid)
- data = self._pickles.get(oid+current, txn=txn)
- self._update(newdeltas, data, -1)
+ referents = self._referents.get(oid+current, txn=txn)
+ if referents:
+ self._update(newdeltas, referents, -1)
# And delete the serials, pickle and refcount entries. At
# this point, I believe we should have just one serial entry.
self._serials.delete(oid, txn=txn)
@@ -259,6 +272,7 @@
txn.commit()
def _dostore(self, txn, oid, serial, data):
+ data, refs = data
conflictresolved = False
oserial = self._getCurrentSerial(oid)
if oserial is not None and serial <> oserial:
@@ -267,11 +281,15 @@
# number. Raise a ConflictError.
data = self.resolveConflict(oid, oserial, serial, data)
conflictresolved = True
- # Optimistically write to the serials and pickles table. Be sure
- # to also update the oids table for this object too.
+ # Optimistically write to the various tables.
newserial = self._serial
+ revid = oid+newserial
self._serials.put(oid, newserial, txn=txn)
- self._pickles.put(oid+newserial, data, txn=txn)
+ self._pickles.put(revid, data, txn=txn)
+ if refs:
+ referents = EMPTYSTRING.join(refs)
+ assert len(referents) % 8 == 0
+ self._referents.put(revid, referents, txn=txn)
self._oids.put(oid, PRESENT, txn=txn)
# If we're in the middle of a pack, we need to add these objects to
# the packmark, so a specific race condition won't collect them.
@@ -434,9 +452,12 @@
# unit tests), and we're looking up oid ZERO. Then serial
# will be None.
if tid is not None:
- data = self._pickles[oid+tid]
- for oid in findrefs(data):
- self._oidqueue.append(oid, txn)
+ # Now get the oids of all the objects referenced by this
+ # object revision
+ referents = self._referents.get(oid+tid)
+ if referents:
+ for oid in self._splitoids(referents):
+ self._oidqueue.append(oid, txn)
# Pop the next oid off the queue and do it all again
rec = self._oidqueue.consume(txn)
oid = rec and rec[1]
@@ -487,7 +508,7 @@
pass
finally:
c.close()
- # Now collect the pickle data and do reference counting
+ # Collect the pickle data
c = self._pickles.cursor(txn)
try:
try:
@@ -497,17 +518,33 @@
while rec and rec[0][:8] == oid:
if self._stop:
raise PackStop, 'stopped in _collect_objs() loop 2'
- data = rec[1]
c.delete()
rec = c.next()
- deltas = {}
- self._update(deltas, data, -1)
- for oid, delta in deltas.items():
- refcount = u64(self._refcounts.get(oid, ZERO)) + delta
- if refcount <= 0:
- self._oidqueue.append(oid, txn)
- else:
- self._refcounts.put(oid, p64(refcount), txn=txn)
+ finally:
+ c.close()
+ # Collect referents and do reference counting
+ c = self._referents.cursor(txn)
+ try:
+ try:
+ rec = c.set_range(oid)
+ except db.DBNotFoundError:
+ rec = None
+ while rec and rec[0][:8] == oid:
+ if self._stop:
+ raise PackStop, 'stopped in _collect_objs() loop 3'
+ referents = rec[1]
+ if referents:
+ deltas = {}
+ self._update(deltas, referents, -1)
+ for oid, delta in deltas.items():
+ rc = u64(self._refcounts.get(oid, ZERO)) + delta
+ if rc <= 0:
+ self._oidqueue.append(oid, txn)
+ else:
+ self._refcounts.put(oid, p64(rc), txn=txn)
+ # Delete table entry
+ c.delete()
+ rec = c.next()
finally:
c.close()
# We really do want this down here, since _decrefPickle() could