[Zodb-checkins] CVS: Zope/lib/python/ZODB - ActivityMonitor.py:1.3.6.2 BaseStorage.py:1.21.2.2 ConflictResolution.py:1.13.6.1 Connection.py:1.72.6.1 DB.py:1.43.6.1 TimeStamp.c:1.15.60.1 Transaction.py:1.37.6.1 __init__.py:1.14.2.1 cPersistence.c:1.62.10.2 coptimizations.c:1.17.60.1 fsdump.py:1.3.70.1
Jeremy Hylton
jeremy@zope.com
Tue, 12 Nov 2002 16:14:29 -0500
Update of /cvs-repository/Zope/lib/python/ZODB
In directory cvs.zope.org:/tmp/cvs-serv27252/lib/python/ZODB
Modified Files:
Tag: Zope-2_6-branch
ActivityMonitor.py BaseStorage.py ConflictResolution.py
Connection.py DB.py TimeStamp.c Transaction.py __init__.py
cPersistence.c coptimizations.c fsdump.py
Log Message:
Sync Zope 2.6 and ZODB 3.1 release branches.
Add ZODB deadlock prevention code.
Fix a bug in ConflictResolution bad_classes handling.
Don't let exceptions propagate out of ConflictResolution.
Add data_txn attribute to records returned by storage iterators.
Other sundry changes.
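A note on the deadlock prevention, since it cuts across several files below: it is plain lock ordering. Every jar (connection/storage) now exposes a sortKey(), and the transaction machinery always runs the two-phase commit over jars sorted by that key, so two concurrent commits that touch the same storages acquire commit locks in the same order and cannot wait on each other in a cycle. A minimal sketch of the idea only, not the actual ZODB code (two_phase_commit is a made-up name):

    # Illustration only -- not ZODB code.  Committers that touch storages
    # A and B always lock them in the same (sortKey) order, so neither can
    # hold A's commit lock while waiting for B's and vice versa.
    def two_phase_commit(jars, txn):
        jars = list(jars)
        jars.sort(lambda j1, j2: cmp(j1.sortKey(), j2.sortKey()))
        for jar in jars:
            jar.tpc_begin(txn)     # takes that storage's commit lock
        for jar in jars:
            jar.tpc_vote(txn)
        for jar in jars:
            jar.tpc_finish(txn)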
=== Zope/lib/python/ZODB/ActivityMonitor.py 1.3.6.1 => 1.3.6.2 ===
=== Zope/lib/python/ZODB/BaseStorage.py 1.21.2.1 => 1.21.2.2 ===
--- Zope/lib/python/ZODB/BaseStorage.py:1.21.2.1 Tue Nov 5 16:58:48 2002
+++ Zope/lib/python/ZODB/BaseStorage.py Tue Nov 12 16:13:58 2002
@@ -63,6 +63,15 @@
def close(self):
pass
+ def sortKey(self):
+ """Return a string that can be used to sort storage instances.
+
+ The key must uniquely identify a storage and must be the same
+ across multiple instantiations of the same storage.
+ """
+ # name may not be sufficient, e.g. ZEO has a user-definable name.
+ return self.__name__
+
def getName(self):
return self.__name__
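(The base class just reuses the storage name, which the comment notes may not be unique enough, e.g. a ZEO client's user-definable name. A storage like that would override sortKey() with something stable and globally unique. A hedged sketch only; AddressedStorage and _addr are invented for illustration:)

    from ZODB.BaseStorage import BaseStorage

    class AddressedStorage(BaseStorage):
        # Hypothetical storage whose display name is user-chosen and so
        # cannot order commits consistently across processes.
        def __init__(self, name, addr):
            BaseStorage.__init__(self, name)
            self._addr = addr          # e.g. ('storage.example.com', 8100)

        def sortKey(self):
            # Stable across restarts and identical in every process that
            # talks to the same underlying storage.
            return '%s:%s' % self._addr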
=== Zope/lib/python/ZODB/ConflictResolution.py 1.13 => 1.13.6.1 ===
--- Zope/lib/python/ZODB/ConflictResolution.py:1.13 Thu Aug 15 11:48:55 2002
+++ Zope/lib/python/ZODB/ConflictResolution.py Tue Nov 12 16:13:58 2002
@@ -11,6 +11,7 @@
# FOR A PARTICULAR PURPOSE
#
##############################################################################
+import sys
from cStringIO import StringIO
from cPickle import Unpickler, Pickler
@@ -77,7 +78,7 @@
except (ImportError, AttributeError):
zLOG.LOG("Conflict Resolution", zLOG.BLATHER,
"Unable to load class", error=sys.exc_info())
- bad_class[class_tuple] = 1
+ bad_classes[class_tuple] = 1
return None
return klass
@@ -114,6 +115,15 @@
pickler.dump(resolved)
return file.getvalue(1)
except ConflictError:
+ return 0
+ except:
+ # If anything else went wrong, catch it here and avoid passing an
+ # arbitrary exception back to the client. The error here will mask
+ # the original ConflictError. A client can recover from a
+ # ConflictError, but not necessarily from other errors. But log
+ # the error so that any problems can be fixed.
+ zLOG.LOG("Conflict Resolution", zLOG.ERROR,
+ "Unexpected error", error=sys.exc_info())
return 0
class ConflictResolvingStorage:
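(For context: the resolution code above calls the application-level _p_resolveConflict(oldState, savedState, newState) hook, and the new bare except means a buggy hook or an unloadable class is logged and treated as an ordinary unresolved conflict rather than leaking an arbitrary exception back to the client. A rough sketch of such a hook, along the lines of the usual counter example; Counter itself is illustrative, not part of this checkin:)

    import ZODB
    from Persistence import Persistent

    class Counter(Persistent):
        # The states handed to _p_resolveConflict are the unpickled
        # object states -- for a simple class like this, the __dict__.
        def __init__(self):
            self.value = 0

        def hit(self):
            self.value = self.value + 1

        def _p_resolveConflict(self, oldState, savedState, newState):
            # Merge two concurrent increments by applying both deltas.
            resolved = savedState.copy()
            old = oldState.get('value', 0)
            resolved['value'] = savedState['value'] + (newState['value'] - old)
            return resolved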
=== Zope/lib/python/ZODB/Connection.py 1.72 => 1.72.6.1 ===
--- Zope/lib/python/ZODB/Connection.py:1.72 Wed Aug 14 18:07:09 2002
+++ Zope/lib/python/ZODB/Connection.py Tue Nov 12 16:13:58 2002
@@ -191,6 +191,14 @@
return obj
return self[oid]
+ def sortKey(self):
+ # XXX will raise an exception if the DB hasn't been set
+ storage_key = self._sortKey()
+ # If two connections use the same storage, give them a
+ # consistent order using id(). This is unique for the
+ # lifetime of a connection, which is good enough.
+ return "%s:%s" % (storage_key, id(self))
+
def _setDB(self, odb):
"""Begin a new transaction.
@@ -198,6 +206,7 @@
"""
self._db=odb
self._storage=s=odb._storage
+ self._sortKey = odb._storage.sortKey
self.new_oid=s.new_oid
if self._code_timestamp != global_code_timestamp:
# New code is in place. Start a new cache.
@@ -275,21 +284,18 @@
method_name, args, kw = self.__onCommitActions.pop(0)
apply(getattr(self, method_name), (transaction,) + args, kw)
return
- oid=object._p_oid
- invalid=self._invalid
+
+ oid = object._p_oid
+ invalid = self._invalid
if oid is None or object._p_jar is not self:
# new object
oid = self.new_oid()
- object._p_jar=self
- object._p_oid=oid
+ object._p_jar = self
+ object._p_oid = oid
self._creating.append(oid)
elif object._p_changed:
- if (
- (invalid(oid) and not hasattr(object, '_p_resolveConflict'))
- or
- invalid(None)
- ):
+ if invalid(oid) and not hasattr(object, '_p_resolveConflict'):
raise ConflictError(object=object)
self._invalidating.append(oid)
@@ -297,7 +303,7 @@
# Nothing to do
return
- stack=[object]
+ stack = [object]
# Create a special persistent_id that passes T and the subobject
# stack along:
@@ -330,7 +336,7 @@
file=StringIO()
seek=file.seek
pickler=Pickler(file,1)
- pickler.persistent_id=new_persistent_id(self, stack.append)
+ pickler.persistent_id=new_persistent_id(self, stack)
dbstore=self._storage.store
file=file.getvalue
cache=self._cache
@@ -351,12 +357,7 @@
self._creating.append(oid)
else:
#XXX We should never get here
- if (
- (invalid(oid) and
- not hasattr(object, '_p_resolveConflict'))
- or
- invalid(None)
- ):
+ if invalid(oid) and not hasattr(object, '_p_resolveConflict'):
raise ConflictError(object=object)
self._invalidating.append(oid)
@@ -517,8 +518,7 @@
# storage to make sure that we don't miss invalidation
# notifications between the time we check and the time we
# read.
- invalid = self._invalid
- if invalid(oid) or invalid(None):
+ if self._invalid(oid):
if not hasattr(object.__class__, '_p_independent'):
get_transaction().register(self)
raise ReadConflictError(object=object)
@@ -602,24 +602,20 @@
if self.__onCommitActions is not None:
del self.__onCommitActions
self._storage.tpc_abort(transaction)
- cache=self._cache
- cache.invalidate(self._invalidated)
- cache.invalidate(self._invalidating)
+ self._cache.invalidate(self._invalidated)
+ self._cache.invalidate(self._invalidating)
self._invalidate_creating()
def tpc_begin(self, transaction, sub=None):
- if self._invalid(None): # Some nitwit invalidated everything!
- raise ConflictError("transaction already invalidated")
- self._invalidating=[]
- self._creating=[]
+ self._invalidating = []
+ self._creating = []
if sub:
# Sub-transaction!
- _tmp=self._tmp
- if _tmp is None:
- _tmp=TmpStore.TmpStore(self._version)
- self._tmp=self._storage
- self._storage=_tmp
+ if self._tmp is None:
+ _tmp = TmpStore.TmpStore(self._version)
+ self._tmp = self._storage
+ self._storage = _tmp
_tmp.registerDB(self._db, 0)
self._storage.tpc_begin(transaction)
@@ -628,7 +624,7 @@
if self.__onCommitActions is not None:
del self.__onCommitActions
try:
- vote=self._storage.tpc_vote
+ vote = self._storage.tpc_vote
except AttributeError:
return
s = vote(transaction)
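(One hook worth calling out from the read-conflict check in the hunk above (@@ -517): an object whose class defines _p_independent() can tell the connection that reading slightly stale state is acceptable, so the invalidation check does not raise ReadConflictError for it. A hedged sketch; LastAccess is invented for illustration, the hook itself is existing ZODB API:)

    import time
    import ZODB
    from Persistence import Persistent

    class LastAccess(Persistent):
        # A slightly stale timestamp is harmless, so a stale read of
        # this object need not abort the whole transaction.
        def __init__(self):
            self.when = 0

        def touch(self):
            self.when = time.time()

        def _p_independent(self):
            return 1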
=== Zope/lib/python/ZODB/DB.py 1.43 => 1.43.6.1 ===
--- Zope/lib/python/ZODB/DB.py:1.43 Wed Aug 14 18:07:09 2002
+++ Zope/lib/python/ZODB/DB.py Tue Nov 12 16:13:58 2002
@@ -577,7 +577,11 @@
self.tpc_begin=s.tpc_begin
self.tpc_vote=s.tpc_vote
self.tpc_finish=s.tpc_finish
+ self._sortKey=s.sortKey
get_transaction().register(self)
+
+ def sortKey(self):
+ return "%s:%s" % (self._sortKey(), id(self))
def abort(self, reallyme, t): pass
=== Zope/lib/python/ZODB/TimeStamp.c 1.15 => 1.15.60.1 ===
--- Zope/lib/python/ZODB/TimeStamp.c:1.15 Fri Mar 8 13:36:13 2002
+++ Zope/lib/python/ZODB/TimeStamp.c Tue Nov 12 16:13:58 2002
@@ -344,7 +344,10 @@
static int
TimeStamp_compare(TimeStamp *v, TimeStamp *w)
{
- return memcmp(v->data, w->data, 8);
+ int cmp = memcmp(v->data, w->data, 8);
+ if (cmp < 0) return -1;
+ if (cmp > 0) return 1;
+ return 0;
}
static long
=== Zope/lib/python/ZODB/Transaction.py 1.37 => 1.37.6.1 ===
--- Zope/lib/python/ZODB/Transaction.py:1.37 Wed Aug 14 18:07:09 2002
+++ Zope/lib/python/ZODB/Transaction.py Tue Nov 12 16:13:58 2002
@@ -19,12 +19,34 @@
import time, sys, struct, POSException
from struct import pack
from string import split, strip, join
-from zLOG import LOG, ERROR, PANIC
+from zLOG import LOG, ERROR, PANIC, INFO, BLATHER, WARNING
from POSException import ConflictError
+from ZODB import utils
# Flag indicating whether certain errors have occurred.
hosed=0
+# There is an order imposed on all jars, based on the storages they
+# serve, that must be consistent across all applications using the
+# storages. The order is defined by the sortKey() method of the jar.
+
+def jar_cmp(j1, j2):
+ # Call sortKey() every time, because a ZEO client could reconnect
+ # to a different server at any time.
+ try:
+ k1 = j1.sortKey()
+ except:
+ LOG("TM", WARNING, "jar missing sortKey() method: %s" % j1)
+ k1 = id(j1)
+
+ try:
+ k2 = j2.sortKey()
+ except:
+ LOG("TM", WARNING, "jar missing sortKey() method: %s" % j2)
+ k2 = id(j2)
+
+ return cmp(k1, k2)
+
class Transaction:
'Simple transaction objects for single-threaded applications.'
user=''
@@ -53,6 +75,9 @@
for c in self._connections.values(): c.close()
del self._connections
+ def log(self, msg, level=INFO, error=None):
+ LOG("TM:%s" % self._id, level, msg, error=error)
+
def sub(self):
# Create a manually managed subtransaction for internal use
r=self.__class__()
@@ -84,11 +109,8 @@
""")
t = None
- subj = self._sub
- subjars = ()
if not subtransaction:
-
# Must add in any non-subtransaction supporting objects that
# may have been stowed away from previous subtransaction
# commits.
@@ -96,11 +118,14 @@
self._objects.extend(self._non_st_objects)
self._non_st_objects = None
- if subj is not None:
+ if self._sub is not None:
# Abort of top-level transaction after committing
# subtransactions.
- subjars = subj.values()
+ subjars = self._sub.values()
+ subjars.sort(jar_cmp)
self._sub = None
+ else:
+ subjars = []
try:
# Abort the objects
@@ -110,13 +135,20 @@
if j is not None:
j.abort(o, self)
except:
+ # Record the first exception that occurred
if t is None:
t, v, tb = sys.exc_info()
+ else:
+ self.log("Failed to abort object %016x" %
+ utils.U64(o._p_oid), error=sys.exc_info())
- # Ugh, we need to abort work done in sub-transactions.
- while subjars:
- j = subjars.pop()
- j.abort_sub(self) # This should never fail
+ # tpc_begin() was never called, so tpc_abort() should not be
+ # called.
+
+ if not subtransaction:
+ # abort_sub() must be called to clear subtransaction state
+ for jar in subjars:
+ jar.abort_sub(self) # This should never fail
if t is not None:
raise t, v, tb
@@ -136,7 +168,8 @@
This aborts any transaction in progress.
'''
- if self._objects: self.abort(subtransaction, 0)
+ if self._objects:
+ self.abort(subtransaction, 0)
if info:
info=split(info,'\t')
self.user=strip(info[0])
@@ -146,30 +179,32 @@
'Finalize the transaction'
objects = self._objects
- jars = {}
- jarsv = None
- subj = self._sub
- subjars = ()
+ subjars = []
if subtransaction:
- if subj is None:
- self._sub = subj = {}
+ if self._sub is None:
+ # Must store state across multiple subtransactions
+ # so that the final commit can commit all subjars.
+ self._sub = {}
else:
- if subj is not None:
+ if self._sub is not None:
+ # This commit is for a top-level transaction that
+ # has previously committed subtransactions. Do
+ # one last subtransaction commit to clear out the
+ # current objects, then commit all the subjars.
if objects:
- # Do an implicit sub-transaction commit:
self.commit(1)
- # XXX What does this do?
objects = []
- subjars = subj.values()
+ subjars = self._sub.values()
+ subjars.sort(jar_cmp)
self._sub = None
- # If not a subtransaction, then we need to add any non-
- # subtransaction-supporting objects that may have been
- # stowed away during subtransaction commits to _objects.
- if (subtransaction is None) and (self._non_st_objects is not None):
- objects.extend(self._non_st_objects)
- self._non_st_objects = None
+ # If there were any non-subtransaction-aware jars
+ # involved in earlier subtransaction commits, we need
+ # to add them to the list of jars to commit.
+ if self._non_st_objects is not None:
+ objects.extend(self._non_st_objects)
+ self._non_st_objects = None
if (objects or subjars) and hosed:
# Something really bad happened and we don't
@@ -188,89 +223,140 @@
# either call tpc_abort or tpc_finish. It is OK to call
# these multiple times, as the storage is required to ignore
# these calls if tpc_begin has not been called.
+ #
+ # - That we call tpc_begin() in a globally consistent order,
+ # so that concurrent transactions involving multiple storages
+ # do not deadlock.
try:
ncommitted = 0
+ jars = self._get_jars(objects, subtransaction)
try:
- ncommitted += self._commit_objects(objects, jars,
- subtransaction, subj)
-
- self._commit_subtrans(jars, subjars)
-
- jarsv = jars.values()
- for jar in jarsv:
- if not subtransaction:
+ # If not subtransaction, then jars will be modified.
+ self._commit_begin(jars, subjars, subtransaction)
+ ncommitted += self._commit_objects(objects)
+ if not subtransaction:
+ # Unless this is a really old jar that doesn't
+ # implement tpc_vote(), it must raise an exception
+ # if it can't commit the transaction.
+ for jar in jars:
try:
vote = jar.tpc_vote
- except:
+ except AttributeError:
pass
else:
- vote(self) # last chance to bail
+ vote(self)
- # Try to finish one jar, since we may be able to
- # recover if the first one fails.
- self._finish_one(jarsv)
- # Once a single jar has finished, it's a fatal (hosed)
- # error if another jar fails.
- self._finish_rest(jarsv)
+ # Handle multiple jars separately. If there are
+ # multiple jars and one fails during the finish, we
+ # mark this transaction manager as hosed.
+ if len(jars) == 1:
+ self._finish_one(jars[0])
+ else:
+ self._finish_many(jars)
except:
# Ugh, we got an error during commit, so we
- # have to clean up.
- exc_info = sys.exc_info()
- if jarsv is None:
- jarsv = jars.values()
- self._commit_error(exc_info, objects, ncommitted,
- jarsv, subjars)
+ # have to clean up. First save the original exception
+ # in case the cleanup process causes another
+ # exception.
+ t, v, tb = sys.exc_info()
+ try:
+ self._commit_error(objects, ncommitted, jars, subjars)
+ except:
+ LOG('ZODB', ERROR,
+ "A storage error occured during transaction "
+ "abort. This shouldn't happen.",
+ error=sys.exc_info())
+
+ raise t, v, tb
finally:
del objects[:] # clear registered
if not subtransaction and self._id is not None:
free_transaction()
- def _commit_objects(self, objects, jars, subtransaction, subj):
- # commit objects and return number of commits
- ncommitted = 0
+ def _get_jars(self, objects, subtransaction):
+ # Returns a list of jars for this transaction.
+
+ # Find all the jars and sort them in a globally consistent order.
+ # objects is a list of persistent objects and jars.
+ # If this is a subtransaction and a jar is not subtransaction aware,
+ # its object gets delayed until the parent transaction commits.
+
+ d = {}
for o in objects:
- j = getattr(o, '_p_jar', o)
- if j is not None:
- i = id(j)
- if not jars.has_key(i):
- jars[i] = j
-
- if subtransaction:
- # If a jar does not support subtransactions,
- # we need to save it away to be committed in
- # the outer transaction.
- try:
- j.tpc_begin(self, subtransaction)
- except TypeError:
- j.tpc_begin(self)
+ jar = getattr(o, '_p_jar', o)
+ if jar is None:
+ # I don't think this should ever happen, but can't
+ # prove that it won't. If there is no jar, there
+ # is nothing to be done.
+ self.log("Object with no jar registered for transaction: "
+ "%s" % repr(o), level=BLATHER)
+ continue
+ # jar may not be safe as a dictionary key
+ key = id(jar)
+ d[key] = jar
+
+ if subtransaction:
+ if hasattr(jar, "commit_sub"):
+ self._sub[key] = jar
+ else:
+ if self._non_st_objects is None:
+ self._non_st_objects = []
+ self._non_st_objects.append(o)
+
+ jars = d.values()
+ jars.sort(jar_cmp)
- if hasattr(j, 'commit_sub'):
- subj[i] = j
- else:
- if self._non_st_objects is None:
- self._non_st_objects = []
- self._non_st_objects.append(o)
- continue
- else:
- j.tpc_begin(self)
- j.commit(o, self)
+ return jars
+
+ def _commit_begin(self, jars, subjars, subtransaction):
+ if subtransaction:
+ assert not subjars
+ for jar in jars:
+ try:
+ jar.tpc_begin(self, subtransaction)
+ except TypeError:
+ # Assume that TypeError means that tpc_begin() only
+ # takes one argument, and that the jar doesn't
+ # support subtransactions.
+ jar.tpc_begin(self)
+ else:
+ # Merge in all the jars used by one of the subtransactions.
+
+ # When the top-level subtransaction commits, the tm must
+ # call commit_sub() for each jar involved in one of the
+ # subtransactions. The commit_sub() method should call
+ # tpc_begin() on the storage object.
+
+ # It must also call tpc_begin() on jars that were used in
+ # a subtransaction but don't support subtransactions.
+
+ # These operations must be performed on the jars in order.
+
+ # Modify jars in place to include the subjars, too.
+ jars += subjars
+ jars.sort(jar_cmp)
+ # assume that subjars is small, so that it's cheaper to test
+ # whether jar in subjars than to make a dict and do has_key.
+ for jar in jars:
+ if jar in subjars:
+ jar.commit_sub(self)
+ else:
+ jar.tpc_begin(self)
+
+ def _commit_objects(self, objects):
+ ncommitted = 0
+ for o in objects:
+ jar = getattr(o, "_p_jar", o)
+ if jar is None:
+ continue
+ jar.commit(o, self)
ncommitted += 1
return ncommitted
- def _commit_subtrans(self, jars, subjars):
- # Commit work done in subtransactions
- while subjars:
- j = subjars.pop()
- i = id(j)
- if not jars.has_key(i):
- jars[i] = j
- j.commit_sub(self)
-
- def _finish_one(self, jarsv):
+ def _finish_one(self, jar):
try:
- if jarsv:
- jarsv[-1].tpc_finish(self) # This should never fail
- jarsv.pop() # It didn't, so it's taken care of.
+ # The database can't guarantee consistency if this call fails.
+ jar.tpc_finish(self)
except:
# Bug if it does, we need to keep track of it
LOG('ZODB', ERROR,
@@ -279,42 +365,40 @@
error=sys.exc_info())
raise
- def _finish_rest(self, jarsv):
+ def _finish_many(self, jars):
global hosed
try:
- while jarsv:
- jarsv[-1].tpc_finish(self) # This should never fail
- jarsv.pop() # It didn't, so it's taken care of.
+ for jar in jars:
+ # The database can't guarantee consistency if this call fails.
+ jar.tpc_finish(self)
except:
- # Bug if it does, we need to yell FIRE!
- # Someone finished, so don't allow any more
- # work without at least a restart!
hosed = 1
LOG('ZODB', PANIC,
"A storage error occurred in the last phase of a "
"two-phase commit. This shouldn\'t happen. "
- "The application may be in a hosed state, so "
- "transactions will not be allowed to commit "
+ "The application will not be allowed to commit "
"until the site/storage is reset by a restart. ",
error=sys.exc_info())
raise
- def _commit_error(self, (t, v, tb),
- objects, ncommitted, jarsv, subjars):
- # handle an exception raised during commit
- # takes sys.exc_info() as argument
-
- # First, we have to abort any uncommitted objects.
+ def _commit_error(self, objects, ncommitted, jars, subjars):
+ # First, we have to abort any uncommitted objects. The abort
+ # will mark the object for invalidation, so that its last
+ # committed state will be restored.
for o in objects[ncommitted:]:
try:
j = getattr(o, '_p_jar', o)
if j is not None:
j.abort(o, self)
except:
- pass
-
- # Then, we unwind TPC for the jars that began it.
- for j in jarsv:
+ # nothing to do but log the error
+ self.log("Failed to abort object %016x" % utils.U64(o._p_oid),
+ error=sys.exc_info())
+
+ # Abort the two-phase commit. It's only necessary to abort the
+ # commit for jars that began it, but it is harmless to abort it
+ # for all.
+ for j in jars:
try:
j.tpc_abort(self) # This should never fail
except:
@@ -322,9 +406,14 @@
"A storage error occured during object abort. This "
"shouldn't happen. ", error=sys.exc_info())
- # Ugh, we need to abort work done in sub-transactions.
- while subjars:
- j = subjars.pop()
+ # After the tpc_abort(), call abort_sub() on all the
+ # subtrans-aware jars to *really* abort the subtransaction.
+
+ # Example: For Connection(), the tpc_abort() will abort the
+ # subtransaction TmpStore() and abort_sub() will remove the
+ # TmpStore.
+
+ for j in subjars:
try:
j.abort_sub(self) # This should never fail
except:
@@ -333,8 +422,6 @@
"object abort. This shouldn't happen.",
error=sys.exc_info())
- raise t, v, tb
-
def register(self,object):
'Register the given object for transaction control.'
self._append(object)
@@ -367,8 +454,6 @@
information on the error that lead to this problem.
"""
-
-
############################################################################
# install get_transaction:
@@ -405,5 +490,6 @@
del _t
-import __main__
-__main__.__builtins__.get_transaction=get_transaction
+import __builtin__
+__builtin__.get_transaction=get_transaction
+del __builtin__
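(The last hunk installs get_transaction() as a real builtin instead of poking it into __main__, so it is visible from any module, not just interactive code. Typical era usage, showing the subtransaction commit that the commit() rewrite above handles; the file name and keys here are made up:)

    from ZODB.DB import DB
    from ZODB.FileStorage import FileStorage

    db = DB(FileStorage('Data.fs'))        # hypothetical database file
    conn = db.open()
    root = conn.root()

    root['count'] = root.get('count', 0) + 1
    get_transaction().commit(1)            # subtransaction commit
    root['count'] = root['count'] + 1
    get_transaction().commit()             # final commit over all jars, in sortKey order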
=== Zope/lib/python/ZODB/__init__.py 1.14 => 1.14.2.1 ===
--- Zope/lib/python/ZODB/__init__.py:1.14 Thu Aug 22 20:52:57 2002
+++ Zope/lib/python/ZODB/__init__.py Tue Nov 12 16:13:58 2002
@@ -11,31 +11,34 @@
# FOR A PARTICULAR PURPOSE
#
##############################################################################
+
+__version__ = '3.1'
+
import sys
import cPersistence, Persistence
from zLOG import register_subsystem
register_subsystem('ZODB')
# This is lame. Don't look. :(
-sys.modules['cPersistence']=cPersistence
+sys.modules['cPersistence'] = cPersistence
-Persistent=cPersistence.Persistent
+Persistent = cPersistence.Persistent
# Install Persistent and PersistentMapping in Persistence
if not hasattr(Persistence, 'Persistent'):
- Persistence.Persistent=Persistent
- Persistent.__module__='Persistence'
- Persistence.Overridable=cPersistence.Overridable
- Persistence.Overridable.__module__='Persistence'
+ Persistence.Persistent = Persistent
+ Persistent.__module__ = 'Persistence'
+ Persistence.Overridable = cPersistence.Overridable
+ Persistence.Overridable.__module__ = 'Persistence'
if not hasattr(Persistence, 'PersistentMapping'):
import PersistentMapping
- sys.modules['PersistentMapping']=PersistentMapping
- sys.modules['BoboPOS']=sys.modules['ZODB']
- sys.modules['BoboPOS.PersistentMapping']=PersistentMapping
- PersistentMapping=PersistentMapping.PersistentMapping
+ sys.modules['PersistentMapping'] = PersistentMapping
+ sys.modules['BoboPOS'] = sys.modules['ZODB']
+ sys.modules['BoboPOS.PersistentMapping'] = PersistentMapping
+ PersistentMapping = PersistentMapping.PersistentMapping
from PersistentMapping import PersistentMapping
- Persistence.PersistentMapping=PersistentMapping
- PersistentMapping.__module__='Persistence'
+ Persistence.PersistentMapping = PersistentMapping
+ PersistentMapping.__module__ = 'Persistence'
del PersistentMapping
del cPersistence
=== Zope/lib/python/ZODB/cPersistence.c 1.62.10.1 => 1.62.10.2 ===
--- Zope/lib/python/ZODB/cPersistence.c:1.62.10.1 Wed Oct 16 12:22:03 2002
+++ Zope/lib/python/ZODB/cPersistence.c Tue Nov 12 16:13:58 2002
@@ -848,7 +848,7 @@
{
PyObject *m, *d, *s;
- s = PyString_FromString("TimeStamp");
+ s = PyString_FromString("ZODB.TimeStamp");
if (s == NULL)
return;
m = PyImport_Import(s);
@@ -856,7 +856,8 @@
Py_DECREF(s);
return;
}
- TimeStamp = PyObject_GetAttr(m, s);
+ TimeStamp = PyObject_GetAttrString(m, "TimeStamp");
+ assert(TimeStamp);
Py_DECREF(m);
Py_DECREF(s);
=== Zope/lib/python/ZODB/coptimizations.c 1.17 => 1.17.60.1 ===
--- Zope/lib/python/ZODB/coptimizations.c:1.17 Fri Mar 8 13:36:14 2002
+++ Zope/lib/python/ZODB/coptimizations.c Tue Nov 12 16:13:58 2002
@@ -33,207 +33,232 @@
typedef struct {
PyObject_HEAD
- PyObject *jar, *stackup, *new_oid;
+ PyObject *jar, *stack, *new_oid;
} persistent_id;
-staticforward PyTypeObject persistent_idType;
+static PyTypeObject persistent_idType;
static persistent_id *
newpersistent_id(PyObject *ignored, PyObject *args)
{
- persistent_id *self;
- PyObject *jar, *stackup;
+ persistent_id *self;
+ PyObject *jar, *stack;
- UNLESS (PyArg_ParseTuple(args, "OO", &jar, &stackup)) return NULL;
- UNLESS(self = PyObject_NEW(persistent_id, &persistent_idType)) return NULL;
- Py_INCREF(jar);
- self->jar=jar;
- Py_INCREF(stackup);
- self->stackup=stackup;
- self->new_oid=NULL;
- return self;
+ if (!PyArg_ParseTuple(args, "OO!", &jar, &PyList_Type, &stack))
+ return NULL;
+ self = PyObject_NEW(persistent_id, &persistent_idType);
+ if (!self)
+ return NULL;
+ Py_INCREF(jar);
+ self->jar = jar;
+ Py_INCREF(stack);
+ self->stack = stack;
+ self->new_oid = NULL;
+ return self;
}
-
static void
persistent_id_dealloc(persistent_id *self)
{
- Py_DECREF(self->jar);
- Py_DECREF(self->stackup);
- Py_XDECREF(self->new_oid);
- PyObject_DEL(self);
+ Py_DECREF(self->jar);
+ Py_DECREF(self->stack);
+ Py_XDECREF(self->new_oid);
+ PyObject_DEL(self);
+}
+
+/* Returns the klass of a persistent object.
+ Returns NULL for other objects.
+*/
+static PyObject *
+get_class(PyObject *object)
+{
+ PyObject *class = NULL;
+
+ if (!PyExtensionClass_Check(object)) {
+ if (PyExtensionInstance_Check(object)) {
+ class = PyObject_GetAttr(object, py___class__);
+ if (!class) {
+ PyErr_Clear();
+ return NULL;
+ }
+ if (!PyExtensionClass_Check(class) ||
+ !(((PyExtensionClass*)class)->class_flags
+ & PERSISTENT_TYPE_FLAG)) {
+ Py_DECREF(class);
+ return NULL;
+ }
+ }
+ else
+ return NULL;
+ }
+ return class;
+}
+
+/* Return a two-tuple of the class's module and name.
+ */
+static PyObject *
+get_class_tuple(PyObject *class, PyObject *oid)
+{
+ PyObject *module = NULL, *name = NULL, *tuple;
+
+ module = PyObject_GetAttr(class, py___module__);
+ if (!module)
+ goto err;
+ if (!PyObject_IsTrue(module)) {
+ Py_DECREF(module);
+ /* XXX Handle degenerate 1.x ZClass case. */
+ return oid;
+ }
+
+ name = PyObject_GetAttr(class, py___name__);
+ if (!name)
+ goto err;
+
+ tuple = PyTuple_New(2);
+ if (!tuple)
+ goto err;
+ PyTuple_SET_ITEM(tuple, 0, module);
+ PyTuple_SET_ITEM(tuple, 1, name);
+
+ return tuple;
+ err:
+ Py_XDECREF(module);
+ Py_XDECREF(name);
+ return NULL;
+}
+
+static PyObject *
+set_oid(persistent_id *self, PyObject *object)
+{
+ PyObject *oid;
+
+ if (!self->new_oid) {
+ self->new_oid = PyObject_GetAttr(self->jar, py_new_oid);
+ if (!self->new_oid)
+ return NULL;
+ }
+ oid = PyObject_CallObject(self->new_oid, NULL);
+ if (!oid)
+ return NULL;
+ if (PyObject_SetAttr(object, py__p_oid, oid) < 0)
+ goto err;
+ if (PyObject_SetAttr(object, py__p_jar, self->jar) < 0)
+ goto err;
+ if (PyList_Append(self->stack, object) < 0)
+ goto err;
+ return oid;
+ err:
+ Py_DECREF(oid);
+ return NULL;
}
static PyObject *
persistent_id_call(persistent_id *self, PyObject *args, PyObject *kwargs)
{
- PyObject *object, *oid, *jar=NULL, *r=NULL, *klass=NULL;
+ PyObject *object, *oid, *klass=NULL;
+ PyObject *t1, *t2;
+ int setjar = 0;
+
+ if (!PyArg_ParseTuple(args, "O", &object))
+ return NULL;
+
+ klass = get_class(object);
+ if (!klass)
+ goto return_none;
+
+ oid = PyObject_GetAttr(object, py__p_oid);
+ if (!oid) {
+ PyErr_Clear();
+ Py_DECREF(klass);
+ goto return_none;
+ }
- /*
- def persistent_id(object, self=self,stackup=stackup):
- */
- UNLESS (PyArg_ParseTuple(args, "O", &object)) return NULL;
-
- /*
- if (not hasattr(object, '_p_oid') or
- type(object) is ClassType): return None
- */
-
-
- /* Filter out most objects with low-level test.
- Yee ha!
- (Also get klass along the way.)
- */
- if (! PyExtensionClass_Check(object)) {
- if (PyExtensionInstance_Check(object))
- {
- UNLESS (klass=PyObject_GetAttr(object, py___class__))
- {
+ if (oid != Py_None) {
+ PyObject *jar = PyObject_GetAttr(object, py__p_jar);
+ if (!jar)
PyErr_Clear();
- goto not_persistent;
- }
- UNLESS (
- PyExtensionClass_Check(klass) &&
- (((PyExtensionClass*)klass)->class_flags
- & PERSISTENT_TYPE_FLAG)
- )
- goto not_persistent;
-
- }
- else
- goto not_persistent;
- }
-
- UNLESS (oid=PyObject_GetAttr(object, py__p_oid))
- {
- PyErr_Clear();
- goto not_persistent;
- }
-
- /*
- if oid is None or object._p_jar is not self:
- */
- if (oid != Py_None)
- {
- UNLESS (jar=PyObject_GetAttr(object, py__p_jar)) PyErr_Clear();
- if (jar && jar != Py_None && jar != self->jar)
- {
- PyErr_SetString(InvalidObjectReference,
- "Attempt to store an object from a foreign "
- "database connection");
- return NULL;
+ else {
+ if (jar != Py_None && jar != self->jar) {
+ PyErr_SetString(InvalidObjectReference,
+ "Attempt to store an object from a foreign "
+ "database connection");
+ goto err;
+ }
+ /* Ignore the oid of the unknown jar and assign a new one. */
+ if (jar == Py_None)
+ setjar = 1;
+ Py_DECREF(jar);
}
}
- if (oid == Py_None || jar != self->jar)
- {
- /*
- oid = self.new_oid()
- object._p_jar=self
- object._p_oid=oid
- stackup(object)
- */
- UNLESS (self->new_oid ||
- (self->new_oid=PyObject_GetAttr(self->jar, py_new_oid)))
+ if (oid == Py_None || setjar) {
+ Py_DECREF(oid);
+ oid = set_oid(self, object);
+ if (!oid)
goto err;
- ASSIGN(oid, PyObject_CallObject(self->new_oid, NULL));
- UNLESS (oid) goto null_oid;
- if (PyObject_SetAttr(object, py__p_jar, self->jar) < 0) goto err;
- if (PyObject_SetAttr(object, py__p_oid, oid) < 0) goto err;
- UNLESS (r=PyTuple_New(1)) goto err;
- PyTuple_SET_ITEM(r, 0, object);
- Py_INCREF(object);
- ASSIGN(r, PyObject_CallObject(self->stackup, r));
- UNLESS (r) goto err;
- Py_DECREF(r);
- }
-
- /*
- klass=object.__class__
-
- if klass is ExtensionKlass: return oid
- */
-
- if (PyExtensionClass_Check(object)) goto return_oid;
-
- /*
- if hasattr(klass, '__getinitargs__'): return oid
- */
-
- if ((r=PyObject_GetAttr(klass, py___getinitargs__)))
- {
- Py_DECREF(r);
- goto return_oid;
- }
- PyErr_Clear();
-
- /*
- module=getattr(klass,'__module__','')
- if module: klass=module, klass.__name__
- else: return oid # degenerate 1.x ZClass case
- */
- UNLESS (jar=PyObject_GetAttr(klass, py___module__)) goto err;
-
- UNLESS (PyObject_IsTrue(jar)) goto return_oid;
-
- ASSIGN(klass, PyObject_GetAttr(klass, py___name__));
- UNLESS (klass) goto err;
-
- UNLESS (r=PyTuple_New(2)) goto err;
- PyTuple_SET_ITEM(r, 0, jar);
- PyTuple_SET_ITEM(r, 1, klass);
- klass=r;
- jar=NULL;
-
- /*
- return oid, klass
- */
- UNLESS (r=PyTuple_New(2)) goto err;
- PyTuple_SET_ITEM(r, 0, oid);
- PyTuple_SET_ITEM(r, 1, klass);
- return r;
-
-not_persistent:
- Py_INCREF(Py_None);
- return Py_None;
-
-err:
- Py_DECREF(oid);
- oid=NULL;
-
-null_oid:
-return_oid:
- Py_XDECREF(jar);
- Py_XDECREF(klass);
- return oid;
+ }
+
+ if (PyExtensionClass_Check(object)
+ || PyObject_HasAttr(klass, py___getinitargs__))
+ goto return_oid;
+
+ t2 = get_class_tuple(klass, oid);
+ if (!t2)
+ goto err;
+ if (t2 == oid) /* pass through ZClass special case */
+ goto return_oid;
+ t1 = PyTuple_New(2);
+ if (!t1) {
+ Py_DECREF(t2);
+ goto err;
+ }
+ /* use borrowed references to oid and t2 */
+ PyTuple_SET_ITEM(t1, 0, oid);
+ PyTuple_SET_ITEM(t1, 1, t2);
+
+ Py_DECREF(klass);
+
+ return t1;
+
+ err:
+ Py_XDECREF(oid);
+ oid = NULL;
+
+ return_oid:
+ Py_XDECREF(klass);
+ return oid;
+
+ return_none:
+ Py_INCREF(Py_None);
+ return Py_None;
}
static PyTypeObject persistent_idType = {
- PyObject_HEAD_INIT(NULL)
- 0, /*ob_size*/
- "persistent_id", /*tp_name*/
- sizeof(persistent_id), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- /* methods */
- (destructor)persistent_id_dealloc, /*tp_dealloc*/
- (printfunc)0, /*tp_print*/
- (getattrfunc)0, /*obsolete tp_getattr*/
- (setattrfunc)0, /*obsolete tp_setattr*/
- (cmpfunc)0, /*tp_compare*/
- (reprfunc)0, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- (hashfunc)0, /*tp_hash*/
- (ternaryfunc)persistent_id_call, /*tp_call*/
- (reprfunc)0, /*tp_str*/
- (getattrofunc)0, /*tp_getattro*/
- (setattrofunc)0, /*tp_setattro*/
-
- /* Space for future expansion */
- 0L,0L,
- "C implementation of the persistent_id function defined in Connection.py"
+ PyObject_HEAD_INIT(NULL)
+ 0, /*ob_size*/
+ "persistent_id", /*tp_name*/
+ sizeof(persistent_id), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ /* methods */
+ (destructor)persistent_id_dealloc, /*tp_dealloc*/
+ (printfunc)0, /*tp_print*/
+ (getattrfunc)0, /*obsolete tp_getattr*/
+ (setattrfunc)0, /*obsolete tp_setattr*/
+ (cmpfunc)0, /*tp_compare*/
+ (reprfunc)0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ (hashfunc)0, /*tp_hash*/
+ (ternaryfunc)persistent_id_call, /*tp_call*/
+ (reprfunc)0, /*tp_str*/
+ (getattrofunc)0, /*tp_getattro*/
+ (setattrofunc)0, /*tp_setattro*/
+
+ /* Space for future expansion */
+ 0L,0L,
+ "C implementation of the persistent_id function defined in Connection.py"
};
/* End of code for persistent_id objects */
@@ -243,45 +268,40 @@
/* List of methods defined in the module */
static struct PyMethodDef Module_Level__methods[] = {
- {"new_persistent_id", (PyCFunction)newpersistent_id, METH_VARARGS,
- "new_persistent_id(jar, stackup, new_oid)"
- " -- get a new persistent_id function"},
- {NULL, (PyCFunction)NULL, 0, NULL} /* sentinel */
+ {"new_persistent_id", (PyCFunction)newpersistent_id, METH_VARARGS,
+ "new_persistent_id(jar, stack) -- get a new persistent_id function"},
+ {NULL, NULL} /* sentinel */
};
void
initcoptimizations(void)
{
- PyObject *m, *d;
+ PyObject *m, *d;
#define make_string(S) if (! (py_ ## S=PyString_FromString(#S))) return
- make_string(_p_oid);
- make_string(_p_jar);
- make_string(__getinitargs__);
- make_string(__module__);
- make_string(__class__);
- make_string(__name__);
- make_string(new_oid);
+ make_string(_p_oid);
+ make_string(_p_jar);
+ make_string(__getinitargs__);
+ make_string(__module__);
+ make_string(__class__);
+ make_string(__name__);
+ make_string(new_oid);
- /* Get InvalidObjectReference error */
- UNLESS (m=PyString_FromString("POSException")) return;
- ASSIGN(m, PyImport_Import(m));
- UNLESS (m) return;
- ASSIGN(m, PyObject_GetAttrString(m, "InvalidObjectReference"));
- UNLESS (m) return;
- InvalidObjectReference=m;
-
- UNLESS (ExtensionClassImported) return;
-
- m = Py_InitModule4("coptimizations", Module_Level__methods,
- coptimizations_doc_string,
- (PyObject*)NULL,PYTHON_API_VERSION);
- d = PyModule_GetDict(m);
-
- persistent_idType.ob_type=&PyType_Type;
- PyDict_SetItemString(d,"persistent_idType", OBJECT(&persistent_idType));
-
- /* Check for errors */
- if (PyErr_Occurred())
- Py_FatalError("can't initialize module coptimizations");
+ /* Get InvalidObjectReference error */
+ UNLESS (m=PyString_FromString("ZODB.POSException")) return;
+ ASSIGN(m, PyImport_Import(m));
+ UNLESS (m) return;
+ ASSIGN(m, PyObject_GetAttrString(m, "InvalidObjectReference"));
+ UNLESS (m) return;
+ InvalidObjectReference=m;
+
+ if (!ExtensionClassImported)
+ return;
+
+ m = Py_InitModule3("coptimizations", Module_Level__methods,
+ coptimizations_doc_string);
+ d = PyModule_GetDict(m);
+
+ persistent_idType.ob_type = &PyType_Type;
+ PyDict_SetItemString(d,"persistent_idType", OBJECT(&persistent_idType));
}
=== Zope/lib/python/ZODB/fsdump.py 1.3 => 1.3.70.1 ===
--- Zope/lib/python/ZODB/fsdump.py:1.3 Mon Feb 11 14:38:09 2002
+++ Zope/lib/python/ZODB/fsdump.py Tue Nov 12 16:13:58 2002
@@ -64,8 +64,14 @@
version = "version=%s " % rec.version
else:
version = ''
- print >> file, " data #%05d oid=%016x %sclass=%s" % \
- (j, U64(rec.oid), version, fullclass)
+ if rec.data_txn:
+ # XXX It would be nice to print the transaction number
+ # (i) but it would be too expensive to keep track of.
+ bp = "bp=%016x" % U64(rec.data_txn)
+ else:
+ bp = ""
+ print >> file, " data #%05d oid=%016x %sclass=%s %s" % \
+ (j, U64(rec.oid), version, fullclass, bp)
j += 1
print >> file
i += 1
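(On the new data_txn attribute mentioned in the log message: fsdump labels it bp=, i.e. a back pointer -- the record reuses data stored by an earlier transaction, as happens after an undo, instead of carrying its own copy -- and it is false for ordinary records, which is why the bp= field is only printed when set. A hedged sketch of reading it directly, mirroring the loops fsdump itself runs and the FileIterator/U64 helpers it already uses:)

    from ZODB.FileStorage import FileIterator
    from ZODB.utils import U64

    def list_backpointers(path):
        # Print every record that refers back to an earlier transaction's
        # data instead of carrying its own copy.
        for txn in FileIterator(path):
            for rec in txn:
                if rec.data_txn:
                    print "oid %016x bp %016x" % (U64(rec.oid),
                                                  U64(rec.data_txn))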