[Zodb-checkins] CVS: Zope/lib/python/ZODB - interfaces.py:1.2
subtransactions.txt:1.2 ConflictResolution.py:1.22
Connection.py:1.118 DB.py:1.61 DemoStorage.py:1.25
Transaction.py:1.57 referencesf.py:1.7 serialize.py:1.3
coptimizations.c:NONE
Jeremy Hylton
jeremy at zope.com
Wed Feb 18 21:59:41 EST 2004
Update of /cvs-repository/Zope/lib/python/ZODB
In directory cvs.zope.org:/tmp/cvs-serv11908/lib/python/ZODB
Modified Files:
ConflictResolution.py Connection.py DB.py DemoStorage.py
Transaction.py referencesf.py serialize.py
Added Files:
interfaces.py subtransactions.txt
Removed Files:
coptimizations.c
Log Message:
Merge zope3-zodb3-devel-branch to the Zope head (Zope 2 head).
Add support for persistent weak references. The implementation is in
pure Python right now; coptimizations.c was disabled. We need to
restore the C code before going to beta.
The persistent reference format has evolved a little, but the code on
the branch doesn't document it.  The ref is usually an oid-class
pair, where the class is an actual class object.  It can also be a
list, for weak references, or a bare oid.
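
As a rough illustration (not part of the checkin itself), the three
reference shapes look like this; the Example class and the oid value
are placeholders:

    class Example:              # hypothetical persistent class
        pass

    oid = '\0' * 8              # oids are 8-byte strings

    ref_pair = (oid, Example)   # usual case: oid plus the class object
    ref_weak = [oid]            # a list marks a persistent weak reference
    ref_bare = oid              # bare oid, used e.g. when the class
                                # defines __getnewargs__
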
Add support for data managers written against the ZODB4 transaction
API. The transaction implementation provides an adapter between the
two interfaces. There's a substantial doctest test suite for this
feature.
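
As a sketch of how such a data manager is hooked up (the
CounterDataManager class is hypothetical; only the method names
prepare/commit/abort/savepoint come from the DataManagerAdapter added
in Transaction.py below, and get_transaction() is the builtin
installed by ZODB):

    class Savepoint:
        def __init__(self, rollback):
            self.rollback = rollback

    class CounterDataManager:
        """Toy ZODB4-style data manager, for illustration only."""
        def __init__(self):
            self.value = self.committed = 0
        def inc(self):
            self.value += 1
        def prepare(self, txn):
            # vote: raise an exception here to veto the commit
            pass
        def commit(self, txn):
            self.committed = self.value
        def abort(self, txn):
            self.value = self.committed
        def savepoint(self, txn):
            # called for subtransaction commits; must return an object
            # with a rollback() method
            saved = self.value
            return Savepoint(lambda: setattr(self, 'value', saved))

    dm = CounterDataManager()
    get_transaction().join(dm)   # wrapped in a DataManagerAdapter
    dm.inc()
    get_transaction().commit()   # dm.commit() runs via the adapter
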
Add an add() method to Connection to explicitly add an object and
have an _p_oid assigned to it.  It's possible to add an unconnected
object (one not reachable from the root); this isn't recommended,
because it will be lost at pack time.
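
A hedged usage sketch (the Account class and the db variable are
placeholders, not part of the checkin):

    from persistent import Persistent

    class Account(Persistent):
        balance = 0

    conn = db.open()              # db is an existing ZODB.DB.DB instance
    obj = Account()
    conn.add(obj)                 # assigns obj._p_oid and sets obj._p_jar
    conn.root()['account'] = obj  # connect it so it survives a pack
    get_transaction().commit()
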
=== Zope/lib/python/ZODB/interfaces.py 1.1 => 1.2 ===
--- /dev/null Wed Feb 18 21:59:40 2004
+++ Zope/lib/python/ZODB/interfaces.py Wed Feb 18 21:59:06 2004
@@ -0,0 +1,225 @@
+##############################################################################
+#
+# Copyright (c) 2004 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Interfaces for ZODB."""
+
+try:
+ from zope.interface import Interface, Attribute
+except ImportError:
+ class Interface:
+ pass
+
+ class Attribute:
+ def __init__(self, __name__, __doc__):
+ self.__name__ = __name__
+ self.__doc__ = __doc__
+
+
+class IDataManager(Interface):
+ """Objects that manage transactional storage.
+
+ These objects may manage data for other objects, or they may manage
+ non-object storages, such as relational databases.
+ """
+
+ def abort_sub(transaction):
+ """Discard all subtransaction data.
+
+ See subtransactions.txt
+
+ This is called when top-level transactions are aborted.
+
+ No further subtransactions can be started once abort_sub()
+ has been called; this is only used when the transaction is
+ being aborted.
+
+ abort_sub also implies the abort of a 2-phase commit.
+
+ This should never fail.
+ """
+
+ def commit_sub(transaction):
+ """Commit all changes made in subtransactions and begin 2-phase commit
+
+ Data are saved *as if* they are part of the current transaction.
+ That is, they will not be persistent unless the current transaction
+ is committed.
+
+ This is called when the current top-level transaction is committed.
+
+ No further subtransactions can be started once commit_sub()
+ has been called; this is only used when the transaction is
+ being committed.
+
+ This call also implies the beginning of 2-phase commit.
+ """
+
+ # Two-phase commit protocol. These methods are called by the
+ # ITransaction object associated with the transaction being
+ # committed.
+
+ def tpc_begin(transaction, subtransaction=False):
+ """Begin commit of a transaction, starting the two-phase commit.
+
+ transaction is the ITransaction instance associated with the
+ transaction being committed.
+
+ subtransaction is a Boolean flag indicating whether the
+ two-phase commit is being invoked for a subtransaction.
+
+ Important note: once you commit a subtransaction, subsequent
+ commits must be for subtransactions as well, until commit_sub()
+ ends subtransaction mode.  That is, there must be a commit_sub()
+ call between a tpc_begin() call with the subtransaction flag set
+ to true and a tpc_begin() call with the flag set to false.
+
+ """
+
+
+ def tpc_abort(transaction):
+ """Abort a transaction.
+
+ This is always called after a tpc_begin call.
+
+ transaction is the ITransaction instance associated with the
+ transaction being committed.
+
+ This should never fail.
+ """
+
+ def tpc_finish(transaction):
+ """Indicate confirmation that the transaction is done.
+
+ transaction is the ITransaction instance associated with the
+ transaction being committed.
+
+ This should never fail. If this raises an exception, the
+ database is not expected to maintain consistency; it's a
+ serious error.
+
+ """
+
+ def tpc_vote(transaction):
+ """Verify that a data manager can commit the transaction
+
+ This is the last chance for a data manager to vote 'no'. A
+ data manager votes 'no' by raising an exception.
+
+ transaction is the ITransaction instance associated with the
+ transaction being committed.
+ """
+
+ def commit(object, transaction):
+ """CCCommit changes to an object
+
+ Save the object as part of the data to be made persistent if
+ the transaction commits.
+ """
+
+ def abort(object, transaction):
+ """Abort changes to an object
+
+ Only changes made since the last transaction or
+ sub-transaction boundary are discarded.
+
+ This method may be called either:
+
+ o Outside of two-phase commit, or
+
+ o In the first phase of two-phase commit
+
+ """
+
+
+
+
+class ITransaction(Interface):
+ """Object representing a running transaction.
+
+ Objects with this interface may represent different transactions
+ during their lifetime (.begin() can be called to start a new
+ transaction using the same instance).
+ """
+
+ user = Attribute(
+ "user",
+ "The name of the user on whose behalf the transaction is being\n"
+ "performed. The format of the user name is defined by the\n"
+ "application.")
+ # XXX required to be a string?
+
+ description = Attribute(
+ "description",
+ "Textual description of the transaction.")
+
+ def begin(info=None, subtransaction=None):
+ """Begin a new transaction.
+
+ If a transaction is in progress, it is aborted and a new
+ transaction is started using the same transaction object.
+ """
+
+ def commit(subtransaction=None):
+ """Finalize the transaction.
+
+ This executes the two-phase commit algorithm for all
+ IDataManager objects associated with the transaction.
+ """
+
+ def abort(subtransaction=0, freeme=1):
+ """Abort the transaction.
+
+ This is called from the application. This can only be called
+ before the two-phase commit protocol has been started.
+ """
+
+ def join(datamanager):
+ """Add a datamanager to the transaction.
+
+ The datamanager must implement the
+ transactions.interfaces.IDataManager interface, and be
+ adaptable to ZODB.interfaces.IDataManager.
+ """
+
+ def register(object):
+ """Register the given object for transaction control."""
+
+ def note(text):
+ """Add text to the transaction description.
+
+ If a description has already been set, text is added to the
+ end of the description following two newline characters.
+ Surrounding whitespace is stripped from text.
+ """
+ # XXX does impl do the right thing with ''? Not clear what
+ # the "right thing" is.
+
+ def setUser(user_name, path="/"):
+ """Set the user name.
+
+ path should be provided if needed to further qualify the
+ identified user.
+ """
+
+ def setExtendedInfo(name, value):
+ """Add extension data to the transaction.
+
+ name is the name of the extension property to set; value must
+ be a picklable value.
+
+ Storage implementations may limit the amount of extension data
+ which can be stored.
+ """
+ # XXX is this allowed to cause an exception here, during
+ # the two-phase commit, or can it toss data silently?
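
For orientation, the order in which a transaction drives an
IDataManager during a plain top-level commit is roughly the following
(a sketch, not the actual Transaction code):

    def commit_sequence(txn, dm, objects):
        # txn provides ITransaction, dm provides IDataManager
        dm.tpc_begin(txn, subtransaction=False)
        for obj in objects:
            dm.commit(obj, txn)   # first phase: store each changed object
        dm.tpc_vote(txn)          # last chance to raise and veto
        dm.tpc_finish(txn)        # second phase: make the changes permanent
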
=== Zope/lib/python/ZODB/subtransactions.txt 1.1 => 1.2 ===
--- /dev/null Wed Feb 18 21:59:40 2004
+++ Zope/lib/python/ZODB/subtransactions.txt Wed Feb 18 21:59:06 2004
@@ -0,0 +1,51 @@
+=========================
+Subtransactions in ZODB 3
+=========================
+
+ZODB 3 provides limited support for subtransactions. Subtransactions
+are nested to *one* level. There are top-level transactions and
+subtransactions. When a transaction is committed, a flag is passed
+indicating whether it is a subtransaction or a top-level transaction.
+Consider the following example commit calls:
+
+- commit()
+
+ A regular top-level transaction is committed.
+
+- commit(1)
+
+ A subtransaction is committed. There is now one subtransaction of
+ the current top-level transaction.
+
+- commit(1)
+
+ A subtransaction is committed. There are now two subtransactions of
+ the current top-level transaction.
+
+- abort(1)
+
+ A subtransaction is aborted. There are still two subtransactions of
+ the current top-level transaction; work done since the last
+ commit(1) call is discarded.
+
+- commit()
+
+ We now commit a top-level transaction. The work done in the previous
+ two subtransactions *plus* work done since the last abort(1) call
+ is saved.
+
+- commit(1)
+
+ A subtransaction is committed. There is now one subtransaction of
+ the current top-level transaction.
+
+- commit(1)
+
+ A subtransaction is committed. There are now two subtransactions of
+ the current top-level transaction.
+
+- abort()
+
+ We now abort a top-level transaction. We discard the work done in
+ the previous two subtransactions *plus* work done since the last
+ commit(1) call.
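
The same sequence, expressed as calls on the current transaction
(a sketch; get_transaction() is the builtin installed by ZODB):

    get_transaction().commit(1)   # commit work as a subtransaction
    get_transaction().commit(1)   # a second subtransaction
    get_transaction().abort(1)    # discard work since the last commit(1)
    get_transaction().commit()    # top-level commit: both subtransactions
                                  # plus work done since abort(1) are saved
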
=== Zope/lib/python/ZODB/ConflictResolution.py 1.21 => 1.22 ===
--- Zope/lib/python/ZODB/ConflictResolution.py:1.21 Fri Nov 28 11:44:49 2003
+++ Zope/lib/python/ZODB/ConflictResolution.py Wed Feb 18 21:59:06 2004
@@ -19,25 +19,39 @@
from ZODB.POSException import ConflictError
import zLOG
-bad_classes = {}
-
-def bad_class(class_tuple):
- if bad_classes.has_key(class_tuple) or class_tuple[0][0] == '*':
- # if we've seen the class before or if it's a ZClass, we know that
- # we can't resolve the conflict
- return 1
-
ResolvedSerial = 'rs'
-def _classFactory(location, name,
- _silly=('__doc__',), _globals={}):
- return getattr(__import__(location, _globals, _globals, _silly),
- name)
+class BadClassName(Exception):
+ pass
+
+_class_cache = {}
+_class_cache_get = _class_cache.get
+def find_global(*args):
+ cls = _class_cache_get(args, 0)
+ if cls == 0:
+ # Not cached. Try to import
+ try:
+ module = __import__(args[0], {}, {}, ['cluck'])
+ except ImportError:
+ cls = 1
+ else:
+ cls = getattr(module, args[1], 1)
+ _class_cache[args] = cls
+
+ if cls == 1:
+ zLOG.LOG("Conflict Resolution", zLOG.BLATHER,
+ "Unable to load class", error=sys.exc_info())
+
+ if cls == 1:
+ # Not importable
+ raise BadClassName(*args)
+ return cls
def state(self, oid, serial, prfactory, p=''):
p = p or self.loadSerial(oid, serial)
file = StringIO(p)
unpickler = Unpickler(file)
+ unpickler.find_global = find_global
unpickler.persistent_load = prfactory.persistent_load
unpickler.load() # skip the class tuple
return unpickler.load()
@@ -70,17 +84,8 @@
if getattr(object, '__class__', 0) is not PersistentReference:
return None
return object.data
-
-def load_class(class_tuple):
- try:
- klass = _classFactory(class_tuple[0], class_tuple[1])
- except (ImportError, AttributeError):
- zLOG.LOG("Conflict Resolution", zLOG.BLATHER,
- "Unable to load class", error=sys.exc_info())
- bad_classes[class_tuple] = 1
- return None
- return klass
-
+
+_unresolvable = {}
def tryToResolveConflict(self, oid, committedSerial, oldSerial, newpickle,
committedData=''):
# class_tuple, old, committed, newstate = ('',''), 0, 0, 0
@@ -88,21 +93,28 @@
prfactory = PersistentReferenceFactory()
file = StringIO(newpickle)
unpickler = Unpickler(file)
+ unpickler.find_global = find_global
unpickler.persistent_load = prfactory.persistent_load
meta = unpickler.load()
- class_tuple = meta[0]
- if bad_class(class_tuple):
+ if isinstance(meta, tuple):
+ klass = meta[0]
+ newargs = meta[1] or ()
+ if isinstance(klass, tuple):
+ klass = find_global(*klass)
+ else:
+ klass = meta
+ newargs = ()
+
+ if klass in _unresolvable:
return None
+
newstate = unpickler.load()
- klass = load_class(class_tuple)
- if klass is None:
- return None
- inst = klass.__new__(klass)
+ inst = klass.__new__(klass, *newargs)
try:
resolve = inst._p_resolveConflict
except AttributeError:
- bad_classes[class_tuple] = 1
+ _unresolvable[klass] = 1
return None
old = state(self, oid, oldSerial, prfactory)
@@ -116,7 +128,7 @@
pickler.dump(meta)
pickler.dump(resolved)
return file.getvalue(1)
- except ConflictError:
+ except (ConflictError, BadClassName):
return None
except:
# If anything else went wrong, catch it here and avoid passing an
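
For context, conflict resolution is only attempted for classes that
define _p_resolveConflict().  A minimal sketch of such a class (the
Counter class is hypothetical):

    from persistent import Persistent

    class Counter(Persistent):
        """Resolve concurrent increments by merging the deltas."""
        def __init__(self):
            self.value = 0
        def inc(self, n=1):
            self.value += n
        def _p_resolveConflict(self, old, saved, new):
            # old, saved, and new are __getstate__() dictionaries:
            # the common ancestor, the committed state, and the state
            # this transaction tried to commit.
            resolved = dict(old)
            resolved['value'] = saved['value'] + new['value'] - old['value']
            return resolved
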
=== Zope/lib/python/ZODB/Connection.py 1.117 => 1.118 ===
--- Zope/lib/python/ZODB/Connection.py:1.117 Wed Jan 14 13:58:08 2004
+++ Zope/lib/python/ZODB/Connection.py Wed Feb 18 21:59:06 2004
@@ -18,29 +18,21 @@
import logging
import sys
import threading
+import itertools
from time import time
-from types import ClassType
from utils import u64
-_marker = object()
-
-def myhasattr(obj, attr):
- # builtin hasattr() swallows exceptions
- return getattr(obj, attr, _marker) is not _marker
-
from persistent import PickleCache
from zLOG import LOG, ERROR, BLATHER, WARNING
from ZODB.ConflictResolution import ResolvedSerial
-from ZODB.coptimizations import new_persistent_id
from ZODB.ExportImport import ExportImport
from ZODB.POSException \
- import ConflictError, ReadConflictError, TransactionError
+ import ConflictError, ReadConflictError, InvalidObjectReference
from ZODB.TmpStore import TmpStore
from ZODB.Transaction import Transaction, get_transaction
from ZODB.utils import oid_repr, z64
-from ZODB.serialize \
- import ObjectWriter, getClassMetadata, ConnectionObjectReader
+from ZODB.serialize import ObjectWriter, ConnectionObjectReader, myhasattr
global_reset_counter = 0
@@ -70,6 +62,7 @@
_opened = None
_code_timestamp = 0
_transaction = None
+ _added_during_commit = None
def __init__(self, version='', cache_size=400,
cache_deactivate_after=60, mvcc=True):
@@ -88,6 +81,8 @@
self._cache.cache_drain_resistance = 100
self._incrgc = self.cacheGC = cache.incrgc
+ self._committed = []
+ self._added = {}
self._reset_counter = global_reset_counter
self._load_count = 0 # Number of objects unghosted
self._store_count = 0 # Number of objects stored
@@ -160,6 +155,9 @@
obj = self._cache.get(oid, None)
if obj is not None:
return obj
+ obj = self._added.get(oid, None)
+ if obj is not None:
+ return obj
p, serial = self._storage.load(oid, self._version)
obj = self._reader.getGhost(p)
@@ -172,6 +170,21 @@
self._cache[oid] = obj
return obj
+ def add(self, obj):
+ marker = object()
+ oid = getattr(obj, "_p_oid", marker)
+ if oid is marker:
+ raise TypeError("Only first-class persistent objects may be"
+ " added to a Connection.", obj)
+ elif obj._p_jar is None:
+ oid = obj._p_oid = self._storage.new_oid()
+ obj._p_jar = self
+ self._added[oid] = obj
+ if self._added_during_commit is not None:
+ self._added_during_commit.append(obj)
+ elif obj._p_jar is not self:
+ raise InvalidObjectReference(obj, obj._p_jar)
+
def sortKey(self):
# XXX will raise an exception if the DB hasn't been set
storage_key = self._sortKey()
@@ -219,8 +232,14 @@
if object is self:
self._flush_invalidations()
else:
- assert object._p_oid is not None
- self._cache.invalidate(object._p_oid)
+ oid = object._p_oid
+ assert oid is not None
+ if oid in self._added:
+ del self._added[oid]
+ del object._p_jar
+ del object._p_oid
+ else:
+ self._cache.invalidate(object._p_oid)
def cacheFullSweep(self, dt=0):
self._cache.full_sweep(dt)
@@ -269,55 +288,76 @@
raise ReadConflictError(object=object)
invalid = self._invalid
+
+ # XXX In the case of a new object or an object added using add(),
+ # the oid is appended to _creating.
+ # However, this ought to be unnecessary because the _p_serial
+ # of the object will be z64 or None, so it will be appended
+ # to _creating about 30 lines down. The removal from _added
+ # ought likewise to be unnecessary.
if oid is None or object._p_jar is not self:
# new object
oid = self.new_oid()
object._p_jar = self
object._p_oid = oid
+ self._creating.append(oid) # maybe don't need this
+ elif oid in self._added:
+ # maybe don't need these
self._creating.append(oid)
-
+ del self._added[oid]
elif object._p_changed:
if invalid(oid):
resolve = getattr(object, "_p_resolveConflict", None)
if resolve is None:
raise ConflictError(object=object)
self._modified.append(oid)
-
else:
# Nothing to do
return
w = ObjectWriter(object)
- for obj in w:
- oid = obj._p_oid
- serial = getattr(obj, '_p_serial', z64)
- if serial == z64:
- # new object
- self._creating.append(oid)
- else:
- if invalid(oid) and not hasattr(object, '_p_resolveConflict'):
- raise ConflictError(object=obj)
- self._modified.append(oid)
-
- p = w.serialize(obj)
- s = self._storage.store(oid, serial, p, self._version, transaction)
- self._store_count = self._store_count + 1
- # Put the object in the cache before handling the
- # response, just in case the response contains the
- # serial number for a newly created object
- try:
- self._cache[oid] = obj
- except:
- # Dang, I bet its wrapped:
- if hasattr(obj, 'aq_base'):
- self._cache[oid] = obj.aq_base
+ self._added_during_commit = []
+ try:
+ for obj in itertools.chain(w, self._added_during_commit):
+ oid = obj._p_oid
+ serial = getattr(obj, '_p_serial', z64)
+
+ # XXX which one? z64 or None? Why do I have to check both?
+ if serial == z64 or serial is None:
+ # new object
+ self._creating.append(oid)
+ # If this object was added, it is now in _creating, so can
+ # be removed from _added.
+ self._added.pop(oid, None)
else:
- raise
+ if (invalid(oid)
+ and not hasattr(object, '_p_resolveConflict')):
+ raise ConflictError(object=obj)
+ self._modified.append(oid)
+ p = w.serialize(obj) # This calls __getstate__ of obj
+
+ s = self._storage.store(oid, serial, p, self._version,
+ transaction)
+ self._store_count = self._store_count + 1
+ # Put the object in the cache before handling the
+ # response, just in case the response contains the
+ # serial number for a newly created object
+ try:
+ self._cache[oid] = obj
+ except:
+ # Dang, I bet it's wrapped:
+ if hasattr(obj, 'aq_base'):
+ self._cache[oid] = obj.aq_base
+ else:
+ raise
- self._handle_serial(s, oid)
+ self._handle_serial(s, oid)
+ finally:
+ del self._added_during_commit
+
def commit_sub(self, t):
- """Commit all work done in subtransactions"""
+ """Commit all work done in all subtransactions for this transaction"""
tmp=self._tmp
if tmp is None: return
src=self._storage
@@ -347,7 +387,7 @@
self._handle_serial(s, oid, change=0)
def abort_sub(self, t):
- """Abort work done in subtransactions"""
+ """Abort work done in all subtransactions for this transaction"""
tmp=self._tmp
if tmp is None: return
src=self._storage
@@ -586,6 +626,10 @@
self._flush_invalidations()
self._conflicts.clear()
self._invalidate_creating()
+ while self._added:
+ oid, obj = self._added.popitem()
+ del obj._p_oid
+ del obj._p_jar
def tpc_begin(self, transaction, sub=None):
self._modified = []
=== Zope/lib/python/ZODB/DB.py 1.60 => 1.61 ===
--- Zope/lib/python/ZODB/DB.py:1.60 Tue Feb 17 20:13:00 2004
+++ Zope/lib/python/ZODB/DB.py Wed Feb 18 21:59:06 2004
@@ -228,8 +228,8 @@
self._connectionMap(f)
return detail
- def cacheFullSweep(self, value):
- self._connectionMap(lambda c, v=value: c._cache.full_sweep(v))
+ def cacheFullSweep(self):
+ self._connectionMap(lambda c: c._cache.full_sweep())
def cacheLastGCTime(self):
m=[0]
@@ -240,8 +240,8 @@
self._connectionMap(f)
return m[0]
- def cacheMinimize(self, value):
- self._connectionMap(lambda c, v=value: c._cache.minimize(v))
+ def cacheMinimize(self):
+ self._connectionMap(lambda c: c._cache.minimize())
def cacheMeanAge(self): return self._cacheMean('cache_mean_age')
def cacheMeanDeac(self): return self._cacheMean('cache_mean_deac')
=== Zope/lib/python/ZODB/DemoStorage.py 1.24 => 1.25 ===
--- Zope/lib/python/ZODB/DemoStorage.py:1.24 Tue Feb 17 20:13:00 2004
+++ Zope/lib/python/ZODB/DemoStorage.py Wed Feb 18 21:59:06 2004
@@ -20,7 +20,7 @@
- Provide a volatile storage that is useful for giving demonstrations.
-The demo strorage can have a "base" storage that is used in a
+The demo storage can have a "base" storage that is used in a
read-only fashion. The base storage must not not to contain version
data.
@@ -402,6 +402,11 @@
# Build indexes up to the pack time:
index, vindex = self._build_indexes(stop)
+
+
+ # XXX This packing algorithm is flawed. It ignores
+ # references from non-current records after the pack
+ # time.
# Now build an index of *only* those objects reachable
# from the root.
=== Zope/lib/python/ZODB/Transaction.py 1.56 => 1.57 ===
--- Zope/lib/python/ZODB/Transaction.py:1.56 Mon Dec 29 17:40:48 2003
+++ Zope/lib/python/ZODB/Transaction.py Wed Feb 18 21:59:06 2004
@@ -90,6 +90,9 @@
else:
return "Transaction thread=%s user=%s" % (self._id, `self.user`)
+ # XXX This whole freeme business is lame.
+ # As a separate task, we really need to revisit transaction management.
+
def __del__(self):
if self._objects:
self.abort(freeme=0)
@@ -428,6 +431,14 @@
'Register the given object for transaction control.'
self._append(object)
+ def join(self, zodb4datamanager):
+ """Join a transaction.interfaces.IDataManager with the transaction
+
+ This method is provided for "backward compatibility" with ZODB 4
+ data managers.
+ """
+ self._append(DataManagerAdapter(zodb4datamanager))
+
def note(self, text):
if self.description:
self.description = "%s\n\n%s" % (self.description, text.strip())
@@ -454,6 +465,69 @@
the system problem. See your application log for
information on the error that lead to this problem.
"""
+
+
+
+class DataManagerAdapter(object):
+ """Adapt zodb 4-style data managers to zodb3 style
+
+ Adapt transaction.interfaces.IDataManager to
+ ZODB.interfaces.IPureDatamanager
+
+ """
+
+ # Note that it is pretty important that this does not have a _p_jar
+ # attribute. This object will be registered with a zodb3 TM, which
+ # will then try to get a _p_jar from it, using it as the default.
+ # (Objects without a _p_jar are their own data managers.)
+
+ def __init__(self, datamanager):
+ self._datamanager = datamanager
+ self._rollback = None
+
+ def commit(self, ob, transaction):
+ assert ob is self
+
+ def abort(self, ob, transaction):
+ assert ob is self
+
+ # We need to discard any changes since the last save point, or all
+ # changes
+
+ if self._rollback is None:
+ # No previous savepoint, so just abort
+ self._datamanager.abort(transaction)
+ else:
+ self._rollback()
+
+ def abort_sub(self, transaction):
+ self._datamanager.abort(transaction)
+
+ def commit_sub(self, transaction):
+ # Nothing to do wrt data, but we begin 2pc for the top-level
+ # transaction
+ self._sub = False
+
+ def tpc_begin(self, transaction, subtransaction=False):
+ self._sub = subtransaction
+
+ def tpc_abort(self, transaction):
+ if self._sub:
+ self.abort(self, transaction)
+ else:
+ self._datamanager.abort(transaction)
+
+ def tpc_finish(self, transaction):
+ if self._sub:
+ self._rollback = self._datamanager.savepoint(transaction).rollback
+ else:
+ self._datamanager.commit(transaction)
+
+ def tpc_vote(self, transaction):
+ if not self._sub:
+ self._datamanager.prepare(transaction)
+
+
############################################################################
# install get_transaction:
=== Zope/lib/python/ZODB/referencesf.py 1.6 => 1.7 ===
--- Zope/lib/python/ZODB/referencesf.py:1.6 Wed Aug 14 18:07:09 2002
+++ Zope/lib/python/ZODB/referencesf.py Wed Feb 18 21:59:06 2004
@@ -15,33 +15,54 @@
"""
import cPickle, cStringIO
-def referencesf(p, rootl=None,
- Unpickler=cPickle.Unpickler,
- StringIO=cStringIO.StringIO,
- tt=type(()),
- type=type):
-
- if rootl is None: rootl=[]
- u=Unpickler(StringIO(p))
- l=len(rootl)
- u.persistent_load=rootl
+def referencesf(p, rootl=None,):
+
+ if rootl is None:
+ rootl = []
+
+ u = cPickle.Unpickler(cStringIO.StringIO(p))
+ l = len(rootl)
+ u.persistent_load = rootl
u.noload()
- try: u.noload()
+ try:
+ u.noload()
except:
# Hm. We failed to do second load. Maybe there wasn't a
# second pickle. Let's check:
- f=StringIO(p)
- u=Unpickler(f)
- u.persistent_load=[]
+ f = cStringIO.StringIO(p)
+ u = cPickle.Unpickler(f)
+ u.persistent_load = []
u.noload()
- if len(p) > f.tell(): raise ValueError, 'Error unpickling, %s' % p
+ if len(p) > f.tell():
+ raise ValueError, 'Error unpickling, %s' % p
+
+
+ # References may be:
+ #
+ # - A tuple, in which case they are an oid and class.
+ # In this case, just extract the first element, which is
+ # the oid
+ #
+ # - A list, which is a weak reference. We skip those.
+ #
+ # - Anything else must be an oid. This means that an oid
+ # may not be a list or a tuple. This is a bit lame.
+ # We could avoid this lamosity by allowing single-element
+ # tuples, so that we wrap oids that are lists or tuples in
+ # tuples.
+ #
+ # - oids may *not* be false. I'm not sure why.
+
+ out = []
+ for v in rootl:
+ assert v # Let's see if we ever get empty ones
+ if type(v) is list:
+ # skip weakrefs
+ continue
+ if type(v) is tuple:
+ v = v[0]
+ out.append(v)
- # References may have class info, so we need to
- # check for wrapped references.
- for i in range(l, len(rootl)):
- v=rootl[i]
- if v:
- if type(v) is tt: v=v[0]
- rootl[i]=v
+ rootl[:] = out
return rootl
=== Zope/lib/python/ZODB/serialize.py 1.2 => 1.3 ===
--- Zope/lib/python/ZODB/serialize.py:1.2 Fri Nov 28 11:44:49 2003
+++ Zope/lib/python/ZODB/serialize.py Wed Feb 18 21:59:06 2004
@@ -58,34 +58,35 @@
import cPickle
import cStringIO
-from ZODB.coptimizations import new_persistent_id
+from persistent import Persistent
+from persistent.wref import WeakRefMarker, WeakRef
+from ZODB.POSException import InvalidObjectReference
-_marker = object()
+# Might need to update or redo the C optimization to reflect weakrefs
+# from ZODB.coptimizations import new_persistent_id
-def myhasattr(obj, attr):
- """Returns True or False or raises an exception."""
- val = getattr(obj, attr, _marker)
- return val is not _marker
+
+def myhasattr(obj, name, _marker=object()):
+ """Make sure we don't mask exceptions like hasattr().
+
+ We don't want exceptions other than AttributeError to be masked,
+ since that too often masks other programming errors.
+ Three-argument getattr() doesn't mask those, so we use that to
+ implement our own hasattr() replacement.
+ """
+ return getattr(obj, name, _marker) is not _marker
def getClassMetadata(obj):
- klass = obj.__class__
- if issubclass(klass, type):
- # Handle ZClasses.
- d = obj.__dict__.copy()
- del d["_p_jar"]
- args = obj.__name__, obj.__bases__, d
- return klass, args
+
+ # We don't use __class__ here, because obj could be a persistent proxy.
+ # We don't want to be fooled by proxies.
+ klass = type(obj)
+
+ newargs = getattr(klass, "__getnewargs__", None)
+ if newargs is None:
+ return klass
else:
- getinitargs = getattr(klass, "__getinitargs__", None)
- if getinitargs is None:
- args = None
- else:
- args = getinitargs()
- mod = getattr(klass, "__module__", None)
- if mod is None:
- return klass, args
- else:
- return (mod, klass.__name__), args
+ return klass, newargs(obj)
class BaseObjectWriter:
"""Serializes objects for storage in the database.
@@ -93,23 +94,90 @@
The ObjectWriter creates object pickles in the ZODB format. It
also detects new persistent objects reachable from the current
object.
-
- The client is responsible for calling the close() method to avoid
- leaking memory. The ObjectWriter uses a Pickler internally, and
- Pickler objects do not participate in garbage collection. (Note
- that in Python 2.3 and higher, the close() method would be
- unnecessary because Picklers participate in garbage collection.)
"""
def __init__(self, jar=None):
self._file = cStringIO.StringIO()
self._p = cPickle.Pickler(self._file, 1)
self._stack = []
- self._p.persistent_id = new_persistent_id(jar, self._stack)
+ self._p.persistent_id = self.persistent_id
if jar is not None:
assert myhasattr(jar, "new_oid")
self._jar = jar
+
+ def persistent_id(self, obj):
+
+ # Most objects are not persistent. The following cheap test
+ # identifies most of them. For these, we return None,
+ # signalling that the object should be pickled normally.
+ if not isinstance(obj, (Persistent, type, WeakRef)):
+ # Not persistent, pickle normally
+ return None
+
+ # Any persistent object must have an oid:
+ try:
+ oid = obj._p_oid
+ except AttributeError:
+ # Not persistent, pickle normally
+ return None
+
+ if not (oid is None or isinstance(oid, str)):
+ # Deserves a closer look:
+
+ # Make sure it's not a descr
+ if hasattr(oid, '__get__'):
+ # The oid is a descriptor. That means obj is a non-persistent
+ # class whose instances are persistent, so ...
+ # Not persistent, pickle normally
+ return None
+
+ if oid is WeakRefMarker:
+ # we have a weakref, see weakref.py
+
+ oid = obj.oid
+ if oid is None:
+ obj = obj() # get the referenced object
+ oid = obj._p_oid
+ if oid is None:
+ # Here we are causing the object to be saved in
+ # the database. One could argue that we shouldn't
+ # do this, because a weakref should not cause an object
+ # to be added. We'll be optimistic, though, and
+ # assume that the object will be added eventually.
+
+ oid = self._jar.new_oid()
+ obj._p_jar = self._jar
+ obj._p_oid = oid
+ self._stack.append(obj)
+ return [oid]
+
+
+ # Since we have an oid, we have either a persistent instance
+ # (an instance of Persistent), or a persistent class.
+
+ # NOTE! Persistent classes don't (and can't) subclass persistent.
+
+ if oid is None:
+ oid = obj._p_oid = self._jar.new_oid()
+ obj._p_jar = self._jar
+ self._stack.append(obj)
+ elif obj._p_jar is not self._jar:
+ raise InvalidObjectReference(
+ "Attempt to store an object from a foreign "
+ "database connection"
+ )
+
+ klass = type(obj)
+ if hasattr(klass, '__getnewargs__'):
+ # We don't want to save newargs in object refs.
+ # It's possible that __getnewargs__ is degenerate and
+ # returns (), but we don't want to have to deghostify
+ # the object to find out.
+ return oid
+
+ return oid, klass
+
def serialize(self, obj):
return self._dump(getClassMetadata(obj), obj.__getstate__())
@@ -168,7 +236,7 @@
return unpickler
def _new_object(self, klass, args):
- if not args and not myhasattr(klass, "__getinitargs__"):
+ if not args and not myhasattr(klass, "__getnewargs__"):
obj = klass.__new__(klass)
else:
obj = klass(*args)
@@ -179,19 +247,32 @@
def getClassName(self, pickle):
unpickler = self._get_unpickler(pickle)
- klass, newargs = unpickler.load()
+ klass = unpickler.load()
if isinstance(klass, tuple):
- return "%s.%s" % klass
- else:
- return klass.__name__
+ klass, args = klass
+ if isinstance(klass, tuple):
+ # old style reference
+ return "%s.%s" % klass
+ return "%s.%s" % (klass.__module__, klass.__name__)
def getGhost(self, pickle):
unpickler = self._get_unpickler(pickle)
- klass, args = unpickler.load()
+ klass = unpickler.load()
if isinstance(klass, tuple):
- klass = self._get_class(*klass)
-
- return self._new_object(klass, args)
+ # Here we have a separate class and args.
+ # This could be an old record, so the class may be a named
+ # (module, name) reference
+ klass, args = klass
+ if isinstance(klass, tuple):
+ # Old module_name, class_name tuple
+ klass = self._get_class(*klass)
+ if args is None:
+ return klass.__new__(klass)
+ else:
+ return klass.__new__(klass, *args)
+ else:
+ # Definitely a new-style direct class reference
+ return klass.__new__(klass)
def getState(self, pickle):
unpickler = self._get_unpickler(pickle)
@@ -202,13 +283,6 @@
state = self.getState(pickle)
obj.__setstate__(state)
- def getObject(self, pickle):
- unpickler = self._get_unpickler(pickle)
- klass, args = unpickler.load()
- obj = self._new_object(klass, args)
- state = unpickler.load()
- obj.__setstate__(state)
- return obj
class ExternalReference(object):
pass
@@ -242,19 +316,18 @@
if isinstance(oid, tuple):
# Quick instance reference. We know all we need to know
# to create the instance w/o hitting the db, so go for it!
- oid, klass_info = oid
+ oid, klass = oid
obj = self._cache.get(oid, None) # XXX it's not a dict
if obj is not None:
return obj
-
- klass = self._get_class(*klass_info)
- # XXX Why doesn't this have args?
- obj = self._new_object(klass, None)
- # XXX This doesn't address the last fallback that used to
- # exist:
-## # Eek, we couldn't get the class. Hm. Maybe there's
-## # more current data in the object's actual record!
-## return self._conn[oid]
+ if isinstance(klass, tuple):
+ klass = self._get_class(*klass)
+ try:
+ obj = klass.__new__(klass)
+ except TypeError:
+ # Couldn't create the instance. Maybe there's more
+ # current data in the object's actual record!
+ return self._conn[oid]
# XXX should be done by connection
obj._p_oid = oid
@@ -267,7 +340,15 @@
self._cache[oid] = obj
return obj
- obj = self._cache.get(oid)
+ elif isinstance(oid, list):
+ # see weakref.py
+ [oid] = oid
+ obj = WeakRef.__new__(WeakRef)
+ obj.oid = oid
+ obj.dm = self._conn
+ return obj
+
+ obj = self._cache.get(oid, None)
if obj is not None:
return obj
return self._conn[oid]
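
Classes whose instances need constructor arguments can now supply
them via __getnewargs__; a hedged sketch (the Point class is
hypothetical):

    from persistent import Persistent

    class Point(Persistent):
        def __init__(self, x, y):
            self.x, self.y = x, y
        def __getnewargs__(self):
            # getClassMetadata() will store (Point, (x, y)) in the
            # pickle, and persistent_id() will store references to
            # Point instances as a bare oid rather than (oid, class).
            return self.x, self.y
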
=== Removed File Zope/lib/python/ZODB/coptimizations.c ===