[Zope-CVS] CVS: Products/AdaptableStorage/zodb - ASConnection.py:1.3 ASDB.py:1.2 ASStorage.py:1.3 OIDEncoder.py:1.2
Shane Hathaway
shane@zope.com
Fri, 6 Dec 2002 17:06:52 -0500
Update of /cvs-repository/Products/AdaptableStorage/zodb
In directory cvs.zope.org:/tmp/cvs-serv8198/zodb
Modified Files:
ASConnection.py ASDB.py ASStorage.py OIDEncoder.py
Log Message:
Experiment: removed mapper_name from OIDs, with the intent of allowing
loading and storage by different mappers depending on what the classifier
specifies. Not yet complete. Involved changes to virtually every module. :-)
I may decide to revert this. The shane-before-mapper-name-removal tag
was added just before this checkin.
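Rough sketch of the idea (toy names only, nothing below is the checked-in code): an OID now decodes to a keychain, i.e. a tuple of keys, and the mapper that handles an object is chosen by walking classifier-selected sub-mappers, one hop per keychain prefix, instead of being named inside the OID itself.

    # Toy illustration only.  It assumes a mapper exposes getClassifier()
    # and getSubMapper(), and a classifier names a sub-mapper for a
    # keychain prefix, which is what the diffs below rely on.
    class ToyClassifier:
        def __init__(self, choice):
            self._choice = choice
        def classifyState(self, mapper, keychain_prefix):
            # Returns (classification, sub_mapper_name), as in the diffs.
            return ({'class_name': 'Folder'}, self._choice)

    class ToyMapper:
        def __init__(self, classifier=None, sub_mappers=None):
            self._classifier = classifier
            self._sub_mappers = sub_mappers or {}
        def getClassifier(self):
            return self._classifier
        def getSubMapper(self, name):
            return self._sub_mappers[name]

    def find_mapper(root_mapper, keychain):
        # Change mappers for keychain[:1] ... keychain[:-1]; the last key
        # is left for the gateway, so there is one more key than hops.
        mapper = root_mapper
        mapper_names = []
        for i in range(1, len(keychain)):
            cfr = mapper.getClassifier()
            classification, name = cfr.classifyState(mapper, keychain[:i])
            mapper_names.append(name)
            mapper = mapper.getSubMapper(name)
        return mapper, mapper_names

    leaf = ToyMapper()
    folders = ToyMapper(ToyClassifier('documents'), {'documents': leaf})
    root = ToyMapper(ToyClassifier('folders'), {'folders': folders})
    mapper, names = find_mapper(root, ('', 'folder1', 'doc1'))
    assert mapper is leaf and names == ['folders', 'documents']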
=== Products/AdaptableStorage/zodb/ASConnection.py 1.2 => 1.3 ===
--- Products/AdaptableStorage/zodb/ASConnection.py:1.2 Tue Dec 3 18:10:55 2002
+++ Products/AdaptableStorage/zodb/ASConnection.py Fri Dec 6 17:06:51 2002
@@ -29,31 +29,38 @@
class ASConnection (Connection):
- """DomainMapper-driven Connection
+ """Mapper-driven Connection
- Uses a domain mapper to serialize the state of objects before
+ Uses a mapper to serialize the state of objects before
pickling, and to deserialize objects based on the pickled
state.
- The domain mapper might, for example, serialize all objects as
+ The mapper might, for example, serialize all objects as
tabular records.
"""
_volatile = None # { oid -> 1 }
- _domain_mapper = None
+ _root_mapper = None
no_mtime_available = 1 # Flag recognized by the PersistentExtra base class
__implements__ = (IKeyedObjectSystem,
getattr(Connection, '__implements__', ()))
-
- def getOIDInfo(self, oid):
- mapper_name, key = self._db._oid_encoder.decode(oid)
- domain_mapper = self._domain_mapper
- if domain_mapper is None:
- domain_mapper = self._db._domain_resource.access(self)
- self._domain_mapper = domain_mapper
- return domain_mapper.getMapper(mapper_name), key
+ def getRootMapper(self):
+ root_mapper = self._root_mapper
+ if root_mapper is None:
+ root_mapper = self._db._mapper_resource.access(self)
+ self._root_mapper = root_mapper
+ return root_mapper
+
+## def getOIDInfo(self, oid):
+## keychain = self._db._oid_encoder.decode(oid)
+## root_mapper = self._root_mapper
+## if root_mapper is None:
+## root_mapper = self._db._mapper_resource.access(self)
+## self._root_mapper = root_mapper
+
+## return domain_mapper.getMapper(mapper_name), keychain
def close(self):
@@ -61,18 +68,53 @@
try:
Connection.close(self)
finally:
- if db is not None and self._domain_mapper is not None:
- self._domain_mapper = None
- db._domain_resource.release(self)
+ if db is not None and self._root_mapper is not None:
+ self._root_mapper = None
+ db._mapper_resource.release(self)
+
+
+ def __getitem__(self, oid, tt=type(())):
+ obj = self._cache.get(oid, None)
+ if obj is not None:
+ return obj
+
+ __traceback_info__ = (oid)
+ p, serial = self._storage.load(oid, self._version)
+ __traceback_info__ = (oid, p)
+ file=StringIO(p)
+ unpickler=Unpickler(file)
+ unpickler.persistent_load=self._persistent_load
+
+ try:
+ classification, mapper_names = unpickler.load()
+ except:
+ raise "Could not load oid %s, pickled data in traceback info may\
+ contain clues" % (oid)
+
+ mapper = self.getRootMapper()
+ for mapper_name in mapper_names:
+ mapper = mapper.getSubMapper(mapper_name)
+ object = mapper.getSerializer().createEmptyInstance()
+ if object is None:
+ state = unpickler.load()
+ object = mapper.getSerializer().createEmptyInstance(state)
+ assert object is not None
+
+ object._p_oid=oid
+ object._p_jar=self
+ object._p_changed=None
+ object._p_serial=serial
+
+ self._cache[oid] = object
+ if oid=='\0\0\0\0\0\0\0\0':
+ self._root_=object # keep a ref
+ return object
def _persistent_load(self, oid, class_info=None):
__traceback_info__=oid
- # Note that this impl. never expects tuple OIDs like Connection does.
- assert isinstance(oid, StringType)
-
obj = self._cache.get(oid, None)
if obj is not None:
return obj
@@ -226,17 +268,29 @@
__traceback_info__=klass, oid, self._version
# SDH: hook in the serializer.
# state=object.__getstate__()
- mapper, key = self.getOIDInfo(oid)
+ keychain = self._db._oid_encoder.decode(oid)
+ mapper = self.getRootMapper()
+ mapper_names = []
+ for i in range(1, len(keychain)):
+ k = keychain[:i]
+ o = self[k]
+ cfr = mapper.getClassifier()
+ classification, sub_mapper_name = cfr.classifyObject(k, o)
+ mapper_names.append(sub_mapper_name)
+ mapper = mapper.getSubMapper(sub_mapper_name)
+ cfr = mapper.getClassifier()
+ classification, dummy_mapper_name = cfr.classifyObject(
+ keychain, object)
ser = mapper.getSerializer()
if DEBUG:
print 'serializing', repr(oid), repr(serial)
- state, ext_refs = ser.serialize(mapper, key, object, self)
+ state, ext_refs = ser.serialize(mapper, keychain, object, self)
if ext_refs:
oid_encoder = self._db._oid_encoder
- for (ext_mapper, ext_key, ext_ref) in ext_refs:
+ for (ext_keychain, ext_ref) in ext_refs:
if (not ext_ref._p_serial
or ext_ref._p_serial == SERIAL0):
- ext_oid = oid_encoder.encode(ext_mapper, ext_key)
+ ext_oid = oid_encoder.encode(ext_keychain)
if ext_ref._p_jar:
if ext_ref._p_jar != self:
raise InvalidObjectReference
@@ -251,7 +305,7 @@
seek(0)
clear_memo()
- dump((klass,args))
+ dump((classification, mapper_names))
dump(state)
p=file(1)
s=dbstore(oid,serial,p,version,transaction)
@@ -275,8 +329,9 @@
oid=object._p_oid
if self._storage is None:
- msg = "Shouldn't load state for %s when the connection is closed" % `oid`
- LOG('ZODB',ERROR, msg)
+ msg = ("Shouldn't load state for %s "
+ "when the connection is closed" % `oid`)
+ LOG('ZODB', ERROR, msg)
raise RuntimeError(msg)
try:
@@ -306,20 +361,24 @@
unpickler=Unpickler(file)
# SDH: external references are reassembled elsewhere.
# unpickler.persistent_load=self._persistent_load
- unpickler.load()
+ classification, mapper_names = unpickler.load()
state = unpickler.load()
- # SDH: Let the object mapping do the state setting.
+ # SDH: Let the object mapper do the state setting.
# if hasattr(object, '__setstate__'):
# object.__setstate__(state)
# else:
# d=object.__dict__
# for k,v in state.items(): d[k]=v
- mapper, key = self.getOIDInfo(oid)
+ keychain = self._db._oid_encoder.decode(oid)
+ assert len(keychain) == len(mapper_names) + 1
+ mapper = self.getRootMapper()
+ for mapper_name in mapper_names:
+ mapper = mapper.getSubMapper(mapper_name)
ser = mapper.getSerializer()
if DEBUG:
print 'deserializing', repr(oid), repr(serial)
- ser.deserialize(mapper, key, object, self, state)
+ ser.deserialize(mapper, keychain, object, self, state)
if mapper.isVolatile():
v = self._volatile
@@ -356,8 +415,8 @@
# IKeyedObjectSystem implementation
- def loadStub(self, mapper_name, key, class_info=None):
- oid = self._db._oid_encoder.encode(mapper_name, key)
+ def loadStub(self, keychain, class_info=None):
+ oid = self._db._oid_encoder.encode(keychain)
return self._persistent_load(oid, class_info)
def identifyObject(self, object):
@@ -366,8 +425,8 @@
return None
return self._db._oid_encoder.decode(oid)
- def newKey(self):
- return self.new_oid()
+## def newKey(self):
+## return self.new_oid()
### Volatile object expiration ###
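One concrete consequence visible in ASConnection.commit() and setstate():
the first pickle record is now (classification, mapper_names) instead of
the old class info, followed by the state record.  A minimal sketch of
that two-record layout, plain Python 2 cPickle with none of the product
classes involved:

    from cStringIO import StringIO
    from cPickle import Pickler, Unpickler

    def dump_records(classification, mapper_names, state):
        # Record 1: how to find the mapper again; record 2: the state.
        f = StringIO()
        p = Pickler(f)
        p.dump((classification, mapper_names))
        p.dump(state)
        return f.getvalue()

    def load_records(data):
        u = Unpickler(StringIO(data))
        classification, mapper_names = u.load()
        state = u.load()
        return classification, mapper_names, state

    data = dump_records({'class_name': 'Folder'}, ['folders'],
                        {'title': 'f1'})
    assert load_records(data) == ({'class_name': 'Folder'}, ['folders'],
                                  {'title': 'f1'})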
=== Products/AdaptableStorage/zodb/ASDB.py 1.1 => 1.2 ===
--- Products/AdaptableStorage/zodb/ASDB.py:1.1 Wed Nov 27 13:37:08 2002
+++ Products/AdaptableStorage/zodb/ASDB.py Fri Dec 6 17:06:51 2002
@@ -24,13 +24,13 @@
class ASDB (DB):
- """DomainMapper-driven Database
+ """Mapper-driven Database
"""
klass = ASConnection
# SDH: two extra args.
- def __init__(self, storage, domain_resource, oid_encoder=None,
+ def __init__(self, storage, mapper_resource, oid_encoder=None,
pool_size=7,
cache_size=400,
cache_deactivate_after=60,
@@ -65,7 +65,7 @@
if oid_encoder is None:
oid_encoder = OIDEncoder()
self._oid_encoder = oid_encoder
- self._domain_resource = domain_resource
+ self._mapper_resource = mapper_resource
# Pass through methods:
for m in ('history',
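For reference, wiring the renamed pieces together now looks roughly like
this.  The only interface these diffs require of a mapper resource is
access(consumer), returning the root mapper, and release(consumer); the
MapperResource class below is a stand-in made up for illustration, not
the product's real resource object.

    from Products.AdaptableStorage.zodb.ASStorage import ASStorage
    from Products.AdaptableStorage.zodb.ASDB import ASDB

    class MapperResource:
        # Made-up stand-in: hands out a prebuilt root mapper and does
        # no real reference counting.
        def __init__(self, root_mapper):
            self._root_mapper = root_mapper
        def access(self, consumer):
            return self._root_mapper
        def release(self, consumer):
            pass

    def open_db(root_mapper):
        resource = MapperResource(root_mapper)
        storage = ASStorage(resource)      # was: ASStorage(domain_resource)
        return ASDB(storage, resource)     # default OIDEncoder, keychains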
=== Products/AdaptableStorage/zodb/ASStorage.py 1.2 => 1.3 ===
--- Products/AdaptableStorage/zodb/ASStorage.py:1.2 Tue Dec 3 18:10:55 2002
+++ Products/AdaptableStorage/zodb/ASStorage.py Fri Dec 6 17:06:51 2002
@@ -11,7 +11,7 @@
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
-"""Storage implementation that loads/stores using a domain mapper.
+"""Storage implementation that loads/stores using a mapper.
$Id$
"""
@@ -28,9 +28,9 @@
class ASStorage(BaseStorage.BaseStorage):
- def __init__(self, domain_resource, tpc_conns=(),
+ def __init__(self, mapper_resource, tpc_conns=(),
oid_encoder=None, name=''):
- self._domain_resource = domain_resource
+ self._mapper_resource = mapper_resource
if oid_encoder is None:
oid_encoder = OIDEncoder()
self._oid_encoder = oid_encoder
@@ -63,22 +63,45 @@
print 'hash of %r is %r' % (serial, h)
return h
- def getOIDInfo(self, oid):
- mapper_name, key = self._oid_encoder.decode(oid)
- dm = self._domain_resource.access(self)
- return dm.getMapper(mapper_name), key
+ def _load(self, root_mapper, keychain):
+ mapper = root_mapper
+ mapper_names = []
+ # Follow the keychain to find the right mapper. Every item in
+ # the keychain except the last one involves a change of
+ # domain. So change mappers for
+ # keychain[:1], keychain[:2]... keychain[:len(keychain)-1].
+ # The last item in the keychain is for use by the gateway.
+ # (In other words, we should expect there to be one more key
+ # than domain changes.)
+ for i in range(1, len(keychain)):
+ k = keychain[:i]
+ cfr = mapper.getClassifier()
+ assert cfr is not None, keychain
+ classification, sub_mapper_name = cfr.classifyState(mapper, k)
+ mapper_names.append(sub_mapper_name)
+ mapper = mapper.getSubMapper(sub_mapper_name)
+ full_state, serial = mapper.getGateway().load(mapper, keychain)
+ cfr = mapper.getClassifier()
+ if cfr is not None:
+ classification, dummy_mapper_name = cfr.classifyState(
+ mapper, keychain)
+ else:
+ classification = None
+ return full_state, serial, classification, mapper_names
+
def load(self, oid, version):
if version:
raise POSException.Unsupported, "Versions aren't supported"
self._lock_acquire()
try:
- mapper, key = self.getOIDInfo(oid)
- full_state, serial = mapper.getGateway().load(mapper, key)
- class_info = mapper.getSerializer().getClassInfo(full_state)
+ keychain = self._oid_encoder.decode(oid)
+ root_mapper = self._mapper_resource.access(self)
+ full_state, serial, classification, mapper_names = self._load(
+ root_mapper, keychain)
file = StringIO()
p = Pickler(file)
- p.dump(class_info)
+ p.dump((classification, mapper_names))
p.dump(full_state)
data = file.getvalue()
h = self.hashSerial(serial)
@@ -97,30 +120,35 @@
self._lock_acquire()
try:
- mapper, key = self.getOIDInfo(oid)
+ root_mapper = self._mapper_resource.access(self)
+ keychain = self._oid_encoder.decode(oid)
+
# First detect conflicts.
# The "serial" argument, if its value is not 0,
# was previously generated by hashSerial().
if DEBUG:
print 'storing', `oid`, `serial_hash`
if serial_hash != SERIAL0:
- old_state, old_serial = mapper.getGateway().load(mapper, key)
- old_class_info = mapper.getSerializer().getClassInfo(old_state)
+ info = self._load(root_mapper, keychain)
+ old_state, old_serial = info[:2]
old_serial_hash = self.hashSerial(old_serial)
if serial_hash != old_serial_hash:
raise POSException.ConflictError("%r != %r" % (
serial_hash, old_serial_hash))
+
# Now unpickle and store the data.
file = StringIO(data)
u = Unpickler(file)
- class_info = u.load()
+ classification, mapper_names = u.load()
state = u.load()
- if VERIFY_CLASSES:
- check = mapper.getSerializer().getClassInfo(state)
- if check and class_info != check:
- raise RuntimeError, 'Class spec error: %r != %r' % (
- class_info, check)
- new_serial = mapper.getGateway().store(mapper, key, state)
+ assert len(keychain) == len(mapper_names) + 1
+ mapper = root_mapper
+ for mapper_name in mapper_names:
+ mapper = mapper.getSubMapper(mapper_name)
+ new_serial = mapper.getGateway().store(mapper, keychain, state)
+ cfr = mapper.getClassifier()
+ if cfr is not None:
+ cfr.store(mapper, keychain, classification)
new_hash = self.hashSerial(new_serial)
finally:
self._lock_release()
@@ -160,6 +188,6 @@
return ''
def close(self):
- self._domain_resource.release(self)
+ self._mapper_resource.release(self)
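The store side now leans on the mapper_names that travel inside the
pickle, so it can walk straight down the sub-mappers without
re-classifying anything, hand the full keychain to the gateway, and let
the classifier (if there is one) record the classification.  A toy
walk-through of just that control flow (stand-in classes, not the
product code):

    class ToyGateway:
        def __init__(self):
            self.stored = {}
        def store(self, mapper, keychain, state):
            self.stored[keychain] = state
            return 'serial-1'          # a real gateway derives this from data

    class ToyLeafMapper:
        def __init__(self):
            self._gateway = ToyGateway()
        def getGateway(self):
            return self._gateway
        def getClassifier(self):
            return None                # a leaf mapper may have no classifier

    class ToyRootMapper:
        def __init__(self, leaf):
            self._subs = {'documents': leaf}
        def getSubMapper(self, name):
            return self._subs[name]

    def store_state(root_mapper, keychain, mapper_names, classification,
                    state):
        # Same invariant the new ASStorage.store() asserts.
        assert len(keychain) == len(mapper_names) + 1
        mapper = root_mapper
        for name in mapper_names:
            mapper = mapper.getSubMapper(name)
        new_serial = mapper.getGateway().store(mapper, keychain, state)
        cfr = mapper.getClassifier()
        if cfr is not None:
            cfr.store(mapper, keychain, classification)
        return new_serial

    leaf = ToyLeafMapper()
    root = ToyRootMapper(leaf)
    store_state(root, ('', 'doc1'), ['documents'], None, {'title': 'Doc 1'})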
=== Products/AdaptableStorage/zodb/OIDEncoder.py 1.1 => 1.2 ===
--- Products/AdaptableStorage/zodb/OIDEncoder.py:1.1 Wed Nov 27 13:37:08 2002
+++ Products/AdaptableStorage/zodb/OIDEncoder.py Fri Dec 6 17:06:51 2002
@@ -16,6 +16,7 @@
$Id$
"""
+from types import TupleType
from consts import ROOT_OID
from interfaces.public import IOIDEncoder
@@ -27,19 +28,17 @@
__implements__ = IOIDEncoder
- def __init__(self, root_info=('root', '')):
- self.root_info = root_info
-
def decode(self, oid):
+ """Returns a keychain."""
if oid == ROOT_OID:
- return self.root_info
- info = oid.split(':', 1)
- if len(info) < 2:
- info = (info, '')
- return info
-
- def encode(self, mapper_name, key):
- if (mapper_name, key) == self.root_info:
+ return ('',)
+ assert isinstance(oid, TupleType)
+ return oid
+
+ def encode(self, keychain):
+ """Returns an OID."""
+ assert isinstance(keychain, TupleType)
+ if keychain == ('',):
return ROOT_OID
- return '%s:%s' % (mapper_name, key)
+ return keychain
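And the new OIDEncoder contract in one usage example: keychains in,
keychains out, with the root spelled ('',).  The import path assumes the
usual Products.AdaptableStorage.zodb package layout.

    from Products.AdaptableStorage.zodb.OIDEncoder import OIDEncoder

    encoder = OIDEncoder()
    keychain = ('', 'folder1', 'doc1')
    oid = encoder.encode(keychain)      # non-root OIDs are the keychain itself
    assert encoder.decode(oid) == keychain
    root_oid = encoder.encode(('',))    # the root maps to the special ROOT_OID
    assert encoder.decode(root_oid) == ('',)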