[Zope-Checkins] CVS: Zope/lib/python/ZODB - StorageConfig.py:1.1.2.1 StorageTypes.py:1.1.2.1 fstools.py:1.1.2.1 ActivityMonitor.py:1.3.4.2 BaseStorage.py:1.20.4.8 ConflictResolution.py:1.13.4.2 Connection.py:1.72.4.7 DB.py:1.43.4.1 DemoStorage.py:1.12.4.8 FileStorage.py:1.95.4.7 POSException.py:1.12.4.10 TimeStamp.c:1.15.58.4 Transaction.py:1.37.4.6 __init__.py:1.13.4.5 cPersistence.c:1.62.8.4 cPickleCache.c:1.68.8.1 fsdump.py:1.3.68.2 utils.py:1.12.4.1
Chris McDonough
chrism@zope.com
Sun, 24 Nov 2002 19:10:16 -0500
Update of /cvs-repository/Zope/lib/python/ZODB
In directory cvs.zope.org:/tmp/cvs-serv18079
Modified Files:
Tag: chrism-install-branch
ActivityMonitor.py BaseStorage.py ConflictResolution.py
Connection.py DB.py DemoStorage.py FileStorage.py
POSException.py TimeStamp.c Transaction.py __init__.py
cPersistence.c cPickleCache.c fsdump.py utils.py
Added Files:
Tag: chrism-install-branch
StorageConfig.py StorageTypes.py fstools.py
Log Message:
Merge with HEAD.
=== Added File Zope/lib/python/ZODB/StorageConfig.py ===
##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Higher-level support for configuring storages.
Storages are configured a la DBTab.
A storage section has the form
<Storage Name (dependent)>
# For example
type FileStorage
file_name var/Data.fs
read_only 1
</Storage>
where Name and (dependent) are optional. Once you have retrieved the
section object (probably with getSection("Storage", name)), the
function createStorage() in this module will create the storage object
for you.
"""
from StorageTypes import storage_types
def createStorage(section):
    """Instantiate and return the storage described by *section*.

    The configuration section is translated into a (class, keyword
    arguments) pair by getStorageInfo(); the class is then called to
    build the storage object.
    """
    factory, kwargs = getStorageInfo(section)
    return factory(**kwargs)
def getStorageInfo(section):
"""Extract a storage description from a configuration section.
Return a tuple (klass, args) where klass is the storage class and
args is a dictionary of keyword arguments. To create the storage,
call klass(**args).
Adapted from DatabaseFactory.setStorageParams() in DBTab.py.
"""
type = section.get("type")
if not type:
raise RuntimeError, "A storage type is required"
module = None
pos = type.rfind(".")
if pos >= 0:
# Specified the module
module, type = type[:pos], type[pos+1:]
converter = None
if not module:
# Use a default module and argument converter.
info = storage_types.get(type)
if not info:
raise RuntimeError, "Unknown storage type: %s" % type
module, converter = info
m = __import__(module, {}, {}, [type])
klass = getattr(m, type)
args = {}
for key in section.keys():
if key.lower() != "type":
args[key] = section.get(key)
if converter is not None:
args = converter(**args)
return (klass, args)
=== Added File Zope/lib/python/ZODB/StorageTypes.py ===
##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Default storage types.
Adapted from DBTab/StorageTypes.py.
"""
import re
from ZConfig.Common import asBoolean
def convertFileStorageArgs(quota=None, stop=None, **kw):
    """Convert string config values into FileStorage keyword arguments.

    quota and stop arrive as strings (or None); they are turned into a
    long and a packed 64-bit id respectively.  Boolean flags are parsed
    with asBoolean.  Returns the converted keyword dictionary.
    """
    # FileStorage doesn't accept a 'name' arg
    if kw.has_key('name'):
        del kw['name']
    if quota is not None:
        # A quota of 0 means "no quota", represented as None.
        kw['quota'] = long(quota) or None
    if stop is not None:
        stop = long(stop)
        if stop:
            # Pack the stop transaction id into its 8-byte form.
            from ZODB.utils import p64
            stop = p64(stop)
        else:
            stop = None
        kw['stop'] = stop
    # Boolean args
    for flag in ('create', 'read_only'):
        if kw.has_key(flag):
            kw[flag] = asBoolean(kw[flag])
    return kw
# Match URLs of the form 'zeo://zope.example.com:1234'
zeo_url_re = re.compile('zeo:/*(?P<host>[A-Za-z0-9\.-]+):(?P<port>[0-9]+)')

def convertAddresses(s):
    """Parse a semicolon-separated list of ZEO server addresses.

    Each entry is either a 'zeo://host:port' URL, yielding a
    (host, port) tuple with an integer port, or anything else,
    which is kept as-is (a Unix socket path).  Empty entries are
    ignored.  Returns the list of parsed addresses.
    """
    addresses = []
    for piece in s.split(';'):
        piece = piece.strip()
        if not piece:
            continue
        match = zeo_url_re.match(piece)
        if match is None:
            # Not a ZEO URL: treat the entry as a socket file path.
            addresses.append(piece)
        else:
            host = match.group('host')
            port = match.group('port')
            addresses.append((host, int(port)))
    return addresses
def convertClientStorageArgs(addr=None, **kw):
if addr is None:
raise RuntimeError, 'An addr parameter is required for ClientStorage.'
kw['addr'] = convertAddresses(addr)
# Integer args
for name in (
'cache_size', 'min_disconnect_poll', 'max_disconnect_poll',
):
if kw.has_key(name):
kw[name] = int(kw[name])
# Boolean args
for name in (
'wait', 'read_only', 'read_only_fallback',
):
if kw.has_key(name):
kw[name] = asBoolean(kw[name])
# The 'client' parameter must be None to be false. Yuck.
if kw.has_key('client') and not kw['client']:
kw['client'] = None
return kw
def convertBDBStorageArgs(**kw):
    """Convert config values for a Berkeley DB storage.

    Every public BerkeleyConfig attribute present in kw is moved onto a
    fresh BerkeleyConfig instance (converted to int, except 'logdir',
    which stays a string), and the instance is passed along under the
    'config' key.  Returns the converted keyword dictionary.
    """
    from bsddb3Storage.BerkeleyBase import BerkeleyConfig
    config = BerkeleyConfig()
    for name in dir(BerkeleyConfig):
        if name.startswith('_'):
            continue
        value = kw.get(name)
        if value is None:
            continue
        # All config values are integers except the log directory path.
        if name != 'logdir':
            value = int(value)
        setattr(config, name, value)
        del kw[name]
    # XXX: Nobody ever passes in env
    assert not kw.has_key('env')
    kw['config'] = config
    return kw
# Map a bare storage type name to (module, converter), where module is
# the dotted module defining a class of the same name, and converter is
# a function that massages string config values into the keyword
# arguments the class expects (None means no conversion is needed).
# Used by getStorageInfo() in StorageConfig.py.
storage_types = {
    'FileStorage': ('ZODB.FileStorage', convertFileStorageArgs),
    'DemoStorage': ('ZODB.DemoStorage', None),
    'MappingStorage': ('ZODB.MappingStorage', None),
    'TemporaryStorage': ('Products.TemporaryFolder.TemporaryStorage', None),
    'ClientStorage': ('ZEO.ClientStorage', convertClientStorageArgs),
    'Full': ('bsddb3Storage.Full', convertBDBStorageArgs),
    'Minimal': ('bsddb3Storage.Minimal', convertBDBStorageArgs),
    }
=== Added File Zope/lib/python/ZODB/fstools.py ===
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Tools for using FileStorage data files.
XXX This module needs tests.
XXX This file needs to be kept in sync with FileStorage.py.
"""
import cPickle
import struct
from ZODB.FileStorage import TRANS_HDR, DATA_HDR, TRANS_HDR_LEN, \
DATA_HDR_LEN, DATA_VERSION_HDR_LEN
from ZODB.utils import p64, u64
from ZODB.TimeStamp import TimeStamp
class TxnHeader:
    """Object representing a transaction record header.

    Attribute   Position  Value
    ---------   --------  -----
    tid           0- 8    transaction id
    length        8-16    length of entire transaction record - 8
    status       16-17    status of transaction (' ', 'u', 'p'?)
    user_len     17-19    length of user field (pack code H)
    descr_len    19-21    length of description field (pack code H)
    ext_len      21-23    length of extensions (pack code H)
    """

    def __init__(self, file, pos):
        # file: an open FileStorage data file.
        # pos: byte offset of this transaction header within the file.
        self._file = file
        self._pos = pos
        self._read_header()

    def _read_header(self):
        """Read and unpack the fixed-size transaction header fields."""
        self._file.seek(self._pos)
        self._hdr = self._file.read(TRANS_HDR_LEN)
        (self.tid, length, self.status, self.user_len, self.descr_len,
         self.ext_len) = struct.unpack(TRANS_HDR, self._hdr)
        # length is stored as an 8-byte big-endian field; convert to a number.
        self.length = u64(length)

    def read_meta(self):
        """Load user, descr, and ext attributes."""
        self.user = ""
        self.descr = ""
        self.ext = {}
        if not (self.user_len or self.descr_len or self.ext_len):
            return
        # The variable-length metadata immediately follows the fixed header.
        self._file.seek(self._pos + TRANS_HDR_LEN)
        if self.user_len:
            self.user = self._file.read(self.user_len)
        if self.descr_len:
            self.descr = self._file.read(self.descr_len)
        if self.ext_len:
            # The extension field is a pickled dictionary.
            self._ext = self._file.read(self.ext_len)
            self.ext = cPickle.loads(self._ext)

    def get_data_offset(self):
        """Return the file offset of this transaction's first data record."""
        return (self._pos + TRANS_HDR_LEN + self.user_len + self.descr_len
                + self.ext_len)

    def get_timestamp(self):
        """Return the transaction id as a TimeStamp object."""
        return TimeStamp(self.tid)

    def get_raw_data(self):
        """Return the raw bytes of all data records in this transaction."""
        data_off = self.get_data_offset()
        data_len = self.length - (data_off - self._pos)
        self._file.seek(data_off)
        return self._file.read(data_len)

    def next_txn(self):
        """Return the TxnHeader of the following transaction, or None
        if this is the last transaction in the file."""
        # The record is followed by an 8-byte copy of its length; skip it.
        off = self._pos + self.length + 8
        self._file.seek(off)
        s = self._file.read(8)
        if not s:
            return None
        return TxnHeader(self._file, off)

    def prev_txn(self):
        """Return the TxnHeader of the preceding transaction, or None
        if this is the first transaction (at file offset 4)."""
        if self._pos == 4:
            return None
        # The 8 bytes just before this header hold the previous record's
        # length (which excludes that trailing length field itself).
        self._file.seek(self._pos - 8)
        tlen = u64(self._file.read(8))
        return TxnHeader(self._file, self._pos - (tlen + 8))
class DataHeader:
    """Object representing a data record header.

    Attribute          Position  Value
    ---------          --------  -----
    oid                  0- 8    object id
    serial               8-16    object serial number
    prev_rec_pos        16-24    position of previous data record for object
    txn_pos             24-32    position of txn header
    version_len         32-34    length of version
    data_len            34-42    length of data
    nonversion_pos      42-50*   position of nonversion data record
    prev_version_pos    50-58*   pos of previous version data record

    * these attributes are only present if version_len != 0.
    """

    def __init__(self, file, pos):
        # file: an open FileStorage data file.
        # pos: byte offset of this data record header within the file.
        self._file = file
        self._pos = pos
        self._read_header()

    def _read_header(self):
        """Read and unpack the data record header fields."""
        self._file.seek(self._pos)
        self._hdr = self._file.read(DATA_VERSION_HDR_LEN)
        # always read the longer header, just in case
        (self.oid, self.serial, prev_rec_pos, txn_pos, self.version_len,
         data_len) = struct.unpack(DATA_HDR, self._hdr[:DATA_HDR_LEN])
        # Convert the 8-byte packed fields to numbers.
        self.prev_rec_pos = u64(prev_rec_pos)
        self.txn_pos = u64(txn_pos)
        self.data_len = u64(data_len)
        if self.version_len:
            # The two extra version fields follow the standard header.
            s = self._hdr[DATA_HDR_LEN:]
            self.nonversion_pos = u64(s[:8])
            self.prev_version_pos = u64(s[8:])
        else:
            self.nonversion_pos = None
            self.prev_version_pos = None

    def next_offset(self):
        """Return offset of next record."""
        off = self._pos + self.data_len
        if self.version_len:
            off += self.version_len + DATA_VERSION_HDR_LEN
        else:
            off += DATA_HDR_LEN
        if self.data_len == 0:
            # A record with no data stores an 8-byte backpointer instead.
            off += 8  # backpointer
        return off
def prev_txn(f):
    """Return the TxnHeader of the transaction located just before the
    current position of file f."""
    # The 8 bytes before the current position hold the length of the
    # previous transaction record, excluding that trailing field itself.
    f.seek(-8, 1)
    prev_len = u64(f.read(8)) + 8
    return TxnHeader(f, f.tell() - prev_len)
=== Zope/lib/python/ZODB/ActivityMonitor.py 1.3.4.1 => 1.3.4.2 ===
=== Zope/lib/python/ZODB/BaseStorage.py 1.20.4.7 => 1.20.4.8 ===
--- Zope/lib/python/ZODB/BaseStorage.py:1.20.4.7 Sat Oct 26 15:51:49 2002
+++ Zope/lib/python/ZODB/BaseStorage.py Sun Nov 24 19:10:12 2002
@@ -63,6 +63,15 @@
def close(self):
pass
+ def sortKey(self):
+ """Return a string that can be used to sort storage instances.
+
+ The key must uniquely identify a storage and must be the same
+ across multiple instantiations of the same storage.
+ """
+ # name may not be sufficient, e.g. ZEO has a user-definable name.
+ return self.__name__
+
def getName(self):
return self.__name__
@@ -230,6 +239,18 @@
def loadSerial(self, oid, serial):
raise POSException.Unsupported, (
"Retrieval of historical revisions is not supported")
+
+ def getExtensionMethods(self):
+ """getExtensionMethods
+
+ This returns a dictionary whose keys are names of extra methods
+ provided by this storage. Storage proxies (such as ZEO) should
+ call this method to determine the extra methods that they need
+ to proxy in addition to the standard storage methods.
+ Dictionary values should be None; this will be a handy place
+ for extra marshalling information, should we need it
+ """
+ return {}
def copyTransactionsFrom(self, other, verbose=0):
"""Copy transactions from another storage.
=== Zope/lib/python/ZODB/ConflictResolution.py 1.13.4.1 => 1.13.4.2 ===
--- Zope/lib/python/ZODB/ConflictResolution.py:1.13.4.1 Sat Sep 28 21:40:37 2002
+++ Zope/lib/python/ZODB/ConflictResolution.py Sun Nov 24 19:10:12 2002
@@ -78,7 +78,7 @@
except (ImportError, AttributeError):
zLOG.LOG("Conflict Resolution", zLOG.BLATHER,
"Unable to load class", error=sys.exc_info())
- bad_class[class_tuple] = 1
+ bad_classes[class_tuple] = 1
return None
return klass
@@ -115,6 +115,15 @@
pickler.dump(resolved)
return file.getvalue(1)
except ConflictError:
+ return 0
+ except:
+ # If anything else went wrong, catch it here and avoid passing an
+ # arbitrary exception back to the client. The error here will mask
+ # the original ConflictError. A client can recover from a
+ # ConflictError, but not necessarily from other errors. But log
+ # the error so that any problems can be fixed.
+ zLOG.LOG("Conflict Resolution", zLOG.ERROR,
+ "Unexpected error", error=sys.exc_info())
return 0
class ConflictResolvingStorage:
=== Zope/lib/python/ZODB/Connection.py 1.72.4.6 => 1.72.4.7 ===
--- Zope/lib/python/ZODB/Connection.py:1.72.4.6 Sat Oct 26 15:51:49 2002
+++ Zope/lib/python/ZODB/Connection.py Sun Nov 24 19:10:12 2002
@@ -15,7 +15,7 @@
$Id$"""
-from cPickleCache import PickleCache, MUCH_RING_CHECKING
+from cPickleCache import PickleCache
from POSException import ConflictError, ReadConflictError
from ExtensionClass import Base
import ExportImport, TmpStore
@@ -31,13 +31,6 @@
global_code_timestamp = 0
-if MUCH_RING_CHECKING:
- # To get rid of this warning, change the define inside
- # cPickleCache.c and recompile.
- LOG('ZODB',WARNING,
- 'Using cPickleCache with low performance (but extra debugging checks)')
-del MUCH_RING_CHECKING
-
def updateCodeTimestamp():
'''
Called after changes are made to persistence-based classes.
@@ -191,6 +184,14 @@
return obj
return self[oid]
+ def sortKey(self):
+ # XXX will raise an exception if the DB hasn't been set
+ storage_key = self._sortKey()
+ # If two connections use the same storage, give them a
+ # consistent order using id(). This is unique for the
+ # lifetime of a connection, which is good enough.
+ return "%s:%s" % (storage_key, id(self))
+
def _setDB(self, odb):
"""Begin a new transaction.
@@ -198,6 +199,7 @@
"""
self._db=odb
self._storage=s=odb._storage
+ self._sortKey = odb._storage.sortKey
self.new_oid=s.new_oid
if self._code_timestamp != global_code_timestamp:
# New code is in place. Start a new cache.
@@ -268,27 +270,8 @@
self.__onCommitActions.append((method_name, args, kw))
get_transaction().register(self)
- # NB: commit() is responsible for calling tpc_begin() on the storage.
- # It uses self._begun to track whether it has been called. When
- # self._begun is 0, it has not been called.
-
- # This arrangement allows us to handle the special case of a
- # transaction with no modified objects. It is possible for
- # registration to be occur unintentionally and for a persistent
- # object to compensate by making itself as unchanged. When this
- # happens, it's possible to commit a transaction with no modified
- # objects.
-
- # Since tpc_begin() may raise a ReadOnlyError, don't call it if there
- # are no objects. This avoids spurious (?) errors when working with
- # a read-only storage.
-
def commit(self, object, transaction):
if object is self:
- if not self._begun:
- self._storage.tpc_begin(transaction)
- self._begun = 1
-
# We registered ourself. Execute a commit action, if any.
if self.__onCommitActions is not None:
method_name, args, kw = self.__onCommitActions.pop(0)
@@ -313,10 +296,6 @@
# Nothing to do
return
- if not self._begun:
- self._storage.tpc_begin(transaction)
- self._begun = 1
-
stack = [object]
# Create a special persistent_id that passes T and the subobject
@@ -623,8 +602,6 @@
def tpc_begin(self, transaction, sub=None):
self._invalidating = []
self._creating = []
- self._begun = 0
-
if sub:
# Sub-transaction!
if self._tmp is None:
@@ -633,10 +610,7 @@
self._storage = _tmp
_tmp.registerDB(self._db, 0)
- # It's okay to always call tpc_begin() for a sub-transaction
- # because this isn't the real storage.
- self._storage.tpc_begin(transaction)
- self._begun = 1
+ self._storage.tpc_begin(transaction)
def tpc_vote(self, transaction):
if self.__onCommitActions is not None:
=== Zope/lib/python/ZODB/DB.py 1.43 => 1.43.4.1 ===
--- Zope/lib/python/ZODB/DB.py:1.43 Wed Aug 14 18:07:09 2002
+++ Zope/lib/python/ZODB/DB.py Sun Nov 24 19:10:12 2002
@@ -245,7 +245,8 @@
m.sort()
return m
- def close(self): self._storage.close()
+ def close(self):
+ self._storage.close()
def commitVersion(self, source, destination=''):
CommitVersion(self, source, destination)
@@ -577,7 +578,11 @@
self.tpc_begin=s.tpc_begin
self.tpc_vote=s.tpc_vote
self.tpc_finish=s.tpc_finish
+ self._sortKey=s.sortKey
get_transaction().register(self)
+
+ def sortKey(self):
+ return "%s:%s" % (self._sortKey(), id(self))
def abort(self, reallyme, t): pass
=== Zope/lib/python/ZODB/DemoStorage.py 1.12.4.7 => 1.12.4.8 ===
--- Zope/lib/python/ZODB/DemoStorage.py:1.12.4.7 Sat Oct 26 15:51:49 2002
+++ Zope/lib/python/ZODB/DemoStorage.py Sun Nov 24 19:10:13 2002
@@ -141,8 +141,8 @@
oids = []
for r in v.values():
oid, serial, pre, (version, nv), p = r
+ oids.append(oid)
if nv:
- oids.append(oid)
oid, serial, pre, vdata, p = nv
self._tindex.append([oid, serial, r, None, p])
else:
=== Zope/lib/python/ZODB/FileStorage.py 1.95.4.6 => 1.95.4.7 === (416/516 lines abridged)
--- Zope/lib/python/ZODB/FileStorage.py:1.95.4.6 Sat Oct 26 15:51:49 2002
+++ Zope/lib/python/ZODB/FileStorage.py Sun Nov 24 19:10:13 2002
@@ -145,7 +145,6 @@
return {}
from zLOG import LOG, BLATHER, WARNING, ERROR, PANIC, register_subsystem
-register_subsystem('ZODB FS')
z64='\0'*8
# the struct formats for the headers
@@ -176,7 +175,11 @@
LOG('ZODB FS', PANIC, "%s ERROR: %s\n" % (packed_version, message))
raise CorruptedTransactionError, message
-class FileStorageError(POSException.StorageError): pass
+class FileStorageError(POSException.StorageError):
+ pass
+
+class PackError(FileStorageError):
+ pass
class FileStorageFormatError(FileStorageError):
"""Invalid file format
@@ -282,6 +285,10 @@
read_only=read_only,
)
self._ltid = tid
+
+ # self._pos should always point just past the last
+ # transaction. During 2PC, data is written after _pos.
+ # invariant is restored at tpc_abort() or tpc_finish().
self._ts = tid = TimeStamp(tid)
t = time.time()
@@ -309,9 +316,6 @@
# hook to use something other than builtin dict
return {}, {}, {}, {}
- def abortVersion(self, src, transaction):
- return self.commitVersion(src, '', transaction, abort=1)
-
def _save_index(self):
"""Write the database index to a file to support quick startup
"""
@@ -399,15 +403,12 @@
return ltid
def _restore_index(self):
- """Load the database index from a file to support quick startup
- """
[-=- -=- -=- 416 lines omitted -=- -=- -=-]
prev = U64(sprev)
tloc = U64(stloc)
plen = U64(splen)
-
dlen = DATA_HDR_LEN + (plen or 8)
if vlen:
@@ -2397,29 +2400,35 @@
else:
version = ''
+ datapos = pos + DATA_HDR_LEN
+ if vlen:
+ datapos += 16 + vlen
+ assert self._file.tell() == datapos, (self._file.tell(), datapos)
+
if pos + dlen > self._tend or tloc != self._tpos:
warn("%s data record exceeds transaction record at %s",
file.name, pos)
break
self._pos = pos + dlen
- tid = None
+ prev_txn = None
if plen:
- p = self._file.read(plen)
+ data = self._file.read(plen)
else:
- p = self._file.read(8)
- if p == z64:
+ bp = self._file.read(8)
+ if bp == z64:
# If the backpointer is 0 (encoded as z64), then
# this transaction undoes the object creation. It
# either aborts the version that created the
# object or undid the transaction that created it.
# Return None instead of a pickle to indicate
# this.
- p = None
+ data = None
else:
- p, _s, tid = _loadBackTxn(self._file, oid, p)
+ data, _s, tid = _loadBackTxn(self._file, oid, bp)
+ prev_txn = getTxnFromData(self._file, oid, bp)
- r = Record(oid, serial, version, p, tid)
+ r = Record(oid, serial, version, data, prev_txn)
return r
=== Zope/lib/python/ZODB/POSException.py 1.12.4.9 => 1.12.4.10 ===
=== Zope/lib/python/ZODB/TimeStamp.c 1.15.58.3 => 1.15.58.4 ===
--- Zope/lib/python/ZODB/TimeStamp.c:1.15.58.3 Sat Oct 26 15:51:49 2002
+++ Zope/lib/python/ZODB/TimeStamp.c Sun Nov 24 19:10:13 2002
@@ -17,14 +17,12 @@
"\n"
"\n$Id$\n";
-#include <stdlib.h>
-#include <time.h>
#ifdef USE_EXTENSION_CLASS
#include "ExtensionClass.h"
#else
#include "Python.h"
#endif
-
+#include <time.h>
/* ----------------------------------------------------- */
=== Zope/lib/python/ZODB/Transaction.py 1.37.4.5 => 1.37.4.6 ===
--- Zope/lib/python/ZODB/Transaction.py:1.37.4.5 Sat Oct 26 15:51:49 2002
+++ Zope/lib/python/ZODB/Transaction.py Sun Nov 24 19:10:13 2002
@@ -19,12 +19,34 @@
import time, sys, struct, POSException
from struct import pack
from string import split, strip, join
-from zLOG import LOG, ERROR, PANIC
+from zLOG import LOG, ERROR, PANIC, INFO, BLATHER, WARNING
from POSException import ConflictError
+from ZODB import utils
# Flag indicating whether certain errors have occurred.
hosed=0
+# There is an order imposed on all jars, based on the storages they
+# serve, that must be consistent across all applications using the
+# storages. The order is defined by the sortKey() method of the jar.
+
+def jar_cmp(j1, j2):
+ # Call sortKey() every time, because a ZEO client could reconnect
+ # to a different server at any time.
+ try:
+ k1 = j1.sortKey()
+ except:
+ LOG("TM", WARNING, "jar missing sortKey() method: %s" % j1)
+ k1 = id(j1)
+
+ try:
+ k2 = j2.sortKey()
+ except:
+ LOG("TM", WARNING, "jar missing sortKey() method: %s" % j2)
+ k2 = id(j2)
+
+ return cmp(k1, k2)
+
class Transaction:
'Simple transaction objects for single-threaded applications.'
user=''
@@ -53,6 +75,9 @@
for c in self._connections.values(): c.close()
del self._connections
+ def log(self, msg, level=INFO, error=None):
+ LOG("TM:%s" % self._id, level, msg, error=error)
+
def sub(self):
# Create a manually managed subtransaction for internal use
r=self.__class__()
@@ -84,11 +109,8 @@
""")
t = None
- subj = self._sub
- subjars = ()
if not subtransaction:
-
# Must add in any non-subtransaction supporting objects that
# may have been stowed away from previous subtransaction
# commits.
@@ -96,11 +118,14 @@
self._objects.extend(self._non_st_objects)
self._non_st_objects = None
- if subj is not None:
+ if self._sub is not None:
# Abort of top-level transaction after commiting
# subtransactions.
- subjars = subj.values()
+ subjars = self._sub.values()
+ subjars.sort(jar_cmp)
self._sub = None
+ else:
+ subjars = []
try:
# Abort the objects
@@ -110,13 +135,20 @@
if j is not None:
j.abort(o, self)
except:
+ # Record the first exception that occurred
if t is None:
t, v, tb = sys.exc_info()
+ else:
+ self.log("Failed to abort object %016x" %
+ utils.U64(o._p_oid), error=sys.exc_info())
- # Ugh, we need to abort work done in sub-transactions.
- while subjars:
- j = subjars.pop()
- j.abort_sub(self) # This should never fail
+ # tpc_begin() was never called, so tpc_abort() should not be
+ # called.
+
+ if not subtransaction:
+ # abort_sub() must be called to clear subtransaction state
+ for jar in subjars:
+ jar.abort_sub(self) # This should never fail
if t is not None:
raise t, v, tb
@@ -136,7 +168,8 @@
This aborts any transaction in progres.
'''
- if self._objects: self.abort(subtransaction, 0)
+ if self._objects:
+ self.abort(subtransaction, 0)
if info:
info=split(info,'\t')
self.user=strip(info[0])
@@ -146,30 +179,32 @@
'Finalize the transaction'
objects = self._objects
- jars = {}
- jarsv = None
- subj = self._sub
- subjars = ()
+ subjars = []
if subtransaction:
- if subj is None:
- self._sub = subj = {}
+ if self._sub is None:
+ # Must store state across multiple subtransactions
+ # so that the final commit can commit all subjars.
+ self._sub = {}
else:
- if subj is not None:
+ if self._sub is not None:
+ # This commit is for a top-level transaction that
+ # has previously committed subtransactions. Do
+ # one last subtransaction commit to clear out the
+ # current objects, then commit all the subjars.
if objects:
- # Do an implicit sub-transaction commit:
self.commit(1)
- # XXX What does this do?
objects = []
- subjars = subj.values()
+ subjars = self._sub.values()
+ subjars.sort(jar_cmp)
self._sub = None
- # If not a subtransaction, then we need to add any non-
- # subtransaction-supporting objects that may have been
- # stowed away during subtransaction commits to _objects.
- if (subtransaction is None) and (self._non_st_objects is not None):
- objects.extend(self._non_st_objects)
- self._non_st_objects = None
+ # If there were any non-subtransaction-aware jars
+ # involved in earlier subtransaction commits, we need
+ # to add them to the list of jars to commit.
+ if self._non_st_objects is not None:
+ objects.extend(self._non_st_objects)
+ self._non_st_objects = None
if (objects or subjars) and hosed:
# Something really bad happened and we don't
@@ -188,88 +223,140 @@
# either call tpc_abort or tpc_finish. It is OK to call
# these multiple times, as the storage is required to ignore
# these calls if tpc_begin has not been called.
+ #
+ # - That we call tpc_begin() in a globally consistent order,
+ # so that concurrent transactions involving multiple storages
+ # do not deadlock.
try:
ncommitted = 0
+ jars = self._get_jars(objects, subtransaction)
try:
- ncommitted += self._commit_objects(objects, jars,
- subtransaction, subj)
-
- self._commit_subtrans(jars, subjars)
-
- jarsv = jars.values()
- for jar in jarsv:
- if not subtransaction:
+ # If not subtransaction, then jars will be modified.
+ self._commit_begin(jars, subjars, subtransaction)
+ ncommitted += self._commit_objects(objects)
+ if not subtransaction:
+ # Unless this is a really old jar that doesn't
+ # implement tpc_vote(), it must raise an exception
+ # if it can't commit the transaction.
+ for jar in jars:
try:
vote = jar.tpc_vote
- except:
+ except AttributeError:
pass
else:
- vote(self) # last chance to bail
+ vote(self)
# Handle multiple jars separately. If there are
# multiple jars and one fails during the finish, we
# mark this transaction manager as hosed.
- if len(jarsv) == 1:
- self._finish_one(jarsv[0])
+ if len(jars) == 1:
+ self._finish_one(jars[0])
else:
- self._finish_many(jarsv)
+ self._finish_many(jars)
except:
# Ugh, we got an got an error during commit, so we
- # have to clean up.
- exc_info = sys.exc_info()
- if jarsv is None:
- jarsv = jars.values()
- self._commit_error(exc_info, objects, ncommitted,
- jarsv, subjars)
+ # have to clean up. First save the original exception
+ # in case the cleanup process causes another
+ # exception.
+ t, v, tb = sys.exc_info()
+ try:
+ self._commit_error(objects, ncommitted, jars, subjars)
+ except:
+ LOG('ZODB', ERROR,
+ "A storage error occured during transaction "
+ "abort. This shouldn't happen.",
+ error=sys.exc_info())
+
+ raise t, v, tb
finally:
del objects[:] # clear registered
if not subtransaction and self._id is not None:
free_transaction()
- def _commit_objects(self, objects, jars, subtransaction, subj):
- # commit objects and return number of commits
- ncommitted = 0
+ def _get_jars(self, objects, subtransaction):
+ # Returns a list of jars for this transaction.
+
+ # Find all the jars and sort them in a globally consistent order.
+ # objects is a list of persistent objects and jars.
+ # If this is a subtransaction and a jar is not subtransaction aware,
+ # it's object gets delayed until the parent transaction commits.
+
+ d = {}
for o in objects:
- j = getattr(o, '_p_jar', o)
- if j is not None:
- i = id(j)
- if not jars.has_key(i):
- jars[i] = j
-
- if subtransaction:
- # If a jar does not support subtransactions,
- # we need to save it away to be committed in
- # the outer transaction.
- try:
- j.tpc_begin(self, subtransaction)
- except TypeError:
- j.tpc_begin(self)
+ jar = getattr(o, '_p_jar', o)
+ if jar is None:
+ # I don't think this should ever happen, but can't
+ # prove that it won't. If there is no jar, there
+ # is nothing to be done.
+ self.log("Object with no jar registered for transaction: "
+ "%s" % repr(o), level=BLATHER)
+ continue
+ # jar may not be safe as a dictionary key
+ key = id(jar)
+ d[key] = jar
+
+ if subtransaction:
+ if hasattr(jar, "commit_sub"):
+ self._sub[key] = jar
+ else:
+ if self._non_st_objects is None:
+ self._non_st_objects = []
+ self._non_st_objects.append(o)
+
+ jars = d.values()
+ jars.sort(jar_cmp)
- if hasattr(j, 'commit_sub'):
- subj[i] = j
- else:
- if self._non_st_objects is None:
- self._non_st_objects = []
- self._non_st_objects.append(o)
- continue
- else:
- j.tpc_begin(self)
- j.commit(o, self)
+ return jars
+
+ def _commit_begin(self, jars, subjars, subtransaction):
+ if subtransaction:
+ assert not subjars
+ for jar in jars:
+ try:
+ jar.tpc_begin(self, subtransaction)
+ except TypeError:
+ # Assume that TypeError means that tpc_begin() only
+ # takes one argument, and that the jar doesn't
+ # support subtransactions.
+ jar.tpc_begin(self)
+ else:
+ # Merge in all the jars used by one of the subtransactions.
+
+ # When the top-level subtransaction commits, the tm must
+ # call commit_sub() for each jar involved in one of the
+ # subtransactions. The commit_sub() method should call
+ # tpc_begin() on the storage object.
+
+ # It must also call tpc_begin() on jars that were used in
+ # a subtransaction but don't support subtransactions.
+
+ # These operations must be performed on the jars in order.
+
+ # Modify jars inplace to include the subjars, too.
+ jars += subjars
+ jars.sort(jar_cmp)
+ # assume that subjars is small, so that it's cheaper to test
+ # whether jar in subjars than to make a dict and do has_key.
+ for jar in jars:
+ if jar in subjars:
+ jar.commit_sub(self)
+ else:
+ jar.tpc_begin(self)
+
+ def _commit_objects(self, objects):
+ ncommitted = 0
+ for o in objects:
+ jar = getattr(o, "_p_jar", o)
+ if jar is None:
+ continue
+ jar.commit(o, self)
ncommitted += 1
return ncommitted
- def _commit_subtrans(self, jars, subjars):
- # Commit work done in subtransactions
- while subjars:
- j = subjars.pop()
- i = id(j)
- if not jars.has_key(i):
- jars[i] = j
- j.commit_sub(self)
-
def _finish_one(self, jar):
try:
- jar.tpc_finish(self) # This should never fail
+ # The database can't guarantee consistency if call fails.
+ jar.tpc_finish(self)
except:
# Bug if it does, we need to keep track of it
LOG('ZODB', ERROR,
@@ -278,42 +365,40 @@
error=sys.exc_info())
raise
- def _finish_many(self, jarsv):
+ def _finish_many(self, jars):
global hosed
try:
- while jarsv:
- jarsv[-1].tpc_finish(self) # This should never fail
- jarsv.pop() # It didn't, so it's taken care of.
+ for jar in jars:
+ # The database can't guarantee consistency if call fails.
+ jar.tpc_finish(self)
except:
- # Bug if it does, we need to yell FIRE!
- # Someone finished, so don't allow any more
- # work without at least a restart!
hosed = 1
LOG('ZODB', PANIC,
"A storage error occurred in the last phase of a "
"two-phase commit. This shouldn\'t happen. "
- "The application may be in a hosed state, so "
- "transactions will not be allowed to commit "
+ "The application will not be allowed to commit "
"until the site/storage is reset by a restart. ",
error=sys.exc_info())
raise
- def _commit_error(self, (t, v, tb),
- objects, ncommitted, jarsv, subjars):
- # handle an exception raised during commit
- # takes sys.exc_info() as argument
-
- # First, we have to abort any uncommitted objects.
+ def _commit_error(self, objects, ncommitted, jars, subjars):
+ # First, we have to abort any uncommitted objects. The abort
+ # will mark the object for invalidation, so that it's last
+ # committed state will be restored.
for o in objects[ncommitted:]:
try:
j = getattr(o, '_p_jar', o)
if j is not None:
j.abort(o, self)
except:
- pass
-
- # Then, we unwind TPC for the jars that began it.
- for j in jarsv:
+ # nothing to do but log the error
+ self.log("Failed to abort object %016x" % utils.U64(o._p_oid),
+ error=sys.exc_info())
+
+ # Abort the two-phase commit. It's only necessary to abort the
+ # commit for jars that began it, but it is harmless to abort it
+ # for all.
+ for j in jars:
try:
j.tpc_abort(self) # This should never fail
except:
@@ -321,9 +406,14 @@
"A storage error occured during object abort. This "
"shouldn't happen. ", error=sys.exc_info())
- # Ugh, we need to abort work done in sub-transactions.
- while subjars:
- j = subjars.pop()
+ # After the tpc_abort(), call abort_sub() on all the
+ # subtrans-aware jars to *really* abort the subtransaction.
+
+ # Example: For Connection(), the tpc_abort() will abort the
+ # subtransaction TmpStore() and abort_sub() will remove the
+ # TmpStore.
+
+ for j in subjars:
try:
j.abort_sub(self) # This should never fail
except:
@@ -332,8 +422,6 @@
"object abort. This shouldn't happen.",
error=sys.exc_info())
- raise t, v, tb
-
def register(self,object):
'Register the given object for transaction control.'
self._append(object)
@@ -365,8 +453,6 @@
the system problem. See your application log for
information on the error that lead to this problem.
"""
-
-
############################################################################
# install get_transaction:
=== Zope/lib/python/ZODB/__init__.py 1.13.4.4 => 1.13.4.5 ===
--- Zope/lib/python/ZODB/__init__.py:1.13.4.4 Tue Oct 8 17:45:55 2002
+++ Zope/lib/python/ZODB/__init__.py Sun Nov 24 19:10:13 2002
@@ -12,12 +12,11 @@
#
##############################################################################
-__version__ = '3.1b2'
+__version__ = '3.1+'
import sys
import cPersistence, Persistence
from zLOG import register_subsystem
-register_subsystem('ZODB')
# This is lame. Don't look. :(
sys.modules['cPersistence'] = cPersistence
=== Zope/lib/python/ZODB/cPersistence.c 1.62.8.3 => 1.62.8.4 ===
=== Zope/lib/python/ZODB/cPickleCache.c 1.68 => 1.68.8.1 ===
--- Zope/lib/python/ZODB/cPickleCache.c:1.68 Mon Jun 10 22:34:22 2002
+++ Zope/lib/python/ZODB/cPickleCache.c Sun Nov 24 19:10:13 2002
@@ -159,73 +159,9 @@
return v;
}
-/* define this for extra debugging checks, and lousy performance.
- Not really necessary in production code... disable this before
- release, providing noone has been reporting and RuntimeErrors
- that it uses to report problems.
-*/
-
-/* #define MUCH_RING_CHECKING */
-
-#ifdef MUCH_RING_CHECKING
-static int present_in_ring(ccobject *self, CPersistentRing *target);
-static int ring_corrupt(ccobject *self, const char *context);
-
-#define IS_RING_CORRUPT(OBJ, CTX) ring_corrupt((OBJ), (CTX))
-#define OBJECT_FROM_RING(SELF, HERE, CTX) \
- object_from_ring((SELF), (HERE), (CTX))
-
-static cPersistentObject *
-object_from_ring(ccobject *self, CPersistentRing *here, const char *context)
-{
- /* Given a position in the LRU ring, return a borrowed
- reference to the object at that point in the ring. The caller is
- responsible for ensuring that this ring position really does
- correspond to a persistent object, although the debugging
- version will double-check this. */
-
- PyObject *object;
-
- /* given a pointer to a ring slot in a cPersistent_HEAD, we want to get
- * the pointer to the Python object that slot is embedded in.
- */
- object = (PyObject *)(((char *)here) - offsetof(cPersistentObject, ring));
-
- if (!PyExtensionInstance_Check(object)) {
- PyErr_Format(PyExc_RuntimeError,
- "Unexpectedly encountered non-ExtensionClass object in %s",
- context);
- return NULL;
- }
- if (!(((PyExtensionClass*)(object->ob_type))->class_flags & PERSISTENT_TYPE_FLAG)) {
- PyErr_Format(PyExc_RuntimeError,
- "Unexpectedly encountered non-persistent object in %s", context);
- return NULL;
- }
- if (((cPersistentObject*)object)->jar != self->jar) {
- PyErr_Format(PyExc_RuntimeError,
- "Unexpectedly encountered object from a different jar in %s",
- context);
- return NULL;
- }
- if (((cPersistentObject *)object)->cache != (PerCache *)self) {
- PyErr_Format(PyExc_RuntimeError,
- "Unexpectedly encountered broken ring in %s", context);
- return NULL;
- }
- return (cPersistentObject *)object;
-}
-
-#else /* MUCH_RING_CHECKING */
-
-#define IS_RING_CORRUPT(OBJ, CTX) 0
-
#define OBJECT_FROM_RING(SELF, HERE, CTX) \
((cPersistentObject *)(((char *)here) - offsetof(cPersistentObject, ring)))
-#endif
-
-
static int
scan_gc_items(ccobject *self,int target)
{
@@ -238,44 +174,11 @@
CPersistentRing placeholder;
CPersistentRing *here = self->ring_home.next;
-#ifdef MUCH_RING_CHECKING
- int safety_counter = self->cache_size * 10;
- if (safety_counter < 10000)
- safety_counter = 10000;
-#endif
-
/* Scan through the ring until we either find the ring_home (i.e. start
* of the ring, or we've ghosted enough objects to reach the target
* size.
*/
while (1) {
- if (IS_RING_CORRUPT(self, "mid-gc"))
- return -1;
-
-#ifdef MUCH_RING_CHECKING
- if (!safety_counter--) {
- /* This loop has been running for a very long time. It is
- possible that someone loaded a very large number of objects,
- and now wants us to blow them all away. However it may also
- indicate a logic error. If the loop has been running this
- long then you really have to doubt it will ever terminate.
- In the MUCH_RING_CHECKING build we prefer to raise an
- exception here
- */
- PyErr_SetString(PyExc_RuntimeError,
- "scan_gc_items safety counter exceeded");
- return -1;
- }
-
- if (!present_in_ring(self, here)) {
- /* Our current working position is no longer in the ring.
- That's bad. */
- PyErr_SetString(PyExc_RuntimeError,
- "working position fell out the ring, in scan_gc_items");
- return -1;
- }
-#endif
-
/* back to the home position. stop looking */
if (here == &self->ring_home)
return 0;
@@ -345,8 +248,6 @@
return Py_None;
}
- if (IS_RING_CORRUPT(self, "pre-gc"))
- return NULL;
ENGINE_NOISE("<");
self->ring_lock = 1;
if (scan_gc_items(self, target_size)) {
@@ -355,8 +256,6 @@
}
self->ring_lock = 0;
ENGINE_NOISE(">\n");
- if (IS_RING_CORRUPT(self, "post-gc"))
- return NULL;
Py_INCREF(Py_None);
return Py_None;
@@ -549,9 +448,6 @@
return NULL;
}
- if (IS_RING_CORRUPT(self, "pre-cc_items"))
- return NULL;
-
l = PyList_New(0);
if (l == NULL)
return NULL;
@@ -701,9 +597,6 @@
{
PyObject *r;
- if (IS_RING_CORRUPT(self, "getattr"))
- return NULL;
-
if(*name=='c')
{
if(strcmp(name,"cache_age")==0)
@@ -776,9 +669,6 @@
{
PyObject *r;
- if (IS_RING_CORRUPT(self, "__getitem__"))
- return NULL;
-
r = (PyObject *)object_from_oid(self, key);
if (r == NULL) {
PyErr_SetObject(PyExc_KeyError, key);
@@ -884,8 +774,6 @@
*/
}
- if (IS_RING_CORRUPT(self, "pre-setitem"))
- return -1;
if (PyDict_SetItem(self->data, key, v) < 0)
return -1;
@@ -906,10 +794,7 @@
Py_DECREF(v);
}
- if (IS_RING_CORRUPT(self, "post-setitem"))
- return -1;
- else
- return 0;
+ return 0;
}
static int
@@ -919,9 +804,6 @@
cPersistentObject *p;
/* unlink this item from the ring */
- if (IS_RING_CORRUPT(self, "pre-delitem"))
- return -1;
-
v = (PyObject *)object_from_oid(self, key);
if (v == NULL)
return -1;
@@ -957,9 +839,6 @@
return -1;
}
- if (IS_RING_CORRUPT(self, "post-delitem"))
- return -1;
-
return 0;
}
@@ -978,101 +857,6 @@
return cc_del_item(self, key);
}
-#ifdef MUCH_RING_CHECKING
-static int
-_ring_corrupt(ccobject *self, const char *context)
-{
- CPersistentRing *here = &(self->ring_home);
-
- /* Determine the number of objects we expect to see in the ring.
- * Normally this is one for the home node plus one for each
- * non-ghost object, for which we maintain a separate total. If the
- * ring is unlocked then this value should be precise; there should
- * be no foreign nodes in the ring. If locked, it may be an
- * underestimate */
- int expected = 1 + self->non_ghost_count;
-
- int total = 0;
- do {
- if (++total > (expected + 10))
- /* ring too big, by a large margin. This probably
- * means we are stomping through random memory. Abort
- * now, and maybe we can deliver this error message
- * before dumping core */
- return 3;
- if (!here->next)
- return 4; /* various linking problems */
- if (!here->prev)
- return 5;
- if (!here->next->prev)
- return 7;
- if (!here->prev->next)
- return 8;
- if (here->prev->next != here)
- return 9;
- if (here->next->prev != here)
- return 10;
- if (!self->ring_lock) {
- /* If the ring is unlocked, then it must not contain
- * objects other than persistent instances (and the home) */
- if (here != &self->ring_home) {
- cPersistentObject *object = OBJECT_FROM_RING(self, here,
- context);
- if (!object)
- return 12;
- if (object->state == cPersistent_GHOST_STATE)
- /* ghost objects should not be in the ring, according
- * to the ghost storage regime. Experience shows
- * that this error condition is likely to be caused
- * by a race condition bug somewhere */
- return 13;
- }
- }
- here = here->next;
- } while (here != &self->ring_home);
-
- if (self->ring_lock) {
- if (total < expected)
- /* ring is too small.
- too big is ok when locked, we have already checked it is
- not too big */
- return 6;
- } else {
- if (total != expected)
- return 14; /* ring size wrong, or bad ghost accounting */
- }
-
- return 0;
-}
-
-static int
-ring_corrupt(ccobject *self, const char *context)
-{
- int code = _ring_corrupt(self, context);
- if (code) {
- if (!PyErr_Occurred())
- PyErr_Format(PyExc_RuntimeError,
- "broken ring (code %d) in %s, size %d",
- code, context, PyDict_Size(self->data));
- return code;
- }
- return 0;
-}
-
-static int
-present_in_ring(ccobject *self,CPersistentRing *target)
-{
- CPersistentRing *here = self->ring_home.next;
- while (1) {
- if (here == target)
- return 1;
- if (here == &self->ring_home)
- return 0; /* back to the home position, and we didnt find it */
- here = here->next;
- }
-}
-#endif /* MUCH_RING_CHECKING */
-
static PyMappingMethods cc_as_mapping = {
(inquiry)cc_length, /*mp_length*/
(binaryfunc)cc_subscript, /*mp_subscript*/
@@ -1174,10 +958,4 @@
d = PyModule_GetDict(m);
PyDict_SetItemString(d, "cache_variant", PyString_FromString("stiff/c"));
-
-#ifdef MUCH_RING_CHECKING
- PyDict_SetItemString(d, "MUCH_RING_CHECKING", PyInt_FromLong(1));
-#else
- PyDict_SetItemString(d, "MUCH_RING_CHECKING", PyInt_FromLong(0));
-#endif
}
=== Zope/lib/python/ZODB/fsdump.py 1.3.68.1 => 1.3.68.2 ===
--- Zope/lib/python/ZODB/fsdump.py:1.3.68.1 Sat Oct 26 15:51:49 2002
+++ Zope/lib/python/ZODB/fsdump.py Sun Nov 24 19:10:13 2002
@@ -76,3 +76,80 @@
print >> file
i += 1
iter.close()
+
+import struct
+from ZODB.FileStorage import TRANS_HDR, TRANS_HDR_LEN
+from ZODB.FileStorage import DATA_HDR, DATA_HDR_LEN
+
+def fmt(p64):
+ # Return a nicely formatted string for a packed 64-bit value
+ return "%016x" % U64(p64)
+
+class Dumper:
+ """A very verbose dumper for debuggin FileStorage problems."""
+
+ def __init__(self, path, dest=None):
+ self.file = open(path, "rb")
+ self.dest = dest
+
+ def dump(self):
+ fid = self.file.read(4)
+ print >> self.dest, "*" * 60
+ print >> self.dest, "file identifier: %r" % fid
+ while self.dump_txn():
+ pass
+
+ def dump_txn(self):
+ pos = self.file.tell()
+ h = self.file.read(TRANS_HDR_LEN)
+ if not h:
+ return False
+ tid, stlen, status, ul, dl, el = struct.unpack(TRANS_HDR, h)
+ end = pos + U64(stlen)
+ print >> self.dest, "=" * 60
+ print >> self.dest, "offset: %d" % pos
+ print >> self.dest, "end pos: %d" % end
+ print >> self.dest, "transaction id: %s" % fmt(tid)
+ print >> self.dest, "trec len: %d" % U64(stlen)
+ print >> self.dest, "status: %r" % status
+ user = descr = extra = ""
+ if ul:
+ user = self.file.read(ul)
+ if dl:
+ descr = self.file.read(dl)
+ if el:
+ extra = self.file.read(el)
+ print >> self.dest, "user: %r" % user
+ print >> self.dest, "description: %r" % descr
+ print >> self.dest, "len(extra): %d" % el
+ while self.file.tell() < end:
+ self.dump_data(pos)
+ stlen2 = self.file.read(8)
+ print >> self.dest, "redundant trec len: %d" % U64(stlen2)
+ return True
+
+ def dump_data(self, tloc):
+ pos = self.file.tell()
+ h = self.file.read(DATA_HDR_LEN)
+ assert len(h) == DATA_HDR_LEN
+ oid, revid, sprev, stloc, vlen, sdlen = struct.unpack(DATA_HDR, h)
+ dlen = U64(sdlen)
+ print >> self.dest, "-" * 60
+ print >> self.dest, "offset: %d" % pos
+ print >> self.dest, "oid: %s" % fmt(oid)
+ print >> self.dest, "revid: %s" % fmt(revid)
+ print >> self.dest, "previous record offset: %d" % U64(sprev)
+ print >> self.dest, "transaction offset: %d" % U64(stloc)
+ if vlen:
+ pnv = self.file.read(8)
+ sprevdata = self.file.read(8)
+ version = self.file.read(vlen)
+ print >> self.dest, "version: %r" % version
+ print >> self.dest, "non-version data offset: %d" % U64(pnv)
+ print >> self.dest, \
+ "previous version data offset: %d" % U64(sprevdata)
+ print >> self.dest, "len(data): %d" % dlen
+ self.file.read(dlen)
+ if not dlen:
+ sbp = self.file.read(8)
+ print >> self.dest, "backpointer: %d" % U64(sbp)
=== Zope/lib/python/ZODB/utils.py 1.12 => 1.12.4.1 ===
--- Zope/lib/python/ZODB/utils.py:1.12 Wed Aug 14 18:07:09 2002
+++ Zope/lib/python/ZODB/utils.py Sun Nov 24 19:10:13 2002
@@ -13,18 +13,20 @@
##############################################################################
import sys
-import TimeStamp, time, struct
+import TimeStamp, time
+
+from struct import pack, unpack
if sys.version >= (2, 2):
# Note that the distinction between ints and longs is blurred in
# Python 2.2. So make u64() and U64() the same.
- def p64(v, pack=struct.pack):
+ def p64(v):
"""Pack an integer or long into a 8-byte string"""
return pack(">Q", v)
- def u64(v, unpack=struct.unpack):
+ def u64(v):
"""Unpack an 8-byte string into a 64-bit long integer."""
return unpack(">Q", v)[0]
@@ -34,7 +36,7 @@
t32 = 1L << 32
- def p64(v, pack=struct.pack):
+ def p64(v):
"""Pack an integer or long into a 8-byte string"""
if v < t32:
h = 0
@@ -42,7 +44,7 @@
h, v = divmod(v, t32)
return pack(">II", h, v)
- def u64(v, unpack=struct.unpack):
+ def u64(v):
"""Unpack an 8-byte string into a 64-bit (or long) integer."""
h, v = unpack(">ii", v)
if v < 0:
@@ -53,7 +55,7 @@
v = (long(h) << 32) + v
return v
- def U64(v, unpack=struct.unpack):
+ def U64(v):
"""Same as u64 but always returns a long."""
h, v = unpack(">II", v)
if h: