[Zodb-checkins] SVN: ZODB/branches/3.3/src/ Merge rev 29446 from
ZODB trunk.
Tim Peters
tim.one at comcast.net
Fri Mar 11 17:42:21 EST 2005
Log message for revision 29447:
Merge rev 29446 from ZODB trunk.
Convert some XXXs. More to come.
Changed:
U ZODB/branches/3.3/src/BTrees/BTreeTemplate.c
U ZODB/branches/3.3/src/BTrees/BucketTemplate.c
U ZODB/branches/3.3/src/ZEO/ClientStorage.py
U ZODB/branches/3.3/src/ZEO/auth/auth_digest.py
U ZODB/branches/3.3/src/ZEO/cache.py
U ZODB/branches/3.3/src/ZEO/tests/Cache.py
U ZODB/branches/3.3/src/ZEO/tests/CommitLockTests.py
U ZODB/branches/3.3/src/ZEO/tests/ConnectionTests.py
U ZODB/branches/3.3/src/ZEO/zrpc/client.py
U ZODB/branches/3.3/src/ZEO/zrpc/connection.py
U ZODB/branches/3.3/src/ZODB/BaseStorage.py
U ZODB/branches/3.3/src/ZODB/Connection.py
U ZODB/branches/3.3/src/ZODB/DB.py
U ZODB/branches/3.3/src/ZODB/DemoStorage.py
U ZODB/branches/3.3/src/ZODB/FileStorage/FileStorage.py
U ZODB/branches/3.3/src/ZODB/FileStorage/fsdump.py
U ZODB/branches/3.3/src/ZODB/FileStorage/fspack.py
U ZODB/branches/3.3/src/ZODB/broken.py
U ZODB/branches/3.3/src/ZODB/fstools.py
U ZODB/branches/3.3/src/ZODB/tests/BasicStorage.py
U ZODB/branches/3.3/src/ZODB/tests/ConflictResolution.py
U ZODB/branches/3.3/src/persistent/cPersistence.c
U ZODB/branches/3.3/src/persistent/cPickleCache.c
U ZODB/branches/3.3/src/scripts/fstest.py
-=-
Modified: ZODB/branches/3.3/src/BTrees/BTreeTemplate.c
===================================================================
--- ZODB/branches/3.3/src/BTrees/BTreeTemplate.c 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/BTrees/BTreeTemplate.c 2005-03-11 22:42:21 UTC (rev 29447)
@@ -239,7 +239,7 @@
factory = PyObject_GetAttr((PyObject *)self->ob_type, _bucket_type_str);
if (factory == NULL)
return NULL;
- /* XXX Should we check that the factory actually returns something
+ /* TODO: Should we check that the factory actually returns something
of the appropriate type? How? The C code here is going to
depend on any custom bucket type having the same layout at the
C level.
@@ -469,7 +469,7 @@
Bucket *result;
UNLESS (self->data && self->len) {
- IndexError(-1); /*XXX*/
+ IndexError(-1); /* is this the best action to take? */
return NULL;
}
@@ -1783,9 +1783,9 @@
/* End of iterator support. */
-/* XXX Even though the _firstbucket attribute is read-only, a program
- could probably do arbitrary damage to a the btree internals. For
- example, it could call clear() on a bucket inside a BTree.
+/* Caution: Even though the _firstbucket attribute is read-only, a program
+ could do arbitrary damage to the btree internals. For example, it could
+ call clear() on a bucket inside a BTree.
We need to decide if the convenience for inspecting BTrees is worth
the risk.
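A minimal Python sketch (not part of the patch) of the damage described above; the BTrees names are real, the scenario is hypothetical:

    from BTrees.OOBTree import OOBTree

    t = OOBTree()
    for i in range(1000):
        t[i] = i                 # enough keys to force several buckets

    b = t._firstbucket           # read-only attribute, but a live internal bucket
    b.clear()                    # empties that bucket behind the BTree's back;
                                 # the tree's structure no longer matches its
                                 # contents, so len(t) and iteration can disagree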
Modified: ZODB/branches/3.3/src/BTrees/BucketTemplate.c
===================================================================
--- ZODB/branches/3.3/src/BTrees/BucketTemplate.c 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/BTrees/BucketTemplate.c 2005-03-11 22:42:21 UTC (rev 29447)
@@ -1415,9 +1415,9 @@
}
#endif
-/* XXX Even though the _next attribute is read-only, a program could
- probably do arbitrary damage to a the btree internals. For
- example, it could call clear() on a bucket inside a BTree.
+/* Caution: Even though the _next attribute is read-only, a program could
+ do arbitrary damage to the btree internals. For example, it could call
+ clear() on a bucket inside a BTree.
We need to decide if the convenience for inspecting BTrees is worth
the risk.
Modified: ZODB/branches/3.3/src/ZEO/ClientStorage.py
===================================================================
--- ZODB/branches/3.3/src/ZEO/ClientStorage.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZEO/ClientStorage.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -262,7 +262,7 @@
# _seriald: _check_serials() moves from _serials to _seriald,
# which maps oid to serialno
- # XXX If serial number matches transaction id, then there is
+ # TODO: If serial number matches transaction id, then there is
# no need to have all this extra infrastructure for handling
# serial numbers. The vote call can just return the tid.
# If there is a conflict error, we can't have a special method
@@ -310,7 +310,7 @@
else:
cache_path = None
self._cache = self.ClientCacheClass(cache_path, size=cache_size)
- # XXX When should it be opened?
+ # TODO: maybe there's a better time to open the cache? Unclear.
self._cache.open()
self._rpc_mgr = self.ConnectionManagerClass(addr, self,
@@ -459,7 +459,7 @@
exception raised by register() is passed through.
"""
log2("Testing connection %r" % conn)
- # XXX Check the protocol version here?
+ # TODO: Should we check the protocol version here?
self._conn_is_read_only = 0
stub = self.StorageServerStubClass(conn)
@@ -496,7 +496,7 @@
# this method before it was stopped.
return
- # XXX would like to report whether we get a read-only connection
+ # TODO: report whether we get a read-only connection.
if self._connection is not None:
reconnect = 1
else:
@@ -597,8 +597,8 @@
self._pickler = cPickle.Pickler(self._tfile, 1)
self._pickler.fast = 1 # Don't use the memo
- # XXX should batch these operations for efficiency
- # XXX need to acquire lock...
+ # TODO: should batch these operations for efficiency; would need
+ # to acquire lock ...
for oid, tid, version in self._cache.contents():
server.verify(oid, version, tid)
self._pending_server = server
@@ -627,7 +627,7 @@
def __len__(self):
"""Return the size of the storage."""
- # XXX Where is this used?
+ # TODO: Is this method used?
return self._info['length']
def getName(self):
@@ -700,7 +700,7 @@
# versions of ZODB, you'd get a conflict error if you tried to
# commit a transaction with the cached data.
- # XXX If we could guarantee that ZODB gave the right answer,
+ # If we could guarantee that ZODB gave the right answer,
# we could just invalidate the version data.
for oid in oids:
self._tbuf.invalidate(oid, '')
@@ -798,7 +798,7 @@
# doesn't use the _load_lock, so it is possible to overlap
# this load with an invalidation for the same object.
- # XXX If we call again, we're guaranteed to get the
+ # If we call again, we're guaranteed to get the
# post-invalidation data. But if the data is still
# current, we'll still get end == None.
@@ -857,8 +857,8 @@
days -- a number of days to subtract from the pack time;
defaults to zero.
"""
- # XXX Is it okay that read-only connections allow pack()?
- # rf argument ignored; server will provide it's own implementation
+ # TODO: Is it okay that read-only connections allow pack()?
+ # rf argument ignored; server will provide its own implementation
if t is None:
t = time.time()
t = t - (days * 86400)
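A small sketch (hypothetical helper, not in the patch) of the pack-time arithmetic shown above: days are converted to seconds and subtracted from the pack time.

    import time

    def pack_time(t=None, days=0):
        # mirror ClientStorage.pack(): default to now, then back up
        # by the requested number of days (86400 seconds each)
        if t is None:
            t = time.time()
        return t - days * 86400

    # e.g. pack_time(1000000, days=1) == 913600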
@@ -866,7 +866,7 @@
def _check_serials(self):
"""Internal helper to move data from _serials to _seriald."""
- # XXX serials are always going to be the same, the only
+ # Serials are always going to be the same; the only
# question is whether an exception has been raised.
if self._serials:
l = len(self._serials)
@@ -939,7 +939,7 @@
if txn is not self._transaction:
return
try:
- # XXX Are there any transactions that should prevent an
+ # Caution: Are there any exceptions that should prevent an
# abort from occurring? It seems wrong to swallow them
# all, yet you want to be sure that other abort logic is
# executed regardless.
@@ -991,8 +991,7 @@
"""
# Must be called with _lock already acquired.
- # XXX not sure why _update_cache() would be called on
- # a closed storage.
+ # Not sure why _update_cache() would be called on a closed storage.
if self._cache is None:
return
@@ -1063,7 +1062,8 @@
# Invalidation as result of verify_cache().
# Queue an invalidate for the end the verification procedure.
if self._pickler is None:
- # XXX This should never happen
+ # This should never happen. TODO: assert it doesn't, or log
+ # if it does.
return
self._pickler.dump(args)
Modified: ZODB/branches/3.3/src/ZEO/auth/auth_digest.py
===================================================================
--- ZODB/branches/3.3/src/ZEO/auth/auth_digest.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZEO/auth/auth_digest.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -30,8 +30,9 @@
protocol uses a nonce as a challenge. The ZEO protocol requires a
separate session key that is used for message authentication. We
generate a second nonce for this purpose; the hash of nonce and
-user/realm/password is used as the session key. XXX I'm not sure if
-this is a sound approach; SRP would be preferred.
+user/realm/password is used as the session key.
+
+TODO: I'm not sure if this is a sound approach; SRP would be preferred.
"""
import os
Modified: ZODB/branches/3.3/src/ZEO/cache.py
===================================================================
--- ZODB/branches/3.3/src/ZEO/cache.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZEO/cache.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -211,7 +211,7 @@
self._trace(0x24, oid, tid)
return
lo, hi = L[i-1]
- # XXX lo should always be less than tid
+ # lo should always be less than tid
if not lo < tid <= hi:
self._trace(0x24, oid, tid)
return None
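A short sketch (invented helper name, not part of the patch) of the validity-range test above: a non-current revision is valid for exactly the transaction ids in the half-open interval (lo, hi].

    def in_validity_range(lo, hi, tid):
        # the revision written at lo and superseded at hi serves loads
        # for transaction ids strictly greater than lo, up to and
        # including hi
        return lo < tid <= hi

    assert in_validity_range(5, 9, 9)        # hi itself is included
    assert not in_validity_range(5, 9, 5)    # lo itself is excluded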
@@ -361,12 +361,13 @@
del self.current[oid] # because we no longer have current data
# Update the end_tid half of oid's validity range on disk.
- # XXX Want to fetch object without marking it as accessed
+ # TODO: Want to fetch object without marking it as accessed.
o = self.fc.access((oid, cur_tid))
assert o is not None
assert o.end_tid is None # i.e., o was current
if o is None:
- # XXX is this possible? (doubt it; added an assert just above)
+ # TODO: Since we asserted o is not None above, this block
+ # should be removed; waiting on time to prove it can't happen.
return
o.end_tid = tid
self.fc.update(o) # record the new end_tid on disk
@@ -377,7 +378,7 @@
##
# Return the number of object revisions in the cache.
#
- # XXX just return len(self.cache)?
+ # Or maybe better to just return len(self.cache)? Needs clearer use case.
def __len__(self):
n = len(self.current) + len(self.version)
if self.noncurrent:
@@ -389,7 +390,7 @@
# cache. This generator is used by cache verification.
def contents(self):
- # XXX May need to materialize list instead of iterating,
+ # May need to materialize list instead of iterating;
# depends on whether the caller may change the cache.
for o in self.fc:
oid, tid = o.key
@@ -993,7 +994,7 @@
# header to update the in-memory data structures held by
# ClientCache.
- # XXX Or we could just keep the header in memory at all times.
+ # We could instead just keep the header in memory at all times.
e = self.key2entry.pop(key, None)
if e is None:
Modified: ZODB/branches/3.3/src/ZEO/tests/Cache.py
===================================================================
--- ZODB/branches/3.3/src/ZEO/tests/Cache.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZEO/tests/Cache.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -28,8 +28,9 @@
info = self._storage.undoInfo()
if not info:
- # XXX perhaps we have an old storage implementation that
- # does do the negative nonsense
+ # Preserved this comment, but don't understand it:
+ # "Perhaps we have an old storage implementation that
+ # does do the negative nonsense."
info = self._storage.undoInfo(0, 20)
tid = info[0]['id']
Modified: ZODB/branches/3.3/src/ZEO/tests/CommitLockTests.py
===================================================================
--- ZODB/branches/3.3/src/ZEO/tests/CommitLockTests.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZEO/tests/CommitLockTests.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -132,7 +132,7 @@
def _duplicate_client(self):
"Open another ClientStorage to the same server."
- # XXX argh it's hard to find the actual address
+ # It's hard to find the actual address.
# The rpc mgr addr attribute is a list. Each element in the
# list is a socket domain (AF_INET, AF_UNIX, etc.) and an
# address.
Modified: ZODB/branches/3.3/src/ZEO/tests/ConnectionTests.py
===================================================================
--- ZODB/branches/3.3/src/ZEO/tests/ConnectionTests.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZEO/tests/ConnectionTests.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -261,8 +261,7 @@
self._storage.close()
def checkMultipleServers(self):
- # XXX crude test at first -- just start two servers and do a
- # commit at each one.
+ # Crude test -- just start two servers and do a commit at each one.
self._newAddr()
self._storage = self.openClientStorage('test', 100000)
@@ -334,7 +333,7 @@
self.assertRaises(ReadOnlyError, self._dostore)
self._storage.close()
- # XXX Compare checkReconnectXXX() here to checkReconnection()
+ # TODO: Compare checkReconnectXXX() here to checkReconnection()
# further down. Is the code here hopelessly naive, or is
# checkReconnection() overwrought?
@@ -535,13 +534,6 @@
def checkReconnection(self):
# Check that the client reconnects when a server restarts.
- # XXX Seem to get occasional errors that look like this:
- # File ZEO/zrpc2.py, line 217, in handle_request
- # File ZEO/StorageServer.py, line 325, in storea
- # File ZEO/StorageServer.py, line 209, in _check_tid
- # StorageTransactionError: (None, <tid>)
- # could system reconnect and continue old transaction?
-
self._storage = self.openClientStorage()
oid = self._storage.new_oid()
obj = MinPO(12)
@@ -609,7 +601,7 @@
# transaction. This is not really a connection test, but it needs
# about the same infrastructure (several storage servers).
- # XXX WARNING: with the current ZEO code, this occasionally fails.
+ # TODO: with the current ZEO code, this occasionally fails.
# That's the point of this test. :-)
def NOcheckMultiStorageTransaction(self):
Modified: ZODB/branches/3.3/src/ZEO/zrpc/client.py
===================================================================
--- ZODB/branches/3.3/src/ZEO/zrpc/client.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZEO/zrpc/client.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -120,13 +120,13 @@
# be started in a child process after a fork. Regardless,
# it's good to be defensive.
- # XXX need each connection started with async==0 to have a
- # callback
+ # We need each connection started with async==0 to have a
+ # callback.
log("CM.set_async(%s)" % repr(map), level=logging.DEBUG)
if not self.closed and self.trigger is None:
log("CM.set_async(): first call")
self.trigger = trigger()
- self.thr_async = 1 # XXX needs to be set on the Connection
+ self.thr_async = 1 # needs to be set on the Connection
def attempt_connect(self):
"""Attempt a connection to the server without blocking too long.
@@ -139,8 +139,8 @@
finishes quickly.
"""
- # XXX Will a single attempt take too long?
- # XXX Answer: it depends -- normally, you'll connect or get a
+ # Will a single attempt take too long?
+ # Answer: it depends -- normally, you'll connect or get a
# connection refused error very quickly. Packet-eating
# firewalls and other mishaps may cause the connect to take a
# long time to time out though. It's also possible that you
@@ -228,7 +228,7 @@
# to the errno value(s) expected if the connect succeeds *or* if it's
# already connected (our code can attempt redundant connects).
if hasattr(errno, "WSAEWOULDBLOCK"): # Windows
- # XXX The official Winsock docs claim that WSAEALREADY should be
+ # Caution: The official Winsock docs claim that WSAEALREADY should be
# treated as yet another "in progress" indicator, but we've never
# seen this.
_CONNECT_IN_PROGRESS = (errno.WSAEWOULDBLOCK,)
@@ -287,7 +287,7 @@
delay = self.tmin
success = 0
# Don't wait too long the first time.
- # XXX make timeout configurable?
+ # TODO: make timeout configurable?
attempt_timeout = 5
while not self.stopped:
success = self.try_connecting(attempt_timeout)
@@ -373,7 +373,7 @@
log("CT: select() %d, %d, %d" % tuple(map(len, (r,w,x))))
except select.error, msg:
log("CT: select failed; msg=%s" % str(msg),
- level=logging.WARNING) # XXX Is this the right level?
+ level=logging.WARNING)
continue
# Exceptable wrappers are in trouble; close these suckers
for wrap in x:
@@ -408,7 +408,7 @@
assert wrap.state == "closed"
del wrappers[wrap]
- # XXX should check deadline
+ # TODO: should check deadline
class ConnectWrapper:
@@ -520,8 +520,7 @@
self.preferred = 0
if self.conn is not None:
# Closing the ZRPC connection will eventually close the
- # socket, somewhere in asyncore.
- # XXX Why do we care? --Guido
+ # socket, somewhere in asyncore. Guido asks: Why do we care?
self.conn.close()
self.conn = None
if self.sock is not None:
Modified: ZODB/branches/3.3/src/ZEO/zrpc/connection.py
===================================================================
--- ZODB/branches/3.3/src/ZEO/zrpc/connection.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZEO/zrpc/connection.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -407,7 +407,7 @@
self.close()
def check_method(self, name):
- # XXX Is this sufficient "security" for now?
+ # TODO: This is hardly "secure".
if name.startswith('_'):
return None
return hasattr(self.obj, name)
@@ -524,7 +524,7 @@
def _prepare_async(self):
self.thr_async = False
ThreadedAsync.register_loop_callback(self.set_async)
- # XXX If we are not in async mode, this will cause dead
+ # TODO: If we are not in async mode, this will cause dead
# Connections to be leaked.
def set_async(self, map):
@@ -642,9 +642,9 @@
# loop is only intended to make sure all incoming data is
# returned.
- # XXX What if the server sends a lot of invalidations,
- # such that pending never finishes? Seems unlikely, but
- # not impossible.
+ # Insecurity: What if the server sends a lot of
+ # invalidations, such that pending never finishes? Seems
+ # unlikely, but possible.
timeout = 0
if r:
try:
@@ -771,7 +771,7 @@
return 0
def is_async(self):
- # XXX could the check_mgr_async() be avoided on each test?
+ # TODO: could the check_mgr_async() be avoided on each test?
if self.thr_async:
return 1
return self.check_mgr_async()
Modified: ZODB/branches/3.3/src/ZODB/BaseStorage.py
===================================================================
--- ZODB/branches/3.3/src/ZODB/BaseStorage.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZODB/BaseStorage.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -309,7 +309,7 @@
def loadBefore(self, oid, tid):
"""Return most recent revision of oid before tid committed."""
- # XXX Is it okay for loadBefore() to return current data?
+ # Unsure: Is it okay for loadBefore() to return current data?
# There doesn't seem to be a good reason to forbid it, even
# though the typical use of this method will never find
# current data. But maybe we should call it loadByTid()?
@@ -329,7 +329,7 @@
# Note: history() returns the most recent record first.
- # XXX The filter argument to history() only appears to be
+ # TODO: The filter argument to history() only appears to be
# supported by FileStorage. Perhaps it shouldn't be used.
L = self.history(oid, "", n, lambda d: not d["version"])
if not L:
Modified: ZODB/branches/3.3/src/ZODB/Connection.py
===================================================================
--- ZODB/branches/3.3/src/ZODB/Connection.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZODB/Connection.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -93,16 +93,16 @@
The Connection manages movement of objects in and out of object
storage.
- XXX We should document an intended API for using a Connection via
+ TODO: We should document an intended API for using a Connection via
multiple threads.
- XXX We should explain that the Connection has a cache and that
+ TODO: We should explain that the Connection has a cache and that
multiple calls to get() will return a reference to the same
object, provided that one of the earlier objects is still
referenced. Object identity is preserved within a connection, but
not across connections.
- XXX Mention the database pool.
+ TODO: Mention the database pool.
A database connection always presents a consistent view of the
objects in the database, although it may not always present the
@@ -184,9 +184,8 @@
# Caches for versions end up empty if the version
# is not used for a while. Non-version caches
# keep their content indefinitely.
+ # Unclear: Why do we want version caches to behave this way?
- # XXX Why do we want version caches to behave this way?
-
self._cache.cache_drain_resistance = 100
self._committed = []
self._added = {}
@@ -219,12 +218,11 @@
# from a single transaction should be applied atomically, so
# the lock must be held when reading _invalidated.
- # XXX It sucks that we have to hold the lock to read
- # _invalidated. Normally, _invalidated is written by calling
- # dict.update, which will execute atomically by virtue of the
- # GIL. But some storage might generate oids where hash or
- # compare invokes Python code. In that case, the GIL can't
- # save us.
+ # It sucks that we have to hold the lock to read _invalidated.
+ # Normally, _invalidated is written by calling dict.update, which
+ # will execute atomically by virtue of the GIL. But some storage
+ # might generate oids where hash or compare invokes Python code. In
+ # that case, the GIL can't save us.
self._inv_lock = threading.Lock()
self._invalidated = d = {}
self._invalid = d.has_key
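A minimal sketch (hypothetical class, not part of the patch) of the locking discipline the comment above describes: readers and writers of the invalidated set share one lock, because oid hashing may run Python code and the GIL alone then cannot make the dict operations atomic.

    import threading

    class InvalidationSet:
        def __init__(self):
            self._lock = threading.Lock()
            self._invalidated = {}

        def invalidate(self, oids):
            # writer: merge newly invalidated oids under the lock
            self._lock.acquire()
            try:
                self._invalidated.update(dict.fromkeys(oids))
            finally:
                self._lock.release()

        def is_invalid(self, oid):
            # reader: takes the same lock, for the reason given above
            self._lock.acquire()
            try:
                return oid in self._invalidated
            finally:
                self._lock.release()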
@@ -329,7 +327,6 @@
- `ConnectionStateError`: if the connection is closed.
"""
if self._storage is None:
- # XXX Should this be a ZODB-specific exception?
raise ConnectionStateError("The database connection is closed")
obj = self._cache.get(oid, None)
@@ -424,7 +421,7 @@
register for afterCompletion() calls.
"""
- # XXX Why do we go to all the trouble of setting _db and
+ # TODO: Why do we go to all the trouble of setting _db and
# other attributes on open and clearing them on close?
# A Connection is only ever associated with a single DB
# and Storage.
@@ -477,14 +474,13 @@
self._tpc_cleanup()
- # XXX should there be a way to call incrgc directly?
- # perhaps "full sweep" should do that?
+ # Should there be a way to call incrgc directly?
+ # Perhaps "full sweep" should do that?
- # XXX we should test what happens when these methods are called
+ # TODO: we should test what happens when these methods are called
# mid-transaction.
def cacheFullSweep(self, dt=None):
- # XXX needs doc string
warnings.warn("cacheFullSweep is deprecated. "
"Use cacheMinimize instead.", DeprecationWarning)
if dt is None:
@@ -581,7 +577,8 @@
def commit(self, transaction):
if self._import:
- # XXX eh?
+ # TODO: This code seems important for Zope, but needs docs
+ # to explain why.
self._importDuringCommit(transaction, *self._import)
self._import = None
@@ -647,6 +644,7 @@
self._cache[oid] = obj
except:
# Dang, I bet it's wrapped:
+ # TODO: Deprecate, then remove, this.
if hasattr(obj, 'aq_base'):
self._cache[oid] = obj.aq_base
else:
@@ -775,7 +773,7 @@
# directly. That is no longer allowed, but we need to
# provide support for old code that still does it.
- # XXX The actual complaint here is that an object without
+ # The actual complaint here is that an object without
# an oid is being registered. I can't think of any way to
# achieve that without assignment to _p_jar. If there is
# a way, this will be a very confusing warning.
@@ -921,7 +919,7 @@
def oldstate(self, obj, tid):
"""Return copy of obj that was written by tid.
- XXX The returned object does not have the typical metadata
+ The returned object does not have the typical metadata
(_p_jar, _p_oid, _p_serial) set. I'm not sure how references
to other persistent objects are handled.
Modified: ZODB/branches/3.3/src/ZODB/DB.py
===================================================================
--- ZODB/branches/3.3/src/ZODB/DB.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZODB/DB.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -174,7 +174,8 @@
# Just let the connection go.
# We need to break circular refs to make it really go.
- # XXX What objects are involved in the cycle?
+ # TODO: Figure out exactly which objects are involved in the
+ # cycle.
connection.__dict__.clear()
return
@@ -711,9 +712,8 @@
return "%s:%s" % (self._db._storage.sortKey(), id(self))
def tpc_begin(self, txn, sub=False):
- # XXX we should never be called with sub=True.
if sub:
- raise ValueError, "doesn't supoprt sub-transactions"
+ raise ValueError("doesn't support sub-transactions")
self._db._storage.tpc_begin(txn)
# The object registers itself with the txn manager, so the ob
Modified: ZODB/branches/3.3/src/ZODB/DemoStorage.py
===================================================================
--- ZODB/branches/3.3/src/ZODB/DemoStorage.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZODB/DemoStorage.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -323,7 +323,7 @@
last = first - last + 1
self._lock_acquire()
try:
- # XXX Shouldn't this be sorted?
+ # Unsure: shouldn't we sort this?
transactions = self._data.items()
pos = len(transactions)
r = []
@@ -404,7 +404,7 @@
index, vindex = self._build_indexes(stop)
- # XXX This packing algorithm is flawed. It ignores
+ # TODO: This packing algorithm is flawed. It ignores
# references from non-current records after the pack
# time.
Modified: ZODB/branches/3.3/src/ZODB/FileStorage/FileStorage.py
===================================================================
--- ZODB/branches/3.3/src/ZODB/FileStorage/FileStorage.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZODB/FileStorage/FileStorage.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -700,9 +700,10 @@
# Else oid's data record contains the data, and the file offset of
# oid's data record is returned. This data record should contain
# a pickle identical to the 'data' argument.
- # XXX If the length of the stored data doesn't match len(data),
- # XXX an exception is raised. If the lengths match but the data
- # XXX isn't the same, 0 is returned. Why the discrepancy?
+
+ # Unclear: If the length of the stored data doesn't match len(data),
+ # an exception is raised. If the lengths match but the data isn't
+ # the same, 0 is returned. Why the discrepancy?
self._file.seek(tpos)
h = self._file.read(TRANS_HDR_LEN)
tid, tl, status, ul, dl, el = unpack(TRANS_HDR, h)
@@ -820,7 +821,7 @@
if h.version:
return h.pnv
if h.back:
- # XXX Not sure the following is always true:
+ # TODO: Not sure the following is always true:
# The previous record is not for this version, yet we
# have a backpointer to it. The current record must
# be an undo of an abort or commit, so the backpointer
@@ -1175,8 +1176,8 @@
new.setVersion(v, snv, vprev)
self._tvindex[v] = here
- # XXX This seek shouldn't be necessary, but some other
- # bit of code is messig with the file pointer.
+ # TODO: This seek shouldn't be necessary, but some other
+ # bit of code is messing with the file pointer.
assert self._tfile.tell() == here - base, (here, base,
self._tfile.tell())
self._tfile.write(new.asString())
@@ -1855,7 +1856,7 @@
def next(self, index=0):
if self._file is None:
- # A closed iterator. XXX: Is IOError the best we can do? For
+ # A closed iterator. Is IOError the best we can do? For
# now, mimic a read on a closed file.
raise IOError, 'iterator is closed'
@@ -1986,8 +1987,8 @@
data = None
else:
data, tid = self._loadBackTxn(h.oid, h.back, False)
- # XXX looks like this only goes one link back, should
- # it go to the original data like BDBFullStorage?
+ # Caution: looks like this only goes one link back.
+ # Should it go to the original data like BDBFullStorage?
prev_txn = self.getTxnFromData(h.oid, h.back)
r = Record(h.oid, h.tid, h.version, data, prev_txn, pos)
Modified: ZODB/branches/3.3/src/ZODB/FileStorage/fsdump.py
===================================================================
--- ZODB/branches/3.3/src/ZODB/FileStorage/fsdump.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZODB/FileStorage/fsdump.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -50,8 +50,8 @@
else:
version = ''
if rec.data_txn:
- # XXX It would be nice to print the transaction number
- # (i) but it would be too expensive to keep track of.
+ # It would be nice to print the transaction number
+ # (i) but it would be expensive to keep track of.
bp = "bp=%016x" % u64(rec.data_txn)
else:
bp = ""
@@ -69,7 +69,7 @@
class Dumper:
"""A very verbose dumper for debuggin FileStorage problems."""
- # XXX Should revise this class to use FileStorageFormatter.
+ # TODO: Should revise this class to use FileStorageFormatter.
def __init__(self, path, dest=None):
self.file = open(path, "rb")
Modified: ZODB/branches/3.3/src/ZODB/FileStorage/fspack.py
===================================================================
--- ZODB/branches/3.3/src/ZODB/FileStorage/fspack.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZODB/FileStorage/fspack.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -82,9 +82,10 @@
# Else oid's data record contains the data, and the file offset of
# oid's data record is returned. This data record should contain
# a pickle identical to the 'data' argument.
- # XXX If the length of the stored data doesn't match len(data),
- # XXX an exception is raised. If the lengths match but the data
- # XXX isn't the same, 0 is returned. Why the discrepancy?
+
+ # Unclear: If the length of the stored data doesn't match len(data),
+ # an exception is raised. If the lengths match but the data isn't
+ # the same, 0 is returned. Why the discrepancy?
h = self._read_txn_header(tpos)
tend = tpos + h.tlen
pos = self._file.tell()
@@ -121,7 +122,7 @@
if h.version:
return h.pnv
elif bp:
- # XXX Not sure the following is always true:
+ # Unclear: Not sure the following is always true:
# The previous record is not for this version, yet we
# have a backpointer to it. The current record must
# be an undo of an abort or commit, so the backpointer
@@ -280,8 +281,8 @@
if err.buf != "":
raise
if th.status == 'p':
- # Delay import to cope with circular imports.
- # XXX put exceptions in a separate module
+ # Delayed import to cope with circular imports.
+ # TODO: put exceptions in a separate module.
from ZODB.FileStorage.FileStorage import RedundantPackWarning
raise RedundantPackWarning(
"The database has already been packed to a later time"
@@ -447,9 +448,9 @@
# The packer will use several indexes.
# index: oid -> pos
- # vindex: version -> pos of XXX
+ # vindex: version -> pos
# tindex: oid -> pos, for current txn
- # tvindex: version -> pos of XXX, for current txn
+ # tvindex: version -> pos, for current txn
# oid2tid: not used by the packer
self.index = fsIndex()
@@ -476,12 +477,12 @@
# Because these pointers are stored as file offsets, they
# must be updated when we copy data.
- # XXX Need to add sanity checking to pack
+ # TODO: Should add sanity checking to pack.
self.gc.findReachable()
# Setup the destination file and copy the metadata.
- # XXX rename from _tfile to something clearer
+ # TODO: rename from _tfile to something clearer.
self._tfile = open(self._name + ".pack", "w+b")
self._file.seek(0)
self._tfile.write(self._file.read(self._metadata_size))
Modified: ZODB/branches/3.3/src/ZODB/broken.py
===================================================================
--- ZODB/branches/3.3/src/ZODB/broken.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZODB/broken.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -94,7 +94,7 @@
__Broken_state__ = __Broken_initargs__ = None
- __name__ = 'bob XXX'
+ __name__ = 'broken object'
def __new__(class_, *args):
result = object.__new__(class_)
Modified: ZODB/branches/3.3/src/ZODB/fstools.py
===================================================================
--- ZODB/branches/3.3/src/ZODB/fstools.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZODB/fstools.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -14,8 +14,8 @@
"""Tools for using FileStorage data files.
-XXX This module needs tests.
-XXX This file needs to be kept in sync with FileStorage.py.
+TODO: This module needs tests.
+Caution: This file needs to be kept in sync with FileStorage.py.
"""
import cPickle
Modified: ZODB/branches/3.3/src/ZODB/tests/BasicStorage.py
===================================================================
--- ZODB/branches/3.3/src/ZODB/tests/BasicStorage.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZODB/tests/BasicStorage.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -176,7 +176,7 @@
eq(revid2, self._storage.getSerial(oid))
def checkTwoArgBegin(self):
- # XXX how standard is three-argument tpc_begin()?
+ # Unsure: how standard is three-argument tpc_begin()?
t = transaction.Transaction()
tid = '\0\0\0\0\0psu'
self._storage.tpc_begin(t, tid)
Modified: ZODB/branches/3.3/src/ZODB/tests/ConflictResolution.py
===================================================================
--- ZODB/branches/3.3/src/ZODB/tests/ConflictResolution.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/ZODB/tests/ConflictResolution.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -37,7 +37,7 @@
return oldState
- # XXX What if _p_resolveConflict _thinks_ it resolved the
+ # Insecurity: What if _p_resolveConflict _thinks_ it resolved the
# conflict, but did something wrong?
class PCounter2(PCounter):
Modified: ZODB/branches/3.3/src/persistent/cPersistence.c
===================================================================
--- ZODB/branches/3.3/src/persistent/cPersistence.c 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/persistent/cPersistence.c 2005-03-11 22:42:21 UTC (rev 29447)
@@ -85,7 +85,7 @@
if (self->state < 0 && self->jar) {
PyObject *r;
- /* XXX Is it ever possibly to not have a cache? */
+ /* Is it ever possible to not have a cache? */
if (self->cache) {
/* Create a node in the ring for this unghostified object. */
self->cache->non_ghost_count++;
@@ -156,7 +156,7 @@
if (self->state == cPersistent_GHOST_STATE)
return;
- /* XXX is it ever possible to not have a cache? */
+ /* Is it ever possible to not have a cache? */
if (self->cache == NULL) {
self->state = cPersistent_GHOST_STATE;
return;
@@ -386,7 +386,7 @@
continue;
}
- /* XXX will this go through our getattr hook? */
+ /* Unclear: Will this go through our getattr hook? */
value = PyObject_GetAttr(self, name);
if (value == NULL)
PyErr_Clear();
@@ -548,11 +548,12 @@
static PyObject *
Per__getstate__(cPersistentObject *self)
{
- /* XXX Should it be an error to call __getstate__() on a ghost? */
+ /* TODO: Should it be an error to call __getstate__() on a ghost? */
if (unghostify(self) < 0)
return NULL;
- /* XXX shouldn't we increment stickyness? */
+ /* TODO: should we increment stickyness? Tim doesn't understand that
question. */
return pickle___getstate__((PyObject*)self);
}
@@ -723,7 +724,7 @@
}
/*
- XXX we should probably not allow assignment of __class__ and __dict__.
+ TODO: we should probably not allow assignment of __class__ and __dict__.
*/
static int
@@ -858,7 +859,7 @@
is to clear the exception, but that simply masks the
error.
- XXX We'll print an error to stderr just like exceptions in
+ This prints an error to stderr just like exceptions in
__del__(). It would probably be better to log it but that
would be painful from C.
*/
Modified: ZODB/branches/3.3/src/persistent/cPickleCache.c
===================================================================
--- ZODB/branches/3.3/src/persistent/cPickleCache.c 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/persistent/cPickleCache.c 2005-03-11 22:42:21 UTC (rev 29447)
@@ -303,7 +303,7 @@
{
int dt = -999;
- /* XXX This should be deprecated */
+ /* TODO: This should be deprecated. */
if (!PyArg_ParseTuple(args, "|i:full_sweep", &dt))
return NULL;
@@ -354,7 +354,7 @@
/* This looks wrong, but it isn't. We use strong references to types
because they don't have the ring members.
- XXX the result is that we *never* remove classes unless
+ The result is that we *never* remove classes unless
they are modified.
*/
@@ -412,7 +412,7 @@
_invalidate(self, key);
Py_DECREF(key);
}
- /* XXX Do we really want to modify the input? */
+ /* Dubious: modifying the input may be an unexpected side effect. */
PySequence_DelSlice(inv, 0, l);
}
}
@@ -603,7 +603,7 @@
*/
Py_INCREF(v);
- /* XXX Should we call _Py_ForgetReference() on error exit? */
+ /* TODO: Should we call _Py_ForgetReference() on error exit? */
if (PyDict_DelItem(self->data, oid) < 0)
return;
Py_DECREF((ccobject *)((cPersistentObject *)v)->cache);
@@ -851,7 +851,7 @@
classes that derive from persistent.Persistent, BTrees,
etc), report an error.
- XXX Need a better test.
+ TODO: checking sizeof() seems a poor test.
*/
PyErr_SetString(PyExc_TypeError,
"Cache values must be persistent objects.");
Modified: ZODB/branches/3.3/src/scripts/fstest.py
===================================================================
--- ZODB/branches/3.3/src/scripts/fstest.py 2005-03-11 22:07:51 UTC (rev 29446)
+++ ZODB/branches/3.3/src/scripts/fstest.py 2005-03-11 22:42:21 UTC (rev 29447)
@@ -193,12 +193,11 @@
(path, pos, tloc, tpos))
pos = pos + dlen
- # XXX is the following code necessary?
if plen:
file.seek(plen, 1)
else:
file.seek(8, 1)
- # XXX _loadBack() ?
+ # _loadBack() ?
return pos, oid