[Zope3-checkins] CVS: ZODB4/ZODB - Connection.py:1.85 DB.py:1.54 FileStorage.py:1.101 MappingStorage.py:1.11 Serialize.py:1.4 ZTransaction.py:1.3
Jeremy Hylton
jeremy@zope.com
Fri, 15 Nov 2002 16:49:43 -0500
Update of /cvs-repository/ZODB4/ZODB
In directory cvs.zope.org:/tmp/cvs-serv1060/ZODB
Modified Files:
Connection.py DB.py FileStorage.py MappingStorage.py
Serialize.py ZTransaction.py
Log Message:
Some more database simplifications
Get rid of version-specific pools. If an application is using versions,
we're just going to let those connections get mixed in with the rest of
the pool. When a connection is reset, it clears its cache if the new
version is different from the old one.
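A minimal sketch of what that means for the pool, with made-up class bodies
and a plain dict standing in for the real pickle cache (illustration only,
not the actual ZODB code; the real open()/reset() are in the DB.py and
Connection.py diffs below):

    # Illustration only: one shared pool for all versions, and reset(version)
    # dropping the cache when a pooled connection is reused for a different
    # version than it last served.
    class Connection:
        def __init__(self, version=""):
            self._version = version
            self._cache = {}            # stand-in for the real pickle cache

        def reset(self, version):
            if version != self._version:
                # Cached objects don't record which version they were loaded
                # in, so switching versions means dropping the whole cache.
                self._cache.clear()
                self._version = version

    class DB:
        def __init__(self, pool_size=7):
            self._pool = []             # a single pool, shared by all versions
            self._pool_size = pool_size

        def open(self, version=""):
            if self._pool:
                conn = self._pool.pop()
            else:
                conn = Connection(version)
            conn.reset(version)         # may clear the cache on a version change
            return conn

        def _closeConnection(self, conn):
            self._pool.append(conn)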
Get rid of the modifiedInVersion() cache. There are no calls to
modifiedInVersion() outside of the ZODB test suite, so it can't be
that important to optimize.
Add a saveObject() method to Pickler and use it to create the pickle for
the database root. XXX The Connection should use this code (or something
like it) to save objects, too.
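For illustration, the new bootstrap path in DB.__init__() (shown in the
DB.py diff below) now reads roughly like this; MappingStorage is used here
only to make the sketch self-contained, and ROOT_KEY and the other imports
are as in the diff:

    # Condensed sketch of the new root-creation code in DB.__init__.
    from Persistence.PersistentDict import PersistentDict
    from ZODB.MappingStorage import MappingStorage
    from ZODB.Serialize import Pickler
    from ZODB.ZTransaction import Transaction

    ROOT_KEY = "\0\0\0\0\0\0\0\0"

    storage = MappingStorage("example")  # any storage would do here
    root = PersistentDict()
    data = Pickler().saveObject(root)    # class-metadata pickle + state pickle

    t = Transaction(description="initial database creation")
    storage.tpc_begin(t)
    storage.store(ROOT_KEY, None, data, '', t)
    storage.tpc_vote(t)
    storage.tpc_finish(t)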
=== ZODB4/ZODB/Connection.py 1.84 => 1.85 ===
--- ZODB4/ZODB/Connection.py:1.84 Fri Nov 15 15:33:15 2002
+++ ZODB4/ZODB/Connection.py Fri Nov 15 16:49:42 2002
@@ -115,7 +115,7 @@
def modifiedInVersion(self, oid):
try:
- return self._db.modifiedInVersion(oid)
+ return self._storage.modifiedInVersion(oid)
except KeyError:
return self._version
@@ -210,7 +210,13 @@
# required by the IPersistentDataManager interface, but unimplemented
return None
- def reset(self):
+ def reset(self, version):
+ if version != self._version:
+ # XXX I think it's necessary to clear the cache here, because
+ # the objects in the cache don't know that they were in a
+ # version.
+ self._cache.clear()
+ self._version = version
self._cache.invalidateMany(self._invalidated)
self._invalidated.clear()
self._opened = time.time()
=== ZODB4/ZODB/DB.py 1.53 => 1.54 ===
--- ZODB4/ZODB/DB.py:1.53 Fri Nov 15 15:33:15 2002
+++ ZODB4/ZODB/DB.py Fri Nov 15 16:49:42 2002
@@ -23,6 +23,7 @@
from referencesf import referencesf
from time import time, ctime
from zLOG import LOG, ERROR
+from ZODB.Serialize import Pickler
from ZODB.ZTransaction import Transaction
from Transaction import get_transaction
@@ -30,6 +31,8 @@
from types import StringType
+ROOT_KEY = "\0\0\0\0\0\0\0\0"
+
class DB:
"""The Object Database
@@ -40,8 +43,6 @@
def __init__(self, storage,
pool_size=7,
cache_size=400,
- version_pool_size=3,
- version_cache_size=100,
):
"""Create an object database.
@@ -58,69 +59,58 @@
self._r=l.release
# Setup connection pools and cache info
- self._pools={},[]
- self._temps=[]
- self._pool_size=pool_size
- self._cache_size=cache_size
- self._version_pool_size=version_pool_size
- self._version_cache_size=version_cache_size
-
- self._miv_cache={}
+ self._pool = []
+ self._allocated = []
+ self._pool_lock = Lock()
+ self._pool_lock.acquire()
+ self._temps = []
+ self._pool_size = pool_size
+ self._cache_size = cache_size
# Setup storage
self._storage = storage
storage.registerDB(self)
try:
- storage.load('\0\0\0\0\0\0\0\0', '')
+ storage.load(ROOT_KEY, "")
except KeyError:
# Create the database's root in the storage if it doesn't exist
from Persistence.PersistentDict import PersistentDict
root = PersistentDict()
- # Manually create a pickle for the root to put in the storage.
- # The pickle must be in the special ZODB format.
- file = cStringIO.StringIO()
- p = cPickle.Pickler(file, 1)
- p.dump((root.__class__.__module__, root.__class__.__name__, None))
- p.dump(root.__getstate__())
- t = Transaction()
- t.note("initial database creation")
+ p = Pickler()
+ t = Transaction(description="initial database creation")
storage.tpc_begin(t)
- storage.store('\0\0\0\0\0\0\0\0', None, file.getvalue(), '', t)
+ storage.store(ROOT_KEY, None, p.saveObject(root), '', t)
storage.tpc_vote(t)
storage.tpc_finish(t)
# Pass through methods:
for m in ('history', 'supportsVersions', 'undoInfo', 'versionEmpty',
- 'versions'):
+ 'versions', 'modifiedInVersion', 'versionEmpty'):
setattr(self, m, getattr(storage, m))
def _closeConnection(self, connection):
"""Return a connection to the pool"""
self._a()
try:
- version=connection._version
- pools,pooll=self._pools
- pool, allocated, pool_lock = pools[version]
- pool.append(connection)
- if len(pool)==1:
+ version = connection._version
+ self._allocated.remove(connection)
+ self._pool.append(connection)
+ if len(self._pool) == 1:
# Pool now usable again, unlock it.
- pool_lock.release()
+ self._pool_lock.release()
finally: self._r()
def _connectionMap(self, f):
self._a()
try:
- pools,pooll=self._pools
- for pool, allocated in pooll:
- for cc in allocated: f(cc)
-
- temps=self._temps
- if temps:
- t=[]
- rc=sys.getrefcount
+ map(f, self._allocated)
+
+ # XXX I don't understand what this code is trying to do
+ if self._temps:
for cc in temps:
- if rc(cc) > 3: f(cc)
- self._temps=t
+ if sys.getrefcount(cc) > 3:
+ f(cc)
+ self._temps = []
finally: self._r()
def abortVersion(self, version):
@@ -165,18 +155,15 @@
passed in to prevent useless (but harmless) messages to the
connection.
"""
+ assert oid is not None
if connection is not None:
+ assert version == connection._version
version = connection._version
- assert oid is not None
-
- self.updateMIVCache(oid)
-
# Notify connections
- for pool, allocated in self._pools[1]:
- for cc in allocated:
- if cc is not connection:
- self.invalidateConnection(cc, version, oid)
+ for cc in self._allocated:
+ if cc is not connection:
+ self.invalidateConnection(cc, version, oid)
if self._temps:
# t accumulates all the connections that aren't closed.
@@ -200,6 +187,7 @@
that aren't garbage collected.
"""
+ # XXX use weakrefs instead of refcounts?
if sys.getrefcount(conn) <= 3:
conn.close()
else:
@@ -208,22 +196,6 @@
if not version or conn.getVersion() == version:
conn.invalidate(oid)
- def updateMIVCache(self, oid):
- h = hash(oid) % 131
- o = self._miv_cache.get(h)
- if o is not None and o[0]==oid:
- del self._miv_cache[h]
-
- def modifiedInVersion(self, oid):
- h=hash(oid)%131
- cache=self._miv_cache
- o=cache.get(h, None)
- if o and o[0]==oid:
- return o[1]
- v=self._storage.modifiedInVersion(oid)
- cache[h]=oid, v
- return v
-
def open(self, version='', transaction=None, temporary=0, force=None,
waitflag=1):
"""Return a object space (AKA connection) to work in
@@ -263,24 +235,6 @@
transaction[id(c)] = c
return c
- pools,pooll=self._pools
-
- # pools is a mapping object:
- #
- # {version -> (pool, allocated, lock)
- #
- # where:
- #
- # pool is the connection pool for the version,
- # allocated is a list of all of the allocated
- # connections, and
- # lock is a lock that is used to block when a pool is
- # empty and no more connections can be allocated.
- #
- # pooll is a list of all of the pools and allocated for
- # use in cases where we need to iterate over all
- # connections or all inactive connections.
-
# Pool locks are tricky. Basically, the lock needs to be
# set whenever the pool becomes empty so that threads are
# forced to wait until the pool gets a connection in it.
@@ -289,86 +243,70 @@
# the last connection from the pool and just after adding
# a connection to an empty pool.
-
- if pools.has_key(version):
- pool, allocated, pool_lock = pools[version]
- else:
- pool, allocated, pool_lock = pools[version] = ([], [], Lock())
- pooll.append((pool, allocated))
- pool_lock.acquire()
-
- if not pool:
- c=None
- if version:
- if self._version_pool_size > len(allocated) or force:
- c = Connection(self, version,
- cache_size=self._version_cache_size)
- allocated.append(c)
- pool.append(c)
- elif self._pool_size > len(allocated) or force:
- c = Connection(self, version,
- cache_size=self._cache_size)
- allocated.append(c)
- pool.append(c)
+ if not self._pool:
+ c = None
+ if self._pool_size > len(self._pool) or force:
+ c = Connection(self, version, cache_size=self._cache_size)
+ self._pool.append(c)
if c is None:
if waitflag:
self._r()
- pool_lock.acquire()
+ self._pool_lock.acquire()
self._a()
- if len(pool) > 1:
+ if len(self._pool) > 1:
# Note that the pool size will normally be 1 here,
# but it could be higher due to a race condition.
- pool_lock.release()
- else: return
-
- elif len(pool)==1:
+ self._pool_lock.release()
+ else:
+ return
+ elif len(self._pool) == 1:
# Taking last one, lock the pool
# Note that another thread might grab the lock
# before us, so we might actually block, however,
# when we get the lock back, there *will* be a
# connection in the pool.
self._r()
- pool_lock.acquire()
+ self._pool_lock.acquire()
self._a()
- if len(pool) > 1:
+ if len(self._pool) > 1:
# Note that the pool size will normally be 1 here,
# but it could be higher due to a race condition.
- pool_lock.release()
+ self._pool_lock.release()
- c=pool[-1]
- del pool[-1]
- c.reset()
- for pool, allocated in pooll:
- for cc in pool:
- cc.cacheGC()
+ # XXX Could look for a connection with the right version
+ c = self._pool.pop()
+ self._allocated.append(c)
+ c.reset(version)
+ for other_conn in self._pool:
+ other_conn.cacheGC()
- if transaction is not None: transaction[version]=c
+ if transaction is not None:
+ transaction[version] = c
return c
finally: self._r()
def pack(self, t=None, days=0):
- if t is None: t=time()
- t=t-(days*86400)
- try: self._storage.pack(t,referencesf)
+ if t is None:
+ t = time()
+ t -= days * 86400
+ try:
+ self._storage.pack(t,referencesf)
except:
LOG("ZODB", ERROR, "packing", error=sys.exc_info())
raise
def setCacheSize(self, v):
- self._cache_size=v
- for c in self._pools[0][''][1]:
- c._cache.cache_size=v
+ self._cache_size = v
+ for c in self._pool:
+ c._cache.cache_size = v
def setPoolSize(self, v):
- self._pool_size=v
+ self._pool_size = v
def undo(self, id):
TransactionalUndo(self, id)
-
- def versionEmpty(self, version):
- return self._storage.versionEmpty(version)
class SimpleDataManager:
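The pool-lock comments in the DB.open() hunk above boil down to one
invariant: the pool lock is held whenever the pool is empty, so a thread
that wants a connection blocks until _closeConnection() puts one back and
releases the lock. A stripped-down sketch of that protocol, with
hypothetical checkout()/checkin() names standing in for open() and
_closeConnection() (illustration only; the real code also holds the
database-wide lock around every pool mutation and drops it while blocking,
which this sketch omits):

    from threading import Lock

    class PoolLockSketch:
        def __init__(self):
            self._pool = []
            self._lock = Lock()
            self._lock.acquire()        # pool starts out empty, so start locked

        def checkout(self):
            if not self._pool:
                # Empty pool: block until checkin() releases the lock.
                self._lock.acquire()
                if len(self._pool) > 1:
                    # More than one connection came back while we waited, so
                    # the pool is not about to be emptied; let other threads in.
                    self._lock.release()
            elif len(self._pool) == 1:
                # About to take the last connection: re-assert the
                # "empty pool => lock held" invariant before popping.
                self._lock.acquire()
                if len(self._pool) > 1:
                    self._lock.release()
            return self._pool.pop()

        def checkin(self, conn):
            self._pool.append(conn)
            if len(self._pool) == 1:
                # Pool just went from empty to usable again: unlock it.
                self._lock.release()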
=== ZODB4/ZODB/FileStorage.py 1.100 => 1.101 ===
--- ZODB4/ZODB/FileStorage.py:1.100 Fri Nov 15 15:33:15 2002
+++ ZODB4/ZODB/FileStorage.py Fri Nov 15 16:49:42 2002
@@ -192,12 +192,10 @@
"""
def DB(file_name, create=0, read_only=0, stop=None, quota=None,
- pool_size=7, cache_size=400, version_pool_size=3,
- version_cache_size=100):
+ pool_size=7, cache_size=400):
"""Create new object database using FileStorage file_name."""
fs = FileStorage(file_name, create, read_only, stop, quota)
- db = ZODB.DB.DB(fs, pool_size, cache_size,
- version_pool_size, version_cache_size)
+ db = ZODB.DB.DB(fs, pool_size, cache_size)
return db
class FileStorage(BaseStorage.BaseStorage,
=== ZODB4/ZODB/MappingStorage.py 1.10 => 1.11 ===
--- ZODB4/ZODB/MappingStorage.py:1.10 Fri Nov 15 15:33:15 2002
+++ ZODB4/ZODB/MappingStorage.py Fri Nov 15 16:49:42 2002
@@ -97,8 +97,7 @@
pool_size=7, cache_size=400,
version_pool_size=3, version_cache_size=100):
ms = MappingStorage(name)
- db = ZODB.DB.DB(ms, pool_size, cache_size,
- version_pool_size, version_cache_size)
+ db = ZODB.DB.DB(ms, pool_size, cache_size)
return db
class MappingStorage(BaseStorage.BaseStorage):
=== ZODB4/ZODB/Serialize.py 1.3 => 1.4 ===
--- ZODB4/ZODB/Serialize.py:1.3 Thu Oct 3 00:32:56 2002
+++ ZODB4/ZODB/Serialize.py Fri Nov 15 16:49:42 2002
@@ -58,9 +58,12 @@
class Pickler:
- pass
-
-
+ def saveObject(self, object):
+ file = StringIO()
+ p = cPickle.Pickler(file, 1)
+ p.dump(getClassMetadata(object))
+ p.dump(object.__getstate__())
+ return file.getvalue()
class BaseUnpickler:
=== ZODB4/ZODB/ZTransaction.py 1.2 => 1.3 ===
--- ZODB4/ZODB/ZTransaction.py:1.2 Thu Oct 3 00:32:56 2002
+++ ZODB4/ZODB/ZTransaction.py Fri Nov 15 16:49:42 2002
@@ -24,11 +24,19 @@
description = ""
_extension = None
+ def __init__(self, manager=None, parent=None,
+ user=None, description=None):
+ super(Transaction, self).__init__(manager, parent)
+ if user is not None:
+ self.user = user
+ if description is not None:
+ self.description = description
+
def note(self, text):
if self.description:
- self.description = "%s\n\n%s" % (self.description, text.strip())
+ self.description = "%s\n\n%s" % (self.description, text)
else:
- self.description = text.strip()
+ self.description = text
def setUser(self, user_name, path='/'):
self.user = "%s %s" % (path, user_name)