[Zope3-checkins] CVS: ZODB4/ZODB - MappingStorage.py:1.10 FileStorage.py:1.100 DB.py:1.53 Connection.py:1.84
Jeremy Hylton
jeremy@zope.com
Fri, 15 Nov 2002 15:33:15 -0500
Update of /cvs-repository/ZODB4/ZODB
In directory cvs.zope.org:/tmp/cvs-serv24700/ZODB
Modified Files:
MappingStorage.py FileStorage.py DB.py Connection.py
Log Message:
Remove a lot of code from DB, at least for now.
All the cache and database introspection code is gone. We may restore
some of this later, after we understand what kind of introspection
will be needed by clients.
Remove the cache_age parameters from the DB/Connection constructors. It's
not clear that any cache will use this parameter, and even if one does,
it's not clear that we want to configure it via the DB/Connection constructor.
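For reference, a minimal sketch of what building a database looks like with
the trimmed constructor. The storage class and default values come from the
diff below; the import spellings assume the ZODB4 package layout shown in the
file paths, so treat this as illustrative rather than definitive:

    from ZODB.MappingStorage import MappingStorage
    import ZODB.DB

    storage = MappingStorage("Mapping Storage")
    db = ZODB.DB.DB(storage,
                    pool_size=7,             # connections kept for the '' version
                    cache_size=400,          # per-connection cache size
                    version_pool_size=3,     # connections kept per version
                    version_cache_size=100)  # cache size for version connections
    conn = db.open()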
=== ZODB4/ZODB/MappingStorage.py 1.9 => 1.10 ===
--- ZODB4/ZODB/MappingStorage.py:1.9 Thu Jul 18 16:26:18 2002
+++ ZODB4/ZODB/MappingStorage.py Fri Nov 15 15:33:15 2002
@@ -94,13 +94,11 @@
from ZODB.TimeStamp import TimeStamp
def DB(name="Mapping Storage",
- pool_size=7, cache_size=400, cache_deactivate_after=60,
- version_pool_size=3, version_cache_size=100,
- version_cache_deactivate_after=10):
+ pool_size=7, cache_size=400,
+ version_pool_size=3, version_cache_size=100):
ms = MappingStorage(name)
- db = ZODB.DB.DB(ms, pool_size, cache_size, cache_deactivate_after,
- version_pool_size, version_cache_size,
- version_cache_deactivate_after)
+ db = ZODB.DB.DB(ms, pool_size, cache_size,
+ version_pool_size, version_cache_size)
return db
class MappingStorage(BaseStorage.BaseStorage):
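The module-level convenience helper keeps the same shape, minus the
deactivate-after knobs. A hedged usage sketch, with the defaults written out
explicitly and the import path assumed from the layout above:

    import ZODB.MappingStorage
    db = ZODB.MappingStorage.DB("Mapping Storage",
                                pool_size=7, cache_size=400,
                                version_pool_size=3, version_cache_size=100)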
=== ZODB4/ZODB/FileStorage.py 1.99 => 1.100 ===
--- ZODB4/ZODB/FileStorage.py:1.99 Thu Oct 3 15:44:24 2002
+++ ZODB4/ZODB/FileStorage.py Fri Nov 15 15:33:15 2002
@@ -192,14 +192,12 @@
"""
def DB(file_name, create=0, read_only=0, stop=None, quota=None,
- pool_size=7, cache_size=400, cache_deactivate_after=60,
- version_pool_size=3, version_cache_size=100,
- version_cache_deactivate_after=10):
+ pool_size=7, cache_size=400, version_pool_size=3,
+ version_cache_size=100):
"""Create new object database using FileStorage file_name."""
fs = FileStorage(file_name, create, read_only, stop, quota)
- db = ZODB.DB.DB(fs, pool_size, cache_size, cache_deactivate_after,
- version_pool_size, version_cache_size,
- version_cache_deactivate_after)
+ db = ZODB.DB.DB(fs, pool_size, cache_size,
+ version_pool_size, version_cache_size)
return db
class FileStorage(BaseStorage.BaseStorage,
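FileStorage's DB() helper follows the same pattern, with the storage-specific
arguments up front. A sketch only; "Data.fs" is an illustrative path:

    import ZODB.FileStorage
    db = ZODB.FileStorage.DB("Data.fs", create=1,
                             pool_size=7, cache_size=400,
                             version_pool_size=3, version_cache_size=100)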
=== ZODB4/ZODB/DB.py 1.52 => 1.53 ===
--- ZODB4/ZODB/DB.py:1.52 Thu Sep 19 14:22:04 2002
+++ ZODB4/ZODB/DB.py Fri Nov 15 15:33:15 2002
@@ -40,10 +40,8 @@
def __init__(self, storage,
pool_size=7,
cache_size=400,
- cache_deactivate_after=60,
version_pool_size=3,
version_cache_size=100,
- version_cache_deactivate_after=10,
):
"""Create an object database.
@@ -64,10 +62,8 @@
self._temps=[]
self._pool_size=pool_size
self._cache_size=cache_size
- self._cache_deactivate_after=cache_deactivate_after
self._version_pool_size=version_pool_size
self._version_cache_size=version_cache_size
- self._version_cache_deactivate_after=version_cache_deactivate_after
self._miv_cache={}
@@ -98,19 +94,6 @@
'versions'):
setattr(self, m, getattr(storage, m))
- def _cacheMean(self, attr):
- # XXX this method doesn't work
- m=[0,0]
- def f(con, m=m, attr=attr):
- t=getattr(con._cache, attr)
- m[0]=m[0]+t
- m[1]=m[1]+1
-
- self._connectionMap(f)
- if m[1]: m=m[0]/m[1]
- else: m=None
- return m
-
def _closeConnection(self, connection):
"""Return a connection to the pool"""
self._a()
@@ -143,97 +126,18 @@
def abortVersion(self, version):
AbortVersion(self, version)
- def cacheDetail(self):
- """Return information on objects in the various caches
-
- Organized by class."""
-
- detail = {}
- def f(con):
- for oid, ob in con._cache.items():
- module = getattr(ob.__class__, '__module__', '')
- module = module and '%s.' % module or ''
- c = "%s%s" % (module, ob.__class__.__name__)
- detail[c] = detail.get(c, 0) + 1
-
- self._connectionMap(f)
- detail = detail.items()
- detail.sort()
- return detail
-
- def cacheExtremeDetail(self):
- detail = []
- conn_no = [0] # A mutable reference to a counter
- def f(con, rc=sys.getrefcount, conn_no=conn_no):
- conn_no[0] = conn_no[0] + 1
- cn = conn_no[0]
- for oid, ob in con._cache.items():
- id = ''
- if hasattr(ob, '__dict__'):
- d = ob.__dict__
- id = d.get("id", "")
- if id is None:
- id = d.get("__name__", "")
-
- module = getattr(ob.__class__, '__module__', '')
- module = module and '%s.' % module or ''
-
- detail.append({
- 'conn_no': cn,
- 'oid': oid,
- 'id': id,
- 'klass': "%s%s" % (module, ob.__class__.__name__),
- 'rc': rc(ob)-4,
- 'state': ob._p_changed,
- #'references': con.references(oid),
- })
-
- self._connectionMap(f)
- return detail
-
def cacheFullSweep(self, value):
self._connectionMap(lambda c: c._cache.full_sweep())
- def cacheLastGCTime(self):
- L = []
- def f(con):
- L.append(con._cache.cache_last_gc_time)
-
- self._connectionMap(f)
- return max(L)
-
def cacheMinimize(self, value):
self._connectionMap(lambda c, v=value: c._cache.minimize())
- def cacheMeanAge(self):
- return self._cacheMean('cache_mean_age')
-
- def cacheMeanDeac(self):
- return self._cacheMean('cache_mean_deac')
-
- def cacheMeanDeal(self):
- return self._cacheMean('cache_mean_deal')
-
- def cacheSize(self):
- L = []
- def f(con):
- L.append(len(con._cache))
-
- self._connectionMap(f)
- return reduce(int.__add__, L)
-
def close(self):
self._storage.close()
def commitVersion(self, source, destination=''):
CommitVersion(self, source, destination)
- def exportFile(self, oid, file=None):
- raise 'Not yet implemented'
-
- def getCacheDeactivateAfter(self):
- return self._cache_deactivate_after
-
def getCacheSize(self):
return self._cache_size
@@ -243,21 +147,6 @@
def getPoolSize(self):
return self._pool_size
- def getSize(self):
- return self._storage.getSize()
-
- def getVersionCacheDeactivateAfter(self):
- return self._version_cache_deactivate_after
-
- def getVersionCacheSize(self):
- return self._version_cache_size
-
- def getVersionPoolSize(self):
- return self._version_pool_size
-
- def importFile(self, file):
- raise 'Not yet implemented'
-
def begin_invalidation(self):
# Must be called before first call to invalidate and before
# the storage lock is held.
@@ -335,9 +224,6 @@
cache[h]=oid, v
return v
- def __len__(self):
- return len(self._storage)
-
def open(self, version='', transaction=None, temporary=0, force=None,
waitflag=1):
"""Return a object space (AKA connection) to work in
@@ -371,9 +257,7 @@
# We won't bother with the pools. This will be
# a one-use connection.
c = Connection(self, version,
- cache_size=self._version_cache_size,
- cache_deactivate_after=
- self._version_cache_deactivate_after)
+ cache_size=self._version_cache_size)
self._temps.append(c)
if transaction is not None:
transaction[id(c)] = c
@@ -418,16 +302,12 @@
if version:
if self._version_pool_size > len(allocated) or force:
c = Connection(self, version,
- cache_size=self._version_cache_size,
- cache_deactivate_after=
- self._version_cache_deactivate_after)
+ cache_size=self._version_cache_size)
allocated.append(c)
pool.append(c)
elif self._pool_size > len(allocated) or force:
c = Connection(self, version,
- cache_size=self._cache_size,
- cache_deactivate_after=
- self._cache_deactivate_after)
+ cache_size=self._cache_size)
allocated.append(c)
pool.append(c)
@@ -468,26 +348,6 @@
finally: self._r()
- def connectionDebugInfo(self):
- r=[]
- pools,pooll=self._pools
- t=time()
- for version, (pool, allocated, lock) in pools.items():
- for c in allocated:
- o=c._opened
- d=c._debug_info
- if d:
- if len(d)==1: d=d[0]
- else: d=''
- d="%s (%s)" % (d, len(c._cache))
-
- r.append({
- 'opened': o and ("%s (%.2fs)" % (ctime(o), t-o)),
- 'info': d,
- 'version': version,
- })
- return r
-
def pack(self, t=None, days=0):
if t is None: t=time()
t=t-(days*86400)
@@ -496,39 +356,14 @@
LOG("ZODB", ERROR, "packing", error=sys.exc_info())
raise
- def setCacheDeactivateAfter(self, v):
- self._cache_deactivate_after=v
- for c in self._pools[0][''][1]:
- c._cache.cache_age=v
-
def setCacheSize(self, v):
self._cache_size=v
for c in self._pools[0][''][1]:
c._cache.cache_size=v
- def setClassFactory(self, factory):
- self._classFactory=factory
-
- def setPoolSize(self, v): self._pool_size=v
+ def setPoolSize(self, v):
+ self._pool_size=v
- def setVersionCacheDeactivateAfter(self, v):
- self._version_cache_deactivate_after=v
- for ver in self._pools[0].keys():
- if ver:
- for c in self._pools[0][ver][1]:
- c._cache.cache_age=v
-
- def setVersionCacheSize(self, v):
- self._version_cache_size=v
- for ver in self._pools[0].keys():
- if ver:
- for c in self._pools[0][ver][1]:
- c._cache.cache_size=v
-
- def setVersionPoolSize(self, v): self._version_pool_size=v
-
- def cacheStatistics(self): return () # :(
-
def undo(self, id):
TransactionalUndo(self, id)
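If clients turn out to need the removed cache introspection, something along
these lines could be layered on top of the _connectionMap hook that DB still
has. This is a hypothetical helper mirroring the old cacheDetail, not part of
this checkin:

    def external_cache_detail(db):
        # Count cached objects per class across all open connections.
        detail = {}
        def f(con):
            for oid, ob in con._cache.items():
                module = getattr(ob.__class__, '__module__', '')
                module = module and '%s.' % module or ''
                klass = "%s%s" % (module, ob.__class__.__name__)
                detail[klass] = detail.get(klass, 0) + 1
        db._connectionMap(f)
        items = detail.items()
        items.sort()
        return items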
=== ZODB4/ZODB/Connection.py 1.83 => 1.84 ===
--- ZODB4/ZODB/Connection.py:1.83 Thu Oct 10 17:07:29 2002
+++ ZODB4/ZODB/Connection.py Fri Nov 15 15:33:15 2002
@@ -91,14 +91,13 @@
__implements__ = IConnection
- def __init__(self, db, version='', cache_size=400,
- cache_deactivate_after=60):
+ def __init__(self, db, version='', cache_size=400):
"""Create a new Connection"""
self._db = db
self._storage = db._storage
self.new_oid = db._storage.new_oid
self._version = version
- self._cache = cache = Cache(cache_size, cache_deactivate_after)
+ self._cache = cache = Cache(cache_size)
self._unpickler = ConnectionUnpickler(self, self._cache)
# _invalidated queues invalidate messages delivered from the DB
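For callers, the visible effect on Connection is just the narrower
constructor; connections are still normally obtained from DB.open(). A hedged
sketch, where the version name is illustrative:

    conn = db.open()                    # regular connection, uses cache_size
    vconn = db.open(version='draft')    # version connection, uses version_cache_size
    # Code that constructed Connections directly must drop the removed argument:
    c = Connection(db, version='', cache_size=400)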