[Zope-Checkins] CVS: Zope2 - testCatalog.py:1.1.4.11
Andreas Jung
andreas@dhcp165.digicool.com
Wed, 14 Mar 2001 13:43:17 -0500
Update of /cvs-repository/Zope2/lib/python/Products/ZCatalog/tests
In directory yetix:/work/Zope2/Catalog-BTrees-Integration/lib/python/Products/ZCatalog/tests
Modified Files:
Tag: Catalog-BTrees-Integration
testCatalog.py
Log Message:
rearranged source code
--- Updated File testCatalog.py in package Zope2 ---
--- testCatalog.py 2001/03/14 15:12:24 1.1.4.10
+++ testCatalog.py 2001/03/14 18:43:16 1.1.4.11
@@ -7,6 +7,9 @@
Andreas Jung, andreas@digicool.com
$Log$
+ Revision 1.1.4.11 2001/03/14 18:43:16 andreas
+ rearranged source code
+
Revision 1.1.4.10 2001/03/14 15:12:24 andreas
minor changes
@@ -149,7 +152,8 @@
self.keywords = []
self.maxfiles = maxfiles
- self._vocabulary = Vocabulary.Vocabulary('Vocabulary','Vocabulary', globbing=1)
+ self._vocabulary = Vocabulary.Vocabulary('Vocabulary',
+ 'Vocabulary', globbing=1)
self._catalog = Catalog.Catalog()
self._catalog.addIndex('to', 'TextIndex')
self._catalog.addIndex('sender', 'TextIndex')
@@ -198,7 +202,8 @@
def catMessage(self,m):
- self._catalog.catalogObject( testMessage(m) , m.dict["message-id"] )
+ self._catalog.catalogObject( testMessage(m) ,
+ m.dict["message-id"] )
def uncatMessage(self,uid):
self._catalog.uncatalogObject( uid )
@@ -247,8 +252,10 @@
self.init_phase = 0
self.setlog( open("dispatcher.log","a") )
- self.logn('treads=%d searchiterations=%d' % (numThreads,searchIterations))
- self.logn('updateiterations=%d maxfiles=%d' % (updateIterations,maxFiles))
+ self.logn('treads=%d searchiterations=%d' %
+ (numThreads,searchIterations))
+ self.logn('updateiterations=%d maxfiles=%d' %
+ (updateIterations,maxFiles))
#############################################################
# Build up ZODB
@@ -303,7 +310,8 @@
os.system("rm -fr data/work")
if not os.path.exists("data/work"): os.makedirs("data/work")
- assert os.system("cp %s/Data_orig.fs data/work/Data.fs" % dataDir)==0, "Error while replicating original data"
+ assert os.system("cp %s/Data_orig.fs data/work/Data.fs" % dataDir)==0, \
+ "Error while replicating original data"
self.zodb = testZODB("data/work/Data.fs",open=0)
self.threads = {}
@@ -314,8 +322,10 @@
self.keywords = kw.keywords()
self.logn("-" * 80)
- self.logn('treads=%d searchiterations=%d' % (numThreads,searchIterations))
- self.logn('updateiterations=%d maxfiles=%d' % (updateIterations,maxFiles))
+ self.logn('treads=%d searchiterations=%d' %
+ (numThreads,searchIterations))
+ self.logn('updateiterations=%d maxfiles=%d' %
+ (updateIterations,maxFiles))
def tearDown(self):
@@ -346,7 +356,8 @@
def testFulltextIndex(self,args,kw):
""" benchmark FulltextIndex """
- self.dispatcher('funcFulltextIndex' , ('funcFulltextIndex', kw["numThreads"] , () , {} ) )
+ self.dispatcher('funcFulltextIndex' ,
+ ('funcFulltextIndex', kw["numThreads"] , () , {} ) )
def funcFulltextIndex(self,*args):
@@ -368,7 +379,8 @@
def testFieldIndex(self,args,kw):
""" benchmark field index"""
- self.dispatcher('funcFieldIndex' , ('funcFieldIndex',kw["numThreads"] , () , {} ) )
+ self.dispatcher('funcFieldIndex' ,
+ ('funcFieldIndex',kw["numThreads"] , () , {} ) )
def funcFieldIndex(self,*args):
@@ -382,7 +394,8 @@
res = cat.searchResults( {"length" : i } )
for r in res:
- assert i==r.length , "%s should have size %d but is %s" % (r.file_id,i,r.length)
+ assert i==r.length , "%s should have size %d but is %s" % \
+ (r.file_id,i,r.length)
self.th_teardown(env)
@@ -392,7 +405,8 @@
def testKeywordIndex(self,args,kw):
""" benchmark Keyword index"""
- self.dispatcher('funcKeywordIndex' , ('funcKeywordIndex', kw["numThreads"] , () , {} ) )
+ self.dispatcher('funcKeywordIndex' ,
+ ('funcKeywordIndex', kw["numThreads"] , () , {} ) )
def funcKeywordIndex(self,*args):
@@ -414,7 +428,8 @@
def testFieldRangeIndex(self,args,kw):
""" benchmark field range index"""
- self.dispatcher('funcFieldRangeIndex' , ('funcFieldRangeIndex', kw["numThreads"] , () , {} ) )
+ self.dispatcher('funcFieldRangeIndex' ,
+ ('funcFieldRangeIndex', kw["numThreads"] , () , {} ) )
def funcFieldRangeIndex(self,*args):
@@ -434,7 +449,8 @@
for i in range(searchIterations):
for r in cat.searchResults( {"length" : rg[i],"length_usage" : "range:min:max" } ):
size = r.length
- assert rg[i][0]<=size and size<=rg[i][1] , "Filesize of %s is out of range (%d,%d) %d" % (r.file_id,rg[i][0],rg[i][1],size)
+ assert rg[i][0]<=size and size<=rg[i][1] , \
+ "Filesize of %s is out of range (%d,%d) %d" % (r.file_id,rg[i][0],rg[i][1],size)
self.th_teardown(env)
@@ -446,7 +462,8 @@
def testKeywordRangeIndex(self,args,kw):
""" benchmark Keyword range index"""
- self.dispatcher('funcKeywordRangeIndex' , ('funcKeywordRangeIndex', kw["numThreads"] , () , {} ) )
+ self.dispatcher('funcKeywordRangeIndex' ,
+ ('funcKeywordRangeIndex', kw["numThreads"] , () , {} ) )
def funcKeywordRangeIndex(self,*args):
@@ -464,7 +481,10 @@
results = []
for i in range(len(self.keywords)):
- results.append( cat.searchResults( {"keywords":self.keywords[i], "length" : rg[i],"length_usage" : "range:min:max" } ))
+ results.append( cat.searchResults( {"keywords":self.keywords[i],
+ "length" : rg[i],
+ "length_usage" : "range:min:max" } )
+ )
self.th_teardown(env)
@@ -475,7 +495,8 @@
def testUpdates(self,args,kw):
""" benchmark concurrent catalog/uncatalog operations """
- self.dispatcher("testUpdates" , ("funcUpdates", kw["numThreads"] , args, kw ))
+ self.dispatcher("testUpdates" ,
+ ("funcUpdates", kw["numThreads"] , args, kw ))
def funcUpdates(self,*args,**kw):
@@ -558,11 +579,13 @@
def testReindexing(self,args,kw):
""" test reindexing of existing data """
- self.dispatcher("testReindexing" , ("funcReindexing",kw["numThreads"] , (mbox,1000) , {} ))
+ self.dispatcher("testReindexing" ,
+ ("funcReindexing",kw["numThreads"] , (mbox,1000) , {} ))
def testReindexingAndModify(self,args,kw):
""" test reindexing of existing data but with modifications"""
- self.dispatcher("testReindexing" , ("funcReindexing",kw["numThreads"] , (mbox,1000,1) , {} ))
+ self.dispatcher("testReindexing" ,
+ ("funcReindexing",kw["numThreads"] , (mbox,1000,1) , {} ))
def funcReindexing(self,mbox,numfiles=100,modify_doc=0):
@@ -601,7 +624,8 @@
def testIncrementalIndexing(self,args,kw):
""" testing incremental indexing """
- self.dispatcher("testIncrementalIndexing" , ("funcReindexing",kw["numThreads"], (mbox2,1000) , {}))
+ self.dispatcher("testIncrementalIndexing" ,
+ ("funcReindexing",kw["numThreads"], (mbox2,1000) , {}))
def get_catalog(self):
@@ -826,7 +850,9 @@
m = whrandom.randint(0,20000)
n = m + 1000
- for r in self._catalog.searchResults( {"number" : (m,n) ,"length_usage" : "range:min:max" } ):
+ for r in self._catalog.searchResults( {"number" : (m,n) ,
+ "length_usage" : "range:min:max" }
+ ):
size = r.number
assert m<=size and size<=n , "%d vs [%d,%d]" % (r.number,m,n)