[Zope3-checkins] CVS: Zope3/src/zope/textindex - baseindex.py:1.1.2.2 cosineindex.py:1.1.2.2 htmlsplitter.py:1.1.2.2 lexicon.py:1.1.2.2 okapiindex.py:1.1.2.2 parsetree.py:1.1.2.2 queryparser.py:1.1.2.2
Fred L. Drake, Jr.
fred@zope.com
Mon, 23 Dec 2002 17:01:35 -0500
Update of /cvs-repository/Zope3/src/zope/textindex
In directory cvs.zope.org:/tmp/cvs-serv16014
Modified Files:
Tag: NameGeddon-branch
baseindex.py cosineindex.py htmlsplitter.py lexicon.py
okapiindex.py parsetree.py queryparser.py
Log Message:
Update to the New World Naming.
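In short, the hunks below mechanically rename imports from the old mixed-case package layout to the new all-lowercase one, and tidy import ordering along the way. For example:

    # old spelling (removed)                  # new spelling (added)
    from Zope.TextIndex import WidCode    ->  from zope.textindex import widcode
    from Zope.TextIndex import ParseTree  ->  from zope.textindex import parsetree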
=== Zope3/src/zope/textindex/baseindex.py 1.1.2.1 => 1.1.2.2 ===
--- Zope3/src/zope/textindex/baseindex.py:1.1.2.1 Mon Dec 23 14:33:34 2002
+++ Zope3/src/zope/textindex/baseindex.py Mon Dec 23 17:01:02 2002
@@ -17,18 +17,20 @@
import math
+import zodb
+
+from persistence import Persistent
+
from zodb.btrees.IOBTree import IOBTree
from zodb.btrees.IIBTree import IIBTree, IIBucket, IITreeSet
from zodb.btrees.IIBTree import intersection, difference
from zodb.btrees import Length
from zope.textindex.iindex import IIndex
-from Zope.TextIndex import WidCode
+from zope.textindex import widcode
from zope.textindex.setops import mass_weightedIntersection, \
- mass_weightedUnion
+ mass_weightedUnion
-import zodb
-from persistence import Persistent
# Instead of storing floats, we generally store scaled ints. Binary pickles
# can store those more efficiently. The default SCALE_FACTOR of 1024
@@ -97,7 +99,7 @@
def get_words(self, docid):
"""Return a list of the wordids for a given docid."""
# Note this is overridden in the instance
- return WidCode.decode(self._docwords[docid])
+ return widcode.decode(self._docwords[docid])
# A subclass may wish to extend or override this.
def index_doc(self, docid, text):
@@ -107,7 +109,7 @@
wid2weight, docweight = self._get_frequencies(wids)
self._mass_add_wordinfo(wid2weight, docid)
self._docweight[docid] = docweight
- self._docwords[docid] = WidCode.encode(wids)
+ self._docwords[docid] = widcode.encode(wids)
return len(wids)
# A subclass may wish to extend or override this. This is for adjusting
@@ -146,7 +148,7 @@
self._add_wordinfo(wid, newscore, docid)
self._docweight[docid] = new_docw
- self._docwords[docid] = WidCode.encode(new_wids)
+ self._docwords[docid] = widcode.encode(new_wids)
return len(new_wids)
# Subclass must override.
@@ -192,7 +194,7 @@
hits = mass_weightedIntersection(scores)
if not hits:
return hits
- code = WidCode.encode(wids)
+ code = widcode.encode(wids)
result = IIBTree()
for docid, weight in hits.items():
docwords = self._docwords[docid]
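For context on the last hunk above: each document's word ids are stored in _docwords as a single encoded string, so a phrase search encodes the query's wids the same way and looks for that encoding as a substring of the document's encoding. The widcode module itself is not part of this diff; the stand-in below only illustrates the substring property the phrase search relies on (the real codec presumably uses a more compact byte-level encoding).

    # Stand-in for zope.textindex.widcode -- illustrative only, not the real
    # codec.  The property the phrase search relies on: encode(phrase_wids) is
    # a substring of encode(doc_wids) exactly when the phrase's word ids occur
    # contiguously, in order, in the document.
    def encode(wids):
        # Delimit every wid so substring matches can only start and end on
        # whole-wid boundaries.
        return ";" + ";".join(str(wid) for wid in wids) + ";"

    def decode(code):
        return [int(part) for part in code.split(";") if part]

    doc_code = encode([12, 7, 99, 7, 3])   # hypothetical document word ids
    phrase_code = encode([7, 99])          # hypothetical query phrase wids
    print(phrase_code in doc_code)         # True: 7 is followed by 99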
=== Zope3/src/zope/textindex/cosineindex.py 1.1.2.1 => 1.1.2.2 ===
--- Zope3/src/zope/textindex/cosineindex.py:1.1.2.1 Mon Dec 23 14:33:34 2002
+++ Zope3/src/zope/textindex/cosineindex.py Mon Dec 23 17:01:02 2002
@@ -20,8 +20,8 @@
from zope.textindex.iindex import IIndex
from zope.textindex.baseindex import BaseIndex, \
- inverse_doc_frequency, \
- scaled_int, SCALE_FACTOR
+ inverse_doc_frequency, \
+ scaled_int, SCALE_FACTOR
class CosineIndex(BaseIndex):
=== Zope3/src/zope/textindex/htmlsplitter.py 1.1.2.1 => 1.1.2.2 ===
--- Zope3/src/zope/textindex/htmlsplitter.py:1.1.2.1 Mon Dec 23 14:33:34 2002
+++ Zope3/src/zope/textindex/htmlsplitter.py Mon Dec 23 17:01:02 2002
@@ -12,10 +12,11 @@
#
##############################################################################
+import re
+
from zope.textindex.isplitter import ISplitter
from zope.textindex.pipelinefactory import element_factory
-import re
class HTMLWordSplitter:
=== Zope3/src/zope/textindex/lexicon.py 1.1.2.1 => 1.1.2.2 ===
--- Zope3/src/zope/textindex/lexicon.py:1.1.2.1 Mon Dec 23 14:33:34 2002
+++ Zope3/src/zope/textindex/lexicon.py Mon Dec 23 17:01:02 2002
@@ -14,10 +14,11 @@
import re
+import zodb
+
from zodb.btrees.IOBTree import IOBTree
from zodb.btrees.OIBTree import OIBTree
-import zodb
from persistence import Persistent
from zope.textindex.ilexicon import ILexicon
@@ -25,6 +26,7 @@
from zope.textindex.parsetree import QueryError
from zope.textindex.pipelinefactory import element_factory
+
class Lexicon(Persistent):
__implements__ = ILexicon
@@ -193,7 +195,7 @@
dict = get_stopdict().copy()
try:
- from Zope.TextIndex.stopper import process as _process
+ from zope.textindex.stopper import process as _process
except ImportError:
def process(self, lst):
has_key = self.dict.has_key
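The stopper hunk keeps the existing optional-speedup pattern: try to import a compiled stopword filter, and fall back to pure Python when the extension is not built. A minimal sketch of that pattern follows; only the import and the Python fallback appear in this hunk, so the (dict, word list) signature of the compiled helper is an assumption.

    try:
        # Compiled filter, if the stopper extension module was built.
        # Assumed signature: process(stopword_dict, word_list) -> filtered list.
        from zope.textindex.stopper import process
    except ImportError:
        # Pure-Python fallback: drop any word found in the stopword dict.
        def process(stopwords, words):
            return [w for w in words if w not in stopwords]

    # e.g. process({"the": None, "and": None}, ["the", "cat", "and", "dog"])
    #   -> ["cat", "dog"]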
=== Zope3/src/zope/textindex/okapiindex.py 1.1.2.1 => 1.1.2.2 ===
--- Zope3/src/zope/textindex/okapiindex.py:1.1.2.1 Mon Dec 23 14:33:34 2002
+++ Zope3/src/zope/textindex/okapiindex.py Mon Dec 23 17:01:02 2002
@@ -22,7 +22,6 @@
from zope.textindex.iindex import IIndex
from zope.textindex.baseindex import \
BaseIndex, inverse_doc_frequency, scaled_int
-##from Zope.TextIndex.okascore import score
class OkapiIndex(BaseIndex):
=== Zope3/src/zope/textindex/parsetree.py 1.1.2.1 => 1.1.2.2 ===
--- Zope3/src/zope/textindex/parsetree.py:1.1.2.1 Mon Dec 23 14:33:34 2002
+++ Zope3/src/zope/textindex/parsetree.py Mon Dec 23 17:01:02 2002
@@ -18,7 +18,7 @@
from zope.textindex.iqueryparsetree import IQueryParseTree
from zope.textindex.setops import mass_weightedIntersection, \
- mass_weightedUnion
+ mass_weightedUnion
class QueryError(Exception):
pass
=== Zope3/src/zope/textindex/queryparser.py 1.1.2.1 => 1.1.2.2 ===
--- Zope3/src/zope/textindex/queryparser.py:1.1.2.1 Mon Dec 23 14:33:34 2002
+++ Zope3/src/zope/textindex/queryparser.py Mon Dec 23 17:01:02 2002
@@ -58,7 +58,7 @@
import re
from zope.textindex.iqueryparser import IQueryParser
-from Zope.TextIndex import ParseTree
+from zope.textindex import parsetree
# Create unique symbols for token types.
_AND = intern("AND")
@@ -123,7 +123,7 @@
tree = self._parseOrExpr()
self._require(_EOF)
if tree is None:
- raise ParseTree.ParseError(
+ raise parsetree.ParseError(
"Query contains only common words: %s" % repr(query))
return tree
@@ -141,7 +141,7 @@
if not self._check(tokentype):
t = self._tokens[self._index]
msg = "Token %r required, %r found" % (tokentype, t)
- raise ParseTree.ParseError, msg
+ raise parsetree.ParseError, msg
def _check(self, tokentype):
if self._tokentypes[self._index] is tokentype:
@@ -169,7 +169,7 @@
elif len(L) == 1:
return L[0]
else:
- return ParseTree.OrNode(L)
+ return parsetree.OrNode(L)
def _parseAndExpr(self):
L = []
@@ -181,7 +181,7 @@
t = self._parseNotExpr()
if t is None:
continue
- if isinstance(t, ParseTree.NotNode):
+ if isinstance(t, parsetree.NotNode):
Nots.append(t)
else:
L.append(t)
@@ -191,14 +191,14 @@
if len(L) == 1:
return L[0]
else:
- return ParseTree.AndNode(L)
+ return parsetree.AndNode(L)
def _parseNotExpr(self):
if self._check(_NOT):
t = self._parseTerm()
if t is None:
return None # Only stopwords
- return ParseTree.NotNode(t)
+ return parsetree.NotNode(t)
else:
return self._parseTerm()
@@ -214,16 +214,16 @@
nodes = filter(None, nodes)
if not nodes:
return None # Only stopwords
- structure = [(isinstance(nodes[i], ParseTree.NotNode), i, nodes[i])
+ structure = [(isinstance(nodes[i], parsetree.NotNode), i, nodes[i])
for i in range(len(nodes))]
structure.sort()
nodes = [node for (bit, index, node) in structure]
- if isinstance(nodes[0], ParseTree.NotNode):
- raise ParseTree.ParseError(
+ if isinstance(nodes[0], parsetree.NotNode):
+ raise parsetree.ParseError(
"a term must have at least one positive word")
if len(nodes) == 1:
return nodes[0]
- tree = ParseTree.AndNode(nodes)
+ tree = parsetree.AndNode(nodes)
return tree
def _parseAtom(self):
@@ -233,11 +233,11 @@
self._ignored.append(term)
return None
if len(words) > 1:
- tree = ParseTree.PhraseNode(words)
+ tree = parsetree.PhraseNode(words)
elif self._lexicon.isGlob(words[0]):
- tree = ParseTree.GlobNode(words[0])
+ tree = parsetree.GlobNode(words[0])
else:
- tree = ParseTree.AtomNode(words[0])
+ tree = parsetree.AtomNode(words[0])
if term[0] == "-":
- tree = ParseTree.NotNode(tree)
+ tree = parsetree.NotNode(tree)
return tree
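The node classes touched throughout this file live in parsetree (formerly ParseTree). A rough sketch of the kind of tree the parser assembles for a boolean query, using only the constructors visible in the hunks above; the exact constructor signatures and the parser's entry point are assumptions, since they are not shown in this diff.

    from zope.textindex import parsetree

    # Roughly what parsing "cat AND NOT dog" produces: an AndNode whose
    # children are the positive atom and a NotNode wrapping the negated one.
    tree = parsetree.AndNode([
        parsetree.AtomNode("cat"),
        parsetree.NotNode(parsetree.AtomNode("dog")),
    ])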