diff -Nur trunk-orig/moin/MoinMoin/Page.py test/moin/MoinMoin/Page.py
--- trunk-orig/moin/MoinMoin/Page.py	Sun Mar 16 22:17:53 2003
+++ test/moin/MoinMoin/Page.py	Sun Apr 13 21:55:02 2003
@@ -234,6 +234,7 @@
         print_mode = request.form.has_key('action') and request.form['action'].value == 'print'
         content_only = keywords.get('content_only', 0)
         self.hilite_re = keywords.get('hilite_re', None)
+        self.hilite_words_re = keywords.get('hilite_words_re', None)
         if msg is None: msg = ""
 
         # count hit?
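
With this keyword in place, callers can ask a page to highlight a set of individual
words rather than a single regex. A minimal sketch of such a call (illustrative
values; the real call site is in the wikiaction.py change further below):

    import re
    from MoinMoin.Page import Page

    words_re = []
    for word in ('apple', 'pie'):      # illustrative search words
        words_re.append(re.compile(re.escape(word), re.IGNORECASE))
    Page(pagename).send_page(request, hilite_words_re=words_re)
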
diff -Nur trunk-orig/moin/MoinMoin/macro/MultiSearch.py test/moin/MoinMoin/macro/MultiSearch.py
--- trunk-orig/moin/MoinMoin/macro/MultiSearch.py	Thu Jan  1 01:00:00 1970
+++ test/moin/MoinMoin/macro/MultiSearch.py	Mon Apr 14 03:01:03 2003
@@ -0,0 +1,61 @@
+"""
+    MoinMoin - MultiSearch Macro
+
+    Copyright (c) 2003 Ronny Buchmann <ronny-vlug@vlugnet.org>
+    Modified by ThomasWaldmann
+    All rights reserved, see COPYING for details.
+
+    based on FullSearch.py
+        Copyright (c) 2000, 2001, 2002 by Jürgen Hermann <jh@web.de>
+
+    [[MultiSearch]]
+        displays a multi-word search dialog, like FullSearch does
+
+    [[MultiSearch('HelpContents')]]
+        embeds the search results into the page, as if you had entered
+        "HelpContents" into the search dialog
+
+    $Id: $
+"""
+
+# Imports
+import re, urllib
+from MoinMoin import config, user, wikiutil
+
+_args_re_pattern = r'((?P<hquote>[\'"])(?P<htext>.+?)(?P=hquote))|'
+
+
+def execute(macro, text, args_re=re.compile(_args_re_pattern)):
+    _ = macro.request.getText
+
+    # if no args given, invoke "classic" behavior
+    if text is None:
+        return macro._m_search("multisearch")
+
+    # parse and check arguments
+    args = args_re.match(text)
+    if not args:
+        return '<p><strong class="error">Invalid MultiSearch arguments "%s"!</strong></p>' % (text,)
+
+    needle = args.group('htext')
+    if not needle:
+        return '<p><strong class="error">No argument given for MultiSearch!</strong></p>'
+
+    # do the search
+    pagecount, hits = wikiutil.multisearchPages(needle, context=0)
+
+    # generate the result
+    result = macro.formatter.number_list(1)
+    for (count, pagename, dummy) in hits:
+        result = result + macro.formatter.listitem(1)
+        result = result + wikiutil.link_tag('%s?action=highlight&value=%s&words=1' %
+            (wikiutil.quoteWikiname(pagename), urllib.quote_plus(needle)),
+            pagename)
+        result = result + ' . . . . ' + `count` + [
+            _(' match'),
+            _(' matches')][count != 1]
+        result = result + macro.formatter.listitem(0)
+    result = result + macro.formatter.number_list(0)
+
+    return result
+
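
For reference, a small standalone sketch of how the argument regex above behaves
(illustrative only; Python 2, like the rest of the code):

    import re

    args_re = re.compile(r'((?P<hquote>[\'"])(?P<htext>.+?)(?P=hquote))|')

    # a quoted argument yields the search needle in the 'htext' group
    print args_re.match('"HelpContents"').group('htext')   # HelpContents
    print args_re.match("'wiki search'").group('htext')    # wiki search
    # an unquoted argument only matches the empty alternative, so 'htext'
    # is None and execute() reports "No argument given for MultiSearch!"
    print args_re.match('HelpContents').group('htext')     # None
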
diff -Nur trunk-orig/moin/MoinMoin/parser/wiki.py test/moin/MoinMoin/parser/wiki.py
--- trunk-orig/moin/MoinMoin/parser/wiki.py	Sat Mar 22 12:16:20 2003
+++ test/moin/MoinMoin/parser/wiki.py	Sun Apr 13 22:26:50 2003
@@ -763,7 +763,42 @@
 
     def highlight_text(self, text, **kw):
         if kw.get('flow', 1): self._check_p()
-        if not self.hilite_re: return self.formatter.text(text)
+        if not self.hilite_re:
+            if not self.hilite_words_re:
+                return self.formatter.text(text)
+            else: # hilite_words_re
+                result = []
+                matches = []
+                # find matches of all words and sort them
+                for word_re in self.hilite_words_re:
+                    # collect (start, end) spans for every occurrence of this word
+                    lastpos = 0
+                    while 1:
+                        match = word_re.search(text, lastpos)
+                        if not match: break
+                        lastpos = match.end()
+                        matches.append((match.start(), match.end()))
+                matches.sort()
+                # actually output it
+                lastpos = 0
+                hilite_open = 0
+                for (start, end) in matches:
+                    if hilite_open and start <= lastpos: # overlapping or adjacent match
+                        result.append(self.formatter.text(text[lastpos:end]))
+                        lastpos = end
+                    else: # new match
+                        if hilite_open:
+                            result.append(self.formatter.highlight(0))
+                            hilite_open = 0
+                        result.append(self.formatter.text(text[lastpos:start]))
+                        result.append(self.formatter.highlight(1))
+                        result.append(self.formatter.text(text[start:end]))
+                        lastpos = end
+                        hilite_open = 1
+                if hilite_open:
+                    result.append(self.formatter.highlight(0))
+                result.append(self.formatter.text(text[lastpos:]))
+                return string.join(result, '')
 
         result = []
         lastpos = 0
@@ -823,6 +858,7 @@
         """
         self.formatter = formatter
         self.hilite_re = self.formatter.page.hilite_re
+        self.hilite_words_re = self.formatter.page.hilite_words_re
 
         # prepare regex patterns
         rules = string.replace(self.formatting_rules, '\n', '|')
@@ -926,7 +962,7 @@
                     self.in_table = 0
 
             # convert line from wiki markup to HTML and print it
-            if self.hilite_re:
+            if self.hilite_re or self.hilite_words_re:
                 self.request.write(self.highlight_scan(scan_re, line + " "))
             else:
                 line, count = re.subn(scan_re, self.replace, line + " ")
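
To make the new word-highlighting branch of highlight_text() easier to follow,
here is the same collect/sort/merge idea as a standalone sketch, with plain
brackets standing in for the formatter.highlight() calls (function and variable
names here are made up for illustration):

    import re

    def highlight_words(text, words):
        # collect (start, end) spans of every occurrence of every word
        matches = []
        for word in words:
            word_re = re.compile(re.escape(word), re.IGNORECASE)
            pos = 0
            while 1:
                match = word_re.search(text, pos)
                if not match: break
                pos = match.end()
                matches.append((match.start(), match.end()))
        matches.sort()
        # emit the text, wrapping merged spans in [ ... ]
        result = []
        lastpos = 0
        open_span = 0
        for (start, end) in matches:
            if open_span and start <= lastpos:
                result.append(text[lastpos:end])     # extend the open span
            else:
                if open_span:
                    result.append(']')
                result.append(text[lastpos:start])
                result.append('[')
                result.append(text[start:end])
                open_span = 1
            lastpos = max(lastpos, end)
        if open_span:
            result.append(']')
        result.append(text[lastpos:])
        return ''.join(result)

    print highlight_words("Wiki wiki WikiWiki", ["wiki"])
    # prints: [Wiki] [wiki] [WikiWiki]
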
diff -Nur trunk-orig/moin/MoinMoin/wikiaction.py test/moin/MoinMoin/wikiaction.py
--- trunk-orig/moin/MoinMoin/wikiaction.py	Thu Mar 13 09:13:11 2003
+++ test/moin/MoinMoin/wikiaction.py	Mon Apr 14 03:16:43 2003
@@ -86,6 +86,54 @@
     print_search_stats(request, len(hits), pagecount, start)
     wikiutil.send_footer(request, pagename, editable=0, showactions=0, form=request.form)
 
+def do_multisearch(pagename, request, fieldname='value'):
+    _ = request.getText
+    start = time.clock()
+
+    # send http headers
+    webapi.http_headers(request)
+
+    # get parameters
+    if request.form.has_key(fieldname):
+        needle = request.form[fieldname].value
+    else:
+        needle = ''
+    try:
+        context = int(request.form['context'].value)
+    except (KeyError, ValueError):
+        context = 0
+    max_context = 10 # only show first `max_context` contexts
+
+    # check for sensible search term
+    #if len(needle) < 3:
+    #    Page(pagename).send_page(request,
+    #        msg=_("<b>Please use a more selective search term instead of '%(needle)s'!</b>") % locals())
+    #    return
+
+    # send title
+    wikiutil.send_title(request, _('Full text search for "%s"') % (needle,))
+
+    # search the pages
+    pagecount, hits = wikiutil.multisearchPages(needle, context=context)
+
+    # print the result
+    print "<UL>"
+    for (count, page_name, fragments) in hits:
+        print '<LI>' + Page(page_name).link_to(querystr=
+            'action=highlight&value=%s&words=1' % urllib.quote_plus(needle))
+        print ' . . . . ' + `count`
+        print (_(' match'), _(' matches'))[count != 1]
+        if context:
+            for hit in fragments[:max_context]:
+                print '<br>', '&nbsp;'*8, '<font color="#808080">...%s<b>%s</b>%s...</font>' \
+                    % tuple(map(cgi.escape, hit))
+            if len(fragments) > max_context:
+                print '<br>', '&nbsp;'*8, '<font color="#808080">...</font>'
+    print "</UL>"
+
+    print_search_stats(request, len(hits), pagecount, start)
+    wikiutil.send_footer(request, pagename, editable=0, showactions=0, form=request.form)
+
 
 def do_titlesearch(pagename, request, fieldname='value'):
     _ = request.getText
@@ -146,14 +194,24 @@
         needle = request.form["value"].value
     else:
         needle = ''
+    if request.form.has_key('words'):
+        words = request.form["words"].value
+    else:
+        words = 0
 
-    try:
-        needle_re = re.compile(needle, re.IGNORECASE)
-    except re.error:
-        needle = re.escape(needle)
-        needle_re = re.compile(needle, re.IGNORECASE)
-
-    Page(pagename).send_page(request, hilite_re=needle_re)
+    if words:
+        needle_words = string.split(needle)
+        words_re = []
+        for needle_word in needle_words:
+            words_re.append(re.compile(re.escape(needle_word), re.IGNORECASE))
+        Page(pagename).send_page(request, hilite_words_re=words_re)
+    else:
+        try:
+            needle_re = re.compile(needle, re.IGNORECASE)
+        except re.error:
+            needle = re.escape(needle)
+            needle_re = re.compile(needle, re.IGNORECASE)
+        Page(pagename).send_page(request, hilite_re=needle_re)
 
 
 #############################################################################
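
The result links generated by the MultiSearch macro and do_multisearch() point
back at do_highlight() with words=1 in the query string, e.g. for a search for
"apple pie" on FrontPage (illustrative values):

    FrontPage?action=highlight&value=apple+pie&words=1

do_highlight() then builds one case-insensitive regex per word, roughly like
this standalone sketch:

    import re, string, urllib

    needle = urllib.unquote_plus('apple+pie')   # 'apple pie'; the cgi layer normally decodes this
    words_re = []
    for word in string.split(needle):
        words_re.append(re.compile(re.escape(word), re.IGNORECASE))
    # words_re is passed to Page.send_page() as hilite_words_re (see the Page.py change above)
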
diff -Nur trunk-orig/moin/MoinMoin/wikimacro.py test/moin/MoinMoin/wikimacro.py
--- trunk-orig/moin/MoinMoin/wikimacro.py	Thu Mar 13 09:13:11 2003
+++ test/moin/MoinMoin/wikimacro.py	Mon Apr 14 02:58:21 2003
@@ -119,6 +119,11 @@
                 + '<br><input type="checkbox" name="case" value="1">'
                 + self._('Case-sensitive searching')
             )
+        if type == "multisearch":
+            boxes = (
+                  '<br><input type="checkbox" name="context" value="40" checked>'
+                + self._('Display context of search results')
+            )
         return self.formatter.rawHTML((
             '<form method="GET">'
             '<input type="hidden" name="action" value="%s">'
diff -Nur trunk-orig/moin/MoinMoin/wikiutil.py test/moin/MoinMoin/wikiutil.py
--- trunk-orig/moin/MoinMoin/wikiutil.py	Wed Apr  9 21:02:41 2003
+++ test/moin/MoinMoin/wikiutil.py	Mon Apr 14 04:04:25 2003
@@ -393,6 +393,97 @@
 
     return (len(all_pages), hits)
 
+def multisearchPages(needle, **kw):
+    """ Search the text of all pages for "needle" and return a tuple of
+        (number of pages searched, hits).
+
+        "needle" is a Google-style search pattern: all words must match
+        (an AND search; an OR search is pointless, since it can be done
+        with multiple search calls).
+
+        Words are matched case-insensitively. Use +searchword to force a
+        case-sensitive match, -excludeword to exclude pages containing
+        excludeword, and /regex/ to search for a regular expression.
+
+        `hits` is a list of (count, pagename, fragments) tuples. With
+        context > 0, `fragments` is a list of (before, hit, after) text
+        triples giving `context` chars on each side of every hit;
+        otherwise `fragments` is None.
+
+        Keyword argument: context -- chars of context on each side of a hit
+    """
+    from MoinMoin.Page import Page
+
+    context = int(kw.get('context', 0))
+
+    needle_words = needle.split()
+    needle_res = []
+    for word in needle_words:
+        re_mode = re.IGNORECASE
+        exclude = 0
+        if word.startswith("-"):   # -exclude
+            exclude = 1
+            word = word[1:]
+        elif word.startswith("+"): # +CaseSensitive
+            re_mode = 0
+            word = word[1:]
+        if word.startswith("/") and word.endswith("/"): # /regex/
+            word = word[1:-1]
+        else:
+            word = re.escape(word)
+        c_re = re.compile(word, re_mode)
+        needle_res.append((exclude, c_re))
+
+    hits = []
+    all_pages = getPageList(config.text_dir)
+    for page_name in all_pages:
+        # we also search the title by faking it into the "body":
+        body = "****** %s ******\n" % page_name + Page(page_name).get_raw_body()
+        count = 0
+        hit = 0
+        matches = []
+        # search every pre-compiled re
+        for (exclude, needle_re) in needle_res:
+            if exclude:
+                hit = (needle_re.search(body) == None)
+            else:
+                if context:
+                    pos = 0
+                    while 1:
+                        match = needle_re.search(body, pos)
+                        if not match: break
+                        pos = match.end()+1
+                        matches.append((match.start(), match.end()))
+                    hit = pos # if pos > 0: we have a hit
+                else:
+                    hit = len(needle_re.findall(body))
+                    count = count + hit
+            if not hit: break
+        if not hit: continue
+        # now output all hits in correct order
+        if context:
+            matches.sort()
+            fragments = []
+            # note: matching spans are not merged, so the same text can show
+            # up in several fragments when needle words hit close together
+            for (start, end) in matches:
+                fragments.append((
+                    body[max(0, start-context):start],
+                    body[start:end],
+                    body[end:end+context],
+                ))
+            if fragments:
+                hits.append((len(fragments), page_name, fragments))
+        else:
+            if count:
+                hits.append((count, page_name, None))
+
+    # The default comparison for tuples compares elements in order,
+    # so this sorts by number of hits
+    hits.sort()
+    hits.reverse()
+
+    return (len(all_pages), hits)
 
 #############################################################################
 ### Misc
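
For reference, roughly how the parsing loop in multisearchPages() classifies a
needle such as '+MoinMoin -Help /c[ao]t/' (standalone sketch mirroring the code
above; expected output shown in the comments):

    import re

    def parse_needle(needle):
        needle_res = []
        for word in needle.split():
            re_mode = re.IGNORECASE
            exclude = 0
            if word.startswith("-"):     # -word: exclude pages containing it
                exclude = 1
                word = word[1:]
            elif word.startswith("+"):   # +word: case-sensitive match
                re_mode = 0
                word = word[1:]
            if word.startswith("/") and word.endswith("/"):
                word = word[1:-1]        # /regex/: used as a regex, not escaped
            else:
                word = re.escape(word)
            needle_res.append((exclude, re.compile(word, re_mode)))
        return needle_res

    for exclude, word_re in parse_needle('+MoinMoin -Help /c[ao]t/'):
        print exclude, word_re.pattern
    # 0 MoinMoin    (must match, case-sensitive because of the +)
    # 1 Help        (pages containing "Help" are excluded)
    # 0 c[ao]t      (raw regex, matched case-insensitively)
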

