Attachment 'backlinks.patch'
Download 1 * looking for arch@arch.thinkmo.de--2003-archives/moin--main--1.3--patch-616 to compare with
2 * comparing to arch@arch.thinkmo.de--2003-archives/moin--main--1.3--patch-616
3 -- MoinMoin/search.py
4 M MoinMoin/search.py
5 M MoinMoin/theme/__init__.py
6 M MoinMoin/Page.py
7
8 * file metadata changed
9
10 ./MoinMoin/search.py
11 --permissions 644
12 => --permissions 744
13
14 * modified files
15
16 --- orig/MoinMoin/Page.py
17 +++ mod/MoinMoin/Page.py
18 @@ -1070,26 +1070,12 @@
19
20 # send the page header
21 if self.default_formatter:
22 -
23 - def quote_whitespace(x):
24 - if x.find(" ")!=-1:
25 - return "'%s'" % x
26 - else:
27 - return x
28 - page_needle = quote_whitespace(self.page_name)
29 - if config.allow_subpages and page_needle.count('/'):
30 - #parts = page_needle.split('/')
31 - #for level in range(1, len(parts)):
32 - # page_needle += (" !" + quote_whitespace(
33 - # "/".join(parts[:level])) + " " +
34 - # quote_whitespace(
35 - # "/" + "/".join(parts[level:])))
36 - page_needle = '/' + page_needle.split('/')[-1]
37 -
38 - link = '%s/%s?action=fullsearch&value=%s&literal=1&case=1&context=180' % (
39 + full_text_query = 'linkto:"%s"' % self.page_name
40 + link = '%s/%s?action=fullsearch&value=%s&context=180' % (
41 request.getScriptname(),
42 wikiutil.quoteWikinameURL(self.page_name),
43 - urllib.quote_plus(page_needle.encode(config.charset), ''))
44 + urllib.quote_plus(full_text_query.encode(config.charset)))
45 +
46 title = self.split_title(request)
47 if self.rev:
48 msg = "<strong>%s</strong><br>%s" % (
49
50
51 --- orig/MoinMoin/search.py
52 +++ mod/MoinMoin/search.py
53 @@ -1,7 +1,9 @@
54 """
55 MoinMoin search engine
56
57 - @copyright: Florian Festi TODO: email
58 + @copyright: MoinMoin:FlorianFesti
59 + MoinMoin:NirSoffer
60 + MoinMoin:AlexanderSchremmer
61 @license: GNU GPL, see COPYING for details
62 """
63
64 @@ -329,6 +331,77 @@
65 return self
66
67
68 +class LinkSearch(BaseExpression):
69 + """ Search the term in the pagelinks """
70 +
71 + def __init__(self, pattern, use_re=False, case=True):
72 + """ Init a link search
73 +
74 + @param pattern: pattern to search for, ascii string or unicode
75 + @param use_re: treat pattern as re or plain text, bool
76 + @param case: do case sensitive search, bool
77 + """
78 + # used for search in links
79 + self._pattern = pattern
80 + # used for search in text
81 + self._textpattern = '(' + self._pattern.replace('/', '|') + ')'
82 + self.negated = 0
83 + self.textsearch = TextSearch(self._textpattern, use_re=1, case=case)
84 + self._build_re(unicode(pattern), use_re=use_re, case=case)
85 +
86 + def _build_re(self, pattern, use_re=False, case=False):
87 + """ Make a regular expression out of a text pattern """
88 + flags = (re.U | re.I, re.U)[case]
89 +
90 + try:
91 + if not use_re:
92 + raise re.error
93 + self.search_re = re.compile(pattern, flags)
94 + except re.error:
95 + pattern = re.escape(pattern) + '$'
96 + self.pattern = pattern
97 + self.search_re = re.compile(pattern, flags)
98 +
99 + def costs(self):
100 + return 5000 # cheaper than a TextSearch
101 +
102 + def __unicode__(self):
103 + return u'%s!"%s"' % (('', '-')[self.negated], unicode(self._pattern))
104 +
105 + def highlight_re(self):
106 + return u"(%s)" % self._textpattern
107 +
108 + def pageFilter(self):
109 + """ Page filter function for single text search """
110 + return None
111 +
112 + def search(self, page):
113 + # Get matches in page name
114 + matches = []
115 + page_links = '\n'.join(page.getPageLinks(page.request))
116 + if self.search_re.search(page_links) is not None:
117 + # Search in page text
118 + results = self.textsearch.search(page)
119 + if results:
120 + matches.extend(results)
121 + else: #This happens e.g. for pages that use navigation macros
122 + matches.append(TextMatch(0,0))
123 +
124 + # Decide what to do with the results.
125 + if ((self.negated and matches) or
126 + (not self.negated and not matches)):
127 + return None
128 + elif matches:
129 + return matches
130 + else:
131 + # XXX why not return None or empty list?
132 + return [Match()]
133 +
134 + def indexed_query(self):
135 + return self
136 +
137 +
138 +
139 class IndexedQuery:
140 """unused and experimental"""
141 def __init__(self, queryobject):
142 @@ -396,7 +469,6 @@
143 # before pages that their title does not match.
144 _weight = 100.0
145
146 -
147 class FoundPage:
148 """ Represents a page in a search result """
149
150 @@ -577,6 +649,7 @@
151 title_search = self.titlesearch
152 regex = self.regex
153 case = self.case
154 + linkto = 0
155
156 for m in modifiers:
157 if "title".startswith(m):
158 @@ -585,8 +658,12 @@
159 regex = True
160 elif "case".startswith(m):
161 case = True
162 + elif "linkto".startswith(m):
163 + linkto = True
164
165 - if title_search:
166 + if linkto:
167 + obj = LinkSearch(text, use_re=regex, case=case)
168 + elif title_search:
169 obj = TitleSearch(text, use_re=regex, case=case)
170 else:
171 obj = TextSearch(text, use_re=regex, case=case)
172
173
174 --- orig/MoinMoin/theme/__init__.py
175 +++ mod/MoinMoin/theme/__init__.py
176 @@ -154,14 +154,23 @@
177 @return: title html
178 """
179 _ = self.request.getText
180 +
181 if d['title_link']:
182 - content = ('<a title="%(title)s" href="%(href)s">%(text)s</a>') % {
183 + content = curpage = ''
184 + segments = d['title_text'].split('/')
185 + for s in segments[:-1]:
186 + curpage += s
187 + content = "%s%s/" % (content, Page(self.request, curpage).link_to(self.request, s))
188 + curpage += '/'
189 +
190 + content += ('<a class="backlink" title="%(title)s" href="%(href)s">%(text)s</a>') % {
191 'title': _('Click to do a full-text search for this title'),
192 'href': d['title_link'],
193 - 'text': wikiutil.escape(d['title_text']),
194 + 'text': wikiutil.escape(segments[-1]),
195 }
196 else:
197 content = wikiutil.escape(d['title_text'])
198 +
199 html = '''
200 <h1 id="title">%s</h1>
201 ''' % content
Attached Files
To refer to attachments on a page, use attachment:filename, as shown below in the list of files. Do NOT use the URL of the [get] link, since this is subject to change and can break easily. You are not allowed to attach a file to this page.