my %toindex;
sub needsbuild ($) { #{{{
- %toindex = map { $_ => 1 } @{shift()};
+ %toindex = map { pagename($_) => 1 } @{shift()};
} #}}}
+my $scrubber;
sub filter (@) { #{{{
my %params=@_;
# index page
my $db=xapiandb();
my $doc=Search::Xapian::Document->new();
- my $title=$params{page};
+ my $title;
if (exists $pagestate{$params{page}}{meta} &&
exists $pagestate{$params{page}}{meta}{title}) {
$title=$pagestate{$params{page}}{meta}{title};
}
+ else {
+ $title=IkiWiki::pagetitle($params{page});
+ }
+	# Remove any html from the text to be indexed.
+	# TODO: this also strips html that appears literally in e.g. a
+	# markdown pre block, where it should be left intact.
+ if (! defined $scrubber) {
+ eval q{use HTML::Scrubber};
+ error($@) if $@;
+ $scrubber=HTML::Scrubber->new(allow => []);
+ }
+ my $toindex=$scrubber->scrub($params{content});
+
+ # Take 512 characters for a sample, then extend it out
+ # if it stopped in the middle of a word.
+ my $size=512;
+ my ($sample)=substr($toindex, 0, $size);
+ if (length($sample) == $size) {
+ my $max=length($toindex);
+ my $next;
+ while ($size < $max &&
+ ($next=substr($toindex, $size++, 1)) !~ /\s/) {
+ $sample.=$next;
+ }
+ }
+ $sample=~s/\n/ /g;
+
# data used by omega
$doc->set_data(
"url=".urlto($params{page}, "")."\n".
- "sample=\n". # TODO
+ "sample=$sample\n".
"caption=$title\n".
"modtime=$IkiWiki::pagemtime{$params{page}}\n".
"size=".length($params{content})."\n"
$tg->set_document($doc);
$tg->index_text($params{page}, 2);
$tg->index_text($title, 2);
- $tg->index_text($params{content}); # TODO html strip; preprocessor too
+ $tg->index_text($toindex);
my $pageterm=pageterm($params{page});
$doc->add_term($pageterm);
# TODO: check if > 255 char page names overflow term
# length; use sha1 if so?
- return "P".$page;
+ return "U:".$page;
} #}}}
my $db;