#!/usr/local/bin/perl -w
# This script builds a cache of web pages, which can be searched
# by sitesearch.pl; it should be run each time the web site content changes.

use strict;
use CGI qw(:standard);
use Data::Dumper;

my $dir   = 'your_file_path';    # directory to search
my $ext   = 'htm';               # page types to search
my $cache = 'sitesearch.dat';    # cache file
my (@Results, $file, $title);

# Optional boundaries for the search area, to avoid
# searching on repeated text:
my $startstring = 'unique_ident_1';
my $endstring   = 'unique_ident_2';

chdir $dir or die "could not chdir to $dir: $!";

# Get all the relevant pages; extract the title, file name and
# searchable text from each, and store them in an array of hashes:
while (<*.$ext>) {
    open FILE, $_ or die "could not open $_: $!";
    read FILE, $file, -s FILE;                  # slurp the whole file
    close FILE;
    ($title) = $file =~ m#<title>(.*?)</title>#is;
    $title = $_ unless defined $title;          # fall back to the file name
    $file =~ s/^.*$startstring/$startstring/s;  # delete these 2 lines if you
    $file =~ s/$endstring.*$//s;                # want to search the whole page
    $file =~ s/<[^>]*>/ /g;                     # strip the HTML tags
    push @Results, {filename => $_, title => $title, text => $file};
}

# Save the results in the cache file:
open SAVE, ">$cache" or die "could not open $cache: $!";
print SAVE Dumper(\@Results);
close SAVE;

# Check what has been saved in the cache file, and display it:
open RETRIEVE, $cache or die "could not open $cache: $!";
my $data = do { local $/; <RETRIEVE> };      # slurp the Dumper output
close RETRIEVE;
my @Retrieves = @{ my $VAR1; eval $data };   # eval returns the $VAR1 arrayref

print header, h2('You have successfully cached the following pages:');
print $_->{title}, br for @Retrieves;
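
# ---------------------------------------------------------------------
# For reference, a minimal sketch of how the companion sitesearch.pl
# might load and search this cache. The original sitesearch.pl is not
# shown here, so the query parameter name ('terms') and the matching
# logic below are illustrative assumptions, not the actual script. It
# would live in its own file; it is commented out here so this cache
# builder remains runnable as-is.
#
#   #!/usr/local/bin/perl -w
#   use strict;
#   use CGI qw(:standard);
#
#   my $cache = 'sitesearch.dat';            # must match the cache file above
#   my $query = param('terms') || '';        # hypothetical CGI search parameter
#
#   open RETRIEVE, $cache or die "could not open $cache: $!";
#   my $data = do { local $/; <RETRIEVE> };  # slurp the Dumper output
#   close RETRIEVE;
#   my @Pages = @{ my $VAR1; eval $data };   # rebuild the array of hashes
#
#   print header, h2("Results for '$query':");
#   for my $page (@Pages) {
#       # link to any cached page whose text contains the query string:
#       next unless length $query and $page->{text} =~ /\Q$query\E/i;
#       print a({href => $page->{filename}}, $page->{title}), br;
#   }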