# This script crawls across all found links below the given "root" URL.
# It reports all good and bad links to stdout. This code was based on the
# checklink.pl script I wrote ages ago.
#
# Written to use 'curl' for URL checking.
#
# Author: Daniel Stenberg <daniel@haxx.se>
# Version: 0.3 Jan 3, 2001
#
# 0.3 - The -i now adds regexes that if a full URL link matches one of those,
#       it is not followed. This can then be used to prevent this script from
#       following '.*\.cgi', specific pages or whatever.
#
# 0.2 - Made it only HEAD non-HTML files (i.e. skip the GET). Makes it a lot
#       faster to skip large non-HTML files such as pdfs or big RFCs! ;-)
#       Added a -c option that allows me to pass options to curl.
#
# 0.1 - The given url works as the root. This script will only continue
#       and check other URLs if the leftmost part of the new URL is identical
#       to the root URL.
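#
# Example invocation (hypothetical URL and script name, for illustration
# only): crawl everything below the docs tree, skip CGI links, and report
# line numbers for bad links:
#
#   ./checklinks.pl -l -i '.*\.cgi' http://example.com/docs/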
if($ARGV[0] eq "-v" ) {
elsif($ARGV[0] eq "-c" ) {
elsif($ARGV[0] eq "-i" ) {
    push @ignorelist, $ARGV[1];
elsif($ARGV[0] eq "-l" ) {
elsif($ARGV[0] eq "-h" ) {
elsif($ARGV[0] eq "-x" ) {
my $geturl = $ARGV[0];
my $firsturl = $geturl;

# Define a hash to hold all root URLs we are to visit or have visited
$rooturls{$ARGV[0]}=1;
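# Note on the counting scheme (as used further down in this script): a value
# of 1 means a root URL is still pending, the count is bumped once it has
# been fetched, and out-of-scope URLs get +1000 so the pending test never
# picks them up.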
if(($geturl eq "") || $help) {
    print "Usage: $0 [-chilvx] <full URL>\n",
        " Use a trailing slash for directory URLs!\n",
        " -c [data] Pass [data] as argument to every curl invocation\n",
        " -h This help text\n",
        " -i [regex] Ignore root links that match this pattern\n",
        " -l Line number report for BAD links\n",
        " -x Check non-local (external?) links only\n";
#$proxy =" -x 194.237.142.41:80";
# linkchecker, URL will be appended to the right of this command line.
# This is the one using HEAD:
my $linkcheck = "curl -s -m 20 -I$proxy";

# As a second attempt, this will be used. This is not using HEAD but will
# get the whole frigging document!
my $linkcheckfull = "curl -s -m 20 -i$proxy";

# htmlget, URL will be appended to the right of this command line
my $htmlget = "curl -s$proxy";
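# For illustration, with no -c data and no proxy, the HEAD-style check above
# ends up running a command line such as (hypothetical URL):
#
#   curl -s -m 20 -I http://example.com/page.html
#
# i.e. silent mode, a 20 second maximum transfer time, and fetch the
# response headers only.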
# Parse the input URL and split it into the relevant parts:
if($inurl=~ /^([^:]+):\/\/([^\/]*)\/(.*)\/(.*)/ ) {
elsif ($inurl=~ /^([^:]+):\/\/([^\/]*)\/(.*)/ ) {
    if($getpath !~ /\//) {
elsif ($inurl=~ /^([^:]+):\/\/(.*)/ ) {
    print "Couldn't parse the specified URL, retry please!\n";
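# Example of the split (hypothetical URL): "http://example.com/docs/index.html"
# matches the first regex above and yields protocol "http", server
# "example.com", path "docs" and document "index.html".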
my $type="text/plain";

open(HEADGET, "$linkcheck $geturl|") ||
    die "Couldn't get web page for some reason";

if($_ =~ /HTTP\/1\.[01] (\d\d\d) /) {
elsif($_ =~ /^Content-Type: ([\/a-zA-Z]+)/) {
   ($_ =~ /^Location: (.*)/)) {
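# Illustrative response lines the patterns above pick out:
#   "HTTP/1.1 200 OK"          => response code 200
#   "Content-Type: text/html"  => content type "text/html"
#   "Location: <new URL>"      => the page has moved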
if($pagemoved == 1) {
    print "Page is moved but we don't know where. Did you forget the ",

if($type ne "text/html") {
    # there's no point in getting anything but HTML

open(WEBGET, "$htmlget $geturl|") ||
    die "Couldn't get web page for some reason";

return ($in, $code, $type);
# $check =~s/([^a-zA-Z0-9_:\/.-])/uc sprintf("%%%02x",ord($1))/eg;

my @doc = `$linkcheck \"$check\"`;

# print "COMMAND: $linkcheck \"$check\"\n";
# print $doc[0]."\n";

if( $doc[0] =~ /^HTTP[^ ]+ (\d+)/ ) {
if($head && ($error >= 500)) {
    # This server doesn't like HEAD!
    @doc = `$linkcheckfull \"$check\"`;
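    # (Some servers respond with a 5xx code to HEAD requests even though a
    # plain GET would succeed, hence this full-fetch retry.)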
while($in =~ /[^<]*(<[^>]+>)/g ) {
    # we have a tag in $1

    if($tag =~ /^<!--/) {
        # this is a comment tag, ignore it

    if($tag =~ /(src|href|background|archive) *= *(\"[^\"]*\"|[^ \)>]*)/i) {

        if($url =~ /^\"(.*)\"$/) {
            # this was a "string"; now $1 has it with the quotes removed:

        $url =~ s/([^\#]*)\#.*/$1/g;
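        # e.g. "manual.html#top" becomes just "manual.html" here, and a pure
        # "#top" link is emptied completely (dealt with right below)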
        # if the link was nothing but a #-link it may now have
        # been emptied completely so then we skip the rest

        # if this url already is done, do next
        print " FOUND $url but that is already checked\n";

        $done{$url} = 1; # this is "done"

    if($tag =~ /< *([^ ]+)/) {
for(keys %rooturls) {
    if($rooturls{$_} == 1) {
        if($_ !~ /^$firsturl/) {
            $rooturls{$_} += 1000; # don't do this, outside our scope
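            # (+1000 pushes the count well past 1, so the "== 1" pending
            # test above never selects this URL for crawling again)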
# Splits the URL into its different parts

# Returns the full HTML of the root page
my ($in, $error, $ctype) = &GetRootPage($geturl);

$rooturls{$geturl}++; # bump the count to mark that we already fetched it

if($ctype ne "text/html") {
    # this is not HTML, we skip this
    print "Non-HTML link, skipping\n";

print "ROOT page $geturl returned $error\n";

print " ==== $geturl ====\n";

printf("Error code $error, Content-Type: $ctype, got %d bytes\n",
       length($in));

#print "protocol = $getprotocol\n";
#print "server = $getserver\n";
#print "path = $getpath\n";
#print "document = $getdocument\n";
# Extracts all links from the given HTML buffer
my @links = &GetLinks($in);

if($url =~ /^([^:]+):/) {
    if($prot !~ /http/i) {
        # this is an unsupported protocol, we ignore this

# this is a link on the same server:
$link = "$getprotocol://$getserver$url";

# relative to the scanned page's dir
if(length($getpath) &&
   ($getpath !~ /\/$/) &&
    # lacks an ending slash, add one to the document part:
    $link = "$getprotocol://$getserver/$getpath$nyurl";
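    # Illustrative example (hypothetical URL): a relative link found on
    # http://example.com/docs/index.html gets the directory part "docs"
    # glued in, producing something like http://example.com/docs/more.html.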
my $success = &LinkWorks($link);

my $count = $done{$url};

print "$success $count <".$tagtype{$url}."> $link $url\n";

if("BAD" eq $success) {
    print " line $line\n";
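# A report line comes out roughly like this (illustrative):
#
#   BAD 1 <a> http://example.com/gone.html gone.html
#
# i.e. verdict, the URL's count from the %done table, tag type, the
# resolved full link and the raw URL as written in the page.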
# the link works, add it if it isn't in the ignore list
$rooturls{$link}++; # check this if not checked already

print "$allcount links were checked";
print ", $badlinks were found bad";