#!/usr/bin/perl
# Naive recursive web crawler — follows links from a start page down to a
# given depth, like `wget -r --level=N`.
#
# Usage: naive_crawler.pl [DEPTH] START_URL
#   DEPTH     - number of link levels to follow (defaults to 1 when omitted,
#               matching the original script's effective behavior)
#   START_URL - page to begin crawling from (mandatory; exits 1 if missing)
use strict;
use warnings;

# Pull every href="..." / href='...' target out of an HTML string.
# Returns the list of link targets (possibly empty).
# NOTE(review): a regex is a rough HTML parser — fine for a naive crawler,
# but it will miss unquoted attributes and pick up non-<a> hrefs.
sub extract_links {
    my ($html) = @_;
    return $html =~ m{ href \s* = \s* ["'] ([^"']+) ["'] }gix;
}

# Fetch $url with curl and recurse into each extracted link.
# $depth counts remaining levels; recursion stops once it reaches 0.
sub crawl {
    my ($url, $depth) = @_;
    return if $depth <= 0;

    # List-form pipe open bypasses the shell entirely, so a malicious URL
    # cannot inject shell commands (the original interpolated $url into
    # backticks). '--' stops curl from treating the URL as an option.
    open my $curl, '-|', 'curl', '-s', '--', $url
        or die "cannot run curl for $url: $!";
    my $content = do { local $/; <$curl> };    # slurp the whole response
    close $curl;                               # best-effort: curl may fail on bad URLs

    for my $link ( extract_links($content // '') ) {
        print "Working on link $link\n";
        crawl($link, $depth - 1);              # recurse in-process instead of
                                               # re-spawning `perl naive_crawler.pl`
    }
    return;
}

# Modulino guard: run as a command-line script, but stay inert when the file
# is loaded as a library (e.g. by tests).
unless (caller) {
    my ($depth_level, $start_page) = @ARGV;
    exit 1 unless $start_page;                 # a start page is mandatory

    # Undefined depth used to produce "crawl one level" (via an
    # uninitialized-value warning); make that default explicit.
    crawl($start_page, $depth_level // 1);
}

1;