in reply to converting from process to threads
Try this and see how you get on.
On my system, which only has dialup bandwidth, I tested by issuing HEAD requests and parsing local content (32kB of HTML containing ~1000 links). With 10 threads it consumes ~40MB of RAM and achieves >80% CPU usage. Using more threads is pointless: even fetching only HEAD requests pretty much saturates my dialup connection.
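For reference, the HEAD-only test amounts to something like the following single-threaded sketch (reading a file of URLs via the command line is just an assumption for illustration; in the real script below, the commented-out head() call takes the place of get()):

#! perl -slw
use strict;
use LWP::Simple qw( head );

# Issue a HEAD request per URL and report the advertised size,
# without downloading any bodies.
while( my $url = <> ) {
    chomp $url;
    my( $type, $length ) = head( $url )
        or warn "Couldn't fetch $url" and next;
    print "$url => ", defined $length ? "$length bytes" : 'size unknown';
}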
YMMV depending on the size of the downloads, the complexity of the processing, what other modules you need to load, the number of CPUs, and your bandwidth. Feedback appreciated.
#! perl -slw
use strict;
use threads;
use threads::shared;

$|++;    ## Important to prevent IO overlap

our $NTHREADS ||= 10;

## Read test content from DATA: 32kb html containing ~1000 links.
## Used for testing in conjunction with head() below.
# my $content; { local $/; $content = <DATA>; }

my $osync :shared;
my $isync :shared;

sub processEm {
    require LWP::Simple;
    LWP::Simple->import( 'head', 'get' );
    require HTML::LinkExtor;

    my $tid = threads->self->tid;
    warn "$tid: starting";

    while( my $url = do{ lock $isync; <STDIN> } ) {
        chomp $url;
        warn "$tid: processing $url";

        ## Used for testing; a workaround for my bandwidth limits
        ## and being a good netizen
        # head( $url ) or warn "Couldn't fetch $url" and next;

        my $content = get( $url )
            or warn "Couldn't fetch $url" and next;

        my $l = HTML::LinkExtor->new(
            sub{ lock $osync; print "'$_[ 2 ]'"; }, $url
        );
        $l->parse( $content );
    }
}

open STDIN, '<', $ARGV[ 0 ] or die $!;

my @threads = map{ threads->create( \&processEm ) } 1 .. $NTHREADS;

$_->join for @threads;

__END__

c:\test>663223.pl -NTHREADS=10 urls.txt >output.txt
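urls.txt is expected to hold one URL per line; the script reopens STDIN on $ARGV[0], so all the threads pull from the same shared handle. If the -NTHREADS switch looks odd: the -s on the shebang line tells perl to turn leading -name=value arguments into package variables, so -NTHREADS=10 sets $main::NTHREADS before the ||= 10 default kicks in. A minimal sketch of just that mechanism (the script and switch names here are made up for illustration):

#! perl -slw
use strict;

# -N=8 on the command line sets $main::N before this default applies.
our $N ||= 4;

print "using $N workers";

__END__

c:\test>switch-demo.pl -N=8
using 8 workers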