The following provides a parallel version of the slurp routine. I'm not sure why or where to look, but running MCE through cmpthese reports inaccurately, with MCE appearing 300x faster, which is wrong. So, I needed to benchmark another way; a sketch of the cmpthese attempt follows.
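The comparison was along these lines. My guess is that Benchmark computes its rates from CPU time rather than wall-clock time, and the MCE parent consumes almost no CPU itself since the spawned workers do the counting, which would explain the inflated ratio.

    use Benchmark 'cmpthese';

    # Misleading: rates are based on CPU time, not wall-clock time,
    # and the parent burns little CPU while MCE workers do the work.
    cmpthese( 10, {
        slurp => sub { slurp() },
        mce   => sub { mce()   },
    });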
Regarding MCE, each worker receives the next chunk and tallies into a local hash, then updates the shared hash.
use strict;
use warnings;

use MCE;
use MCE::Shared;
use String::Random 'random_regex';
use Time::HiRes 'time';

my $fn  = 'dna.txt';
my $POS = 10;

my $shrcount = MCE::Shared->hash();
my $mce;

# Generate 1e6 records: 42 bases plus newline = 43 bytes per record.
unless ( -e $fn ) {
    open my $fh, '>', $fn or die "open '$fn' failed: $!";
    print $fh random_regex( '[ACTG]{42}' ), "\n"
        for 1 .. 1e6;
}
sub slurp {
    open my $fh, '<', $fn or die "open '$fn' failed: $!";
    my $s = do { local $/ = undef; <$fh> };

    # Tally the base at position $POS of each 43-byte record.
    my $count;
    $count->{ substr $s, $POS - 1 + 43 * $_, 1 }++
        for 0 .. length( $s ) / 43 - 1;

    return $count;
}
sub mce {
    unless ( defined $mce ) {
        $mce = MCE->new(
            max_workers => 4,
            chunk_size  => '300k',
            use_slurpio => 1,
            user_func   => sub {
                my ( $mce, $slurp_ref, $chunk_id ) = @_;
                my ( $count, @todo );

                # Tally into a local hash first.
                $count->{ substr ${ $slurp_ref }, $POS - 1 + 43 * $_, 1 }++
                    for 0 .. length( ${ $slurp_ref } ) / 43 - 1;

                # Updating key-by-key costs one IPC trip to the
                # shared-manager per key:
                #
                #   $shrcount->incrby( $_, $count->{$_} )
                #       for keys %{ $count };
                #
                # The following is faster for smaller chunk sizes;
                # basically, it sends multiple commands at once.
                push @todo, [ "incrby", $_, $count->{$_} ]
                    for keys %{ $count };

                $shrcount->pipeline( @todo );
            }
        )->spawn();
    }

    $shrcount->clear();
    $mce->process($fn);

    return $shrcount->export();
}
for (qw/ slurp mce /) {
    no strict 'refs';
    my $start = time();
    my $func  = "main::$_";

    $func->() for 1 .. 3;

    printf "%5s: %0.03f secs.\n", $_, time() - $start;
}
__END__
slurp: 0.487 secs.
  mce: 0.149 secs.
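As a sanity check (not part of the timings), both routines should produce the same tally. A minimal sketch, assuming export() returns a non-shared MCE::Shared::Hash object supporting get():

    my $s = slurp();   # plain hash ref
    my $m = mce();     # non-shared MCE::Shared::Hash object

    # Print both tallies side by side; the counts should match.
    printf "%s: slurp=%d mce=%d\n", $_, $s->{$_}, $m->get($_)
        for sort keys %{ $s };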