$VAR1 = {
  'Session 1' => {
    'last' => 1080139082,
    'files' => [
      'File1.txt 1079948073 @1079966101',
      'File2.txt 1080035083 @1080053101',
      'File3.txt 1080121051 @1080139081'
    ],
    'lastfailmsg' => '+',
    'lastfailtime' => 0
  },
  'Session 2' => {
    'last' => 1080129127,
    'files' => [
      'File1.txt @1079956803',
      'File2.txt @1080043204',
      'File3.txt @1080129100'
    ],
    'lastfailmsg' => '+',
    'lastfailtime' => 0
  }
};

####

sub merge_hist_changes {
    # Merges the current session's history changes into %sess_hist.

    ## Step 1 - Create a copy of $session's hash tree
    my %temp_hash = %{$sess_hist{$session}};

    ## Step 2 - Filter out files downloaded more than $hist_days days ago
    my $old = time - ( 86400 * $hist_days );    # 86400 seconds in a day
    @{$temp_hash{files}}   = grep { /@(\d+)$/ and $1 > $old } @{$temp_hash{files}};
    @{$temp_hash{uploads}} = grep { /@(\d+)$/ and $1 > $old } @{$temp_hash{uploads}};

    ## Step 3 - Get an exclusive flock on $hist_file.l. This is the critical step that
    ## prevents other forks from updating until the current $session's info gets updated.
    open HFL, '>', "$hist_file.l";
    unless ( flock HFL, 2 ) {                   # 2 == LOCK_EX
        my $failstr = "Can't get lock on $hist_file.l, changes to DB unsaved\n";
        $failstr   .= "History tree for $session :\n" . Dumper \%temp_hash;
        &pager_alert($failstr);
        exit;
    }

    ## Step 4 - Get new %sess_hist from disk (like &parse_hist)
    local $/ = undef;
    if ( -s $hist_file ) {
        unless ( %sess_hist = %{ do($hist_file) } ) {
            my $failstr = "Can't parse history file, changes to DB unsaved\n";
            $failstr   .= "History tree for $session :\n" . Dumper \%temp_hash;
            &pager_alert($failstr);
            exit;
        }
    }

    ## Step 5 - Change $session's hash pointer to refer to %temp_hash
    $sess_hist{$session} = \%temp_hash;

    ## Step 6 - Dump %sess_hist.
    local $Data::Dumper::Indent = 1;
    open HF, '>', $hist_file;
    print HF Dumper \%sess_hist;
    close HF;

    close HFL;    # Releases flock and lets next child process update $hist_file
}
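Step 4 reads the history tree back from disk the same way &parse_hist does at startup. For readers who want to see that round trip spelled out, here is a minimal sketch of what such a parse_hist could look like; it is an assumption based on the Dumper format shown above rather than the actual routine, and it reuses the $hist_file, %sess_hist and &pager_alert names from the listing:

sub parse_hist {
    # Sketch only (assumption): load the Dumper-ed history file back into %sess_hist.
    # Assumes $hist_file holds plain Dumper output ($VAR1 = { ... };), that the script
    # does not run the file under "use strict", and that $hist_file is an absolute path
    # (do() searches @INC for bare relative names).
    return unless -s $hist_file;        # nothing saved yet, start with an empty history

    my $tree = do $hist_file;           # re-evaluates the dumped code; returns the $VAR1 hashref
    unless ( ref $tree eq 'HASH' ) {
        &pager_alert( "Can't parse $hist_file: " . ( $@ || $! ) . "\n" );
        exit;
    }
    %sess_hist = %$tree;
}

Because Dumper writes ordinary Perl, do() is the whole parser; the trade-off is that the history file is executed as code, so it should only ever be written by this script itself.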