sub merge_hist_changes {
    # Merge the current $session's history changes into %sess_hist and
    # persist the result to $hist_file, serialized across forked children
    # by an exclusive flock on "$hist_file.l".
    #
    # Relies on globals defined elsewhere in this file: %sess_hist,
    # $session, $hist_days, $hist_file, and &pager_alert.
    # Returns nothing; on failure it pages an alert and exits the process.

    ## Step 1 - Take a private copy of $session's hash tree so it can be
    ## filtered without touching the shared structure yet.
    my %temp_hash = %{ $sess_hist{$session} };

    ## Step 2 - Drop entries recorded more than $hist_days days ago.
    ## Entries are expected to end in "@<epoch>".  The match result is
    ## tested explicitly: entries without the suffix are discarded
    ## (previously a stale $1 from an earlier iteration decided their fate).
    my $old = time - ( 86400 * $hist_days );    # 86400 seconds in a day
    for my $key (qw(files uploads)) {
        @{ $temp_hash{$key} } =
            grep { /@(\d+)$/ and $1 > $old } @{ $temp_hash{$key} };
    }

    ## Step 3 - Get an exclusive flock on $hist_file.l.  This is the
    ## critical step that prevents other forks from updating until the
    ## current $session's info gets written.  A failed open is treated
    ## like a failed lock (previously flock ran on an invalid handle).
    my $lock_fh;
    unless ( open( $lock_fh, '>', "$hist_file.l" )
             and flock( $lock_fh, 2 ) ) {       # 2 == LOCK_EX
        my $failstr = "Can't get lock on $hist_file.l, changes to DB unsaved\n";
        $failstr .= "History tree for $session :\n" . Dumper \%temp_hash;
        pager_alert($failstr);
        exit;
    }

    ## Step 4 - Re-read %sess_hist from disk (like &parse_hist); another
    ## fork may have written since we last parsed it.  Skipped when the
    ## file is empty.  Note: do() parses the file itself, so no $/ slurp
    ## override is needed here.
    if ( -s $hist_file ) {
        unless ( %sess_hist = %{ do($hist_file) } ) {
            my $failstr = "Can't parse history file, changes to DB unsaved\n";
            $failstr .= "History tree for $session :\n" . Dumper \%temp_hash;
            pager_alert($failstr);
            exit;
        }
    }

    ## Step 5 - Point $session's slot at the freshly filtered copy.
    $sess_hist{$session} = \%temp_hash;

    ## Step 6 - Dump %sess_hist back to disk.  A failed open here would
    ## previously lose the update silently while still holding the lock.
    local $Data::Dumper::Indent = 1;
    unless ( open( my $hf, '>', $hist_file ) ) {
        my $failstr = "Can't write $hist_file, changes to DB unsaved\n";
        $failstr .= "History tree for $session :\n" . Dumper \%temp_hash;
        pager_alert($failstr);
        exit;
    }
    else {
        print {$hf} Dumper \%sess_hist;
        close $hf;          # flush history to disk before releasing the lock
    }
    close $lock_fh;         # releases flock and lets next child process update $hist_file
}