Is this what you want?
use strict; use warnings;
# Write the three sample input files consumed by the merge script below.
# Each file is a CSV: a header line, then Server,Avg CPU,P95 CPU,
# Avg Mem Util,P95 Mem Util records (sfull1ns.dat deliberately contains
# a duplicate server line to exercise the dedup logic).
#
# Fixes vs. the original: lexical filehandles instead of the global
# bareword OUT, and every open/close is checked — buffered write errors
# only surface at close, so an unchecked close can silently lose data.
my %sample_for = (
    'sfull1ns.dat' => <<'FILE',
Server,Avg CPU,P95 CPU,Avg Mem Util,P95 Mem Util
WSOMQAVPRA05,93.75,95.87,66.67,68.13
wsomdavpra03,90.39,94,65.77,68.51
wsomddvfxa01,39.22,92.19,82.59,88.25
wsomddvfxa01,35.45,89.23,79.89,83.24
FILE
    'sfull2ns.dat' => <<'FILE',
Server,Avg CPU,P95 CPU,Avg Mem Util,P95 Mem Util
WSOMQAVPRA05,34.78,100,55.1,67.6
wsomdavpra03,69.04,98.55,84.07,89.73
wsomddvfxa01,92.44,97.54,67.72,71.69
wsompapgtw05,48.77,96.9,92.1,93.55
FILE
    'sfull3ns.dat' => <<'FILE',
Server,Avg CPU,P95 CPU,Avg Mem Util,P95 Mem Util
WSOMQAVPRA05,93.13,98.11,68.95,73.47
wsomdavpra03,68.85,97.56,76.35,98.23
wsomddvfxa01,46.97,96.29,88.23,94.02
wsompapgtw05,30.66,93.74,39.89,71.35
FILE
);

for my $name (sort keys %sample_for) {
    open my $out, '>', $name or die "Can't write $name: $!";
    print {$out} $sample_for{$name};
    close $out or die "Can't close $name: $!";
}
# Merge the "Avg CPU" column of the three files into one row per server:
#   server,<value from sfull1ns.dat>,<value from sfull2ns.dat>,<value from sfull3ns.dat>
# Duplicate server lines within a file: the FIRST occurrence wins.
# Servers absent from a file get 0 in that file's column.
#
# Fixes vs. the original:
#  * "%{$usage +{$server}}" was a PerlMonks line-wrap artifact (the
#    injected '+' turned the expression into numeric addition on an
#    undeclared scalar $usage — a compile error under strict).
#  * '||=' is replaced by '//=' so a later duplicate cannot overwrite a
#    legitimate first value of 0 (false-but-defined).
my @files = qw(sfull1ns.dat sfull2ns.dat sfull3ns.dat);
my %usage;    # $usage{$server}{$file} = first Avg CPU value seen

for my $file (@files) {
    open my $fh, '<', $file or die "Can't open file $file: $!";
    <$fh>;    # skip the CSV header line
    while ( my $line = <$fh> ) {
        chomp $line;
        my ( $server, @data ) = split /,/, $line;
        $usage{$server}{$file} //= $data[0];    # first occurrence wins
    }
    close $fh or die "Can't close $file: $!";
}

# Fill in 0 for any server that is missing from a given file.
for my $server ( keys %usage ) {
    $usage{$server}{$_} //= 0 for @files;
}

# One output row per server; columns follow @files order, which for these
# names is also the sorted-filename order the original relied on.
for my $server ( sort keys %usage ) {
    print join( ',', $server, map { $usage{$server}{$_} } @files ), "\n";
}
In reply to Re^3: Eliminating Duplicate Lines From A CSV File
by GrandFather
in thread Eliminating Duplicate Lines From A CSV File
by country1
| For: | Use:  |
| &    | &amp; |
| <    | &lt;  |
| >    | &gt;  |
| [    | &#91; |
| ]    | &#93; |