#!/usr/bin/perl
use strict;
use warnings;

my %usage;
my $files = 0;

for my $file (qw/sfull1ns.dat sfull2ns.dat sfull3ns.dat/) {
    open(my $fh, '<', $file) or die "Can't open file $file: $!";
    while (my $line = <$fh>) {
        next if $line =~ /^Server/;
        #chomp($line);
        #my ($server, @data) = (split(",",$line));

        # Assign the first 2 values in the CSV row and discard the rest
        my ($server, $avg) = split(",", $line);

        #if ($data[0] lt "!" ) {
        #    $data[0] = 0;
        #}
        #next if grep /[^0-9.]/, @data;
        #$usage{$server} = [] unless exists $usage{$server};
        #push @{$usage{$server}}, 0 while @{$usage{$server}} < $files;
        #push @{$usage{$server}}, $data[0];

        # If this is a second occurrence of a server in the file,
        # its avg won't be assigned because the first one is already
        # stored there.
        $usage{$server}[$files] ||= $avg;
    }
    continue {
        $files++ if eof;    # bump the file index as each file is exhausted
    }
    close $fh or die "Can't close file $file: $!";
}

for my $server (sort keys %usage) {
    # Either this server has an average in a given file's slot,
    # or assign 0 to the slots that don't have a value.
    # ($files now holds the total file count, so every row gets the
    # same number of columns even when a server is missing from the
    # last file.)
    my @avgs = map $usage{$server}[$_] || 0, 0 .. $files - 1;
    print join(",", $server, @avgs), "\n";
}
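To see what it does, here is a hypothetical run. The file names come from the script, but the rows below are made-up sample data (the server names web01/db01 are not from the original thread). Suppose the three files contain:

sfull1ns.dat:
Server,Avg,Max
web01,1.25,9.90
web01,7.00,9.90
db01,3.40,8.80

sfull2ns.dat:
Server,Avg,Max
db01,2.20,7.70

sfull3ns.dat:
Server,Avg,Max
web01,5.50,6.60

The header lines are skipped, the duplicate web01 row in the first file is ignored (||= keeps the first value seen for that file's slot), and slots for files where a server never appears come out as 0:

db01,3.40,2.20,0
web01,1.25,0,5.50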
In reply to Re: Eliminating Duplicate Lines From A CSV File
by Cristoforo
in thread Eliminating Duplicate Lines From A CSV File
by country1