It looks like you need something more like this:
#!/usr/local/bin/perl
use warnings;
use strict;

open MYFILE, '<', 'file.txt' or die "Cannot open file.txt: $!";

my %unique;
while ( <MYFILE> ) {
    chomp;
    # Strip the leading "YYYY-MM-DD hh:mm:ss " timestamp, capturing the date
    next unless s/^(\d{4}-\d\d-\d\d)\s+\d\d:\d\d:\d\d\s+//;
    # Key on the remaining fields plus the date so true duplicates collapse
    $unique{ "$_\t$1" }++;    # updated!
}

print " - Found ($.)\n";
close MYFILE;

print "Remove duplicate lines\n";
print "Found (", scalar keys %unique, ")\n";

my @sorted = sort keys %unique;

print "format each line so that it's formatted as BulletinID,KBID,Title,Endpointname,Date\n";

my @sort;
for my $line ( @sorted ) {
    print "LINEPRINT - $line\n";
    # Reorder the tab-separated fields into the requested column order
    push @sort, join "\t", ( split /\t/, $line )[ 2, 1, 3, 0, 4 ];
}
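The snippet above stops after filling @sort, so here is a minimal sketch of how you might write out the reordered, de-duplicated lines. The comma-separated output and the file name results.txt are assumptions on my part, not something from the original post:

# Minimal sketch, assuming comma-separated output and a
# hypothetical output file "results.txt".
open my $out, '>', 'results.txt' or die "Cannot open results.txt: $!";
for my $line ( @sort ) {
    # Each element of @sort is tab-separated:
    # BulletinID, KBID, Title, Endpointname, Date
    print {$out} join( ',', split /\t/, $line ), "\n";
}
close $out;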
In reply to Re: Removing duplicate lines from a file by jwkrahn, in thread Removing duplicate lines from a file by green_lakers