[Darshan-commits] [Darshan] branch, dev-modular, updated. darshan-2.3.1-161-g3e3d2df

Service Account git at mcs.anl.gov
Thu Sep 3 12:44:39 CDT 2015


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "".

The branch, dev-modular has been updated
       via  3e3d2df8f841083834789a2bc9dc78800d8b9823 (commit)
       via  020d91b6f8d584236b1b8033afb724792b0e1014 (commit)
      from  fab0224e40a04833418f345e4b391fa918156e44 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit 3e3d2df8f841083834789a2bc9dc78800d8b9823
Author: Shane Snyder <ssnyder at mcs.anl.gov>
Date:   Tue Sep 1 13:59:49 2015 -0500

    whole bunch of changes to job-summary scripts

commit 020d91b6f8d584236b1b8033afb724792b0e1014
Author: Shane Snyder <ssnyder at mcs.anl.gov>
Date:   Tue Aug 25 22:36:44 2015 -0500

    bug fix in mpiio reduction code

-----------------------------------------------------------------------

Summary of changes:
 darshan-runtime/lib/darshan-mpiio.c                |    2 +-
 darshan-util/Makefile.in                           |   18 +-
 .../bin/darshan-job-summary.pl.in                  | 1263 +++++++++++++++++++-
 .../share/{hist-eps.gplt => access-hist-eps.gplt}  |   11 +-
 .../darshan-job-summary/share/align-pdf.gplt       |   29 -
 .../darshan-job-summary/share/counts-pdf.gplt      |   28 -
 .../darshan-job-summary/share/counts-svg.gplt      |   27 -
 .../darshan-job-summary/share/file-access-eps.gplt |   40 +
 .../share/file-access-table.tex                    |   14 +
 .../darshan-job-summary/share/hist-pdf.gplt        |   27 -
 .../darshan-job-summary/share/hist-svg.gplt        |   26 -
 .../darshan-job-summary/share/iodist-pdf.gplt      |   29 -
 .../darshan-job-summary/share/job-table.tex        |    5 +
 .../share/{counts-eps.gplt => op-counts-eps.gplt}  |    9 +-
 .../darshan-job-summary/share/pattern-pdf.gplt     |   28 -
 darshan-util/darshan-job-summary/share/summary.tex |   62 +-
 .../share/time-summary-pdf.gplt                    |   30 -
 darshan-util/darshan-job-summary/share/title.tex   |   14 +
 .../darshan-job-summary/share/types-pdf.gplt       |   26 -
 19 files changed, 1371 insertions(+), 317 deletions(-)
 rename darshan-util/darshan-job-summary/share/{hist-eps.gplt => access-hist-eps.gplt} (75%)
 delete mode 100644 darshan-util/darshan-job-summary/share/align-pdf.gplt
 delete mode 100644 darshan-util/darshan-job-summary/share/counts-pdf.gplt
 delete mode 100644 darshan-util/darshan-job-summary/share/counts-svg.gplt
 create mode 100644 darshan-util/darshan-job-summary/share/file-access-eps.gplt
 create mode 100644 darshan-util/darshan-job-summary/share/file-access-table.tex
 delete mode 100644 darshan-util/darshan-job-summary/share/hist-pdf.gplt
 delete mode 100644 darshan-util/darshan-job-summary/share/hist-svg.gplt
 delete mode 100644 darshan-util/darshan-job-summary/share/iodist-pdf.gplt
 create mode 100644 darshan-util/darshan-job-summary/share/job-table.tex
 rename darshan-util/darshan-job-summary/share/{counts-eps.gplt => op-counts-eps.gplt} (72%)
 delete mode 100644 darshan-util/darshan-job-summary/share/pattern-pdf.gplt
 delete mode 100644 darshan-util/darshan-job-summary/share/time-summary-pdf.gplt
 create mode 100644 darshan-util/darshan-job-summary/share/title.tex
 delete mode 100644 darshan-util/darshan-job-summary/share/types-pdf.gplt


Diff of changes:
diff --git a/darshan-runtime/lib/darshan-mpiio.c b/darshan-runtime/lib/darshan-mpiio.c
index c823851..672ff99 100644
--- a/darshan-runtime/lib/darshan-mpiio.c
+++ b/darshan-runtime/lib/darshan-mpiio.c
@@ -1108,7 +1108,7 @@ static void mpiio_record_reduction_op(
         /* sum */
         for(j=MPIIO_F_READ_TIME; j<=MPIIO_F_META_TIME; j++)
         {
-            tmp_file.counters[j] = infile->fcounters[j] + inoutfile->fcounters[j];
+            tmp_file.fcounters[j] = infile->fcounters[j] + inoutfile->fcounters[j];
         }
 
         /* max (special case) */
diff --git a/darshan-util/Makefile.in b/darshan-util/Makefile.in
index 0e1bc41..0fdf2ec 100644
--- a/darshan-util/Makefile.in
+++ b/darshan-util/Makefile.in
@@ -110,21 +110,21 @@ install:: all
 	install -m 755 darshan-convert $(bindir)
 #	install -m 755 darshan-diff $(bindir)
 	install -m 755 darshan-parser $(bindir)
-#	install -m 755 $(srcdir)/darshan-summary-per-file.sh $(bindir)
+	install -m 755 $(srcdir)/darshan-summary-per-file.sh $(bindir)
 	install -m 755 libdarshan-util.a $(libdir)
 ifeq ($(DARSHAN_ENABLE_SHARED),1)
 	install -m 755 libdarshan-util.so $(libdir)
 endif
 	install -m 644 $(srcdir)/darshan-logutils.h $(includedir)
 	install -m 644 $(DARSHAN_LOG_FORMAT) $(includedir)
-#	install -m 755 darshan-job-summary/bin/darshan-job-summary.pl $(bindir)
-#	install -d $(libdir)/TeX
-#	install -m 644 $(srcdir)/darshan-job-summary/lib/TeX/Encode.pm $(libdir)/TeX/
-#	install -d $(libdir)/Number
-#	install -d $(libdir)/Number/Bytes
-#	install -m 644 $(srcdir)/darshan-job-summary/lib/Number/Bytes/Human.pm $(libdir)/Number/Bytes
-#	install -d $(datarootdir)
-#	install -m 644 $(srcdir)/darshan-job-summary/share/* $(datarootdir)
+	install -m 755 darshan-job-summary/bin/darshan-job-summary.pl $(bindir)
+	install -d $(libdir)/TeX
+	install -m 644 $(srcdir)/darshan-job-summary/lib/TeX/Encode.pm $(libdir)/TeX/
+	install -d $(libdir)/Number
+	install -d $(libdir)/Number/Bytes
+	install -m 644 $(srcdir)/darshan-job-summary/lib/Number/Bytes/Human.pm $(libdir)/Number/Bytes
+	install -d $(datarootdir)
+	install -m 644 $(srcdir)/darshan-job-summary/share/* $(datarootdir)
 	install -m 644 maint/darshan-util.pc $(pkgconfigdir)
 
 
diff --git a/darshan-util/darshan-job-summary/bin/darshan-job-summary.pl.in b/darshan-util/darshan-job-summary/bin/darshan-job-summary.pl.in
index 13041ed..0b11851 100755
--- a/darshan-util/darshan-job-summary/bin/darshan-job-summary.pl.in
+++ b/darshan-util/darshan-job-summary/bin/darshan-job-summary.pl.in
@@ -18,6 +18,1222 @@ use English;
 use Number::Bytes::Human qw(format_bytes);
 use POSIX qw(strftime);
 
+#
+# system commands used
+#
+my $darshan_parser = "$PREFIX/bin/darshan-parser";
+my $pdflatex       = "pdflatex";
+my $epstopdf       = "epstopdf";
+my $cp             = "cp";
+my $mv             = "mv";
+my $gnuplot        = "gnuplot";
+
+my $orig_dir = getcwd;
+my $output_file = "summary.pdf";
+my $verbose_flag = 0;
+my $input_file = "";
+my %posix_access_hash = ();
+my %mpiio_access_hash = ();
+my @access_size = ();
+my %hash_files = ();
+
+# data structures for calculating performance
+my %hash_unique_file_time = ();
+my $shared_file_time = 0;
+my $total_job_bytes = 0;
+
+process_args();
+
+check_prereqs();
+
+my $tmp_dir = tempdir( CLEANUP => !$verbose_flag );
+if ($verbose_flag)
+{
+    print "verbose: $tmp_dir\n";
+}
+
+open(PARSE_OUT, "$darshan_parser $input_file |") || die("Can't execute \"$darshan_parser $input_file\": $!\n");
+
+open(FA_READ, ">$tmp_dir/file-access-read.dat") || die("error opening output file: $!\n");
+open(FA_WRITE, ">$tmp_dir/file-access-write.dat") || die("error opening output file: $!\n");
+open(FA_READ_SH, ">$tmp_dir/file-access-read-sh.dat") || die("error opening output file: $!\n");
+open(FA_WRITE_SH, ">$tmp_dir/file-access-write-sh.dat") || die("error opening output file: $!\n");
+
+my $last_read_start = 0;
+my $last_write_start = 0;
+
+my $cumul_read_indep = 0;
+my $cumul_read_bytes_indep = 0;
+
+my $cumul_write_indep = 0;
+my $cumul_write_bytes_indep = 0;
+
+my $cumul_read_shared = 0;
+my $cumul_read_bytes_shared = 0;
+
+my $cumul_write_shared = 0;
+my $cumul_write_bytes_shared = 0;
+
+my $cumul_meta_shared = 0;
+my $cumul_meta_indep = 0;
+
+my $first_data_line = 1;
+my $current_rank = 0;
+my $current_hash = 0;
+my %file_record_hash = ();
+
+my %fs_data = ();
+
+while($line = <PARSE_OUT>)
+{
+    chomp($line);
+
+    if ($line =~ /^\s*$/)
+    {
+        # ignore blank lines
+    }
+    elsif ($line =~ /^#/)
+    {
+        if ($line =~ /^# exe: /)
+        {
+            ($junk, $cmdline) = split(':', $line, 2);
+            # add escape characters if needed for special characters in
+            # command line
+            $cmdline = encode('latex', $cmdline);
+        }
+        elsif ($line =~ /^# nprocs: /)
+        {
+            ($junk, $nprocs) = split(':', $line, 2);
+        }
+        elsif ($line =~ /^# run time: /)
+        {
+            ($junk, $runtime) = split(':', $line, 2);
+        }
+        elsif ($line =~ /^# start_time: /)
+        {
+            ($junk, $starttime) = split(':', $line, 2);
+        }
+        elsif ($line =~ /^# uid: /)
+        {        
+            ($junk, $uid) = split(':', $line, 2);
+        }
+        elsif ($line =~ /^# jobid: /)
+        {
+            ($junk, $jobid) = split(':', $line, 2);
+        }
+        elsif ($line =~ /^# darshan log version: /)
+        {
+            ($junk, $version) = split(':', $line, 2);
+            $version =~ s/^\s+//;
+        }
+    }
+    else
+    {
+        # parse line
+        @fields = split(/[\t ]+/, $line);
+
+        # encode the file system name to protect against special characters
+        $fields[5] = encode('latex', $fields[5]);
+
+        # is this our first piece of data?
+        if($first_data_line)
+        {
+            $current_rank = $fields[1];
+            $current_hash = $fields[2];
+            $first_data_line = 0;
+        }
+
+        # is this a new file record?
+        if($fields[1] != $current_rank || $fields[2] != $current_hash)
+        {
+            # process previous record (if posix or mpiio record)
+            if ($fields[0] eq "POSIX" || $fields[0] eq "MPIIO")
+            {
+                process_file_record($current_rank, $current_hash, \%file_record_hash);
+            }
+
+            # reset variables for next record 
+            $current_rank = $fields[1];
+            $current_hash = $fields[2];
+            %file_record_hash = ();
+            $file_record_hash{FILE_NAME} = $fields[5];
+        }
+
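+        # columns appear to be: module, rank, record id, counter name, value,
+        # file name, mount point; stash the counter in this file's record and
+        # add it to the job-wide totals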
+        $file_record_hash{$fields[3]} = $fields[4];
+        $summary{$fields[3]} += $fields[4];
+
+        # accumulate independent and shared data as well as fs data
+        if ($fields[3] eq "POSIX_F_READ_TIME")
+        {
+            if ($fields[1] == -1)
+            {
+                $cumul_read_shared += $fields[4];
+            }
+            else
+            {
+                $cumul_read_indep += $fields[4];
+            }
+        }
+        elsif ($fields[3] eq "POSIX_F_WRITE_TIME")
+        {
+            if ($fields[1] == -1)
+            {
+                $cumul_write_shared += $fields[4];
+            }
+            else
+            {
+                $cumul_write_indep += $fields[4];
+            }
+        }
+        elsif ($fields[3] eq "POSIX_F_META_TIME")
+        {
+            if ($fields[1] == -1)
+            {
+                $cumul_meta_shared += $fields[4];
+            }
+            else
+            {
+                $cumul_meta_indep += $fields[4];
+            }
+        }
+        elsif ($fields[3] eq "POSIX_BYTES_READ")
+        {
+            if ($fields[1] == -1)
+            {
+                $cumul_read_bytes_shared += $fields[4];
+            }
+            else
+            {
+                $cumul_read_bytes_indep += $fields[4];
+            }
+            if (not defined $fs_data{$fields[6]})
+            {
+                $fs_data{$fields[6]} = [0,0];
+            }
+            $fs_data{$fields[6]}->[0] += $fields[4];
+        }
+        elsif ($fields[3] eq "POSIX_BYTES_WRITTEN")
+        {
+            if ($fields[1] == -1)
+            {
+                $cumul_write_bytes_shared += $fields[4];
+            }
+            else
+            {
+                $cumul_write_bytes_indep += $fields[4];
+            }
+            if (not defined $fs_data{$fields[6]})
+            {
+                $fs_data{$fields[6]} = [0,0];
+            }
+            $fs_data{$fields[6]}->[1] += $fields[4];
+        }
+
+        # record start and end of reads and writes
+        elsif ($fields[3] eq "POSIX_F_READ_START_TIMESTAMP")
+        {
+            # store until we find the end
+            $last_read_start = $fields[4];
+        }
+        elsif ($fields[3] eq "POSIX_F_READ_END_TIMESTAMP" && $fields[4] != 0)
+        {
+            # assume we got the read start already 
+            my $xdelta = $fields[4] - $last_read_start;
+            if($fields[1] == -1)
+            {
+                print FA_READ_SH "$last_read_start\t0\t$xdelta\t0\n";
+            }
+            else
+            {
+                print FA_READ "$last_read_start\t$fields[1]\t$xdelta\t0\n";
+            }
+        }
+        elsif ($fields[3] eq "POSIX_F_WRITE_START_TIMESTAMP")
+        {
+            # store until we find the end
+            $last_write_start = $fields[4];
+        }
+        elsif ($fields[3] eq "POSIX_F_WRITE_END_TIMESTAMP" && $fields[4] != 0)
+        {
+            # assume we got the write start already 
+            my $xdelta = $fields[4] - $last_write_start;
+            if($fields[1] == -1)
+            {
+                print FA_WRITE_SH "$last_write_start\t0\t$xdelta\t0\n";
+            }
+            else
+            {
+                print FA_WRITE "$last_write_start\t$fields[1]\t$xdelta\t0\n";
+            }
+        }
+
+        # record common access counter info
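+        # the ACCESS[1-4]_ACCESS counters hold the most common access sizes,
+        # and the matching ACCESS[1-4]_COUNT counters hold their frequencies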
+        elsif ($fields[3] =~ /^POSIX_ACCESS(.)_ACCESS/)
+        {
+            $access_size[$1] = $fields[4];
+        }
+        elsif ($fields[3] =~ /^MPIIO_ACCESS(.)_ACCESS/)
+        {
+            $access_size[$1] = $fields[4];
+        }
+        elsif ($fields[3] =~ /^POSIX_ACCESS(.)_COUNT/)
+        {
+            my $tmp_access_size = $access_size[$1];
+            if(defined $posix_access_hash{$tmp_access_size})
+            {
+                $posix_access_hash{$tmp_access_size} += $fields[4];
+            }
+            else
+            {
+                $posix_access_hash{$tmp_access_size} = $fields[4];
+            }
+        }
+        elsif ($fields[3] =~ /^MPIIO_ACCESS(.)_COUNT/)
+        {
+            my $tmp_access_size = $access_size[$1];
+            if(defined $mpiio_access_hash{$tmp_access_size})
+            {
+                $mpiio_access_hash{$tmp_access_size} += $fields[4];
+            }
+            else
+            {
+                $mpiio_access_hash{$tmp_access_size} = $fields[4];
+            }
+        }
+    }
+}
+close(PARSE_OUT) || die "darshan-parser failure: $! $?";
+
+# Fudge one point at the end to make xrange match in read and write plots.
+# For some reason I can't get the xrange command to work.  -Phil
+print FA_READ "$runtime\t-1\t0\t0\n";
+print FA_WRITE "$runtime\t-1\t0\t0\n";
+print FA_READ_SH "$runtime\t0\t0\t0\n";
+print FA_WRITE_SH "$runtime\t0\t0\t0\n";
+close(FA_READ);
+close(FA_READ_SH);
+close(FA_WRITE);
+close(FA_WRITE_SH);
+
+#
+# Exit out if there are no actual file accesses
+#
+if ($first_data_line)
+{
+    $strtm = strftime("%a %b %e %H:%M:%S %Y", localtime($starttime));
+
+    print "This darshan log has no file records. No summary was produced.\n";
+    print "    jobid: $jobid\n";
+    print "      uid: $uid\n";
+    print "starttime: $strtm ($starttime )\n";
+    print "  runtime: $runtime (seconds)\n";
+    print "   nprocs: $nprocs\n";
+    print "  version: $version\n";
+    exit(1);
+}
+
+# process last file record
+$file_record_hash{FILE_NAME} = $fields[5];
+if ($fields[0] eq "POSIX" || $fields[0] eq "MPIIO")
+{
+    process_file_record($current_rank, $current_hash, \%file_record_hash);
+}
+
+# copy template files to tmp_dir
+system "$cp $PREFIX/share/*.gplt $tmp_dir/";
+system "$cp $PREFIX/share/*.tex $tmp_dir/";
+
+
+# summary of time spent in POSIX & MPI-IO functions
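+# the four numeric columns are percentages of total core-seconds
+# (runtime * nprocs): time outside of I/O, then read, write, and metadata time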
+open(TIME_SUMMARY, ">$tmp_dir/time-summary.dat") || die("error opening output file:$!\n");
+print TIME_SUMMARY "# <type>, <app time>, <read>, <write>, <meta>\n";
+print TIME_SUMMARY "POSIX, ", ((($runtime * $nprocs - $summary{POSIX_F_READ_TIME} -
+    $summary{POSIX_F_WRITE_TIME} -
+    $summary{POSIX_F_META_TIME})/($runtime * $nprocs)) * 100);
+print TIME_SUMMARY ", ", (($summary{POSIX_F_READ_TIME}/($runtime * $nprocs))*100);
+print TIME_SUMMARY ", ", (($summary{POSIX_F_WRITE_TIME}/($runtime * $nprocs))*100);
+print TIME_SUMMARY ", ", (($summary{POSIX_F_META_TIME}/($runtime * $nprocs))*100), "\n";
+if (defined $summary{MPIIO_INDEP_OPENS})
+{
+    print TIME_SUMMARY "MPI-IO, ", ((($runtime * $nprocs - $summary{MPIIO_F_READ_TIME} -
+        $summary{MPIIO_F_WRITE_TIME} -
+        $summary{MPIIO_F_META_TIME})/($runtime * $nprocs)) * 100);
+    print TIME_SUMMARY ", ", (($summary{MPIIO_F_READ_TIME}/($runtime * $nprocs))*100);
+    print TIME_SUMMARY ", ", (($summary{MPIIO_F_WRITE_TIME}/($runtime * $nprocs))*100);
+    print TIME_SUMMARY ", ", (($summary{MPIIO_F_META_TIME}/($runtime * $nprocs))*100), "\n";
+}
+close TIME_SUMMARY;
+
+# counts of operations
+open(PSX_OP_COUNTS, ">$tmp_dir/posix-op-counts.dat") || die("error opening output file: $!\n");
+print PSX_OP_COUNTS "# <operation>, <POSIX count>\n";
+print PSX_OP_COUNTS
+    "Read, ", $summary{POSIX_READS} + $summary{POSIX_FREADS}, "\n",
+    "Write, ", $summary{POSIX_WRITES} + $summary{POSIX_FWRITES}, "\n",
+    "Open, ", $summary{POSIX_OPENS} + $summary{POSIX_FOPENS}, "\n",
+    "Stat, ", $summary{POSIX_STATS}, "\n",
+    "Seek, ", $summary{POSIX_SEEKS}, "\n",
+    "Mmap, ", $summary{POSIX_MMAPS}, "\n",
+    "Fsync, ", $summary{POSIX_FSYNCS} + $summary{POSIX_FDSYNCS}, "\n";
+close PSX_OP_COUNTS;
+
+if (defined $summary{MPIIO_INDEP_OPENS})
+{
+    # TODO: do we want to look at MPI split or non-blocking i/o here? 
+    open(MPI_OP_COUNTS, ">$tmp_dir/mpiio-op-counts.dat") || die("error opening output file: $!\n");
+    print MPI_OP_COUNTS "# <operation>, <MPI Ind. count>, <MPI Coll. count>\n";
+    print MPI_OP_COUNTS
+        "Read, ", $summary{MPIIO_INDEP_READS}, ", ", $summary{MPIIO_COLL_READS}, "\n",
+        "Write, ", $summary{MPIIO_INDEP_WRITES}, ", ", $summary{MPIIO_COLL_WRITES}, "\n",
+        "Open, ", $summary{MPIIO_INDEP_OPENS},", ", $summary{MPIIO_COLL_OPENS}, "\n",
+        "Stat, ", "0, 0\n",
+        "Seek, ", "0, 0\n",
+        "Mmap, ", "0, 0\n",
+        "Fsync, ", "0, ", $summary{MPIIO_SYNCS}, "\n";
+    close MPI_OP_COUNTS;
+}
+
+# histograms of reads and writes (for POSIX and MPI-IO modules)
+open (IO_HIST, ">$tmp_dir/posix-access-hist.dat") || die("error opening output file: $!\n");
+print IO_HIST "# <size_range>, <POSIX_reads>, <POSIX_writes>\n";
+print IO_HIST "0-100, ",
+              $summary{POSIX_SIZE_READ_0_100}, ", ",
+              $summary{POSIX_SIZE_WRITE_0_100}, "\n";
+print IO_HIST "101-1K, ",
+              $summary{POSIX_SIZE_READ_100_1K}, ", ",
+              $summary{POSIX_SIZE_WRITE_100_1K}, "\n";
+print IO_HIST "1K-10K, ",
+              $summary{POSIX_SIZE_READ_1K_10K}, ", ",
+              $summary{POSIX_SIZE_WRITE_1K_10K}, "\n";
+print IO_HIST "10K-100K, ",
+              $summary{POSIX_SIZE_READ_10K_100K}, ", ",
+              $summary{POSIX_SIZE_WRITE_10K_100K}, "\n";
+print IO_HIST "100K-1M, ",
+              $summary{POSIX_SIZE_READ_100K_1M}, ", ",
+              $summary{POSIX_SIZE_WRITE_100K_1M}, "\n";
+print IO_HIST "1M-4M, ",
+              $summary{POSIX_SIZE_READ_1M_4M}, ", ",
+              $summary{POSIX_SIZE_WRITE_1M_4M}, "\n";
+print IO_HIST "4M-10M, ",
+              $summary{POSIX_SIZE_READ_4M_10M}, ", ",
+              $summary{POSIX_SIZE_WRITE_4M_10M}, "\n";
+print IO_HIST "10M-100M, ",
+              $summary{POSIX_SIZE_READ_10M_100M}, ", ",
+              $summary{POSIX_SIZE_WRITE_10M_100M}, "\n";
+print IO_HIST "100M-1G, ",
+              $summary{POSIX_SIZE_READ_100M_1G}, ", ",
+              $summary{POSIX_SIZE_WRITE_100M_1G}, "\n";
+print IO_HIST "1G+, ",
+              $summary{POSIX_SIZE_READ_1G_PLUS}, ", ",
+              $summary{POSIX_SIZE_WRITE_1G_PLUS}, "\n";
+close IO_HIST;
+
+if (defined $summary{MPIIO_INDEP_OPENS})
+{
+    open (IO_HIST, ">$tmp_dir/mpiio-access-hist.dat") || die("error opening output file: $!\n");
+    print IO_HIST "# <size_range>, <MPIIO_reads>, <MPIIO_writes>\n";
+    print IO_HIST "0-100, ",
+                  $summary{MPIIO_SIZE_READ_AGG_0_100}, ", ",
+                  $summary{MPIIO_SIZE_WRITE_AGG_0_100}, "\n";
+    print IO_HIST "101-1K, ",
+                  $summary{MPIIO_SIZE_READ_AGG_100_1K}, ", ",
+                  $summary{MPIIO_SIZE_WRITE_AGG_100_1K}, "\n";
+    print IO_HIST "1K-10K, ",
+                  $summary{MPIIO_SIZE_READ_AGG_1K_10K}, ", ",
+                  $summary{MPIIO_SIZE_WRITE_AGG_1K_10K}, "\n";
+    print IO_HIST "10K-100K, ",
+                  $summary{MPIIO_SIZE_READ_AGG_10K_100K}, ", ",
+                  $summary{MPIIO_SIZE_WRITE_AGG_10K_100K}, "\n";
+    print IO_HIST "100K-1M, ",
+                  $summary{MPIIO_SIZE_READ_AGG_100K_1M}, ", ",
+                  $summary{MPIIO_SIZE_WRITE_AGG_100K_1M}, "\n";
+    print IO_HIST "1M-4M, ",
+                  $summary{MPIIO_SIZE_READ_AGG_1M_4M}, ", ",
+                  $summary{MPIIO_SIZE_WRITE_AGG_1M_4M}, "\n";
+    print IO_HIST "4M-10M, ",
+                  $summary{MPIIO_SIZE_READ_AGG_4M_10M}, ", ",
+                  $summary{MPIIO_SIZE_WRITE_AGG_4M_10M}, "\n";
+    print IO_HIST "10M-100M, ",
+                  $summary{MPIIO_SIZE_READ_AGG_10M_100M}, ", ",
+                  $summary{MPIIO_SIZE_WRITE_AGG_10M_100M}, "\n";
+    print IO_HIST "100M-1G, ",
+                  $summary{MPIIO_SIZE_READ_AGG_100M_1G}, ", ",
+                  $summary{MPIIO_SIZE_WRITE_AGG_100M_1G}, "\n";
+    print IO_HIST "1G+, ",
+                  $summary{MPIIO_SIZE_READ_AGG_1G_PLUS}, ", ",
+                  $summary{MPIIO_SIZE_WRITE_AGG_1G_PLUS}, "\n";
+    close IO_HIST;
+}
+
+# sequential and consecutive access patterns
+open (PATTERN, ">$tmp_dir/pattern.dat") || die("error opening output file: $!\n");
+print PATTERN "# op total sequential consecutive\n";
+print PATTERN "Read, ", $summary{POSIX_READS} + $summary{POSIX_FREADS}, ", ",
+    $summary{POSIX_SEQ_READS}, ", ", $summary{POSIX_CONSEC_READS}, "\n";
+print PATTERN "Write, ", $summary{POSIX_WRITES} + $summary{POSIX_FWRITES}, ", ",
+    $summary{POSIX_SEQ_WRITES}, ", ", $summary{POSIX_CONSEC_WRITES}, "\n";
+close PATTERN;
+
+# table of common access sizes
+open(ACCESS_TABLE, ">$tmp_dir/access-table.tex") || die("error opening output file:$!\n");
+print ACCESS_TABLE "
+\\begin{tabular}{r|r|r}
+\\multicolumn{3}{c}{ } \\\\
+\\multicolumn{3}{c}{Most Common Access Sizes} \\\\
+\\hline
+\& access size \& count \\\\
+\\hline
+\\hline
+";
+
+# sort POSIX & MPI-IO access sizes (descending)
+my $i = 0;
+my $tmp_access_count = 0;
+foreach $value (keys %posix_access_hash) {
+    if ($posix_access_hash{$value} > 0) {
+        $tmp_access_count++;
+        if ($tmp_access_count == 4) {
+            last;
+        }
+    }
+}
+if ($tmp_access_count > 0)
+{
+    foreach $value (sort {$posix_access_hash{$b} <=> $posix_access_hash{$a} } keys %posix_access_hash)
+    {
+        if ($i == 4) {
+            last;
+        }
+        if ($posix_access_hash{$value} == 0) {
+            last;
+        }
+
+        if ($i == 0) {
+            print ACCESS_TABLE "
+            \\multirow{$tmp_access_count}{*}{POSIX} \& $value \& $posix_access_hash{$value} \\\\\n
+            ";
+        }
+        else {
+            print ACCESS_TABLE "
+            \& $value \& $posix_access_hash{$value} \\\\\n
+            ";
+        }
+        $i++;
+    }
+}
+
+$i = 0;
+$tmp_access_count = 0;
+foreach $value (keys %mpiio_access_hash) {
+    if ($mpiio_access_hash{$value} > 0) {
+        $tmp_access_count++;
+        if ($tmp_access_count == 4) {
+            last;
+        }
+    }
+}
+if ($tmp_access_count > 0)
+{
+    foreach $value (sort {$mpiio_access_hash{$b} <=> $mpiio_access_hash{$a} } keys %mpiio_access_hash)
+    {
+        if ($i == 4) {
+            last;
+        }
+        if ($mpiio_access_hash{$value} == 0) {
+            last;
+        }
+
+        if ($i == 0) {
+            print ACCESS_TABLE "
+            \\hline
+            \\multirow{$tmp_access_count}{*}{MPI-IO \\textdagger} \& $value \& $mpiio_access_hash{$value} \\\\\n
+            ";
+        }
+        else {
+            print ACCESS_TABLE "
+            \& $value \& $mpiio_access_hash{$value} \\\\\n
+            ";
+        }
+        $i++;
+    }
+}
+
+print ACCESS_TABLE "
+\\hline
+\\end{tabular}
+";
+close ACCESS_TABLE;
+
+# file count table
+#open(TABLES, ">$tmp_dir/file-count-table.tex") || die("error opening output file:$!\n");
+#print TABLES "
+#\\begin{tabular}{r|r|r|r}
+#\\multicolumn{4}{c}{ } \\\\
+#\\multicolumn{4}{c}{File Count Summary} \\\\
+#";
+#if($size_est_flag == 1)
+#{
+#print TABLES "
+#\\multicolumn{4}{c}{(estimated by I/O access offsets)} \\\\
+#";
+#}
+#print TABLES "
+#\\hline
+#type \& number of files \& avg. size \& max size \\\\
+#\\hline
+#\\hline
+#";
+#my $counter;
+#my $sum;
+#my $max;
+#my $key;
+#my $avg;
+#
+#$counter = 0;
+#$sum = 0;
+#$max = 0;
+#foreach $key (keys %hash_files) {
+#    $counter++;
+#    if($hash_files{$key}{'min_open_size'} >
+#        $hash_files{$key}{'max_size'})
+#    {
+#        $sum += $hash_files{$key}{'min_open_size'};
+#        if($hash_files{$key}{'min_open_size'} > $max)
+#        {
+#            $max = $hash_files{$key}{'min_open_size'};
+#        }
+#    }
+#    else
+#    {
+#        $sum += $hash_files{$key}{'max_size'};
+#        if($hash_files{$key}{'max_size'} > $max)
+#        {
+#            $max = $hash_files{$key}{'max_size'};
+#        }
+#    }
+#}
+#if($counter > 0) { $avg = $sum / $counter; }
+#else { $avg = 0; }
+#$avg = format_bytes($avg);
+#$max = format_bytes($max);
+#print TABLES "total opened \& $counter \& $avg \& $max \\\\\n";
+#
+#$counter = 0;
+#$sum = 0;
+#$max = 0;
+#foreach $key (keys %hash_files) {
+#    if($hash_files{$key}{'was_read'} && !($hash_files{$key}{'was_written'}))
+#    {
+#        $counter++;
+#        if($hash_files{$key}{'min_open_size'} >
+#            $hash_files{$key}{'max_size'})
+#        {
+#            $sum += $hash_files{$key}{'min_open_size'};
+#            if($hash_files{$key}{'min_open_size'} > $max)
+#            {
+#                $max = $hash_files{$key}{'min_open_size'};
+#            }
+#        }
+#        else
+#        {
+#            $sum += $hash_files{$key}{'max_size'};
+#            if($hash_files{$key}{'max_size'} > $max)
+#            {
+#                $max = $hash_files{$key}{'max_size'};
+#            }
+#        }
+#    }
+#}
+#if($counter > 0) { $avg = $sum / $counter; }
+#else { $avg = 0; }
+#$avg = format_bytes($avg);
+#$max = format_bytes($max);
+#print TABLES "read-only files \& $counter \& $avg \& $max \\\\\n";
+#
+#$counter = 0;
+#$sum = 0;
+#$max = 0;
+#foreach $key (keys %hash_files) {
+#    if(!($hash_files{$key}{'was_read'}) && $hash_files{$key}{'was_written'})
+#    {
+#        $counter++;
+#        if($hash_files{$key}{'min_open_size'} >
+#            $hash_files{$key}{'max_size'})
+#        {
+#            $sum += $hash_files{$key}{'min_open_size'};
+#            if($hash_files{$key}{'min_open_size'} > $max)
+#            {
+#                $max = $hash_files{$key}{'min_open_size'};
+#            }
+#        }
+#        else
+#        {
+#            $sum += $hash_files{$key}{'max_size'};
+#            if($hash_files{$key}{'max_size'} > $max)
+#            {
+#                $max = $hash_files{$key}{'max_size'};
+#            }
+#        }
+#    }
+#}
+#if($counter > 0) { $avg = $sum / $counter; }
+#else { $avg = 0; }
+#$avg = format_bytes($avg);
+#$max = format_bytes($max);
+#print TABLES "write-only files \& $counter \& $avg \& $max \\\\\n";
+#
+#$counter = 0;
+#$sum = 0;
+#$max = 0;
+#foreach $key (keys %hash_files) {
+#    if($hash_files{$key}{'was_read'} && $hash_files{$key}{'was_written'})
+#    {
+#        $counter++;
+#        if($hash_files{$key}{'min_open_size'} >
+#            $hash_files{$key}{'max_size'})
+#        {
+#            $sum += $hash_files{$key}{'min_open_size'};
+#            if($hash_files{$key}{'min_open_size'} > $max)
+#            {
+#                $max = $hash_files{$key}{'min_open_size'};
+#            }
+#        }
+#        else
+#        {
+#            $sum += $hash_files{$key}{'max_size'};
+#            if($hash_files{$key}{'max_size'} > $max)
+#            {
+#                $max = $hash_files{$key}{'max_size'};
+#            }
+#        }
+#    }
+#}
+#if($counter > 0) { $avg = $sum / $counter; }
+#else { $avg = 0; }
+#$avg = format_bytes($avg);
+#$max = format_bytes($max);
+#print TABLES "read/write files \& $counter \& $avg \& $max \\\\\n";
+#
+#$counter = 0;
+#$sum = 0;
+#$max = 0;
+#foreach $key (keys %hash_files) {
+#    if($hash_files{$key}{'was_written'} &&
+#        $hash_files{$key}{'min_open_size'} == 0 &&
+#        $hash_files{$key}{'max_size'} > 0)
+#    {
+#        $counter++;
+#        if($hash_files{$key}{'min_open_size'} >
+#            $hash_files{$key}{'max_size'})
+#        {
+#            $sum += $hash_files{$key}{'min_open_size'};
+#            if($hash_files{$key}{'min_open_size'} > $max)
+#            {
+#                $max = $hash_files{$key}{'min_open_size'};
+#            }
+#        }
+#        else
+#        {
+#            $sum += $hash_files{$key}{'max_size'};
+#            if($hash_files{$key}{'max_size'} > $max)
+#            {
+#                $max = $hash_files{$key}{'max_size'};
+#            }
+#        }
+#    }
+#}
+#if($counter > 0) { $avg = $sum / $counter; }
+#else { $avg = 0; }
+#$avg = format_bytes($avg);
+#$max = format_bytes($max);
+#print TABLES "created files \& $counter \& $avg \& $max \\\\\n";
+#
+#print TABLES "
+#\\hline
+#\\end{tabular}
+#";
+#close(TABLES);
+
+# generate per filesystem data
+open(FS_TABLE, ">$tmp_dir/fs-data-table.tex") || die("error opening output files:$!\n");
+print FS_TABLE "
+\\begin{tabular}{c|r|r|r|r}
+\\multicolumn{5}{c}{ } \\\\
+\\multicolumn{5}{c}{Data Transfer Per Filesystem} \\\\
+\\hline
+\\multirow{2}{*}{File System} \& \\multicolumn{2}{c}{Write} \\vline \& \\multicolumn{2}{c}{Read} \\\\
+\\cline{2-5}
+\& MiB \& Ratio \& MiB \& Ratio \\\\\
+\\hline
+\\hline
+";
+
+foreach $key (keys %fs_data)
+{
+    my $wr_total_mb = ($fs_data{$key}->[1] / (1024*1024));
+    my $rd_total_mb = ($fs_data{$key}->[0] / (1024*1024));
+
+    my $wr_total_rt;
+    if ($cumul_write_bytes_shared+$cumul_write_bytes_indep)
+    {
+        $wr_total_rt = ($fs_data{$key}->[1] / ($cumul_write_bytes_shared + $cumul_write_bytes_indep));
+    }
+    else
+    {
+        $wr_total_rt = 0;
+    }
+
+    my $rd_total_rt;
+    if ($cumul_read_bytes_shared+$cumul_read_bytes_indep)
+    {
+        $rd_total_rt = ($fs_data{$key}->[0] / ($cumul_read_bytes_shared + $cumul_read_bytes_indep));
+    }
+    else
+    {
+        $rd_total_rt = 0;
+    }
+
+    printf FS_TABLE "%s \& %.5f \& %.5f \& %.5f \& %.5f \\\\\n",
+        $key, $wr_total_mb, $wr_total_rt, $rd_total_mb, $rd_total_rt;
+}
+
+print FS_TABLE "
+\\hline
+\\end{tabular}
+";
+close FS_TABLE;
+
+# variance data
+open(VAR_TABLE, ">$tmp_dir/variance-table.tex") || die("error opening output file:$!\n");
+print VAR_TABLE "
+\\begin{tabular}{c|r|r|r|r|r|r|r|r|r}
+\\multicolumn{10}{c}{} \\\\
+\\multicolumn{10}{c}{Variance in Shared Files} \\\\
+\\hline
+File \& Processes \& \\multicolumn{3}{c}{Fastest} \\vline \&
+\\multicolumn{3}{c}{Slowest} \\vline \& \\multicolumn{2}{c}{\$\\sigma\$} \\\\
+\\cline{3-10}
+Suffix \&  \& Rank \& Time \& Bytes \& Rank \& Time \& Bytes \& Time \& Bytes \\\\
+\\hline
+\\hline
+";
+
+my $curcount = 1;
+foreach $key (sort { $hash_files{$b}{'slowest_time'} <=> $hash_files{$a}{'slowest_time'} } keys %hash_files) {
+
+    if ($curcount > 20) { last; }
+
+    if ($hash_files{$key}{'procs'} > 1)
+    {
+        my $vt = sprintf("%.3g", sqrt($hash_files{$key}{'variance_time'}));
+        my $vb = sprintf("%.3g", sqrt($hash_files{$key}{'variance_bytes'}));
+        my $fast_bytes = format_bytes($hash_files{$key}{'fastest_bytes'});
+        my $slow_bytes = format_bytes($hash_files{$key}{'slowest_bytes'});
+        my $name = encode('latex', $hash_files{$key}{'name'});
+
+        print VAR_TABLE "
+               $name \&
+               $hash_files{$key}{'procs'} \&
+               $hash_files{$key}{'fastest_rank'} \&
+               $hash_files{$key}{'fastest_time'} \&
+               $fast_bytes \&
+               $hash_files{$key}{'slowest_rank'} \&
+               $hash_files{$key}{'slowest_time'} \&
+               $slow_bytes \&
+               $vt \&
+               $vb \\\\
+         ";
+        $curcount++;
+    }
+}
+
+print VAR_TABLE "
+\\hline
+\\end{tabular}
+";
+close VAR_TABLE;
+
+# calculate performance
+##########################################################################
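+# estimated I/O time = slowest per-rank cumulative time on unique files
+# plus the accumulated slowest-rank times for shared files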
+
+# what was the slowest time by any proc for unique file access?
+my $slowest_uniq_time = 0;
+if(keys %hash_unique_file_time > 0)
+{
+    $slowest_uniq_time < $_ and $slowest_uniq_time = $_ for values %hash_unique_file_time;
+}
+print("Slowest unique file time: $slowest_uniq_time\n");
+print("Slowest shared file time: $shared_file_time\n");
+print("Total bytes read and written by app (may be incorrect): $total_job_bytes\n");
+my $tmp_total_time = $slowest_uniq_time+$shared_file_time;
+print("Total absolute I/O time: $tmp_total_time\n");
+
+# move to tmp_dir
+chdir $tmp_dir;
+
+# gather data to be used for document title (headers/footers)
+($executable, $junk) = split(' ', $cmdline, 2);
+@parts = split('/', $executable);
+$cmd = $parts[$#parts];
+@timearray = localtime($starttime);
+$year = $timearray[5] + 1900;
+$mon = $timearray[4] + 1;
+$mday = $timearray[3];
+
+# detect gnuplot ranges for file access graphs
+my $ymax = $nprocs;
+my $yinc = int($nprocs / 8);
+if($yinc == 0) {$yinc=1;}
+my $ymaxtic = $nprocs-1;
+
+# reformat cumulative i/o data for file access table
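+# (per-process averages: cumulative seconds divided by nprocs, and bytes
+# converted to MiB per process)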
+my $cri = $cumul_read_indep / $nprocs;
+my $crbi = $cumul_read_bytes_indep / ($nprocs * 1048576.0);
+
+my $cwi = $cumul_write_indep / $nprocs;
+my $cwbi = $cumul_write_bytes_indep / ($nprocs * 1048576.0);
+
+my $crs = $cumul_read_shared / $nprocs;
+my $crbs = $cumul_read_bytes_shared / ($nprocs * 1048576.0);
+
+my $cws = $cumul_write_shared / $nprocs;
+my $cwbs = $cumul_write_bytes_shared / ($nprocs * 1048576.0);
+
+my $cmi = $cumul_meta_indep / $nprocs;
+my $cms = $cumul_meta_shared / $nprocs;
+
+# do any extra work needed for plotting mpi-io graphs
+if (defined $summary{MPIIO_INDEP_OPENS})
+{
+    system "$gnuplot -e \"data_file='mpiio-access-hist.dat'; graph_title='MPI-IO Access Sizes {\262}'; \\
+    output_file='mpiio-access-hist.eps'\" access-hist-eps.gplt";
+    system "$epstopdf mpiio-access-hist.eps";
+
+    open(OP_COUNTS_PLT, ">>$tmp_dir/op-counts-eps.gplt") || die("error opening output file: $!\n");
+    my $tmp_sz = -s "$tmp_dir/op-counts-eps.gplt";
+    # overwrite existing newline
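+    # (the clauses printed below then continue the plot command already in
+    # op-counts-eps.gplt rather than starting a new one)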
+    truncate(OP_COUNTS_PLT, $tmp_sz-1);
+    print OP_COUNTS_PLT ", \\
+    \"mpiio-op-counts.dat\" using 2:xtic(1) title \"MPI-IO Indep.\", \\
+    \"\" using 3 title \"MPI-IO Coll.\"\n";
+    close OP_COUNTS_PLT;
+}
+
+# execute base gnuplot scripts
+system "$gnuplot time-summary-eps.gplt";
+system "$epstopdf time-summary.eps";
+system "$gnuplot op-counts-eps.gplt";
+system "$epstopdf op-counts.eps";
+system "$gnuplot -e \"data_file='posix-access-hist.dat'; graph_title='POSIX Access Sizes'; \\
+output_file='posix-access-hist.eps'\" access-hist-eps.gplt";
+system "$epstopdf posix-access-hist.eps";
+system "$gnuplot -e \"ymax=$ymax; yinc=$yinc; ymaxtic=$ymaxtic; runtime='$runtime'\" file-access-eps.gplt";
+system "$epstopdf file-access-read.eps";
+system "$epstopdf file-access-write.eps";
+system "$epstopdf file-access-shared.eps";
+system "$gnuplot pattern-eps.gplt";
+system "$epstopdf pattern.eps";
+
+# generate summary PDF
+# NOTE: we pass arguments to the latex template using '\def' commands
+# NOTE: an autoconf test determines if -halt-on-error is available and sets
+# __CP_PDFLATEX_HALT_ON_ERROR accordingly
+my $latex_cmd_line = "\"\\def\\titlecmd{$cmd} \\
+    \\def\\titlemon{$mon} \\
+    \\def\\titlemday{$mday} \\
+    \\def\\titleyear{$year} \\
+    \\def\\titlecmdline{$cmdline} \\
+    \\def\\jobid{$jobid} \\
+    \\def\\jobuid{$uid} \\
+    \\def\\jobnprocs{$nprocs} \\
+    \\def\\jobruntime{$runtime} \\
+    \\def\\filecri{$cri} \\
+    \\def\\filecrbi{$crbi} \\
+    \\def\\filecwi{$cwi} \\
+    \\def\\filecwbi{$cwbi} \\
+    \\def\\filecrs{$crs} \\
+    \\def\\filecrbs{$crbs} \\
+    \\def\\filecws{$cws} \\
+    \\def\\filecwbs{$cwbs} \\
+    \\def\\filecmi{$cmi} \\
+    \\def\\filecms{$cms} \\
+    \\input{summary.tex}\" \\
+    @__CP_PDFLATEX_HALT_ON_ERROR@";
+
+if (defined $summary{MPIIO_INDEP_OPENS})
+{
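+    # splice \def\inclmpiio{1} in just after the opening quote of the command
+    # line; summary.tex presumably keys off this to include the MPI-IO figures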
+    my $mpiio_latex_flags = "\\def\\inclmpiio{1} \\\n";
+    $latex_cmd_line = substr($latex_cmd_line, 0, 1) . $mpiio_latex_flags . substr($latex_cmd_line, 1);
+}
+
+$system_rc = system "$pdflatex $latex_cmd_line > latex.output";
+if($system_rc)
+{
+    print("LaTeX generation (phase1) failed [$system_rc], aborting summary creation.\n");
+    print("error log:\n");
+    system("tail latex.output");
+    exit(1);
+}
+$system_rc = system "$pdflatex $latex_cmd_line > latex.output2";
+if($system_rc)
+{
+    print("LaTeX generation (phase2) failed [$system_rc], aborting summary creation.\n");
+    print("error log:\n");
+    system("tail latex.output2");
+    exit(1);
+}
+
+# get back out of tmp dir and grab results
+chdir $orig_dir;
+system "$mv $tmp_dir/summary.pdf $output_file";
+
+
+sub process_file_record
+{
+    my $rank = $_[0];
+    my $hash = $_[1];
+    my(%file_record) = %{$_[2]};
+
+    if($file_record{'POSIX_OPENS'} == 0 &&
+        $file_record{'POSIX_FOPENS'} == 0 &&
+        (!defined $file_record{'MPIIO_INDEP_OPENS'} ||
+        ($file_record{'MPIIO_INDEP_OPENS'} == 0 && $file_record{'MPIIO_COLL_OPENS'} == 0)))
+    {
+        # file wasn't really opened, just stat probably
+        return;
+    }
+
+    # record smallest open time size reported by any rank
+    # XXX this isn't doable since dropping SIZE_AT_OPEN counter
+    $hash_files{$hash}{'min_open_size'} = 0;
+
+    # record largest size that the file reached at any rank
+    if(!defined($hash_files{$hash}{'max_size'}) ||
+        $hash_files{$hash}{'max_size'} <  
+        ($file_record{'POSIX_MAX_BYTE_READ'} + 1))
+    {
+        $hash_files{$hash}{'max_size'} = 
+            $file_record{'POSIX_MAX_BYTE_READ'} + 1;
+    }
+    if(!defined($hash_files{$hash}{'max_size'}) ||
+        $hash_files{$hash}{'max_size'} <  
+        ($file_record{'POSIX_MAX_BYTE_WRITTEN'} + 1))
+    {
+        $hash_files{$hash}{'max_size'} = 
+            $file_record{'POSIX_MAX_BYTE_WRITTEN'} + 1;
+    }
+
+    # make sure there is an initial value for read and write flags
+    if(!defined($hash_files{$hash}{'was_read'}))
+    {
+        $hash_files{$hash}{'was_read'} = 0;
+    }
+    if(!defined($hash_files{$hash}{'was_written'}))
+    {
+        $hash_files{$hash}{'was_written'} = 0;
+    }
+
+    if(defined $file_record{'MPIIO_INDEP_OPENS'} &&
+        ($file_record{'MPIIO_INDEP_OPENS'} > 0 ||
+        $file_record{'MPIIO_COLL_OPENS'} > 0))
+    {
+        # mpi file
+        if($file_record{'MPIIO_INDEP_READS'} > 0 ||
+            $file_record{'MPIIO_COLL_READS'} > 0 ||
+            $file_record{'MPIIO_SPLIT_READS'} > 0 ||
+            $file_record{'MPIIO_NB_READS'} > 0)
+        {
+            # data was read from the file
+            $hash_files{$hash}{'was_read'} = 1;
+        }
+        if($file_record{'MPIIO_INDEP_WRITES'} > 0 ||
+            $file_record{'MPIIO_COLL_WRITES'} > 0 ||
+            $file_record{'MPIIO_SPLIT_WRITES'} > 0 ||
+            $file_record{'MPIIO_NB_WRITES'} > 0)
+        {
+            # data was written to the file
+            $hash_files{$hash}{'was_written'} = 1;
+        }
+    }
+    else
+    {
+        # posix file
+        if($file_record{'POSIX_READS'} > 0 ||
+            $file_record{'POSIX_FREADS'} > 0)
+        {
+            # data was read from the file
+            $hash_files{$hash}{'was_read'} = 1;
+        }
+        if($file_record{'POSIX_WRITES'} > 0 ||
+            $file_record{'POSIX_FWRITES'} > 0)
+        {
+            # data was written to the file 
+            $hash_files{$hash}{'was_written'} = 1;
+        }
+    }
+
+    $hash_files{$hash}{'name'} = $file_record{FILE_NAME};
+
+    if ($rank == -1)
+    {
+        $hash_files{$hash}{'procs'}          = $nprocs;
+        $hash_files{$hash}{'slowest_rank'}   = $file_record{'POSIX_SLOWEST_RANK'};
+        $hash_files{$hash}{'slowest_time'}   = $file_record{'POSIX_F_SLOWEST_RANK_TIME'};
+        $hash_files{$hash}{'slowest_bytes'}  = $file_record{'POSIX_SLOWEST_RANK_BYTES'};
+        $hash_files{$hash}{'fastest_rank'}   = $file_record{'POSIX_FASTEST_RANK'};
+        $hash_files{$hash}{'fastest_time'}   = $file_record{'POSIX_F_FASTEST_RANK_TIME'};
+        $hash_files{$hash}{'fastest_bytes'}  = $file_record{'POSIX_FASTEST_RANK_BYTES'};
+        $hash_files{$hash}{'variance_time'}  = $file_record{'POSIX_F_VARIANCE_RANK_TIME'};
+        $hash_files{$hash}{'variance_bytes'} = $file_record{'POSIX_F_VARIANCE_RANK_BYTES'};
+    }
+    else
+    {
+        my $total_time = $file_record{'POSIX_F_META_TIME'} +
+                         $file_record{'POSIX_F_READ_TIME'} +
+                         $file_record{'POSIX_F_WRITE_TIME'};
+
+        my $total_bytes = $file_record{'POSIX_BYTES_READ'} +
+                          $file_record{'POSIX_BYTES_WRITTEN'};
+
+        if(!defined($hash_files{$hash}{'slowest_time'}) ||
+           $hash_files{$hash}{'slowest_time'} < $total_time)
+        {
+            $hash_files{$hash}{'slowest_time'}  = $total_time;
+            $hash_files{$hash}{'slowest_rank'}  = $rank;
+            $hash_files{$hash}{'slowest_bytes'} = $total_bytes;
+        }
+
+        if(!defined($hash_files{$hash}{'fastest_time'}) ||
+           $hash_files{$hash}{'fastest_time'} > $total_time)
+        {
+            $hash_files{$hash}{'fastest_time'}  = $total_time;
+            $hash_files{$hash}{'fastest_rank'}  = $rank;
+            $hash_files{$hash}{'fastest_bytes'} = $total_bytes;
+        }
+
+        if(!defined($hash_files{$hash}{'variance_time_S'}))
+        {
+            $hash_files{$hash}{'variance_time_S'} = 0;
+            $hash_files{$hash}{'variance_time_T'} = $total_time;
+            $hash_files{$hash}{'variance_time_n'} = 1;
+            $hash_files{$hash}{'variance_bytes_S'} = 0;
+            $hash_files{$hash}{'variance_bytes_T'} = $total_bytes;
+            $hash_files{$hash}{'variance_bytes_n'} = 1;
+            $hash_files{$hash}{'procs'} = 1;
+            $hash_files{$hash}{'variance_time'} = 0;
+            $hash_files{$hash}{'variance_bytes'} = 0;
+        }
+        else
+        {
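+            # running variance via incremental update: S accumulates the sum of
+            # squared deviations, T the running total, n the sample count (this
+            # looks like the Chan/Golub/LeVeque pairwise formula with m=1)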
+            my $n = $hash_files{$hash}{'variance_time_n'};
+            my $m = 1;
+            my $T = $hash_files{$hash}{'variance_time_T'};
+            $hash_files{$hash}{'variance_time_S'} += ($m/($n*($n+$m)))*(($n/$m)*$total_time - $T)*(($n/$m)*$total_time - $T);
+            $hash_files{$hash}{'variance_time_T'} += $total_time;
+            $hash_files{$hash}{'variance_time_n'} += 1;
+
+            $hash_files{$hash}{'variance_time'}    = $hash_files{$hash}{'variance_time_S'} / $hash_files{$hash}{'variance_time_n'};
+
+            $n = $hash_files{$hash}{'variance_bytes_n'};
+            $m = 1;
+            $T = $hash_files{$hash}{'variance_bytes_T'};
+            $hash_files{$hash}{'variance_bytes_S'} += ($m/($n*($n+$m)))*(($n/$m)*$total_bytes - $T)*(($n/$m)*$total_bytes - $T);
+            $hash_files{$hash}{'variance_bytes_T'} += $total_bytes;
+            $hash_files{$hash}{'variance_bytes_n'} += 1;
+
+            $hash_files{$hash}{'variance_bytes'}    = $hash_files{$hash}{'variance_bytes_S'} / $hash_files{$hash}{'variance_bytes_n'};
+
+            $hash_files{$hash}{'procs'} = $hash_files{$hash}{'variance_time_n'};
+        }
+    }
+
+    # if this is a non-shared file, then add the time spent here to the
+    # total for that particular rank
+    # XXX mpiio or posix? should we do both or just pick mpiio over posix?
+    if ($rank != -1)
+    {
+        # is it mpi-io or posix?
+        if(defined $file_record{MPIIO_INDEP_OPENS} &&
+            ($file_record{MPIIO_INDEP_OPENS} > 0 ||
+            $file_record{MPIIO_COLL_OPENS} > 0))
+        {
+            # add up mpi times
+            if(defined($hash_unique_file_time{$rank}))
+            {
+                $hash_unique_file_time{$rank} +=
+                    $file_record{MPIIO_F_META_TIME} + 
+                    $file_record{MPIIO_F_READ_TIME} + 
+                    $file_record{MPIIO_F_WRITE_TIME};
+            }
+            else
+            {
+                $hash_unique_file_time{$rank} =
+                    $file_record{MPIIO_F_META_TIME} + 
+                    $file_record{MPIIO_F_READ_TIME} + 
+                    $file_record{MPIIO_F_WRITE_TIME};
+            }
+        }
+        else
+        {
+            # add up posix times
+            if(defined($hash_unique_file_time{$rank}))
+            {
+                $hash_unique_file_time{$rank} +=
+                    $file_record{POSIX_F_META_TIME} + 
+                    $file_record{POSIX_F_READ_TIME} + 
+                    $file_record{POSIX_F_WRITE_TIME};
+            }
+            else
+            {
+                $hash_unique_file_time{$rank} =
+                    $file_record{POSIX_F_META_TIME} + 
+                    $file_record{POSIX_F_READ_TIME} + 
+                    $file_record{POSIX_F_WRITE_TIME};
+            }
+        }
+    }
+    else
+    {
+        # cumulative time spent on shared files by slowest proc
+        # is it mpi-io or posix?
+        if(defined $file_record{MPIIO_INDEP_OPENS} &&
+            ($file_record{MPIIO_INDEP_OPENS} > 0 ||
+            $file_record{MPIIO_COLL_OPENS} > 0))
+        {
+            $shared_file_time += $file_record{'MPIIO_F_SLOWEST_RANK_TIME'};
+        }
+        else
+        {
+            $shared_file_time += $file_record{'POSIX_F_SLOWEST_RANK_TIME'};
+        }
+    }
+
+    my $mpi_did_read = 0;
+    if (defined $file_record{MPIIO_INDEP_OPENS})
+    {
+        $mpi_did_read =
+            $file_record{'MPIIO_INDEP_READS'} + 
+            $file_record{'MPIIO_COLL_READS'} + 
+            $file_record{'MPIIO_NB_READS'} + 
+            $file_record{'MPIIO_SPLIT_READS'};
+    }
+
+    # add up how many bytes were transferred
+    if(defined $file_record{MPIIO_INDEP_OPENS} &&
+        ($file_record{MPIIO_INDEP_OPENS} > 0 ||
+        $file_record{MPIIO_COLL_OPENS} > 0) && (!($mpi_did_read)))
+    {
+        # mpi file that was only written; disregard any read accesses that
+        # may have been performed for sieving at the posix level
+        $total_job_bytes += $file_record{'POSIX_BYTES_WRITTEN'}; 
+    }
+    else
+    {
+        # normal case
+        $total_job_bytes += $file_record{'POSIX_BYTES_WRITTEN'} +
+            $file_record{'POSIX_BYTES_READ'};
+    }
+}
 
 sub process_args
 {
@@ -55,19 +1271,11 @@ sub process_args
     $input_file = $ARGV[0];
 
     # give default output file a similar name to the input file.
-    #   log.darshan.gz => log.pdf
     #   log_name => log_name.pdf
     if (not $opt_output)
     {
         $output_file = basename($input_file);
-        if ($output_file =~ /\.darshan\.gz$/)
-        {
-            $output_file =~ s/\.darshan\.gz$/\.pdf/;
-        }
-        else
-        {
-            $output_file .= ".pdf";
-        }
+        $output_file .= ".pdf";
     }
 
     return;
@@ -92,7 +1300,7 @@ sub check_prereqs
         }
     }
 
-    # check  gnuplot version
+    # check gnuplot version
     $output = `$gnuplot --version`;
     if($? != 0)
     {
@@ -110,6 +1318,41 @@ sub check_prereqs
     return;
 }
 
+#
+# Execute which to see if the binary can be found in
+# the users path.
+#
+sub checkbin($)
+{
+    my $binname = shift;
+    my $rc;
+
+    # save stdout/err
+    open(SAVEOUT, ">&STDOUT");
+    open(SAVEERR, ">&STDERR");
+
+    # redirect stdout/error
+    open(STDERR, '>/dev/null');
+    open(STDOUT, '>/dev/null');
+    $rc = system("which $binname");
+    if ($rc)
+    {
+        $rc = 1;
+    }
+    close(STDOUT);
+    close(STDERR);
+
+    # suppress perl warning
+    select(SAVEERR);
+    select(SAVEOUT);
+
+    # restore stdout/err
+    open(STDOUT, ">&SAVEOUT");
+    open(STDERR, ">&SAVEERR");
+
+    return $rc;
+}
+
 sub print_help
 {
     print <<EOF;
diff --git a/darshan-util/darshan-job-summary/share/hist-eps.gplt b/darshan-util/darshan-job-summary/share/access-hist-eps.gplt
similarity index 75%
rename from darshan-util/darshan-job-summary/share/hist-eps.gplt
rename to darshan-util/darshan-job-summary/share/access-hist-eps.gplt
index 6dc3e4d..b07eeca 100644
--- a/darshan-util/darshan-job-summary/share/hist-eps.gplt
+++ b/darshan-util/darshan-job-summary/share/access-hist-eps.gplt
@@ -1,7 +1,7 @@
 #!/usr/bin/gnuplot -persist
 
 set terminal postscript eps color enhanced font "Helvetica" 18
-set output "hist.eps"
+set output output_file
 set size 0.8,1.0
 
 set style data histogram
@@ -11,7 +11,7 @@ set boxwidth 0.9
 set bmargin 5
 set border 3 front linetype -1 linewidth 1.000
 
-set key out bottom horiz
+set key out bottom center horiz
 set xtics border in scale 1,0.5 nomirror rotate by -45 offset character 0, 0, 0
 set ytics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
 
@@ -22,7 +22,6 @@ set ylabel "Count (Total, All Procs)"
 set ylabel offset character 2,0,0 font "" textcolor lt -1 rotate by 90
 set bmargin 7
 
-set title "I/O Sizes"
-# PR, MIR, MCR, PW, MIW, MCW, Popen, Pseek, Pstat
-plot  "hist.dat" using 2:xtic(1) title "Read", \
-      "" using 3 title "Write"
+set title graph_title
+plot data_file using 2:xtic(1) title "Read", \
+     "" using 3 title "Write"
diff --git a/darshan-util/darshan-job-summary/share/align-pdf.gplt b/darshan-util/darshan-job-summary/share/align-pdf.gplt
deleted file mode 100644
index 4678b77..0000000
--- a/darshan-util/darshan-job-summary/share/align-pdf.gplt
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/gnuplot -persist
-
-set terminal pdf enhanced fname "Helvetica" fsize 10
-set output "align.pdf"
-set size 0.8,1.0
-
-set style data histogram
-set style histogram cluster gap 1
-set style fill solid border -1
-set boxwidth 0.9
-set bmargin 5
-set border 3 front linetype -1 linewidth 1.000
-
-set key out bottom horiz
-unset xtics
-# set xtics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
-set ytics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
-
-set datafile separator ","
-
-set ylabel "Ops (Total, All Procs)"
-set ylabel offset character 2,0,0 font "" textcolor lt -1 rotate by 90
-
-set title "I/O Alignment"
-plot  "align.dat" using 1 title "Total", \
-      "" using 3 title "Unaligned in File", \
-      "" using 2 title "Unaligned in Memory"
-
-set key title ""
diff --git a/darshan-util/darshan-job-summary/share/counts-pdf.gplt b/darshan-util/darshan-job-summary/share/counts-pdf.gplt
deleted file mode 100644
index 91bb66c..0000000
--- a/darshan-util/darshan-job-summary/share/counts-pdf.gplt
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/gnuplot -persist
-
-set terminal pdf enhanced fname "Helvetica" fsize 10
-set output "counts.pdf"
-set size 0.8,1.0
-
-set style data histogram
-set style histogram cluster gap 1
-set style fill solid border -1
-set boxwidth 0.9
-set bmargin 5
-set border 3 front linetype -1 linewidth 1.000
-
-set key out bottom horiz
-set xtics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
-set ytics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
-
-set key title ""
-set datafile separator ","
-
-set ylabel "Ops (Total, All Procs)"
-set ylabel offset character 2,0,0 font "" textcolor lt -1 rotate by 90
-
-set title "I/O Operation Counts"
-# PR, MIR, MCR, PW, MIW, MCW, Popen, Pseek, Pstat
-plot  "counts.dat" using 2:xtic(1) title "POSIX", \
-      "" using 3 title "MPI-IO Ind.", \
-      "" using 4 title "MPI-IO Coll."
diff --git a/darshan-util/darshan-job-summary/share/counts-svg.gplt b/darshan-util/darshan-job-summary/share/counts-svg.gplt
deleted file mode 100644
index 8b8cb2e..0000000
--- a/darshan-util/darshan-job-summary/share/counts-svg.gplt
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/gnuplot -persist
-
-set terminal svg enhanced size 800 600 fname "Gill Sans" fsize 15
-set output "counts.svg"
-
-set style data histogram
-set style histogram cluster gap 1
-set style fill solid border -1
-set boxwidth 0.9
-set bmargin 5
-set border 3 front linetype -1 linewidth 1.000
-
-set key out bottom horiz
-set xtics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
-set ytics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
-
-set key title ""
-set datafile separator ","
-
-set ylabel "Operations (Total, All Processes)"
-set ylabel offset character 2,0,0 font "" textcolor lt -1 rotate by 90
-
-set title "I/O Operation Counts"
-# PR, MIR, MCR, PW, MIW, MCW, Popen, Pseek, Pstat
-plot  "counts.dat" using 2:xtic(1) title "POSIX", \
-      "" using 3 title "MPI-IO Indep.", \
-      "" using 4 title "MPI-IO Coll."
diff --git a/darshan-util/darshan-job-summary/share/file-access-eps.gplt b/darshan-util/darshan-job-summary/share/file-access-eps.gplt
new file mode 100644
index 0000000..01a47db
--- /dev/null
+++ b/darshan-util/darshan-job-summary/share/file-access-eps.gplt
@@ -0,0 +1,40 @@
+#!/usr/bin/gnuplot -persist
+
+set terminal postscript eps color solid font "Helvetica" 18 size 10in,2.5in
+set output "file-access-read.eps"
+set ylabel "MPI rank"
+set xlabel "hours:minutes:seconds"
+set xdata time
+set timefmt "%s"
+set format x "%H:%M:%S"
+set yrange [-1:ymax]
+set title "Timespan from first to last read access on independent files"
+set xrange ["0":runtime]
+set ytics 0,yinc,ymaxtic
+set lmargin 6
+
+# color blindness work around
+set style line 2 lc 3
+set style line 3 lc 4
+set style line 4 lc 5
+set style line 5 lc 2
+set style increment user
+
+# lw 3 to make lines thicker...
+# note that writes are slightly offset for better visibility
+plot "file-access-read.dat" using 1:2:3:4 with vectors nohead filled notitle
+
+set output "file-access-write.eps"
+set title "Timespan from first to last write access on independent files"
+
+# lw 3 to make lines thicker...
+plot "file-access-write.dat" using 1:2:3:4 with vectors nohead filled lt 2 notitle
+
+set output "file-access-shared.eps"
+unset ytics
+set ylabel "All processes"
+set yrange [-1:1]
+set title "Timespan from first to last access on files shared by all processes"
+
+plot "file-access-read-sh.dat" using 1:2:3:4 with vectors nohead filled lw 10 title "read", \
+"file-access-write-sh.dat" using 1:(($2)-.2):3:4 with vectors nohead filled lw 10 title "write"
diff --git a/darshan-util/darshan-job-summary/share/file-access-table.tex b/darshan-util/darshan-job-summary/share/file-access-table.tex
new file mode 100644
index 0000000..f607aa2
--- /dev/null
+++ b/darshan-util/darshan-job-summary/share/file-access-table.tex
@@ -0,0 +1,14 @@
+\begin{tabular}{l|p{1.7in}r}
+\multicolumn{3}{c}{Average I/O per process} \\
+\hline
+ & Cumulative time spent in I/O functions (seconds) & Amount of I/O (MB) \\
+\hline
+\hline
+Independent reads & \multicolumn{1}{r}{\filecri} & \multicolumn{1}{r}{\filecrbi} \\
+Independent writes & \multicolumn{1}{r}{\filecwi} & \multicolumn{1}{r}{\filecwbi} \\
+Independent metadata & \multicolumn{1}{r}{\filecmi} & \multicolumn{1}{r}{N/A} \\
+Shared reads & \multicolumn{1}{r}{\filecrs} & \multicolumn{1}{r}{\filecrbs} \\
+Shared writes & \multicolumn{1}{r}{\filecws} & \multicolumn{1}{r}{\filecwbs} \\
+Shared metadata & \multicolumn{1}{r}{\filecms} & \multicolumn{1}{r}{N/A} \\
+\hline
+\end{tabular}
diff --git a/darshan-util/darshan-job-summary/share/hist-pdf.gplt b/darshan-util/darshan-job-summary/share/hist-pdf.gplt
deleted file mode 100644
index 9f5bab7..0000000
--- a/darshan-util/darshan-job-summary/share/hist-pdf.gplt
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/gnuplot -persist
-
-set terminal pdf enhanced fname "Helvetica" fsize 10
-set output "hist.pdf"
-set size 0.8,1.0
-
-set style data histogram
-set style histogram cluster gap 1
-set style fill solid border -1
-set boxwidth 0.9
-set bmargin 5
-set border 3 front linetype -1 linewidth 1.000
-
-set key out bottom horiz
-set xtics border in scale 1,0.5 nomirror rotate by -45 offset character 0, 0, 0
-set ytics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
-
-set key title ""
-set datafile separator ","
-
-set ylabel "Count (Total, All Procs)"
-set ylabel offset character 2,0,0 font "" textcolor lt -1 rotate by 90
-
-set title "I/O Sizes"
-# PR, MIR, MCR, PW, MIW, MCW, Popen, Pseek, Pstat
-plot  "hist.dat" using 2:xtic(1) title "Read", \
-      "" using 3 title "Write"
diff --git a/darshan-util/darshan-job-summary/share/hist-svg.gplt b/darshan-util/darshan-job-summary/share/hist-svg.gplt
deleted file mode 100644
index b10c67c..0000000
--- a/darshan-util/darshan-job-summary/share/hist-svg.gplt
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/gnuplot -persist
-
-set terminal svg enhanced size 800 600 fname "Gill Sans" fsize 15
-set output "hist.svg"
-
-set style data histogram
-set style histogram cluster gap 1
-set style fill solid border -1
-set boxwidth 0.9
-set bmargin 5
-set border 3 front linetype -1 linewidth 1.000
-
-set key out bottom horiz
-set xtics border in scale 1,0.5 nomirror rotate by -45 offset character 0, 0, 0
-set ytics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
-
-set key title ""
-set datafile separator ","
-
-set ylabel "Count (Total, All Processes)"
-set ylabel offset character 2,0,0 font "" textcolor lt -1 rotate by 90
-
-set title "I/O Sizes"
-# PR, MIR, MCR, PW, MIW, MCW, Popen, Pseek, Pstat
-plot  "hist.dat" using 2:xtic(1) title "Read", \
-      "" using 3 title "Write"
diff --git a/darshan-util/darshan-job-summary/share/iodist-pdf.gplt b/darshan-util/darshan-job-summary/share/iodist-pdf.gplt
deleted file mode 100644
index 9fffde8..0000000
--- a/darshan-util/darshan-job-summary/share/iodist-pdf.gplt
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/gnuplot -persist
-
-set terminal pdf enhanced fname "Helvetica" fsize 10
-set output "iodist.pdf"
-set size 0.8,1.0
-
-set style data histogram
-set style histogram cluster gap 1
-set style fill solid border -1
-set boxwidth 0.9
-set bmargin 5
-set border 3 front linetype -1 linewidth 1.000
-
-# set key out bottom horiz
-set xtics border in scale 1,0.5 nomirror rotate by -45 offset character 0, 0, 0
-set ytics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
-
-set key title ""
-set datafile separator ","
-
-set xlabel "Number of Files Accessed"
-set xlabel offset character 0,-1,0 font "" textcolor lt -1 norotate
-
-set ylabel "Number of Processes"
-set ylabel offset character 2,0,0 font "" textcolor lt -1 rotate by 90
-
-set title "I/O Operations per Client"
-plot  "iodist.dat" using 2:xtic(1) title "Read", \
-      "" using 3 title "Write (NOT WORKING YET)"
diff --git a/darshan-util/darshan-job-summary/share/job-table.tex b/darshan-util/darshan-job-summary/share/job-table.tex
new file mode 100644
index 0000000..cf1a9bd
--- /dev/null
+++ b/darshan-util/darshan-job-summary/share/job-table.tex
@@ -0,0 +1,5 @@
+\begin{tabular}{|p{.47\columnwidth}|p{.35\columnwidth}|p{.47\columnwidth}|p{.6\columnwidth}|}
+\hline
+jobid: \jobid & uid: \jobuid & nprocs: \jobnprocs & runtime: \jobruntime \space seconds\\
+\hline
+\end{tabular}
diff --git a/darshan-util/darshan-job-summary/share/counts-eps.gplt b/darshan-util/darshan-job-summary/share/op-counts-eps.gplt
similarity index 72%
rename from darshan-util/darshan-job-summary/share/counts-eps.gplt
rename to darshan-util/darshan-job-summary/share/op-counts-eps.gplt
index 16a8055..97ca59a 100644
--- a/darshan-util/darshan-job-summary/share/counts-eps.gplt
+++ b/darshan-util/darshan-job-summary/share/op-counts-eps.gplt
@@ -1,7 +1,7 @@
 #!/usr/bin/gnuplot -persist
 
 set terminal postscript eps color enhanced "Helvetica" 18
-set output "counts.eps"
+set output "op-counts.eps"
 set size 0.8,1.0
 
 set style data histogram
@@ -11,7 +11,7 @@ set boxwidth 0.9
 set bmargin 5
 set border 3 front linetype -1 linewidth 1.000
 
-set key out bottom horiz
+set key out bottom center horiz
 set xtics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
 set ytics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
 
@@ -22,7 +22,4 @@ set ylabel "Ops (Total, All Processes)"
 set ylabel offset character 2,0,0 font "" textcolor lt -1 rotate by 90
 
 set title "I/O Operation Counts"
-# PR, MIR, MCR, PW, MIW, MCW, Popen, Pseek, Pstat
-plot  "counts.dat" using 2:xtic(1) title "POSIX", \
-      "" using 3 title "MPI-IO Indep.", \
-      "" using 4 title "MPI-IO Coll."
+plot "posix-op-counts.dat" using 2:xtic(1) title "POSIX"
diff --git a/darshan-util/darshan-job-summary/share/pattern-pdf.gplt b/darshan-util/darshan-job-summary/share/pattern-pdf.gplt
deleted file mode 100644
index 9e54967..0000000
--- a/darshan-util/darshan-job-summary/share/pattern-pdf.gplt
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/gnuplot -persist
-
-set terminal pdf enhanced fname "Helvetica" fsize 10
-set output "pattern.pdf"
-set size 0.8,1.0
-
-set style data histogram
-set style histogram cluster gap 1
-set style fill solid border -1
-set boxwidth 0.9
-set bmargin 5
-set border 3 front linetype -1 linewidth 1.000
-
-set key out bottom horiz
-set xtics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
-set ytics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
-
-set key out bottom horiz
-set key title ""
-set datafile separator ","
-
-set ylabel "Ops (Total, All Procs)"
-set ylabel offset character 2,0,0 font "" textcolor lt -1 rotate by 90
-
-set title "I/O Pattern"
-plot  "pattern.dat" using 2:xtic(1) title "Total", \
-      "" using 3 title "Sequential", \
-      "" using 4 title "Consecutive"
diff --git a/darshan-util/darshan-job-summary/share/summary.tex b/darshan-util/darshan-job-summary/share/summary.tex
index 8f9e7af..1a1572a 100644
--- a/darshan-util/darshan-job-summary/share/summary.tex
+++ b/darshan-util/darshan-job-summary/share/summary.tex
@@ -1,5 +1,4 @@
 \documentclass[11pt,letterpaper,twocolumn]{article}
-% \usepackage{html}
 \usepackage{charter}
 \usepackage{graphicx}
 \usepackage{fancyhdr}
@@ -23,7 +22,7 @@
 % BEGINNING OF DOCUMENT
 %
 
-\input{title}
+\input{title.tex}
 
 \begin{document}
 \fontfamily{cmss}
@@ -35,7 +34,7 @@
 \centering
 \subfigure
 {
-    \input{job-table}
+    \input{job-table.tex}
 }\\
 \subfigure
 {
@@ -43,24 +42,34 @@
 }
 \subfigure
 {
-    \includegraphics[scale=0.8]{counts.pdf}
-}
-\subfigure
-{
-    \includegraphics[scale=0.8]{hist.pdf}
+    \includegraphics[scale=0.8]{op-counts.pdf}
 }
+\end{figure*}
+
+\begin{figure*}[!h]
+\centering
 \subfigure
 {
-    \includegraphics[scale=0.8]{pattern.pdf}
+    \includegraphics[scale=0.8]{posix-access-hist.pdf}
 }
+\ifdefined\inclmpiio
 \subfigure
 {
-    \input{access-table}
+    \includegraphics[scale=0.8]{mpiio-access-hist.pdf}
 }
+\fi
+\end{figure*}
+
+\begin{figure*}[!h]
+\centering
 \subfigure
 {
-    \input{file-count-table}
+    \input{access-table.tex}
 }
+%\subfigure
+%{
+%    \input{file-count-table}
+%}
 \end{figure*}
 
 \begin{figure*}[!h]
@@ -79,11 +88,11 @@
 }
 \subfigure
 {
-    \input{file-access-table}
+    \input{file-access-table.tex}
 }
 \subfigure
 {
-    \input{fs-data-table}
+    \input{fs-data-table.tex}
 }
 \end{figure*}
 
@@ -91,29 +100,12 @@
 \centering
 \subfigure
 {
-    \input{variance-table}
+    \includegraphics[scale=0.65]{pattern.pdf}
+}
+\subfigure
+{
+    \input{variance-table.tex}
 }
 \end{figure*}
 
-
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-% at this point, things fall onto page 2
-%\begin{figure}[!h]
-%\includegraphics[scale=0.7]{types.pdf}
-%\end{figure}
-%\begin{figure}[!h]
-%\includegraphics[scale=0.7]{align.pdf}
-%\end{figure}
-%\begin{figure}[!h]
-%\includegraphics[scale=0.7]{iodist.pdf}
-%\end{figure}
-%
-%\begin{figure}[!h]
-%\centering
-%\input{stride-table}
-%\end{figure}
-
-
 \end{document}
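
The reworked summary.tex pulls each template in with an explicit .tex extension
and guards the MPI-IO access histogram behind \ifdefined\inclmpiio, so that
figure only appears when the generator defines the macro.  A hedged sketch of
how darshan-job-summary.pl.in might arm that switch (the exact mechanism is an
assumption):

    % hypothetical: emitted into the generated preamble only when the log
    % contains MPI-IO records; \ifdefined\inclmpiio then includes the extra plot
    \newcommand{\inclmpiio}{}
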
diff --git a/darshan-util/darshan-job-summary/share/time-summary-pdf.gplt b/darshan-util/darshan-job-summary/share/time-summary-pdf.gplt
deleted file mode 100644
index 1e74fdc..0000000
--- a/darshan-util/darshan-job-summary/share/time-summary-pdf.gplt
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/gnuplot -persist
-
-set terminal pdf enhanced fname "Helvetica" fsize 10
-set output "time-summary.pdf"
-set size 0.8,1.0
-
-set style data histograms
-set style histogram rowstacked
-set style fill solid border -1
-set boxwidth 0.9
-set bmargin 5
-set border 3 front linetype -1 linewidth 1.000
-
-set key out bottom horiz
-set xtics border in scale 1,0.5 nomirror rotate by -45 offset character 0, 0, 0
-set ytics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
-
-set key title ""
-set datafile separator ","
-
-set ylabel "Percentage of run time"
-set ylabel offset character 2,0,0 font "" textcolor lt -1 rotate by 90
-set yrange [0:100]
-
-set title "Average I/O cost per process"
-
-set bmargin 7
-
-plot \
-    newhistogram "", "time-summary.dat" using 3:xtic(1) title "Read", "" using 4 title "Write", "" using 5 title "Metadata", "" using 2 title "Other (including application compute)"
diff --git a/darshan-util/darshan-job-summary/share/title.tex b/darshan-util/darshan-job-summary/share/title.tex
new file mode 100644
index 0000000..88343b8
--- /dev/null
+++ b/darshan-util/darshan-job-summary/share/title.tex
@@ -0,0 +1,14 @@
+\rhead{\thepage\ of \pageref{LastPage}}
+\chead[
+\large{\titlecmd \space (\titlemon/\titlemday/\titleyear)}
+]
+{
+\large{\titlecmd \space (\titlemon/\titlemday/\titleyear)}
+}
+\cfoot[
+\scriptsize{\titlecmdline}
+]
+{
+\scriptsize{\titlecmdline}
+}
+
diff --git a/darshan-util/darshan-job-summary/share/types-pdf.gplt b/darshan-util/darshan-job-summary/share/types-pdf.gplt
deleted file mode 100644
index 70825fb..0000000
--- a/darshan-util/darshan-job-summary/share/types-pdf.gplt
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/gnuplot -persist
-
-set terminal pdf enhanced fname "Helvetica" fsize 10
-set output "types.pdf"
-set size 0.8,1.0
-
-set style data histogram
-set style histogram cluster gap 1
-set style fill solid border -1
-set boxwidth 0.9
-set bmargin 5
-set border 3 front linetype -1 linewidth 1.000
-
-set xtics border in scale 1,0.5 nomirror rotate by -45 offset character 0, 0, 0
-set ytics border in scale 1,0.5 nomirror norotate  offset character 0, 0, 0
-set yrange [0:]
-
-set key title ""
-set datafile separator ","
-
-set ylabel "Count (Total, All Procs)"
-set ylabel offset character 2,0,0 font "" textcolor lt -1 rotate by 90
-
-set title "MPI Datatype Use"
-# PR, MIR, MCR, PW, MIW, MCW, Popen, Pseek, Pstat
-plot  "types.dat" using 2:xtic(1) notitle;


hooks/post-receive
--


