[Darshan-commits] [Darshan] branch, dev-modular, updated. darshan-2.3.1-pre2-41-g56b12ca

Service Account git at mcs.anl.gov
Sun Feb 22 09:18:23 CST 2015


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "".

The branch, dev-modular has been updated
       via  56b12ca2d8bcb72045dd70bdee2d5bf63d6857c9 (commit)
       via  2eae92a35b1bd8e43aad1ba14ea522e18e3c114f (commit)
       via  7dafe56e782713e35311781fe5b488b7984e548e (commit)
       via  a5832499de7e0091db8d01f59c946c3a119dfd23 (commit)
       via  547fa7af4c5993f4a89eb46412d7d7c52de4fc32 (commit)
       via  c904d6e0ad7b094ccf905ce297323d748cda31ae (commit)
       via  bdbe35ed4e16effb653b1b53bf64721e4e68dee6 (commit)
       via  3421534f9f6293c3aca1603162d09849028b5d93 (commit)
       via  f0d7be0fefc9431cc3245cd4632c8f6c7a27c1c4 (commit)
       via  aaeb071fda2bcaebff57ebff8d70043955cacc7a (commit)
       via  aa8583814b7e0817ca3f6b44b9de8121498959a7 (commit)
       via  5dc512f197185c3aaacd118e014cfe28fbfb04a2 (commit)
       via  e30a1aec9a24dac450eb2a9e35fd9ca06bb1828a (commit)
       via  d3b6727c3f6a58980d1f07abca6ff8ba64d84c3e (commit)
       via  fa863d34fa1ffe4944d4d8608997f54b24b2c1f9 (commit)
       via  f39548697747bea7213e00d1d09297878ef27cf9 (commit)
       via  d3c4b9ddd884d23065bce0521c219c123d28c9c0 (commit)
       via  23523f15fa7e93d176ed90a42e096c1573b750b0 (commit)
       via  55e1f37ed5a377716789882ba2eeed0980fcfb0e (commit)
       via  4ec192f30eba792a9f40dbccd161352b38363369 (commit)
       via  ddfcbd5f31ac37be8a69062386ad40a3684242c9 (commit)
       via  48173bc1eef809adb980ee0bee7bd67ef7ce2bdb (commit)
       via  ffc2ef63e93ea0766e9f16754c2bbba3a584a1e4 (commit)
       via  4831753da8baf87b57d11f47b9d0d123632c0982 (commit)
       via  feb1fe1ede5a233948e1945538405eb5275b2222 (commit)
       via  34b666ac054982410282b84b454c0a538c9150ae (commit)
       via  edfe5fab24434c220f2f0ef32d192d7954e434fa (commit)
       via  c768c10e29b9a89b17ccae91b029c32a22cc66d0 (commit)
       via  f32fc8df6dcfada5071e9b1f2a3939d940b4e056 (commit)
       via  b56995e72693c0fdef75ac1ab4cc8cb6c0525274 (commit)
       via  2a24693eaa03d3c02b547aa77e07f58e34c6ce8e (commit)
       via  752e747b4d4c881f164205481a81486779415d1d (commit)
       via  a783978ea560063ecd97240641d101c260626ffb (commit)
       via  0ff8468881b17873c18b1c8e4c731e39f61b458d (commit)
       via  9edc0d7106241592423c26693314a7e7f619b23a (commit)
       via  a61bba43ac6368d4192739419c2b49a40e10f77c (commit)
       via  58889f88ddfb049dc72441a2c640be805b3832dc (commit)
       via  ff60981bc60776e2cb572a66f30f3e36d0f6c426 (commit)
       via  9b5c82c3fb8beb74fe4e3cc06a9be93e99273f33 (commit)
      from  8f85fd08a49f9c2862ae1636b3ed46f609806239 (commit)

The revisions listed above that are new to this repository have not
appeared in any other notification email, so we list them in full
below.

- Log -----------------------------------------------------------------
commit 56b12ca2d8bcb72045dd70bdee2d5bf63d6857c9
Merge: 8f85fd08a49f9c2862ae1636b3ed46f609806239 2eae92a35b1bd8e43aad1ba14ea522e18e3c114f
Author: Phil Carns <carns at mcs.anl.gov>
Date:   Sun Feb 22 10:18:10 2015 -0500

    Merge remote-tracking branch 'origin/master' into dev-modular
    
    Conflicts:
    	ChangeLog
    	darshan-log-format.h
    	darshan-runtime/Makefile.in
    	darshan-runtime/configure
    	darshan-runtime/configure.in
    	darshan-runtime/lib/darshan-mpi-io.c
    	darshan-runtime/lib/darshan-posix.c
    	darshan-util/configure
    	darshan-util/configure.in
    	darshan-util/darshan-logutils.c

-----------------------------------------------------------------------

Summary of changes:
 ChangeLog                                          |   30 ++-
 darshan-runtime/Makefile.in                        |    4 +
 darshan-runtime/configure                          |  228 ++++-------
 darshan-runtime/configure.in                       |   14 +-
 darshan-runtime/darshan-gen-cxx.pl.in              |    6 +-
 darshan-runtime/darshan-gen-fortran.pl.in          |   15 +-
 darshan-runtime/doc/darshan-runtime.txt            |   75 +++-
 .../lib/pkgconfig/darshan-runtime.pc.in            |    6 +-
 darshan-runtime/share/craype-1.x/darshan-module.in |    2 +-
 .../share/mpi-profile/darshan-cc.conf.in           |   18 +
 .../share/mpi-profile/darshan-cxx.conf.in          |   23 +
 .../share/mpi-profile/darshan-f.conf.in            |   23 +
 darshan-test/RELEASE-CHECKLIST.txt                 |    7 +-
 darshan-test/regression/README.txt                 |   16 +
 darshan-test/regression/run-all.sh                 |   58 +++
 darshan-test/regression/test-cases/cxxpi.sh        |   39 ++
 darshan-test/regression/test-cases/fperf-f77.sh    |   43 ++
 darshan-test/regression/test-cases/fperf-f90.sh    |   43 ++
 darshan-test/regression/test-cases/mpi-io-test.sh  |   44 ++
 darshan-test/regression/test-cases/src/cxxpi.cxx   |   63 +++
 darshan-test/regression/test-cases/src/fperf.f     |  180 ++++++++
 .../regression/test-cases/src/mpi-io-test.c        |  443 ++++++++++++++++++++
 darshan-test/regression/workstation-dynamic/env.sh |   40 ++
 .../regression/workstation-profile-conf/env.sh     |   38 ++
 darshan-test/regression/workstation-static/env.sh  |   55 +++
 darshan-util/configure                             |  206 +++-------
 .../bin/darshan-job-summary.pl.in                  |    7 +-
 darshan-util/darshan-parser.c                      |    6 +-
 darshan-util/doc/darshan-util.txt                  |    3 +-
 maint/config/check_zlib.m4                         |   74 +---
 30 files changed, 1420 insertions(+), 389 deletions(-)
 create mode 100644 darshan-runtime/share/mpi-profile/darshan-cc.conf.in
 create mode 100644 darshan-runtime/share/mpi-profile/darshan-cxx.conf.in
 create mode 100644 darshan-runtime/share/mpi-profile/darshan-f.conf.in
 create mode 100644 darshan-test/regression/README.txt
 create mode 100755 darshan-test/regression/run-all.sh
 create mode 100755 darshan-test/regression/test-cases/cxxpi.sh
 create mode 100755 darshan-test/regression/test-cases/fperf-f77.sh
 create mode 100755 darshan-test/regression/test-cases/fperf-f90.sh
 create mode 100755 darshan-test/regression/test-cases/mpi-io-test.sh
 create mode 100644 darshan-test/regression/test-cases/src/cxxpi.cxx
 create mode 100644 darshan-test/regression/test-cases/src/fperf.f
 create mode 100644 darshan-test/regression/test-cases/src/mpi-io-test.c
 create mode 100755 darshan-test/regression/workstation-dynamic/env.sh
 create mode 100755 darshan-test/regression/workstation-profile-conf/env.sh
 create mode 100755 darshan-test/regression/workstation-static/env.sh


Diff of changes:
diff --git a/ChangeLog b/ChangeLog
index 09f9ab6..98fbac0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -5,16 +5,38 @@ Darshan Release Change Log
 Darshan-3.0.0-pre1
 =============
 
-darshan-2.3.1-pre1
+darshan-2.3.1-pre2
 =============
+* added documentation and example configuration files for using the -profile
+  or $MPICC_PROFILE hooks to add instrumentation to MPICH-based MPI
+  implementations without generating custom wrapper scripts
+* Add wrappers for mkstemp(), mkostemp(), mkstemps(), and mkostemps()
+  (reported by Tom Peterka)
 * Change OPEN_TIMESTAMP field to report timestamp right before open() is
-  invoked rather than after timestamp after open is completed
+  invoked rather than the timestamp after the open has completed.
+  NOTE: updated log format version to 2.06 to reflect this change.
 * Change start_time and end_time fields in job record to use min and max
   (respectively) across all ranks
 * Fix bug in write volume data reported in file system table in
   darshan-job-summary.pl (reported by Matthieu Dorier)
-
-TODO: bump log version number and add warnings about semantic changes  
+* Clean up autoconf test for zlib and make zlib mandatory (reported by Kalyana
+  Chadalavada)
+* add --start-group and --end-group notation to Darshan libraries for Cray PE
+  2.x environment to fix link-time corner cases (Yushu Yao)
+* improve y axis labels on time interval graphs in darshan-job-summary.pl
+  (reported by Tom Peterka)
+* misc. improvements to darshan-parser --perf output (reported by Shane
+  Snyder)
+  - indicate which rank was slowest in unique file results
+  - label I/O vs. meta time more clearly
+  - include unique file meta time in agg_perf_by_slowest calculation
+* added regression test script framework in darshan-test/regression/
+  - workstation-static and workstation-dynamic test environments supported
+* update darshan-gen-fortran.pl and darshan-gen-cxx.pl to support new library
+  naming conventions in MPICH 3.1.1 and higher
+* update documentation to reflect known issues with some versions of MPICH
+* modify darshan-runtime so that link-time instrumentation options are only used
+  when statically linking via Libs.private.  (reported by Kalyana Chadalavada)
 
 darshan-2.3.0
 =============
diff --git a/darshan-runtime/Makefile.in b/darshan-runtime/Makefile.in
index 97d656b..332f69d 100644
--- a/darshan-runtime/Makefile.in
+++ b/darshan-runtime/Makefile.in
@@ -82,6 +82,10 @@ install:: all
 #	install -m 755 share/craype-1.x/darshan-module $(datarootdir)/craype-1.x/modulefiles/darshan/$(DARSHAN_VERSION)
 #	install -d $(datarootdir)/craype-2.x/modulefiles/darshan
 #	install -m 755 share/craype-2.x/darshan-module $(datarootdir)/craype-2.x/modulefiles/darshan/$(DARSHAN_VERSION)
+#	install -d $(datarootdir)/mpi-profile
+#	install -m 755 share/mpi-profile/darshan-cc.conf $(datarootdir)/mpi-profile/darshan-cc.conf
+#	install -m 755 share/mpi-profile/darshan-cxx.conf $(datarootdir)/mpi-profile/darshan-cxx.conf
+#	install -m 755 share/mpi-profile/darshan-f.conf $(datarootdir)/mpi-profile/darshan-f.conf
 #	install -d $(libdir)/pkgconfig
 #	install -m 644 lib/pkgconfig/darshan-runtime.pc $(libdir)/pkgconfig/darshan-runtime.pc
 
diff --git a/darshan-runtime/configure b/darshan-runtime/configure
index b5aa9d5..7b6d224 100755
--- a/darshan-runtime/configure
+++ b/darshan-runtime/configure
@@ -621,6 +621,7 @@ ac_includes_default="\
 
 ac_subst_vars='LTLIBOBJS
 LIBOBJS
+MPICH_LIB_OLD
 DARSHAN_VERSION
 DISABLE_LDPRELOAD
 CP_WRAPPERS
@@ -1460,52 +1461,6 @@ fi
 
 } # ac_fn_c_try_compile
 
-# ac_fn_c_try_link LINENO
-# -----------------------
-# Try to link conftest.$ac_ext, and return whether this succeeded.
-ac_fn_c_try_link ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  rm -f conftest.$ac_objext conftest$ac_exeext
-  if { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    grep -v '^ *+' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-    mv -f conftest.er1 conftest.err
-  fi
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest$ac_exeext && {
-	 test "$cross_compiling" = yes ||
-	 test -x conftest$ac_exeext
-       }; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_retval=1
-fi
-  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
-  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
-  # interfere with the next link command; also delete a directory that is
-  # left behind by Apple's compiler.  We do this before executing the actions.
-  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
-  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_link
-
 # ac_fn_c_try_cpp LINENO
 # ----------------------
 # Try to preprocess conftest.$ac_ext, and return whether this succeeded.
@@ -1703,6 +1658,52 @@ $as_echo "$ac_res" >&6; }
 
 } # ac_fn_c_check_header_compile
 
+# ac_fn_c_try_link LINENO
+# -----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_link ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest$ac_exeext
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext && {
+	 test "$cross_compiling" = yes ||
+	 test -x conftest$ac_exeext
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+  # interfere with the next link command; also delete a directory that is
+  # left behind by Apple's compiler.  We do this before executing the actions.
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_link
+
 # ac_fn_c_check_type LINENO TYPE VAR INCLUDES
 # -------------------------------------------
 # Tests whether TYPE exists after having included INCLUDES, setting cache
@@ -3455,109 +3456,38 @@ fi
 done
 
 
-#
-# Handle user hints
-#
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if zlib is wanted" >&5
-$as_echo_n "checking if zlib is wanted... " >&6; }
+
+
 
 # Check whether --with-zlib was given.
 if test "${with_zlib+set}" = set; then :
   withval=$with_zlib; if test "$withval" != no ; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
   if test -d "$withval"
   then
     ZLIB_HOME="$withval"
+    LDFLAGS="$LDFLAGS -L${ZLIB_HOME}/lib"
+    CPPFLAGS="$CPPFLAGS -I${ZLIB_HOME}/include"
+    __CP_ZLIB_LINK_FLAGS="-L${ZLIB_HOME}/lib"
+    __CP_ZLIB_INCLUDE_FLAGS="-I${ZLIB_HOME}/include"
   else
     { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Sorry, $withval does not exist, checking usual places" >&5
 $as_echo "$as_me: WARNING: Sorry, $withval does not exist, checking usual places" >&2;}
   fi
 else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
+  as_fn_error $? "zlib is required" "$LINENO" 5
 fi
 fi
 
 
-#
-# Locate zlib, if wanted
-#
-if test -n "${ZLIB_HOME}"
-then
-        ZLIB_OLD_LDFLAGS=$LDFLAGS
-        ZLIB_OLD_CPPFLAGS=$LDFLAGS
-        LDFLAGS="$LDFLAGS -L${ZLIB_HOME}/lib"
-        CPPFLAGS="$CPPFLAGS -I${ZLIB_HOME}/include"
-
-        ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-        { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inflateEnd in -lz" >&5
-$as_echo_n "checking for inflateEnd in -lz... " >&6; }
-if ${ac_cv_lib_z_inflateEnd+:} false; then :
-  $as_echo_n "(cached) " >&6
-else
-  ac_check_lib_save_LIBS=$LIBS
-LIBS="-lz  $LIBS"
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-/* Override any GCC internal prototype to avoid an error.
-   Use char because int might match the return type of a GCC
-   builtin and then its argument prototype would still apply.  */
-#ifdef __cplusplus
-extern "C"
-#endif
-char inflateEnd ();
-int
-main ()
-{
-return inflateEnd ();
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
-  ac_cv_lib_z_inflateEnd=yes
-else
-  ac_cv_lib_z_inflateEnd=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
-    conftest$ac_exeext conftest.$ac_ext
-LIBS=$ac_check_lib_save_LIBS
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_inflateEnd" >&5
-$as_echo "$ac_cv_lib_z_inflateEnd" >&6; }
-if test "x$ac_cv_lib_z_inflateEnd" = xyes; then :
-  zlib_cv_libz=yes
-else
-  zlib_cv_libz=no
-fi
-
-        ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default"
+ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default"
 if test "x$ac_cv_header_zlib_h" = xyes; then :
-  zlib_cv_zlib_h=yes
+
 else
-  zlib_cv_zlib_h=no
+  as_fn_error $? "z.h not found" "$LINENO" 5
 fi
 
 
-        ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-        if test "$zlib_cv_libz" = "yes" -a "$zlib_cv_zlib_h" = "yes"
-        then
-                #
-                # If both library and header were found, use them
-                #
-                { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inflateEnd in -lz" >&5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for inflateEnd in -lz" >&5
 $as_echo_n "checking for inflateEnd in -lz... " >&6; }
 if ${ac_cv_lib_z_inflateEnd+:} false; then :
   $as_echo_n "(cached) " >&6
@@ -3600,27 +3530,10 @@ _ACEOF
 
   LIBS="-lz $LIBS"
 
+else
+  as_fn_error $? "libz not found" "$LINENO" 5
 fi
 
-                { $as_echo "$as_me:${as_lineno-$LINENO}: checking zlib in ${ZLIB_HOME}" >&5
-$as_echo_n "checking zlib in ${ZLIB_HOME}... " >&6; }
-                { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5
-$as_echo "ok" >&6; }
-		__CP_ZLIB_LINK_FLAGS="-L${ZLIB_HOME}/lib"
-		__CP_ZLIB_INCLUDE_FLAGS="-I${ZLIB_HOME}/include"
-        else
-                #
-                # If either header or library was not found, revert and bomb
-                #
-                { $as_echo "$as_me:${as_lineno-$LINENO}: checking zlib in ${ZLIB_HOME}" >&5
-$as_echo_n "checking zlib in ${ZLIB_HOME}... " >&6; }
-                LDFLAGS="$ZLIB_OLD_LDFLAGS"
-                CPPFLAGS="$ZLIB_OLD_CPPFLAGS"
-                { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5
-$as_echo "failed" >&6; }
-                as_fn_error $? "either specify a valid zlib installation with --with-zlib=DIR or disable zlib usage with --without-zlib" "$LINENO" 5
-        fi
-fi
 
 
 
@@ -4167,7 +4080,7 @@ done
 
 
 # libc functions wrapped by darshan
-#CP_WRAPPERS="-Wl,-u,MPI_Init,-u,MPI_Wtime,-wrap,write,-wrap,open,-wrap,creat,-wrap,creat64,-wrap,open64,-wrap,close,-wrap,read,-wrap,lseek,-wrap,lseek64,-wrap,pread,-wrap,pwrite,-wrap,readv,-wrap,writev,-wrap,__xstat,-wrap,__lxstat,-wrap,__fxstat,-wrap,__xstat64,-wrap,__lxstat64,-wrap,__fxstat64,-wrap,mmap,-wrap,mmap64,-wrap,fopen,-wrap,fclose,-wrap,fread,-wrap,fwrite,-wrap,fseek,-wrap,fopen64,-wrap,pread64,-wrap,pwrite64,-wrap,fsync,-wrap,fdatasync,-wrap,ncmpi_create,-wrap,ncmpi_open,-wrap,ncmpi_close,-wrap,H5Fcreate,-wrap,H5Fopen,-wrap,H5Fclose,-wrap,aio_write,-wrap,aio_write64,-wrap,aio_read,-wrap,aio_read64,-wrap,lio_listio,-wrap,lio_listio64,-wrap,aio_return,-wrap,aio_return64"
+#CP_WRAPPERS="-Wl,-u,MPI_Init,-u,MPI_Wtime,-wrap,write,-wrap,open,-wrap,creat,-wrap,creat64,-wrap,open64,-wrap,close,-wrap,read,-wrap,lseek,-wrap,lseek64,-wrap,pread,-wrap,pwrite,-wrap,readv,-wrap,writev,-wrap,__xstat,-wrap,__lxstat,-wrap,__fxstat,-wrap,__xstat64,-wrap,__lxstat64,-wrap,__fxstat64,-wrap,mmap,-wrap,mmap64,-wrap,fopen,-wrap,fclose,-wrap,fread,-wrap,fwrite,-wrap,fseek,-wrap,fopen64,-wrap,pread64,-wrap,pwrite64,-wrap,fsync,-wrap,fdatasync,-wrap,ncmpi_create,-wrap,ncmpi_open,-wrap,ncmpi_close,-wrap,H5Fcreate,-wrap,H5Fopen,-wrap,H5Fclose,-wrap,aio_write,-wrap,aio_write64,-wrap,aio_read,-wrap,aio_read64,-wrap,lio_listio,-wrap,lio_listio64,-wrap,aio_return,-wrap,aio_return64,-wrap,mkstemp,-wrap,mkostemp,-wrap,mkstemps,-wrap,mkostemps"
 
 CP_WRAPPERS="-Wl,-u,MPI_Init,-u,MPI_Wtime,-wrap,open,-wrap,open64,-wrap,close"
 
@@ -4338,6 +4251,19 @@ $as_echo "#define __D_MPI_REQUEST MPI_Request" >>confdefs.h
 fi
 
 
+# attempt to detect library naming convention in mpi compiler script
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for old (pre-3.1.1) style MPICH library naming convention" >&5
+$as_echo_n "checking for old (pre-3.1.1) style MPICH library naming convention... " >&6; }
+if $CC -show foo.c -o foo |grep lmpich >& /dev/null; then :
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+    MPICH_LIB_OLD=1
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+    MPICH_LIB_OLD=0
+fi
+
 DARSHAN_VERSION="3.0.0-pre1"
 
 
@@ -4347,7 +4273,8 @@ DARSHAN_VERSION="3.0.0-pre1"
 
 
 
-ac_config_files="$ac_config_files Makefile darshan-mk-log-dirs.pl darshan-gen-cc.pl darshan-gen-cxx.pl darshan-gen-fortran.pl darshan-config share/craype-1.x/darshan-module share/craype-2.x/darshan-module lib/pkgconfig/darshan-runtime.pc"
+
+ac_config_files="$ac_config_files Makefile darshan-mk-log-dirs.pl darshan-gen-cc.pl darshan-gen-cxx.pl darshan-gen-fortran.pl darshan-config share/craype-1.x/darshan-module share/craype-2.x/darshan-module lib/pkgconfig/darshan-runtime.pc share/mpi-profile/darshan-cc.conf share/mpi-profile/darshan-cxx.conf share/mpi-profile/darshan-f.conf"
 
 cat >confcache <<\_ACEOF
 # This file is a shell script that caches the results of configure
@@ -5050,6 +4977,9 @@ do
     "share/craype-1.x/darshan-module") CONFIG_FILES="$CONFIG_FILES share/craype-1.x/darshan-module" ;;
     "share/craype-2.x/darshan-module") CONFIG_FILES="$CONFIG_FILES share/craype-2.x/darshan-module" ;;
     "lib/pkgconfig/darshan-runtime.pc") CONFIG_FILES="$CONFIG_FILES lib/pkgconfig/darshan-runtime.pc" ;;
+    "share/mpi-profile/darshan-cc.conf") CONFIG_FILES="$CONFIG_FILES share/mpi-profile/darshan-cc.conf" ;;
+    "share/mpi-profile/darshan-cxx.conf") CONFIG_FILES="$CONFIG_FILES share/mpi-profile/darshan-cxx.conf" ;;
+    "share/mpi-profile/darshan-f.conf") CONFIG_FILES="$CONFIG_FILES share/mpi-profile/darshan-f.conf" ;;
 
   *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
   esac
diff --git a/darshan-runtime/configure.in b/darshan-runtime/configure.in
index a69c61e..c412ef0 100644
--- a/darshan-runtime/configure.in
+++ b/darshan-runtime/configure.in
@@ -188,7 +188,7 @@ CFLAGS="$old_cflags"
 AC_CHECK_HEADERS(mntent.h sys/mount.h)
 
 # libc functions wrapped by darshan
-#CP_WRAPPERS="-Wl,-u,MPI_Init,-u,MPI_Wtime,-wrap,write,-wrap,open,-wrap,creat,-wrap,creat64,-wrap,open64,-wrap,close,-wrap,read,-wrap,lseek,-wrap,lseek64,-wrap,pread,-wrap,pwrite,-wrap,readv,-wrap,writev,-wrap,__xstat,-wrap,__lxstat,-wrap,__fxstat,-wrap,__xstat64,-wrap,__lxstat64,-wrap,__fxstat64,-wrap,mmap,-wrap,mmap64,-wrap,fopen,-wrap,fclose,-wrap,fread,-wrap,fwrite,-wrap,fseek,-wrap,fopen64,-wrap,pread64,-wrap,pwrite64,-wrap,fsync,-wrap,fdatasync,-wrap,ncmpi_create,-wrap,ncmpi_open,-wrap,ncmpi_close,-wrap,H5Fcreate,-wrap,H5Fopen,-wrap,H5Fclose,-wrap,aio_write,-wrap,aio_write64,-wrap,aio_read,-wrap,aio_read64,-wrap,lio_listio,-wrap,lio_listio64,-wrap,aio_return,-wrap,aio_return64"
+#CP_WRAPPERS="-Wl,-u,MPI_Init,-u,MPI_Wtime,-wrap,write,-wrap,open,-wrap,creat,-wrap,creat64,-wrap,open64,-wrap,close,-wrap,read,-wrap,lseek,-wrap,lseek64,-wrap,pread,-wrap,pwrite,-wrap,readv,-wrap,writev,-wrap,__xstat,-wrap,__lxstat,-wrap,__fxstat,-wrap,__xstat64,-wrap,__lxstat64,-wrap,__fxstat64,-wrap,mmap,-wrap,mmap64,-wrap,fopen,-wrap,fclose,-wrap,fread,-wrap,fwrite,-wrap,fseek,-wrap,fopen64,-wrap,pread64,-wrap,pwrite64,-wrap,fsync,-wrap,fdatasync,-wrap,ncmpi_create,-wrap,ncmpi_open,-wrap,ncmpi_close,-wrap,H5Fcreate,-wrap,H5Fopen,-wrap,H5Fclose,-wrap,aio_write,-wrap,aio_write64,-wrap,aio_read,-wrap,aio_read64,-wrap,lio_listio,-wrap,lio_listio64,-wrap,aio_return,-wrap,aio_return64,-wrap,mkstemp,-wrap,mkostemp,-wrap,mkstemps,-wrap,mkostemps"
 
 CP_WRAPPERS="-Wl,-u,MPI_Init,-u,MPI_Wtime,-wrap,open,-wrap,open64,-wrap,close"
 
@@ -287,6 +287,14 @@ AC_CHECK_TYPE([MPIO_Request],
 		AC_DEFINE(__D_MPI_REQUEST, MPI_Request, Generalized request type for MPI-IO)
 	,[#include <mpi.h>])
 
+# attempt to detect library naming convention in mpi compiler script
+AC_MSG_CHECKING(for old (pre-3.1.1) style MPICH library naming convention)
+AS_IF([$CC -show foo.c -o foo |grep lmpich >& /dev/null], 
+    AC_MSG_RESULT(yes)
+    MPICH_LIB_OLD=1,
+    AC_MSG_RESULT(no)
+    MPICH_LIB_OLD=0)
+
 DARSHAN_VERSION="AC_PACKAGE_VERSION"
 
 AC_SUBST(darshan_lib_path)
@@ -296,6 +304,7 @@ AC_SUBST(__CP_LOG_PATH)
 AC_SUBST(CP_WRAPPERS)
 AC_SUBST(DISABLE_LDPRELOAD)
 AC_SUBST(DARSHAN_VERSION)
+AC_SUBST(MPICH_LIB_OLD)
 AC_OUTPUT(Makefile
 darshan-mk-log-dirs.pl
 darshan-gen-cc.pl
@@ -305,4 +314,7 @@ darshan-config
 share/craype-1.x/darshan-module
 share/craype-2.x/darshan-module
 lib/pkgconfig/darshan-runtime.pc
+share/mpi-profile/darshan-cc.conf
+share/mpi-profile/darshan-cxx.conf
+share/mpi-profile/darshan-f.conf
 )
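
For reference, the MPICH_LIB_OLD probe added above simply inspects the MPI compiler's link line. A minimal stand-alone sketch of the same check, assuming an MPICH-style `mpicc` wrapper that supports `-show`, might look like this:

----
#!/bin/bash
# Hypothetical stand-alone version of the configure probe above.
# Older MPICH builds link against libmpich (plus libfmpich/libmpichcxx);
# MPICH 3.1.1 and newer link against libmpi/libmpifort/libmpicxx instead.
if mpicc -show foo.c -o foo 2>/dev/null | grep -q lmpich; then
    echo "old (pre-3.1.1) MPICH library naming convention"
else
    echo "new MPICH library naming convention"
fi
----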
diff --git a/darshan-runtime/darshan-gen-cxx.pl.in b/darshan-runtime/darshan-gen-cxx.pl.in
index b213a9a..3a9c41e 100644
--- a/darshan-runtime/darshan-gen-cxx.pl.in
+++ b/darshan-runtime/darshan-gen-cxx.pl.in
@@ -274,10 +274,14 @@ print OUTPUT<<"EOF";
     if [ \$? -eq 0 ] ; then
         CXXMPICH=-lcxxmpich
     fi
-    grep -E cxxmpich\\.cnk.a \$tmpfile >& /dev/null
+    grep -E cxxmpich\\.cnk\\.a \$tmpfile >& /dev/null
     if [ \$? -eq 0 ] ; then
         CXXMPICH=-lcxxmpich.cnk
     fi
+    grep -E mpicxx\\.a \$tmpfile >& /dev/null
+    if [ \$? -eq 0 ] ; then
+        CXXMPICH=-lmpicxx
+    fi
 
     rm \$tmpfile >& /dev/null
 
diff --git a/darshan-runtime/darshan-gen-fortran.pl.in b/darshan-runtime/darshan-gen-fortran.pl.in
index e91dd7f..231d64d 100644
--- a/darshan-runtime/darshan-gen-fortran.pl.in
+++ b/darshan-runtime/darshan-gen-fortran.pl.in
@@ -275,13 +275,20 @@ print OUTPUT<<"EOF";
     grep -E \\(PMPI_File_\\)\\|\\(PMPI_Init\\)\\|\\(PMPI_Finalize\\) \$tmpfile | grep -v -E \\(mpich.*\\.a\\) |grep \\(PMPI >& /dev/null
     rc_pmpi=\$?
 
-    # normal or cnk libraries?
-    grep -E mpich\\.cnk \$tmpfile >& /dev/null
+
+    # find appropriate fortran library name for profiling
+    grep -E libmpifort \$tmpfile >& /dev/null
     rc_cnk_check=\$?
     if [ \$rc_cnk_check -eq 0 ] ; then
-        FMPICH=-lfmpich.cnk
+        FMPICH=-lmpifort
     else
-        FMPICH=-lfmpich
+        grep -E mpich\\.cnk \$tmpfile >& /dev/null
+        rc_cnk_check=\$?
+        if [ \$rc_cnk_check -eq 0 ] ; then
+            FMPICH=-lfmpich.cnk
+        else
+            FMPICH=-lfmpich
+        fi
     fi
 
     rm \$tmpfile >& /dev/null
diff --git a/darshan-runtime/doc/darshan-runtime.txt b/darshan-runtime/doc/darshan-runtime.txt
index c4699dd..1ab3f5d 100644
--- a/darshan-runtime/doc/darshan-runtime.txt
+++ b/darshan-runtime/doc/darshan-runtime.txt
@@ -143,13 +143,17 @@ for details if you intend to force one mode or the other.
 
 == Instrumenting statically-linked applications
 
-Statically linked executables must be instrumented at compile time.  The
-simplest way to do this is to generate an MPI compiler script (e.g. `mpicc`)
-that includes the link options and libraries needed by Darshan.  Once this
-is done, Darshan instrumentation is transparent; you simply compile
-applications using the darshan-enabled MPI compiler scripts.
+Statically linked executables must be instrumented at compile time.
+The simplest ways to do this are either to generate a customized
+MPI compiler script (e.g. `mpicc`) that includes the link options and
+libraries needed by Darshan, or to use the profiling configuration
+hooks provided by existing MPI compiler scripts.  Once this is done, Darshan
+instrumentation is transparent; you simply compile applications using
+the darshan-enabled MPI compiler scripts.
 
-For MPICH-based MPI libraries, such as MPICH1, MPICH2, or MVAPICH, these
+=== Using customized compiler wrapper scripts
+
+For MPICH-based MPI libraries, such as MPICH1, MPICH2, or MVAPICH, custom
 wrapper scripts can be generated automatically.  The following example
 illustrates how to produce wrappers for C, C++, and Fortran compilers:
 
@@ -160,6 +164,40 @@ darshan-gen-fortran.pl `which mpif77` --output mpif77.darshan
 darshan-gen-fortran.pl `which mpif90` --output mpif90.darshan
 -----
 
+=== Using a profile configuration 
+
+The MPICH MPI implementation supports the specification of a profiling library
+configuration, which can then be used to insert Darshan instrumentation without
+modifying the existing MPI compiler script.  Example profiling configuration
+files are installed with Darshan 2.3.1 and later.  You can enable a profiling
+configuration using environment variables or command line arguments to the
+compiler scripts:
+
+Example for MPICH 3.1.1 or newer:
+----
+export MPICC_PROFILE=$DARSHAN_PREFIX/share/mpi-profile/darshan-cc
+export MPICXX_PROFILE=$DARSHAN_PREFIX/share/mpi-profile/darshan-cxx
+export MPIFORT_PROFILE=$DARSHAN_PREFIX/share/mpi-profile/darshan-f
+----
+
+Example for MPICH 3.1 or earlier:
+----
+export MPICC_PROFILE=$DARSHAN_PREFIX/share/mpi-profile/darshan-cc
+export MPICXX_PROFILE=$DARSHAN_PREFIX/share/mpi-profile/darshan-cxx
+export MPICF77_PROFILE=$DARSHAN_PREFIX/share/mpi-profile/darshan-f
+export MPICF90_PROFILE=$DARSHAN_PREFIX/share/mpi-profile/darshan-f
+----
+
+Examples for command line use:
+----
+mpicc -profile=$DARSHAN_PREFIX/share/mpi-profile/darshan-cc <args>
+mpicxx -profile=$DARSHAN_PREFIX/share/mpi-profile/darshan-cxx <args>
+mpif77 -profile=$DARSHAN_PREFIX/share/mpi-profile/darshan-f <args>
+mpif90 -profile=$DARSHAN_PREFIX/share/mpi-profile/darshan-f <args>
+----
+
+=== Other configurations
+
 Please see the Cray recipe in this document for instructions on
 instrumenting statically-linked applications on that platform.
 
@@ -379,12 +417,30 @@ http://software.intel.com/en-us/forums/showthread.php?t=103447&o=a&s=lr
 
 === Linux clusters using MPICH 
 
-Follow the generic instructions provided at the top of this document.  The
-only modification is to make sure that the `CC` used for compilation is
+Follow the generic instructions provided at the top of this document.  MPICH versions 3.1 and
+later use shared libraries by default, so you may need to consider the dynamic linking
+instrumentation approach.
+
+The static linking method can be used if MPICH is configured to use static
+linking by default, or if you are using a version prior to 3.1.
+The only modification is to make sure that the `CC` used for compilation is
 based on a GNU compiler.  Once Darshan has been installed, it should be
 capable of instrumenting executables built with GNU, Intel, and PGI
 compilers.
 
+[NOTE]
+Darshan is not capable of instrumenting Fortran applications built with MPICH versions 3.1.1, 3.1.2,
+or 3.1.3 due to a library symbol name compatibility issue.  Consider using a newer version of
+MPICH if you wish to instrument Fortran applications.  Please see
+http://trac.mpich.org/projects/mpich/ticket/2209 for more details.
+
+[NOTE]
+MPICH versions 3.1, 3.1.1, 3.1.2, and 3.1.3 may produce link-time errors when building static
+executables (i.e. using the -static option) if MPICH is built with shared library support.
+Please see http://trac.mpich.org/projects/mpich/ticket/2190 for more details.  The workaround if you
+wish to use static linking is to configure MPICH with `--enable-shared=no --enable-static=yes` to
+force it to use static MPI libraries with correct dependencies.
+
 === Linux clusters using Open MPI
 
 Follow the generic instructions provided at the top of this document for
@@ -413,4 +469,5 @@ behavior at runtime:
 in which files that were accessed by all ranks are collapsed into a single
 cumulative file record at rank 0.  This option retains more per-process
 information at the expense of creating larger log files.
-
+* DARSHAN_LOGPATH: specifies the directory to write Darshan log files to. Note that this directory needs to be formatted using the darshan-mk-log-dirs.pl script.
+* DARSHAN_LOGFILE: specifies the full path (directory and file name) of the output Darshan log file. This overrides the default Darshan behavior of automatically generating a log file name and placing it in a log directory formatted by the darshan-mk-log-dirs.pl script.
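
For illustration, a minimal sketch of using the two new variables documented above (the application name, paths, and `mpiexec` launcher are hypothetical):

----
# Write this run's log to an explicit file instead of the formatted log tree.
export DARSHAN_LOGFILE=/tmp/my-app-run1.darshan.gz
mpiexec -n 4 ./my-app

# Or point Darshan at a directory formatted with darshan-mk-log-dirs.pl and
# let it generate the log file name automatically.
unset DARSHAN_LOGFILE
export DARSHAN_LOGPATH=/scratch/darshan-logs
mpiexec -n 4 ./my-app
----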
diff --git a/darshan-runtime/lib/pkgconfig/darshan-runtime.pc.in b/darshan-runtime/lib/pkgconfig/darshan-runtime.pc.in
index d4d1a6c..55c2e92 100644
--- a/darshan-runtime/lib/pkgconfig/darshan-runtime.pc.in
+++ b/darshan-runtime/lib/pkgconfig/darshan-runtime.pc.in
@@ -11,8 +11,8 @@ Requires.private:
 darshan_prefix=@prefix@
 darshan_includedir=
 darshan_libdir= -L${darshan_prefix}/lib
-darshan_linkopts="-Wl,-u,MPI_Init,-u,MPI_Wtime,-u,__wrap_H5Fcreate,-u,__wrap_ncmpi_create,-wrap,write,-wrap,open,-wrap,creat,-wrap,creat64,-wrap,open64,-wrap,close,-wrap,read,-wrap,lseek,-wrap,lseek64,-wrap,pread,-wrap,pwrite,-wrap,readv,-wrap,writev,-wrap,__xstat,-wrap,__lxstat,-wrap,__fxstat,-wrap,__xstat64,-wrap,__lxstat64,-wrap,__fxstat64,-wrap,mmap,-wrap,mmap64,-wrap,fopen,-wrap,fclose,-wrap,fread,-wrap,fwrite,-wrap,fseek,-wrap,fopen64,-wrap,pread64,-wrap,pwrite64,-wrap,fsync,-wrap,fdatasync,-wrap,ncmpi_create,-wrap,ncmpi_open,-wrap,ncmpi_close,-wrap,H5Fcreate,-wrap,H5Fopen,-wrap,H5Fclose,-wrap,aio_write,-wrap,aio_write64,-wrap,aio_read,-wrap,aio_read64,-wrap,lio_listio,-wrap,lio_listio64,-wrap,aio_return,-wrap,aio_return64"
+darshan_linkopts="-Wl,-u,MPI_Init,-u,MPI_Wtime,-u,__wrap_H5Fcreate,-u,__wrap_ncmpi_create,-wrap,write,-wrap,open,-wrap,creat,-wrap,creat64,-wrap,open64,-wrap,close,-wrap,read,-wrap,lseek,-wrap,lseek64,-wrap,pread,-wrap,pwrite,-wrap,readv,-wrap,writev,-wrap,__xstat,-wrap,__lxstat,-wrap,__fxstat,-wrap,__xstat64,-wrap,__lxstat64,-wrap,__fxstat64,-wrap,mmap,-wrap,mmap64,-wrap,fopen,-wrap,fclose,-wrap,fread,-wrap,fwrite,-wrap,fseek,-wrap,fopen64,-wrap,pread64,-wrap,pwrite64,-wrap,fsync,-wrap,fdatasync,-wrap,ncmpi_create,-wrap,ncmpi_open,-wrap,ncmpi_close,-wrap,H5Fcreate,-wrap,H5Fopen,-wrap,H5Fclose,-wrap,aio_write,-wrap,aio_write64,-wrap,aio_read,-wrap,aio_read64,-wrap,lio_listio,-wrap,lio_listio64,-wrap,aio_return,-wrap,aio_return64,-wrap,mkstemp,-wrap,mkostemp,-wrap,mkstemps,-wrap,mkostemps"
 
 Cflags:
-Libs: ${darshan_linkopts} ${darshan_libdir} -lfmpich -lmpichcxx -ldarshan-mpi-io -ldarshan-posix -ldarshan-stubs
-Libs.private:
+Libs:
+Libs.private: ${darshan_linkopts} ${darshan_libdir} -lfmpich -lmpichcxx -Wl,--start-group -ldarshan-mpi-io -ldarshan-posix -ldarshan-stubs -Wl,--end-group
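
Moving the wrapper options from Libs to Libs.private means pkg-config only emits them when static link flags are requested; a quick way to observe the difference (assuming darshan-runtime.pc is on PKG_CONFIG_PATH):

----
# Dynamic link: no Darshan link-time wrapper options are emitted.
pkg-config --libs darshan-runtime

# Static link: Libs.private (the -wrap options and Darshan libraries) is included.
pkg-config --static --libs darshan-runtime
----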
diff --git a/darshan-runtime/share/craype-1.x/darshan-module.in b/darshan-runtime/share/craype-1.x/darshan-module.in
index c58fff7..6915b7b 100644
--- a/darshan-runtime/share/craype-1.x/darshan-module.in
+++ b/darshan-runtime/share/craype-1.x/darshan-module.in
@@ -55,7 +55,7 @@ prepend-path CRAY_LD_LIBRARY_PATH           $root/lib
 
 
 # initial link options for Darshan
-setenv DARSHAN_POST_LINK_OPTS " -L$root/lib -ldarshan-mpi-io -lz -Wl,-u,__wrap_fopen,-u,MPI_Init,-u,MPI_Wtime,-wrap,write,-wrap,open,-wrap,creat,-wrap,creat64,-wrap,open64,-wrap,close,-wrap,read,-wrap,lseek,-wrap,lseek64,-wrap,pread,-wrap,pwrite,-wrap,readv,-wrap,writev,-wrap,__xstat,-wrap,__lxstat,-wrap,__fxstat,-wrap,__xstat64,-wrap,__lxstat64,-wrap,__fxstat64,-wrap,mmap,-wrap,mmap64,-wrap,fopen,-wrap,fclose,-wrap,fread,-wrap,fwrite,-wrap,fseek,-wrap,fopen64,-wrap,pread64,-wrap,pwrite64,-wrap,fsync,-wrap,fdatasync,-wrap,ncmpi_create,-wrap,ncmpi_open,-wrap,ncmpi_close,-wrap,H5Fcreate,-wrap,H5Fopen,-wrap,H5Fclose,-wrap,aio_write,-wrap,aio_write64,-wrap,aio_read,-wrap,aio_read64,-wrap,lio_listio,-wrap,lio_listio64,-wrap,aio_return,-wrap,aio_return64  -lfmpich -lmpichcxx -ldarshan-posix -ldarshan-mpi-io -lz "
+setenv DARSHAN_POST_LINK_OPTS " -L$root/lib -ldarshan-mpi-io -lz -Wl,-u,__wrap_fopen,-u,MPI_Init,-u,MPI_Wtime,-wrap,write,-wrap,open,-wrap,creat,-wrap,creat64,-wrap,open64,-wrap,close,-wrap,read,-wrap,lseek,-wrap,lseek64,-wrap,pread,-wrap,pwrite,-wrap,readv,-wrap,writev,-wrap,__xstat,-wrap,__lxstat,-wrap,__fxstat,-wrap,__xstat64,-wrap,__lxstat64,-wrap,__fxstat64,-wrap,mmap,-wrap,mmap64,-wrap,fopen,-wrap,fclose,-wrap,fread,-wrap,fwrite,-wrap,fseek,-wrap,fopen64,-wrap,pread64,-wrap,pwrite64,-wrap,fsync,-wrap,fdatasync,-wrap,ncmpi_create,-wrap,ncmpi_open,-wrap,ncmpi_close,-wrap,H5Fcreate,-wrap,H5Fopen,-wrap,H5Fclose,-wrap,aio_write,-wrap,aio_write64,-wrap,aio_read,-wrap,aio_read64,-wrap,lio_listio,-wrap,lio_listio64,-wrap,aio_return,-wrap,aio_return64,-wrap,mkstemp,-wrap,mkostemp,-wrap,mkstemps,-wrap,mkostemps  -lfmpich -lmpichcxx -ldarshan-posix -ldarshan-mpi-io -lz "
 
 # Add Darshan to the PE_PRODUCT_LIST variable.  This will cause the
 # compiler scripts to insert DARSHAN_POST_LINK_OPTS into the link command
diff --git a/darshan-runtime/share/mpi-profile/darshan-cc.conf.in b/darshan-runtime/share/mpi-profile/darshan-cc.conf.in
new file mode 100644
index 0000000..70fe29a
--- /dev/null
+++ b/darshan-runtime/share/mpi-profile/darshan-cc.conf.in
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# Example Darshan profiling configuration file for MPICH.
+
+
+DARSHAN_PREFIX=@prefix@
+
+# Libraries (and paths) to include before the MPI library 
+export PROFILE_PRELIB=`$DARSHAN_PREFIX/bin/darshan-config --pre-ld-flags`
+
+# Libraries to include after the MPI library 
+export PROFILE_POSTLIB=`$DARSHAN_PREFIX/bin/darshan-config --post-ld-flags`
+
+# C preprocessor arguments for any include files.  For example, to add
+# /usr/local/myprof/include to the include path and the library libmyprof.a in
+# /usr/local/myprof/lib to the link step, you could create the file myprof.conf with the
+# lines
+# PROFILE_INCPATHS
diff --git a/darshan-runtime/share/mpi-profile/darshan-cxx.conf.in b/darshan-runtime/share/mpi-profile/darshan-cxx.conf.in
new file mode 100644
index 0000000..9499a76
--- /dev/null
+++ b/darshan-runtime/share/mpi-profile/darshan-cxx.conf.in
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Example Darshan profiling configuration file for MPICH.
+
+
+DARSHAN_PREFIX=@prefix@
+MPICH_LIB_OLD=@MPICH_LIB_OLD@
+
+# Libraries (and paths) to include before the MPI library 
+if [ $MPICH_LIB_OLD -eq 1 ]; then
+    export PROFILE_PRELIB="-lmpichcxx `$DARSHAN_PREFIX/bin/darshan-config --pre-ld-flags`"
+else
+    export PROFILE_PRELIB="-lmpicxx `$DARSHAN_PREFIX/bin/darshan-config --pre-ld-flags`"
+fi
+
+# Libraries to include after the MPI library 
+export PROFILE_POSTLIB=`$DARSHAN_PREFIX/bin/darshan-config --post-ld-flags`
+
+# C preprocessor arguments for any include files.  For example, to add
+# /usr/local/myprof/include to the include path and the library libmyprof.a in
+# /usr/local/myprof/lib to the link step, you could create the file myprof.conf with the
+# lines
+# PROFILE_INCPATHS
diff --git a/darshan-runtime/share/mpi-profile/darshan-f.conf.in b/darshan-runtime/share/mpi-profile/darshan-f.conf.in
new file mode 100644
index 0000000..176ca88
--- /dev/null
+++ b/darshan-runtime/share/mpi-profile/darshan-f.conf.in
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Example Darshan profiling configuration file for MPICH.
+
+
+DARSHAN_PREFIX=@prefix@
+MPICH_LIB_OLD=@MPICH_LIB_OLD@
+
+# Libraries (and paths) to include before the MPI library 
+if [ $MPICH_LIB_OLD -eq 1 ]; then
+    export PROFILE_PRELIB="-lfmpich `$DARSHAN_PREFIX/bin/darshan-config --pre-ld-flags`"
+else
+    export PROFILE_PRELIB="-lmpifort `$DARSHAN_PREFIX/bin/darshan-config --pre-ld-flags`"
+fi
+
+# Libraries to include after the MPI library 
+export PROFILE_POSTLIB=`$DARSHAN_PREFIX/bin/darshan-config --post-ld-flags`
+
+# C preprocessor arguments for any include files.  For example, to add
+# /usr/local/myprof/include to the include path and the library libmyprof.a in
+# /usr/local/myprof/lib to the link step, you could create the file myprof.conf with the
+# lines
+# PROFILE_INCPATHS
diff --git a/darshan-test/RELEASE-CHECKLIST.txt b/darshan-test/RELEASE-CHECKLIST.txt
index 7d4c3bc..29ec196 100644
--- a/darshan-test/RELEASE-CHECKLIST.txt
+++ b/darshan-test/RELEASE-CHECKLIST.txt
@@ -20,8 +20,13 @@ Notes on how to release a new version of Darshan
   pushed to origin/master
 - make a tag for the release according to instructions at
   http://git-scm.com/book/en/Git-Basics-Tagging
+  - example (annotated tag, pushed to repo):
+    git tag -a darshan-2.3.1-pre2 -m 'Darshan 2.3.1-pre2'
+    git push origin darshan-2.3.1-pre2
 - TESTING
-- export the tag (TODO: document recommended method) and tar gzip it
+- export the tag and tar gzip it
+  - easiest method is to do a fresh checkout and remove the .git
+    subdirectory
   - upload .tar.gz file to /mcs/ftp/pub/darshan/releases
 - generate web documentation from asciidoc by running make in
   darshan-util/doc/ and darshan-runtime/doc/ directories
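
One possible command sequence for the "fresh checkout" export step above (the repository URL is a placeholder; the tag name matches the example earlier in the checklist):

----
# Export the tagged release as a tarball without any git metadata.
git clone --branch darshan-2.3.1-pre2 <repository-url> darshan-2.3.1-pre2
rm -rf darshan-2.3.1-pre2/.git
tar czf darshan-2.3.1-pre2.tar.gz darshan-2.3.1-pre2
----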
diff --git a/darshan-test/regression/README.txt b/darshan-test/regression/README.txt
new file mode 100644
index 0000000..9fa8c81
--- /dev/null
+++ b/darshan-test/regression/README.txt
@@ -0,0 +1,16 @@
+This directory contains regression tests for both the runtime and util
+components of Darshan, assuming that Darshan is already compiled and
+installed in a known path.
+
+The master script must be executed with three arguments:
+
+1) path to darshan installation
+2) path to temporary directory (for building executables, collecting logs, 
+   etc. during test)
+3) platform type; options include:
+   - ws (for a standard workstation)
+
+The platform type should map to a subdirectory containing scripts
+that describe how to perform platform-specific tasks (like loading or
+generating darshan wrappers and executing jobs).
+
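
A concrete invocation of the master script with the three arguments described above (paths are examples and mirror the usage message in run-all.sh below):

----
# Build, run, and parse all regression test cases against an existing install.
./run-all.sh ~/darshan-install /tmp/darshan-test ws
----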
diff --git a/darshan-test/regression/run-all.sh b/darshan-test/regression/run-all.sh
new file mode 100755
index 0000000..604a366
--- /dev/null
+++ b/darshan-test/regression/run-all.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+if [ "$#" -ne 3 ]; then
+    echo "Usage: run-all.sh <darshan_install_path> <tmp_path> <platform>" 1>&2
+    echo "Example: ./run-all.sh ~/darshan-install /tmp/test ws" 1>&2
+    exit 1
+fi
+
+# set variables for use by other sub-scripts
+export DARSHAN_PATH=$1
+export DARSHAN_TMP=$2
+export DARSHAN_PLATFORM=$3
+# number of procs that most test jobs will use
+export DARSHAN_DEFAULT_NPROCS=4
+
+DARSHAN_TESTDIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+export DARSHAN_TESTDIR
+
+# check darshan path
+if [ ! -x $DARSHAN_PATH/bin/darshan-parser ]; then
+    echo "Error: $DARSHAN_PATH doesn't contain a valid Darshan install." 1>&2
+    exit 1
+fi
+
+# check and/or create tmp path
+if [ ! -d $DARSHAN_TMP ]; then
+    mkdir -p $DARSHAN_TMP
+fi
+
+if [ ! -d $DARSHAN_TMP ]; then
+    echo "Error: unable to find or create $DARSHAN_TMP" 1>&2
+    exit 1
+fi
+if [ ! -w $DARSHAN_TMP ]; then
+    echo "Error: unable to write to $DARSHAN_TMP" 1>&2
+    exit 1
+fi
+
+# make sure that we have sub-scripts for the specified platform
+if [ ! -d $DARSHAN_TESTDIR/$DARSHAN_PLATFORM ]; then
+    echo "Error: unable to find scripts for platform $DARSHAN_PLATFORM" 1>&2
+    exit 1
+fi
+
+# set up environment for tests according to platform
+source $DARSHAN_TESTDIR/$DARSHAN_PLATFORM/env.sh
+
+for i in `ls $DARSHAN_TESTDIR/test-cases/*.sh`; do
+    echo Running ${i}...
+    $i
+    if [ $? -ne 0 ]; then
+        echo "Error: failed to execute test case $i"
+        exit 1
+    fi
+    echo Done.
+done
+
+exit 0
diff --git a/darshan-test/regression/test-cases/cxxpi.sh b/darshan-test/regression/test-cases/cxxpi.sh
new file mode 100755
index 0000000..bea35e8
--- /dev/null
+++ b/darshan-test/regression/test-cases/cxxpi.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+PROG=cxxpi
+
+# set log file path; remove previous log if present
+export DARSHAN_LOGFILE=$DARSHAN_TMP/${PROG}.darshan.gz
+rm -f ${DARSHAN_LOGFILE}
+
+# compile
+$DARSHAN_CXX $DARSHAN_TESTDIR/test-cases/src/${PROG}.cxx -o $DARSHAN_TMP/${PROG}
+if [ $? -ne 0 ]; then
+    echo "Error: failed to compile ${PROG}" 1>&2
+    exit 1
+fi
+
+# execute
+$DARSHAN_RUNJOB $DARSHAN_TMP/${PROG} -f $DARSHAN_TMP/${PROG}.tmp.dat
+if [ $? -ne 0 ]; then
+    echo "Error: failed to execute ${PROG}" 1>&2
+    exit 1
+fi
+
+# parse log
+$DARSHAN_PATH/bin/darshan-parser $DARSHAN_LOGFILE > $DARSHAN_TMP/${PROG}.darshan.txt
+if [ $? -ne 0 ]; then
+    echo "Error: failed to parse ${DARSHAN_LOGFILE}" 1>&2
+    exit 1
+fi
+
+# check results
+# in this case we want to confirm that the open counts are zero; cxxpi does not do any IO
+POSIX_OPENS=`grep CP_POSIX_OPENS $DARSHAN_TMP/${PROG}.darshan.txt |cut -f 4`
+if [ "$POSIX_OPENS"x != ""x ]; then
+    echo "Error: Found unexpected POSIX open count of $POSIX_OPENS" 1>&2
+    exit 1
+fi
+
+
+exit 0
diff --git a/darshan-test/regression/test-cases/fperf-f77.sh b/darshan-test/regression/test-cases/fperf-f77.sh
new file mode 100755
index 0000000..8f123d7
--- /dev/null
+++ b/darshan-test/regression/test-cases/fperf-f77.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+PROG=fperf-f77
+
+# set log file path; remove previous log if present
+export DARSHAN_LOGFILE=$DARSHAN_TMP/${PROG}.darshan.gz
+rm -f ${DARSHAN_LOGFILE}
+
+# compile
+$DARSHAN_F77 $DARSHAN_TESTDIR/test-cases/src/fperf.f -o $DARSHAN_TMP/${PROG}
+if [ $? -ne 0 ]; then
+    echo "Error: failed to compile ${PROG}" 1>&2
+    exit 1
+fi
+
+# execute
+$DARSHAN_RUNJOB $DARSHAN_TMP/${PROG} -fname $DARSHAN_TMP/${PROG}.tmp.dat
+if [ $? -ne 0 ]; then
+    echo "Error: failed to execute ${PROG}" 1>&2
+    exit 1
+fi
+
+# parse log
+$DARSHAN_PATH/bin/darshan-parser $DARSHAN_LOGFILE > $DARSHAN_TMP/${PROG}.darshan.txt
+if [ $? -ne 0 ]; then
+    echo "Error: failed to parse ${DARSHAN_LOGFILE}" 1>&2
+    exit 1
+fi
+
+# check results
+# in this case we want to confirm that both the MPI and POSIX open counters were triggered
+MPI_OPENS=`grep CP_COLL_OPENS $DARSHAN_TMP/${PROG}.darshan.txt |cut -f 4`
+if [ ! $MPI_OPENS -gt 0 ]; then
+    echo "Error: MPI open count of $MPI_OPENS is incorrect" 1>&2
+    exit 1
+fi
+POSIX_OPENS=`grep CP_POSIX_OPENS $DARSHAN_TMP/${PROG}.darshan.txt |cut -f 4`
+if [ ! $POSIX_OPENS -gt 0 ]; then
+    echo "Error: POSIX open count of $POSIX_OPENS is incorrect" 1>&2
+    exit 1
+fi
+
+exit 0
diff --git a/darshan-test/regression/test-cases/fperf-f90.sh b/darshan-test/regression/test-cases/fperf-f90.sh
new file mode 100755
index 0000000..69dbd4f
--- /dev/null
+++ b/darshan-test/regression/test-cases/fperf-f90.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+PROG=fperf-f90
+
+# set log file path; remove previous log if present
+export DARSHAN_LOGFILE=$DARSHAN_TMP/${PROG}.darshan.gz
+rm -f ${DARSHAN_LOGFILE}
+
+# compile
+$DARSHAN_F90 $DARSHAN_TESTDIR/test-cases/src/fperf.f -o $DARSHAN_TMP/${PROG}
+if [ $? -ne 0 ]; then
+    echo "Error: failed to compile ${PROG}" 1>&2
+    exit 1
+fi
+
+# execute
+$DARSHAN_RUNJOB $DARSHAN_TMP/${PROG} -fname $DARSHAN_TMP/${PROG}.tmp.dat
+if [ $? -ne 0 ]; then
+    echo "Error: failed to execute ${PROG}" 1>&2
+    exit 1
+fi
+
+# parse log
+$DARSHAN_PATH/bin/darshan-parser $DARSHAN_LOGFILE > $DARSHAN_TMP/${PROG}.darshan.txt
+if [ $? -ne 0 ]; then
+    echo "Error: failed to parse ${DARSHAN_LOGFILE}" 1>&2
+    exit 1
+fi
+
+# check results
+# in this case we want to confirm that both the MPI and POSIX open counters were triggered
+MPI_OPENS=`grep CP_COLL_OPENS $DARSHAN_TMP/${PROG}.darshan.txt |cut -f 4`
+if [ ! $MPI_OPENS -gt 0 ]; then
+    echo "Error: MPI open count of $MPI_OPENS is incorrect" 1>&2
+    exit 1
+fi
+POSIX_OPENS=`grep CP_POSIX_OPENS $DARSHAN_TMP/${PROG}.darshan.txt |cut -f 4`
+if [ ! $POSIX_OPENS -gt 0 ]; then
+    echo "Error: POSIX open count of $POSIX_OPENS is incorrect" 1>&2
+    exit 1
+fi
+
+exit 0
diff --git a/darshan-test/regression/test-cases/mpi-io-test.sh b/darshan-test/regression/test-cases/mpi-io-test.sh
new file mode 100755
index 0000000..82982c1
--- /dev/null
+++ b/darshan-test/regression/test-cases/mpi-io-test.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+PROG=mpi-io-test
+
+# set log file path; remove previous log if present
+export DARSHAN_LOGFILE=$DARSHAN_TMP/${PROG}.darshan.gz
+rm -f ${DARSHAN_LOGFILE}
+
+# compile
+$DARSHAN_CC $DARSHAN_TESTDIR/test-cases/src/${PROG}.c -o $DARSHAN_TMP/${PROG}
+if [ $? -ne 0 ]; then
+    echo "Error: failed to compile ${PROG}" 1>&2
+    exit 1
+fi
+
+# execute
+$DARSHAN_RUNJOB $DARSHAN_TMP/${PROG} -f $DARSHAN_TMP/${PROG}.tmp.dat
+if [ $? -ne 0 ]; then
+    echo "Error: failed to execute ${PROG}" 1>&2
+    exit 1
+fi
+
+# parse log
+$DARSHAN_PATH/bin/darshan-parser $DARSHAN_LOGFILE > $DARSHAN_TMP/${PROG}.darshan.txt
+if [ $? -ne 0 ]; then
+    echo "Error: failed to parse ${DARSHAN_LOGFILE}" 1>&2
+    exit 1
+fi
+
+# check results
+# in this case we want to confirm that both the MPI and POSIX open counters were triggered
+MPI_OPENS=`grep CP_INDEP_OPENS $DARSHAN_TMP/${PROG}.darshan.txt |cut -f 4`
+if [ ! $MPI_OPENS -gt 0 ]; then
+    echo "Error: MPI open count of $MPI_OPENS is incorrect" 1>&2
+    exit 1
+fi
+POSIX_OPENS=`grep CP_POSIX_OPENS $DARSHAN_TMP/${PROG}.darshan.txt |cut -f 4`
+if [ ! $POSIX_OPENS -gt 0 ]; then
+    echo "Error: POSIX open count of $POSIX_OPENS is incorrect" 1>&2
+    exit 1
+fi
+
+
+exit 0
diff --git a/darshan-test/regression/test-cases/src/cxxpi.cxx b/darshan-test/regression/test-cases/src/cxxpi.cxx
new file mode 100644
index 0000000..b7c28a6
--- /dev/null
+++ b/darshan-test/regression/test-cases/src/cxxpi.cxx
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; c-basic-offset:4 ; -*- */
+/*  
+ *  (C) 2004 by Argonne National Laboratory.
+ *      See COPYRIGHT in top-level directory.
+ */
+
+#include "mpi.h"
+#include <iostream>
+using namespace std;
+#include <math.h>
+
+double f(double);
+
+double f(double a)
+{
+    return (4.0 / (1.0 + a*a));
+}
+
+int main(int argc,char **argv)
+{
+    int n, myid, numprocs, i;
+    double PI25DT = 3.141592653589793238462643;
+    double mypi, pi, h, sum, x;
+    double startwtime = 0.0, endwtime;
+    int  namelen;
+    char processor_name[MPI_MAX_PROCESSOR_NAME];
+
+    MPI::Init(argc,argv);
+    numprocs = MPI::COMM_WORLD.Get_size();
+    myid     = MPI::COMM_WORLD.Get_rank();
+    MPI::Get_processor_name(processor_name,namelen);
+
+    cout << "Process " << myid << " of " << numprocs << " is on " <<
+	processor_name << endl;
+
+    n = 10000;			/* default # of rectangles */
+    if (myid == 0)
+	startwtime = MPI::Wtime();
+
+    MPI::COMM_WORLD.Bcast(&n, 1, MPI_INT, 0);
+
+    h   = 1.0 / (double) n;
+    sum = 0.0;
+    /* A slightly better approach starts from large i and works back */
+    for (i = myid + 1; i <= n; i += numprocs)
+    {
+	x = h * ((double)i - 0.5);
+	sum += f(x);
+    }
+    mypi = h * sum;
+
+    MPI::COMM_WORLD.Reduce(&mypi, &pi, 1, MPI_DOUBLE, MPI_SUM, 0);
+
+    if (myid == 0) {
+	endwtime = MPI::Wtime();
+	cout << "pi is approximately " << pi << " Error is " <<
+	    fabs(pi - PI25DT) << endl;
+	cout << "wall clock time = " << endwtime-startwtime << endl;
+    }
+
+    MPI::Finalize();
+    return 0;
+}
diff --git a/darshan-test/regression/test-cases/src/fperf.f b/darshan-test/regression/test-cases/src/fperf.f
new file mode 100644
index 0000000..a3fdfa2
--- /dev/null
+++ b/darshan-test/regression/test-cases/src/fperf.f
@@ -0,0 +1,180 @@
+! -*- Mode: Fortran; -*- 
+!  
+!  (C) 2001 by Argonne National Laboratory.
+!      See COPYRIGHT in top-level directory.
+!
+      program main
+      implicit none
+
+      include 'mpif.h'
+      
+
+!     Fortran equivalent of perf.c
+
+      integer SIZE 
+      parameter (SIZE=1048576*4)
+!     read/write size per node in bytes
+
+      integer buf(SIZE/4), j, mynod, nprocs, ntimes, flag, foo
+      double precision stim, read_tim, write_tim, new_read_tim
+      double precision new_write_tim, min_read_tim, min_write_tim
+      double precision read_bw, write_bw
+      integer fh, status(MPI_STATUS_SIZE), ierr, argc, iargc, i
+      character*1024 str    ! used to store the filename
+      integer*8 offset
+      
+
+      ntimes = 5
+      min_read_tim = 10000000.0D0
+      min_write_tim = 10000000.0D0
+
+      call MPI_INIT(ierr)
+      call MPI_COMM_SIZE(MPI_COMM_WORLD, nprocs, foo, ierr)
+      call MPI_COMM_RANK(MPI_COMM_WORLD, mynod, ierr)
+
+!     process 0 takes the file name as a command-line argument and 
+!     broadcasts it to other processes
+
+      if (mynod .eq. 0) then
+         argc = iargc()
+         i = 0
+         call getarg(i,str)
+         do while ((i .lt. argc) .and. (str .ne. '-fname'))
+            i = i + 1
+            call getarg(i,str)
+         end do
+         if (i .ge. argc) then
+            print *
+            print *, '*#  Usage: fperf -fname filename'
+            print *
+            call MPI_ABORT(MPI_COMM_WORLD, 1, ierr)
+         end if
+
+         i = i + 1
+         call getarg(i,str)
+         call MPI_BCAST(str, 1024, MPI_CHARACTER, 0,                    &  
+     &        MPI_COMM_WORLD, ierr)
+         print *, 'Access size per process = ', SIZE, ' bytes',         &
+     &        ', ntimes = ', ntimes
+      else 
+         call MPI_BCAST(str, 1024, MPI_CHARACTER, 0,                    &
+     &        MPI_COMM_WORLD, ierr)
+      end if
+
+
+      offset = mynod*SIZE
+      do j=1, ntimes
+         call MPI_FILE_OPEN(MPI_COMM_WORLD, str,                        &
+     &        MPI_MODE_CREATE+MPI_MODE_RDWR, MPI_INFO_NULL, fh, ierr)
+
+         call MPI_FILE_SEEK(fh, offset, MPI_SEEK_SET, ierr)
+
+         call MPI_BARRIER(MPI_COMM_WORLD, ierr)
+         stim = MPI_WTIME()
+         call MPI_FILE_WRITE(fh, buf, SIZE, MPI_BYTE, status, ierr)
+         write_tim = MPI_WTIME() - stim
+  
+         call MPI_FILE_CLOSE(fh, ierr)
+
+         call MPI_BARRIER(MPI_COMM_WORLD, ierr)
+
+         call MPI_FILE_OPEN(MPI_COMM_WORLD, str,                        &
+     &        MPI_MODE_CREATE+MPI_MODE_RDWR, MPI_INFO_NULL, fh, ierr)
+
+         call MPI_FILE_SEEK(fh, offset, MPI_SEEK_SET, ierr)
+
+         call MPI_BARRIER(MPI_COMM_WORLD, ierr)
+         stim = MPI_WTIME()
+         call MPI_FILE_READ(fh, buf, SIZE, MPI_BYTE, status, ierr)
+         read_tim = MPI_WTIME() - stim
+  
+         call MPI_FILE_CLOSE(fh, ierr)
+
+         call MPI_ALLREDUCE(write_tim, new_write_tim, 1,                &
+     &        MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_WORLD, ierr)
+         call MPI_ALLREDUCE(read_tim, new_read_tim, 1,                  &
+     &        MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_WORLD, ierr)
+
+         if (new_read_tim .lt. min_read_tim) then
+            min_read_tim = new_read_tim
+         end if
+         if (new_write_tim .lt. min_write_tim) then
+            min_write_tim = new_write_tim
+         end if
+      end do
+    
+      if (mynod .eq. 0) then
+         read_bw = (SIZE*nprocs*1.0D0)/(min_read_tim*1000000.0D0)
+         write_bw = (SIZE*nprocs*1.0D0)/(min_write_tim*1000000.0D0)
+         print *, 'Write bandwidth without file sync = ',               &
+     &        write_bw, ' Mbytes/sec'
+         print *, 'Read bandwidth without prior file sync = ',          &
+     &        read_bw, ' Mbytes/sec'
+      end if 
+
+      min_read_tim = 10000000.0D0
+      min_write_tim = 10000000.0D0
+
+      flag = 0
+      do j=1, ntimes
+         call MPI_FILE_OPEN(MPI_COMM_WORLD, str,                        & 
+     &        MPI_MODE_CREATE+MPI_MODE_RDWR, MPI_INFO_NULL, fh, ierr)
+
+         call MPI_FILE_SEEK(fh, offset, MPI_SEEK_SET, ierr)
+
+         call MPI_BARRIER(MPI_COMM_WORLD, ierr)
+         stim = MPI_WTIME()
+         call MPI_FILE_WRITE(fh, buf, SIZE, MPI_BYTE, status, ierr)
+         call MPI_FILE_SYNC(fh, ierr)
+         write_tim = MPI_WTIME() - stim
+         if (ierr .eq. MPI_ERR_UNKNOWN) then 
+            flag = 1
+         end if
+  
+         call MPI_FILE_CLOSE(fh, ierr)
+
+         call MPI_BARRIER(MPI_COMM_WORLD, ierr)
+
+         call MPI_FILE_OPEN(MPI_COMM_WORLD, str,                        & 
+     &        MPI_MODE_CREATE+MPI_MODE_RDWR, MPI_INFO_NULL, fh, ierr)
+
+         call MPI_FILE_SEEK(fh, offset, MPI_SEEK_SET, ierr)
+
+         call MPI_BARRIER(MPI_COMM_WORLD, ierr)
+         stim = MPI_WTIME()
+         call MPI_FILE_READ(fh, buf, SIZE, MPI_BYTE, status, ierr)
+         read_tim = MPI_WTIME() - stim
+  
+         call MPI_FILE_CLOSE(fh, ierr)
+
+         call MPI_ALLREDUCE(write_tim, new_write_tim, 1,                &
+     &        MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_WORLD, ierr)
+         call MPI_ALLREDUCE(read_tim, new_read_tim, 1,                  &
+     &        MPI_DOUBLE_PRECISION, MPI_MAX, MPI_COMM_WORLD, ierr)
+
+         if (new_read_tim .lt. min_read_tim) then
+            min_read_tim = new_read_tim
+         end if
+         if (new_write_tim .lt. min_write_tim) then
+            min_write_tim = new_write_tim
+         end if
+
+      end do
+    
+      if (mynod .eq. 0) then
+         if (flag .eq. 1) then
+            print *, 'MPI_FILE_SYNC returns error.'
+         else
+            read_bw = (SIZE*nprocs*1.0D0)/(min_read_tim*1000000.0D0)
+            write_bw = (SIZE*nprocs*1.0D0)/(min_write_tim*1000000.0D0)
+            print *, 'Write bandwidth including file sync = ',          & 
+     &           write_bw, ' Mbytes/sec'
+            print *, 'Read bandwidth after file sync = ',               &
+     &           read_bw, ' Mbytes/sec'
+         end if 
+      end if
+
+      call MPI_FINALIZE(ierr)
+
+      stop
+      end
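
A side note on the numbers printed by the Fortran test above: each of nprocs ranks moves SIZE bytes, the slowest rank's time per iteration is obtained with MPI_ALLREDUCE/MPI_MAX, and the best (minimum) such time is used. A minimal C sketch of that arithmetic, with purely illustrative names and sample values (not part of the commit):

#include <stdio.h>

/* aggregate bandwidth in Mbytes/sec: bytes moved by the whole job,
 * divided by the slowest rank's time for the best iteration */
static double aggregate_bw(long long size_bytes, int nprocs, double min_time_sec)
{
    return ((double)size_bytes * nprocs) / (min_time_sec * 1.0e6);
}

int main(void)
{
    /* e.g. 1 MiB per rank, 4 ranks, best slowest-rank time of 0.05 s */
    printf("%.2f Mbytes/sec\n", aggregate_bw(1048576LL, 4, 0.05));
    return 0;
}
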
diff --git a/darshan-test/regression/test-cases/src/mpi-io-test.c b/darshan-test/regression/test-cases/src/mpi-io-test.c
new file mode 100644
index 0000000..152c24b
--- /dev/null
+++ b/darshan-test/regression/test-cases/src/mpi-io-test.c
@@ -0,0 +1,443 @@
+/*
+ * (C) 1995-2001 Clemson University and Argonne National Laboratory.
+ *
+ * See COPYING in top-level directory.
+ */
+
+/* mpi-io-test.c
+ *
+ * This is derived from code given to me by Rajeev Thakur.  Dunno where
+ * it originated.
+ *
+ * Its purpose is to produce aggregate bandwidth numbers for varying
+ * block sizes, numbers of processors, and numbers of iterations.
+ *
+ * This is strictly an MPI program - it is used to test the MPI I/O
+ * functionality implemented by Romio.
+ *
+ * Compiling is usually easiest with something like:
+ * mpicc -Wall -Wstrict-prototypes mpi-io-test.c -o mpi-io-test
+ *
+ * NOTE: This code assumes that all command line arguments make it out to all
+ * the processes that make up the parallel job, which isn't always the case.
+ * So if it doesn't work on some platform, that might be why.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/time.h>
+#include <mpi.h>
+#include <errno.h>
+#include <getopt.h>
+
+/* DEFAULT VALUES FOR OPTIONS */
+static int64_t opt_block     = 16*1024*1024;
+static int     opt_iter      = 1;
+static int     opt_coll      = 0;
+static int     opt_correct   = 0;
+static int     opt_sync      = 0;
+static int     opt_single    = 0;
+static int     opt_verbose   = 0;
+static int     opt_rdonly    = 0;
+static int     opt_wronly    = 0;
+static char    opt_file[256] = "test.out";
+static char    opt_pvfs2tab[256] = "notset";
+static int     opt_pvfstab_set = 0;
+
+/* function prototypes */
+static int parse_args(int argc, char **argv);
+static void usage(void);
+static void handle_error(int errcode, char *str);
+
+/* global vars */
+static int mynod = 0;
+static int nprocs = 1;
+
+int main(int argc, char **argv)
+{
+   char *buf, *tmp=NULL, *check;
+   int i, j, v, err, sync_err=0, my_correct = 1, correct, myerrno;
+   double stim, etim;
+   double write_tim = 0;
+   double read_tim = 0;
+   double read_bw, write_bw;
+   double max_read_tim, max_write_tim;
+   double min_read_tim, min_write_tim;
+   double ave_read_tim, ave_write_tim;
+   double sum_read_tim, sum_write_tim;
+   double sq_write_tim, sq_read_tim;
+   double sumsq_write_tim, sumsq_read_tim;
+   double var_read_tim, var_write_tim;
+   int64_t iter_jump = 0;
+   int64_t seek_position = 0;
+   MPI_File fh;
+   MPI_Status status;
+	MPI_Comm comm;
+   int nchars=0;
+   int namelen;
+   char processor_name[MPI_MAX_PROCESSOR_NAME];
+
+   /* startup MPI and determine the rank of this process */
+   MPI_Init(&argc,&argv);
+   MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
+   MPI_Comm_rank(MPI_COMM_WORLD, &mynod);
+   MPI_Get_processor_name(processor_name, &namelen); 
+   
+   /* parse the command line arguments */
+   parse_args(argc, argv);
+
+   if (opt_verbose) fprintf(stdout,"Process %d of %d is on %s\n",
+									 mynod, nprocs, processor_name);
+
+   if (mynod == 0) printf("# Using mpi-io calls.\n");
+
+    //sleep(15);
+   
+   /* kind of a weird hack - if the location of the pvfstab file was
+    * specified on the command line, then spit out this location into
+    * the appropriate environment variable: */
+   
+   if (opt_pvfstab_set) {
+      if ((setenv("PVFS2TAB_FILE", opt_pvfs2tab, 1)) < 0) {
+         perror("setenv");
+         goto die_jar_jar_die;
+      }
+   }
+   
+   /* this is how much of the file data is covered on each iteration of
+    * the test.  used to help determine the seek offset on each
+    * iteration */
+   iter_jump = nprocs * opt_block;
+      
+   /* setup a buffer of data to write */
+   if (!(tmp = malloc((size_t) opt_block + 256))) {
+      perror("malloc");
+      goto die_jar_jar_die;
+   }
+   buf = tmp + 128 - (((long)tmp) % 128);  /* align buffer */
+
+   /* open the file for writing */
+	if (opt_coll) {
+		comm = MPI_COMM_WORLD;
+	}
+	else {
+		comm = MPI_COMM_SELF;
+	}
+   err = MPI_File_open(comm, opt_file, 
+							  MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);
+   if (err != MPI_SUCCESS) {
+      handle_error(err, "MPI_File_open");
+      goto die_jar_jar_die;
+   } 
+	
+	nchars = (int) (opt_block/sizeof(char));
+	if (!opt_rdonly) {
+
+   /* now repeat the seek and write operations the number of times
+    * specified on the command line */
+   for (j=0; j < opt_iter; j++) {
+
+      /* reading and writing to the same block is cheating, but sometimes
+       * we want to measure cached performance of file servers */
+      if (opt_single == 1)
+			seek_position = 0;
+      else
+			/* seek to an appropriate position depending on the iteration 
+			 * and rank of the current process */
+			seek_position = (j*iter_jump)+(mynod*opt_block);
+
+      MPI_File_seek(fh, seek_position, MPI_SEEK_SET);
+
+      if (opt_correct) /* fill in buffer for iteration */ {
+         for (i=0, v=mynod+j, check=buf; i<opt_block; i++, v++, check++) 
+            *check = (char) v;
+      }
+
+      /* discover the starting time of the operation */
+      MPI_Barrier(MPI_COMM_WORLD);
+      stim = MPI_Wtime();
+
+      /* write out the data */
+		if (opt_coll) {
+			err = MPI_File_write_all(fh, buf, nchars, MPI_CHAR, &status);
+		}
+		else {
+			err = MPI_File_write(fh, buf, nchars, MPI_CHAR, &status);
+		}
+      if(err){
+         fprintf(stderr, "node %d, write error: %s\n", mynod, 
+         strerror(errno));
+      }
+      if (opt_sync) sync_err = MPI_File_sync(fh);
+      if (sync_err) {
+         fprintf(stderr, "node %d, sync error: %s\n", mynod, 
+					  strerror(errno));
+      }
+
+      /* discover the ending time of the operation */
+      etim = MPI_Wtime();
+
+      write_tim += (etim - stim);
+      
+      /* we are done with this "write" iteration */
+   }
+	} /* ! opt_rdonly */
+
+   err = MPI_File_close(&fh);
+   if(err){
+      fprintf(stderr, "node %d, close error after write\n", mynod);
+   }
+    
+   /* wait for everyone to synchronize at this point */
+   MPI_Barrier(MPI_COMM_WORLD);
+
+   /* reopen the file to read the data back out */
+   err = MPI_File_open(comm, opt_file, 
+			   MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);
+   if (err < 0) {
+      fprintf(stderr, "node %d, open error: %s\n", mynod, strerror(errno));
+      goto die_jar_jar_die;
+   }
+
+	if (!opt_wronly) {
+   /* we are going to repeat the read operation the number of iterations
+    * specified */
+   for (j=0; j < opt_iter; j++) {
+      /* reading and writing to the same block is cheating, but sometimes
+       * we want to measure cached performance of file servers */
+      if (opt_single == 1) {
+			seek_position = 0;
+		}
+      else {
+			/* seek to an appropriate position depending on the iteration 
+			 * and rank of the current process */
+			seek_position = (j*iter_jump)+(mynod*opt_block);
+		}
+
+      MPI_File_seek(fh, seek_position, MPI_SEEK_SET);
+
+      /* discover the start time */
+      MPI_Barrier(MPI_COMM_WORLD);
+      stim = MPI_Wtime();
+
+      /* read in the file data */
+		if (opt_coll) {
+			err = MPI_File_read_all(fh, buf, nchars, MPI_CHAR, &status);
+		}
+		else {
+			err = MPI_File_read(fh, buf, nchars, MPI_CHAR, &status);
+		}
+      myerrno = errno;
+
+      /* discover the end time */
+      etim = MPI_Wtime();
+      read_tim += (etim - stim);
+
+      if (err < 0) {
+			fprintf(stderr, "node %d, read error, loc = %lld: %s\n",
+				mynod, (long long) mynod*opt_block, strerror(myerrno));
+		}
+
+      /* if the user wanted to check correctness, compare the write
+       * buffer to the read buffer */
+      if (opt_correct) {
+         int badct = 0;
+
+         for (i=0, v=mynod+j, check=buf;
+              i < opt_block && badct < 10;
+              i++, v++, check++)
+         {
+            if (*check != (char) v) {
+               my_correct = 0;
+               if (badct < 10) {
+                  badct++;
+                  fprintf(stderr, "buf[%d] = %d, should be %d\n", 
+                          i, *check, (char) v);
+               }
+            }
+         }
+         MPI_Allreduce(&my_correct, &correct, 1, MPI_INT, MPI_MIN,
+                       MPI_COMM_WORLD);
+         if (badct == 10) fprintf(stderr, "...\n");
+      }
+
+      /* we are done with this read iteration */
+   }
+	} /* !opt_wronly */
+
+   /* close the file */
+   err = MPI_File_close(&fh);
+   if (err) {
+      fprintf(stderr, "node %d, close error after write\n", mynod);
+   }
+
+   /* compute the read and write times */
+   MPI_Allreduce(&read_tim, &max_read_tim, 1, MPI_DOUBLE, MPI_MAX,
+      MPI_COMM_WORLD);
+   MPI_Allreduce(&read_tim, &min_read_tim, 1, MPI_DOUBLE, MPI_MIN,
+      MPI_COMM_WORLD);
+   MPI_Allreduce(&read_tim, &sum_read_tim, 1, MPI_DOUBLE, MPI_SUM,
+      MPI_COMM_WORLD);
+
+   /* calculate our part of the summation used for variance */
+   sq_read_tim = read_tim - (sum_read_tim / nprocs);
+   sq_read_tim = sq_read_tim * sq_read_tim;
+   MPI_Allreduce(&sq_read_tim, &sumsq_read_tim, 1, MPI_DOUBLE, 
+                   MPI_SUM, MPI_COMM_WORLD);
+
+
+   MPI_Allreduce(&write_tim, &max_write_tim, 1, MPI_DOUBLE, MPI_MAX,
+      MPI_COMM_WORLD);
+   MPI_Allreduce(&write_tim, &min_write_tim, 1, MPI_DOUBLE, MPI_MIN,
+      MPI_COMM_WORLD);
+   MPI_Allreduce(&write_tim, &sum_write_tim, 1, MPI_DOUBLE, MPI_SUM,
+      MPI_COMM_WORLD);
+
+   /* calculate our part of the summation used for variance */
+   sq_write_tim = write_tim - (sum_write_tim / nprocs );
+   sq_write_tim = sq_write_tim * sq_write_tim;
+   MPI_Allreduce(&sq_write_tim, &sumsq_write_tim, 1, MPI_DOUBLE, 
+                   MPI_SUM, MPI_COMM_WORLD);
+
+   /* calculate the average from the sum */
+   ave_read_tim  = sum_read_tim / nprocs; 
+   ave_write_tim = sum_write_tim / nprocs; 
+
+   /* and finally compute variance */
+   if (nprocs > 1) {
+		var_read_tim  = sumsq_read_tim / (nprocs-1);
+		var_write_tim = sumsq_write_tim / (nprocs-1);
+   }
+   else {
+		var_read_tim = 0;
+		var_write_tim = 0;
+   }
+   
+   /* print out the results on one node */
+   if (mynod == 0) {
+      read_bw = (opt_block*nprocs*opt_iter)/(max_read_tim*1.0e6);
+      write_bw = (opt_block*nprocs*opt_iter)/(max_write_tim*1.0e6);
+      
+		printf("nr_procs = %d, nr_iter = %d, blk_sz = %lld, coll = %d\n",
+				 nprocs, opt_iter, (long long) opt_block, opt_coll);
+		
+		printf("# total_size = %lld\n",
+				 (long long) opt_block*nprocs*opt_iter);
+		printf("# Write: min_t = %f, max_t = %f, mean_t = %f, var_t = %f\n", 
+				 min_write_tim, max_write_tim, ave_write_tim, var_write_tim);
+		printf("# Read:  min_t = %f, max_t = %f, mean_t = %f, var_t = %f\n", 
+				 min_read_tim, max_read_tim, ave_read_tim, var_read_tim);
+      
+      printf("Write bandwidth = %f Mbytes/sec\n", write_bw);
+      printf("Read bandwidth = %f Mbytes/sec\n", read_bw);
+      
+      if (opt_correct) {
+         printf("Correctness test %s.\n", correct ? "passed" : "failed");
+      }
+   }
+
+
+die_jar_jar_die:   
+
+   free(tmp);
+   MPI_Finalize();
+   return(0);
+}
+
+static int parse_args(int argc, char **argv)
+{
+   int c;
+   
+   while ((c = getopt(argc, argv, "b:i:f:p:CcyShvrw")) != EOF) {
+      switch (c) {
+         case 'b': /* block size */
+            opt_block = atoi(optarg);
+            break;
+         case 'i': /* iterations */
+            opt_iter = atoi(optarg);
+            break;
+         case 'f': /* filename */
+            strncpy(opt_file, optarg, 255);
+            break;
+         case 'p': /* pvfstab file */
+            strncpy(opt_pvfs2tab, optarg, 255);
+            opt_pvfstab_set = 1;
+            break;
+         case 'c': /* correctness */
+            opt_correct = 1;
+            break;
+         case 'C': /* collective I/O */
+            opt_coll = 1;
+            break;
+         case 'y': /* sYnc */
+            opt_sync = 1;
+            break;
+         case 'S': /* Single region */
+            opt_single = 1;
+            break;
+         case 'v': /* verbose */
+            opt_verbose = 1;
+            break;
+			case 'r': /* read-only */
+				opt_rdonly = 1;
+				break;
+			case 'w': /* write-only */
+				opt_wronly = 1;
+				break;
+         case 'h':
+            if (mynod == 0)
+                usage();
+            exit(0);
+         case '?': /* unknown */
+            if (mynod == 0)
+                usage();
+            exit(1);
+         default:
+            break;
+      }
+   }
+   return(0);
+}
+
+static void usage(void)
+{
+    printf("Usage: mpi-io-test [<OPTIONS>...]\n");
+    printf("\n<OPTIONS> is one of\n");
+    printf(" -b       block size (in bytes) [default: 16777216]\n");
+    printf(" -c       verify correctness of file data [default: off]\n");
+    printf(" -C       perform operations Collectively [default: off]\n");
+    printf(" -i       iterations [default: 1]\n");
+    printf(" -f       filename [default: /foo/test.out]\n");
+    printf(" -p       path to pvfs2tab file to use [default: notset]\n");
+    printf(" -S       all process write to same Single region of file [default: off]\n");
+	 printf(" -r       read-only.  do no writes.  file must already exist\n");
+	 printf(" -w       write-only. do no reads.\n");
+    printf(" -v       be more verbose\n");
+    printf(" -y       sYnc the file after each write [default: off]\n");
+    printf(" -h       print this help\n");
+}
+
+static void handle_error(int errcode, char *str)
+{
+    char msg[MPI_MAX_ERROR_STRING];
+    int resultlen;
+
+    MPI_Error_string(errcode, msg, &resultlen);
+    fprintf(stderr, "%s: %s\n", str, msg);
+    MPI_Abort(MPI_COMM_WORLD, 1);
+}
+
+/*
+ * Local variables:
+ *  c-indent-level: 3
+ *  c-basic-offset: 3
+ *  tab-width: 3
+ *
+ * vim: ts=3
+ * End:
+ */ 
+
+
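One detail of mpi-io-test.c worth spelling out is its access pattern: iter_jump = nprocs * opt_block, and each rank seeks to (j*iter_jump)+(mynod*opt_block), so on iteration j rank r touches block number j*nprocs + r and the ranks tile the file with no overlap (unless -S forces every rank to offset 0). A small stand-alone sketch of the resulting offsets, using sample sizes only:

#include <stdio.h>

int main(void)
{
    long long opt_block = 16LL * 1024 * 1024;  /* default -b block size */
    int nprocs = 4, opt_iter = 2;              /* sample job size */
    long long iter_jump = (long long)nprocs * opt_block;

    for (int j = 0; j < opt_iter; j++)
        for (int r = 0; r < nprocs; r++)
            printf("iter %d, rank %d -> offset %lld\n",
                   j, r, j * iter_jump + r * opt_block);
    return 0;
}
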
diff --git a/darshan-test/regression/workstation-dynamic/env.sh b/darshan-test/regression/workstation-dynamic/env.sh
new file mode 100755
index 0000000..1536d61
--- /dev/null
+++ b/darshan-test/regression/workstation-dynamic/env.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# General notes
+#######################
+
+# Script to set up the environment for tests on this platform.  Must export
+# the following environment variables:
+# 
+# DARSHAN_CC: command to compile C programs
+# DARSHAN_CXX: command to compile C++ programs
+# DARSHAN_F90: command to compile Fortran90 programs
+# DARSHAN_F77: command to compile Fortran77 programs
+# DARSHAN_RUNJOB: command to execute a job and wait for its completion
+
+# This script may load optional modules (as in a Cray PE), set LD_PRELOAD
+# variables (as in a dynamically linked environment), or generate mpicc
+# wrappers (as in a statically linked environment).
+
+# Notes specific to this platform (workstation-dynamic)
+########################
+# This particular env script assumes that mpicc and its variants for other 
+# languages are already in the path, and that they will produce dynamic
+# executables by default.  Test programs are compiled using the existing
+# compiler scripts, and LD_PRELOAD is set to enable instrumentation.
+
+# The runjob command is just mpiexec, no scheduler
+
+export DARSHAN_CC=mpicc
+export DARSHAN_CXX=mpicxx
+export DARSHAN_F77=mpif77
+export DARSHAN_F90=mpif90
+FULL_MPICC_PATH=`which mpicc`
+
+# This is a hack.  In order to instrument Fortran programs with LD_PRELOAD,
+# we must prepend libfmpich.so to the LD_PRELOAD variable, but with a fully
+# resolved path.  To find a path we locate mpicc and speculate that
+# libfmpich.so can be found in ../lib.
+export LD_PRELOAD=`dirname $FULL_MPICC_PATH`/../lib/libfmpich.so:$DARSHAN_PATH/lib/libdarshan.so:$LD_PRELOAD
+
+export DARSHAN_RUNJOB="mpiexec -n $DARSHAN_DEFAULT_NPROCS"
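
The LD_PRELOAD line above relies on the dynamic linker's usual interposition mechanism. For readers unfamiliar with it, a generic wrapper looks roughly like this; it is only an illustration of the technique, not Darshan's wrapper code, and all names are made up:

/* interpose.c - generic LD_PRELOAD interposer (illustrative only).
 * Build:  cc -shared -fPIC interpose.c -o libinterpose.so -ldl
 * Run:    LD_PRELOAD=./libinterpose.so ./some_program
 */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <unistd.h>

ssize_t write(int fd, const void *buf, size_t count)
{
    static ssize_t (*real_write)(int, const void *, size_t);

    if (!real_write)   /* resolve the real libc symbol on first use */
        real_write = (ssize_t (*)(int, const void *, size_t))
                     dlsym(RTLD_NEXT, "write");

    /* an instrumentation library would record counters/timing here */
    return real_write(fd, buf, count);
}
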
diff --git a/darshan-test/regression/workstation-profile-conf/env.sh b/darshan-test/regression/workstation-profile-conf/env.sh
new file mode 100755
index 0000000..0147783
--- /dev/null
+++ b/darshan-test/regression/workstation-profile-conf/env.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# General notes
+#######################
+
+# Script to set up the environment for tests on this platform.  Must export
+# the following environment variables:
+# 
+# DARSHAN_CC: command to compile C programs
+# DARSHAN_CXX: command to compile C++ programs
+# DARSHAN_F90: command to compile Fortran90 programs
+# DARSHAN_F77: command to compile Fortran77 programs
+# DARSHAN_RUNJOB: command to execute a job and wait for its completion
+
+# This script may load optional modules (as in a Cray PE), set LD_PRELOAD
+# variables (as in a dynamically linked environment), or generate mpicc
+# wrappers (as in a statically linked environment).
+
+# Notes specific to this platform (workstation-profile-conf)
+########################
+# This particular env script assumes that mpicc and its variants for other 
+# languages are already in the path, and that they will produce static 
+# executables by default.  Darshan instrumentation is added by specifying
+# a profiling configuration file using environment variables.
+
+# The runjob command is just mpiexec, no scheduler
+
+export DARSHAN_CC=mpicc
+export DARSHAN_CXX=mpicxx
+export DARSHAN_F77=mpif77
+export DARSHAN_F90=mpif90
+
+export MPICC_PROFILE=$DARSHAN_PATH/share/mpi-profile/darshan-cc
+export MPICXX_PROFILE=$DARSHAN_PATH/share/mpi-profile/darshan-cxx
+export MPIF90_PROFILE=$DARSHAN_PATH/share/mpi-profile/darshan-f
+export MPIF77_PROFILE=$DARSHAN_PATH/share/mpi-profile/darshan-f
+
+export DARSHAN_RUNJOB="mpiexec -n $DARSHAN_DEFAULT_NPROCS"
diff --git a/darshan-test/regression/workstation-static/env.sh b/darshan-test/regression/workstation-static/env.sh
new file mode 100755
index 0000000..f076efb
--- /dev/null
+++ b/darshan-test/regression/workstation-static/env.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# General notes
+#######################
+
+# Script to set up the environment for tests on this platform.  Must export
+# the following environment variables:
+# 
+# DARSHAN_CC: command to compile C programs
+# DARSHAN_CXX: command to compile C++ programs
+# DARSHAN_F90: command to compile Fortran90 programs
+# DARSHAN_F77: command to compile Fortran77 programs
+# DARSHAN_RUNJOB: command to execute a job and wait for its completion
+
+# This script may load optional modules (as in a Cray PE), set LD_PRELOAD
+# variables (as in a dynamically linked environment), or generate mpicc
+# wrappers (as in a statically linked environment).
+
+# Notes specific to this platform (workstation-static)
+########################
+# This particular env script assumes that mpicc and its variants for other 
+# languages are already in the path.  The compiler scripts to be used in
+# these test cases will be generated using darshan-gen-*.pl scripts.
+
+# The runjob command is just mpiexec, no scheduler
+
+$DARSHAN_PATH/bin/darshan-gen-cc.pl `which mpicc` --output $DARSHAN_TMP/mpicc
+if [ $? -ne 0 ]; then
+    echo "Error: failed to generate c compiler." 1>&2
+    exit 1
+fi
+export DARSHAN_CC=$DARSHAN_TMP/mpicc
+
+$DARSHAN_PATH/bin/darshan-gen-cxx.pl `which mpicxx` --output $DARSHAN_TMP/mpicxx
+if [ $? -ne 0 ]; then
+    echo "Error: failed to generate c compiler." 1>&2
+    exit 1
+fi
+export DARSHAN_CXX=$DARSHAN_TMP/mpicxx
+
+$DARSHAN_PATH/bin/darshan-gen-fortran.pl `which mpif77` --output $DARSHAN_TMP/mpif77
+if [ $? -ne 0 ]; then
+    echo "Error: failed to generate f77 compiler." 1>&2
+    exit 1
+fi
+export DARSHAN_F77=$DARSHAN_TMP/mpif77
+
+$DARSHAN_PATH/bin/darshan-gen-fortran.pl `which mpif90` --output $DARSHAN_TMP/mpif90
+if [ $? -ne 0 ]; then
+    echo "Error: failed to generate f90 compiler." 1>&2
+    exit 1
+fi
+export DARSHAN_F90=$DARSHAN_TMP/mpif90
+
+export DARSHAN_RUNJOB="mpiexec -n $DARSHAN_DEFAULT_NPROCS"
diff --git a/darshan-util/configure b/darshan-util/configure
index a0b19ba..16d470e 100755
--- a/darshan-util/configure
+++ b/darshan-util/configure
@@ -1445,52 +1445,6 @@ fi
 
 } # ac_fn_c_try_compile
 
-# ac_fn_c_try_link LINENO
-# -----------------------
-# Try to link conftest.$ac_ext, and return whether this succeeded.
-ac_fn_c_try_link ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  rm -f conftest.$ac_objext conftest$ac_exeext
-  if { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    grep -v '^ *+' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-    mv -f conftest.er1 conftest.err
-  fi
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest$ac_exeext && {
-	 test "$cross_compiling" = yes ||
-	 test -x conftest$ac_exeext
-       }; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_retval=1
-fi
-  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
-  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
-  # interfere with the next link command; also delete a directory that is
-  # left behind by Apple's compiler.  We do this before executing the actions.
-  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
-  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_link
-
 # ac_fn_c_try_cpp LINENO
 # ----------------------
 # Try to preprocess conftest.$ac_ext, and return whether this succeeded.
@@ -1688,6 +1642,52 @@ $as_echo "$ac_res" >&6; }
 
 } # ac_fn_c_check_header_compile
 
+# ac_fn_c_try_link LINENO
+# -----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_link ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest$ac_exeext
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext && {
+	 test "$cross_compiling" = yes ||
+	 test -x conftest$ac_exeext
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+  # interfere with the next link command; also delete a directory that is
+  # left behind by Apple's compiler.  We do this before executing the actions.
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_link
+
 # ac_fn_c_check_func LINENO FUNC VAR
 # ----------------------------------
 # Tests whether FUNC exists, setting the cache variable VAR accordingly
@@ -3420,109 +3420,38 @@ fi
 done
 
 
-#
-# Handle user hints
-#
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if zlib is wanted" >&5
-$as_echo_n "checking if zlib is wanted... " >&6; }
+
+
 
 # Check whether --with-zlib was given.
 if test "${with_zlib+set}" = set; then :
   withval=$with_zlib; if test "$withval" != no ; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
   if test -d "$withval"
   then
     ZLIB_HOME="$withval"
+    LDFLAGS="$LDFLAGS -L${ZLIB_HOME}/lib"
+    CPPFLAGS="$CPPFLAGS -I${ZLIB_HOME}/include"
+    __CP_ZLIB_LINK_FLAGS="-L${ZLIB_HOME}/lib"
+    __CP_ZLIB_INCLUDE_FLAGS="-I${ZLIB_HOME}/include"
   else
     { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Sorry, $withval does not exist, checking usual places" >&5
 $as_echo "$as_me: WARNING: Sorry, $withval does not exist, checking usual places" >&2;}
   fi
 else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
+  as_fn_error $? "zlib is required" "$LINENO" 5
 fi
 fi
 
 
-#
-# Locate zlib, if wanted
-#
-if test -n "${ZLIB_HOME}"
-then
-        ZLIB_OLD_LDFLAGS=$LDFLAGS
-        ZLIB_OLD_CPPFLAGS=$LDFLAGS
-        LDFLAGS="$LDFLAGS -L${ZLIB_HOME}/lib"
-        CPPFLAGS="$CPPFLAGS -I${ZLIB_HOME}/include"
-
-        ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-        { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inflateEnd in -lz" >&5
-$as_echo_n "checking for inflateEnd in -lz... " >&6; }
-if ${ac_cv_lib_z_inflateEnd+:} false; then :
-  $as_echo_n "(cached) " >&6
-else
-  ac_check_lib_save_LIBS=$LIBS
-LIBS="-lz  $LIBS"
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-/* Override any GCC internal prototype to avoid an error.
-   Use char because int might match the return type of a GCC
-   builtin and then its argument prototype would still apply.  */
-#ifdef __cplusplus
-extern "C"
-#endif
-char inflateEnd ();
-int
-main ()
-{
-return inflateEnd ();
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
-  ac_cv_lib_z_inflateEnd=yes
-else
-  ac_cv_lib_z_inflateEnd=no
-fi
-rm -f core conftest.err conftest.$ac_objext \
-    conftest$ac_exeext conftest.$ac_ext
-LIBS=$ac_check_lib_save_LIBS
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_inflateEnd" >&5
-$as_echo "$ac_cv_lib_z_inflateEnd" >&6; }
-if test "x$ac_cv_lib_z_inflateEnd" = xyes; then :
-  zlib_cv_libz=yes
-else
-  zlib_cv_libz=no
-fi
-
-        ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default"
+ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default"
 if test "x$ac_cv_header_zlib_h" = xyes; then :
-  zlib_cv_zlib_h=yes
+
 else
-  zlib_cv_zlib_h=no
+  as_fn_error $? "z.h not found" "$LINENO" 5
 fi
 
 
-        ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-        if test "$zlib_cv_libz" = "yes" -a "$zlib_cv_zlib_h" = "yes"
-        then
-                #
-                # If both library and header were found, use them
-                #
-                { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inflateEnd in -lz" >&5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for inflateEnd in -lz" >&5
 $as_echo_n "checking for inflateEnd in -lz... " >&6; }
 if ${ac_cv_lib_z_inflateEnd+:} false; then :
   $as_echo_n "(cached) " >&6
@@ -3565,27 +3494,10 @@ _ACEOF
 
   LIBS="-lz $LIBS"
 
+else
+  as_fn_error $? "libz not found" "$LINENO" 5
 fi
 
-                { $as_echo "$as_me:${as_lineno-$LINENO}: checking zlib in ${ZLIB_HOME}" >&5
-$as_echo_n "checking zlib in ${ZLIB_HOME}... " >&6; }
-                { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5
-$as_echo "ok" >&6; }
-		__CP_ZLIB_LINK_FLAGS="-L${ZLIB_HOME}/lib"
-		__CP_ZLIB_INCLUDE_FLAGS="-I${ZLIB_HOME}/include"
-        else
-                #
-                # If either header or library was not found, revert and bomb
-                #
-                { $as_echo "$as_me:${as_lineno-$LINENO}: checking zlib in ${ZLIB_HOME}" >&5
-$as_echo_n "checking zlib in ${ZLIB_HOME}... " >&6; }
-                LDFLAGS="$ZLIB_OLD_LDFLAGS"
-                CPPFLAGS="$ZLIB_OLD_CPPFLAGS"
-                { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5
-$as_echo "failed" >&6; }
-                as_fn_error $? "either specify a valid zlib installation with --with-zlib=DIR or disable zlib usage with --without-zlib" "$LINENO" 5
-        fi
-fi
 
 
 #
diff --git a/darshan-util/darshan-job-summary/bin/darshan-job-summary.pl.in b/darshan-util/darshan-job-summary/bin/darshan-job-summary.pl.in
index ad5c0b6..1663244 100755
--- a/darshan-util/darshan-job-summary/bin/darshan-job-summary.pl.in
+++ b/darshan-util/darshan-job-summary/bin/darshan-job-summary.pl.in
@@ -787,7 +787,10 @@ system "$cp $PREFIX/share/*.gplt $tmp_dir/";
 system "$cp $PREFIX/share/*.tex $tmp_dir/";
 
 # generate template for file access plot (we have to set range)
-my $ymax = $nprocs + 1;
+my $ymax = $nprocs;
+my $yinc = int($nprocs / 8);
+if($yinc == 0) {$yinc=1;}
+my $ymaxtic = $nprocs-1;
 open(FILEACC, ">$tmp_dir/file-access-read-eps.gplt") || die("error opening output file:$!\n");
 print FILEACC "#!/usr/bin/gnuplot -persist
 
@@ -801,6 +804,7 @@ set format x \"%H:%M:%S\"
 set yrange [-1:$ymax]
 set title \"Timespan from first to last read access on independent files\"
 set xrange [\"0\":\"$runtime\"]
+set ytics 0,$yinc,$ymaxtic
 #set ytics -1,1
 set lmargin 4
 
@@ -831,6 +835,7 @@ set title \"Timespan from first to last write access on independent files\"
 set yrange [-1:$ymax]
 set xrange [\"0\":\"$runtime\"]
 #set ytics -1,1
+set ytics 0,$yinc,$ymaxtic
 set lmargin 4
 
 # color blindness work around
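
For reference, the y-tic change above means the file-access plots now label roughly eight ranks on the y axis instead of one tic per rank. The same arithmetic rendered as a quick C sketch (sample process counts only): with 4 ranks it yields "set ytics 0,1,3", with 64 ranks "set ytics 0,8,63".

#include <stdio.h>

static void ytics(int nprocs)
{
    int yinc = nprocs / 8;          /* int($nprocs / 8) in the Perl */
    if (yinc == 0) yinc = 1;
    printf("set ytics 0,%d,%d\n", yinc, nprocs - 1);
}

int main(void)
{
    ytics(4);
    ytics(64);
    return 0;
}
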
diff --git a/darshan-util/darshan-parser.c b/darshan-util/darshan-parser.c
index 264ddd0..02171a5 100644
--- a/darshan-util/darshan-parser.c
+++ b/darshan-util/darshan-parser.c
@@ -65,6 +65,7 @@ typedef struct perf_data_s
     int64_t total_bytes;
     double slowest_rank_time;
     double slowest_rank_meta_time;
+    int slowest_rank_rank;
     double shared_time_by_cumul;
     double shared_time_by_open;
     double shared_time_by_open_lastio;
@@ -475,8 +476,9 @@ int main(int argc, char **argv)
         printf("#\n");
         printf("# I/O timing for unique files (seconds):\n");
         printf("# ...........................\n");
-        printf("# unique files: slowest_rank_time: %lf\n", pdata.slowest_rank_time);
+        printf("# unique files: slowest_rank_io_time: %lf\n", pdata.slowest_rank_time);
         printf("# unique files: slowest_rank_meta_time: %lf\n", pdata.slowest_rank_meta_time);
+        printf("# unique files: slowest rank: %d\n", pdata.slowest_rank_rank);
         printf("#\n");
         printf("# I/O timing for shared files (seconds):\n");
         printf("# (multiple estimates shown; time_by_slowest is generally the most accurate)\n");
@@ -1062,6 +1064,7 @@ void calc_perf(struct darshan_job *djob,
         {
             pdata->slowest_rank_time = pdata->rank_cumul_io_time[i];
             pdata->slowest_rank_meta_time = pdata->rank_cumul_md_time[i];
+            pdata->slowest_rank_rank = i;
         }
     }
 
@@ -1083,6 +1086,7 @@ void calc_perf(struct darshan_job *djob,
     if (pdata->slowest_rank_time + pdata->shared_time_by_slowest)
     pdata->agg_perf_by_slowest = ((double)pdata->total_bytes / 1048576.0) /
                                      (pdata->slowest_rank_time +
+                                      pdata->slowest_rank_meta_time +
                                       pdata->shared_time_by_slowest);
 
     return;
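
The darshan-parser change above folds the slowest rank's metadata time into the agg_perf_by_slowest estimate. A self-contained sketch of the adjusted formula with made-up numbers (these are not Darshan API calls):

#include <stdio.h>

int main(void)
{
    double total_bytes            = 4.0 * 1073741824.0;  /* 4 GiB moved */
    double slowest_rank_time      = 2.5;                 /* seconds */
    double slowest_rank_meta_time = 0.3;                 /* newly included */
    double shared_time_by_slowest = 1.2;

    double agg_perf_by_slowest = (total_bytes / 1048576.0) /
        (slowest_rank_time + slowest_rank_meta_time + shared_time_by_slowest);

    printf("agg_perf_by_slowest = %f MiB/s\n", agg_perf_by_slowest);
    return 0;
}
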
diff --git a/darshan-util/doc/darshan-util.txt b/darshan-util/doc/darshan-util.txt
index 7773cbc..b9eced7 100644
--- a/darshan-util/doc/darshan-util.txt
+++ b/darshan-util/doc/darshan-util.txt
@@ -321,8 +321,9 @@ different computations.
 #
 # I/O timing for unique files (seconds):
 # ...........................
-# unique files: slowest_rank_time: 0.000000
+# unique files: slowest_rank_io_time: 0.000000
 # unique files: slowest_rank_meta_time: 0.000000
+# unique files: slowest_rank: 0
 #
 # I/O timing for shared files (seconds):
 # (multiple estimates shown; time_by_slowest is generally the most accurate)
diff --git a/maint/config/check_zlib.m4 b/maint/config/check_zlib.m4
index dc39c2d..ba74736 100644
--- a/maint/config/check_zlib.m4
+++ b/maint/config/check_zlib.m4
@@ -1,84 +1,26 @@
-dnl @synopsis CHECK_ZLIB()
-dnl
-dnl This macro searches for an installed zlib library. If nothing was
-dnl specified when calling configure, it searches first in /usr/local
-dnl and then in /usr. If the --with-zlib=DIR is specified, it will try
-dnl to find it in DIR/include/zlib.h and DIR/lib/libz.a. If
-dnl --without-zlib is specified, the library is not searched at all.
-dnl
-dnl If either the header file (zlib.h) or the library (libz) is not
-dnl found, the configuration exits on error, asking for a valid zlib
-dnl installation directory or --without-zlib.
-dnl
-dnl The macro defines the symbol HAVE_LIBZ if the library is found. You
-dnl should use autoheader to include a definition for this symbol in a
-dnl config.h file. Sample usage in a C/C++ source is as follows:
-dnl
-dnl   #ifdef HAVE_LIBZ
-dnl   #include <zlib.h>
-dnl   #endif /* HAVE_LIBZ */
-dnl
-dnl @category InstalledPackages
-dnl @author Loic Dachary <loic at senga.org>
-dnl @version 2004-09-20
-dnl @license GPLWithACException
-
 AC_DEFUN([CHECK_ZLIB],
+[
 
-#
-# Handle user hints
-#
-[AC_MSG_CHECKING(if zlib is wanted)
 AC_ARG_WITH(zlib,
 [  --with-zlib=DIR root directory path of zlib installation [defaults to
                     /usr/local or /usr if not found in /usr/local]
   --without-zlib to disable zlib usage completely],
 [if test "$withval" != no ; then
-  AC_MSG_RESULT(yes)
   if test -d "$withval"
   then
     ZLIB_HOME="$withval"
+    LDFLAGS="$LDFLAGS -L${ZLIB_HOME}/lib"
+    CPPFLAGS="$CPPFLAGS -I${ZLIB_HOME}/include"
+    __CP_ZLIB_LINK_FLAGS="-L${ZLIB_HOME}/lib"
+    __CP_ZLIB_INCLUDE_FLAGS="-I${ZLIB_HOME}/include"
   else
     AC_MSG_WARN([Sorry, $withval does not exist, checking usual places])
   fi
 else
-  AC_MSG_RESULT(no)
+  AC_MSG_ERROR(zlib is required)
 fi])
 
-#
-# Locate zlib, if wanted
-#
-if test -n "${ZLIB_HOME}"
-then
-        ZLIB_OLD_LDFLAGS=$LDFLAGS
-        ZLIB_OLD_CPPFLAGS=$LDFLAGS
-        LDFLAGS="$LDFLAGS -L${ZLIB_HOME}/lib"
-        CPPFLAGS="$CPPFLAGS -I${ZLIB_HOME}/include"
-        AC_LANG_SAVE
-        AC_LANG_C
-        AC_CHECK_LIB(z, inflateEnd, [zlib_cv_libz=yes], [zlib_cv_libz=no])
-        AC_CHECK_HEADER(zlib.h, [zlib_cv_zlib_h=yes], [zlib_cv_zlib_h=no])
-        AC_LANG_RESTORE
-        if test "$zlib_cv_libz" = "yes" -a "$zlib_cv_zlib_h" = "yes"
-        then
-                #
-                # If both library and header were found, use them
-                #
-                AC_CHECK_LIB(z, inflateEnd)
-                AC_MSG_CHECKING(zlib in ${ZLIB_HOME})
-                AC_MSG_RESULT(ok)
-		__CP_ZLIB_LINK_FLAGS="-L${ZLIB_HOME}/lib"
-		__CP_ZLIB_INCLUDE_FLAGS="-I${ZLIB_HOME}/include"
-        else
-                #
-                # If either header or library was not found, revert and bomb
-                #
-                AC_MSG_CHECKING(zlib in ${ZLIB_HOME})
-                LDFLAGS="$ZLIB_OLD_LDFLAGS"
-                CPPFLAGS="$ZLIB_OLD_CPPFLAGS"
-                AC_MSG_RESULT(failed)
-                AC_MSG_ERROR(either specify a valid zlib installation with --with-zlib=DIR or disable zlib usage with --without-zlib)
-        fi
-fi
+AC_CHECK_HEADER(zlib.h, [],[AC_MSG_ERROR(zlib.h not found)])
+AC_CHECK_LIB(z, inflateEnd, [],[AC_MSG_ERROR(libz not found)])
 
 ])
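
The documentation stripped from check_zlib.m4 above included a usage hint worth keeping in mind: guard zlib use on HAVE_LIBZ. A minimal sketch of that pattern, assuming the configure check still defines HAVE_LIBZ (which depends on the AC_CHECK_LIB defaults in use):

#ifdef HAVE_LIBZ
#include <zlib.h>
#endif /* HAVE_LIBZ */

#include <stdio.h>

int main(void)
{
#ifdef HAVE_LIBZ
    printf("zlib version: %s\n", zlibVersion());
#else
    printf("built without zlib\n");
#endif
    return 0;
}
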


hooks/post-receive
--


