[Darshan-commits] [Git][darshan/darshan][master] 2 commits: Integrate AutoPerf submodule

Shane Snyder xgitlab at cels.anl.gov
Mon Mar 29 19:58:07 CDT 2021



Shane Snyder pushed to branch master at darshan / darshan


Commits:
3b8be5c5 by Sudheer Chunduri at 2021-03-29T19:57:46-05:00
Integrate AutoPerf submodule

- - - - -
59379bfb by Shane Snyder at 2021-03-29T19:57:46-05:00
Merge branch 'autoperf-mod-update' into 'master'

Autoperf MPI module (APMPI) in Darshan

See merge request darshan/darshan!80
- - - - -


28 changed files:

- + .gitmodules
- darshan-log-format.h
- darshan-runtime/Makefile.in
- darshan-runtime/configure
- darshan-runtime/configure.in
- darshan-runtime/darshan-dynamic.h
- darshan-runtime/darshan-runtime-config.h.in
- darshan-runtime/doc/darshan-runtime.txt
- darshan-runtime/lib/darshan-core.c
- darshan-runtime/lib/darshan-mpiio.c
- darshan-runtime/lib/pkgconfig/darshan-runtime.pc.in
- darshan-runtime/share/ld-opts/darshan-base-ld-opts.in
- darshan-test/RELEASE-CHECKLIST.txt
- darshan-util/Makefile.in
- darshan-util/configure
- darshan-util/configure.in
- darshan-util/darshan-logutils.c
- darshan-util/darshan-logutils.h
- darshan-util/doc/darshan-util.txt
- darshan-util/pydarshan/Makefile
- darshan-util/pydarshan/darshan/backend/api_def_c.py
- darshan-util/pydarshan/darshan/backend/cffi_backend.py
- darshan-util/pydarshan/darshan/discover_darshan.py
- darshan-util/pydarshan/darshan/report.py
- + darshan-util/pydarshan/examples/01_darshan-apmpi.ipynb
- + darshan-util/pydarshan/examples/apmpi_analysis.py
- darshan-util/pydarshan/setup.py
- + modules/autoperf


Changes:

=====================================
.gitmodules
=====================================
@@ -0,0 +1,4 @@
+[submodule "modules/autoperf"]
+	path = modules/autoperf
+	url = git at xgitlab.cels.anl.gov:AutoPerf/autoperf.git
+	branch = master


=====================================
darshan-log-format.h
=====================================
@@ -120,6 +120,12 @@ struct darshan_base_record
 /* DXT */
 #include "darshan-dxt-log-format.h"
 #include "darshan-mdhim-log-format.h"
+#ifdef DARSHAN_USE_APXC
+#include "darshan-apxc-log-format.h"
+#endif
+#ifdef DARSHAN_USE_APMPI
+#include "darshan-apmpi-log-format.h"
+#endif
 
 /* X-macro for keeping module ordering consistent */
 /* NOTE: first val used to define module enum values, 
@@ -131,21 +137,40 @@ struct darshan_base_record
  * component -- NULL can be passed if there are no
  * logutil definitions)
  */
-#define DARSHAN_MODULE_IDS \
-    X(DARSHAN_NULL_MOD,     "NULL",     DARSHAN_NULL_VER,       NULL) \
-    X(DARSHAN_POSIX_MOD,    "POSIX",    DARSHAN_POSIX_VER,      &posix_logutils) \
-    X(DARSHAN_MPIIO_MOD,    "MPI-IO",   DARSHAN_MPIIO_VER,      &mpiio_logutils) \
-    X(DARSHAN_H5F_MOD,      "H5F",      DARSHAN_H5F_VER,        &hdf5_file_logutils) \
-    X(DARSHAN_H5D_MOD,      "H5D",      DARSHAN_H5D_VER,        &hdf5_dataset_logutils) \
-    X(DARSHAN_PNETCDF_MOD,  "PNETCDF",  DARSHAN_PNETCDF_VER,    &pnetcdf_logutils) \
-    X(DARSHAN_BGQ_MOD,      "BG/Q",     DARSHAN_BGQ_VER,        &bgq_logutils) \
-    X(DARSHAN_LUSTRE_MOD,   "LUSTRE",   DARSHAN_LUSTRE_VER,     &lustre_logutils) \
-    X(DARSHAN_STDIO_MOD,    "STDIO",    DARSHAN_STDIO_VER,      &stdio_logutils) \
-    /* DXT */ \
-    X(DXT_POSIX_MOD,       "DXT_POSIX",  DXT_POSIX_VER,         &dxt_posix_logutils) \
-    X(DXT_MPIIO_MOD,       "DXT_MPIIO",  DXT_MPIIO_VER,         &dxt_mpiio_logutils) \
-    X(DARSHAN_MDHIM_MOD,   "MDHIM",      DARSHAN_MDHIM_VER,     &mdhim_logutils)
 
+/* NOTE: if APXC support is not enabled, we still want to hold its spot
+ * in the module id space
+ */
+#ifdef DARSHAN_USE_APXC
+#define __APXC_VER APXC_VER
+#define __apxc_logutils &apxc_logutils
+#else
+#define __APXC_VER 0
+#define __apxc_logutils NULL
+#endif
+#ifdef DARSHAN_USE_APMPI
+#define __APMPI_VER APMPI_VER
+#define __apmpi_logutils &apmpi_logutils
+#else
+#define __APMPI_VER 0
+#define __apmpi_logutils NULL
+#endif
+
+#define DARSHAN_MODULE_IDS \
+    X(DARSHAN_NULL_MOD,     "NULL",       DARSHAN_NULL_VER,      NULL) \
+    X(DARSHAN_POSIX_MOD,    "POSIX",      DARSHAN_POSIX_VER,     &posix_logutils) \
+    X(DARSHAN_MPIIO_MOD,    "MPI-IO",     DARSHAN_MPIIO_VER,     &mpiio_logutils) \
+    X(DARSHAN_H5F_MOD,      "H5F",        DARSHAN_H5F_VER,       &hdf5_file_logutils) \
+    X(DARSHAN_H5D_MOD,      "H5D",        DARSHAN_H5D_VER,       &hdf5_dataset_logutils) \
+    X(DARSHAN_PNETCDF_MOD,  "PNETCDF",    DARSHAN_PNETCDF_VER,   &pnetcdf_logutils) \
+    X(DARSHAN_BGQ_MOD,      "BG/Q",       DARSHAN_BGQ_VER,       &bgq_logutils) \
+    X(DARSHAN_LUSTRE_MOD,   "LUSTRE",     DARSHAN_LUSTRE_VER,    &lustre_logutils) \
+    X(DARSHAN_STDIO_MOD,    "STDIO",      DARSHAN_STDIO_VER,     &stdio_logutils) \
+    X(DXT_POSIX_MOD,        "DXT_POSIX",  DXT_POSIX_VER,         &dxt_posix_logutils) \
+    X(DXT_MPIIO_MOD,        "DXT_MPIIO",  DXT_MPIIO_VER,         &dxt_mpiio_logutils) \
+    X(DARSHAN_MDHIM_MOD,    "MDHIM",      DARSHAN_MDHIM_VER,     &mdhim_logutils) \
+    X(DARSHAN_APXC_MOD,     "APXC", 	  __APXC_VER,            __apxc_logutils) \
+    X(DARSHAN_APMPI_MOD,    "APMPI",      __APMPI_VER,           __apmpi_logutils) 
 
 /* unique identifiers to distinguish between available darshan modules */
 /* NOTES: - valid ids range from [0...DARSHAN_MAX_MODS-1]


=====================================
darshan-runtime/Makefile.in
=====================================
@@ -44,6 +44,8 @@ BUILD_HDF5_MODULE = @BUILD_HDF5_MODULE@
 BUILD_BGQ_MODULE = @BUILD_BGQ_MODULE@
 BUILD_LUSTRE_MODULE = @BUILD_LUSTRE_MODULE@
 BUILD_MDHIM_MODULE = @BUILD_MDHIM_MODULE@
+BUILD_APMPI_MODULE = @BUILD_APMPI_MODULE@
+BUILD_APXC_MODULE = @BUILD_APXC_MODULE@
 
 DARSHAN_STATIC_MOD_OBJS =
 DARSHAN_DYNAMIC_MOD_OBJS =
@@ -104,6 +106,13 @@ CFLAGS += -DDARSHAN_MDHIM
 CFLAGS_SHARED += -DDARSHAN_MDHIM
 endif
 
+ifdef BUILD_APXC_MODULE
+include $(srcdir)/../modules/autoperf/apxc/Makefile.darshan
+endif
+ifdef BUILD_APMPI_MODULE
+include $(srcdir)/../modules/autoperf/apmpi/Makefile.darshan
+endif
+
 lib::
 	@mkdir -p $@
 
@@ -250,6 +259,9 @@ endif
 ifdef BUILD_MDHIM_MODULE
 	install -m 644 $(srcdir)/share/ld-opts/darshan-mdhim-ld-opts $(DESTDIR)$(datarootdir)/ld-opts/darshan-mdhim-ld-opts
 endif
+ifdef BUILD_APMPI_MODULE
+	install -m 644 $(srcdir)/../modules/autoperf/apmpi/share/ld-opts/autoperf-apmpi-ld-opts $(DESTDIR)$(datarootdir)/ld-opts/autoperf-apmpi-ld-opts
+endif
 ifdef ENABLE_MMAP_LOGS
 	install -m 755 share/darshan-mmap-epilog.sh $(DESTDIR)$(datarootdir)/darshan-mmap-epilog.sh
 endif


=====================================
darshan-runtime/configure
=====================================
@@ -621,6 +621,7 @@ ac_includes_default="\
 
 ac_subst_vars='LTLIBOBJS
 LIBOBJS
+with_papi
 DARSHAN_MDHIM_LD_OPTS
 BUILD_MDHIM_MODULE
 DARSHAN_LUSTRE_LD_FLAGS
@@ -630,6 +631,9 @@ DARSHAN_HDF5_LD_FLAGS
 DARSHAN_HDF5_ADD_DFLUSH_LD_OPTS
 DARSHAN_HDF5_LD_OPTS
 BUILD_HDF5_MODULE
+DARSHAN_APMPI_LD_OPTS
+BUILD_APMPI_MODULE
+BUILD_APXC_MODULE
 DARSHAN_PNETCDF_LD_OPTS
 BUILD_PNETCDF_MODULE
 DARSHAN_MPIIO_LD_OPTS
@@ -685,6 +689,7 @@ infodir
 docdir
 oldincludedir
 includedir
+runstatedir
 localstatedir
 sharedstatedir
 sysconfdir
@@ -725,6 +730,9 @@ enable_posix_mod
 enable_stdio_mod
 enable_dxt_mod
 enable_mpiio_mod
+enable_apmpi_mod
+enable_apmpi_coll_sync
+enable_apxc_mod
 enable_hdf5_mod
 enable_pnetcdf_mod
 enable_bgq_mod
@@ -779,6 +787,7 @@ datadir='${datarootdir}'
 sysconfdir='${prefix}/etc'
 sharedstatedir='${prefix}/com'
 localstatedir='${prefix}/var'
+runstatedir='${localstatedir}/run'
 includedir='${prefix}/include'
 oldincludedir='/usr/include'
 docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
@@ -1031,6 +1040,15 @@ do
   | -silent | --silent | --silen | --sile | --sil)
     silent=yes ;;
 
+  -runstatedir | --runstatedir | --runstatedi | --runstated \
+  | --runstate | --runstat | --runsta | --runst | --runs \
+  | --run | --ru | --r)
+    ac_prev=runstatedir ;;
+  -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
+  | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
+  | --run=* | --ru=* | --r=*)
+    runstatedir=$ac_optarg ;;
+
   -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
     ac_prev=sbindir ;;
   -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
@@ -1168,7 +1186,7 @@ fi
 for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
 		datadir sysconfdir sharedstatedir localstatedir includedir \
 		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
-		libdir localedir mandir
+		libdir localedir mandir runstatedir
 do
   eval ac_val=\$$ac_var
   # Remove trailing slashes.
@@ -1321,6 +1339,7 @@ Fine tuning of the installation directories:
   --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
   --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
   --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
+  --runstatedir=DIR       modifiable per-process data [LOCALSTATEDIR/run]
   --libdir=DIR            object code libraries [EPREFIX/lib]
   --includedir=DIR        C header files [PREFIX/include]
   --oldincludedir=DIR     C header files for non-gcc [/usr/include]
@@ -1361,6 +1380,13 @@ Optional Features:
   --disable-dxt-mod       Disables compilation and use of DXT module
   --disable-mpiio-mod     Disables compilation and use of MPI-IO module
                           (requires MPI)
+  --enable-apmpi-mod      Enables compilation and use of AUTOPERF MPI module
+                          (requires MPI)
+  --enable-apmpi-coll-sync
+                          Enable sync time calculation for MPI collectives
+  --enable-apxc-mod       Enables compilation and use of AUTOPERF APXC module
+                          (requires MPI)
+
   --enable-hdf5-mod       Enables compilation and use of HDF5 module
   --disable-pnetcdf-mod   Disables compilation and use of PnetCDF module
                           (requires MPI)
@@ -1762,6 +1788,35 @@ $as_echo "$ac_res" >&6; }
 
 } # ac_fn_c_check_header_compile
 
+# ac_fn_c_check_header_preproc LINENO HEADER VAR
+# ----------------------------------------------
+# Tests whether HEADER is present, setting the cache variable VAR accordingly.
+ac_fn_c_check_header_preproc ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <$2>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+  eval "$3=yes"
+else
+  eval "$3=no"
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+fi
+eval ac_res=\$$3
+	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_header_preproc
+
 # ac_fn_c_check_type LINENO TYPE VAR INCLUDES
 # -------------------------------------------
 # Tests whether TYPE exists after having included INCLUDES, setting cache
@@ -3980,6 +4035,7 @@ fi
 fi
 
 
+
 # Check whether --enable-mmap-logs was given.
 if test "${enable_mmap_logs+set}" = set; then :
   enableval=$enable_mmap_logs; if test "x$enableval" = "xyes" ; then
@@ -4167,6 +4223,81 @@ if test "x$enable_mpiio_mod" = "xno" || test "x$ENABLE_MPI" = "x"; then :
       DARSHAN_MPIIO_LD_OPTS=
 fi
 
+# AUTOPERF MPI module
+BUILD_APMPI_MODULE=
+DARSHAN_APMPI_LD_OPTS=
+# Check whether --enable-apmpi-mod was given.
+if test "${enable_apmpi_mod+set}" = set; then :
+  enableval=$enable_apmpi_mod;
+fi
+
+if test "x$enable_apmpi_mod" = "xyes"; then :
+
+    if test "x$ENABLE_MPI" = "x"; then :
+  as_fn_error $? "Autoperf MPI module requires MPI support" "$LINENO" 5
+
+fi
+    abssrcdir=$(readlink -f ${srcdir})
+    as_ac_Header=`$as_echo "ac_cv_header_${abssrcdir}/../modules/autoperf/apmpi/darshan-apmpi-log-format.h" | $as_tr_sh`
+ac_fn_c_check_header_preproc "$LINENO" "${abssrcdir}/../modules/autoperf/apmpi/darshan-apmpi-log-format.h" "$as_ac_Header"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+
+else
+  as_fn_error $? "The Autoperf MPI module is not present" "$LINENO" 5
+fi
+
+ # this last part tells it to only check for presence
+    BUILD_APMPI_MODULE=1
+    DARSHAN_APMPI_LD_OPTS="@${darshan_share_path}/ld-opts/autoperf-apmpi-ld-opts"
+
+fi
+
+# Check whether --enable-apmpi-coll-sync was given.
+if test "${enable_apmpi_coll_sync+set}" = set; then :
+  enableval=$enable_apmpi_coll_sync; if test "x$enableval" = "xyes" ; then
+
+$as_echo "#define __APMPI_COLL_SYNC 1" >>confdefs.h
+
+fi
+
+fi
+
+
+#AUTOPERF APXC module
+BUILD_APXC_MODULE=
+# Check whether --enable-apxc-mod was given.
+if test "${enable_apxc_mod+set}" = set; then :
+  enableval=$enable_apxc_mod;
+fi
+
+if test "x$enable_apxc_mod" = "xyes"; then :
+
+    if test "x$ENABLE_MPI" = "x"; then :
+  as_fn_error $? "Autoperf XC module requires MPI support" "$LINENO" 5
+
+fi
+    abssrcdir=$(readlink -f ${srcdir})
+    ac_fn_c_check_header_mongrel "$LINENO" "papi.h" "ac_cv_header_papi_h" "$ac_includes_default"
+if test "x$ac_cv_header_papi_h" = xyes; then :
+  with_papi=-lpapi
+else
+  as_fn_error $? "Cannot find papi header required for Autoperf XC module" "$LINENO" 5
+fi
+
+
+    as_ac_Header=`$as_echo "ac_cv_header_${abssrcdir}/../modules/autoperf/apxc/darshan-apxc-log-format.h" | $as_tr_sh`
+ac_fn_c_check_header_preproc "$LINENO" "${abssrcdir}/../modules/autoperf/apxc/darshan-apxc-log-format.h" "$as_ac_Header"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+
+else
+  as_fn_error $? "The Autoperf XC git submodule is not present" "$LINENO" 5
+fi
+
+ # this last part tells it to only check for presence
+    BUILD_APXC_MODULE=1
+
+fi
+
 # HDF5 module (disabled by default)
 BUILD_HDF5_MODULE=
 DARSHAN_HDF5_LD_OPTS=
@@ -4853,11 +4984,11 @@ fi
 rm -f core conftest.err conftest.$ac_objext \
     conftest$ac_exeext conftest.$ac_ext
 
-# At some point MPI-IO converted most API functions to be const correct.  We
-# need to test for this to determine how to define MPI-IO wrappers in
+# At some point MPI converted most API functions to be const correct.  We
+# need to test for this to determine how to define MPI wrappers in
 # Darshan.  First we try compiling without const qualifiers.
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for MPI-IO prototypes without const qualifier" >&5
-$as_echo_n "checking for MPI-IO prototypes without const qualifier... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for MPI prototypes without const qualifier" >&5
+$as_echo_n "checking for MPI prototypes without const qualifier... " >&6; }
 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
 
@@ -4877,14 +5008,17 @@ _ACEOF
 if ac_fn_c_try_compile "$LINENO"; then :
   { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
 $as_echo "yes" >&6; }
+     if test "x$BUILD_APMPI_MODULE" = "x1"; then :
+  as_fn_error $? "APMPI module requires MPI version 3+" "$LINENO" 5
+fi
 else
   { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
 $as_echo "no" >&6; }
 
     # unable to compile without const qualifiers.  Let's try again with
     # const qualifiers.
-    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for MPI-IO prototypes with const qualifier" >&5
-$as_echo_n "checking for MPI-IO prototypes with const qualifier... " >&6; }
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for MPI prototypes with const qualifier" >&5
+$as_echo_n "checking for MPI prototypes with const qualifier... " >&6; }
     cat confdefs.h - <<_ACEOF >conftest.$ac_ext
 /* end confdefs.h.  */
 
@@ -4905,7 +5039,7 @@ if ac_fn_c_try_compile "$LINENO"; then :
   { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
 $as_echo "yes" >&6; }
 
-$as_echo "#define HAVE_MPIIO_CONST 1" >>confdefs.h
+$as_echo "#define HAVE_MPI_CONST 1" >>confdefs.h
 
 fi
 rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
@@ -5098,6 +5232,10 @@ DARSHAN_VERSION="3.2.1"
 
 
 
+
+
+
+
 
 
 
@@ -6476,3 +6614,17 @@ else
   { $as_echo "$as_me:${as_lineno-$LINENO}: MDHIM module support:   yes" >&5
 $as_echo "$as_me: MDHIM module support:   yes" >&6;}
 fi
+if test "x$BUILD_APXC_MODULE" = "x"; then :
+  { $as_echo "$as_me:${as_lineno-$LINENO}: AUTOPERF APXC module support:   no" >&5
+$as_echo "$as_me: AUTOPERF APXC module support:   no" >&6;}
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: AUTOPERF APXC module support:   yes" >&5
+$as_echo "$as_me: AUTOPERF APXC module support:   yes" >&6;}
+fi
+if test "x$BUILD_APMPI_MODULE" = "x"; then :
+  { $as_echo "$as_me:${as_lineno-$LINENO}: AUTOPERF MPI module support:   no" >&5
+$as_echo "$as_me: AUTOPERF MPI module support:   no" >&6;}
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: AUTOPERF MPI module support:   yes" >&5
+$as_echo "$as_me: AUTOPERF MPI module support:   yes" >&6;}
+fi


=====================================
darshan-runtime/configure.in
=====================================
@@ -97,6 +97,7 @@ AC_ARG_ENABLE(group-readable-logs,
 fi]
 ,)
 
+
 AC_ARG_ENABLE(mmap-logs,
 [  --enable-mmap-logs      Enables ability to mmap I/O data to log file],
 [if test "x$enableval" = "xyes" ; then
@@ -220,6 +221,53 @@ AS_IF([test "x$enable_mpiio_mod" = "xno" || test "x$ENABLE_MPI" = "x"],
       BUILD_MPIIO_MODULE=
       DARSHAN_MPIIO_LD_OPTS=)
 
+# AUTOPERF MPI module
+BUILD_APMPI_MODULE=
+DARSHAN_APMPI_LD_OPTS=
+AC_ARG_ENABLE(apmpi-mod,
+              AS_HELP_STRING([--enable-apmpi-mod], [Enables compilation and use of AUTOPERF MPI module (requires MPI)]))
+AS_IF([test "x$enable_apmpi_mod" = "xyes"], [
+    AS_IF([test "x$ENABLE_MPI" = "x"],
+        [AC_MSG_ERROR(Autoperf MPI module requires MPI support)]
+    )
+    abssrcdir=$(readlink -f ${srcdir})
+    AC_CHECK_HEADER([${abssrcdir}/../modules/autoperf/apmpi/darshan-apmpi-log-format.h],
+                    [],
+                    [AC_MSG_ERROR([The Autoperf MPI module is not present])],
+                    [-]) # this last part tells it to only check for presence
+    BUILD_APMPI_MODULE=1
+    DARSHAN_APMPI_LD_OPTS="@${darshan_share_path}/ld-opts/autoperf-apmpi-ld-opts"
+])
+
+AC_ARG_ENABLE(apmpi-coll-sync,
+[  --enable-apmpi-coll-sync
+                          Enable sync time calculation for MPI collectives],
+[if test "x$enableval" = "xyes" ; then
+    AC_DEFINE(__APMPI_COLL_SYNC, 1, Define if APMPI should enable sync time calculation for MPI collectives)
+fi]
+,)
+
+#AUTOPERF APXC module
+BUILD_APXC_MODULE=
+AC_ARG_ENABLE(apxc-mod,
+              AS_HELP_STRING([--enable-apxc-mod], [Enables compilation and use of AUTOPERF APXC module (requires MPI)])
+)
+AS_IF([test "x$enable_apxc_mod" = "xyes"], [
+    AS_IF([test "x$ENABLE_MPI" = "x"],
+        [AC_MSG_ERROR(Autoperf XC module requires MPI support)]
+    )
+    abssrcdir=$(readlink -f ${srcdir})
+    AC_CHECK_HEADER([papi.h],
+                    [with_papi=-lpapi],
+                    [AC_MSG_ERROR([Cannot find papi header required for Autoperf XC module])],
+                    [])
+    AC_CHECK_HEADER([${abssrcdir}/../modules/autoperf/apxc/darshan-apxc-log-format.h],
+                    [],
+                    [AC_MSG_ERROR([The Autoperf XC git submodule is not present])],
+                    [-]) # this last part tells it to only check for presence
+    BUILD_APXC_MODULE=1
+])
+
 # HDF5 module (disabled by default)
 BUILD_HDF5_MODULE=
 DARSHAN_HDF5_LD_OPTS=
@@ -408,10 +456,10 @@ AC_TRY_LINK([#include <mpi.h>], [
     AC_MSG_RESULT(yes),
             AC_MSG_ERROR(Darshan requires a version of MPI with MPI-IO support))
 
-# At some point MPI-IO converted most API functions to be const correct.  We
-# need to test for this to determine how to define MPI-IO wrappers in
+# At some point MPI converted most API functions to be const correct.  We
+# need to test for this to determine how to define MPI wrappers in
 # Darshan.  First we try compiling without const qualifiers.
-AC_MSG_CHECKING(for MPI-IO prototypes without const qualifier)
+AC_MSG_CHECKING(for MPI prototypes without const qualifier)
 AC_TRY_COMPILE(
     [
     #include <mpi.h>
@@ -420,12 +468,14 @@ AC_TRY_COMPILE(
         {return 0;}
     ],
     [],
-    AC_MSG_RESULT(yes),
-    AC_MSG_RESULT(no)
+    [AC_MSG_RESULT(yes)
+     AS_IF([test "x$BUILD_APMPI_MODULE" = "x1"],
+	   AC_MSG_ERROR(APMPI module requires MPI version 3+))],
+    [AC_MSG_RESULT(no)
 
     # unable to compile without const qualifiers.  Let's try again with
     # const qualifiers.
-    AC_MSG_CHECKING(for MPI-IO prototypes with const qualifier)
+    AC_MSG_CHECKING(for MPI prototypes with const qualifier)
     AC_TRY_COMPILE(
         [
         #include <mpi.h>
@@ -435,10 +485,10 @@ AC_TRY_COMPILE(
         ],
         [],
         AC_MSG_RESULT(yes)
-        AC_DEFINE(HAVE_MPIIO_CONST, 1, Define if MPI-IO prototypes use const qualifier),
+        AC_DEFINE(HAVE_MPI_CONST, 1, Define if MPI prototypes use const qualifier),
         ,
         AC_MSG_ERROR(Darshan is unable to find a compatible MPI_File_open prototype)
-    )
+    )]
 )
 
 # determine if mpicc can create shared libraries that use MPI functions
@@ -529,6 +579,9 @@ AC_SUBST(BUILD_MPIIO_MODULE)
 AC_SUBST(DARSHAN_MPIIO_LD_OPTS)
 AC_SUBST(BUILD_PNETCDF_MODULE)
 AC_SUBST(DARSHAN_PNETCDF_LD_OPTS)
+AC_SUBST(BUILD_APXC_MODULE)
+AC_SUBST(BUILD_APMPI_MODULE)
+AC_SUBST(DARSHAN_APMPI_LD_OPTS)
 AC_SUBST(BUILD_HDF5_MODULE)
 AC_SUBST(DARSHAN_HDF5_LD_OPTS)
 AC_SUBST(DARSHAN_HDF5_ADD_DFLUSH_LD_OPTS)
@@ -538,6 +591,7 @@ AC_SUBST(BUILD_LUSTRE_MODULE)
 AC_SUBST(DARSHAN_LUSTRE_LD_FLAGS)
 AC_SUBST(BUILD_MDHIM_MODULE)
 AC_SUBST(DARSHAN_MDHIM_LD_OPTS)
+AC_SUBST(with_papi)
 AC_OUTPUT(Makefile
 darshan-mk-log-dirs.pl
 darshan-gen-cc.pl
@@ -570,3 +624,5 @@ AS_IF([test "x$BUILD_PNETCDF_MODULE" = "x"], [AC_MSG_NOTICE(PnetCDF module suppo
 AS_IF([test "x$BUILD_BGQ_MODULE" = "x"],     [AC_MSG_NOTICE(BG/Q module support:    no)], [AC_MSG_NOTICE(BG/Q module support:    yes)])
 AS_IF([test "x$BUILD_LUSTRE_MODULE" = "x"],  [AC_MSG_NOTICE(Lustre module support:  no)], [AC_MSG_NOTICE(Lustre module support:  yes)])
 AS_IF([test "x$BUILD_MDHIM_MODULE" = "x"],   [AC_MSG_NOTICE(MDHIM module support:   no)], [AC_MSG_NOTICE(MDHIM module support:   yes)])
+AS_IF([test "x$BUILD_APXC_MODULE" = "x"],   [AC_MSG_NOTICE(AUTOPERF APXC module support:   no)], [AC_MSG_NOTICE(AUTOPERF APXC module support:   yes)])
+AS_IF([test "x$BUILD_APMPI_MODULE" = "x"],   [AC_MSG_NOTICE(AUTOPERF MPI module support:   no)], [AC_MSG_NOTICE(AUTOPERF MPI module support:   yes)])


=====================================
darshan-runtime/darshan-dynamic.h
=====================================
@@ -17,22 +17,22 @@ DARSHAN_EXTERN_DECL(PMPI_File_close, int, (MPI_File *fh));
 DARSHAN_EXTERN_DECL(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 DARSHAN_EXTERN_DECL(PMPI_File_iread, int, (MPI_File fh, void  *buf, int  count, MPI_Datatype  datatype, __D_MPI_REQUEST  *request));
 DARSHAN_EXTERN_DECL(PMPI_File_iread_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_iwrite, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_iwrite, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_open, int, (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_open, int, (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh));
@@ -46,53 +46,53 @@ DARSHAN_EXTERN_DECL(PMPI_File_read, int, (MPI_File fh, void *buf, int count, MPI
 DARSHAN_EXTERN_DECL(PMPI_File_read_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
 DARSHAN_EXTERN_DECL(PMPI_File_read_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 DARSHAN_EXTERN_DECL(PMPI_File_read_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char *datarep, MPI_Info info));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info));
 #endif
 DARSHAN_EXTERN_DECL(PMPI_File_sync, int, (MPI_File fh));
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_write_all, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_write_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_write, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_write, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_write_ordered, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_write_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_File_write_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #else
 DARSHAN_EXTERN_DECL(PMPI_File_write_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
@@ -101,32 +101,44 @@ DARSHAN_EXTERN_DECL(PMPI_Finalize, int, ());
 DARSHAN_EXTERN_DECL(PMPI_Init, int, (int *argc, char ***argv));
 DARSHAN_EXTERN_DECL(PMPI_Init_thread, int, (int *argc, char ***argv, int required, int *provided));
 DARSHAN_EXTERN_DECL(PMPI_Wtime, double, ());
+#ifdef HAVE_MPI_CONST
+DARSHAN_EXTERN_DECL(PMPI_Allreduce, int, (const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm));
+#else
 DARSHAN_EXTERN_DECL(PMPI_Allreduce, int, (void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm));
+#endif
 DARSHAN_EXTERN_DECL(PMPI_Bcast, int, (void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm));
 DARSHAN_EXTERN_DECL(PMPI_Comm_rank, int, (MPI_Comm comm, int *rank));
 DARSHAN_EXTERN_DECL(PMPI_Comm_size, int, (MPI_Comm comm, int *size));
+#ifdef HAVE_MPI_CONST
+DARSHAN_EXTERN_DECL(PMPI_Scan, int, (const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm));
+#else
 DARSHAN_EXTERN_DECL(PMPI_Scan, int, (void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm));
+#endif
 DARSHAN_EXTERN_DECL(PMPI_Type_commit, int, (MPI_Datatype *datatype));
 DARSHAN_EXTERN_DECL(PMPI_Type_contiguous, int, (int count, MPI_Datatype oldtype, MPI_Datatype *newtype));
 DARSHAN_EXTERN_DECL(PMPI_Type_extent, int, (MPI_Datatype datatype, MPI_Aint *extent));
 DARSHAN_EXTERN_DECL(PMPI_Type_free, int, (MPI_Datatype *datatype));
+#ifdef HAVE_MPI_CONST
+DARSHAN_EXTERN_DECL(PMPI_Type_hindexed, int, (int count, const int *array_of_blocklengths, MPI_Aint *array_of_displacements, MPI_Datatype oldtype, MPI_Datatype *newtype));
+#else
 DARSHAN_EXTERN_DECL(PMPI_Type_hindexed, int, (int count, int *array_of_blocklengths, MPI_Aint *array_of_displacements, MPI_Datatype oldtype, MPI_Datatype *newtype));
+#endif
 DARSHAN_EXTERN_DECL(PMPI_Type_get_envelope, int, (MPI_Datatype datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner));
 DARSHAN_EXTERN_DECL(PMPI_Type_size, int, (MPI_Datatype datatype, int *size));
 DARSHAN_EXTERN_DECL(PMPI_Op_create, int, (MPI_User_function *function, int commute, MPI_Op *op));
 DARSHAN_EXTERN_DECL(PMPI_Op_free, int, (MPI_Op *op));
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_Reduce, int, (const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm));
 #else
 DARSHAN_EXTERN_DECL(PMPI_Reduce, int, (void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_Send, int, (const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm));
 #else
 DARSHAN_EXTERN_DECL(PMPI_Send, int, (void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm));
 #endif
 DARSHAN_EXTERN_DECL(PMPI_Recv, int, (void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status));
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_EXTERN_DECL(PMPI_Gather, int, (const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm));
 #else
 DARSHAN_EXTERN_DECL(PMPI_Gather, int, (void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm));


=====================================
darshan-runtime/darshan-runtime-config.h.in
=====================================
@@ -33,8 +33,8 @@
 /* Define if build is MPI-enabled */
 #undef HAVE_MPI
 
-/* Define if MPI-IO prototypes use const qualifier */
-#undef HAVE_MPIIO_CONST
+/* Define if MPI prototypes use const qualifier */
+#undef HAVE_MPI_CONST
 
 /* Define if off64_t type is defined */
 #undef HAVE_OFF64_T
@@ -111,6 +111,9 @@
 # endif
 #endif
 
+/* Define if APMPI should enable sync time calculation for MPI collectives */
+#undef __APMPI_COLL_SYNC
+
 /* Define if cuserid() should be disabled */
 #undef __DARSHAN_DISABLE_CUSERID
 


=====================================
darshan-runtime/doc/darshan-runtime.txt
=====================================
@@ -445,6 +445,28 @@ configuration file:
 export DXT_TRIGGER_CONF_PATH=/path/to/dxt/config/file
 ----
 
+== Using AutoPerf instrumentation modules
+
+AutoPerf offers two additional Darshan instrumentation modules that may be enabled for MPI applications.
+
+* APMPI: Instrumentation of over 70 MPI-3 communication routines, providing operation counts, datatype sizes, and timing information for each application MPI rank.
+* APXC: Instrumentation of Cray XC environments to provide network and compute counters of interest, via PAPI.
+
+Users can request Darshan to build the APMPI and APXC modules by passing `--enable-apmpi-mod` and `--enable-apxc-mod` options to configure, respectively. Note that these options can be requested independently (i.e., you can build Darshan with APMPI support but not APXC support, and vice versa).
+
+The only prerequisite for the APMPI module is that Darshan be configured with an MPI-3 compliant compiler. For APXC, the user must obviously be using a Cray XC system and must make the PAPI interface available to Darshan (i.e., by running `module load papi`, before building Darshan).
+
+If using the APMPI module, users can additionally specify the `--enable-apmpi-coll-sync` configure option to force Darshan to synchronize before calling underlying MPI routines and to capture additional timing information on how synchronized processes are. Users should note this option will impose additional overheads, but can be useful to help diagnose whether applications are spending a lot of time synchronizing as part of collective communication calls. For this reason, we do not recommend users setting this particular option for production Darshan deployments.
+
+[NOTE]
+====
+The AutoPerf instrumentation modules are provided as Git submodules to Darshan's main repository, so if building Darshan source that has been cloned from Git, it is necessary to first retrieve the AutoPerf submodules by running the following command:
+
+----
+git submodule update --init
+----
+====
+
 == Darshan installation recipes
 
 The following recipes provide examples for prominent HPC systems.


=====================================
darshan-runtime/lib/darshan-core.c
=====================================
@@ -89,6 +89,10 @@ char** user_darshan_path_exclusions = NULL;
 extern void bgq_runtime_initialize();
 #endif
 
+#ifdef DARSHAN_USE_APXC
+extern void apxc_runtime_initialize();
+#endif
+
 /* array of init functions for modules which need to be statically
  * initialized by darshan at startup time
  */
@@ -96,6 +100,9 @@ void (*mod_static_init_fns[])(void) =
 {
 #ifdef DARSHAN_BGQ
     &bgq_runtime_initialize,
+#endif
+#ifdef DARSHAN_USE_APXC
+    &apxc_runtime_initialize,
 #endif
     NULL
 };
@@ -602,6 +609,7 @@ void darshan_core_shutdown()
                     HASH_FIND(hlink, final_core->name_hash, &shared_recs[j],
                         sizeof(darshan_record_id), ref);
                     assert(ref);
+
                     if(DARSHAN_MOD_FLAG_ISSET(ref->global_mod_flags, i))
                     {
                         mod_shared_recs[mod_shared_rec_cnt++] = shared_recs[j];
@@ -611,8 +619,10 @@ void darshan_core_shutdown()
                 /* allow the module an opportunity to reduce shared files */
                 if(this_mod->mod_funcs.mod_redux_func && (mod_shared_rec_cnt > 0) &&
                    (!getenv("DARSHAN_DISABLE_SHARED_REDUCTION")))
+                {
                     this_mod->mod_funcs.mod_redux_func(mod_buf, final_core->mpi_comm,
                         mod_shared_recs, mod_shared_rec_cnt);
+                }
             }
 #endif
 


=====================================
darshan-runtime/lib/darshan-mpiio.c
=====================================
@@ -30,22 +30,22 @@ DARSHAN_FORWARD_DECL(PMPI_File_close, int, (MPI_File *fh));
 DARSHAN_FORWARD_DECL(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 DARSHAN_FORWARD_DECL(PMPI_File_iread, int, (MPI_File fh, void  *buf, int  count, MPI_Datatype  datatype, __D_MPI_REQUEST  *request));
 DARSHAN_FORWARD_DECL(PMPI_File_iread_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh));
@@ -59,53 +59,53 @@ DARSHAN_FORWARD_DECL(PMPI_File_read, int, (MPI_File fh, void *buf, int count, MP
 DARSHAN_FORWARD_DECL(PMPI_File_read_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
 DARSHAN_FORWARD_DECL(PMPI_File_read_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 DARSHAN_FORWARD_DECL(PMPI_File_read_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char *datarep, MPI_Info info));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info));
 #endif
 DARSHAN_FORWARD_DECL(PMPI_File_sync, int, (MPI_File fh));
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #endif
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
 #else
 DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
@@ -318,7 +318,7 @@ static int my_rank = -1;
  *        Wrappers for MPI-IO functions of interest       * 
  **********************************************************/
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh) 
 #else
 int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh) 
@@ -352,7 +352,7 @@ int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, char *filename, int amode, MPI_In
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_open, int,  (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open)
 #else
 DARSHAN_WRAPPER_MAP(PMPI_File_open, int,  (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open)
@@ -381,7 +381,7 @@ int DARSHAN_DECL(MPI_File_read)(MPI_File fh, void *buf, int count,
 DARSHAN_WRAPPER_MAP(PMPI_File_read, int, (MPI_File fh, void *buf, int count,
     MPI_Datatype datatype, MPI_Status *status), MPI_File_read)
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_write)(MPI_File fh, const void *buf, int count,
     MPI_Datatype datatype, MPI_Status *status)
 #else
@@ -406,7 +406,7 @@ int DARSHAN_DECL(MPI_File_write)(MPI_File fh, void *buf, int count,
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_write, int, (MPI_File fh, const void *buf, int count,
     MPI_Datatype datatype, MPI_Status *status), MPI_File_write)
 #else
@@ -436,7 +436,7 @@ int DARSHAN_DECL(MPI_File_read_at)(MPI_File fh, MPI_Offset offset, void *buf,
 DARSHAN_WRAPPER_MAP(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf,
     int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_read_at)
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, const void *buf,
     int count, MPI_Datatype datatype, MPI_Status *status)
 #else
@@ -460,7 +460,7 @@ int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, void *buf,
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf,
     int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at)
 #else
@@ -491,7 +491,7 @@ int DARSHAN_DECL(MPI_File_read_all)(MPI_File fh, void * buf, int count, MPI_Data
 DARSHAN_WRAPPER_MAP(PMPI_File_read_all, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
         MPI_File_read_all)
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
 #else
 int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
@@ -515,7 +515,7 @@ int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, void * buf, int count, MPI_Dat
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_write_all, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
         MPI_File_write_all)
 #else
@@ -546,7 +546,7 @@ DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset,
     int count, MPI_Datatype datatype, MPI_Status * status),
         MPI_File_read_at_all)
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, const void * buf,
     int count, MPI_Datatype datatype, MPI_Status * status)
 #else
@@ -570,7 +570,7 @@ int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, void * b
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void * buf,
     int count, MPI_Datatype datatype, MPI_Status * status),
         MPI_File_write_at_all)
@@ -603,7 +603,7 @@ int DARSHAN_DECL(MPI_File_read_shared)(MPI_File fh, void * buf, int count, MPI_D
 DARSHAN_WRAPPER_MAP(PMPI_File_read_shared, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
         MPI_File_read_shared)
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
 #else
 int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
@@ -627,7 +627,7 @@ int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, void * buf, int count, MPI_
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_write_shared, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
         MPI_File_write_shared)
 #else
@@ -661,7 +661,7 @@ DARSHAN_WRAPPER_MAP(PMPI_File_read_ordered, int, (MPI_File fh, void * buf, int c
     MPI_Datatype datatype, MPI_Status * status),
         MPI_File_read_ordered)
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, const void * buf, int count,
     MPI_Datatype datatype, MPI_Status * status)
 #else
@@ -687,7 +687,7 @@ int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, void * buf, int count,
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered, int, (MPI_File fh, const void * buf, int count,
     MPI_Datatype datatype, MPI_Status * status),
         MPI_File_write_ordered)
@@ -719,7 +719,7 @@ int DARSHAN_DECL(MPI_File_read_all_begin)(MPI_File fh, void * buf, int count, MP
 DARSHAN_WRAPPER_MAP(PMPI_File_read_all_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
         MPI_File_read_all_begin)
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
 #else
 int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
@@ -743,7 +743,7 @@ int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, void * buf, int count, M
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype),
         MPI_File_write_all_begin)
 #else
@@ -773,7 +773,7 @@ int DARSHAN_DECL(MPI_File_read_at_all_begin)(MPI_File fh, MPI_Offset offset, voi
 DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void * buf,
     int count, MPI_Datatype datatype), MPI_File_read_at_all_begin)
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, const void * buf,
     int count, MPI_Datatype datatype)
 #else
@@ -797,7 +797,7 @@ int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, vo
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void * buf,
     int count, MPI_Datatype datatype), MPI_File_write_at_all_begin)
 #else
@@ -828,7 +828,7 @@ int DARSHAN_DECL(MPI_File_read_ordered_begin)(MPI_File fh, void * buf, int count
 DARSHAN_WRAPPER_MAP(PMPI_File_read_ordered_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
         MPI_File_read_ordered_begin)
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_write_ordered_begin)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
 #else
 int DARSHAN_DECL(MPI_File_write_ordered_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
@@ -852,7 +852,7 @@ int DARSHAN_DECL(MPI_File_write_ordered_begin)(MPI_File fh, void * buf, int coun
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype),
         MPI_File_write_ordered_begin)
 #else
@@ -882,7 +882,7 @@ int DARSHAN_DECL(MPI_File_iread)(MPI_File fh, void * buf, int count, MPI_Datatyp
 DARSHAN_WRAPPER_MAP(PMPI_File_iread, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST * request),
         MPI_File_iread)
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_iwrite)(MPI_File fh, const void * buf, int count,
     MPI_Datatype datatype, __D_MPI_REQUEST * request)
 #else
@@ -907,7 +907,7 @@ int DARSHAN_DECL(MPI_File_iwrite)(MPI_File fh, void * buf, int count,
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_iwrite, int, (MPI_File fh, const void * buf, int count,
     MPI_Datatype datatype, __D_MPI_REQUEST * request),
         MPI_File_iwrite)
@@ -940,7 +940,7 @@ DARSHAN_WRAPPER_MAP(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, vo
     int count, MPI_Datatype datatype, __D_MPI_REQUEST *request),
         MPI_File_iread_at)
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_iwrite_at)(MPI_File fh, MPI_Offset offset, const void * buf,
     int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
 #else
@@ -964,7 +964,7 @@ int DARSHAN_DECL(MPI_File_iwrite_at)(MPI_File fh, MPI_Offset offset, void * buf,
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void * buf,
     int count, MPI_Datatype datatype, __D_MPI_REQUEST *request),
         MPI_File_iwrite_at)
@@ -999,7 +999,7 @@ DARSHAN_WRAPPER_MAP(PMPI_File_iread_shared, int, (MPI_File fh, void * buf, int c
     MPI_Datatype datatype, __D_MPI_REQUEST * request),
         MPI_File_iread_shared)
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_iwrite_shared)(MPI_File fh, const void * buf, int count,
     MPI_Datatype datatype, __D_MPI_REQUEST * request)
 #else
@@ -1025,7 +1025,7 @@ int DARSHAN_DECL(MPI_File_iwrite_shared)(MPI_File fh, void * buf, int count,
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_shared, int, (MPI_File fh, const void * buf, int count,
     MPI_Datatype datatype, __D_MPI_REQUEST * request),
         MPI_File_iwrite_shared)
@@ -1066,7 +1066,7 @@ int DARSHAN_DECL(MPI_File_sync)(MPI_File fh)
 }
 DARSHAN_WRAPPER_MAP(PMPI_File_sync, int, (MPI_File fh), MPI_File_sync)
 
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 int DARSHAN_DECL(MPI_File_set_view)(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
     MPI_Datatype filetype, const char *datarep, MPI_Info info)
 #else
@@ -1106,7 +1106,7 @@ int DARSHAN_DECL(MPI_File_set_view)(MPI_File fh, MPI_Offset disp, MPI_Datatype e
 
     return(ret);
 }
-#ifdef HAVE_MPIIO_CONST
+#ifdef HAVE_MPI_CONST
 DARSHAN_WRAPPER_MAP(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
     MPI_Datatype filetype, const char *datarep, MPI_Info info), MPI_File_set_view)
 #else


=====================================
darshan-runtime/lib/pkgconfig/darshan-runtime.pc.in
=====================================
@@ -15,5 +15,5 @@ darshan_libdir= -L${darshan_prefix}/lib
 darshan_linkopts="-Wl,@${darshan_share}/ld-opts/darshan-base-ld-opts"
 
 Cflags:
-Libs: ${darshan_libdir} -Wl,-rpath=${darshan_prefix}/lib -Wl,-no-as-needed -ldarshan @DARSHAN_LUSTRE_LD_FLAGS@ @DARSHAN_HDF5_LD_FLAGS@
-Libs.private: ${darshan_linkopts} ${darshan_libdir} -lfmpich -lmpichcxx -ldarshan @DARSHAN_LUSTRE_LD_FLAGS@ -lz -lrt -lpthread
+Libs: ${darshan_libdir} -Wl,-rpath=${darshan_prefix}/lib -Wl,-no-as-needed -ldarshan @DARSHAN_LUSTRE_LD_FLAGS@ @DARSHAN_HDF5_LD_FLAGS@ @with_papi@
+Libs.private: ${darshan_linkopts} ${darshan_libdir} -lfmpich -lmpichcxx -ldarshan @DARSHAN_LUSTRE_LD_FLAGS@ -lz -lrt -lpthread @with_papi@


=====================================
darshan-runtime/share/ld-opts/darshan-base-ld-opts.in
=====================================
@@ -13,3 +13,4 @@
 @DARSHAN_PNETCDF_LD_OPTS@
 @DARSHAN_HDF5_LD_OPTS@
 @DARSHAN_MDHIM_LD_OPTS@
+ at DARSHAN_APMPI_LD_OPTS@


=====================================
darshan-test/RELEASE-CHECKLIST.txt
=====================================
@@ -28,8 +28,9 @@ Notes on how to release a new version of Darshan
   - run regression tests on BG platform (see
     darshan-test/regression/README.BG.ALCF.txt)
 - export the tag and tar gzip it
-  - easiest method is to do a fresh checkout and remove the .git
-    subdirectory
+  - do a fresh checkout of the Darshan tag
+  - make sure to pull in AutoPerf submodule (git submodule update --init)
+  - remove the .git subdirectory
   - upload .tar.gz file to /mcs/ftp/pub/darshan/releases
 - generate web documentation from asciidoc by running make in
   darshan-util/doc/ and darshan-runtime/doc/ directories


=====================================
darshan-util/Makefile.in
=====================================
@@ -10,6 +10,9 @@ sbindir = @sbindir@
 bindir = @bindir@
 libdir = @libdir@
 
+DARSHAN_USE_APXC = @DARSHAN_USE_APXC@
+DARSHAN_USE_APMPI = @DARSHAN_USE_APMPI@
+
 DARSHAN_LOG_FORMAT = $(srcdir)/../darshan-log-format.h
 DARSHAN_MOD_LOG_FORMATS = $(srcdir)/../darshan-posix-log-format.h \
 			  $(srcdir)/../darshan-mpiio-log-format.h \
@@ -79,6 +82,13 @@ AR=@AR@
 
 LIBS = -lz @LIBBZ2@
 
+ifdef DARSHAN_USE_APXC
+include $(srcdir)/../modules/autoperf/apxc/util/Makefile.darshan
+endif
+ifdef DARSHAN_USE_APMPI
+include $(srcdir)/../modules/autoperf/apmpi/util/Makefile.darshan
+endif
+
 mktestdir::
 	mkdir -p test
 


=====================================
darshan-util/configure
=====================================
@@ -621,6 +621,8 @@ ac_includes_default="\
 
 ac_subst_vars='LTLIBOBJS
 LIBOBJS
+DARSHAN_USE_APMPI
+DARSHAN_USE_APXC
 DARSHAN_UTIL_VERSION
 DARSHAN_PYDARSHAN_PATH
 DARSHAN_ENABLE_PYDARSHAN
@@ -699,6 +701,8 @@ with_zlib
 with_bzlib
 enable_shared
 enable_pydarshan
+enable_autoperf_apxc
+enable_autoperf_apmpi
 '
       ac_precious_vars='build_alias
 host_alias
@@ -1332,6 +1336,8 @@ Optional Features:
   --enable-FEATURE[=ARG]  include FEATURE [ARG=yes]
   --enable-shared   enables building of shared darshan-util library
   --enable-pydarshan   enables build/install of pydarshan module and tools
+  --enable-autoperf-apxc    Enables compilation and use of the AutoPerf Cray XC module
+  --enable-autoperf-apmpi    Enables compilation and use of the AutoPerf MPI module
 
 Optional Packages:
   --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
@@ -1715,6 +1721,35 @@ fi
 
 } # ac_fn_c_try_link
 
+# ac_fn_c_check_header_preproc LINENO HEADER VAR
+# ----------------------------------------------
+# Tests whether HEADER is present, setting the cache variable VAR accordingly.
+ac_fn_c_check_header_preproc ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <$2>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+  eval "$3=yes"
+else
+  eval "$3=no"
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+fi
+eval ac_res=\$$3
+	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_header_preproc
+
 # ac_fn_c_check_func LINENO FUNC VAR
 # ----------------------------------
 # Tests whether FUNC exists, setting the cache variable VAR accordingly
@@ -4367,6 +4402,42 @@ fi
 fi
 
 
+# Check whether --enable-autoperf-apxc was given.
+if test "${enable_autoperf_apxc+set}" = set; then :
+  enableval=$enable_autoperf_apxc; enable_autoperf_apxc=yes
+fi
+
+# Check whether --enable-autoperf-apmpi was given.
+if test "${enable_autoperf_apmpi+set}" = set; then :
+  enableval=$enable_autoperf_apmpi; enable_autoperf_apmpi=yes
+fi
+
+
+if test x$enable_autoperf_apxc = xyes; then
+    abssrcdir=$(readlink -f ${srcdir})
+    as_ac_Header=`$as_echo "ac_cv_header_${abssrcdir}/../modules/autoperf/apxc/darshan-apxc-log-format.h" | $as_tr_sh`
+ac_fn_c_check_header_preproc "$LINENO" "${abssrcdir}/../modules/autoperf/apxc/darshan-apxc-log-format.h" "$as_ac_Header"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+  DARSHAN_USE_APXC=1
+else
+  as_fn_error $? "The autoperf APXC module is not present" "$LINENO" 5
+fi
+
+ # this last part tells it to only check for presence
+fi
+if test x$enable_autoperf_apmpi = xyes; then
+    abssrcdir=$(readlink -f ${srcdir})
+    as_ac_Header=`$as_echo "ac_cv_header_${abssrcdir}/../modules/autoperf/apmpi/darshan-apmpi-log-format.h" | $as_tr_sh`
+ac_fn_c_check_header_preproc "$LINENO" "${abssrcdir}/../modules/autoperf/apmpi/darshan-apmpi-log-format.h" "$as_ac_Header"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+  DARSHAN_USE_APMPI=1
+else
+  as_fn_error $? "The autoperf MPI module is not present" "$LINENO" 5
+fi
+
+ # this last part tells it to only check for presence
+fi
+
 for ac_func in strndup
 do :
   ac_fn_c_check_func "$LINENO" "strndup" "ac_cv_func_strndup"
@@ -4390,6 +4461,8 @@ DARSHAN_UTIL_VERSION="3.2.1"
 
 
 
+
+
 cat >confcache <<\_ACEOF
 # This file is a shell script that caches the results of configure
 # tests run on this system so they can be shared between configure


=====================================
darshan-util/configure.in
=====================================
@@ -115,6 +115,34 @@ AC_ARG_ENABLE(pydarshan,
 fi]
 ,)
 
+AC_ARG_ENABLE(
+    [autoperf-apxc],
+    [  --enable-autoperf-apxc    Enables compilation and use of the AutoPerf Cray XC module],
+    [enable_autoperf_apxc=yes],
+    []
+)
+AC_ARG_ENABLE(
+    [autoperf-apmpi],
+    [  --enable-autoperf-apmpi    Enables compilation and use of the AutoPerf MPI module],
+    [enable_autoperf_apmpi=yes],
+    []
+)
+
+if test x$enable_autoperf_apxc = xyes; then
+    abssrcdir=$(readlink -f ${srcdir})
+    AC_CHECK_HEADER([${abssrcdir}/../modules/autoperf/apxc/darshan-apxc-log-format.h],
+                    DARSHAN_USE_APXC=1,
+                    [AC_MSG_ERROR([The autoperf APXC module is not present])],
+                    [-]) # this last part tells it to only check for presence
+fi
+if test x$enable_autoperf_apmpi = xyes; then
+    abssrcdir=$(readlink -f ${srcdir})
+    AC_CHECK_HEADER([${abssrcdir}/../modules/autoperf/apmpi/darshan-apmpi-log-format.h],
+                    DARSHAN_USE_APMPI=1,
+                    [AC_MSG_ERROR([The autoperf MPI module is not present])],
+                    [-]) # this last part tells it to only check for presence
+fi
+
 AC_CHECK_FUNCS([strndup])
 
 DARSHAN_UTIL_VERSION="AC_PACKAGE_VERSION"
@@ -127,6 +155,8 @@ AC_SUBST(DARSHAN_ENABLE_PYDARSHAN)
 AC_SUBST(PYTHON)
 AC_SUBST(DARSHAN_PYDARSHAN_PATH)
 AC_SUBST(DARSHAN_UTIL_VERSION)
+AC_SUBST(DARSHAN_USE_APXC)
+AC_SUBST(DARSHAN_USE_APMPI)
 
 AC_OUTPUT
 


=====================================
darshan-util/darshan-logutils.c
=====================================
@@ -373,6 +373,7 @@ int darshan_log_get_exe(darshan_fd fd, char *buf)
     return (0);
 }
 
+
 /* darshan_log_put_exe()
  *
  * wrties the application exe name to darshan log file


=====================================
darshan-util/darshan-logutils.h
=====================================
@@ -155,6 +155,13 @@ extern struct darshan_mod_logutil_funcs *mod_logutils[];
 #include "darshan-dxt-logutils.h"
 #include "darshan-mdhim-logutils.h"
 
+#ifdef DARSHAN_USE_APXC
+#include "darshan-apxc-logutils.h"
+#endif
+#ifdef DARSHAN_USE_APMPI
+#include "darshan-apmpi-logutils.h"
+#endif
+
 darshan_fd darshan_log_open(const char *name);
 darshan_fd darshan_log_create(const char *name, enum darshan_comp_type comp_type,
     int partial_flag);
@@ -209,6 +216,14 @@ void darshan_log_get_filtered_name_records(darshan_fd fd,
         __file_name, __mnt_pt, __fs_type); \
 } while(0)
 
+#define DARSHAN_I_COUNTER_PRINT(__mod_name, __rank, __file_id, \
+                              __counter, __counter_val, __file_name, \
+                              __mnt_pt, __fs_type) do { \
+    printf("%s\t%" PRId64 "\t%" PRIu64 "\t%s\t%d\t%s\t%s\t%s\n", \
+        __mod_name, __rank, __file_id, __counter, __counter_val, \
+        __file_name, __mnt_pt, __fs_type); \
+} while(0)
+
 #define DARSHAN_F_COUNTER_PRINT(__mod_name, __rank, __file_id, \
                                 __counter, __counter_val, __file_name, \
                                 __mnt_pt, __fs_type) do { \
@@ -217,6 +232,14 @@ void darshan_log_get_filtered_name_records(darshan_fd fd,
         __file_name, __mnt_pt, __fs_type); \
 } while(0)
 
+#define DARSHAN_S_COUNTER_PRINT(__mod_name, __rank, __file_id, \
+                              __counter, __counter_val, __file_name, \
+                              __mnt_pt, __fs_type) do { \
+    printf("%s\t%" PRId64 "\t%" PRIu64 "\t%s\t%s\t%s\t%s\t%s\n", \
+        __mod_name, __rank, __file_id, __counter, __counter_val, \
+        __file_name, __mnt_pt, __fs_type); \
+} while(0)
+
 /* naive byte swap implementation */
 #define DARSHAN_BSWAP64(__ptr) do {\
     char __dst_char[8]; \


=====================================
darshan-util/doc/darshan-util.txt
=====================================
@@ -56,6 +56,19 @@ method of compilation.
 The `--enable-shared` argument to configure can be used to enable
 compilation of a shared version of the darshan-util library.
 
+The `--enable-autoperf-apmpi` and `--enable-autoperf-apxc` configure 
+arguments must be specified to build darshan-util with support for AutoPerf
+APMPI and APXC modules, respectively.
+
+[NOTE]
+====
+AutoPerf log analysis code is provided as Git submodules to Darshan's main repository, so if building Darshan source that has been cloned from Git, it is necessary to first retrieve the AutoPerf submodules by running the following command:
+
+----
+git submodule update --init
+----
+====
+
 == Analyzing log files
 
 Each time a darshan-instrumented application is executed, it will generate a
@@ -433,6 +446,70 @@ value of 1 MiB for optimal file alignment.
 
 ===== Additional modules 
 
+.Lustre module (if enabled, for Lustre file systems)
+[cols="40%,60%",options="header"]
+|====
+| counter name | description
+| LUSTRE_OSTS | number of OSTs (object storage targets) for the file system
+| LUSTRE_MDTS | number of MDTs (metadata targets) for the file system
+| LUSTRE_STRIPE_OFFSET | OST id offset specified at file creation time
+| LUSTRE_STRIPE_SIZE | stripe size for the file in bytes
+| LUSTRE_STRIPE_WIDTH | number of OSTs over which the file is striped
+| LUSTRE_OST_ID_* | indices of OSTs over which the file is striped
+|====
+
+.APXC module header record (if enabled, for Cray XC systems)
+[cols="40%,60%",options="header"]
+|====
+| counter name | description
+| APXC_GROUPS | total number of groups for the job
+| APXC_CHASSIS | total number of chassis for the job
+| APXC_BLADES | total number of blades for the job
+| APXC_MEMORY_MODE | Intel Xeon memory mode
+| APXC_CLUSTER_MODE | Intel Xeon NUMA configuration
+| APXC_MEMORY_MODE_CONSISTENT | Intel Xeon memory mode consistent across all nodes
+| APXC_CLUSTER_MODE_CONSISTENT | Intel Xeon cluster mode consistent across all nodes
+|====
+
+.APXC module per-router record (if enabled, for Cray XC systems)
+[cols="40%,60%",options="header"]
+|====
+| counter name | description
+| APXC_GROUP | group this router is on
+| APXC_CHASSIS | chassis this router is on
+| APXC_BLADE | blade this router is on
+| APXC_NODE | node connected to this router
+| APXC_AR_RTR_x_y_INQ_PRF_INCOMING_FLIT_VC[0-7] | flits on VCs of x y tile for router-router ports
+| APXC_AR_RTR_x_y_INQ_PRF_ROWBUS_STALL_CNT | stalls on x y tile for router-router ports
+| APXC_AR_RTR_PT_x_y_INQ_PRF_INCOMING_FLIT_VC[0,4] | flits on VCs of x y tile for router-nic ports
+| APXC_AR_RTR_PT_x_y_INQ_PRF_REQ_ROWBUS_STALL_CNT | stalls on x y tile for router-nic ports
+|====
+
+.APMPI module header record (if enabled, for MPI applications)
+[cols="40%,60%",options="header"]
+|====
+| counter name | description
+| MPI_TOTAL_COMM_TIME_VARIANCE | variance in total communication time across all the processes
+| MPI_TOTAL_COMM_SYNC_TIME_VARIANCE | variance in total sync time across all the processes, if enabled
+|====
+
+.APMPI module per-process record (if enabled, for MPI applications)
+[cols="40%,60%",options="header"]
+|====
+| counter name | description
+| MPI_PROCESSOR_NAME | name of the processor used by the MPI process
+| MPI_*_CALL_COUNT | total call count for an MPI op
+| MPI_*_TOTAL_BYTES | total bytes (i.e., cumulative across all calls) moved with an MPI op
+| MPI_*\_MSG_SIZE_AGG_* | histogram of total bytes moved for all the calls of an MPI op
+| MPI_*_TOTAL_TIME | total time (i.e., cumulative across all calls) of an MPI op
+| MPI_*_MIN_TIME | minimum time across all calls of an MPI op
+| MPI_*_MAX_TIME | maximum time across all calls of an MPI op
+| MPI_*_TOTAL_SYNC_TIME | total sync time (cumulative across all calls of an op) of an MPI op, if enabled
+| MPI_TOTAL_COMM_TIME | total communication (MPI) time of a process across all the MPI ops
+| MPI_TOTAL_COMM_SYNC_TIME | total sync time of a process across all the MPI ops, if enabled
+|====
+
+
 .BG/Q module (if enabled on BG/Q systems)
 [cols="40%,60%",options="header"]
 |====
@@ -451,19 +528,6 @@ value of 1 MiB for optimal file alignment.
 | BGQ_F_TIMESTAMP | Timestamp of when BG/Q data was collected
 |====
 
-
-.Lustre module (if enabled, for Lustre file systems)
-[cols="40%,60%",options="header"]
-|====
-| counter name | description
-| LUSTRE_OSTS | number of OSTs (object storage targets) for the file system
-| LUSTRE_MDTS | number of MDTs (metadata targets) for the file system
-| LUSTRE_STRIPE_OFFSET | OST id offset specified at file creation time
-| LUSTRE_STRIPE_SIZE | stripe size for the file in bytes
-| LUSTRE_STRIPE_WIDTH | number of OSTs over which the file is striped
-| LUSTRE_OST_ID_* | indices of OSTs over which the file is striped
-|====
-
 ==== Additional summary output
 [[addsummary]]
 


=====================================
darshan-util/pydarshan/Makefile
=====================================
@@ -74,8 +74,8 @@ release: #dist # package and upload a release
 	twine upload dist/*
 
 dist: clean  # builds source and wheel package
-	python setup.py sdist
-	python setup.py bdist_wheel
+	python3 setup.py sdist
+	python3 setup.py bdist_wheel
 
 	# might want to remove none-any wheel, but more specific wheels seem to take precedence
 	# rm -r dist/*non-any.whl
@@ -90,4 +90,4 @@ dist: clean  # builds source and wheel package
 
 
 install: clean  # install the package to the active Python's site-packages
-	python setup.py install
+	python3 setup.py install 


=====================================
darshan-util/pydarshan/darshan/backend/api_def_c.py
=====================================
@@ -121,9 +121,6 @@ typedef struct segment_info {
     double end_time;
 } segment_info;
 
-
-
-
 /* counter names */
 extern char *bgq_counter_names[];
 extern char *bgq_f_counter_names[];
@@ -160,10 +157,10 @@ void darshan_log_get_filtered_name_records(void*, struct darshan_name_record **,
 
 
 
-def load_darshan_header():
+def load_darshan_header(addins=''):
     """
     Returns a CFFI compatible header for darshan-utlil as a string.
 
     :return: String with a CFFI compatible header for darshan-util.
     """
-    return header
+    return header + addins


=====================================
darshan-util/pydarshan/darshan/backend/cffi_backend.py
=====================================
@@ -18,7 +18,27 @@ from darshan.backend.api_def_c import load_darshan_header
 from darshan.discover_darshan import find_utils
 from darshan.discover_darshan import check_version
 
-API_def_c = load_darshan_header()
+addins = ""
+
+#
+# Optional APXC module
+#
+try:
+  from darshan.backend.apxc import *
+  addins += get_apxc_defs()
+except:
+  pass
+
+#
+# Optional APMPI module
+#
+try:
+  from darshan.backend.apmpi import *
+  addins += get_apmpi_defs()
+except:
+  pass
+
+API_def_c = load_darshan_header(addins)
 
 ffi = cffi.FFI()
 ffi.cdef(API_def_c)
@@ -41,6 +61,10 @@ _structdefs = {
     "PNETCDF": "struct darshan_pnetcdf_file **",
     "POSIX": "struct darshan_posix_file **",
     "STDIO": "struct darshan_stdio_file **",
+    "APXC-HEADER": "struct darshan_apxc_header_record **",
+    "APXC-PERF": "struct darshan_apxc_perf_record **",
+    "APMPI-HEADER": "struct darshan_apmpi_header_record **",
+    "APMPI-PERF": "struct darshan_apmpi_perf_record **",
 }
 
 
@@ -356,7 +380,7 @@ def log_get_generic_record(log, mod_name, dtype='numpy'):
     return rec
 
 
-def counter_names(mod_name, fcnts=False):
+def counter_names(mod_name, fcnts=False, special=''):
     """
     Returns a list of available counter names for the module.
     By default only integer counter names are listed, unless fcnts is set to
@@ -382,8 +406,8 @@ def counter_names(mod_name, fcnts=False):
     else:
         F = ""
 
-    end = "{0}_{1}NUM_INDICES".format(mod_name.upper(), F.upper())
-    var_name = "{0}_{1}counter_names".format(mod_name.lower(), F.lower())
+    end = "{0}_{1}{2}NUM_INDICES".format(mod_name.upper(), F.upper(), special.upper())
+    var_name = "{0}_{1}{2}counter_names".format(mod_name.lower(), F.lower(), special.lower())
 
     while True: 
         try:


=====================================
darshan-util/pydarshan/darshan/discover_darshan.py
=====================================
@@ -196,7 +196,7 @@ def find_utils(ffi, libdutil):
             os.chdir(save)
         except:
             libdutil = None
-
+    
     if libdutil is None:
         try:
             darshan_path = discover_darshan_pyinstaller()
@@ -211,7 +211,7 @@ def find_utils(ffi, libdutil):
             libdutil = None
   
     
-
+    
     if libdutil is None:
         raise RuntimeError('Could not find libdarshan-util.so! Is darshan-util installed? Please ensure one of the the following: 1) export LD_LIBRARY_PATH=<path-to-libdarshan-util.so>, or 2) darshan-parser can found using the PATH variable, or 3) pkg-config can resolve pkg-config --path darshan-util, or 4) install a wheel that includes darshan-utils via pip.')
 


=====================================
darshan-util/pydarshan/darshan/report.py
=====================================
@@ -40,7 +40,6 @@ class DarshanReportJSONEncoder(json.JSONEncoder):
         return json.JSONEncoder.default(self, obj)
 
 
-
 class DarshanRecordCollection(collections.abc.MutableSequence):
     """
     Darshan log records may nest various properties (e.g., DXT, Lustre).
@@ -195,7 +194,6 @@ class DarshanRecordCollection(collections.abc.MutableSequence):
                 pd.set_option('display.max_rows', pd_max_rows)
 
 
-
     ###########################################################################
     # Export Conversions (following the pandas naming conventions)
     ###########################################################################
@@ -647,7 +645,65 @@ class DarshanReport(object):
 
         pass
 
+    def mod_read_all_apmpi_records(self, mod, dtype=None, warnings=True):
+        """ 
+        Reads all APMPI records for provided module.
+
+        Args:
+            mod (str): Identifier of module to fetch all records
+            dtype (str): 'numpy' for ndarray (default), 'dict' for python dictionary
+
+        Return:
+            None
+
+        """
+        if mod not in self.data['modules']:
+            if warnings:
+                logger.warning(f"Skipping. Log does not contain data for mod: {mod}")
+            return
+
+
+        supported =  ['APMPI'] 
+        if mod not in supported:
+            if warnings:
+                logger.warning(f" Skipping. Unsupported module: {mod} in in mod_read_all_apmpi_records(). Supported: {supported}")
+            # skip mod
+            return
 
+        #print(mod+"-HEADER")
+        #print(_structdefs[mod+"-HEADER"])
+        # handling options
+        dtype = dtype if dtype else self.dtype
+
+        self.records[mod] = []
+        # update module metadata
+        self.modules[mod]['num_records'] = 0
+        if mod not in self.counters:
+            self.counters[mod] = {}
+
+        # fetch header record
+        header_rec = backend.log_get_apmpi_record(self.log, _structdefs[mod+"-HEADER"])
+        self.records[mod].append(header_rec)
+
+        # fetch records
+        rec = backend.log_get_apmpi_record(self.log, _structdefs[mod+"-PERF"])
+        while rec != None:
+            if dtype == 'numpy':
+                self.records[mod].append(rec)
+            else:
+                self.records[mod].append(rec)
+
+            self.data['modules'][mod]['num_records'] += 1
+
+            # fetch next
+            rec = backend.log_get_apmpi_record(self.log, _structdefs[mod+"-PERF"])
+
+
+        if self.lookup_name_records:
+            self.update_name_records()
+   
+        pass 
+ 
     def mod_read_all_dxt_records(self, mod, dtype=None, warnings=True, reads=True, writes=True):
         """
         Reads all dxt records for provided module.
@@ -779,7 +835,6 @@ class DarshanReport(object):
                 'counters': combined_c,
                 }]
 
-
         pass
 
 


=====================================
darshan-util/pydarshan/examples/01_darshan-apmpi.ipynb
=====================================
@@ -0,0 +1,1397 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# DarshanUtils for Python\n",
+    "\n",
+    "This notebook gives an overview of features provided by the Python bindings for DarshanUtils."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "By default all records, metadata, available modules and the name records are loaded when opening a Darshan log:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "env: LD_LIBRARY_PATH=/projects/Performance/chunduri/Software/Temp/build/darshan-util/install/lib/\n"
+     ]
+    }
+   ],
+   "source": [
+    "%env LD_LIBRARY_PATH=/projects/Performance/chunduri/Software/Temp/build/darshan-util/install/lib/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "env: PE_PKGCONFIG_LIBS=darshan-runtime\n",
+      "env: PKG_CONFIG_PATH=/projects/Performance/chunduri/Software/Temp/build/darshan-runtime/lib/pkgconfig\n",
+      "env: PATH=/projects/Performance/chunduri/Software/Temp/build/darshan-utils/bin:/opt/anaconda3x/bin:/opt/anaconda3x/condabin:/projects/Performance/chunduri/Work_backup_June252017/software/install/autotools/bin:/bin:/sbin:/opt/anaconda3x/bin:/usr/bin/usr/sbin:/usr/local/sbin:/usr/sbin:/dbhome/db2cat/sqllib/bin:/dbhome/db2cat/sqllib/adm:/dbhome/db2cat/sqllib/misc:/dbhome/db2cat/sqllib/gskit/bin:/opt/ibutils/bin:/home/chunduri/bin:/projects/Performance/chunduri/Software/Temp/build/darshan-util/install/bin\n"
+     ]
+    }
+   ],
+   "source": [
+    "%env PE_PKGCONFIG_LIBS=darshan-runtime \n",
+    "%env PKG_CONFIG_PATH=/projects/Performance/chunduri/Software/Temp/build/darshan-runtime/lib/pkgconfig \n",
+    "%env PATH=/projects/Performance/chunduri/Software/Temp/build/darshan-utils/bin:/opt/anaconda3x/bin:/opt/anaconda3x/condabin:/projects/Performance/chunduri/Work_backup_June252017/software/install/autotools/bin:/bin:/sbin:/opt/anaconda3x/bin:/usr/bin/usr/sbin:/usr/local/sbin:/usr/sbin:/dbhome/db2cat/sqllib/bin:/dbhome/db2cat/sqllib/adm:/dbhome/db2cat/sqllib/misc:/dbhome/db2cat/sqllib/gskit/bin:/opt/ibutils/bin:/home/chunduri/bin:/projects/Performance/chunduri/Software/Temp/build/darshan-util/install/bin"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "env: darshan_prefix=/projects/Performance/chunduri/Software/Temp/build/darshan-runtime/install\n",
+      "env: darshan_share=/projects/Performance/chunduri/Software/Temp/build/darshan-runtime/install/share\n",
+      "env: darshan_libdir=-L${darshan_prefix}/lib\n"
+     ]
+    }
+   ],
+   "source": [
+    "%env darshan_prefix=/projects/Performance/chunduri/Software/Temp/build/darshan-runtime/install\n",
+    "%env darshan_share=/projects/Performance/chunduri/Software/Temp/build/darshan-runtime/install/share\n",
+    "%env darshan_libdir= -L${darshan_prefix}/lib"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "/projects/Performance/chunduri/Software/Temp/build/darshan-util/install/lib/\n",
+      "/projects/Performance/chunduri/Software/Temp/build/darshan-utils/bin:/opt/anaconda3x/bin:/opt/anaconda3x/condabin:/projects/Performance/chunduri/Work_backup_June252017/software/install/autotools/bin:/bin:/sbin:/opt/anaconda3x/bin:/usr/bin/usr/sbin:/usr/local/sbin:/usr/sbin:/dbhome/db2cat/sqllib/bin:/dbhome/db2cat/sqllib/adm:/dbhome/db2cat/sqllib/misc:/dbhome/db2cat/sqllib/gskit/bin:/opt/ibutils/bin:/home/chunduri/bin:/projects/Performance/chunduri/Software/Temp/build/darshan-util/install/bin\n",
+      "/projects/Performance/chunduri/Software/Temp/build/darshan-runtime/lib/pkgconfig\n"
+     ]
+    }
+   ],
+   "source": [
+    "!echo $LD_LIBRARY_PATH\n",
+    "!echo $PATH\n",
+    "!echo $PKG_CONFIG_PATH"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "/lus/theta-fs0/projects/Performance/chunduri/Software/Temp/darshan/darshan-util/pydarshan/examples\n",
+      "/lus/theta-fs0/projects/Performance/chunduri/Software/Temp/darshan/darshan-util/pydarshan\n",
+      "/lus/theta-fs0/projects/Performance/chunduri/Software/Temp/darshan/darshan-util/pydarshan\n"
+     ]
+    }
+   ],
+   "source": [
+    "!pwd\n",
+    "%cd ..\n",
+    "!pwd"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import cffi\n",
+    "import numpy\n",
+    "import pandas\n",
+    "import matplotlib\n",
+    "import pprint\n",
+    "# ffi = cffi.FFI()\n",
+    "# libdutil = ffi.dlopen(\"/projects/Performance/chunduri/Software/Temp/build/darshan-util/install/lib/libdarshan-util.so\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from darshan.backend.cffi_backend import ffi"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import logging\n",
+    "logger = logging.getLogger(__name__)\n",
+    "logger\n",
+    "from darshan.report import DarshanReport"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import darshan.backend.cffi_backend as backend"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import darshan"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pp = pprint.PrettyPrinter()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Filename:       /projects/Performance/chunduri/MILC/milctestv2-papi-reorder-darshan/MILC_128_498873/498873.128.64p.log\n",
+      "Times:          2021-02-17 11:19:42 to 2021-02-17 11:33:58 (Duration 0:14:16)\n",
+      "Executeable:    /lus/theta-fs0/projects/Performance/chunduri/MILC/milctestv2-papi-reorder-darshan/MILC_128_498873/../milc_qcd-git-201510211317/ks_imp_rhmc/su3_rhmd_hisq /lus/theta-fs0/projects/Performance/chunduri/MILC/milctestv2-papi-reorder-darshan/MILC_128_498873/../runs/l9648_4steps.in\n",
+      "Processes:      8192\n",
+      "JobID:          498873\n",
+      "UID:            32451\n",
+      "Modules in Log: ['LUSTRE', 'STDIO', 'APXC', 'APMPI']\n",
+      "Loaded Records: {}\n",
+      "Name Records:   0\n",
+      "Darshan/Hints:  {'lib_ver': '3.2.1', 'h': 'romio_no_indep_rw=true;cb_nodes=4'}\n",
+      "DarshanReport:  id(140383593345616) (tmp)\n"
+     ]
+    }
+   ],
+   "source": [
+    "#import darshan\n",
+    "\n",
+    "#report = darshan.DarshanReport(\"examples/example-logs/example.darshan\", read_all=True)  # Default behavior\n",
+    "#report = darshan.DarshanReport(\"/projects/Performance/chunduri/Work/3D-7PointStencil-MPI/496119.2.2p.log\", read_all=False) \n",
+    "report = darshan.DarshanReport(\"/projects/Performance/chunduri/MILC/milctestv2-papi-reorder-darshan/MILC_128_498873/498873.128.64p.log\", read_all=False)\n",
+    "report.info()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "APMPI-HEADER\n",
+      "struct darshan_apmpi_header_record **\n"
+     ]
+    }
+   ],
+   "source": [
+    "r = report.mod_read_all_apmpi_records(\"APMPI\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "None\n"
+     ]
+    }
+   ],
+   "source": [
+    "pp.pprint(r)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "dict_keys(['version', 'metadata', 'records', 'summary', 'modules', 'counters', 'name_records', 'mounts'])"
+      ]
+     },
+     "execution_count": 15,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "report.data.keys()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Filename:       /projects/Performance/chunduri/MILC/milctestv2-papi-reorder-darshan/MILC_128_498873/498873.128.64p.log\n",
+      "Times:          2021-02-17 11:19:42 to 2021-02-17 11:33:58 (Duration 0:14:16)\n",
+      "Executeable:    /lus/theta-fs0/projects/Performance/chunduri/MILC/milctestv2-papi-reorder-darshan/MILC_128_498873/../milc_qcd-git-201510211317/ks_imp_rhmc/su3_rhmd_hisq /lus/theta-fs0/projects/Performance/chunduri/MILC/milctestv2-papi-reorder-darshan/MILC_128_498873/../runs/l9648_4steps.in\n",
+      "Processes:      8192\n",
+      "JobID:          498873\n",
+      "UID:            32451\n",
+      "Modules in Log: ['LUSTRE', 'STDIO', 'APXC', 'APMPI']\n",
+      "Loaded Records: {'APMPI': 8193}\n",
+      "Name Records:   2\n",
+      "Darshan/Hints:  {'lib_ver': '3.2.1', 'h': 'romio_no_indep_rw=true;cb_nodes=4'}\n",
+      "DarshanReport:  id(140383593345616) (tmp)\n"
+     ]
+    }
+   ],
+   "source": [
+    "report.update_name_records()\n",
+    "report.info()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 43,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/opt/anaconda3x/lib/python3.7/site-packages/ipykernel_launcher.py:5: DeprecationWarning: time.clock has been deprecated in Python 3.3 and will be removed from Python 3.8: use time.perf_counter or time.process_time instead\n",
+      "  \"\"\"\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "1613589051.1455183 539.32\n",
+      "# darshan log version:  1\n",
+      "APMPI Variance in total mpi time:  96.16168403248518 \n",
+      "\n",
+      "       Rank   Node_ID           Call  Total_Time    Count  Total_Bytes  \\\n",
+      "0         0  nid00920       MPI_WAIT  216.377984  2607364         None   \n",
+      "1         0  nid00920  MPI_ALLREDUCE  129.520685   157515      1260012   \n",
+      "2         0  nid00920      MPI_ISEND   20.172348  1303682  30953347840   \n",
+      "3         0  nid00920      MPI_IRECV    6.696055  1303682  30953347840   \n",
+      "4         0  nid00920     MPI_REDUCE    0.987648        1            8   \n",
+      "...     ...       ...            ...         ...      ...          ...   \n",
+      "57339  8191  nid04603      MPI_ISEND   17.356476  1303682  30953347840   \n",
+      "57340  8191  nid04603      MPI_IRECV    5.852337  1303682  30953347840   \n",
+      "57341  8191  nid04603      MPI_BCAST    0.099630      104         8140   \n",
+      "57342  8191  nid04603    MPI_BARRIER    0.039288        2         None   \n",
+      "57343  8191  nid04603     MPI_REDUCE    0.000042        1            8   \n",
+      "\n",
+      "      [0-256B] [256-1KB] [1K-8KB] [8K-256KB] 256K-1MB [>1MB]  Min_Time  \\\n",
+      "0         None      None     None       None     None   None  0.000002   \n",
+      "1       157515         0        0          0        0      0  0.000090   \n",
+      "2           16         0     5624    1298042        0      0  0.000003   \n",
+      "3           16         0     5624    1298042        0      0  0.000002   \n",
+      "4            1         0        0          0        0      0  0.987648   \n",
+      "...        ...       ...      ...        ...      ...    ...       ...   \n",
+      "57339       16         0     5624    1298042        0      0  0.000004   \n",
+      "57340       16         0     5624    1298042        0      0  0.000002   \n",
+      "57341      101         0        3          0        0      0  0.000004   \n",
+      "57342     None      None     None       None     None   None  0.009431   \n",
+      "57343        1         0        0          0        0      0  0.000042   \n",
+      "\n",
+      "       Max_Time  \n",
+      "0      0.022655  \n",
+      "1      0.271192  \n",
+      "2      0.002505  \n",
+      "3      0.001940  \n",
+      "4      0.987648  \n",
+      "...         ...  \n",
+      "57339  0.003165  \n",
+      "57340  0.003266  \n",
+      "57341  0.027544  \n",
+      "57342  0.029857  \n",
+      "57343  0.000042  \n",
+      "\n",
+      "[57344 rows x 14 columns]\n",
+      "1613589334.4497185 822.67\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/opt/anaconda3x/lib/python3.7/site-packages/ipykernel_launcher.py:63: DeprecationWarning: time.clock has been deprecated in Python 3.3 and will be removed from Python 3.8: use time.perf_counter or time.process_time instead\n"
+     ]
+    }
+   ],
+   "source": [
+    "import pandas as pd\n",
+    "len(report.records['APMPI'])\n",
+    "rec = report.records['APMPI'][0]\n",
+    "import time\n",
+    "print(time.time(), time.clock())\n",
+    "print(\"# darshan log version: \", rec['version'])\n",
+    "sync_flag = rec['sync_flag']\n",
+    "\n",
+    "print(\"APMPI Variance in total mpi time: \", rec['variance_total_mpitime'], \"\\n\")\n",
+    "if sync_flag:\n",
+    "    print(\"APMPI Variance in total mpi sync time: \", rec['variance_total_mpisynctime'])\n",
+    "\n",
+    "df_apmpi = pd.DataFrame()\n",
+    "for rec in report.records['APMPI'][1:]:   #skip the first record which is header\n",
+    "    mpi_nonzero_callcount = []\n",
+    "    for k,v in rec['all_counters'].items():\n",
+    "        if k.endswith('_CALL_COUNT') and v>0:\n",
+    "            mpi_nonzero_callcount.append(k[:-(len('CALL_COUNT'))])\n",
+    "\n",
+    "    df_rank = pd.DataFrame()\n",
+    "    for mpiop in mpi_nonzero_callcount:\n",
+    "        ncall = mpiop\n",
+    "        #print(mpiop)\n",
+    "        ncount = mpiop + 'CALL_COUNT'\n",
+    "        nsize  = mpiop + 'TOTAL_BYTES'\n",
+    "        h0     = mpiop + 'MSG_SIZE_AGG_0_256'\n",
+    "        h1     = mpiop + 'MSG_SIZE_AGG_256_1K'\n",
+    "        h2     = mpiop + 'MSG_SIZE_AGG_1K_8K'\n",
+    "        h3     = mpiop + 'MSG_SIZE_AGG_8K_256K'\n",
+    "        h4     = mpiop + 'MSG_SIZE_AGG_256K_1M'\n",
+    "        h5     = mpiop + 'MSG_SIZE_AGG_1M_PLUS'\n",
+    "        ntime  = mpiop + 'TOTAL_TIME'\n",
+    "        mintime  = mpiop + 'MIN_TIME'\n",
+    "        maxtime  = mpiop + 'MAX_TIME'\n",
+    "        if sync_flag:\n",
+    "            totalsync = mpiop + 'TOTAL_SYNC_TIME'\n",
+    "        \n",
+    "        mpiopstat = {}\n",
+    "        mpiopstat['Rank'] = rec['rank']\n",
+    "        mpiopstat['Node_ID'] = rec['node_name']\n",
+    "        mpiopstat['Call'] = ncall[:-1]\n",
+    "        mpiopstat['Total_Time'] = rec['all_counters'][ntime]\n",
+    "        mpiopstat['Count'] = rec['all_counters'][ncount]\n",
+    "        mpiopstat['Total_Bytes'] = rec['all_counters'].get(nsize, None)\n",
+    "        mpiopstat['[0-256B]'] = rec['all_counters'].get(h0, None)\n",
+    "        mpiopstat['[256-1KB]'] = rec['all_counters'].get(h1, None)\n",
+    "        mpiopstat['[1K-8KB]'] = rec['all_counters'].get(h2, None)\n",
+    "        mpiopstat['[8K-256KB]'] = rec['all_counters'].get(h3, None)\n",
+    "        mpiopstat['256K-1MB'] = rec['all_counters'].get(h4, None)\n",
+    "        mpiopstat['[>1MB]'] = rec['all_counters'].get(h5, None)\n",
+    "        mpiopstat['Min_Time'] = rec['all_counters'][mintime]\n",
+    "        mpiopstat['Max_Time'] = rec['all_counters'][maxtime]\n",
+    "        if sync_flag:\n",
+    "            mpiopstat[\"Total_SYNC_Time\"] = rec['all_counters'][totalsync]\n",
+    "            \n",
+    "        df_mpiop  = pd.DataFrame([mpiopstat], columns=mpiopstat.keys())\n",
+    "        df_rank = pd.concat([df_rank, df_mpiop], axis =0).reset_index(drop=True)\n",
+    "        #sort data frame based on MPIOP total time\n",
+    "    \n",
+    "    df_rank = df_rank.sort_values(by=['Total_Time'], ascending=False)  \n",
+    "    df_apmpi = pd.concat([df_apmpi, df_rank], axis=0).reset_index(drop=True)\n",
+    "print(df_apmpi)\n",
+    "print(time.time(), time.clock())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\n",
+    "len(report.records['APMPI'])\n",
+    "rec = report.records['APMPI'][0]\n",
+    "\n",
+    "print(\"# darshan log version: \", rec['version'])\n",
+    "sync_flag = rec['sync_flag']\n",
+    "\n",
+    "print(\"APMPI Variance in total mpi time: \", rec['variance_total_mpitime'], \"\\n\")\n",
+    "if sync_flag:\n",
+    "    print(\"APMPI Variance in total mpi sync time: \", rec['variance_total_mpisynctime'])\n",
+    "\n",
+    "if sync_flag:\n",
+    "    print(\"{:<8}{:<10}{:<16}{:<18}{:<10}{:<15}{:<10}{:<10}{:<10}{:<10}{:<10}{:<10}{:<18}{:<18}{:<18}\\n{}\".format(\n",
+    "    \"Rank\",\"Node_ID\", \"Call\", \"Total_Time\", \"Count\", \"Total_Bytes\", \"0-256\", \"256-1K\", \"1K-8K\", \"8K-256K\", \"256K-1M\", \"1M+\", \n",
+    "    \"Min_Time\", \"Max_Time\", \"Total_SYNC_Time\", \"=\"*180))\n",
+    "else:\n",
+    "    print(\"{:<8}{:<10}{:<16}{:<18}{:<10}{:<15}{:<10}{:<10}{:<10}{:<10}{:<10}{:<10}{:<18}{:<18}\\n{}\".format(\n",
+    "    \"Rank\",\"Node_ID\", \"Call\", \"Total_Time\", \"Count\", \"Total_Bytes\", \"0-256\", \"256-1K\", \"1K-8K\", \"8K-256K\", \"256K-1M\", \"1M+\", \n",
+    "     \"Min_Time\", \"Max_Time\", \"=\"*180))\n",
+    "\n",
+    "\n",
+    "for rec in report.records['APMPI'][1:]:   #skip the first record which is header\n",
+    "    \n",
+    "    mpi_nonzero_callcount = []\n",
+    "    for k,v in rec['all_counters'].items():\n",
+    "        if k.endswith('_CALL_COUNT') and v>0:\n",
+    "            mpi_nonzero_callcount.append(k[:-(len('CALL_COUNT'))])\n",
+    "\n",
+    "    \n",
+    "    for mpiop in mpi_nonzero_callcount:\n",
+    "        ncall = mpiop\n",
+    "        ncount = mpiop + 'CALL_COUNT'\n",
+    "        nsize  = mpiop + 'TOTAL_BYTES'\n",
+    "        h0     = mpiop + 'MSG_SIZE_AGG_0_256'\n",
+    "        h1     = mpiop + 'MSG_SIZE_AGG_256_1K'\n",
+    "        h2     = mpiop + 'MSG_SIZE_AGG_1K_8K'\n",
+    "        h3     = mpiop + 'MSG_SIZE_AGG_8K_256K'\n",
+    "        h4     = mpiop + 'MSG_SIZE_AGG_256K_1M'\n",
+    "        h5     = mpiop + 'MSG_SIZE_AGG_1M_PLUS'\n",
+    "        ntime  = mpiop + 'TOTAL_TIME'\n",
+    "        mintime  = mpiop + 'MIN_TIME'\n",
+    "        maxtime  = mpiop + 'MAX_TIME'\n",
+    "        if sync_flag:\n",
+    "            totalsync = mpiop + 'TOTAL_SYNC_TIME'\n",
+    "        \n",
+    "        if (rec['all_counters'][ncount] > 0 or not args.quiet):\n",
+    "            print(\"{rank:<8}{node_name:<10}{call:<16}{ntime:<18.6f}{count:<10}{size:<15}{h0:<10}{h1:<10}{h2:<10}{h3:<10}{h4:<10}{h5:<10}{mintime:<18.6f}{maxtime:<18.6f}\".format(\n",
+    "            rank = rec['rank'],\n",
+    "            node_name = rec['node_name'],\n",
+    "            call = ncall[:-1],\n",
+    "            ntime = rec['all_counters'].get(ntime, 'NA'),\n",
+    "            count = rec['all_counters'][ncount],\n",
+    "            size = rec['all_counters'].get(nsize, 'NA'),\n",
+    "            h0 = rec['all_counters'].get(h0, 'NA'),\n",
+    "            h1 = rec['all_counters'].get(h1, 'NA'),\n",
+    "            h2 = rec['all_counters'].get(h2, 'NA'),\n",
+    "            h3 = rec['all_counters'].get(h3, 'NA'),\n",
+    "            h4 = rec['all_counters'].get(h4, 'NA'),\n",
+    "            h5 = rec['all_counters'].get(h5, 'NA'),\n",
+    "            mintime = rec['all_counters'].get(mintime, 'NA'),\n",
+    "            maxtime = rec['all_counters'].get(maxtime, 'NA')), end='')\n",
+    "            if sync_flag:\n",
+    "                print(\"{totalsync:18.6f}\".format( totalsync = rec['all_counters'][totalsync]))\n",
+    "            print(\" \")\n",
+    "            \n",
+    "    print(\"{rank:<8}{node_name:<10}{call:<16}{time:<18.6f}\".format(\n",
+    "        rank = rec['rank'],\n",
+    "        node_name = rec['node_name'],\n",
+    "        call = \"Total_MPI_time\",\n",
+    "        time=rec['all_counters']['RANK_TOTAL_MPITIME']\n",
+    "        )) \n",
+    "    #print(\"Rank\", rec['rank'], \"Total_MPI_time: \", rec['all_counters']['RANK_TOTAL_MPITIME'])\n",
+    "    if sync_flag:\n",
+    "        print(\"Rank\", rec['rank'], \"Total_MPI_SYNC_time: \", rec['all_counters']['RANK_TOTAL_MPISYNCTIME'])\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 40,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "dict_keys(['id', 'rank', 'node_name', 'all_counters'])\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "[('MPI_ALLREDUCE_CALL_COUNT', 1253735),\n",
+       " ('MPI_ALLREDUCE_TOTAL_BYTES', 10029388),\n",
+       " ('MPI_ALLREDUCE_MSG_SIZE_AGG_0_256', 1253735),\n",
+       " ('MPI_ALLREDUCE_MSG_SIZE_AGG_256_1K', 0),\n",
+       " ('MPI_ALLREDUCE_MSG_SIZE_AGG_1K_8K', 0),\n",
+       " ('MPI_ALLREDUCE_MSG_SIZE_AGG_8K_256K', 0),\n",
+       " ('MPI_ALLREDUCE_MSG_SIZE_AGG_256K_1M', 0),\n",
+       " ('MPI_ALLREDUCE_MSG_SIZE_AGG_1M_PLUS', 0),\n",
+       " ('MPI_ALLREDUCE_TOTAL_TIME', 55.16067552566528),\n",
+       " ('MPI_ALLREDUCE_MIN_TIME', 2.5033950805664062e-05),\n",
+       " ('MPI_ALLREDUCE_MAX_TIME', 0.007993221282958984),\n",
+       " ('MPI_ALLREDUCE_TOTAL_SYNC_TIME', 101.60499858856201)]"
+      ]
+     },
+     "execution_count": 40,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "#pp.pprint(report.records['APMPI'][0]['all_counters'])\n",
+    "\n",
+    "pp.pprint(report.records['APMPI'][0].keys())\n",
+    "[ (k,v) for k,v in report.records['APMPI'][0]['all_counters'].items() if k.startswith('MPI_ALLREDUCE_')]\n",
+    "\n",
+    "#[ (k,v) for k,v in report.records['APMPI'][0]['all_counters'].items() if k.endswith('_CALL_COUNT')]\n",
+    "\n",
+    "# for k,v in report.records['APMPI'][0]['all_counters'].items():\n",
+    "#     if k.endswith('_CALL_COUNT') and v>0:\n",
+    "#         print(k,v)\n",
+    "    \n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 27,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "['MPI_SEND_', 'MPI_RECV_']\n",
+      "MPI_SEND_CALL_COUNT 900\n",
+      "MPI_SEND_TOTAL_BYTES 629146800\n",
+      "MPI_SEND_MSG_SIZE_AGG_0_256 300\n",
+      "MPI_SEND_MSG_SIZE_AGG_256_1K 0\n",
+      "MPI_SEND_MSG_SIZE_AGG_1K_8K 0\n",
+      "MPI_SEND_MSG_SIZE_AGG_8K_256K 0\n",
+      "MPI_SEND_MSG_SIZE_AGG_256K_1M 600\n",
+      "MPI_SEND_MSG_SIZE_AGG_1M_PLUS 0\n",
+      "MPI_SEND_TOTAL_TIME 0.3348836898803711\n",
+      "MPI_SEND_MIN_TIME 5.9604644775390625e-06\n",
+      "MPI_SEND_MAX_TIME 0.10425710678100586\n",
+      "MPI_RECV_CALL_COUNT 900\n",
+      "MPI_RECV_TOTAL_BYTES 629146800\n",
+      "MPI_RECV_MSG_SIZE_AGG_0_256 300\n",
+      "MPI_RECV_MSG_SIZE_AGG_256_1K 0\n",
+      "MPI_RECV_MSG_SIZE_AGG_1K_8K 0\n",
+      "MPI_RECV_MSG_SIZE_AGG_8K_256K 0\n",
+      "MPI_RECV_MSG_SIZE_AGG_256K_1M 600\n",
+      "MPI_RECV_MSG_SIZE_AGG_1M_PLUS 0\n",
+      "MPI_RECV_TOTAL_TIME 0.2742795944213867\n",
+      "MPI_RECV_MIN_TIME 2.86102294921875e-06\n",
+      "MPI_RECV_MAX_TIME 0.15262413024902344\n"
+     ]
+    }
+   ],
+   "source": [
+    "[k for k in report.records['APMPI'][0]['all_counters'].keys() if k.startswith('MPI_SEND_')]\n",
+    "\n",
+    "mpi_nonzero_callcount = []\n",
+    "for k,v in report.records['APMPI'][0]['all_counters'].items():\n",
+    "    if k.endswith('_CALL_COUNT') and v>0:\n",
+    "        mpi_nonzero_callcount.append(k[:-(len('CALL_COUNT'))])\n",
+    "print(mpi_nonzero_callcount)\n",
+    "for mpiop in mpi_nonzero_callcount:\n",
+    "    for k in report.records['APMPI'][0]['all_counters'].keys():\n",
+    "        if k.startswith(mpiop):\n",
+    "            print(k, report.records['APMPI'][0]['all_counters'][k])\n",
+    "        "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A few of the internal data structures explained:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# report.metadata         # dictionary with raw metadata from darshan log\n",
+    "# report.modules          # dictionary with raw module info from darshan log (need: technical, module idx)\n",
+    "# report.name_records     # dictionary for resolving name records: id -> path/name\n",
+    "# report.records          # per module \"dataframes\"/dictionaries holding loaded records"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The darshan report holds a variety of namespaces for report related data. All of them are also referenced in `report.data` at the moment, but reliance on this internal organization of the report object is discouraged once the API has stabilized. Currently, `report.data` references the following information:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "report.data.keys()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "report.mod_read_all_records('POSIX')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "report.mod_read_all_records('STDIO')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "report.update_name_records()\n",
+    "report.info()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# visualization helper used by different examples in the remainder of this notebook\n",
+    "from IPython.display import display, HTML\n",
+    "# usage: display(obj)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Record Formats and Selectively Loading Records\n",
+    "\n",
+    "For memory efficient analysis, it is possible to suppress records from being loaded automatically. This is useful, for example, when analysis considers only records of a particular layer/module."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import darshan\n",
+    "report = darshan.DarshanReport(\"/projects/Performance/chunduri/Software/Temp/darshan/darshan-util/pydarshan/examples/example-logs/example.darshan\", read_all=False, lookup_name_records=True) # Loads no records!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# expected to fail, as no records were loaded\n",
+    "try:\n",
+    "    print(len(report.records['STDIO']), \"records loaded for STDIO.\")\n",
+    "except:\n",
+    "    print(\"No STDIO records loaded for this report yet.\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Additional records then can be loaded selectively, for example, on a per module basis:"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### dtype: pandas"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "report.mod_read_all_records(\"STDIO\", dtype=\"pandas\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print('id', report.records['STDIO'][0]['id'])\n",
+    "print('rank', report.records['STDIO'][0]['rank'])\n",
+    "display(report.records['STDIO'][0]['counters'])\n",
+    "display(report.records['STDIO'][0]['fcounters'])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### dtype: dict"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "report.mod_read_all_records(\"STDIO\", dtype='dict')\n",
+    "report.records['STDIO'][0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### dtype: numpy"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "report.mod_read_all_records(\"STDIO\")\n",
+    "report.records['STDIO'][0]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### The Log in Memory\n",
+    "\n",
+    "Let's have a look at how calling `report.mod_read_all_records(\"STDIO\")` changed the state of the log in memory."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Compare to info line: \"Loaded Records: {...}\"\n",
+    "report.info()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "When interacting on individual log data for example in a for loop you would most likely care about the following instead:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(\"Num records:\", len(report.records['STDIO']))\n",
+    "\n",
+    "# show first 10 records\n",
+    "for rec in report.records['STDIO'][0:10]:\n",
+    "    print(rec)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Aggregation and Filtering (Experimental)\n",
+    "\n",
+    "Darshan log data is routinely aggregated for quick overview. The report object offers a few methods to perform common aggregations:"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Report aggregations and summarization remain **experimental** for now, mostly to allow interfaces to stabilize. But experimental features can be switched on easily by invoking `darshan.enable_experimental()`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import darshan\n",
+    "darshan.enable_experimental(verbose=True) # Enable verbosity, listing new functionality"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Example report, which counts records in log across modules \n",
+    "report.name_records_summary()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Chain operations like filtering and reductions\n",
+    "The filter and reduce operations return DarshanReports themselves, thus allowing operations to be conveniently chained."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "import pprint\n",
+    "\n",
+    "import darshan\n",
+    "darshan.enable_experimental()\n",
+    "\n",
+    "report = darshan.DarshanReport(\"/projects/Performance/chunduri/Software/Temp/darshan/darshan-util/pydarshan/examples/example-logs/example.darshan\", read_all=True)\n",
+    "report.name_records"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "report.filter(name_records=[6301063301082038805, 15920181672442173319]).records"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# reduce all after filtering\n",
+    "report.filter(pattern=\"*.hdf5\").reduce().records"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# only preserve some\n",
+    "report.filter(name_records=[6301063301082038805]).reduce(mods=['POSIX', 'STDIO']).records"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# expected to fail\n",
+    "try:\n",
+    "    pprint.pprint(report.summary['agg_ioops'])\n",
+    "except:\n",
+    "    print(\"IOOPS have not been aggregated for this report.\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "report.read_all() \n",
+    "report.summarize()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "report.summary['agg_ioops']"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Or fine grained:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "report.mod_agg_iohist(\"MPI-IO\")  # to create the histograms"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "report.agg_ioops()               # to create the combined operation type summary"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Report Algebra (Experimental)\n",
+    "\n",
+    "Various operations are implemented to merge, combine and manipulate log records. This is useful for analysis tasks, but can also be used to construct performance projections or extrapolation.\n",
+    "\n",
+    "For convenience, we overload some of the operations provided by Python when they resemble intuitive equivalence to their mathematical counterparts. In particular, we enable the combination of different object types."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import darshan\n",
+    "darshan.enable_experimental()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "# merging records\n",
+    "from darshan.experimental.plots.matplotlib import plot_access_histogram\n",
+    "from darshan.experimental.plots.matplotlib import plot_opcounts\n",
+    "\n",
+    "r1 = darshan.DarshanReport(\"/projects/Performance/chunduri/Software/Temp/darshan/darshan-util/pydarshan/examples/example-logs/example.darshan\", read_all=True)\n",
+    "r2 = darshan.DarshanReport(\"/projects/Performance/chunduri/Software/Temp/darshan/darshan-util/pydarshan/examples/example-logs/example2.darshan\", read_all=True)\n",
+    "rx = r1 + r2\n",
+    "\n",
+    "for r in [r1, r2, rx]:\n",
+    "    plt = plot_opcounts(r)\n",
+    "    plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# multiply records with a scalar (think, four times the I/O load)\n",
+    "#r1 = darshan.DarshanReport(\"example.darshan\", read_all=True)\n",
+    "#rx = r1 * 4\n",
+    "#plot_opcounts(rx)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# rebase via timedelta\n",
+    "#r1 = darshan.DarshanReport(\"example.darshan\", read_all=True)\n",
+    "#dt = datetime.timedelta()\n",
+    "#rx = r1 + dt"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Plotting"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import darshan\n",
+    "darshan.enable_experimental(verbose=False)\n",
+    "\n",
+    "r3 = darshan.DarshanReport(\"/projects/Performance/chunduri/Software/Temp/darshan/darshan-util/pydarshan/examples/example-logs/example.darshan\")\n",
+    "r3.mod_read_all_records('POSIX')\n",
+    "\n",
+    "from darshan.experimental.plots.matplotlib import plot_access_histogram\n",
+    "plot_access_histogram(r3, mod='POSIX')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import darshan\n",
+    "darshan.enable_experimental(verbose=False)\n",
+    "\n",
+    "r3 = darshan.DarshanReport(\"/projects/Performance/chunduri/Software/Temp/darshan/darshan-util/pydarshan/examples/example-logs/example.darshan\")\n",
+    "r3.mod_read_all_records('MPI-IO')\n",
+    "\n",
+    "from darshan.experimental.plots.matplotlib import plot_access_histogram\n",
+    "plot_access_histogram(r3, mod='MPI-IO')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "import darshan\n",
+    "darshan.enable_experimental(verbose=False)\n",
+    "\n",
+    "r3 = darshan.DarshanReport(\"/projects/Performance/chunduri/Software/Temp/darshan/darshan-util/pydarshan/examples/example-logs/example.darshan\")\n",
+    "r3.read_all()\n",
+    "\n",
+    "from darshan.experimental.plots.matplotlib import plot_opcounts\n",
+    "plot_opcounts(r3)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### DXT Records\n",
+    "\n",
+    "DXT records are also supported, and can be loaded individually on a per module basis as follows:\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import darshan\n",
+    "\n",
+    "report2 = darshan.DarshanReport(\"/projects/Performance/chunduri/Software/Temp/darshan/darshan-util/pydarshan/examples/example-logs/dxt.darshan\")\n",
+    "report2.info()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "report2.records['DXT_POSIX'][0].keys()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Sometimes it is easier to visualize or transform data to get an overview:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# load prepared transformations\n",
+    "# might require: pip install pillow\n",
+    "from darshan.experimental.transforms.dxt2png import segment, wallclock\n",
+    "\n",
+    "report2.mod_read_all_dxt_records(\"DXT_POSIX\", dtype=\"dict\")  # need dict format for now\n",
+    "rec = report2.records['DXT_POSIX'][2]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "segment(rec)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "wallclock(rec)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from IPython.display import display, HTML\n",
+    "\n",
+    "report2.mod_read_all_dxt_records(\"DXT_POSIX\", dtype=\"pandas\") \n",
+    "\n",
+    "print(\"Write Segments:\")\n",
+    "display(report2.records['DXT_POSIX'][2]['write_segments'])\n",
+    "print(\"Read Segments:\")\n",
+    "display(report2.records['DXT_POSIX'][2]['read_segments'])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Exercise left for the reader ;P \n",
+    "Implement a custom aggregator/summary function and commit it as a contribution to pydarshan:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Create file: <darshan-repo>/darshan-util/pydarshan/darshan/experimental/aggregators/dxt_summary.py\n",
+    "from darshan.report import *\n",
+    "\n",
+    "def dxt_summary(self):\n",
+    "    \"\"\"\n",
+    "    Count records for every name record.\n",
+    "\n",
+    "    Args:\n",
+    "        mod_name (str): \n",
+    "\n",
+    "    Return:\n",
+    "        None\n",
+    "    \"\"\"\n",
+    "\n",
+    "    counts = {}\n",
+    "\n",
+    "    for mod, records in self.records.items():\n",
+    "        for rec in records:\n",
+    "            if rec['id'] not in counts:\n",
+    "                counts[rec['id']] = {'name': self.name_records[rec['id']], 'counts': {}}\n",
+    "\n",
+    "            if mod not in counts[rec['id']]['counts']:\n",
+    "                counts[rec['id']]['counts'][mod] = 1\n",
+    "            else:\n",
+    "                counts[rec['id']]['counts'][mod] += 1\n",
+    "\n",
+    "    return counts\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Exporting Data for Use in Third-Party Analysis\n",
+    "\n",
+    "Darshan logs may be used in contexts beyond our imagination. To make this effortless export in JSON is easy."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "import darshan\n",
+    "report = darshan.DarshanReport(\"example-logs/example.darshan\", read_all=True)\n",
+    "report.to_json()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Error Handling?\n",
+    "\n",
+    "Currently, playing with two modes, both have their pros and cons.\n",
+    "\n",
+    "Generally, should expose errors and let users handle them. At the same time, just skipping invalid load requests does little harm but greatly improves convenience.\n",
+    "\n",
+    "Could add a switch to enable disable these guards :/"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "report = darshan.DarshanReport(\"example-logs/example.darshan\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "report.mod_read_all_records(\"ABC\") # Expect KeyError"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "report.mod_read_all_dxt_records(\"ABC\") # Expect warning, but not exception"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}


=====================================
darshan-util/pydarshan/examples/apmpi_analysis.py
=====================================
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# # DarshanUtils for Python for processing APMPI records
+#
+# This notebook gives an overview of features provided by the Python bindings for DarshanUtils.
+
+# By default all APMPI module records, metadata, and the name records are loaded when opening a Darshan log:
+
+import argparse
+import darshan
+import cffi
+import numpy
+import pandas
+import matplotlib
+#import pprint
+import pandas as pd
+import logging
+
+from darshan.backend.cffi_backend import ffi
+
+logger = logging.getLogger(__name__)
+from darshan.report import DarshanReport
+import darshan.backend.cffi_backend as backend
+import darshan
+import pandas as pd
+import time
+'''
+from rich import print  as rprint
+from rich import pretty
+from rich.panel import Panel
+from rich import inspect
+from rich.color import Color
+from rich.console import Console
+console = Console()
+'''
+from matplotlib.backends.backend_pdf import FigureCanvasPdf, PdfPages
+from matplotlib.figure import Figure
+
+#pp = pprint.PrettyPrinter()
+#pretty.install()
+#color = Color.parse("blue")
+
+#inspect(color, methods=True)
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--quiet",
+        dest="quiet",
+        action="store_true",
+        default=False,
+        help="Surpress zero count calls",
+    )
+    parser.add_argument(
+        "logname", metavar="logname", type=str, nargs=1, help="Logname to parse"
+    )
+    args = parser.parse_args()
+
+    report = darshan.DarshanReport(args.logname[0], read_all=False)
+    report.info()
+    
+    if "APMPI" not in report.modules:
+        print("This log does not contain AutoPerf MPI data")
+        return
+    r = report.mod_read_all_apmpi_records("APMPI")
+    
+    report.update_name_records()
+    report.info()
+    
+    pdf = matplotlib.backends.backend_pdf.PdfPages("apmpi_output.pdf")
+
+    header_rec = report.records["APMPI"][0]
+    print("# darshan log version: ", header_rec["version"])
+    sync_flag = header_rec["sync_flag"]
+    print(
+        "APMPI Variance in total mpi time: ", header_rec["variance_total_mpitime"], "\n"
+    )
+    if sync_flag:
+        print(
+            "APMPI Variance in total mpi sync time: ",
+            header_rec["variance_total_mpisynctime"],
+        )
+
+    df_apmpi = pd.DataFrame()
+    list_mpiop = []
+    list_rank = []
+    for rec in report.records["APMPI"][
+        1:
+    ]:  # skip the first record which is header record
+        mpi_nonzero_callcount = []
+        for k, v in rec["all_counters"].items():
+            if k.endswith("_CALL_COUNT") and v > 0:
+                mpi_nonzero_callcount.append(k[: -(len("CALL_COUNT"))])
+
+        df_rank = pd.DataFrame()
+        for mpiop in mpi_nonzero_callcount:
+            ncall = mpiop
+            ncount = mpiop + "CALL_COUNT"
+            nsize = mpiop + "TOTAL_BYTES"
+            h0 = mpiop + "MSG_SIZE_AGG_0_256"
+            h1 = mpiop + "MSG_SIZE_AGG_256_1K"
+            h2 = mpiop + "MSG_SIZE_AGG_1K_8K"
+            h3 = mpiop + "MSG_SIZE_AGG_8K_256K"
+            h4 = mpiop + "MSG_SIZE_AGG_256K_1M"
+            h5 = mpiop + "MSG_SIZE_AGG_1M_PLUS"
+            ntime = mpiop + "TOTAL_TIME"
+            mintime = mpiop + "MIN_TIME"
+            maxtime = mpiop + "MAX_TIME"
+            if sync_flag:
+                totalsync = mpiop + "TOTAL_SYNC_TIME"
+
+            mpiopstat = {}
+            mpiopstat["Rank"] = rec["rank"]
+            mpiopstat["Node_ID"] = rec["node_name"]
+            mpiopstat["Call"] = ncall[:-1]
+            mpiopstat["Total_Time"] = rec["all_counters"][ntime]
+            mpiopstat["Count"] = rec["all_counters"][ncount]
+            mpiopstat["Total_Bytes"] = rec["all_counters"].get(nsize, None)
+            mpiopstat["[0-256B]"] = rec["all_counters"].get(h0, None)
+            mpiopstat["[256-1KB]"] = rec["all_counters"].get(h1, None)
+            mpiopstat["[1K-8KB]"] = rec["all_counters"].get(h2, None)
+            mpiopstat["[8K-256KB]"] = rec["all_counters"].get(h3, None)
+            mpiopstat["256K-1MB"] = rec["all_counters"].get(h4, None)
+            mpiopstat["[>1MB]"] = rec["all_counters"].get(h5, None)
+            mpiopstat["Min_Time"] = rec["all_counters"][mintime]
+            mpiopstat["Max_Time"] = rec["all_counters"][maxtime]
+            if sync_flag:
+                mpiopstat["Total_SYNC_Time"] = rec["all_counters"][totalsync]
+
+            list_mpiop.append(mpiopstat)
+        rankstat = {}
+        rankstat["Rank"] = rec["rank"]
+        rankstat["Node_ID"] = rec["node_name"]
+        rankstat["Call"] = "Total_MPI_time"
+        rankstat["Total_Time"] = rec["all_counters"]["RANK_TOTAL_MPITIME"]
+        list_rank.append(rankstat)
+    df_rank = pd.DataFrame(list_rank)
+    avg_total_time = df_rank["Total_Time"].mean()
+    max_total_time = df_rank["Total_Time"].max()
+    min_total_time = df_rank["Total_Time"].min()
+    max_rank = df_rank.loc[df_rank["Total_Time"].idxmax()]["Rank"]
+    min_rank = df_rank.loc[df_rank["Total_Time"].idxmin()]["Rank"]
+    # assumption: row index and rank id are same in df_rank 
+    # .. need to check if that is an incorrect assumption
+    mean_rank = (
+        (df_rank["Total_Time"] - df_rank["Total_Time"].mean()).abs().argsort()[:1][0]
+    )
+
+    list_combined = list_mpiop + list_rank
+    df_apmpi = pd.DataFrame(list_combined)
+    df_apmpi = df_apmpi.sort_values(by=["Rank", "Total_Time"], ascending=[True, False])
+    print("[bold green] MPI stats for rank with maximum MPI time")#, border_style="blue")
+    print("[bold green] MPI stats for rank with maximum MPI time\n", df_apmpi.loc[df_apmpi["Rank"] == max_rank])
+    print("[bold green] MPI stats for rank with minimum MPI time")# border_style="blue")
+    print(df_apmpi.loc[df_apmpi["Rank"] == min_rank])
+    print("[bold green] MPI stats for rank with mean MPI time")#, border_style="blue")
+    print(df_apmpi.loc[df_apmpi["Rank"] == mean_rank])
+    # print(df_apmpi)
+    df_apmpi.to_csv('apmpi.csv', index=False)
+    fig = Figure()
+    ax = fig.gca()
+    ax.plot(df_rank["Rank"], df_rank["Total_Time"])
+    ax.set_xlabel("Rank")
+    ax.set_ylabel("MPI Total time(s)")
+    canvas = FigureCanvasPdf(fig)
+    canvas.print_figure(pdf)
+    fig = Figure()
+    ax = fig.gca()
+    #fig2.plot(df_apmpi.loc[df_apmpi["Rank"] == max_rank])
+    ax.plot(df_apmpi.loc[df_apmpi["Rank"] == max_rank]["Call"], df_apmpi.loc[df_apmpi["Rank"] == max_rank]["Total_Time"])
+    ax.set_xlabel("MPI OP")
+    ax.set_ylabel("Total time(s)")
+    canvas = FigureCanvasPdf(fig)
+    #canvas.print_figure(pdf)
+    fig = Figure()
+    ax = fig.gca()
+    ax.plot(df_apmpi.loc[df_apmpi["Rank"] == min_rank]["Call"], df_apmpi.loc[df_apmpi["Rank"] == min_rank]["Total_Time"])
+    ax.set_xlabel("MPI OP")
+    ax.set_ylabel("Total time(s)")
+    ax.set_title("Min rank MPI times")
+    canvas = FigureCanvasPdf(fig)
+    #canvas.print_figure(pdf)
+    #fig3.plot(df_apmpi.loc[df_apmpi["Rank"] == min_rank])
+    return
+
+
+if __name__ == "__main__":
+    main()


=====================================
darshan-util/pydarshan/setup.py
=====================================
@@ -3,15 +3,18 @@
 
 from setuptools import setup, find_packages, Extension
 import sys
+import os
 
 
-with open('README.rst') as readme_file:
+with open("README.rst") as readme_file:
     readme = readme_file.read()
 
 
-requirements = ['cffi', 'numpy', 'pandas', 'matplotlib']
-setup_requirements = ['pytest-runner', ]
-test_requirements = ['pytest']
+requirements = ["cffi", "numpy", "pandas", "matplotlib"]
+setup_requirements = [
+    "pytest-runner",
+]
+test_requirements = ["pytest"]
 
 
 # NOTE: The Python C extension is currently only used to automate
@@ -21,41 +24,56 @@ test_requirements = ['pytest']
 # discoverable in the environment by means of LD_LIBRARY_PATH or 
 # pkg-config there is no need to build the extension.
 ext_modules = []
-if '--with-extension' in sys.argv:
-    ext_modules.append(Extension(
-        'darshan.extension',
-        #optional=True,
-        sources=['darshan/extension.c'],
-        include_dirs=['/usr/include'],
-        libraries=['darshan-util']
-        ))
-    sys.argv.remove('--with-extension')
+if "--with-extension" in sys.argv:
+    ext_modules.append(
+        Extension(
+            "darshan.extension",
+            # optional=True,
+            sources=["darshan/extension.c"],
+            include_dirs=["/usr/include"],
+            libraries=["darshan-util"],
+        )
+    )
+    sys.argv.remove("--with-extension")
+
+#
+# Find backend python files in modules and copy them into lib
+#
+for root, dirs, files in os.walk("../../modules"):
+    for f in files:
+        if f.endswith("-backend.py"):
+            fname = f.replace("-backend", "")
+            try:
+                os.symlink("../../" + os.path.join(root, f), f"darshan/backend/{fname}")
+            except:
+                pass
+            print("Adding {0} to backends.".format(os.path.join(root, f)))
 
 
 setup(
-    author='',
-    author_email='',
+    author="",
+    author_email="",
     classifiers=[
-        'Development Status :: 4 - Beta',
-        'Intended Audience :: Developers',
-        'Intended Audience :: Science/Research',
-        'Natural Language :: English',
-        'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.6',
-        'Programming Language :: Python :: 3.7',
-        'Programming Language :: Python :: 3.8',
-        'Programming Language :: Python :: 3.9'
+        "Development Status :: 4 - Beta",
+        "Intended Audience :: Developers",
+        "Intended Audience :: Science/Research",
+        "Natural Language :: English",
+        "Programming Language :: Python :: 3",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
     ],
     description="Python tools to interact with darshan log records of HPC applications.",
     long_description=readme,
-    ext_modules = ext_modules,  
+    ext_modules=ext_modules,
     install_requires=requirements,
     include_package_data=True,
-    keywords='darshan',
-    name='darshan',
-    packages=find_packages(include=['darshan*']),
+    keywords="darshan",
+    name="darshan",
+    packages=find_packages(include=["darshan*"]),
     setup_requires=setup_requirements,
-    test_suite='tests',
+    test_suite="tests",
     tests_require=test_requirements,
     url='https://www.mcs.anl.gov/research/projects/darshan/',
     version='0.0.8.4',


=====================================
modules/autoperf
=====================================
@@ -0,0 +1 @@
+Subproject commit 64d7448238bf7785815e803bf4096defce3a5f97



View it on GitLab: https://xgitlab.cels.anl.gov/darshan/darshan/-/compare/f1b80c21e6693eb8b2a153a2d5fc25f08f1058c0...59379bfb7d909731e15c43f77d45e915b995472d

-- 
View it on GitLab: https://xgitlab.cels.anl.gov/darshan/darshan/-/compare/f1b80c21e6693eb8b2a153a2d5fc25f08f1058c0...59379bfb7d909731e15c43f77d45e915b995472d
You're receiving this email because of your account on xgitlab.cels.anl.gov.


-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.mcs.anl.gov/pipermail/darshan-commits/attachments/20210329/52c2f5b3/attachment-0001.html>


More information about the Darshan-commits mailing list