[Darshan-commits] [Git][darshan/darshan][dev-no-mpi] 3 commits: port lustre mod to recent darshan changes

Shane Snyder xgitlab@cels.anl.gov
Tue Dec 10 15:56:35 CST 2019



Shane Snyder pushed to branch dev-no-mpi at darshan / darshan


Commits:
4fd1d6d3 by Shane Snyder at 2019-12-10T20:13:17Z
port lustre mod to recent darshan changes

- - - - -
146a6ccb by Shane Snyder at 2019-12-10T20:29:00Z
port hdf5 mod to recent darshan changes

- - - - -
c0e202d6 by Shane Snyder at 2019-12-10T21:55:25Z
only include ld-opts for modules we build

- - - - -


6 changed files:

- darshan-runtime/Makefile.in
- darshan-runtime/configure
- darshan-runtime/configure.in
- darshan-runtime/lib/darshan-hdf5.c
- darshan-runtime/lib/darshan-lustre.c
- darshan-runtime/share/ld-opts/darshan-base-ld-opts.in


Changes:

=====================================
darshan-runtime/Makefile.in
=====================================
@@ -248,16 +248,24 @@ endif
 	install -m 755 share/mpi-profile/darshan-bg-f.conf $(datarootdir)/mpi-profile/darshan-bg-f.conf
 	install -d $(datarootdir)/ld-opts
 	install -m 644 share/ld-opts/darshan-base-ld-opts $(datarootdir)/ld-opts/darshan-base-ld-opts
+ifdef BUILD_POSIX_MODULE
 	install -m 644 $(srcdir)/share/ld-opts/darshan-posix-ld-opts $(datarootdir)/ld-opts/darshan-posix-ld-opts
+endif
+ifdef BUILD_STDIO_MODULE
+	install -m 644 $(srcdir)/share/ld-opts/darshan-stdio-ld-opts $(datarootdir)/ld-opts/darshan-stdio-ld-opts
+endif
+ifdef BUILD_MPIIO_MODULE
+	install -m 644 $(srcdir)/share/ld-opts/darshan-mpiio-ld-opts $(datarootdir)/ld-opts/darshan-mpiio-ld-opts
+endif
+ifdef BUILD_PNETCDF_MODULE
+	install -m 644 $(srcdir)/share/ld-opts/darshan-pnetcdf-ld-opts $(datarootdir)/ld-opts/darshan-pnetcdf-ld-opts
+endif
 ifdef BUILD_HDF5_MODULE
 	install -m 644 $(srcdir)/share/ld-opts/darshan-hdf5-ld-opts $(datarootdir)/ld-opts/darshan-hdf5-ld-opts
 endif
 ifdef BUILD_MDHIM_MODULE
 	install -m 644 $(srcdir)/share/ld-opts/darshan-mdhim-ld-opts $(datarootdir)/ld-opts/darshan-mdhim-ld-opts
 endif
-	install -m 644 $(srcdir)/share/ld-opts/darshan-pnetcdf-ld-opts $(datarootdir)/ld-opts/darshan-pnetcdf-ld-opts
-	install -m 644 $(srcdir)/share/ld-opts/darshan-stdio-ld-opts $(datarootdir)/ld-opts/darshan-stdio-ld-opts
-	install -m 644 $(srcdir)/share/ld-opts/darshan-mpiio-ld-opts $(datarootdir)/ld-opts/darshan-mpiio-ld-opts
 ifdef ENABLE_MMAP_LOGS
 	install -m 755 share/darshan-mmap-epilog.sh $(datarootdir)/darshan-mmap-epilog.sh
 endif

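Note: the ld-opts install rules are now guarded the same way the HDF5 and MDHIM ones already were, so "make install" only copies option files for modules that configure actually enabled. A minimal sketch of how such a conditional is presumably driven (the @BUILD_POSIX_MODULE@ substitution is an assumption about darshan-runtime/Makefile.in; only the ifdef itself appears in the diff):

    # Hypothetical Makefile fragment: configure substitutes "1" or an empty
    # value, and GNU make's ifdef skips the install line when it is empty.
    BUILD_POSIX_MODULE = @BUILD_POSIX_MODULE@

    install-ld-opts:
    	install -d $(datarootdir)/ld-opts
    ifdef BUILD_POSIX_MODULE
    	install -m 644 $(srcdir)/share/ld-opts/darshan-posix-ld-opts \
    		$(datarootdir)/ld-opts/darshan-posix-ld-opts
    endif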

=====================================
darshan-runtime/configure
=====================================
@@ -679,7 +679,6 @@ infodir
 docdir
 oldincludedir
 includedir
-runstatedir
 localstatedir
 sharedstatedir
 sysconfdir
@@ -775,7 +774,6 @@ datadir='${datarootdir}'
 sysconfdir='${prefix}/etc'
 sharedstatedir='${prefix}/com'
 localstatedir='${prefix}/var'
-runstatedir='${localstatedir}/run'
 includedir='${prefix}/include'
 oldincludedir='/usr/include'
 docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
@@ -1028,15 +1026,6 @@ do
   | -silent | --silent | --silen | --sile | --sil)
     silent=yes ;;
 
-  -runstatedir | --runstatedir | --runstatedi | --runstated \
-  | --runstate | --runstat | --runsta | --runst | --runs \
-  | --run | --ru | --r)
-    ac_prev=runstatedir ;;
-  -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
-  | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
-  | --run=* | --ru=* | --r=*)
-    runstatedir=$ac_optarg ;;
-
   -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
     ac_prev=sbindir ;;
   -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
@@ -1174,7 +1163,7 @@ fi
 for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
 		datadir sysconfdir sharedstatedir localstatedir includedir \
 		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
-		libdir localedir mandir runstatedir
+		libdir localedir mandir
 do
   eval ac_val=\$$ac_var
   # Remove trailing slashes.
@@ -1327,7 +1316,6 @@ Fine tuning of the installation directories:
   --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
   --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
   --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
-  --runstatedir=DIR       modifiable per-process data [LOCALSTATEDIR/run]
   --libdir=DIR            object code libraries [EPREFIX/lib]
   --includedir=DIR        C header files [PREFIX/include]
   --oldincludedir=DIR     C header files for non-gcc [/usr/include]
@@ -2304,6 +2292,25 @@ test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
 test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
 
 
+# We need to know the value of the $libdir and $bindir variables so that
+# we can reference the correct path in the darshan compiler wrappers.
+# Unfortunately, those two variables are not normally evaluated by autoconf.
+# They are evaluated at build time using Makefile variable substitutions.
+#
+# The following logic was copied from mpich2 1.3.1 to resolve the $libdir
+# variable at configure time.
+#
+# Temporarily replace the default NONE value for exec_prefix
+# and prefix with the actual, default values.
+savePrefix=$prefix
+saveExecprefix=$exec_prefix
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+test "x$exec_prefix" = xNONE && exec_prefix=$prefix
+eval darshan_lib_path=$libdir
+eval darshan_share_path=$datarootdir
+prefix=$savePrefix
+exec_prefix=$saveExecprefix
+
 
 # Check whether --with-mpi was given.
 if test "${with_mpi+set}" = set; then :
@@ -4102,6 +4109,7 @@ fi
 
 # HDF5 module
 BUILD_HDF5_MODULE=
+BUILD_HDF5_POST110=
 DARSHAN_HDF5_LD_OPTS=
 
 # see if user explicitly enabled support for an HDF5 API
@@ -4115,6 +4123,7 @@ $as_echo "#define __DARSHAN_ENABLE_HDF5110 1" >>confdefs.h
         as_fn_error $? "Cannot set both --enable-HDF5-post-1.10 and --enable-HDF5-pre-1.10" "$LINENO" 5
     fi
     BUILD_HDF5_MODULE=1
+    BUILD_HDF5_POST110=1
     DARSHAN_HDF5_LD_OPTS="@${darshan_share_path}/ld-opts/darshan-hdf5-ld-opts"
 fi
 
@@ -4635,25 +4644,6 @@ fi
 done
 
 
-# We need to know the value of the $libdir and $bindir variables so that
-# we can reference the correct path in the darshan compiler wrappers.
-# Unfortunately, those two variables are not normally evaluated by autoconf.
-# They are evaluated at build time using Makefile variable substitutions.
-#
-# The following logic was copied from mpich2 1.3.1 to resolve the $libdir
-# variable at configure time.
-#
-# Temporarily replace the default NONE value for exec_prefix
-# and prefix with the actual, default values.
-savePrefix=$prefix
-saveExecprefix=$exec_prefix
-test "x$prefix" = xNONE && prefix=$ac_default_prefix
-test "x$exec_prefix" = xNONE && exec_prefix=$prefix
-eval darshan_lib_path=$libdir
-eval darshan_share_path=$datarootdir
-prefix=$savePrefix
-exec_prefix=$saveExecprefix
-
 #
 # Begin tests for MPI-enabled builds
 #
@@ -6202,7 +6192,7 @@ fi
 if test "x$BUILD_HDF5_MODULE" = "x"; then :
   { $as_echo "$as_me:${as_lineno-$LINENO}: HDF5 module support:    no" >&5
 $as_echo "$as_me: HDF5 module support:    no" >&6;}
-elif if test "x$BUILD_HDF5_PRE110" = "x1"; then :
+elif if test "x$BUILD_HDF5_POST110" = "x1"; then :
   { $as_echo "$as_me:${as_lineno-$LINENO}: HDF5 module support:    1.10+" >&5
 $as_echo "$as_me: HDF5 module support:    1.10+" >&6;}
 else


=====================================
darshan-runtime/configure.in
=====================================
@@ -13,6 +13,25 @@ AC_CONFIG_HEADER(darshan-runtime-config.h)
 
 AC_PROG_INSTALL
 
+# We need to know the value of the $libdir and $bindir variables so that
+# we can reference the correct path in the darshan compiler wrappers.
+# Unfortunately, those two variables are not normally evaluated by autoconf.
+# They are evaluated at build time using Makefile variable substitutions.
+#
+# The following logic was copied from mpich2 1.3.1 to resolve the $libdir
+# variable at configure time.
+#
+# Temporarily replace the default NONE value for exec_prefix
+# and prefix with the actual, default values.
+savePrefix=$prefix
+saveExecprefix=$exec_prefix
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+test "x$exec_prefix" = xNONE && exec_prefix=$prefix
+eval darshan_lib_path=$libdir
+eval darshan_share_path=$datarootdir
+prefix=$savePrefix
+exec_prefix=$saveExecprefix
+
 dnl Check for MPI
 AC_ARG_WITH(mpi,
             [AS_HELP_STRING([--without-mpi], [Build without support for MPI])],
@@ -211,6 +230,7 @@ AS_IF([test "x$enable_pnetcdf_mod" = "xno" || test "x$ENABLE_MPI" = "x"],
 
 # HDF5 module
 BUILD_HDF5_MODULE=
+BUILD_HDF5_POST110=
 DARSHAN_HDF5_LD_OPTS=
 
 # see if user explicitly enabled support for an HDF5 API
@@ -223,6 +243,7 @@ AC_ARG_ENABLE(HDF5-post-1.10,
         AC_MSG_ERROR([Cannot set both --enable-HDF5-post-1.10 and --enable-HDF5-pre-1.10])
     fi
     BUILD_HDF5_MODULE=1
+    BUILD_HDF5_POST110=1
     DARSHAN_HDF5_LD_OPTS="@${darshan_share_path}/ld-opts/darshan-hdf5-ld-opts"
 fi]
 ,)
@@ -365,25 +386,6 @@ CFLAGS="$old_cflags"
 
 AC_CHECK_HEADERS(mntent.h sys/mount.h)
 
-# We need to know the value of the $libdir and $bindir variables so that
-# we can reference the correct path in the darshan compiler wrappers.
-# Unfortunately, those two variables are not normally evaluated by autoconf.
-# They are evaluated at build time using Makefile variable substitutions.
-#
-# The following logic was copied from mpich2 1.3.1 to resolve the $libdir
-# variable at configure time.
-#
-# Temporarily replace the default NONE value for exec_prefix
-# and prefix with the actual, default values.
-savePrefix=$prefix
-saveExecprefix=$exec_prefix
-test "x$prefix" = xNONE && prefix=$ac_default_prefix
-test "x$exec_prefix" = xNONE && exec_prefix=$prefix
-eval darshan_lib_path=$libdir
-eval darshan_share_path=$datarootdir
-prefix=$savePrefix
-exec_prefix=$saveExecprefix
-
 #
 # Begin tests for MPI-enabled builds
 #
@@ -535,7 +537,7 @@ AS_IF(
     [test "x$BUILD_HDF5_MODULE" = "x"],
     [AC_MSG_NOTICE(HDF5 module support:    no)],
     [AS_IF(
-        [test "x$BUILD_HDF5_PRE110" = "x1"],
+        [test "x$BUILD_HDF5_POST110" = "x1"],
         [AC_MSG_NOTICE(HDF5 module support:    1.10+)],
         [AC_MSG_NOTICE(HDF5 module support:    pre-1.10)]
     )],

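Note: the $libdir/$datarootdir evaluation block is unchanged, it only moved ahead of the module checks. The per-module DARSHAN_*_LD_OPTS assignments (e.g. DARSHAN_HDF5_LD_OPTS above) expand ${darshan_share_path}, so that path has to be resolved before the module checks run. A minimal stand-alone sketch of the eval idiom, assuming stock autoconf defaults and a made-up prefix:

    prefix=/usr/local                # stand-in for the resolved --prefix
    exec_prefix=$prefix              # NONE already replaced, as in the diff
    datarootdir='${prefix}/share'    # autoconf leaves this unexpanded
    eval darshan_share_path=$datarootdir
    echo "$darshan_share_path"       # prints /usr/local/share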

=====================================
darshan-runtime/lib/darshan-hdf5.c
=====================================
@@ -60,14 +60,17 @@ static void hdf5_runtime_initialize(
     void);
 static struct hdf5_file_record_ref *hdf5_track_new_file_record(
     darshan_record_id rec_id, const char *path);
-static void hdf5_record_reduction_op(
-    void* infile_v, void* inoutfile_v, int *len, MPI_Datatype *datatype);
 static void hdf5_cleanup_runtime(
     void);
-
+#ifdef HAVE_MPI
+static void hdf5_record_reduction_op(
+    void* infile_v, void* inoutfile_v, int *len, MPI_Datatype *datatype);
+static void hdf5_mpi_redux(
+    void *hdf5_buf, MPI_Comm mod_comm,
+    darshan_record_id *shared_recs, int shared_rec_count);
+#endif
 static void hdf5_shutdown(
-    MPI_Comm mod_comm, darshan_record_id *shared_recs,
-    int shared_rec_count, void **hdf5_buf, int *hdf5_buf_sz);
+    void **hdf5_buf, int *hdf5_buf_sz);
 
 static struct hdf5_runtime *hdf5_runtime = NULL;
 static pthread_mutex_t hdf5_runtime_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
@@ -257,6 +260,12 @@ herr_t DARSHAN_DECL(H5Fclose)(hid_t file_id)
 static void hdf5_runtime_initialize()
 {
     int hdf5_buf_size;
+    darshan_module_funcs mod_funcs = {
+#ifdef HAVE_MPI
+    .mod_redux_func = &hdf5_mpi_redux,
+#endif
+    .mod_shutdown_func = &hdf5_shutdown
+    };
 
     /* try and store the default number of records for this module */
     hdf5_buf_size = DARSHAN_DEF_MOD_REC_COUNT * sizeof(struct darshan_hdf5_file);
@@ -264,7 +273,7 @@ static void hdf5_runtime_initialize()
     /* register hdf5 module with darshan-core */
     darshan_core_register_module(
         DARSHAN_HDF5_MOD,
-        &hdf5_shutdown,
+        mod_funcs,
         &hdf5_buf_size,
         &my_rank,
         NULL);
@@ -335,6 +344,18 @@ static struct hdf5_file_record_ref *hdf5_track_new_file_record(
     return(rec_ref);
 }
 
+static void hdf5_cleanup_runtime()
+{
+    darshan_clear_record_refs(&(hdf5_runtime->hid_hash), 0);
+    darshan_clear_record_refs(&(hdf5_runtime->rec_id_hash), 1);
+
+    free(hdf5_runtime);
+    hdf5_runtime = NULL;
+
+    return;
+}
+
+#ifdef HAVE_MPI
 static void hdf5_record_reduction_op(void* infile_v, void* inoutfile_v,
     int *len, MPI_Datatype *datatype)
 {
@@ -384,32 +405,22 @@ static void hdf5_record_reduction_op(void* infile_v, void* inoutfile_v,
 
     return;
 }
-
-static void hdf5_cleanup_runtime()
-{
-    darshan_clear_record_refs(&(hdf5_runtime->hid_hash), 0);
-    darshan_clear_record_refs(&(hdf5_runtime->rec_id_hash), 1);
-
-    free(hdf5_runtime);
-    hdf5_runtime = NULL;
-
-    return;
-}
+#endif
 
 /************************************************************************
  * Functions exported by HDF5 module for coordinating with darshan-core *
  ************************************************************************/
 
-static void hdf5_shutdown(
+#ifdef HAVE_MPI
+static void hdf5_mpi_redux(
+    void *hdf5_buf,
     MPI_Comm mod_comm,
     darshan_record_id *shared_recs,
-    int shared_rec_count,
-    void **hdf5_buf,
-    int *hdf5_buf_sz)
+    int shared_rec_count)
 {
+    int hdf5_rec_count;
     struct hdf5_file_record_ref *rec_ref;
     struct darshan_hdf5_file *hdf5_rec_buf = *(struct darshan_hdf5_file **)hdf5_buf;
-    int hdf5_rec_count;
     struct darshan_hdf5_file *red_send_buf = NULL;
     struct darshan_hdf5_file *red_recv_buf = NULL;
     MPI_Datatype red_type;
@@ -419,81 +430,88 @@ static void hdf5_shutdown(
     HDF5_LOCK();
     assert(hdf5_runtime);
 
-    hdf5_rec_count = hdf5_runtime->file_rec_count;
+    /* necessary initialization of shared records */
+    for(i = 0; i < shared_rec_count; i++)
+    {
+        rec_ref = darshan_lookup_record_ref(hdf5_runtime->rec_id_hash,
+            &shared_recs[i], sizeof(darshan_record_id));
+        assert(rec_ref);
+
+        rec_ref->file_rec->base_rec.rank = -1;
+    }
 
-    /* if there are globally shared files, do a shared file reduction */
-    /* NOTE: the shared file reduction is also skipped if the 
-     * DARSHAN_DISABLE_SHARED_REDUCTION environment variable is set.
+    /* sort the array of records so we get all of the shared records
+     * (marked by rank -1) in a contiguous portion at end of the array
      */
-    if(shared_rec_count && !getenv("DARSHAN_DISABLE_SHARED_REDUCTION"))
+    darshan_record_sort(hdf5_rec_buf, hdf5_rec_count,
+        sizeof(struct darshan_hdf5_file));
+
+    /* make *send_buf point to the shared files at the end of sorted array */
+    red_send_buf = &(hdf5_rec_buf[hdf5_rec_count-shared_rec_count]);
+
+    /* allocate memory for the reduction output on rank 0 */
+    if(my_rank == 0)
     {
-        /* necessary initialization of shared records */
-        for(i = 0; i < shared_rec_count; i++)
+        red_recv_buf = malloc(shared_rec_count * sizeof(struct darshan_hdf5_file));
+        if(!red_recv_buf)
         {
-            rec_ref = darshan_lookup_record_ref(hdf5_runtime->rec_id_hash,
-                &shared_recs[i], sizeof(darshan_record_id));
-            assert(rec_ref);
-
-            rec_ref->file_rec->base_rec.rank = -1;
+            HDF5_UNLOCK();
+            return;
         }
+    }
 
-        /* sort the array of records so we get all of the shared records
-         * (marked by rank -1) in a contiguous portion at end of the array
-         */
-        darshan_record_sort(hdf5_rec_buf, hdf5_rec_count,
-            sizeof(struct darshan_hdf5_file));
+    /* construct a datatype for a HDF5 file record.  This is serving no purpose
+     * except to make sure we can do a reduction on proper boundaries
+     */
+    PMPI_Type_contiguous(sizeof(struct darshan_hdf5_file),
+        MPI_BYTE, &red_type);
+    PMPI_Type_commit(&red_type);
 
-        /* make *send_buf point to the shared files at the end of sorted array */
-        red_send_buf = &(hdf5_rec_buf[hdf5_rec_count-shared_rec_count]);
+    /* register a HDF5 file record reduction operator */
+    PMPI_Op_create(hdf5_record_reduction_op, 1, &red_op);
 
-        /* allocate memory for the reduction output on rank 0 */
-        if(my_rank == 0)
-        {
-            red_recv_buf = malloc(shared_rec_count * sizeof(struct darshan_hdf5_file));
-            if(!red_recv_buf)
-            {
-                HDF5_UNLOCK();
-                return;
-            }
-        }
+    /* reduce shared HDF5 file records */
+    PMPI_Reduce(red_send_buf, red_recv_buf,
+        shared_rec_count, red_type, red_op, 0, mod_comm);
 
-        /* construct a datatype for a HDF5 file record.  This is serving no purpose
-         * except to make sure we can do a reduction on proper boundaries
-         */
-        PMPI_Type_contiguous(sizeof(struct darshan_hdf5_file),
-            MPI_BYTE, &red_type);
-        PMPI_Type_commit(&red_type);
+    /* clean up reduction state */
+    if(my_rank == 0)
+    {
+        int tmp_ndx = hdf5_rec_count - shared_rec_count;
+        memcpy(&(hdf5_rec_buf[tmp_ndx]), red_recv_buf,
+            shared_rec_count * sizeof(struct darshan_hdf5_file));
+        free(red_recv_buf);
+    }
+    else
+    {
+        hdf5_rec_count -= shared_rec_count;
+    }
 
-        /* register a HDF5 file record reduction operator */
-        PMPI_Op_create(hdf5_record_reduction_op, 1, &red_op);
+    PMPI_Type_free(&red_type);
+    PMPI_Op_free(&red_op);
 
-        /* reduce shared HDF5 file records */
-        PMPI_Reduce(red_send_buf, red_recv_buf,
-            shared_rec_count, red_type, red_op, 0, mod_comm);
+    HDF5_UNLOCK();
+    return;
+}
+#endif
 
-        /* clean up reduction state */
-        if(my_rank == 0)
-        {
-            int tmp_ndx = hdf5_rec_count - shared_rec_count;
-            memcpy(&(hdf5_rec_buf[tmp_ndx]), red_recv_buf,
-                shared_rec_count * sizeof(struct darshan_hdf5_file));
-            free(red_recv_buf);
-        }
-        else
-        {
-            hdf5_rec_count -= shared_rec_count;
-        }
+static void hdf5_shutdown(
+    void **hdf5_buf,
+    int *hdf5_buf_sz)
+{
+    int hdf5_rec_count;
 
-        PMPI_Type_free(&red_type);
-        PMPI_Op_free(&red_op);
-    }
+    HDF5_LOCK();
+    assert(hdf5_runtime);
 
-    /* update output buffer size to account for shared file reduction */
-    *hdf5_buf_sz = hdf5_rec_count * sizeof(struct darshan_hdf5_file);
+    hdf5_rec_count = hdf5_runtime->file_rec_count;
 
     /* shutdown internal structures used for instrumenting */
     hdf5_cleanup_runtime();
 
+    /* update output buffer size to account for shared file reduction */
+    *hdf5_buf_sz = hdf5_rec_count * sizeof(struct darshan_hdf5_file);
+
     HDF5_UNLOCK();
     return;
 }

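Note: this is the core of the dev-no-mpi port. Instead of registering a single MPI-flavored shutdown callback, the module now hands darshan-core a darshan_module_funcs struct whose reduction member is only populated when HAVE_MPI is defined. A compilable sketch of the pattern (the struct layout and field names come from the diff; every other type and function name below is a stand-in, not Darshan's real API):

    /* mod_funcs_sketch.c -- illustrative only */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t darshan_record_id;   /* stand-in */
    typedef int fake_MPI_Comm;            /* stand-in for MPI_Comm */

    typedef struct {
        void (*mod_redux_func)(void *buf, fake_MPI_Comm comm,
                               darshan_record_id *shared_recs, int shared_rec_count);
        void (*mod_shutdown_func)(void **buf, int *buf_sz);
    } darshan_module_funcs;

    static void my_mod_redux(void *buf, fake_MPI_Comm comm,
                             darshan_record_id *shared_recs, int shared_rec_count)
    {
        /* collapse records shared across ranks (only meaningful with MPI) */
        (void)buf; (void)comm; (void)shared_recs; (void)shared_rec_count;
    }

    static void my_mod_shutdown(void **buf, int *buf_sz)
    {
        /* finalize the record buffer; runs in both MPI and non-MPI builds */
        (void)buf; *buf_sz = 0;
    }

    int main(void)
    {
        darshan_module_funcs mod_funcs = {
    #ifdef HAVE_MPI
            .mod_redux_func = &my_mod_redux,
    #endif
            .mod_shutdown_func = &my_mod_shutdown
        };
        /* a real module passes mod_funcs to darshan_core_register_module() */
        printf("reduction callback registered: %s\n",
               mod_funcs.mod_redux_func ? "yes" : "no");
        (void)my_mod_redux;   /* silence unused warning in non-MPI builds */
        return 0;
    }

Compiling the sketch with and without -DHAVE_MPI shows the reduction slot appearing and disappearing while the shutdown path stays intact, which is the same split applied to hdf5_mpi_redux/hdf5_shutdown above and to the Lustre module below.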

=====================================
darshan-runtime/lib/darshan-lustre.c
=====================================
@@ -35,9 +35,13 @@ static int lustre_record_compare(
 int sort_lustre_records(
     void);
 
+#ifdef HAVE_MPI
+static void lustre_mpi_redux(
+    void *lustre_buf, MPI_Comm mod_comm,
+    darshan_record_id *shared_recs, int shared_rec_count);
+#endif
 static void lustre_shutdown(
-    MPI_Comm mod_comm, darshan_record_id *shared_recs,
-    int shared_rec_count, void **lustre_buf, int *lustre_buf_sz);
+    void **lustre_buf, int *lustre_buf_sz);
 
 struct lustre_runtime *lustre_runtime = NULL;
 static pthread_mutex_t lustre_runtime_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
@@ -49,6 +53,7 @@ static int my_rank = -1;
 #ifndef LOV_MAX_STRIPE_COUNT /* for Lustre < 2.4 */
     #define LOV_MAX_STRIPE_COUNT 2000
 #endif
+
 void darshan_instrument_lustre_file(const char* filepath, int fd)
 {
     struct lustre_record_ref *rec_ref;
@@ -177,6 +182,13 @@ void darshan_instrument_lustre_file(const char* filepath, int fd)
 static void lustre_runtime_initialize()
 {
     int lustre_buf_size;
+    darshan_module_funcs mod_funcs = {
+#ifdef HAVE_MPI
+        .mod_redux_func = &lustre_mpi_redux,
+#endif
+        .mod_shutdown_func = &lustre_shutdown
+        };
+
 
     /* try and store a default number of records for this module, assuming
      * each file uses 64 OSTs
@@ -186,7 +198,7 @@ static void lustre_runtime_initialize()
     /* register the lustre module with darshan-core */
     darshan_core_register_module(
         DARSHAN_LUSTRE_MOD,
-        &lustre_shutdown,
+        mod_funcs,
         &lustre_buf_size,
         &my_rank,
         NULL);
@@ -215,12 +227,12 @@ static void lustre_runtime_initialize()
  * Functions exported by Lustre module for coordinating with darshan-core *
  **************************************************************************/
 
-static void lustre_shutdown(
+#ifdef HAVE_MPI
+static void lustre_mpi_redux(
+    void *posix_buf,
     MPI_Comm mod_comm,
     darshan_record_id *shared_recs,
-    int shared_rec_count,
-    void **lustre_buf,
-    int *lustre_buf_sz)
+    int shared_rec_count)
 {
     struct lustre_record_ref *rec_ref;
     int i;
@@ -228,9 +240,6 @@ static void lustre_shutdown(
     LUSTRE_LOCK();
     assert(lustre_runtime);
 
-    lustre_runtime->record_buffer = *lustre_buf;
-    lustre_runtime->record_buffer_size = *lustre_buf_sz;
-
     /* if there are globally shared files, do a shared file reduction */
     /* NOTE: the shared file reduction is also skipped if the 
      * DARSHAN_DISABLE_SHARED_REDUCTION environment variable is set.
@@ -253,21 +262,36 @@ static void lustre_shutdown(
             else
                 darshan_core_fprintf(stderr, "WARNING: unexpected condition in Darshan, possibly triggered by memory corruption.  Darshan log may be incorrect.\n");
         }
+    }
 
-        /* sort the array of files descending by rank so that we get all of the 
-         * shared files (marked by rank -1) in a contiguous portion at end 
-         * of the array
-         */
-        sort_lustre_records();
+    LUSTRE_UNLOCK();
+    return;
+}
+#endif
 
-        /* simply drop all shared records from the end of the record array on
-         * non-root ranks simply by recalculating the size of the buffer
-         */
-        if (my_rank != 0)
-        {
-            darshan_iter_record_refs(lustre_runtime->record_id_hash, 
-                &lustre_subtract_shared_rec_size);
-        }
+static void lustre_shutdown(
+    void **lustre_buf,
+    int *lustre_buf_sz)
+{
+    LUSTRE_LOCK();
+    assert(lustre_runtime);
+
+    lustre_runtime->record_buffer = *lustre_buf;
+    lustre_runtime->record_buffer_size = *lustre_buf_sz;
+
+    /* sort the array of files descending by rank so that we get all of the 
+     * shared files (marked by rank -1) in a contiguous portion at end 
+     * of the array
+     */
+    sort_lustre_records();
+
+    /* simply drop all shared records from the end of the record array on
+     * non-root ranks simply by recalculating the size of the buffer
+     */
+    if (my_rank != 0)
+    {
+        darshan_iter_record_refs(lustre_runtime->record_id_hash, 
+            &lustre_subtract_shared_rec_size);
     }
 
     /* modify output buffer size to account for any shared records that were removed */


=====================================
darshan-runtime/share/ld-opts/darshan-base-ld-opts.in
=====================================
@@ -7,9 +7,9 @@
 --wrap=PMPI_Init
 --wrap=PMPI_Init_thread
 --wrap=PMPI_Finalize
-@@darshan_share_path@/ld-opts/darshan-posix-ld-opts
-@@darshan_share_path@/ld-opts/darshan-pnetcdf-ld-opts
-@@darshan_share_path@/ld-opts/darshan-stdio-ld-opts
-@@darshan_share_path@/ld-opts/darshan-mpiio-ld-opts
+@DARSHAN_POSIX_LD_OPTS@
+@DARSHAN_STDIO_LD_OPTS@
+@DARSHAN_MPIIO_LD_OPTS@
+@DARSHAN_PNETCDF_LD_OPTS@
 @DARSHAN_HDF5_LD_OPTS@
 @DARSHAN_MDHIM_LD_OPTS@

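Note: with the hard-coded @-includes replaced by configure substitutions, a module that is not built simply contributes an empty line to the installed darshan-base-ld-opts instead of referencing an option file that was never installed. A hypothetical rendering of the tail of the installed file, assuming --prefix=/usr/local with the PnetCDF, HDF5 and MDHIM modules disabled:

    --wrap=PMPI_Init
    --wrap=PMPI_Init_thread
    --wrap=PMPI_Finalize
    @/usr/local/share/ld-opts/darshan-posix-ld-opts
    @/usr/local/share/ld-opts/darshan-stdio-ld-opts
    @/usr/local/share/ld-opts/darshan-mpiio-ld-opts

(the three disabled modules leave blank lines where their @-includes would otherwise appear)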


View it on GitLab: https://xgitlab.cels.anl.gov/darshan/darshan/compare/2d3f980d516bf52a3edf87671f6ddfb9f5962801...c0e202d699e382a9516eac42dca3e0592137cf45
