[Darshan-commits] [Git][darshan/darshan][master] 7 commits: Darshan Wrapper fixes for Spectrum/OpenMPI 2
Shane Snyder
xgitlab at cels.anl.gov
Fri Oct 13 10:35:53 CDT 2017
Shane Snyder pushed to branch master at darshan / darshan
Commits:
4727d0a1 by cz7 at 2017-09-18T11:27:25-04:00
Darshan Wrapper fixes for Spectrum/OpenMPI 2
- - - - -
1dabc539 by Phil Carns at 2017-09-19T11:21:16-04:00
refactor LD_PRELOAD MPI wrappers
- match convention of other modules where possible
- avoid trampoline for non-PMPI calls
- resolve underlying symbols at run time
- use symbol name directly in wrapper
- - - - -
fc3d349d by Phil Carns at 2017-09-19T11:50:53-04:00
add ld opts for static wrapping of mpiio
- - - - -
910486f8 by Shane Snyder at 2017-10-04T14:39:23+00:00
changes to get new wrappers working for static builds
- - - - -
cac9a0e6 by Shane Snyder at 2017-10-04T11:28:07-05:00
need to declare PMPI wrappers after MPI wrappers
- - - - -
ccddff36 by Shane Snyder at 2017-10-04T16:51:38+00:00
bug fix in mpi-io wrapper prototype
- - - - -
48dbd2e8 by Shane Snyder at 2017-10-13T10:22:50-05:00
Merge branch 'snyder-wrap-mod'
- - - - -
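Taken together, the refactor in 1dabc539 drops the DARSHAN_MPI_CALL() trampoline (and the up-front resolve_mpi_symbols() pass in the LD_PRELOAD case) in favor of per-wrapper, first-use symbol resolution. A minimal before/after sketch of a single call site, using forms that appear verbatim in the diffs below:

    /* before: every PMPI symbol resolved eagerly, then called through the trampoline */
    ret = DARSHAN_MPI_CALL(PMPI_File_open)(comm, filename, amode, info, fh);

    /* after: each wrapper resolves its own underlying symbol on first use */
    MAP_OR_FAIL(PMPI_File_open);
    ret = __real_PMPI_File_open(comm, filename, amode, info, fh);
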
12 changed files:
- darshan-runtime/Makefile.in
- darshan-runtime/darshan.h
- darshan-runtime/lib/darshan-bgq.c
- darshan-runtime/lib/darshan-core-init-finalize.c
- darshan-runtime/lib/darshan-core.c
- darshan-runtime/lib/darshan-hdf5.c
- darshan-runtime/lib/darshan-mpiio.c
- darshan-runtime/lib/darshan-pnetcdf.c
- darshan-runtime/lib/darshan-posix.c
- darshan-runtime/lib/darshan-stdio.c
- darshan-runtime/share/ld-opts/darshan-base-ld-opts.in
- + darshan-runtime/share/ld-opts/darshan-mpiio-ld-opts
Changes:
=====================================
darshan-runtime/Makefile.in
=====================================
--- a/darshan-runtime/Makefile.in
+++ b/darshan-runtime/Makefile.in
@@ -199,6 +199,7 @@ ifdef BUILD_HDF5_MODULE
endif
install -m 644 $(srcdir)/share/ld-opts/darshan-pnetcdf-ld-opts $(datarootdir)/ld-opts/darshan-pnetcdf-ld-opts
install -m 644 $(srcdir)/share/ld-opts/darshan-stdio-ld-opts $(datarootdir)/ld-opts/darshan-stdio-ld-opts
+ install -m 644 $(srcdir)/share/ld-opts/darshan-mpiio-ld-opts $(datarootdir)/ld-opts/darshan-mpiio-ld-opts
ifdef ENABLE_MMAP_LOGS
install -m 755 share/darshan-mmap-epilog.sh $(datarootdir)/darshan-mmap-epilog.sh
endif
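
The newly installed darshan-mpiio-ld-opts file (its contents are not shown in this hunk) carries the linker options used for static wrapping of the MPI-IO symbols, per commit fc3d349d. For readers unfamiliar with the mechanism these option files feed, here is a standalone illustration of GNU ld's --wrap option with a hypothetical symbol (not Darshan code):

    /* build with: cc main.c wrap.c -Wl,--wrap=work
     * --wrap=work makes references to work() resolve to __wrap_work(),
     * while references to __real_work() resolve to the original work(). */
    int __real_work(int x);        /* bound to the real work() by the linker */

    int __wrap_work(int x)         /* intercepts every call to work() */
    {
        /* ...instrumentation would go here... */
        return __real_work(x);
    }
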
=====================================
darshan-runtime/darshan.h
=====================================
--- a/darshan-runtime/darshan.h
+++ b/darshan-runtime/darshan.h
@@ -28,15 +28,27 @@
#define DARSHAN_DECL(__func) __func
-#define DARSHAN_MPI_CALL(__func) __real_ ## __func
-
+/* creates P* variant of MPI symbols for LD_PRELOAD so that we can handle
+ * language bindings that map to MPI or PMPI symbols under the covers.
+ */
+#define DARSHAN_WRAPPER_MAP(__func,__ret,__args,__fcall) \
+ __ret __func __args { \
+ __ret i; \
+ i = __fcall; \
+ return i; \
+ }
+
+/* Map the desired function call to a pointer called __real_NAME at run
+ * time. Note that we fall back to looking for the same symbol with a P
+ * prefix to handle MPI bindings that call directly to the PMPI layer.
+ */
#define MAP_OR_FAIL(__func) \
if (!(__real_ ## __func)) \
{ \
__real_ ## __func = dlsym(RTLD_NEXT, #__func); \
if(!(__real_ ## __func)) { \
- fprintf(stderr, "Darshan failed to map symbol: %s\n", #__func); \
- exit(1); \
+ fprintf(stderr, "Darshan failed to map symbol: %s\n", #__func); \
+ exit(1); \
} \
}
@@ -47,7 +59,15 @@
#define DARSHAN_DECL(__name) __wrap_ ## __name
-#define DARSHAN_MPI_CALL(__func) __func
+/* creates P* variant of MPI symbols for static linking so that we can handle
+ * language bindings that map to MPI or PMPI symbols under the covers.
+ */
+#define DARSHAN_WRAPPER_MAP(__func,__ret,__args,__fcall) \
+ __ret __wrap_ ## __func __args { \
+ __ret i; \
+ i = __wrap_ ## __fcall; \
+ return i; \
+ }
#define MAP_OR_FAIL(__func)
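
For a concrete symbol, the DARSHAN_WRAPPER_MAP(PMPI_Init, int, (int *argc, char ***argv), MPI_Init(argc,argv)) invocation used later in this series expands roughly as follows (whitespace added for readability):

    /* LD_PRELOAD build: define PMPI_Init so bindings that call the profiling
     * layer directly still pass through Darshan's MPI_Init wrapper */
    int PMPI_Init(int *argc, char ***argv)
    {
        int i;
        i = MPI_Init(argc, argv);
        return i;
    }

    /* static build (ld --wrap): the same forwarding, with __wrap_-prefixed symbols */
    int __wrap_PMPI_Init(int *argc, char ***argv)
    {
        int i;
        i = __wrap_MPI_Init(argc, argv);
        return i;
    }
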
=====================================
darshan-runtime/lib/darshan-bgq.c
=====================================
--- a/darshan-runtime/lib/darshan-bgq.c
+++ b/darshan-runtime/lib/darshan-bgq.c
@@ -196,13 +196,13 @@ static void bgq_shutdown(
{
bgq_runtime->record->base_rec.rank = -1;
- DARSHAN_MPI_CALL(PMPI_Comm_size)(mod_comm, &nprocs);
+ PMPI_Comm_size(mod_comm, &nprocs);
ion_ids = malloc(sizeof(*ion_ids)*nprocs);
result = (ion_ids != NULL);
if(!result)
bgq_runtime->record->counters[BGQ_INODES] = -1;
}
- DARSHAN_MPI_CALL(PMPI_Bcast)(&result, 1, MPI_INT, 0, mod_comm);
+ PMPI_Bcast(&result, 1, MPI_INT, 0, mod_comm);
/* calculate the number of I/O nodes */
if (result)
@@ -210,7 +210,7 @@ static void bgq_shutdown(
int i, found;
uint64_t val;
- DARSHAN_MPI_CALL(PMPI_Gather)(&bgq_runtime->record->counters[BGQ_INODES],
+ PMPI_Gather(&bgq_runtime->record->counters[BGQ_INODES],
1,
MPI_LONG_LONG_INT,
ion_ids,
=====================================
darshan-runtime/lib/darshan-core-init-finalize.c
=====================================
--- a/darshan-runtime/lib/darshan-core-init-finalize.c
+++ b/darshan-runtime/lib/darshan-core-init-finalize.c
@@ -17,203 +17,17 @@
#include "darshan-core.h"
#include "darshan-dynamic.h"
-#ifdef DARSHAN_PRELOAD
-
-DARSHAN_FORWARD_DECL(PMPI_File_close, int, (MPI_File *fh));
-DARSHAN_FORWARD_DECL(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-DARSHAN_FORWARD_DECL(PMPI_File_iread, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-DARSHAN_FORWARD_DECL(PMPI_File_iread_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh));
-#endif
-DARSHAN_FORWARD_DECL(PMPI_File_read_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
-DARSHAN_FORWARD_DECL(PMPI_File_read_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-DARSHAN_FORWARD_DECL(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-DARSHAN_FORWARD_DECL(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
-DARSHAN_FORWARD_DECL(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-DARSHAN_FORWARD_DECL(PMPI_File_read, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-DARSHAN_FORWARD_DECL(PMPI_File_read_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
-DARSHAN_FORWARD_DECL(PMPI_File_read_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-DARSHAN_FORWARD_DECL(PMPI_File_read_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char *datarep, MPI_Info info));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info));
-#endif
-DARSHAN_FORWARD_DECL(PMPI_File_sync, int, (MPI_File fh));
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#else
-DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
-#endif
DARSHAN_FORWARD_DECL(PMPI_Finalize, int, ());
DARSHAN_FORWARD_DECL(PMPI_Init, int, (int *argc, char ***argv));
DARSHAN_FORWARD_DECL(PMPI_Init_thread, int, (int *argc, char ***argv, int required, int *provided));
-DARSHAN_FORWARD_DECL(PMPI_Wtime, double, ());
-DARSHAN_FORWARD_DECL(PMPI_Allreduce, int, (void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm));
-DARSHAN_FORWARD_DECL(PMPI_Bcast, int, (void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm));
-DARSHAN_FORWARD_DECL(PMPI_Comm_rank, int, (MPI_Comm comm, int *rank));
-DARSHAN_FORWARD_DECL(PMPI_Comm_size, int, (MPI_Comm comm, int *size));
-DARSHAN_FORWARD_DECL(PMPI_Scan, int, (void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm));
-DARSHAN_FORWARD_DECL(PMPI_Type_commit, int, (MPI_Datatype *datatype));
-DARSHAN_FORWARD_DECL(PMPI_Type_contiguous, int, (int count, MPI_Datatype oldtype, MPI_Datatype *newtype));
-DARSHAN_FORWARD_DECL(PMPI_Type_extent, int, (MPI_Datatype datatype, MPI_Aint *extent));
-DARSHAN_FORWARD_DECL(PMPI_Type_free, int, (MPI_Datatype *datatype));
-DARSHAN_FORWARD_DECL(PMPI_Type_hindexed, int, (int count, int *array_of_blocklengths, MPI_Aint *array_of_displacements, MPI_Datatype oldtype, MPI_Datatype *newtype));
-DARSHAN_FORWARD_DECL(PMPI_Type_get_envelope, int, (MPI_Datatype datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner));
-DARSHAN_FORWARD_DECL(PMPI_Type_size, int, (MPI_Datatype datatype, int *size));
-DARSHAN_FORWARD_DECL(PMPI_Op_create, int, (MPI_User_function *function, int commute, MPI_Op *op));
-DARSHAN_FORWARD_DECL(PMPI_Op_free, int, (MPI_Op *op));
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_Reduce, int, (const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm));
-#else
-DARSHAN_FORWARD_DECL(PMPI_Reduce, int, (void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm));
-#endif
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_Send, int, (const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm));
-#else
-DARSHAN_FORWARD_DECL(PMPI_Send, int, (void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm));
-#endif
-DARSHAN_FORWARD_DECL(PMPI_Recv, int, (void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status));
-#ifdef HAVE_MPIIO_CONST
-DARSHAN_FORWARD_DECL(PMPI_Gather, int, (const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm));
-#else
-DARSHAN_FORWARD_DECL(PMPI_Gather, int, (void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm));
-#endif
-DARSHAN_FORWARD_DECL(PMPI_Barrier, int, (MPI_Comm comm));
-void resolve_mpi_symbols (void)
-{
- /*
- * Overloaded functions
- */
- MAP_OR_FAIL(PMPI_File_close);
- MAP_OR_FAIL(PMPI_File_iread_at);
- MAP_OR_FAIL(PMPI_File_iread);
- MAP_OR_FAIL(PMPI_File_iread_shared);
- MAP_OR_FAIL(PMPI_File_iwrite_at);
- MAP_OR_FAIL(PMPI_File_iwrite);
- MAP_OR_FAIL(PMPI_File_iwrite_shared);
- MAP_OR_FAIL(PMPI_File_open);
- MAP_OR_FAIL(PMPI_File_read_all_begin);
- MAP_OR_FAIL(PMPI_File_read_all);
- MAP_OR_FAIL(PMPI_File_read_at_all_begin);
- MAP_OR_FAIL(PMPI_File_read_at_all);
- MAP_OR_FAIL(PMPI_File_read_at);
- MAP_OR_FAIL(PMPI_File_read);
- MAP_OR_FAIL(PMPI_File_read_ordered_begin);
- MAP_OR_FAIL(PMPI_File_read_ordered);
- MAP_OR_FAIL(PMPI_File_read_shared);
- MAP_OR_FAIL(PMPI_File_set_view);
- MAP_OR_FAIL(PMPI_File_sync);
- MAP_OR_FAIL(PMPI_File_write_all_begin);
- MAP_OR_FAIL(PMPI_File_write_all);
- MAP_OR_FAIL(PMPI_File_write_at_all_begin);
- MAP_OR_FAIL(PMPI_File_write_at_all);
- MAP_OR_FAIL(PMPI_File_write_at);
- MAP_OR_FAIL(PMPI_File_write);
- MAP_OR_FAIL(PMPI_File_write_ordered_begin);
- MAP_OR_FAIL(PMPI_File_write_ordered);
- MAP_OR_FAIL(PMPI_File_write_shared);
- MAP_OR_FAIL(PMPI_Finalize);
- MAP_OR_FAIL(PMPI_Init);
- MAP_OR_FAIL(PMPI_Init_thread);
- /*
- * These function are not intercepted but are used
- * by darshan itself.
- */
- MAP_OR_FAIL(PMPI_Wtime);
- MAP_OR_FAIL(PMPI_Allreduce);
- MAP_OR_FAIL(PMPI_Bcast);
- MAP_OR_FAIL(PMPI_Comm_rank);
- MAP_OR_FAIL(PMPI_Comm_size);
- MAP_OR_FAIL(PMPI_Scan);
- MAP_OR_FAIL(PMPI_Type_commit);
- MAP_OR_FAIL(PMPI_Type_contiguous);
- MAP_OR_FAIL(PMPI_Type_extent);
- MAP_OR_FAIL(PMPI_Type_free);
- MAP_OR_FAIL(PMPI_Type_hindexed);
- MAP_OR_FAIL(PMPI_Type_get_envelope);
- MAP_OR_FAIL(PMPI_Type_size);
- MAP_OR_FAIL(PMPI_Op_create);
- MAP_OR_FAIL(PMPI_Op_free);
- MAP_OR_FAIL(PMPI_Reduce);
- MAP_OR_FAIL(PMPI_Send);
- MAP_OR_FAIL(PMPI_Recv);
- MAP_OR_FAIL(PMPI_Gather);
- MAP_OR_FAIL(PMPI_Barrier);
-
- return;
-}
-
-#endif
-
-int MPI_Init(int *argc, char ***argv)
+int DARSHAN_DECL(MPI_Init)(int *argc, char ***argv)
{
int ret;
-#ifdef DARSHAN_PRELOAD
- resolve_mpi_symbols();
-#endif
+ MAP_OR_FAIL(PMPI_Init);
- ret = DARSHAN_MPI_CALL(PMPI_Init)(argc, argv);
+ ret = __real_PMPI_Init(argc, argv);
if(ret != MPI_SUCCESS)
{
return(ret);
@@ -231,16 +45,15 @@ int MPI_Init(int *argc, char ***argv)
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_Init, int, (int *argc, char ***argv), MPI_Init(argc,argv))
-int MPI_Init_thread(int *argc, char ***argv, int required, int *provided)
+int DARSHAN_DECL(MPI_Init_thread)(int *argc, char ***argv, int required, int *provided)
{
int ret;
-#ifdef DARSHAN_PRELOAD
- resolve_mpi_symbols();
-#endif
+ MAP_OR_FAIL(PMPI_Init_thread);
- ret = DARSHAN_MPI_CALL(PMPI_Init_thread)(argc, argv, required, provided);
+ ret = __real_PMPI_Init_thread(argc, argv, required, provided);
if(ret != MPI_SUCCESS)
{
return(ret);
@@ -258,16 +71,20 @@ int MPI_Init_thread(int *argc, char ***argv, int required, int *provided)
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_Init_thread, int, (int *argc, char ***argv, int required, int *provided), MPI_Init_thread(argc,argv,required,provided))
-int MPI_Finalize(void)
+int DARSHAN_DECL(MPI_Finalize)(void)
{
int ret;
+ MAP_OR_FAIL(PMPI_Finalize);
+
darshan_core_shutdown();
- ret = DARSHAN_MPI_CALL(PMPI_Finalize)();
+ ret = __real_PMPI_Finalize();
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_Finalize, int, (void), MPI_Finalize())
/*
* Local variables:
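
One practical effect, relevant to the Spectrum/Open MPI 2 fix in the first commit, is that callers entering MPI through the profiling layer are still instrumented. A hypothetical check program (assumes an LD_PRELOAD or --wrap build of Darshan):

    #include <mpi.h>

    int main(int argc, char **argv)
    {
        /* routed to Darshan's MPI_Init wrapper via the generated PMPI_Init */
        PMPI_Init(&argc, &argv);

        /* ... application I/O ... */

        /* darshan_core_shutdown() still runs before MPI tears down */
        PMPI_Finalize();
        return 0;
    }
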
=====================================
darshan-runtime/lib/darshan-core.c
=====================================
--- a/darshan-runtime/lib/darshan-core.c
+++ b/darshan-runtime/lib/darshan-core.c
@@ -163,14 +163,14 @@ void darshan_core_initialize(int argc, char **argv)
int tmpval;
double tmpfloat;
- DARSHAN_MPI_CALL(PMPI_Comm_size)(MPI_COMM_WORLD, &nprocs);
- DARSHAN_MPI_CALL(PMPI_Comm_rank)(MPI_COMM_WORLD, &my_rank);
+ PMPI_Comm_size(MPI_COMM_WORLD, &nprocs);
+ PMPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
if(getenv("DARSHAN_INTERNAL_TIMING"))
internal_timing_flag = 1;
if(internal_timing_flag)
- init_start = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ init_start = PMPI_Wtime();
/* setup darshan runtime if darshan is enabled and hasn't been initialized already */
if(!getenv("DARSHAN_DISABLE") && !darshan_core)
@@ -236,7 +236,7 @@ void darshan_core_initialize(int argc, char **argv)
if(init_core)
{
memset(init_core, 0, sizeof(*init_core));
- init_core->wtime_offset = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ init_core->wtime_offset = PMPI_Wtime();
/* TODO: do we alloc new memory as we go or just do everything up front? */
@@ -325,8 +325,8 @@ void darshan_core_initialize(int argc, char **argv)
if(internal_timing_flag)
{
- init_time = DARSHAN_MPI_CALL(PMPI_Wtime)() - init_start;
- DARSHAN_MPI_CALL(PMPI_Reduce)(&init_time, &init_max, 1,
+ init_time = PMPI_Wtime() - init_start;
+ PMPI_Reduce(&init_time, &init_max, 1,
MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
if(my_rank == 0)
{
@@ -371,8 +371,8 @@ void darshan_core_shutdown()
internal_timing_flag = 1;
/* synchronize before getting start time */
- DARSHAN_MPI_CALL(PMPI_Barrier)(MPI_COMM_WORLD);
- start_log_time = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ PMPI_Barrier(MPI_COMM_WORLD);
+ start_log_time = PMPI_Wtime();
/* disable darshan-core while we shut down */
DARSHAN_CORE_LOCK();
@@ -398,9 +398,9 @@ void darshan_core_shutdown()
final_core->log_job_p->end_time = time(NULL);
/* reduce to report first start and last end time across all ranks at rank 0 */
- DARSHAN_MPI_CALL(PMPI_Reduce)(&final_core->log_job_p->start_time, &first_start_time,
+ PMPI_Reduce(&final_core->log_job_p->start_time, &first_start_time,
1, MPI_INT64_T, MPI_MIN, 0, MPI_COMM_WORLD);
- DARSHAN_MPI_CALL(PMPI_Reduce)(&final_core->log_job_p->end_time, &last_end_time,
+ PMPI_Reduce(&final_core->log_job_p->end_time, &last_end_time,
1, MPI_INT64_T, MPI_MAX, 0, MPI_COMM_WORLD);
if(my_rank == 0)
{
@@ -433,7 +433,7 @@ void darshan_core_shutdown()
}
/* broadcast log file name */
- DARSHAN_MPI_CALL(PMPI_Bcast)(logfile_name, PATH_MAX, MPI_CHAR, 0,
+ PMPI_Bcast(logfile_name, PATH_MAX, MPI_CHAR, 0,
MPI_COMM_WORLD);
if(strlen(logfile_name) == 0)
@@ -456,21 +456,21 @@ void darshan_core_shutdown()
}
/* reduce the number of times a module was opened globally and bcast to everyone */
- DARSHAN_MPI_CALL(PMPI_Allreduce)(local_mod_use, global_mod_use_count,
+ PMPI_Allreduce(local_mod_use, global_mod_use_count,
DARSHAN_MAX_MODS, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
/* get a list of records which are shared across all processes */
darshan_get_shared_records(final_core, &shared_recs, &shared_rec_cnt);
if(internal_timing_flag)
- open1 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ open1 = PMPI_Wtime();
/* collectively open the darshan log file */
ret = darshan_log_open_all(logfile_name, &log_fh);
if(internal_timing_flag)
- open2 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ open2 = PMPI_Wtime();
/* error out if unable to open log file */
- DARSHAN_MPI_CALL(PMPI_Allreduce)(&ret, &all_ret, 1, MPI_INT,
+ PMPI_Allreduce(&ret, &all_ret, 1, MPI_INT,
MPI_LOR, MPI_COMM_WORLD);
if(all_ret != 0)
{
@@ -485,7 +485,7 @@ void darshan_core_shutdown()
}
if(internal_timing_flag)
- job1 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ job1 = PMPI_Wtime();
/* rank 0 is responsible for writing the compressed darshan job information */
if(my_rank == 0)
{
@@ -505,7 +505,7 @@ void darshan_core_shutdown()
{
/* write the job information, preallocing space for the log header */
gz_fp += sizeof(struct darshan_header);
- all_ret = DARSHAN_MPI_CALL(PMPI_File_write_at)(log_fh, gz_fp,
+ all_ret = PMPI_File_write_at(log_fh, gz_fp,
final_core->comp_buf, comp_buf_sz, MPI_BYTE, &status);
if(all_ret != MPI_SUCCESS)
{
@@ -520,7 +520,7 @@ void darshan_core_shutdown()
}
/* error out if unable to write job information */
- DARSHAN_MPI_CALL(PMPI_Bcast)(&all_ret, 1, MPI_INT, 0, MPI_COMM_WORLD);
+ PMPI_Bcast(&all_ret, 1, MPI_INT, 0, MPI_COMM_WORLD);
if(all_ret != 0)
{
free(logfile_name);
@@ -528,17 +528,17 @@ void darshan_core_shutdown()
return;
}
if(internal_timing_flag)
- job2 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ job2 = PMPI_Wtime();
if(internal_timing_flag)
- rec1 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ rec1 = PMPI_Wtime();
/* write the record name->id hash to the log file */
final_core->log_hdr_p->name_map.off = gz_fp;
ret = darshan_log_write_name_record_hash(log_fh, final_core, &gz_fp);
final_core->log_hdr_p->name_map.len = gz_fp - final_core->log_hdr_p->name_map.off;
/* error out if unable to write the name record hash */
- DARSHAN_MPI_CALL(PMPI_Allreduce)(&ret, &all_ret, 1, MPI_INT,
+ PMPI_Allreduce(&ret, &all_ret, 1, MPI_INT,
MPI_LOR, MPI_COMM_WORLD);
if(all_ret != 0)
{
@@ -554,7 +554,7 @@ void darshan_core_shutdown()
return;
}
if(internal_timing_flag)
- rec2 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ rec2 = PMPI_Wtime();
mod_shared_recs = malloc(shared_rec_cnt * sizeof(darshan_record_id));
assert(mod_shared_recs);
@@ -586,7 +586,7 @@ void darshan_core_shutdown()
}
if(internal_timing_flag)
- mod1[i] = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ mod1[i] = PMPI_Wtime();
/* set the shared record list for this module */
for(j = 0; j < shared_rec_cnt; j++)
@@ -626,7 +626,7 @@ void darshan_core_shutdown()
free(mod_buf);
/* error out if the log append failed */
- DARSHAN_MPI_CALL(PMPI_Allreduce)(&ret, &all_ret, 1, MPI_INT,
+ PMPI_Allreduce(&ret, &all_ret, 1, MPI_INT,
MPI_LOR, MPI_COMM_WORLD);
if(all_ret != 0)
{
@@ -643,11 +643,11 @@ void darshan_core_shutdown()
}
if(internal_timing_flag)
- mod2[i] = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ mod2[i] = PMPI_Wtime();
}
if(internal_timing_flag)
- header1 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ header1 = PMPI_Wtime();
/* write out log header, after running 2 reductions on header variables:
* 1) reduce 'partial_flag' variable to determine which modules ran out
* of memory for storing data
@@ -659,14 +659,14 @@ void darshan_core_shutdown()
/* rank 0 is responsible for writing the log header */
final_core->log_hdr_p->comp_type = DARSHAN_ZLIB_COMP;
- DARSHAN_MPI_CALL(PMPI_Reduce)(
+ PMPI_Reduce(
MPI_IN_PLACE, &(final_core->log_hdr_p->partial_flag),
1, MPI_UINT32_T, MPI_BOR, 0, MPI_COMM_WORLD);
- DARSHAN_MPI_CALL(PMPI_Reduce)(
+ PMPI_Reduce(
MPI_IN_PLACE, &(final_core->log_hdr_p->mod_ver),
DARSHAN_MAX_MODS, MPI_UINT32_T, MPI_MAX, 0, MPI_COMM_WORLD);
- all_ret = DARSHAN_MPI_CALL(PMPI_File_write_at)(log_fh, 0, final_core->log_hdr_p,
+ all_ret = PMPI_File_write_at(log_fh, 0, final_core->log_hdr_p,
sizeof(struct darshan_header), MPI_BYTE, &status);
if(all_ret != MPI_SUCCESS)
{
@@ -677,16 +677,16 @@ void darshan_core_shutdown()
}
else
{
- DARSHAN_MPI_CALL(PMPI_Reduce)(
+ PMPI_Reduce(
&(final_core->log_hdr_p->partial_flag), &(final_core->log_hdr_p->partial_flag),
1, MPI_UINT32_T, MPI_BOR, 0, MPI_COMM_WORLD);
- DARSHAN_MPI_CALL(PMPI_Reduce)(
+ PMPI_Reduce(
&(final_core->log_hdr_p->mod_ver), &(final_core->log_hdr_p->mod_ver),
DARSHAN_MAX_MODS, MPI_UINT32_T, MPI_MAX, 0, MPI_COMM_WORLD);
}
/* error out if unable to write log header */
- DARSHAN_MPI_CALL(PMPI_Bcast)(&all_ret, 1, MPI_INT, 0, MPI_COMM_WORLD);
+ PMPI_Bcast(&all_ret, 1, MPI_INT, 0, MPI_COMM_WORLD);
if(all_ret != 0)
{
free(logfile_name);
@@ -694,9 +694,9 @@ void darshan_core_shutdown()
return;
}
if(internal_timing_flag)
- header2 = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ header2 = PMPI_Wtime();
- DARSHAN_MPI_CALL(PMPI_File_close)(&log_fh);
+ PMPI_File_close(&log_fh);
/* if we got this far, there are no errors, so rename from *.darshan_partial
* to *-<logwritetime>.darshan, which indicates that this log file is
@@ -723,7 +723,7 @@ void darshan_core_shutdown()
if(new_logfile_name)
{
new_logfile_name[0] = '\0';
- end_log_time = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ end_log_time = PMPI_Wtime();
strcat(new_logfile_name, logfile_name);
tmp_index = strstr(new_logfile_name, ".darshan_partial");
sprintf(tmp_index, "_%d.darshan", (int)(end_log_time-start_log_time+1));
@@ -749,7 +749,7 @@ void darshan_core_shutdown()
double mod_tm[DARSHAN_MAX_MODS], mod_slowest[DARSHAN_MAX_MODS];
double all_tm, all_slowest;
- tm_end = DARSHAN_MPI_CALL(PMPI_Wtime)();
+ tm_end = PMPI_Wtime();
open_tm = open2 - open1;
header_tm = header2 - header1;
@@ -761,17 +761,17 @@ void darshan_core_shutdown()
mod_tm[i] = mod2[i] - mod1[i];
}
- DARSHAN_MPI_CALL(PMPI_Reduce)(&open_tm, &open_slowest, 1,
+ PMPI_Reduce(&open_tm, &open_slowest, 1,
MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
- DARSHAN_MPI_CALL(PMPI_Reduce)(&header_tm, &header_slowest, 1,
+ PMPI_Reduce(&header_tm, &header_slowest, 1,
MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
- DARSHAN_MPI_CALL(PMPI_Reduce)(&job_tm, &job_slowest, 1,
+ PMPI_Reduce(&job_tm, &job_slowest, 1,
MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
- DARSHAN_MPI_CALL(PMPI_Reduce)(&rec_tm, &rec_slowest, 1,
+ PMPI_Reduce(&rec_tm, &rec_slowest, 1,
MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
- DARSHAN_MPI_CALL(PMPI_Reduce)(&all_tm, &all_slowest, 1,
+ PMPI_Reduce(&all_tm, &all_slowest, 1,
MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
- DARSHAN_MPI_CALL(PMPI_Reduce)(mod_tm, mod_slowest, DARSHAN_MAX_MODS,
+ PMPI_Reduce(mod_tm, mod_slowest, DARSHAN_MAX_MODS,
MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
if(my_rank == 0)
@@ -834,11 +834,11 @@ static void *darshan_init_mmap_log(struct darshan_core_runtime* core, int jobid)
*/
if(my_rank == 0)
{
- hlevel=DARSHAN_MPI_CALL(PMPI_Wtime)() * 1000000;
+ hlevel=PMPI_Wtime() * 1000000;
(void)gethostname(hname, sizeof(hname));
logmod = darshan_hash((void*)hname,strlen(hname),hlevel);
}
- DARSHAN_MPI_CALL(PMPI_Bcast)(&logmod, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD);
+ PMPI_Bcast(&logmod, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD);
/* construct a unique temporary log file name for this process
* to write mmap log data to
@@ -1353,7 +1353,7 @@ static void darshan_get_logfile_name(char* logfile_name, int jobid, struct tm* s
darshan_get_user_name(cuser);
/* generate a random number to help differentiate the log */
- hlevel=DARSHAN_MPI_CALL(PMPI_Wtime)() * 1000000;
+ hlevel=PMPI_Wtime() * 1000000;
(void)gethostname(hname, sizeof(hname));
logmod = darshan_hash((void*)hname,strlen(hname),hlevel);
@@ -1442,7 +1442,7 @@ static void darshan_get_shared_records(struct darshan_core_runtime *core,
uint64_t *global_mod_flags;
/* broadcast root's number of records to all other processes */
- DARSHAN_MPI_CALL(PMPI_Bcast)(&tmp_cnt, 1, MPI_INT, 0, MPI_COMM_WORLD);
+ PMPI_Bcast(&tmp_cnt, 1, MPI_INT, 0, MPI_COMM_WORLD);
/* use root record count to allocate data structures */
id_array = malloc(tmp_cnt * sizeof(darshan_record_id));
@@ -1466,7 +1466,7 @@ static void darshan_get_shared_records(struct darshan_core_runtime *core,
}
/* broadcast root's list of records to all other processes */
- DARSHAN_MPI_CALL(PMPI_Bcast)(id_array, (tmp_cnt * sizeof(darshan_record_id)),
+ PMPI_Bcast(id_array, (tmp_cnt * sizeof(darshan_record_id)),
MPI_BYTE, 0, MPI_COMM_WORLD);
/* everyone looks to see if they opened the same records as root */
@@ -1483,7 +1483,7 @@ static void darshan_get_shared_records(struct darshan_core_runtime *core,
/* now allreduce so everyone agrees which records are shared and
* which modules accessed them collectively
*/
- DARSHAN_MPI_CALL(PMPI_Allreduce)(mod_flags, global_mod_flags, tmp_cnt,
+ PMPI_Allreduce(mod_flags, global_mod_flags, tmp_cnt,
MPI_UINT64_T, MPI_BAND, MPI_COMM_WORLD);
j = 0;
@@ -1562,7 +1562,7 @@ static int darshan_log_open_all(char *logfile_name, MPI_File *log_fh)
}
/* open the darshan log file for writing */
- ret = DARSHAN_MPI_CALL(PMPI_File_open)(MPI_COMM_WORLD, logfile_name,
+ ret = PMPI_File_open(MPI_COMM_WORLD, logfile_name,
MPI_MODE_CREATE | MPI_MODE_WRONLY | MPI_MODE_EXCL, info, log_fh);
if(ret != MPI_SUCCESS)
return(-1);
@@ -1780,7 +1780,7 @@ static int darshan_log_append_all(MPI_File log_fh, struct darshan_core_runtime *
send_off += *inout_off; /* rank 0 knows the beginning offset */
}
- DARSHAN_MPI_CALL(PMPI_Scan)(&send_off, &my_off, 1, MPI_OFFSET,
+ PMPI_Scan(&send_off, &my_off, 1, MPI_OFFSET,
MPI_SUM, MPI_COMM_WORLD);
/* scan is inclusive; subtract local size back out */
my_off -= comp_buf_sz;
@@ -1788,7 +1788,7 @@ static int darshan_log_append_all(MPI_File log_fh, struct darshan_core_runtime *
if(ret == 0)
{
/* no compression errors, proceed with the collective write */
- ret = DARSHAN_MPI_CALL(PMPI_File_write_at_all)(log_fh, my_off,
+ ret = PMPI_File_write_at_all(log_fh, my_off,
core->comp_buf, comp_buf_sz, MPI_BYTE, &status);
}
else
@@ -1796,7 +1796,7 @@ static int darshan_log_append_all(MPI_File log_fh, struct darshan_core_runtime *
/* error during compression. preserve and return error to caller,
* but participate in collective write to avoid deadlock.
*/
- (void)DARSHAN_MPI_CALL(PMPI_File_write_at_all)(log_fh, my_off,
+ (void)PMPI_File_write_at_all(log_fh, my_off,
core->comp_buf, comp_buf_sz, MPI_BYTE, &status);
}
@@ -1806,12 +1806,12 @@ static int darshan_log_append_all(MPI_File log_fh, struct darshan_core_runtime *
if(my_rank == (nprocs-1))
{
my_off += comp_buf_sz;
- DARSHAN_MPI_CALL(PMPI_Send)(&my_off, 1, MPI_OFFSET, 0, 0,
+ PMPI_Send(&my_off, 1, MPI_OFFSET, 0, 0,
MPI_COMM_WORLD);
}
if(my_rank == 0)
{
- DARSHAN_MPI_CALL(PMPI_Recv)(&my_off, 1, MPI_OFFSET, (nprocs-1), 0,
+ PMPI_Recv(&my_off, 1, MPI_OFFSET, (nprocs-1), 0,
MPI_COMM_WORLD, &status);
*inout_off = my_off;
@@ -1886,7 +1886,7 @@ void darshan_shutdown_bench(int argc, char **argv)
if(my_rank == 0)
fprintf(stderr, "# 1 unique file per proc\n");
- DARSHAN_MPI_CALL(PMPI_Barrier)(MPI_COMM_WORLD);
+ PMPI_Barrier(MPI_COMM_WORLD);
darshan_core_shutdown();
darshan_core = NULL;
@@ -1901,7 +1901,7 @@ void darshan_shutdown_bench(int argc, char **argv)
if(my_rank == 0)
fprintf(stderr, "# 1 shared file per proc\n");
- DARSHAN_MPI_CALL(PMPI_Barrier)(MPI_COMM_WORLD);
+ PMPI_Barrier(MPI_COMM_WORLD);
darshan_core_shutdown();
darshan_core = NULL;
@@ -1916,7 +1916,7 @@ void darshan_shutdown_bench(int argc, char **argv)
if(my_rank == 0)
fprintf(stderr, "# 1024 unique files per proc\n");
- DARSHAN_MPI_CALL(PMPI_Barrier)(MPI_COMM_WORLD);
+ PMPI_Barrier(MPI_COMM_WORLD);
darshan_core_shutdown();
darshan_core = NULL;
@@ -1931,7 +1931,7 @@ void darshan_shutdown_bench(int argc, char **argv)
if(my_rank == 0)
fprintf(stderr, "# 1024 shared files per proc\n");
- DARSHAN_MPI_CALL(PMPI_Barrier)(MPI_COMM_WORLD);
+ PMPI_Barrier(MPI_COMM_WORLD);
darshan_core_shutdown();
darshan_core = NULL;
@@ -2130,7 +2130,7 @@ double darshan_core_wtime()
}
DARSHAN_CORE_UNLOCK();
- return(DARSHAN_MPI_CALL(PMPI_Wtime)() - darshan_core->wtime_offset);
+ return(PMPI_Wtime() - darshan_core->wtime_offset);
}
int darshan_core_excluded_path(const char *path)
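
The darshan_log_append_all() hunks above keep the scan-based offset computation; a short worked example of how it places each rank's compressed buffer: with three ranks writing 10, 20, and 30 bytes, the inclusive PMPI_Scan of the sizes yields 10, 30, and 60, and subtracting the local size back out gives non-overlapping offsets 0, 10, and 30 relative to the section start (which rank 0 folds into its own contribution). The relevant lines, condensed from the source:

    MPI_Offset send_off = comp_buf_sz, my_off;
    PMPI_Scan(&send_off, &my_off, 1, MPI_OFFSET, MPI_SUM, MPI_COMM_WORLD);
    my_off -= comp_buf_sz;   /* scan is inclusive; subtract local size back out */
    PMPI_File_write_at_all(log_fh, my_off, core->comp_buf, comp_buf_sz, MPI_BYTE, &status);
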
=====================================
darshan-runtime/lib/darshan-hdf5.c
=====================================
--- a/darshan-runtime/lib/darshan-hdf5.c
+++ b/darshan-runtime/lib/darshan-hdf5.c
@@ -449,15 +449,15 @@ static void hdf5_shutdown(
/* construct a datatype for a HDF5 file record. This is serving no purpose
* except to make sure we can do a reduction on proper boundaries
*/
- DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_hdf5_file),
+ PMPI_Type_contiguous(sizeof(struct darshan_hdf5_file),
MPI_BYTE, &red_type);
- DARSHAN_MPI_CALL(PMPI_Type_commit)(&red_type);
+ PMPI_Type_commit(&red_type);
/* register a HDF5 file record reduction operator */
- DARSHAN_MPI_CALL(PMPI_Op_create)(hdf5_record_reduction_op, 1, &red_op);
+ PMPI_Op_create(hdf5_record_reduction_op, 1, &red_op);
/* reduce shared HDF5 file records */
- DARSHAN_MPI_CALL(PMPI_Reduce)(red_send_buf, red_recv_buf,
+ PMPI_Reduce(red_send_buf, red_recv_buf,
shared_rec_count, red_type, red_op, 0, mod_comm);
/* clean up reduction state */
@@ -473,8 +473,8 @@ static void hdf5_shutdown(
hdf5_rec_count -= shared_rec_count;
}
- DARSHAN_MPI_CALL(PMPI_Type_free)(&red_type);
- DARSHAN_MPI_CALL(PMPI_Op_free)(&red_op);
+ PMPI_Type_free(&red_type);
+ PMPI_Op_free(&red_op);
}
/* update output buffer size to account for shared file reduction */
=====================================
darshan-runtime/lib/darshan-mpiio.c
=====================================
--- a/darshan-runtime/lib/darshan-mpiio.c
+++ b/darshan-runtime/lib/darshan-mpiio.c
@@ -25,6 +25,91 @@
#include "darshan.h"
#include "darshan-dynamic.h"
+DARSHAN_FORWARD_DECL(PMPI_File_close, int, (MPI_File *fh));
+DARSHAN_FORWARD_DECL(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_FORWARD_DECL(PMPI_File_iread, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+DARSHAN_FORWARD_DECL(PMPI_File_iread_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+#endif
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_iwrite, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+#endif
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_iwrite_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST *request));
+#endif
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_open, int, (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh));
+#endif
+DARSHAN_FORWARD_DECL(PMPI_File_read_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
+DARSHAN_FORWARD_DECL(PMPI_File_read_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_FORWARD_DECL(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_FORWARD_DECL(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
+DARSHAN_FORWARD_DECL(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_FORWARD_DECL(PMPI_File_read, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_FORWARD_DECL(PMPI_File_read_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
+DARSHAN_FORWARD_DECL(PMPI_File_read_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+DARSHAN_FORWARD_DECL(PMPI_File_read_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char *datarep, MPI_Info info));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info));
+#endif
+DARSHAN_FORWARD_DECL(PMPI_File_sync, int, (MPI_File fh));
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_write_all_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
+#endif
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_write_all, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+#endif
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype));
+#endif
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+#endif
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+#endif
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_write, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+#endif
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_write_ordered_begin, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype));
+#endif
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_write_ordered, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+#endif
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, const void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+#else
+DARSHAN_FORWARD_DECL(PMPI_File_write_shared, int, (MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status));
+#endif
+
/* The mpiio_file_record_ref structure maintains necessary runtime metadata
* for the MPIIO file record (darshan_mpiio_file structure, defined in
* darshan-mpiio-log-format.h) pointed to by 'file_rec'. This metadata
@@ -137,7 +222,7 @@ static int enable_dxt_io_trace = 0;
break; \
} \
rec_ref->file_rec->counters[MPIIO_MODE] = __mode; \
- DARSHAN_MPI_CALL(PMPI_Comm_size)(__comm, &comm_size); \
+ PMPI_Comm_size(__comm, &comm_size); \
if(comm_size == 1) \
rec_ref->file_rec->counters[MPIIO_INDEP_OPENS] += 1; \
else \
@@ -160,7 +245,7 @@ static int enable_dxt_io_trace = 0;
if(__ret != MPI_SUCCESS) break; \
rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash, &(__fh), sizeof(MPI_File)); \
if(!rec_ref) break; \
- DARSHAN_MPI_CALL(PMPI_Type_size)(__datatype, &size); \
+ PMPI_Type_size(__datatype, &size); \
size = size * __count; \
/* DXT to record detailed read tracing information */ \
if(enable_dxt_io_trace) { \
@@ -193,7 +278,7 @@ static int enable_dxt_io_trace = 0;
if(__ret != MPI_SUCCESS) break; \
rec_ref = darshan_lookup_record_ref(mpiio_runtime->fh_hash, &(__fh), sizeof(MPI_File)); \
if(!rec_ref) break; \
- DARSHAN_MPI_CALL(PMPI_Type_size)(__datatype, &size); \
+ PMPI_Type_size(__datatype, &size); \
size = size * __count; \
/* DXT to record detailed write tracing information */ \
if(enable_dxt_io_trace) { \
@@ -224,9 +309,9 @@ static int enable_dxt_io_trace = 0;
**********************************************************/
#ifdef HAVE_MPIIO_CONST
-int MPI_File_open(MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh)
+int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh)
#else
-int MPI_File_open(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh)
+int DARSHAN_DECL(MPI_File_open)(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh)
#endif
{
int ret;
@@ -234,8 +319,10 @@ int MPI_File_open(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_F
char* tmp;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_open);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_open)(comm, filename, amode, info, fh);
+ ret = __real_PMPI_File_open(comm, filename, amode, info, fh);
tm2 = darshan_core_wtime();
/* use ROMIO approach to strip prefix if present */
@@ -255,15 +342,22 @@ int MPI_File_open(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_F
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_open, int, (MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open(comm,filename,amode,info,fh))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_open, int, (MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh), MPI_File_open(comm,filename,amode,info,fh))
+#endif
-int MPI_File_read(MPI_File fh, void *buf, int count,
+int DARSHAN_DECL(MPI_File_read)(MPI_File fh, void *buf, int count,
MPI_Datatype datatype, MPI_Status *status)
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_read);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_read)(fh, buf, count, datatype, status);
+ ret = __real_PMPI_File_read(fh, buf, count, datatype, status);
tm2 = darshan_core_wtime();
MPIIO_PRE_RECORD();
@@ -272,20 +366,24 @@ int MPI_File_read(MPI_File fh, void *buf, int count,
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_read, int, (MPI_File fh, void *buf, int count,
+ MPI_Datatype datatype, MPI_Status *status), MPI_File_read(fh,buf,count,datatype,status))
#ifdef HAVE_MPIIO_CONST
-int MPI_File_write(MPI_File fh, const void *buf, int count,
+int DARSHAN_DECL(MPI_File_write)(MPI_File fh, const void *buf, int count,
MPI_Datatype datatype, MPI_Status *status)
#else
-int MPI_File_write(MPI_File fh, void *buf, int count,
+int DARSHAN_DECL(MPI_File_write)(MPI_File fh, void *buf, int count,
MPI_Datatype datatype, MPI_Status *status)
#endif
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_write);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_write)(fh, buf, count, datatype, status);
+ ret = __real_PMPI_File_write(fh, buf, count, datatype, status);
tm2 = darshan_core_wtime();
MPIIO_PRE_RECORD();
@@ -294,15 +392,24 @@ int MPI_File_write(MPI_File fh, void *buf, int count,
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_write, int, (MPI_File fh, const void *buf, int count,
+ MPI_Datatype datatype, MPI_Status *status), MPI_File_write(fh,buf,count,datatype,status))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_write, int, (MPI_File fh, void *buf, int count,
+ MPI_Datatype datatype, MPI_Status *status), MPI_File_write(fh,buf,count,datatype,status))
+#endif
-int MPI_File_read_at(MPI_File fh, MPI_Offset offset, void *buf,
+int DARSHAN_DECL(MPI_File_read_at)(MPI_File fh, MPI_Offset offset, void *buf,
int count, MPI_Datatype datatype, MPI_Status *status)
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_read_at);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_read_at)(fh, offset, buf,
+ ret = __real_PMPI_File_read_at(fh, offset, buf,
count, datatype, status);
tm2 = darshan_core_wtime();
@@ -312,20 +419,24 @@ int MPI_File_read_at(MPI_File fh, MPI_Offset offset, void *buf,
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_read_at, int, (MPI_File fh, MPI_Offset offset, void *buf,
+ int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_read_at(fh, offset, buf, count, datatype, status))
#ifdef HAVE_MPIIO_CONST
-int MPI_File_write_at(MPI_File fh, MPI_Offset offset, const void *buf,
+int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, const void *buf,
int count, MPI_Datatype datatype, MPI_Status *status)
#else
-int MPI_File_write_at(MPI_File fh, MPI_Offset offset, void *buf,
+int DARSHAN_DECL(MPI_File_write_at)(MPI_File fh, MPI_Offset offset, void *buf,
int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_write_at);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_write_at)(fh, offset, buf,
+ ret = __real_PMPI_File_write_at(fh, offset, buf,
count, datatype, status);
tm2 = darshan_core_wtime();
@@ -335,14 +446,23 @@ int MPI_File_write_at(MPI_File fh, MPI_Offset offset, void *buf,
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, const void *buf,
+ int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at(fh, offset, buf, count, datatype, status))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_write_at, int, (MPI_File fh, MPI_Offset offset, void *buf,
+ int count, MPI_Datatype datatype, MPI_Status *status), MPI_File_write_at(fh, offset, buf, count, datatype, status))
+#endif
-int MPI_File_read_all(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
+int DARSHAN_DECL(MPI_File_read_all)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_write_at);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_read_all)(fh, buf, count,
+ ret = __real_PMPI_File_read_all(fh, buf, count,
datatype, status);
tm2 = darshan_core_wtime();
@@ -352,18 +472,22 @@ int MPI_File_read_all(MPI_File fh, void * buf, int count, MPI_Datatype datatype,
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_read_all, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
+ MPI_File_read_all(fh,buf,count,datatype,status))
#ifdef HAVE_MPIIO_CONST
-int MPI_File_write_all(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
+int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#else
-int MPI_File_write_all(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
+int DARSHAN_DECL(MPI_File_write_all)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_write_all);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_write_all)(fh, buf, count,
+ ret = __real_PMPI_File_write_all(fh, buf, count,
datatype, status);
tm2 = darshan_core_wtime();
@@ -373,15 +497,24 @@ int MPI_File_write_all(MPI_File fh, void * buf, int count, MPI_Datatype datatype
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_write_all, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
+ MPI_File_write_all(fh, buf, count, datatype, status))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_write_all, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
+ MPI_File_write_all(fh, buf, count, datatype, status))
+#endif
-int MPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void * buf,
+int DARSHAN_DECL(MPI_File_read_at_all)(MPI_File fh, MPI_Offset offset, void * buf,
int count, MPI_Datatype datatype, MPI_Status * status)
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_read_at_all);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_read_at_all)(fh, offset, buf,
+ ret = __real_PMPI_File_read_at_all(fh, offset, buf,
count, datatype, status);
tm2 = darshan_core_wtime();
@@ -391,20 +524,25 @@ int MPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void * buf,
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all, int, (MPI_File fh, MPI_Offset offset, void * buf,
+ int count, MPI_Datatype datatype, MPI_Status * status),
+ MPI_File_read_at_all(fh,offset,buf,count,datatype,status))
#ifdef HAVE_MPIIO_CONST
-int MPI_File_write_at_all(MPI_File fh, MPI_Offset offset, const void * buf,
+int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, const void * buf,
int count, MPI_Datatype datatype, MPI_Status * status)
#else
-int MPI_File_write_at_all(MPI_File fh, MPI_Offset offset, void * buf,
+int DARSHAN_DECL(MPI_File_write_at_all)(MPI_File fh, MPI_Offset offset, void * buf,
int count, MPI_Datatype datatype, MPI_Status * status)
#endif
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_write_at_all);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_write_at_all)(fh, offset, buf,
+ ret = __real_PMPI_File_write_at_all(fh, offset, buf,
count, datatype, status);
tm2 = darshan_core_wtime();
@@ -414,14 +552,25 @@ int MPI_File_write_at_all(MPI_File fh, MPI_Offset offset, void * buf,
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, const void * buf,
+ int count, MPI_Datatype datatype, MPI_Status * status),
+ MPI_File_write_at_all(fh, offset, buf, count, datatype, status))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all, int, (MPI_File fh, MPI_Offset offset, void * buf,
+ int count, MPI_Datatype datatype, MPI_Status * status),
+ MPI_File_write_at_all(fh, offset, buf, count, datatype, status))
+#endif
-int MPI_File_read_shared(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
+int DARSHAN_DECL(MPI_File_read_shared)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_read_shared);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_read_shared)(fh, buf, count,
+ ret = __real_PMPI_File_read_shared(fh, buf, count,
datatype, status);
tm2 = darshan_core_wtime();
@@ -431,18 +580,22 @@ int MPI_File_read_shared(MPI_File fh, void * buf, int count, MPI_Datatype dataty
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_read_shared, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
+ MPI_File_read_shared(fh, buf, count, datatype, status))
#ifdef HAVE_MPIIO_CONST
-int MPI_File_write_shared(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
+int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#else
-int MPI_File_write_shared(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
+int DARSHAN_DECL(MPI_File_write_shared)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
#endif
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_write_shared);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_write_shared)(fh, buf, count,
+ ret = __real_PMPI_File_write_shared(fh, buf, count,
datatype, status);
tm2 = darshan_core_wtime();
@@ -452,15 +605,25 @@ int MPI_File_write_shared(MPI_File fh, void * buf, int count, MPI_Datatype datat
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_write_shared, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
+ MPI_File_write_shared(fh, buf, count, datatype, status))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_write_shared, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status),
+ MPI_File_write_shared(fh, buf, count, datatype, status))
+#endif
-int MPI_File_read_ordered(MPI_File fh, void * buf, int count,
+
+int DARSHAN_DECL(MPI_File_read_ordered)(MPI_File fh, void * buf, int count,
MPI_Datatype datatype, MPI_Status * status)
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_read_ordered);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_read_ordered)(fh, buf, count,
+ ret = __real_PMPI_File_read_ordered(fh, buf, count,
datatype, status);
tm2 = darshan_core_wtime();
@@ -470,20 +633,25 @@ int MPI_File_read_ordered(MPI_File fh, void * buf, int count,
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_read_ordered, int, (MPI_File fh, void * buf, int count,
+ MPI_Datatype datatype, MPI_Status * status),
+ MPI_File_read_ordered(fh, buf, count, datatype, status))
#ifdef HAVE_MPIIO_CONST
-int MPI_File_write_ordered(MPI_File fh, const void * buf, int count,
+int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, const void * buf, int count,
MPI_Datatype datatype, MPI_Status * status)
#else
-int MPI_File_write_ordered(MPI_File fh, void * buf, int count,
+int DARSHAN_DECL(MPI_File_write_ordered)(MPI_File fh, void * buf, int count,
MPI_Datatype datatype, MPI_Status * status)
#endif
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_write_ordered);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_write_ordered)(fh, buf, count,
+ ret = __real_PMPI_File_write_ordered(fh, buf, count,
datatype, status);
tm2 = darshan_core_wtime();
@@ -493,14 +661,25 @@ int MPI_File_write_ordered(MPI_File fh, void * buf, int count,
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered, int, (MPI_File fh, const void * buf, int count,
+ MPI_Datatype datatype, MPI_Status * status),
+ MPI_File_write_ordered(fh, buf, count, datatype, status))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered, int, (MPI_File fh, void * buf, int count,
+ MPI_Datatype datatype, MPI_Status * status),
+ MPI_File_write_ordered(fh, buf, count, datatype, status))
+#endif
-int MPI_File_read_all_begin(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
+int DARSHAN_DECL(MPI_File_read_all_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_read_all_begin);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_read_all_begin)(fh, buf, count, datatype);
+ ret = __real_PMPI_File_read_all_begin(fh, buf, count, datatype);
tm2 = darshan_core_wtime();
MPIIO_PRE_RECORD();
@@ -509,18 +688,22 @@ int MPI_File_read_all_begin(MPI_File fh, void * buf, int count, MPI_Datatype dat
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_read_all_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
+ MPI_File_read_all_begin(fh, buf, count, datatype))
#ifdef HAVE_MPIIO_CONST
-int MPI_File_write_all_begin(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
+int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
#else
-int MPI_File_write_all_begin(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
+int DARSHAN_DECL(MPI_File_write_all_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
#endif
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_write_all_begin);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_write_all_begin)(fh, buf, count, datatype);
+ ret = __real_PMPI_File_write_all_begin(fh, buf, count, datatype);
tm2 = darshan_core_wtime();
MPIIO_PRE_RECORD();
@@ -529,15 +712,24 @@ int MPI_File_write_all_begin(MPI_File fh, void * buf, int count, MPI_Datatype da
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype),
+ MPI_File_write_all_begin(fh, buf, count, datatype))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_write_all_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
+ MPI_File_write_all_begin(fh, buf, count, datatype))
+#endif
-int MPI_File_read_at_all_begin(MPI_File fh, MPI_Offset offset, void * buf,
+int DARSHAN_DECL(MPI_File_read_at_all_begin)(MPI_File fh, MPI_Offset offset, void * buf,
int count, MPI_Datatype datatype)
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_read_at_all_begin);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_read_at_all_begin)(fh, offset, buf,
+ ret = __real_PMPI_File_read_at_all_begin(fh, offset, buf,
count, datatype);
tm2 = darshan_core_wtime();
@@ -547,20 +739,25 @@ int MPI_File_read_at_all_begin(MPI_File fh, MPI_Offset offset, void * buf,
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_read_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void * buf,
+ int count, MPI_Datatype datatype), MPI_File_read_at_all_begin(fh, offset, buf, count,
+ datatype))
#ifdef HAVE_MPIIO_CONST
-int MPI_File_write_at_all_begin(MPI_File fh, MPI_Offset offset, const void * buf,
+int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, const void * buf,
int count, MPI_Datatype datatype)
#else
-int MPI_File_write_at_all_begin(MPI_File fh, MPI_Offset offset, void * buf,
+int DARSHAN_DECL(MPI_File_write_at_all_begin)(MPI_File fh, MPI_Offset offset, void * buf,
int count, MPI_Datatype datatype)
#endif
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_write_at_all_begin);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_write_at_all_begin)(fh, offset,
+ ret = __real_PMPI_File_write_at_all_begin(fh, offset,
buf, count, datatype);
tm2 = darshan_core_wtime();
@@ -570,14 +767,23 @@ int MPI_File_write_at_all_begin(MPI_File fh, MPI_Offset offset, void * buf,
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, const void * buf,
+ int count, MPI_Datatype datatype), MPI_File_write_at_all_begin( fh, offset, buf, count, datatype))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_write_at_all_begin, int, (MPI_File fh, MPI_Offset offset, void * buf,
+ int count, MPI_Datatype datatype), MPI_File_write_at_all_begin( fh, offset, buf, count, datatype))
+#endif
-int MPI_File_read_ordered_begin(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
+int DARSHAN_DECL(MPI_File_read_ordered_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_read_ordered_begin);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_read_ordered_begin)(fh, buf, count,
+ ret = __real_PMPI_File_read_ordered_begin(fh, buf, count,
datatype);
tm2 = darshan_core_wtime();
@@ -587,18 +793,22 @@ int MPI_File_read_ordered_begin(MPI_File fh, void * buf, int count, MPI_Datatype
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_read_ordered_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
+ MPI_File_read_ordered_begin(fh, buf, count, datatype))
#ifdef HAVE_MPIIO_CONST
-int MPI_File_write_ordered_begin(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
+int DARSHAN_DECL(MPI_File_write_ordered_begin)(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
#else
-int MPI_File_write_ordered_begin(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
+int DARSHAN_DECL(MPI_File_write_ordered_begin)(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
#endif
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_write_ordered_begin);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_write_ordered_begin)(fh, buf, count,
+ ret = __real_PMPI_File_write_ordered_begin(fh, buf, count,
datatype);
tm2 = darshan_core_wtime();
@@ -608,14 +818,23 @@ int MPI_File_write_ordered_begin(MPI_File fh, void * buf, int count, MPI_Datatyp
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered_begin, int, (MPI_File fh, const void * buf, int count, MPI_Datatype datatype),
+ MPI_File_write_ordered_begin(fh, buf, count, datatype))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_write_ordered_begin, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype),
+ MPI_File_write_ordered_begin(fh, buf, count, datatype))
+#endif
-int MPI_File_iread(MPI_File fh, void * buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST * request)
+int DARSHAN_DECL(MPI_File_iread)(MPI_File fh, void * buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST * request)
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_iread);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_iread)(fh, buf, count, datatype, request);
+ ret = __real_PMPI_File_iread(fh, buf, count, datatype, request);
tm2 = darshan_core_wtime();
MPIIO_PRE_RECORD();
@@ -624,20 +843,24 @@ int MPI_File_iread(MPI_File fh, void * buf, int count, MPI_Datatype datatype, __
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_iread, int, (MPI_File fh, void * buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST * request),
+ MPI_File_iread(fh, buf, count, datatype, request))
#ifdef HAVE_MPIIO_CONST
-int MPI_File_iwrite(MPI_File fh, const void * buf, int count,
+int DARSHAN_DECL(MPI_File_iwrite)(MPI_File fh, const void * buf, int count,
MPI_Datatype datatype, __D_MPI_REQUEST * request)
#else
-int MPI_File_iwrite(MPI_File fh, void * buf, int count,
+int DARSHAN_DECL(MPI_File_iwrite)(MPI_File fh, void * buf, int count,
MPI_Datatype datatype, __D_MPI_REQUEST * request)
#endif
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_iwrite);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_iwrite)(fh, buf, count, datatype, request);
+ ret = __real_PMPI_File_iwrite(fh, buf, count, datatype, request);
tm2 = darshan_core_wtime();
MPIIO_PRE_RECORD();
@@ -646,15 +869,26 @@ int MPI_File_iwrite(MPI_File fh, void * buf, int count,
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_iwrite, int, (MPI_File fh, const void * buf, int count,
+ MPI_Datatype datatype, __D_MPI_REQUEST * request),
+ MPI_File_iwrite(fh, buf, count, datatype, request))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_iwrite, int, (MPI_File fh, void * buf, int count,
+ MPI_Datatype datatype, __D_MPI_REQUEST * request),
+ MPI_File_iwrite(fh, buf, count, datatype, request))
+#endif
-int MPI_File_iread_at(MPI_File fh, MPI_Offset offset, void * buf,
+int DARSHAN_DECL(MPI_File_iread_at)(MPI_File fh, MPI_Offset offset, void * buf,
int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_iread_at);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_iread_at)(fh, offset, buf, count,
+ ret = __real_PMPI_File_iread_at(fh, offset, buf, count,
datatype, request);
tm2 = darshan_core_wtime();
@@ -664,20 +898,25 @@ int MPI_File_iread_at(MPI_File fh, MPI_Offset offset, void * buf,
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_iread_at, int, (MPI_File fh, MPI_Offset offset, void * buf,
+ int count, MPI_Datatype datatype, __D_MPI_REQUEST *request),
+ MPI_File_iread_at(fh, offset,buf,count,datatype,request))
#ifdef HAVE_MPIIO_CONST
-int MPI_File_iwrite_at(MPI_File fh, MPI_Offset offset, const void * buf,
+int DARSHAN_DECL(MPI_File_iwrite_at)(MPI_File fh, MPI_Offset offset, const void * buf,
int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
#else
-int MPI_File_iwrite_at(MPI_File fh, MPI_Offset offset, void * buf,
+int DARSHAN_DECL(MPI_File_iwrite_at)(MPI_File fh, MPI_Offset offset, void * buf,
int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
#endif
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_iwrite_at);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_iwrite_at)(fh, offset, buf,
+ ret = __real_PMPI_File_iwrite_at(fh, offset, buf,
count, datatype, request);
tm2 = darshan_core_wtime();
@@ -687,15 +926,26 @@ int MPI_File_iwrite_at(MPI_File fh, MPI_Offset offset, void * buf,
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, const void * buf,
+ int count, MPI_Datatype datatype, __D_MPI_REQUEST *request),
+ MPI_File_iwrite_at(fh, offset, buf, count, datatype, request))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_at, int, (MPI_File fh, MPI_Offset offset, void * buf,
+ int count, MPI_Datatype datatype, __D_MPI_REQUEST *request),
+ MPI_File_iwrite_at(fh, offset, buf, count, datatype, request))
+#endif
-int MPI_File_iread_shared(MPI_File fh, void * buf, int count,
+int DARSHAN_DECL(MPI_File_iread_shared)(MPI_File fh, void * buf, int count,
MPI_Datatype datatype, __D_MPI_REQUEST * request)
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_iread_shared);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_iread_shared)(fh, buf, count,
+ ret = __real_PMPI_File_iread_shared(fh, buf, count,
datatype, request);
tm2 = darshan_core_wtime();
@@ -705,20 +955,25 @@ int MPI_File_iread_shared(MPI_File fh, void * buf, int count,
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_iread_shared, int, (MPI_File fh, void * buf, int count,
+ MPI_Datatype datatype, __D_MPI_REQUEST * request),
+ MPI_File_iread_shared(fh, buf, count, datatype, request))
#ifdef HAVE_MPIIO_CONST
-int MPI_File_iwrite_shared(MPI_File fh, const void * buf, int count,
+int DARSHAN_DECL(MPI_File_iwrite_shared)(MPI_File fh, const void * buf, int count,
MPI_Datatype datatype, __D_MPI_REQUEST * request)
#else
-int MPI_File_iwrite_shared(MPI_File fh, void * buf, int count,
+int DARSHAN_DECL(MPI_File_iwrite_shared)(MPI_File fh, void * buf, int count,
MPI_Datatype datatype, __D_MPI_REQUEST * request)
#endif
{
int ret;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_iwrite_shared);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_iwrite_shared)(fh, buf, count,
+ ret = __real_PMPI_File_iwrite_shared(fh, buf, count,
datatype, request);
tm2 = darshan_core_wtime();
@@ -728,15 +983,26 @@ int MPI_File_iwrite_shared(MPI_File fh, void * buf, int count,
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_shared, int, (MPI_File fh, const void * buf, int count,
+ MPI_Datatype datatype, __D_MPI_REQUEST * request),
+ MPI_File_iwrite_shared(fh, buf, count, datatype, request))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_iwrite_shared, int, (MPI_File fh, void * buf, int count,
+ MPI_Datatype datatype, __D_MPI_REQUEST * request),
+ MPI_File_iwrite_shared(fh, buf, count, datatype, request))
+#endif
-int MPI_File_sync(MPI_File fh)
+int DARSHAN_DECL(MPI_File_sync)(MPI_File fh)
{
int ret;
struct mpiio_file_record_ref *rec_ref;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_sync);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_sync)(fh);
+ ret = __real_PMPI_File_sync(fh);
tm2 = darshan_core_wtime();
if(ret == MPI_SUCCESS)
@@ -756,12 +1022,13 @@ int MPI_File_sync(MPI_File fh)
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_sync, int, (MPI_File fh), MPI_File_sync(fh))
#ifdef HAVE_MPIIO_CONST
-int MPI_File_set_view(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
+int DARSHAN_DECL(MPI_File_set_view)(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
MPI_Datatype filetype, const char *datarep, MPI_Info info)
#else
-int MPI_File_set_view(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
+int DARSHAN_DECL(MPI_File_set_view)(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
MPI_Datatype filetype, char *datarep, MPI_Info info)
#endif
{
@@ -769,8 +1036,10 @@ int MPI_File_set_view(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
struct mpiio_file_record_ref *rec_ref;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_set_view);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_set_view)(fh, disp, etype, filetype,
+ ret = __real_PMPI_File_set_view(fh, disp, etype, filetype,
datarep, info);
tm2 = darshan_core_wtime();
@@ -795,16 +1064,25 @@ int MPI_File_set_view(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
return(ret);
}
+#ifdef HAVE_MPIIO_CONST
+DARSHAN_WRAPPER_MAP(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
+ MPI_Datatype filetype, const char *datarep, MPI_Info info), MPI_File_set_view(fh, disp, etype, filetype, datarep, info))
+#else
+DARSHAN_WRAPPER_MAP(PMPI_File_set_view, int, (MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
+ MPI_Datatype filetype, char *datarep, MPI_Info info), MPI_File_set_view(fh, disp, etype, filetype, datarep, info))
+#endif
-int MPI_File_close(MPI_File *fh)
+int DARSHAN_DECL(MPI_File_close)(MPI_File *fh)
{
int ret;
struct mpiio_file_record_ref *rec_ref;
MPI_File tmp_fh = *fh;
double tm1, tm2;
+ MAP_OR_FAIL(PMPI_File_close);
+
tm1 = darshan_core_wtime();
- ret = DARSHAN_MPI_CALL(PMPI_File_close)(fh);
+ ret = __real_PMPI_File_close(fh);
tm2 = darshan_core_wtime();
MPIIO_PRE_RECORD();
@@ -824,6 +1102,7 @@ int MPI_File_close(MPI_File *fh)
return(ret);
}
+DARSHAN_WRAPPER_MAP(PMPI_File_close, int, (MPI_File *fh), MPI_File_close(fh))
/***********************************************************
* Internal functions for manipulating MPI-IO module state *
@@ -1112,11 +1391,11 @@ static void mpiio_shared_record_variance(MPI_Comm mod_comm,
struct darshan_variance_dt *var_send_buf = NULL;
struct darshan_variance_dt *var_recv_buf = NULL;
- DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_variance_dt),
+ PMPI_Type_contiguous(sizeof(struct darshan_variance_dt),
MPI_BYTE, &var_dt);
- DARSHAN_MPI_CALL(PMPI_Type_commit)(&var_dt);
+ PMPI_Type_commit(&var_dt);
- DARSHAN_MPI_CALL(PMPI_Op_create)(darshan_variance_reduce, 1, &var_op);
+ PMPI_Op_create(darshan_variance_reduce, 1, &var_op);
var_send_buf = malloc(shared_rec_count * sizeof(struct darshan_variance_dt));
if(!var_send_buf)
@@ -1141,7 +1420,7 @@ static void mpiio_shared_record_variance(MPI_Comm mod_comm,
inrec_array[i].fcounters[MPIIO_F_META_TIME];
}
- DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
+ PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
var_dt, var_op, 0, mod_comm);
if(my_rank == 0)
@@ -1164,7 +1443,7 @@ static void mpiio_shared_record_variance(MPI_Comm mod_comm,
inrec_array[i].counters[MPIIO_BYTES_WRITTEN];
}
- DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
+ PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
var_dt, var_op, 0, mod_comm);
if(my_rank == 0)
@@ -1176,8 +1455,8 @@ static void mpiio_shared_record_variance(MPI_Comm mod_comm,
}
}
- DARSHAN_MPI_CALL(PMPI_Type_free)(&var_dt);
- DARSHAN_MPI_CALL(PMPI_Op_free)(&var_op);
+ PMPI_Type_free(&var_dt);
+ PMPI_Op_free(&var_op);
free(var_send_buf);
free(var_recv_buf);
@@ -1370,15 +1649,15 @@ static void mpiio_shutdown(
/* construct a datatype for a MPIIO file record. This is serving no purpose
* except to make sure we can do a reduction on proper boundaries
*/
- DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_mpiio_file),
+ PMPI_Type_contiguous(sizeof(struct darshan_mpiio_file),
MPI_BYTE, &red_type);
- DARSHAN_MPI_CALL(PMPI_Type_commit)(&red_type);
+ PMPI_Type_commit(&red_type);
/* register a MPIIO file record reduction operator */
- DARSHAN_MPI_CALL(PMPI_Op_create)(mpiio_record_reduction_op, 1, &red_op);
+ PMPI_Op_create(mpiio_record_reduction_op, 1, &red_op);
/* reduce shared MPIIO file records */
- DARSHAN_MPI_CALL(PMPI_Reduce)(red_send_buf, red_recv_buf,
+ PMPI_Reduce(red_send_buf, red_recv_buf,
shared_rec_count, red_type, red_op, 0, mod_comm);
/* get the time and byte variances for shared files */
@@ -1398,8 +1677,8 @@ static void mpiio_shutdown(
mpiio_rec_count -= shared_rec_count;
}
- DARSHAN_MPI_CALL(PMPI_Type_free)(&red_type);
- DARSHAN_MPI_CALL(PMPI_Op_free)(&red_op);
+ PMPI_Type_free(&red_type);
+ PMPI_Op_free(&red_op);
}
*mpiio_buf_sz = mpiio_rec_count * sizeof(struct darshan_mpiio_file);
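
In the LD_PRELOAD build, the rewritten MPI-IO wrappers above resolve the underlying PMPI entry points lazily: each wrapper calls MAP_OR_FAIL(PMPI_...) on first use and then invokes the resulting __real_PMPI_... pointer. The following is a minimal standalone sketch of that interposition pattern for a single routine; it is illustrative only and does not reproduce Darshan's actual macros, instrumentation, or fallback logic.

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

/* pointer to the next definition of PMPI_File_sync in link order */
static int (*real_pmpi_file_sync)(MPI_File) = NULL;

int MPI_File_sync(MPI_File fh)
{
    if(!real_pmpi_file_sync)
    {
        real_pmpi_file_sync =
            (int (*)(MPI_File))dlsym(RTLD_NEXT, "PMPI_File_sync");
        if(!real_pmpi_file_sync)
        {
            fprintf(stderr, "failed to map symbol: PMPI_File_sync\n");
            exit(1);
        }
    }
    /* timing and record updates would surround this call */
    return real_pmpi_file_sync(fh);
}

Compiled into a shared object and injected with LD_PRELOAD, a wrapper of this shape intercepts MPI_File_sync while still reaching the MPI library's implementation through its profiling entry point.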
=====================================
darshan-runtime/lib/darshan-pnetcdf.c
=====================================
--- a/darshan-runtime/lib/darshan-pnetcdf.c
+++ b/darshan-runtime/lib/darshan-pnetcdf.c
@@ -94,7 +94,7 @@ static int my_rank = -1;
if(newpath != __path) free(newpath); \
break; \
} \
- DARSHAN_MPI_CALL(PMPI_Comm_size)(__comm, &comm_size); \
+ PMPI_Comm_size(__comm, &comm_size); \
if(rec_ref->file_rec->fcounters[PNETCDF_F_OPEN_TIMESTAMP] == 0) \
rec_ref->file_rec->fcounters[PNETCDF_F_OPEN_TIMESTAMP] = __tm1; \
if(comm_size == 1) rec_ref->file_rec->counters[PNETCDF_INDEP_OPENS] += 1; \
@@ -406,15 +406,15 @@ static void pnetcdf_shutdown(
/* construct a datatype for a PNETCDF file record. This is serving no purpose
* except to make sure we can do a reduction on proper boundaries
*/
- DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_pnetcdf_file),
+ PMPI_Type_contiguous(sizeof(struct darshan_pnetcdf_file),
MPI_BYTE, &red_type);
- DARSHAN_MPI_CALL(PMPI_Type_commit)(&red_type);
+ PMPI_Type_commit(&red_type);
/* register a PNETCDF file record reduction operator */
- DARSHAN_MPI_CALL(PMPI_Op_create)(pnetcdf_record_reduction_op, 1, &red_op);
+ PMPI_Op_create(pnetcdf_record_reduction_op, 1, &red_op);
/* reduce shared PNETCDF file records */
- DARSHAN_MPI_CALL(PMPI_Reduce)(red_send_buf, red_recv_buf,
+ PMPI_Reduce(red_send_buf, red_recv_buf,
shared_rec_count, red_type, red_op, 0, mod_comm);
/* clean up reduction state */
@@ -430,8 +430,8 @@ static void pnetcdf_shutdown(
pnetcdf_rec_count -= shared_rec_count;
}
- DARSHAN_MPI_CALL(PMPI_Type_free)(&red_type);
- DARSHAN_MPI_CALL(PMPI_Op_free)(&red_op);
+ PMPI_Type_free(&red_type);
+ PMPI_Op_free(&red_op);
}
/* update output buffer size to account for shared file reduction */
=====================================
darshan-runtime/lib/darshan-posix.c
=====================================
--- a/darshan-runtime/lib/darshan-posix.c
+++ b/darshan-runtime/lib/darshan-posix.c
@@ -1656,11 +1656,11 @@ static void posix_shared_record_variance(MPI_Comm mod_comm,
struct darshan_variance_dt *var_send_buf = NULL;
struct darshan_variance_dt *var_recv_buf = NULL;
- DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_variance_dt),
+ PMPI_Type_contiguous(sizeof(struct darshan_variance_dt),
MPI_BYTE, &var_dt);
- DARSHAN_MPI_CALL(PMPI_Type_commit)(&var_dt);
+ PMPI_Type_commit(&var_dt);
- DARSHAN_MPI_CALL(PMPI_Op_create)(darshan_variance_reduce, 1, &var_op);
+ PMPI_Op_create(darshan_variance_reduce, 1, &var_op);
var_send_buf = malloc(shared_rec_count * sizeof(struct darshan_variance_dt));
if(!var_send_buf)
@@ -1685,7 +1685,7 @@ static void posix_shared_record_variance(MPI_Comm mod_comm,
inrec_array[i].fcounters[POSIX_F_META_TIME];
}
- DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
+ PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
var_dt, var_op, 0, mod_comm);
if(my_rank == 0)
@@ -1708,7 +1708,7 @@ static void posix_shared_record_variance(MPI_Comm mod_comm,
inrec_array[i].counters[POSIX_BYTES_WRITTEN];
}
- DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
+ PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
var_dt, var_op, 0, mod_comm);
if(my_rank == 0)
@@ -1720,8 +1720,8 @@ static void posix_shared_record_variance(MPI_Comm mod_comm,
}
}
- DARSHAN_MPI_CALL(PMPI_Type_free)(&var_dt);
- DARSHAN_MPI_CALL(PMPI_Op_free)(&var_op);
+ PMPI_Type_free(&var_dt);
+ PMPI_Op_free(&var_op);
free(var_send_buf);
free(var_recv_buf);
@@ -1906,15 +1906,15 @@ static void posix_shutdown(
/* construct a datatype for a POSIX file record. This is serving no purpose
* except to make sure we can do a reduction on proper boundaries
*/
- DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_posix_file),
+ PMPI_Type_contiguous(sizeof(struct darshan_posix_file),
MPI_BYTE, &red_type);
- DARSHAN_MPI_CALL(PMPI_Type_commit)(&red_type);
+ PMPI_Type_commit(&red_type);
/* register a POSIX file record reduction operator */
- DARSHAN_MPI_CALL(PMPI_Op_create)(posix_record_reduction_op, 1, &red_op);
+ PMPI_Op_create(posix_record_reduction_op, 1, &red_op);
/* reduce shared POSIX file records */
- DARSHAN_MPI_CALL(PMPI_Reduce)(red_send_buf, red_recv_buf,
+ PMPI_Reduce(red_send_buf, red_recv_buf,
shared_rec_count, red_type, red_op, 0, mod_comm);
/* get the time and byte variances for shared files */
@@ -1934,8 +1934,8 @@ static void posix_shutdown(
posix_rec_count -= shared_rec_count;
}
- DARSHAN_MPI_CALL(PMPI_Type_free)(&red_type);
- DARSHAN_MPI_CALL(PMPI_Op_free)(&red_op);
+ PMPI_Type_free(&red_type);
+ PMPI_Op_free(&red_op);
}
/* update output buffer size to account for shared file reduction */
=====================================
darshan-runtime/lib/darshan-stdio.c
=====================================
--- a/darshan-runtime/lib/darshan-stdio.c
+++ b/darshan-runtime/lib/darshan-stdio.c
@@ -1184,15 +1184,15 @@ static void stdio_shutdown(
/* construct a datatype for a STDIO file record. This is serving no purpose
* except to make sure we can do a reduction on proper boundaries
*/
- DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_stdio_file),
+ PMPI_Type_contiguous(sizeof(struct darshan_stdio_file),
MPI_BYTE, &red_type);
- DARSHAN_MPI_CALL(PMPI_Type_commit)(&red_type);
+ PMPI_Type_commit(&red_type);
/* register a STDIO file record reduction operator */
- DARSHAN_MPI_CALL(PMPI_Op_create)(stdio_record_reduction_op, 1, &red_op);
+ PMPI_Op_create(stdio_record_reduction_op, 1, &red_op);
/* reduce shared STDIO file records */
- DARSHAN_MPI_CALL(PMPI_Reduce)(red_send_buf, red_recv_buf,
+ PMPI_Reduce(red_send_buf, red_recv_buf,
shared_rec_count, red_type, red_op, 0, mod_comm);
/* get the time and byte variances for shared files */
@@ -1212,8 +1212,8 @@ static void stdio_shutdown(
stdio_rec_count -= shared_rec_count;
}
- DARSHAN_MPI_CALL(PMPI_Type_free)(&red_type);
- DARSHAN_MPI_CALL(PMPI_Op_free)(&red_op);
+ PMPI_Type_free(&red_type);
+ PMPI_Op_free(&red_op);
}
/* filter out any records that have no activity on them; this is
@@ -1323,11 +1323,11 @@ static void stdio_shared_record_variance(MPI_Comm mod_comm,
struct darshan_variance_dt *var_send_buf = NULL;
struct darshan_variance_dt *var_recv_buf = NULL;
- DARSHAN_MPI_CALL(PMPI_Type_contiguous)(sizeof(struct darshan_variance_dt),
+ PMPI_Type_contiguous(sizeof(struct darshan_variance_dt),
MPI_BYTE, &var_dt);
- DARSHAN_MPI_CALL(PMPI_Type_commit)(&var_dt);
+ PMPI_Type_commit(&var_dt);
- DARSHAN_MPI_CALL(PMPI_Op_create)(darshan_variance_reduce, 1, &var_op);
+ PMPI_Op_create(darshan_variance_reduce, 1, &var_op);
var_send_buf = malloc(shared_rec_count * sizeof(struct darshan_variance_dt));
if(!var_send_buf)
@@ -1352,7 +1352,7 @@ static void stdio_shared_record_variance(MPI_Comm mod_comm,
inrec_array[i].fcounters[STDIO_F_META_TIME];
}
- DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
+ PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
var_dt, var_op, 0, mod_comm);
if(my_rank == 0)
@@ -1375,7 +1375,7 @@ static void stdio_shared_record_variance(MPI_Comm mod_comm,
inrec_array[i].counters[STDIO_BYTES_WRITTEN];
}
- DARSHAN_MPI_CALL(PMPI_Reduce)(var_send_buf, var_recv_buf, shared_rec_count,
+ PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
var_dt, var_op, 0, mod_comm);
if(my_rank == 0)
@@ -1387,8 +1387,8 @@ static void stdio_shared_record_variance(MPI_Comm mod_comm,
}
}
- DARSHAN_MPI_CALL(PMPI_Type_free)(&var_dt);
- DARSHAN_MPI_CALL(PMPI_Op_free)(&var_op);
+ PMPI_Type_free(&var_dt);
+ PMPI_Op_free(&var_op);
free(var_send_buf);
free(var_recv_buf);
=====================================
darshan-runtime/share/ld-opts/darshan-base-ld-opts.in
=====================================
--- a/darshan-runtime/share/ld-opts/darshan-base-ld-opts.in
+++ b/darshan-runtime/share/ld-opts/darshan-base-ld-opts.in
@@ -1,6 +1,13 @@
--undefined=MPI_Init
--undefined=MPI_Wtime
+--wrap=MPI_Init
+--wrap=MPI_Init_thread
+--wrap=MPI_Finalize
+--wrap=PMPI_Init
+--wrap=PMPI_Init_thread
+--wrap=PMPI_Finalize
@@darshan_share_path@/ld-opts/darshan-posix-ld-opts
@@darshan_share_path@/ld-opts/darshan-pnetcdf-ld-opts
@@darshan_share_path@/ld-opts/darshan-stdio-ld-opts
+@@darshan_share_path@/ld-opts/darshan-mpiio-ld-opts
@DARSHAN_HDF5_LD_OPTS@
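
These ld-opts files are plain lists of GNU ld arguments used for static instrumentation. Each --wrap=SYMBOL option makes the linker redirect references to SYMBOL to __wrap_SYMBOL and keeps the original definition reachable as __real_SYMBOL, which is how the wrappers declared with DARSHAN_DECL() in the source changes above are bound in at static link time. A hypothetical, Darshan-independent illustration of the mechanism:

/* wrap.c -- illustration of GNU ld's --wrap option (not Darshan code).
 * Link the application with "-Wl,--wrap=MPI_File_sync"; every reference to
 * MPI_File_sync then resolves to __wrap_MPI_File_sync, and the MPI library's
 * implementation remains reachable as __real_MPI_File_sync. */
#include <mpi.h>

int __real_MPI_File_sync(MPI_File fh);  /* provided by the linker */

int __wrap_MPI_File_sync(MPI_File fh)
{
    /* instrumentation would go here */
    return __real_MPI_File_sync(fh);
}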
=====================================
darshan-runtime/share/ld-opts/darshan-mpiio-ld-opts
=====================================
--- /dev/null
+++ b/darshan-runtime/share/ld-opts/darshan-mpiio-ld-opts
@@ -0,0 +1,84 @@
+--wrap=MPI_File_close
+--wrap=MPI_File_iread_at
+--wrap=MPI_File_iread
+--wrap=MPI_File_iread_shared
+--wrap=MPI_File_iwrite_at
+--wrap=MPI_File_iwrite_at
+--wrap=MPI_File_iwrite
+--wrap=MPI_File_iwrite
+--wrap=MPI_File_iwrite_shared
+--wrap=MPI_File_iwrite_shared
+--wrap=MPI_File_open
+--wrap=MPI_File_open
+--wrap=MPI_File_read_all_begin
+--wrap=MPI_File_read_all
+--wrap=MPI_File_read_at_all
+--wrap=MPI_File_read_at_all_begin
+--wrap=MPI_File_read_at
+--wrap=MPI_File_read
+--wrap=MPI_File_read_ordered_begin
+--wrap=MPI_File_read_ordered
+--wrap=MPI_File_read_shared
+--wrap=MPI_File_set_view
+--wrap=MPI_File_set_view
+--wrap=MPI_File_sync
+--wrap=MPI_File_write_all_begin
+--wrap=MPI_File_write_all_begin
+--wrap=MPI_File_write_all
+--wrap=MPI_File_write_all
+--wrap=MPI_File_write_at_all_begin
+--wrap=MPI_File_write_at_all_begin
+--wrap=MPI_File_write_at_all
+--wrap=MPI_File_write_at_all
+--wrap=MPI_File_write_at
+--wrap=MPI_File_write_at
+--wrap=MPI_File_write
+--wrap=MPI_File_write
+--wrap=MPI_File_write_ordered_begin
+--wrap=MPI_File_write_ordered_begin
+--wrap=MPI_File_write_ordered
+--wrap=MPI_File_write_ordered
+--wrap=MPI_File_write_shared
+--wrap=MPI_File_write_shared
+--wrap=PMPI_File_close
+--wrap=PMPI_File_iread_at
+--wrap=PMPI_File_iread
+--wrap=PMPI_File_iread_shared
+--wrap=PMPI_File_iwrite_at
+--wrap=PMPI_File_iwrite_at
+--wrap=PMPI_File_iwrite
+--wrap=PMPI_File_iwrite
+--wrap=PMPI_File_iwrite_shared
+--wrap=PMPI_File_iwrite_shared
+--wrap=PMPI_File_open
+--wrap=PMPI_File_open
+--wrap=PMPI_File_read_all_begin
+--wrap=PMPI_File_read_all
+--wrap=PMPI_File_read_at_all
+--wrap=PMPI_File_read_at_all_begin
+--wrap=PMPI_File_read_at
+--wrap=PMPI_File_read
+--wrap=PMPI_File_read_ordered_begin
+--wrap=PMPI_File_read_ordered
+--wrap=PMPI_File_read_shared
+--wrap=PMPI_File_set_view
+--wrap=PMPI_File_set_view
+--wrap=PMPI_File_sync
+--wrap=PMPI_File_write_all_begin
+--wrap=PMPI_File_write_all_begin
+--wrap=PMPI_File_write_all
+--wrap=PMPI_File_write_all
+--wrap=PMPI_File_write_at_all_begin
+--wrap=PMPI_File_write_at_all_begin
+--wrap=PMPI_File_write_at_all
+--wrap=PMPI_File_write_at_all
+--wrap=PMPI_File_write_at
+--wrap=PMPI_File_write_at
+--wrap=PMPI_File_write
+--wrap=PMPI_File_write
+--wrap=PMPI_File_write_ordered_begin
+--wrap=PMPI_File_write_ordered_begin
+--wrap=PMPI_File_write_ordered
+--wrap=PMPI_File_write_ordered
+--wrap=PMPI_File_write_shared
+--wrap=PMPI_File_write_shared
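
Note that the list wraps both the MPI_File_* and PMPI_File_* entry points. This mirrors the DARSHAN_WRAPPER_MAP definitions added to darshan-mpiio.c above: intercepting the profiling symbols as well ensures that callers which invoke the PMPI interface directly are still routed through the instrumented wrappers. A sketch of the intended forwarding shape for one routine in a static build, assuming the __wrap_/__real_ naming introduced by --wrap (hypothetical code, not the macro expansion used in the tree):

#include <mpi.h>

/* the instrumented wrapper bound in by --wrap=MPI_File_close */
int __wrap_MPI_File_close(MPI_File *fh);

/* profiling-interface callers are funneled into the same wrapper */
int __wrap_PMPI_File_close(MPI_File *fh)
{
    return __wrap_MPI_File_close(fh);
}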
View it on GitLab: https://xgitlab.cels.anl.gov/darshan/darshan/compare/c7b5ac907c2cc0bb038b304e85541bc84ad523fd...48dbd2e8f97e54f3d48870b55440dc64e0c7ec64