[Darshan-commits] [Darshan] branch, dev-modular, updated. darshan-2.3.1-110-g53fa81e
Service Account
git at mcs.anl.gov
Tue Jul 7 16:42:34 CDT 2015
This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "".
The branch, dev-modular has been updated
via 53fa81e8bd7449c9caf48f78fb64e55829b9aa1d (commit)
from c572aa446c79a9192b8d2c30d33485da8c6f4ef2 (commit)
The revisions listed above that are new to this repository have not
appeared in any other notification email, so they are listed in full
below.
- Log -----------------------------------------------------------------
commit 53fa81e8bd7449c9caf48f78fb64e55829b9aa1d
Author: Shane Snyder <ssnyder at mcs.anl.gov>
Date: Tue Jul 7 16:42:13 2015 -0500
add the remaining read/write wrappers to mpiio
-----------------------------------------------------------------------
Summary of changes:
darshan-runtime/lib/darshan-mpiio.c | 315 ++++++++++++++++++++++++++++++++++-
1 file changed, 313 insertions(+), 2 deletions(-)
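
The wrappers in the diff below all follow the same interception pattern: each
MPI-IO entry point is redefined so that it timestamps and forwards to the
underlying PMPI routine, then records the operation under Darshan's module
lock. Each write wrapper is additionally guarded by HAVE_MPIIO_CONST, since
MPI-3 const-qualifies the buffer arguments of the write functions. As a rough
orientation before reading the diff, here is a minimal, self-contained sketch
of that PMPI profiling-interface pattern. It is not Darshan's code:
MPI_Wtime() and the fprintf() stand in for darshan_core_wtime() and the
MPIIO_RECORD_* macros, respectively.

#include <stdio.h>
#include <mpi.h>

/* Sketch only: intercept MPI_File_read by redefining the MPI_ symbol and
 * calling the PMPI_ name underneath, timing the call on either side. */
int MPI_File_read(MPI_File fh, void *buf, int count,
                  MPI_Datatype datatype, MPI_Status *status)
{
    int ret;
    double tm1, tm2;

    tm1 = MPI_Wtime();                          /* before the real call */
    ret = PMPI_File_read(fh, buf, count, datatype, status);
    tm2 = MPI_Wtime();                          /* after the real call */

    /* Darshan would take its lock and update per-file counters here. */
    fprintf(stderr, "MPI_File_read: ret=%d elapsed=%fs\n", ret, tm2 - tm1);
    return ret;
}

When an object file containing such a wrapper is linked ahead of the MPI
library, the application's MPI_File_read calls resolve to the wrapper, which
still reaches the real implementation through the PMPI_ alias.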
Diff of changes:
diff --git a/darshan-runtime/lib/darshan-mpiio.c b/darshan-runtime/lib/darshan-mpiio.c
index 8c41e10..9fd4246 100644
--- a/darshan-runtime/lib/darshan-mpiio.c
+++ b/darshan-runtime/lib/darshan-mpiio.c
@@ -396,7 +396,319 @@ int MPI_File_write_at_all(MPI_File fh, MPI_Offset offset, void * buf,
return(ret);
}
-/* TODO: reads and writes */
+int MPI_File_read_shared(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_read_shared)(fh, buf, count,
+ datatype, status);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_INDEP_READS, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+#ifdef HAVE_MPIIO_CONST
+int MPI_File_write_shared(MPI_File fh, const void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
+#else
+int MPI_File_write_shared(MPI_File fh, void * buf, int count, MPI_Datatype datatype, MPI_Status *status)
+#endif
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_write_shared)(fh, buf, count,
+ datatype, status);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_INDEP_WRITES, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+int MPI_File_read_ordered(MPI_File fh, void * buf, int count,
+ MPI_Datatype datatype, MPI_Status * status)
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_read_ordered)(fh, buf, count,
+ datatype, status);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_COLL_READS, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+#ifdef HAVE_MPIIO_CONST
+int MPI_File_write_ordered(MPI_File fh, const void * buf, int count,
+ MPI_Datatype datatype, MPI_Status * status)
+#else
+int MPI_File_write_ordered(MPI_File fh, void * buf, int count,
+ MPI_Datatype datatype, MPI_Status * status)
+#endif
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_write_ordered)(fh, buf, count,
+ datatype, status);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_COLL_WRITES, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+int MPI_File_read_all_begin(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_read_all_begin)(fh, buf, count, datatype);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+#ifdef HAVE_MPIIO_CONST
+int MPI_File_write_all_begin(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
+#else
+int MPI_File_write_all_begin(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
+#endif
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_write_all_begin)(fh, buf, count, datatype);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+int MPI_File_read_at_all_begin(MPI_File fh, MPI_Offset offset, void * buf,
+ int count, MPI_Datatype datatype)
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_read_at_all_begin)(fh, offset, buf,
+ count, datatype);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+#ifdef HAVE_MPIIO_CONST
+int MPI_File_write_at_all_begin(MPI_File fh, MPI_Offset offset, const void * buf,
+ int count, MPI_Datatype datatype)
+#else
+int MPI_File_write_at_all_begin(MPI_File fh, MPI_Offset offset, void * buf,
+ int count, MPI_Datatype datatype)
+#endif
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_write_at_all_begin)(fh, offset,
+ buf, count, datatype);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+int MPI_File_read_ordered_begin(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_read_ordered_begin)(fh, buf, count,
+ datatype);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_SPLIT_READS, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+#ifdef HAVE_MPIIO_CONST
+int MPI_File_write_ordered_begin(MPI_File fh, const void * buf, int count, MPI_Datatype datatype)
+#else
+int MPI_File_write_ordered_begin(MPI_File fh, void * buf, int count, MPI_Datatype datatype)
+#endif
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_write_ordered_begin)(fh, buf, count,
+ datatype);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_SPLIT_WRITES, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+int MPI_File_iread(MPI_File fh, void * buf, int count, MPI_Datatype datatype, __D_MPI_REQUEST * request)
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_iread)(fh, buf, count, datatype, request);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_NB_READS, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+#ifdef HAVE_MPIIO_CONST
+int MPI_File_iwrite(MPI_File fh, const void * buf, int count,
+ MPI_Datatype datatype, __D_MPI_REQUEST * request)
+#else
+int MPI_File_iwrite(MPI_File fh, void * buf, int count,
+ MPI_Datatype datatype, __D_MPI_REQUEST * request)
+#endif
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_iwrite)(fh, buf, count, datatype, request);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_NB_WRITES, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+int MPI_File_iread_at(MPI_File fh, MPI_Offset offset, void * buf,
+ int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_iread_at)(fh, offset, buf, count,
+ datatype, request);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_NB_READS, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+#ifdef HAVE_MPIIO_CONST
+int MPI_File_iwrite_at(MPI_File fh, MPI_Offset offset, const void * buf,
+ int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
+#else
+int MPI_File_iwrite_at(MPI_File fh, MPI_Offset offset, void * buf,
+ int count, MPI_Datatype datatype, __D_MPI_REQUEST *request)
+#endif
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_iwrite_at)(fh, offset, buf,
+ count, datatype, request);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_NB_WRITES, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+int MPI_File_iread_shared(MPI_File fh, void * buf, int count,
+ MPI_Datatype datatype, __D_MPI_REQUEST * request)
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_iread_shared)(fh, buf, count,
+ datatype, request);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_READ(ret, fh, count, datatype, MPIIO_NB_READS, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
+
+#ifdef HAVE_MPIIO_CONST
+int MPI_File_iwrite_shared(MPI_File fh, const void * buf, int count,
+ MPI_Datatype datatype, __D_MPI_REQUEST * request)
+#else
+int MPI_File_iwrite_shared(MPI_File fh, void * buf, int count,
+ MPI_Datatype datatype, __D_MPI_REQUEST * request)
+#endif
+{
+ int ret;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_iwrite_shared)(fh, buf, count,
+ datatype, request);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ MPIIO_RECORD_WRITE(ret, fh, count, datatype, MPIIO_NB_WRITES, tm1, tm2);
+ MPIIO_UNLOCK();
+ return(ret);
+}
int MPI_File_sync(MPI_File fh)
{
@@ -425,7 +737,6 @@ int MPI_File_sync(MPI_File fh)
return(ret);
}
-/* TODO: test */
#ifdef HAVE_MPIIO_CONST
int MPI_File_set_view(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
MPI_Datatype filetype, const char *datarep, MPI_Info info)
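
The MPIIO_RECORD_READ and MPIIO_RECORD_WRITE macros invoked throughout the
wrappers above are defined elsewhere in darshan-mpiio.c and are not part of
this diff. As a hedged illustration of what such a recording step typically
involves (every type, field, and helper name below is invented for the
sketch, not taken from Darshan), a macro of this kind resolves the file
handle to a per-file record, converts the count and datatype into bytes
moved, and accumulates counters and timers:

#include <stdint.h>
#include <mpi.h>

/* Hypothetical record type and lookup helper, invented for illustration. */
struct file_record {
    int64_t counters[16];   /* indexed by op class, e.g. independent reads */
    int64_t bytes_read;
    double  read_time;
};
static struct file_record the_record;   /* single global record, sketch only */
static struct file_record *lookup_record_for(MPI_File fh)
{
    (void)fh;               /* a real module would hash fh to its record */
    return &the_record;
}

/* Sketch of the bookkeeping a MPIIO_RECORD_READ-style macro performs. */
#define RECORD_READ_SKETCH(ret, fh, count, datatype, counter_idx, tm1, tm2) \
do { \
    int type_size; \
    struct file_record *rec; \
    if ((ret) != MPI_SUCCESS) break;            /* skip failed operations */ \
    PMPI_Type_size((datatype), &type_size);     /* bytes = count * size */ \
    rec = lookup_record_for(fh); \
    if (!rec) break; \
    rec->counters[(counter_idx)] += 1; \
    rec->bytes_read += (int64_t)(count) * type_size; \
    rec->read_time  += ((tm2) - (tm1)); \
} while (0)

This is why the wrappers pass ret, fh, count, datatype, a counter name, and
both timestamps to the macros: together they are sufficient to classify the
operation, size it in bytes, and attribute its elapsed time.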
hooks/post-receive
--