[Darshan-commits] [Darshan] branch, dev-modular, updated. darshan-2.3.1-108-gf1ad34e
Service Account
git at mcs.anl.gov
Thu Jul 2 15:44:16 CDT 2015
This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "".
The branch, dev-modular has been updated
via f1ad34e8cf3f2aff0e12f83cd85e9f95726bd9e6 (commit)
via 628724ba033d37cad799ef52d95cbe74d2274776 (commit)
from 28f19402834f8b016441741b7dd87c0b4859e9f7 (commit)
Those revisions listed above that are new to this repository have
not appeared in any other notification email, so we list them
in full below.
- Log -----------------------------------------------------------------
commit f1ad34e8cf3f2aff0e12f83cd85e9f95726bd9e6
Author: Shane Snyder <ssnyder at mcs.anl.gov>
Date: Thu Jul 2 11:22:07 2015 -0500
add mpiio wrappers for close,sync,set_view
commit 628724ba033d37cad799ef52d95cbe74d2274776
Author: Shane Snyder <ssnyder at mcs.anl.gov>
Date: Wed Jul 1 15:56:41 2015 -0500
clean up & reorganize mpiio module source file
-----------------------------------------------------------------------
Summary of changes:
darshan-mpiio-log-format.h | 102 ++++++-
darshan-runtime/lib/darshan-mpiio.c | 519 +++++++++++++++++++++++++--------
darshan-util/darshan-mpiio-logutils.c | 4 +-
darshan-util/darshan-mpiio-parser.c | 76 +++++-
4 files changed, 554 insertions(+), 147 deletions(-)
Diff of changes:
diff --git a/darshan-mpiio-log-format.h b/darshan-mpiio-log-format.h
index 857133d..d6a4047 100644
--- a/darshan-mpiio-log-format.h
+++ b/darshan-mpiio-log-format.h
@@ -11,27 +11,111 @@
enum darshan_mpiio_indices
{
- DARSHAN_MPIIO_INDEP_OPENS, /* independent opens */
- DARSHAN_MPIIO_COLL_OPENS, /* collective opens */
- DARSHAN_MPIIO_HINTS, /* how many times hints were used */
+ MPIIO_INDEP_OPENS, /* count of MPI independent opens */
+ MPIIO_COLL_OPENS, /* count of MPI collective opens */
+ MPIIO_INDEP_READS, /* count of MPI independent reads */
+ MPIIO_INDEP_WRITES, /* count of MPI independent writes */
+ MPIIO_COLL_READS, /* count of MPI collective reads */
+ MPIIO_COLL_WRITES, /* count of MPI collective writes */
+ MPIIO_SPLIT_READS, /* count of MPI split collective reads */
+ MPIIO_SPLIT_WRITES, /* count of MPI split collective writes */
+ MPIIO_NB_READS, /* count of MPI nonblocking reads */
+ MPIIO_NB_WRITES, /* count of MPI nonblocking writes */
+ MPIIO_SYNCS, /* count of MPI file syncs */
+ MPIIO_HINTS, /* count of MPI hints used */
+ MPIIO_VIEWS, /* count of MPI set view calls */
+ /* type categories */
+ MPIIO_COMBINER_NAMED, /* count of each MPI datatype category */
+ MPIIO_COMBINER_DUP,
+ MPIIO_COMBINER_CONTIGUOUS,
+ MPIIO_COMBINER_VECTOR,
+ MPIIO_COMBINER_HVECTOR_INTEGER,
+ MPIIO_COMBINER_HVECTOR,
+ MPIIO_COMBINER_INDEXED,
+ MPIIO_COMBINER_HINDEXED_INTEGER,
+ MPIIO_COMBINER_HINDEXED,
+ MPIIO_COMBINER_INDEXED_BLOCK,
+ MPIIO_COMBINER_STRUCT_INTEGER,
+ MPIIO_COMBINER_STRUCT,
+ MPIIO_COMBINER_SUBARRAY,
+ MPIIO_COMBINER_DARRAY,
+ MPIIO_COMBINER_F90_REAL,
+ MPIIO_COMBINER_F90_COMPLEX,
+ MPIIO_COMBINER_F90_INTEGER,
+ MPIIO_COMBINER_RESIZED,
+#if 0
+ /* buckets */
+ MPIIO_SIZE_READ_AGG_0_100, /* count of MPI read size ranges */
+ MPIIO_SIZE_READ_AGG_100_1K,
+ MPIIO_SIZE_READ_AGG_1K_10K,
+ MPIIO_SIZE_READ_AGG_10K_100K,
+ MPIIO_SIZE_READ_AGG_100K_1M,
+ MPIIO_SIZE_READ_AGG_1M_4M,
+ MPIIO_SIZE_READ_AGG_4M_10M,
+ MPIIO_SIZE_READ_AGG_10M_100M,
+ MPIIO_SIZE_READ_AGG_100M_1G,
+ MPIIO_SIZE_READ_AGG_1G_PLUS,
+ /* buckets */
+ MPIIO_SIZE_WRITE_AGG_0_100, /* count of MPI write size ranges */
+ MPIIO_SIZE_WRITE_AGG_100_1K,
+ MPIIO_SIZE_WRITE_AGG_1K_10K,
+ MPIIO_SIZE_WRITE_AGG_10K_100K,
+ MPIIO_SIZE_WRITE_AGG_100K_1M,
+ MPIIO_SIZE_WRITE_AGG_1M_4M,
+ MPIIO_SIZE_WRITE_AGG_4M_10M,
+ MPIIO_SIZE_WRITE_AGG_10M_100M,
+ MPIIO_SIZE_WRITE_AGG_100M_1G,
+ MPIIO_SIZE_WRITE_AGG_1G_PLUS,
+ /* buckets */
+ MPIIO_EXTENT_READ_0_100, /* count of MPI read extent ranges */
+ MPIIO_EXTENT_READ_100_1K,
+ MPIIO_EXTENT_READ_1K_10K,
+ MPIIO_EXTENT_READ_10K_100K,
+ MPIIO_EXTENT_READ_100K_1M,
+ MPIIO_EXTENT_READ_1M_4M,
+ MPIIO_EXTENT_READ_4M_10M,
+ MPIIO_EXTENT_READ_10M_100M,
+ MPIIO_EXTENT_READ_100M_1G,
+ MPIIO_EXTENT_READ_1G_PLUS,
+ /* buckets */
+ MPIIO_EXTENT_WRITE_0_100, /* count of MPI write extent ranges */
+ MPIIO_EXTENT_WRITE_100_1K,
+ MPIIO_EXTENT_WRITE_1K_10K,
+ MPIIO_EXTENT_WRITE_10K_100K,
+ MPIIO_EXTENT_WRITE_100K_1M,
+ MPIIO_EXTENT_WRITE_1M_4M,
+ MPIIO_EXTENT_WRITE_4M_10M,
+ MPIIO_EXTENT_WRITE_10M_100M,
+ MPIIO_EXTENT_WRITE_100M_1G,
+ MPIIO_EXTENT_WRITE_1G_PLUS,
+#endif
- DARSHAN_MPIIO_NUM_INDICES,
+ MPIIO_NUM_INDICES,
};
enum darshan_mpiio_f_indices
{
- DARSHAN_MPIIO_F_META_TIME, /* cumulative metadata time */
- DARSHAN_MPIIO_F_OPEN_TIMESTAMP, /* first open timestamp */
+ MPIIO_F_OPEN_TIMESTAMP,
+#if 0
+ MPIIO_F_READ_START_TIMESTAMP,
+ MPIIO_F_WRITE_START_TIMESTAMP,
+ MPIIO_F_READ_END_TIMESTAMP,
+ MPIIO_F_WRITE_END_TIMESTAMP,
+#endif
+ MPIIO_F_CLOSE_TIMESTAMP,
+ MPIIO_F_READ_TIME,
+ MPIIO_F_WRITE_TIME,
+ MPIIO_F_META_TIME,
- DARSHAN_MPIIO_F_NUM_INDICES,
+ MPIIO_F_NUM_INDICES,
};
struct darshan_mpiio_file
{
darshan_record_id f_id;
int64_t rank;
- int64_t counters[DARSHAN_MPIIO_NUM_INDICES];
- double fcounters[DARSHAN_MPIIO_F_NUM_INDICES];
+ int64_t counters[MPIIO_NUM_INDICES];
+ double fcounters[MPIIO_F_NUM_INDICES];
};
#endif /* __DARSHAN_MPIIO_LOG_FORMAT_H */
diff --git a/darshan-runtime/lib/darshan-mpiio.c b/darshan-runtime/lib/darshan-mpiio.c
index 2d3a82d..2aad9d4 100644
--- a/darshan-runtime/lib/darshan-mpiio.c
+++ b/darshan-runtime/lib/darshan-mpiio.c
@@ -25,6 +25,7 @@
#include <pthread.h>
#include "uthash.h"
+
#include "darshan.h"
#include "darshan-mpiio-log-format.h"
#include "darshan-dynamic.h"
@@ -55,11 +56,9 @@
struct mpiio_file_runtime
{
struct darshan_mpiio_file* file_record;
- double last_mpi_meta_end; /* end time of last MPI meta op (so far) */
- /* TODO: any stateful (but not intended for persistent storage in the log)
- * information about MPI-IO access. If we don't have any then this struct
- * could be eliminated.
- */
+ double last_meta_end;
+ double last_read_end;
+ double last_write_end;
UT_hash_handle hlink;
};
@@ -89,10 +88,14 @@ struct mpiio_file_runtime
struct mpiio_file_runtime_ref
{
struct mpiio_file_runtime* file;
- MPI_File *fh;
+ MPI_File fh;
UT_hash_handle hlink;
};
+/* The mpiio_runtime structure maintains necessary state for storing
+ * MPI-IO file records and for coordinating with darshan-core at
+ * shutdown time.
+ */
struct mpiio_runtime
{
struct mpiio_file_runtime* file_runtime_array;
@@ -110,22 +113,135 @@ static pthread_mutex_t mpiio_runtime_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER
static int instrumentation_disabled = 0;
static int my_rank = -1;
-#define MPIIO_LOCK() pthread_mutex_lock(&mpiio_runtime_mutex)
-#define MPIIO_UNLOCK() pthread_mutex_unlock(&mpiio_runtime_mutex)
-
static void mpiio_runtime_initialize(void);
-static void mpiio_begin_shutdown(void);
-static void mpiio_shutdown(void);
-static void mpiio_get_output_data(
- void **buffer,
- int *size);
-static struct mpiio_file_runtime* mpiio_file_by_name_setfh(const char* name, MPI_File *fh);
static struct mpiio_file_runtime* mpiio_file_by_name(const char *name);
+static struct mpiio_file_runtime* mpiio_file_by_name_setfh(const char* name, MPI_File fh);
+static struct mpiio_file_runtime* mpiio_file_by_fh(MPI_File fh);
+static void mpiio_file_close_fh(MPI_File fh);
+static int mpiio_file_compare(const void* a, const void* b);
+
+static void mpiio_begin_shutdown(void);
+static void mpiio_setup_reduction(darshan_record_id *shared_recs, int *shared_rec_count,
+ void **send_buf, void **recv_buf, int *rec_size);
static void mpiio_record_reduction_op(void* infile_v, void* inoutfile_v,
int *len, MPI_Datatype *datatype);
-static void mpiio_setup_reduction(darshan_record_id *shared_recs,
- int *shared_rec_count, void **send_buf, void **recv_buf, int *rec_size);
-static int mpiio_file_compare(const void* a, const void* b);
+static void mpiio_get_output_data(void **buffer, int *size);
+static void mpiio_shutdown(void);
+
+#define MPIIO_LOCK() pthread_mutex_lock(&mpiio_runtime_mutex)
+#define MPIIO_UNLOCK() pthread_mutex_unlock(&mpiio_runtime_mutex)
+
+#if 0
+/* Some old versions of MPI don't provide all of these COMBINER definitions.
+ * If any are missing then we define them to an arbitrary value just to
+ * prevent compile errors in DATATYPE_INC().
+ */
+#ifndef MPI_COMBINER_NAMED
+ #define MPI_COMBINER_NAMED MPIIO_COMBINER_NAMED
+#endif
+#ifndef MPI_COMBINER_DUP
+ #define MPI_COMBINER_DUP MPIIO_COMBINER_DUP
+#endif
+#ifndef MPI_COMBINER_CONTIGUOUS
+ #define MPI_COMBINER_CONTIGUOUS MPIIO_COMBINER_CONTIGUOUS
+#endif
+#ifndef MPI_COMBINER_VECTOR
+ #define MPI_COMBINER_VECTOR MPIIO_COMBINER_VECTOR
+#endif
+#ifndef MPI_COMBINER_HVECTOR_INTEGER
+ #define MPI_COMBINER_HVECTOR_INTEGER MPIIO_COMBINER_HVECTOR_INTEGER
+#endif
+#ifndef MPI_COMBINER_HVECTOR
+ #define MPI_COMBINER_HVECTOR MPIIO_COMBINER_HVECTOR
+#endif
+#ifndef MPI_COMBINER_INDEXED
+ #define MPI_COMBINER_INDEXED MPIIO_COMBINER_INDEXED
+#endif
+#ifndef MPI_COMBINER_HINDEXED_INTEGER
+ #define MPI_COMBINER_HINDEXED_INTEGER MPIIO_COMBINER_HINDEXED_INTEGER
+#endif
+#ifndef MPI_COMBINER_HINDEXED
+ #define MPI_COMBINER_HINDEXED MPIIO_COMBINER_HINDEXED
+#endif
+#ifndef MPI_COMBINER_INDEXED_BLOCK
+ #define MPI_COMBINER_INDEXED_BLOCK MPIIO_COMBINER_INDEXED_BLOCK
+#endif
+#ifndef MPI_COMBINER_STRUCT_INTEGER
+ #define MPI_COMBINER_STRUCT_INTEGER MPIIO_COMBINER_STRUCT_INTEGER
+#endif
+#ifndef MPI_COMBINER_STRUCT
+ #define MPI_COMBINER_STRUCT MPIIO_COMBINER_STRUCT
+#endif
+#ifndef MPI_COMBINER_SUBARRAY
+ #define MPI_COMBINER_SUBARRAY MPIIO_COMBINER_SUBARRAY
+#endif
+#ifndef MPI_COMBINER_DARRAY
+ #define MPI_COMBINER_DARRAY MPIIO_COMBINER_DARRAY
+#endif
+#ifndef MPI_COMBINER_F90_REAL
+ #define MPI_COMBINER_F90_REAL MPIIO_COMBINER_F90_REAL
+#endif
+#ifndef MPI_COMBINER_F90_COMPLEX
+ #define MPI_COMBINER_F90_COMPLEX MPIIO_COMBINER_F90_COMPLEX
+#endif
+#ifndef MPI_COMBINER_F90_INTEGER
+ #define MPI_COMBINER_F90_INTEGER MPIIO_COMBINER_F90_INTEGER
+#endif
+#ifndef MPI_COMBINER_RESIZED
+ #define MPI_COMBINER_RESIZED MPIIO_COMBINER_RESIZED
+#endif
+#endif
+
+#define MPIIO_DATATYPE_INC(__file, __datatype) do {\
+ int num_integers, num_addresses, num_datatypes, combiner, ret; \
+ struct darshan_mpiio_file* rec = (__file)->file_record; \
+ ret = DARSHAN_MPI_CALL(PMPI_Type_get_envelope)(__datatype, &num_integers, \
+ &num_addresses, &num_datatypes, &combiner); \
+ if(ret == MPI_SUCCESS) { \
+ switch(combiner) { \
+ case MPI_COMBINER_NAMED:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_NAMED,1); break; \
+ case MPI_COMBINER_DUP:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_DUP,1); break; \
+ case MPI_COMBINER_CONTIGUOUS:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_CONTIGUOUS,1); break; \
+ case MPI_COMBINER_VECTOR:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_VECTOR,1); break; \
+ case MPI_COMBINER_HVECTOR_INTEGER:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_HVECTOR_INTEGER,1); break; \
+ case MPI_COMBINER_HVECTOR:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_HVECTOR,1); break; \
+ case MPI_COMBINER_INDEXED:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_INDEXED,1); break; \
+ case MPI_COMBINER_HINDEXED_INTEGER:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_HINDEXED_INTEGER,1); break; \
+ case MPI_COMBINER_HINDEXED:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_HINDEXED,1); break; \
+ case MPI_COMBINER_INDEXED_BLOCK:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_INDEXED_BLOCK,1); break; \
+ case MPI_COMBINER_STRUCT_INTEGER:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_STRUCT_INTEGER,1); break; \
+ case MPI_COMBINER_STRUCT:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_STRUCT,1); break; \
+ case MPI_COMBINER_SUBARRAY:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_SUBARRAY,1); break; \
+ case MPI_COMBINER_DARRAY:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_DARRAY,1); break; \
+ case MPI_COMBINER_F90_REAL:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_F90_REAL,1); break; \
+ case MPI_COMBINER_F90_COMPLEX:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_F90_COMPLEX,1); break; \
+ case MPI_COMBINER_F90_INTEGER:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_F90_INTEGER,1); break; \
+ case MPI_COMBINER_RESIZED:\
+ DARSHAN_COUNTER_INC(rec,MPIIO_COMBINER_RESIZED,1); break; \
+ } \
+ } \
+} while(0)
+
+/**********************************************************
+ * Wrappers for MPI-IO functions of interest *
+ **********************************************************/
#ifdef HAVE_MPIIO_CONST
int MPI_File_open(MPI_Comm comm, const char *filename, int amode, MPI_Info info, MPI_File *fh)
@@ -158,27 +274,27 @@ int MPI_File_open(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_F
filename = tmp + 1;
}
- file = mpiio_file_by_name_setfh(filename, fh);
+ file = mpiio_file_by_name_setfh(filename, (*fh));
if(file)
{
file->file_record->rank = my_rank;
- DARSHAN_COUNTER_F_INC_NO_OVERLAP(file->file_record, tm1, tm2, file->last_mpi_meta_end, DARSHAN_MPIIO_F_META_TIME);
- if(DARSHAN_COUNTER_F_VALUE(file->file_record, DARSHAN_MPIIO_F_OPEN_TIMESTAMP) == 0)
- DARSHAN_COUNTER_F_SET(file->file_record, DARSHAN_MPIIO_F_OPEN_TIMESTAMP,
- tm1);
DARSHAN_MPI_CALL(PMPI_Comm_size)(comm, &comm_size);
if(comm_size == 1)
{
- DARSHAN_COUNTER_INC(file->file_record, DARSHAN_MPIIO_INDEP_OPENS, 1);
+ DARSHAN_COUNTER_INC(file->file_record, MPIIO_INDEP_OPENS, 1);
}
else
{
- DARSHAN_COUNTER_INC(file->file_record, DARSHAN_MPIIO_COLL_OPENS, 1);
+ DARSHAN_COUNTER_INC(file->file_record, MPIIO_COLL_OPENS, 1);
}
if(info != MPI_INFO_NULL)
{
- DARSHAN_COUNTER_INC(file->file_record, DARSHAN_MPIIO_HINTS, 1);
+ DARSHAN_COUNTER_INC(file->file_record, MPIIO_HINTS, 1);
}
+ if(DARSHAN_COUNTER_F_VALUE(file->file_record, MPIIO_F_OPEN_TIMESTAMP) == 0)
+ DARSHAN_COUNTER_F_SET(file->file_record, MPIIO_F_OPEN_TIMESTAMP, tm1);
+ DARSHAN_COUNTER_F_INC_NO_OVERLAP(file->file_record, tm1, tm2,
+ file->last_meta_end, MPIIO_F_META_TIME);
}
MPIIO_UNLOCK();
@@ -187,6 +303,111 @@ int MPI_File_open(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_F
return(ret);
}
+/* TODO: reads and writes */
+
+int MPI_File_sync(MPI_File fh)
+{
+ int ret;
+ struct mpiio_file_runtime* file;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_sync)(fh);
+ tm2 = darshan_core_wtime();
+
+ if(ret == MPI_SUCCESS)
+ {
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ file = mpiio_file_by_fh(fh);
+ if(file)
+ {
+ DARSHAN_COUNTER_INC(file->file_record, MPIIO_SYNCS, 1);
+ DARSHAN_COUNTER_F_INC_NO_OVERLAP(file->file_record, tm1, tm2,
+ file->last_write_end, MPIIO_F_WRITE_TIME);
+ }
+ MPIIO_UNLOCK();
+ }
+
+ return(ret);
+}
+
+/* TODO: test */
+#ifdef HAVE_MPIIO_CONST
+int MPI_File_set_view(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
+ MPI_Datatype filetype, const char *datarep, MPI_Info info)
+#else
+int MPI_File_set_view(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
+ MPI_Datatype filetype, char *datarep, MPI_Info info)
+#endif
+{
+ int ret;
+ struct mpiio_file_runtime* file;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_set_view)(fh, disp, etype, filetype,
+ datarep, info);
+ tm2 = darshan_core_wtime();
+
+ if(ret == MPI_SUCCESS)
+ {
+ int num_integers, num_addresses, num_datatypes, combiner;
+ DARSHAN_MPI_CALL(PMPI_Type_get_envelope)(filetype, &num_integers,
+ &num_addresses, &num_datatypes, &combiner);
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ file = mpiio_file_by_fh(fh);
+ if(file)
+ {
+ DARSHAN_COUNTER_INC(file->file_record, MPIIO_VIEWS, 1);
+ if(info != MPI_INFO_NULL)
+ {
+ DARSHAN_COUNTER_INC(file->file_record, MPIIO_HINTS, 1);
+ DARSHAN_COUNTER_F_INC_NO_OVERLAP(file->file_record, tm1, tm2,
+ file->last_meta_end, MPIIO_F_META_TIME);
+ }
+ MPIIO_DATATYPE_INC(file, filetype);
+ }
+ MPIIO_UNLOCK();
+ }
+
+ return(ret);
+}
+
+int MPI_File_close(MPI_File *fh)
+{
+ int ret;
+ struct mpiio_file_runtime* file;
+ MPI_File tmp_fh = *fh;
+ double tm1, tm2;
+
+ tm1 = darshan_core_wtime();
+ ret = DARSHAN_MPI_CALL(PMPI_File_close)(fh);
+ tm2 = darshan_core_wtime();
+
+ MPIIO_LOCK();
+ mpiio_runtime_initialize();
+ file = mpiio_file_by_fh(tmp_fh);
+ if(file)
+ {
+ DARSHAN_COUNTER_F_SET(file->file_record, MPIIO_F_CLOSE_TIMESTAMP,
+ darshan_core_wtime());
+ DARSHAN_COUNTER_F_INC_NO_OVERLAP(file->file_record, tm1, tm2,
+ file->last_meta_end, MPIIO_F_META_TIME);
+ mpiio_file_close_fh(tmp_fh);
+ }
+ MPIIO_UNLOCK();
+
+ return(ret);
+}
+
+/***********************************************************
+ * Internal functions for manipulating MPI-IO module state *
+ ***********************************************************/
+
+/* initialize data structures and register with darshan-core component */
static void mpiio_runtime_initialize()
{
int mem_limit;
@@ -245,62 +466,6 @@ static void mpiio_runtime_initialize()
return;
}
-static void mpiio_begin_shutdown()
-{
- assert(mpiio_runtime);
-
- MPIIO_LOCK();
- instrumentation_disabled = 1;
- MPIIO_UNLOCK();
-
- return;
-}
-
-static void mpiio_get_output_data(
- void **buffer,
- int *size)
-{
- assert(mpiio_runtime);
-
- /* TODO: clean up reduction stuff */
- if(my_rank == 0)
- {
- int tmp_ndx = mpiio_runtime->file_array_ndx - mpiio_runtime->shared_rec_count;
- memcpy(&(mpiio_runtime->file_record_array[tmp_ndx]), mpiio_runtime->red_buf,
- mpiio_runtime->shared_rec_count * sizeof(struct darshan_mpiio_file));
- free(mpiio_runtime->red_buf);
- }
- else
- {
- mpiio_runtime->file_array_ndx -= mpiio_runtime->shared_rec_count;
- }
-
- *buffer = (void *)(mpiio_runtime->file_record_array);
- *size = mpiio_runtime->file_array_ndx * sizeof(struct darshan_mpiio_file);
-
- return;
-}
-
-static void mpiio_shutdown()
-{
- struct mpiio_file_runtime_ref *ref, *tmp;
-
- HASH_ITER(hlink, mpiio_runtime->fh_hash, ref, tmp)
- {
- HASH_DELETE(hlink, mpiio_runtime->fh_hash, ref);
- free(ref);
- }
-
- HASH_CLEAR(hlink, mpiio_runtime->file_hash); /* these entries are freed all at once below */
-
- free(mpiio_runtime->file_runtime_array);
- free(mpiio_runtime->file_record_array);
- free(mpiio_runtime);
- mpiio_runtime = NULL;
-
- return;
-}
-
/* get a MPIIO file record for the given file path */
static struct mpiio_file_runtime* mpiio_file_by_name(const char *name)
{
@@ -354,7 +519,7 @@ static struct mpiio_file_runtime* mpiio_file_by_name(const char *name)
/* get an MPIIO file record for the given file path, and also create a
* reference structure using the corresponding file handle
*/
-static struct mpiio_file_runtime* mpiio_file_by_name_setfh(const char* name, MPI_File *fh)
+static struct mpiio_file_runtime* mpiio_file_by_name_setfh(const char* name, MPI_File fh)
{
struct mpiio_file_runtime* file;
struct mpiio_file_runtime_ref* ref;
@@ -394,52 +559,67 @@ static struct mpiio_file_runtime* mpiio_file_by_name_setfh(const char* name, MPI
return(file);
}
-static void mpiio_record_reduction_op(
- void* infile_v,
- void* inoutfile_v,
- int *len,
- MPI_Datatype *datatype)
+/* get an MPIIO file record for the given file handle */
+static struct mpiio_file_runtime* mpiio_file_by_fh(MPI_File fh)
{
- struct darshan_mpiio_file tmp_file;
- struct darshan_mpiio_file *infile = infile_v;
- struct darshan_mpiio_file *inoutfile = inoutfile_v;
- int i, j;
+ struct mpiio_file_runtime_ref* ref;
- assert(mpiio_runtime);
+ if(!mpiio_runtime || instrumentation_disabled)
+ return(NULL);
- for(i=0; i<*len; i++)
+ /* search hash table for existing file ref for this file handle */
+ HASH_FIND(hlink, mpiio_runtime->fh_hash, &fh, sizeof(fh), ref);
+ if(ref)
+ return(ref->file);
+
+ return(NULL);
+}
+
+/* free up reference data structures for the given file handle */
+static void mpiio_file_close_fh(MPI_File fh)
+{
+ struct mpiio_file_runtime_ref* ref;
+
+ if(!mpiio_runtime || instrumentation_disabled)
+ return;
+
+ /* search hash table for this fd */
+ HASH_FIND(hlink, mpiio_runtime->fh_hash, &fh, sizeof(fh), ref);
+ if(ref)
{
- memset(&tmp_file, 0, sizeof(struct darshan_mpiio_file));
+ /* we have a reference, delete it */
+ HASH_DELETE(hlink, mpiio_runtime->fh_hash, ref);
+ free(ref);
+ }
- tmp_file.f_id = infile->f_id;
- tmp_file.rank = -1;
+ return;
+}
- /* sum */
- for(j=DARSHAN_MPIIO_INDEP_OPENS; j<=DARSHAN_MPIIO_HINTS; j++)
- {
- tmp_file.counters[j] = infile->counters[j] + inoutfile->counters[j];
- }
+/* compare function for sorting file records by descending rank */
+static int mpiio_file_compare(const void* a_p, const void* b_p)
+{
+ const struct darshan_mpiio_file* a = a_p;
+ const struct darshan_mpiio_file* b = b_p;
- /* sum (floating point) */
- for(j=DARSHAN_MPIIO_F_META_TIME; j<=DARSHAN_MPIIO_F_META_TIME; j++)
- {
- tmp_file.fcounters[j] = infile->fcounters[j] + inoutfile->fcounters[j];
- }
+ if(a->rank < b->rank)
+ return 1;
+ if(a->rank > b->rank)
+ return -1;
- /* min non-zero (if available) value */
- for(j=DARSHAN_MPIIO_F_OPEN_TIMESTAMP; j<=DARSHAN_MPIIO_F_OPEN_TIMESTAMP; j++)
- {
- if(infile->fcounters[j] > inoutfile->fcounters[j] && inoutfile->fcounters[j] > 0)
- tmp_file.fcounters[j] = inoutfile->fcounters[j];
- else
- tmp_file.fcounters[j] = infile->fcounters[j];
- }
+ return 0;
+}
- /* update pointers */
- *inoutfile = tmp_file;
- inoutfile++;
- infile++;
- }
+/**************************************************************************
+ * Functions exported by MPI-IO module for coordinating with darshan-core *
+ **************************************************************************/
+
+static void mpiio_begin_shutdown()
+{
+ assert(mpiio_runtime);
+
+ MPIIO_LOCK();
+ instrumentation_disabled = 1;
+ MPIIO_UNLOCK();
return;
}
@@ -495,18 +675,99 @@ static void mpiio_setup_reduction(
return;
}
-/* compare function for sorting file records by descending rank */
-static int mpiio_file_compare(const void* a_p, const void* b_p)
+static void mpiio_record_reduction_op(
+ void* infile_v,
+ void* inoutfile_v,
+ int *len,
+ MPI_Datatype *datatype)
{
- const struct darshan_mpiio_file* a = a_p;
- const struct darshan_mpiio_file* b = b_p;
+ struct darshan_mpiio_file tmp_file;
+ struct darshan_mpiio_file *infile = infile_v;
+ struct darshan_mpiio_file *inoutfile = inoutfile_v;
+ int i, j;
- if(a->rank < b->rank)
- return 1;
- if(a->rank > b->rank)
- return -1;
+ assert(mpiio_runtime);
- return 0;
+ for(i=0; i<*len; i++)
+ {
+ memset(&tmp_file, 0, sizeof(struct darshan_mpiio_file));
+
+ tmp_file.f_id = infile->f_id;
+ tmp_file.rank = -1;
+
+ /* sum */
+ for(j=MPIIO_INDEP_OPENS; j<=MPIIO_HINTS; j++)
+ {
+ tmp_file.counters[j] = infile->counters[j] + inoutfile->counters[j];
+ }
+
+ /* sum (floating point) */
+ for(j=MPIIO_F_META_TIME; j<=MPIIO_F_META_TIME; j++)
+ {
+ tmp_file.fcounters[j] = infile->fcounters[j] + inoutfile->fcounters[j];
+ }
+
+ /* min non-zero (if available) value */
+ for(j=MPIIO_F_OPEN_TIMESTAMP; j<=MPIIO_F_OPEN_TIMESTAMP; j++)
+ {
+ if(infile->fcounters[j] > inoutfile->fcounters[j] && inoutfile->fcounters[j] > 0)
+ tmp_file.fcounters[j] = inoutfile->fcounters[j];
+ else
+ tmp_file.fcounters[j] = infile->fcounters[j];
+ }
+
+ /* update pointers */
+ *inoutfile = tmp_file;
+ inoutfile++;
+ infile++;
+ }
+
+ return;
+}
+
+static void mpiio_get_output_data(
+ void **buffer,
+ int *size)
+{
+ assert(mpiio_runtime);
+
+ /* TODO: clean up reduction stuff */
+ if(my_rank == 0)
+ {
+ int tmp_ndx = mpiio_runtime->file_array_ndx - mpiio_runtime->shared_rec_count;
+ memcpy(&(mpiio_runtime->file_record_array[tmp_ndx]), mpiio_runtime->red_buf,
+ mpiio_runtime->shared_rec_count * sizeof(struct darshan_mpiio_file));
+ free(mpiio_runtime->red_buf);
+ }
+ else
+ {
+ mpiio_runtime->file_array_ndx -= mpiio_runtime->shared_rec_count;
+ }
+
+ *buffer = (void *)(mpiio_runtime->file_record_array);
+ *size = mpiio_runtime->file_array_ndx * sizeof(struct darshan_mpiio_file);
+
+ return;
+}
+
+static void mpiio_shutdown()
+{
+ struct mpiio_file_runtime_ref *ref, *tmp;
+
+ HASH_ITER(hlink, mpiio_runtime->fh_hash, ref, tmp)
+ {
+ HASH_DELETE(hlink, mpiio_runtime->fh_hash, ref);
+ free(ref);
+ }
+
+ HASH_CLEAR(hlink, mpiio_runtime->file_hash); /* these entries are freed all at once below */
+
+ free(mpiio_runtime->file_runtime_array);
+ free(mpiio_runtime->file_record_array);
+ free(mpiio_runtime);
+ mpiio_runtime = NULL;
+
+ return;
}
/*
diff --git a/darshan-util/darshan-mpiio-logutils.c b/darshan-util/darshan-mpiio-logutils.c
index 33fc1c3..9b4c537 100644
--- a/darshan-util/darshan-mpiio-logutils.c
+++ b/darshan-util/darshan-mpiio-logutils.c
@@ -38,9 +38,9 @@ int darshan_log_get_mpiio_file(darshan_fd fd, struct darshan_mpiio_file *file)
/* swap bytes if necessary */
DARSHAN_BSWAP64(&file->f_id);
DARSHAN_BSWAP64(&file->rank);
- for(i=0; i<DARSHAN_MPIIO_NUM_INDICES; i++)
+ for(i=0; i<MPIIO_NUM_INDICES; i++)
DARSHAN_BSWAP64(&file->counters[i]);
- for(i=0; i<DARSHAN_MPIIO_F_NUM_INDICES; i++)
+ for(i=0; i<MPIIO_F_NUM_INDICES; i++)
DARSHAN_BSWAP64(&file->fcounters[i]);
}
}
diff --git a/darshan-util/darshan-mpiio-parser.c b/darshan-util/darshan-mpiio-parser.c
index 683194f..6ddb498 100644
--- a/darshan-util/darshan-mpiio-parser.c
+++ b/darshan-util/darshan-mpiio-parser.c
@@ -184,14 +184,76 @@ int main(int argc, char **argv)
printf(
"\t\tMPIIO_INDEP_OPENS:\t%"PRIu64"\n"
"\t\tMPIIO_COLL_OPENS:\t%"PRIu64"\n"
+ "\t\tMPIIO_INDEP_READS:\t%"PRIu64"\n"
+ "\t\tMPIIO_INDEP_WRITES:\t%"PRIu64"\n"
+ "\t\tMPIIO_COLL_READS:\t%"PRIu64"\n"
+ "\t\tMPIIO_COLL_WRITES:\t%"PRIu64"\n"
+ "\t\tMPIIO_SPLIT_READS:\t%"PRIu64"\n"
+ "\t\tMPIIO_SPLIT_WRITES:\t%"PRIu64"\n"
+ "\t\tMPIIO_NB_READS:\t%"PRIu64"\n"
+ "\t\tMPIIO_NB_WRITES:\t%"PRIu64"\n"
+ "\t\tMPIIO_SYNCS:\t%"PRIu64"\n"
"\t\tMPIIO_HINTS:\t%"PRIu64"\n"
- "\t\tMPIIO_F_META_TIME:\t%lf\n"
- "\t\tMPIIO_F_OPEN_TIMESTAMP:\t%lf\n",
- next_file.counters[DARSHAN_MPIIO_INDEP_OPENS],
- next_file.counters[DARSHAN_MPIIO_COLL_OPENS],
- next_file.counters[DARSHAN_MPIIO_HINTS],
- next_file.fcounters[DARSHAN_MPIIO_F_META_TIME],
- next_file.fcounters[DARSHAN_MPIIO_F_OPEN_TIMESTAMP]);
+ "\t\tMPIIO_VIEWS:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_NAMED:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_DUP:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_CONTIGUOUS:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_VECTOR:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_HVECTOR_INTEGER:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_HVECTOR:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_INDEXED:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_HINDEXED_INTEGER:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_HINDEXED:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_INDEXED_BLOCK:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_STRUCT_INTEGER:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_STRUCT:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_SUBARRAY:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_DARRAY:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_F90_REAL:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_F90_COMPLEX:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_F90_INTEGER:\t%"PRIu64"\n"
+ "\t\tMPIIO_COMBINER_RESIZED:\t%"PRIu64"\n"
+ "\t\tMPIIO_F_OPEN_TIMESTAMP:\t%lf\n"
+ "\t\tMPIIO_F_CLOSE_TIMESTAMP:\t%lf\n"
+ "\t\tMPIIO_F_READ_TIME:\t%lf\n"
+ "\t\tMPIIO_F_WRITE_TIME:\t%lf\n"
+ "\t\tMPIIO_F_META_TIME:\t%lf\n",
+ next_file.counters[MPIIO_INDEP_OPENS],
+ next_file.counters[MPIIO_COLL_OPENS],
+ next_file.counters[MPIIO_INDEP_READS],
+ next_file.counters[MPIIO_INDEP_WRITES],
+ next_file.counters[MPIIO_COLL_READS],
+ next_file.counters[MPIIO_COLL_WRITES],
+ next_file.counters[MPIIO_SPLIT_READS],
+ next_file.counters[MPIIO_SPLIT_WRITES],
+ next_file.counters[MPIIO_NB_READS],
+ next_file.counters[MPIIO_NB_WRITES],
+ next_file.counters[MPIIO_SYNCS],
+ next_file.counters[MPIIO_HINTS],
+ next_file.counters[MPIIO_VIEWS],
+ next_file.counters[MPIIO_COMBINER_NAMED],
+ next_file.counters[MPIIO_COMBINER_DUP],
+ next_file.counters[MPIIO_COMBINER_CONTIGUOUS],
+ next_file.counters[MPIIO_COMBINER_VECTOR],
+ next_file.counters[MPIIO_COMBINER_HVECTOR_INTEGER],
+ next_file.counters[MPIIO_COMBINER_HVECTOR],
+ next_file.counters[MPIIO_COMBINER_INDEXED],
+ next_file.counters[MPIIO_COMBINER_HINDEXED_INTEGER],
+ next_file.counters[MPIIO_COMBINER_HINDEXED],
+ next_file.counters[MPIIO_COMBINER_INDEXED_BLOCK],
+ next_file.counters[MPIIO_COMBINER_STRUCT_INTEGER],
+ next_file.counters[MPIIO_COMBINER_STRUCT],
+ next_file.counters[MPIIO_COMBINER_SUBARRAY],
+ next_file.counters[MPIIO_COMBINER_DARRAY],
+ next_file.counters[MPIIO_COMBINER_F90_REAL],
+ next_file.counters[MPIIO_COMBINER_F90_COMPLEX],
+ next_file.counters[MPIIO_COMBINER_F90_INTEGER],
+ next_file.counters[MPIIO_COMBINER_RESIZED],
+ next_file.fcounters[MPIIO_F_OPEN_TIMESTAMP],
+ next_file.fcounters[MPIIO_F_CLOSE_TIMESTAMP],
+ next_file.fcounters[MPIIO_F_READ_TIME],
+ next_file.fcounters[MPIIO_F_WRITE_TIME],
+ next_file.fcounters[MPIIO_F_META_TIME]);
i++;
} while((ret = darshan_log_get_mpiio_file(fd, &next_file)) == 1);
hooks/post-receive
--
More information about the Darshan-commits
mailing list