[Darshan-commits] [Git][darshan/darshan][dev-detailed-hdf5-mod] 3 commits: changes for h5f/h5d module formats
Shane Snyder
xgitlab at cels.anl.gov
Sun Feb 2 21:40:45 CST 2020
Shane Snyder pushed to branch dev-detailed-hdf5-mod at darshan / darshan
Commits:
ed312f5d by Shane Snyder at 2020-02-03T03:36:22Z
changes for h5f/h5d module formats
- - - - -
9933525a by Shane Snyder at 2020-02-03T03:38:32Z
darshan-runtime support for h5d/h5f
- - - - -
55c46988 by Shane Snyder at 2020-02-03T03:40:00Z
darshan-util support for h5d/h5f
- - - - -
8 changed files:
- darshan-hdf5-log-format.h
- darshan-log-format.h
- darshan-runtime/lib/darshan-hdf5.c
- darshan-runtime/share/ld-opts/darshan-hdf5-ld-opts
- darshan-util/darshan-analyzer.c
- darshan-util/darshan-hdf5-logutils.c
- darshan-util/darshan-hdf5-logutils.h
- darshan-util/darshan-logutils.c
Changes:
=====================================
darshan-hdf5-log-format.h
=====================================
@@ -7,53 +7,195 @@
#ifndef __DARSHAN_HDF5_LOG_FORMAT_H
#define __DARSHAN_HDF5_LOG_FORMAT_H
-/* current HDF5 log format version */
-#define DARSHAN_HDF5_VER 2
+/* current HDF5 log format versions */
+#define DARSHAN_H5F_VER 2
+#define DARSHAN_H5D_VER 1
-#define HDF5_COUNTERS \
- /* count of HDF5 opens */\
- X(HDF5_OPENS) \
+#define H5F_COUNTERS \
+ /* count of HDF5 file opens/creates */\
+ X(H5F_OPENS) \
+ /* count of HDF5 file flushes */\
+ X(H5F_FLUSHES) \
+ /* flag indicating whether MPI-IO is used for accessing this file */\
+ X(H5F_USE_MPIIO) \
/* end of counters */\
- X(HDF5_NUM_INDICES)
-
-#define HDF5_F_COUNTERS \
- /* timestamp of first open */\
- X(HDF5_F_OPEN_START_TIMESTAMP) \
- /* timestamp of first close */\
- X(HDF5_F_CLOSE_START_TIMESTAMP) \
- /* timestamp of last open */\
- X(HDF5_F_OPEN_END_TIMESTAMP) \
- /* timestamp of last close */\
- X(HDF5_F_CLOSE_END_TIMESTAMP) \
+ X(H5F_NUM_INDICES)
+
+#define H5F_F_COUNTERS \
+ /* timestamp of first file open */\
+ X(H5F_F_OPEN_START_TIMESTAMP) \
+ /* timestamp of first file close */\
+ X(H5F_F_CLOSE_START_TIMESTAMP) \
+ /* timestamp of last file open */\
+ X(H5F_F_OPEN_END_TIMESTAMP) \
+ /* timestamp of last file close */\
+ X(H5F_F_CLOSE_END_TIMESTAMP) \
+ /* cumulative H5F meta time */\
+ X(H5F_F_META_TIME) \
+ /* end of counters*/\
+ X(H5F_F_NUM_INDICES)
+
+#define H5D_COUNTERS \
+ /* count of HDF5 dataset opens/creates */\
+ X(H5D_OPENS) \
+ /* count of HDF5 dataset reads */\
+ X(H5D_READS) \
+ /* count of HDF5 dataset writes */\
+ X(H5D_WRITES) \
+ /* count of HDF5 dataset flushes */\
+ X(H5D_FLUSHES) \
+ /* total bytes read */\
+ X(H5D_BYTES_READ) \
+ /* total bytes written */\
+ X(H5D_BYTES_WRITTEN) \
+ /* number of times switching between H5D read and write */\
+ X(H5D_RW_SWITCHES) \
+ /* number of read/write ops with regular hyperslab selections */\
+ X(H5D_REGULAR_HYPERSLAB_SELECTS) \
+ /* number of read/write ops with irregular hyperslab selections */\
+ X(H5D_IRREGULAR_HYPERSLAB_SELECTS) \
+ /* number of read/write ops with point selections */\
+ X(H5D_POINT_SELECTS) \
+ /* access sizes of the slowest (longest duration) read/write operations */\
+ X(H5D_MAX_READ_TIME_SIZE) \
+ X(H5D_MAX_WRITE_TIME_SIZE) \
+ /* buckets for H5D read size ranges */\
+ X(H5D_SIZE_READ_AGG_0_100) \
+ X(H5D_SIZE_READ_AGG_100_1K) \
+ X(H5D_SIZE_READ_AGG_1K_10K) \
+ X(H5D_SIZE_READ_AGG_10K_100K) \
+ X(H5D_SIZE_READ_AGG_100K_1M) \
+ X(H5D_SIZE_READ_AGG_1M_4M) \
+ X(H5D_SIZE_READ_AGG_4M_10M) \
+ X(H5D_SIZE_READ_AGG_10M_100M) \
+ X(H5D_SIZE_READ_AGG_100M_1G) \
+ X(H5D_SIZE_READ_AGG_1G_PLUS) \
+ /* buckets for H5D write size ranges */\
+ X(H5D_SIZE_WRITE_AGG_0_100) \
+ X(H5D_SIZE_WRITE_AGG_100_1K) \
+ X(H5D_SIZE_WRITE_AGG_1K_10K) \
+ X(H5D_SIZE_WRITE_AGG_10K_100K) \
+ X(H5D_SIZE_WRITE_AGG_100K_1M) \
+ X(H5D_SIZE_WRITE_AGG_1M_4M) \
+ X(H5D_SIZE_WRITE_AGG_4M_10M) \
+ X(H5D_SIZE_WRITE_AGG_10M_100M) \
+ X(H5D_SIZE_WRITE_AGG_100M_1G) \
+ X(H5D_SIZE_WRITE_AGG_1G_PLUS) \
+ /* the four most frequently appearing H5D access sizes */\
+ X(H5D_ACCESS1_ACCESS) \
+ X(H5D_ACCESS2_ACCESS) \
+ X(H5D_ACCESS3_ACCESS) \
+ X(H5D_ACCESS4_ACCESS) \
+ /* count of each of the most frequent H5D access sizes */\
+ X(H5D_ACCESS1_COUNT) \
+ X(H5D_ACCESS2_COUNT) \
+ X(H5D_ACCESS3_COUNT) \
+ X(H5D_ACCESS4_COUNT) \
+ /* number of dimensions in dataset's dataspace */\
+ X(H5D_DATASPACE_NDIMS) \
+ /* number of points in dataset's dataspace */\
+ X(H5D_DATASPACE_NPOINTS) \
+ /* size of dataset elements in bytes */\
+ X(H5D_DATATYPE_SIZE) \
+ /* flag indicating use of MPI-IO collectives */\
+ X(H5D_USE_MPIIO_COLLECTIVE) \
+ /* flag indicating whether deprecated create/open calls were used */\
+ X(H5D_USE_DEPRECATED) \
+ /* rank and number of bytes moved for fastest/slowest ranks */\
+ X(H5D_FASTEST_RANK) \
+ X(H5D_FASTEST_RANK_BYTES) \
+ X(H5D_SLOWEST_RANK) \
+ X(H5D_SLOWEST_RANK_BYTES) \
+ /* end of counters */\
+ X(H5D_NUM_INDICES)
+
+#define H5D_F_COUNTERS \
+ /* timestamp of first dataset open */\
+ X(H5D_F_OPEN_START_TIMESTAMP) \
+ /* timestamp of first dataset read */\
+ X(H5D_F_READ_START_TIMESTAMP) \
+ /* timestamp of first dataset write */\
+ X(H5D_F_WRITE_START_TIMESTAMP) \
+ /* timestamp of first dataset close */\
+ X(H5D_F_CLOSE_START_TIMESTAMP) \
+ /* timestamp of last dataset open */\
+ X(H5D_F_OPEN_END_TIMESTAMP) \
+ /* timestamp of last dataset read */\
+ X(H5D_F_READ_END_TIMESTAMP) \
+ /* timestamp of last dataset write */\
+ X(H5D_F_WRITE_END_TIMESTAMP) \
+ /* timestamp of last dataset close */\
+ X(H5D_F_CLOSE_END_TIMESTAMP) \
+ /* cumulative H5D read time */\
+ X(H5D_F_READ_TIME) \
+ /* cumulative H5D write time */\
+ X(H5D_F_WRITE_TIME) \
+ /* cumulative H5D meta time */\
+ X(H5D_F_META_TIME) \
+ /* maximum H5D read duration */\
+ X(H5D_F_MAX_READ_TIME) \
+ /* maximum H5D write duration */\
+ X(H5D_F_MAX_WRITE_TIME) \
+ /* total i/o and meta time for fastest/slowest ranks */\
+ X(H5D_F_FASTEST_RANK_TIME) \
+ X(H5D_F_SLOWEST_RANK_TIME) \
+ /* variance of total i/o time and bytes moved across all ranks */\
+ /* NOTE: for shared records only */\
+ X(H5D_F_VARIANCE_RANK_TIME) \
+ X(H5D_F_VARIANCE_RANK_BYTES) \
/* end of counters*/\
- X(HDF5_F_NUM_INDICES)
+ X(H5D_F_NUM_INDICES)
#define X(a) a,
/* integer statistics for HDF5 file records */
-enum darshan_hdf5_indices
+enum darshan_h5f_indices
+{
+ H5F_COUNTERS
+};
+/* integer statistics for HDF5 dataset records */
+enum darshan_h5d_indices
{
- HDF5_COUNTERS
+ H5D_COUNTERS
};
/* floating point statistics for HDF5 file records */
-enum darshan_hdf5_f_indices
+enum darshan_h5f_f_indices
+{
+ H5F_F_COUNTERS
+};
+/* floating point statistics for HDF5 dataset records */
+enum darshan_h5d_f_indices
{
- HDF5_F_COUNTERS
+ H5D_F_COUNTERS
};
#undef X
-/* file record structure for HDF5 files. a record is created and stored for
+/* record structure for HDF5 files. a record is created and stored for
* every HDF5 file opened by the original application. For the HDF5 module,
* the record includes:
* - a darshan_base_record structure, which contains the record id & rank
* - integer file I/O statistics (open, read/write counts, etc)
- * - floating point I/O statistics (timestamps, cumulative timers, etc.)
+ * - floating point file I/O statistics (timestamps, cumulative timers, etc.)
*/
struct darshan_hdf5_file
{
struct darshan_base_record base_rec;
- int64_t counters[HDF5_NUM_INDICES];
- double fcounters[HDF5_F_NUM_INDICES];
+ int64_t counters[H5F_NUM_INDICES];
+ double fcounters[H5F_F_NUM_INDICES];
+};
+
+/* record structure for HDF5 datasets. a record is created and stored for
+ * every HDF5 dataset opened by the original application. For the HDF5 module,
+ * the record includes:
+ * - a darshan_base_record structure, which contains the record id & rank
+ * - integer dataset I/O statistics (open, read/write counts, etc)
+ * - floating point dataset I/O statistics (timestamps, cumulative timers, etc.)
+ */
+struct darshan_hdf5_dataset
+{
+ struct darshan_base_record base_rec;
+ int64_t counters[H5D_NUM_INDICES];
+ double fcounters[H5D_F_NUM_INDICES];
};
#endif /* __DARSHAN_HDF5_LOG_FORMAT_H */
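The counter lists above follow Darshan's X-macro pattern: each list expands into enum values in this header and into matching name strings on the darshan-util side. A minimal sketch of the H5F expansion, shown only for illustration:

    /* in this header: X(a) expands to an enumerator */
    #define X(a) a,
    enum darshan_h5f_indices
    {
        H5F_COUNTERS   /* -> H5F_OPENS, H5F_FLUSHES, H5F_USE_MPIIO, H5F_NUM_INDICES, */
    };
    #undef X

    /* in darshan-util (see darshan-hdf5-logutils.c below): X(a) expands to a string */
    #define X(a) #a,
    char *h5f_counter_names[] = {
        H5F_COUNTERS   /* -> "H5F_OPENS", "H5F_FLUSHES", "H5F_USE_MPIIO", ... */
    };
    #undef X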
=====================================
darshan-log-format.h
=====================================
@@ -24,7 +24,7 @@
* log format version, NOT when a new version of a module record is
* introduced -- we have module-specific versions to handle that
*/
-#define DARSHAN_LOG_VERSION "3.10"
+#define DARSHAN_LOG_VERSION "3.20"
/* magic number for validating output files and checking byte order */
#define DARSHAN_MAGIC_NR 6567223
@@ -131,11 +131,13 @@ struct darshan_base_record
* component -- NULL can be passed if there are no
* logutil definitions)
*/
+/* XXX verify no back compat issues when adding HDF5 DATASETS in middle of this list */
#define DARSHAN_MODULE_IDS \
X(DARSHAN_NULL_MOD, "NULL", DARSHAN_NULL_VER, NULL) \
X(DARSHAN_POSIX_MOD, "POSIX", DARSHAN_POSIX_VER, &posix_logutils) \
X(DARSHAN_MPIIO_MOD, "MPI-IO", DARSHAN_MPIIO_VER, &mpiio_logutils) \
- X(DARSHAN_HDF5_MOD, "HDF5", DARSHAN_HDF5_VER, &hdf5_logutils) \
+ X(DARSHAN_H5F_MOD, "H5F", DARSHAN_H5F_VER, &hdf5_file_logutils) \
+ X(DARSHAN_H5D_MOD, "H5D", DARSHAN_H5D_VER, &hdf5_dataset_logutils) \
X(DARSHAN_PNETCDF_MOD, "PNETCDF", DARSHAN_PNETCDF_VER, &pnetcdf_logutils) \
X(DARSHAN_BGQ_MOD, "BG/Q", DARSHAN_BGQ_VER, &bgq_logutils) \
X(DARSHAN_LUSTRE_MOD, "LUSTRE", DARSHAN_LUSTRE_VER, &lustre_logutils) \
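The XXX note flags a real hazard: module IDs are derived positionally from DARSHAN_MODULE_IDS, so inserting DARSHAN_H5D_MOD ahead of PNETCDF shifts the numeric ID of every module that follows it, and logs written with the old numbering have to be interpreted with that shift in mind. A rough sketch of the positional expansion (the exact declaration in darshan-log-format.h may differ slightly):

    /* sketch only: module IDs come from position in the list */
    #define X(a, b, c, d) a,
    typedef enum
    {
        DARSHAN_MODULE_IDS   /* e.g. DARSHAN_PNETCDF_MOD would shift from 4 to 5 */
    } darshan_module_id;
    #undef X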
=====================================
darshan-runtime/lib/darshan-hdf5.c
=====================================
@@ -24,28 +24,41 @@
#include "darshan.h"
#include "darshan-dynamic.h"
-/* hope this doesn't change any time soon */
-typedef int herr_t; //hf5-1.10.0p1: H5public.h:126
-
-#ifdef __DARSHAN_ENABLE_HDF5110
- typedef int64_t hid_t; //hf5-1.10.0p1: H5Ipublic.h:56
-#else
- typedef int hid_t;
-#endif
+#include <hdf5.h>
+/* H5F prototypes */
DARSHAN_FORWARD_DECL(H5Fcreate, hid_t, (const char *filename, unsigned flags, hid_t create_plist, hid_t access_plist));
DARSHAN_FORWARD_DECL(H5Fopen, hid_t, (const char *filename, unsigned flags, hid_t access_plist));
+DARSHAN_FORWARD_DECL(H5Fflush, herr_t, (hid_t object_id, H5F_scope_t scope));
DARSHAN_FORWARD_DECL(H5Fclose, herr_t, (hid_t file_id));
-/* prototype for HDF symbols that we will call directly from within other
- * wrappers if HDF is linked in
- */
-extern herr_t H5get_libversion(unsigned *majnum, unsigned *minnum, unsigned *relnum);
+/* H5D prototypes */
+DARSHAN_FORWARD_DECL(H5Dcreate1, hid_t, (hid_t loc_id, const char *name, hid_t type_id, hid_t space_id, hid_t dcpl_id));
+DARSHAN_FORWARD_DECL(H5Dcreate2, hid_t, (hid_t loc_id, const char *name, hid_t dtype_id, hid_t space_id, hid_t lcpl_id, hid_t dcpl_id, hid_t dapl_id));
+DARSHAN_FORWARD_DECL(H5Dopen1, hid_t, (hid_t loc_id, const char *name));
+DARSHAN_FORWARD_DECL(H5Dopen2, hid_t, (hid_t loc_id, const char *name, hid_t dapl_id));
+DARSHAN_FORWARD_DECL(H5Dread, herr_t, (hid_t dataset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t xfer_plist_id, void * buf));
+DARSHAN_FORWARD_DECL(H5Dwrite, herr_t, (hid_t dataset_id, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t xfer_plist_id, const void * buf));
+DARSHAN_FORWARD_DECL(H5Dflush, herr_t, (hid_t dataset_id));
+DARSHAN_FORWARD_DECL(H5Dclose, herr_t, (hid_t dataset_id));
/* structure that can track i/o stats for a given HDF5 file record at runtime */
struct hdf5_file_record_ref
{
struct darshan_hdf5_file* file_rec;
+ double last_meta_end;
+};
+
+/* structure that can track i/o stats for a given HDF5 dataset record at runtime */
+struct hdf5_dataset_record_ref
+{
+ struct darshan_hdf5_dataset* dataset_rec;
+ enum darshan_io_type last_io_type;
+ double last_read_end;
+ double last_write_end;
+ double last_meta_end;
+ void *access_root;
+ int access_count;
};
/* struct to encapsulate runtime state for the HDF5 module */
@@ -53,82 +66,106 @@ struct hdf5_runtime
{
void *rec_id_hash;
void *hid_hash;
- int file_rec_count;
+ int rec_count;
};
-static void hdf5_runtime_initialize(
+static void hdf5_file_runtime_initialize(
+ void);
+static void hdf5_dataset_runtime_initialize(
void);
static struct hdf5_file_record_ref *hdf5_track_new_file_record(
- darshan_record_id rec_id, const char *path);
-static void hdf5_cleanup_runtime(
+ darshan_record_id rec_id, const char *rec_name);
+static struct hdf5_dataset_record_ref *hdf5_track_new_dataset_record(
+ darshan_record_id rec_id, const char *rec_name);
+static void hdf5_finalize_dataset_records(
+ void *rec_ref_p, void *user_ptr);
+static void hdf5_cleanup_file_runtime(
+ void);
+static void hdf5_cleanup_dataset_runtime(
void);
#ifdef HAVE_MPI
-static void hdf5_record_reduction_op(
- void* infile_v, void* inoutfile_v, int *len, MPI_Datatype *datatype);
-static void hdf5_mpi_redux(
+static void hdf5_file_record_reduction_op(
+ void* inrec_v, void* inoutrec_v, int *len, MPI_Datatype *datatype);
+static void hdf5_dataset_record_reduction_op(
+ void* inrec_v, void* inoutrec_v, int *len, MPI_Datatype *datatype);
+static void hdf5_shared_dataset_record_variance(
+ MPI_Comm mod_comm, struct darshan_hdf5_dataset *inrec_array,
+ struct darshan_hdf5_dataset *outrec_array, int shared_rec_count);
+static void hdf5_file_mpi_redux(
+ void *hdf5_buf, MPI_Comm mod_comm,
+ darshan_record_id *shared_recs, int shared_rec_count);
+static void hdf5_dataset_mpi_redux(
void *hdf5_buf, MPI_Comm mod_comm,
darshan_record_id *shared_recs, int shared_rec_count);
#endif
-static void hdf5_shutdown(
+static void hdf5_file_shutdown(
+ void **hdf5_buf, int *hdf5_buf_sz);
+static void hdf5_dataset_shutdown(
void **hdf5_buf, int *hdf5_buf_sz);
-static struct hdf5_runtime *hdf5_runtime = NULL;
+static struct hdf5_runtime *hdf5_file_runtime = NULL;
+static struct hdf5_runtime *hdf5_dataset_runtime = NULL;
static pthread_mutex_t hdf5_runtime_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
static int my_rank = -1;
#define HDF5_LOCK() pthread_mutex_lock(&hdf5_runtime_mutex)
#define HDF5_UNLOCK() pthread_mutex_unlock(&hdf5_runtime_mutex)
-#define HDF5_PRE_RECORD() do { \
+/*********************************************************
+ * Wrappers for H5F functions of interest *
+ *********************************************************/
+
+#define H5F_PRE_RECORD() do { \
HDF5_LOCK(); \
if(!darshan_core_disabled_instrumentation()) { \
- if(!hdf5_runtime) hdf5_runtime_initialize(); \
- if(hdf5_runtime) break; \
+ if(!hdf5_file_runtime) hdf5_file_runtime_initialize(); \
+ if(hdf5_file_runtime) break; \
} \
HDF5_UNLOCK(); \
return(ret); \
} while(0)
-#define HDF5_POST_RECORD() do { \
+#define H5F_POST_RECORD() do { \
HDF5_UNLOCK(); \
} while(0)
-#define HDF5_RECORD_OPEN(__ret, __path, __tm1, __tm2) do { \
- darshan_record_id rec_id; \
- struct hdf5_file_record_ref *rec_ref; \
- char *newpath; \
- newpath = darshan_clean_file_path(__path); \
- if(!newpath) newpath = (char *)__path; \
- if(darshan_core_excluded_path(newpath)) { \
- if(newpath != __path) free(newpath); \
+#define H5F_RECORD_OPEN(__ret, __path, __fapl, __tm1, __tm2) do { \
+ darshan_record_id __rec_id; \
+ struct hdf5_file_record_ref *__rec_ref; \
+ char *__newpath; \
+ __newpath = darshan_clean_file_path(__path); \
+ if(!__newpath) __newpath = (char *)__path; \
+ if(darshan_core_excluded_path(__newpath)) { \
+ if(__newpath != __path) free(__newpath); \
break; \
} \
- rec_id = darshan_core_gen_record_id(newpath); \
- rec_ref = darshan_lookup_record_ref(hdf5_runtime->rec_id_hash, &rec_id, sizeof(darshan_record_id)); \
- if(!rec_ref) rec_ref = hdf5_track_new_file_record(rec_id, newpath); \
- if(!rec_ref) { \
- if(newpath != __path) free(newpath); \
+ __rec_id = darshan_core_gen_record_id(__newpath); \
+ __rec_ref = darshan_lookup_record_ref(hdf5_file_runtime->rec_id_hash, &__rec_id, sizeof(darshan_record_id)); \
+ if(!__rec_ref) __rec_ref = hdf5_track_new_file_record(__rec_id, __newpath); \
+ if(!__rec_ref) { \
+ if(__newpath != __path) free(__newpath); \
break; \
} \
- if(rec_ref->file_rec->fcounters[HDF5_F_OPEN_START_TIMESTAMP] == 0 || \
- rec_ref->file_rec->fcounters[HDF5_F_OPEN_START_TIMESTAMP] > __tm1) \
- rec_ref->file_rec->fcounters[HDF5_F_OPEN_START_TIMESTAMP] = __tm1; \
- rec_ref->file_rec->fcounters[HDF5_F_OPEN_END_TIMESTAMP] = __tm2; \
- rec_ref->file_rec->counters[HDF5_OPENS] += 1; \
- darshan_add_record_ref(&(hdf5_runtime->hid_hash), &__ret, sizeof(hid_t), rec_ref); \
- if(newpath != __path) free(newpath); \
+ if(H5Pget_fapl_mpio(__fapl, NULL, NULL) >= 0) \
+ __rec_ref->file_rec->counters[H5F_USE_MPIIO] = 1; \
+ __rec_ref->file_rec->counters[H5F_OPENS] += 1; \
+ if(__rec_ref->file_rec->fcounters[H5F_F_OPEN_START_TIMESTAMP] == 0 || \
+ __rec_ref->file_rec->fcounters[H5F_F_OPEN_START_TIMESTAMP] > __tm1) \
+ __rec_ref->file_rec->fcounters[H5F_F_OPEN_START_TIMESTAMP] = __tm1; \
+ __rec_ref->file_rec->fcounters[H5F_F_OPEN_END_TIMESTAMP] = __tm2; \
+ DARSHAN_TIMER_INC_NO_OVERLAP(__rec_ref->file_rec->fcounters[H5F_F_META_TIME], \
+ __tm1, __tm2, __rec_ref->last_meta_end); \
+ darshan_add_record_ref(&(hdf5_file_runtime->hid_hash), &__ret, sizeof(hid_t), __rec_ref); \
+ if(__newpath != __path) free(__newpath); \
} while(0)
-/*********************************************************
- * Wrappers for HDF5 functions of interest *
- *********************************************************/
-
hid_t DARSHAN_DECL(H5Fcreate)(const char *filename, unsigned flags,
hid_t create_plist, hid_t access_plist)
{
hid_t ret;
char* tmp;
double tm1, tm2;
+#if 0
unsigned majnum, minnum, relnum;
H5get_libversion(&majnum, &minnum, &relnum);
@@ -146,6 +183,7 @@ hid_t DARSHAN_DECL(H5Fcreate)(const char *filename, unsigned flags,
}
return(-1);
}
+#endif
MAP_OR_FAIL(H5Fcreate);
@@ -164,9 +202,9 @@ hid_t DARSHAN_DECL(H5Fcreate)(const char *filename, unsigned flags,
filename = tmp + 1;
}
- HDF5_PRE_RECORD();
- HDF5_RECORD_OPEN(ret, filename, tm1, tm2);
- HDF5_POST_RECORD();
+ H5F_PRE_RECORD();
+ H5F_RECORD_OPEN(ret, filename, access_plist, tm1, tm2);
+ H5F_POST_RECORD();
}
return(ret);
@@ -178,6 +216,7 @@ hid_t DARSHAN_DECL(H5Fopen)(const char *filename, unsigned flags,
hid_t ret;
char* tmp;
double tm1, tm2;
+#if 0
unsigned majnum, minnum, relnum;
H5get_libversion(&majnum, &minnum, &relnum);
@@ -195,6 +234,7 @@ hid_t DARSHAN_DECL(H5Fopen)(const char *filename, unsigned flags,
}
return(-1);
}
+#endif
MAP_OR_FAIL(H5Fopen);
@@ -213,15 +253,48 @@ hid_t DARSHAN_DECL(H5Fopen)(const char *filename, unsigned flags,
filename = tmp + 1;
}
- HDF5_PRE_RECORD();
- HDF5_RECORD_OPEN(ret, filename, tm1, tm2);
- HDF5_POST_RECORD();
+ H5F_PRE_RECORD();
+ H5F_RECORD_OPEN(ret, filename, access_plist, tm1, tm2);
+ H5F_POST_RECORD();
}
return(ret);
}
+herr_t H5Fflush(hid_t object_id, H5F_scope_t scope)
+{
+ struct hdf5_file_record_ref *rec_ref;
+ hid_t file_id;
+ double tm1, tm2;
+ herr_t ret;
+
+ MAP_OR_FAIL(H5Fflush);
+
+ tm1 = darshan_core_wtime();
+ ret = __real_H5Fflush(object_id, scope);
+ tm2 = darshan_core_wtime();
+
+ /* convert object_id to file_id so we can look it up */
+ file_id = H5Iget_file_id(object_id);
+ if(file_id > 0)
+ {
+ H5F_PRE_RECORD();
+ rec_ref = darshan_lookup_record_ref(hdf5_file_runtime->hid_hash,
+ &file_id, sizeof(hid_t));
+ if(rec_ref)
+ {
+ rec_ref->file_rec->counters[H5F_FLUSHES] += 1;
+ DARSHAN_TIMER_INC_NO_OVERLAP(
+ rec_ref->file_rec->fcounters[H5F_F_META_TIME],
+ tm1, tm2, rec_ref->last_meta_end);
+ }
+ H5F_POST_RECORD();
+ }
+
+ return(ret);
+}
+
herr_t DARSHAN_DECL(H5Fclose)(hid_t file_id)
{
struct hdf5_file_record_ref *rec_ref;
@@ -234,37 +307,434 @@ herr_t DARSHAN_DECL(H5Fclose)(hid_t file_id)
ret = __real_H5Fclose(file_id);
tm2 = darshan_core_wtime();
- HDF5_PRE_RECORD();
- rec_ref = darshan_lookup_record_ref(hdf5_runtime->hid_hash,
+ H5F_PRE_RECORD();
+ rec_ref = darshan_lookup_record_ref(hdf5_file_runtime->hid_hash,
&file_id, sizeof(hid_t));
if(rec_ref)
{
- if(rec_ref->file_rec->fcounters[HDF5_F_CLOSE_START_TIMESTAMP] == 0 ||
- rec_ref->file_rec->fcounters[HDF5_F_CLOSE_START_TIMESTAMP] > tm1)
- rec_ref->file_rec->fcounters[HDF5_F_CLOSE_START_TIMESTAMP] = tm1;
- rec_ref->file_rec->fcounters[HDF5_F_CLOSE_END_TIMESTAMP] = tm2;
- darshan_delete_record_ref(&(hdf5_runtime->hid_hash),
+ if(rec_ref->file_rec->fcounters[H5F_F_CLOSE_START_TIMESTAMP] == 0 ||
+ rec_ref->file_rec->fcounters[H5F_F_CLOSE_START_TIMESTAMP] > tm1)
+ rec_ref->file_rec->fcounters[H5F_F_CLOSE_START_TIMESTAMP] = tm1;
+ rec_ref->file_rec->fcounters[H5F_F_CLOSE_END_TIMESTAMP] = tm2;
+ DARSHAN_TIMER_INC_NO_OVERLAP(
+ rec_ref->file_rec->fcounters[H5F_F_META_TIME],
+ tm1, tm2, rec_ref->last_meta_end);
+ darshan_delete_record_ref(&(hdf5_file_runtime->hid_hash),
&file_id, sizeof(hid_t));
}
- HDF5_POST_RECORD();
+ H5F_POST_RECORD();
return(ret);
}
+/*********************************************************
+ * Wrappers for H5D functions of interest *
+ *********************************************************/
+
+#define DARSHAN_HDF5_MAX_NAME_LEN 256
+#define DARSHAN_HDF5_DATASET_DELIM ":"
+
+#define H5D_PRE_RECORD() do { \
+ HDF5_LOCK(); \
+ if(!darshan_core_disabled_instrumentation()) { \
+ if(!hdf5_dataset_runtime) hdf5_dataset_runtime_initialize(); \
+ if(hdf5_dataset_runtime) break; \
+ } \
+ HDF5_UNLOCK(); \
+ return(ret); \
+} while(0)
+
+#define H5D_POST_RECORD() do { \
+ HDF5_UNLOCK(); \
+} while(0)
+
+#define H5D_RECORD_OPEN(__ret, __loc_id, __name, __type_id, __space_id, __use_depr, __tm1, __tm2) do { \
+ char *__file_path, *__tmp_ptr; \
+ char __rec_name[DARSHAN_HDF5_MAX_NAME_LEN] = {0}; \
+ ssize_t __req_name_len = DARSHAN_HDF5_MAX_NAME_LEN-1, __ret_name_len; \
+ darshan_record_id __rec_id; \
+ struct hdf5_dataset_record_ref *__rec_ref; \
+ /* get corresponding file name */\
+ __ret_name_len = H5Fget_name(__loc_id, __rec_name, __req_name_len); \
+ if(__ret_name_len < 0) break; \
+ else if(__ret_name_len < __req_name_len) { \
+ /* fully resolve file path */\
+ __file_path = darshan_clean_file_path(__rec_name); \
+ if(darshan_core_excluded_path(__file_path)) { \
+ free(__file_path); \
+ break; \
+ } \
+ strncpy(__rec_name, __file_path, __req_name_len); \
+ free(__file_path); \
+ if(strlen(__rec_name) + 2 <= __req_name_len) { \
+ /* append dataset name if we have space */\
+ __tmp_ptr = __rec_name + strlen(__rec_name); \
+ strcat(__tmp_ptr, DARSHAN_HDF5_DATASET_DELIM); \
+ __tmp_ptr += 1; \
+ __req_name_len = DARSHAN_HDF5_MAX_NAME_LEN - 1 - strlen(__rec_name); \
+ __ret_name_len = H5Iget_name(__loc_id, __tmp_ptr, __req_name_len); \
+ if(__ret_name_len < 0) break; /* don't return while holding HDF5_LOCK */ \
+ else if(__ret_name_len < __req_name_len) { \
+ __tmp_ptr = __rec_name + strlen(__rec_name); \
+ __req_name_len = DARSHAN_HDF5_MAX_NAME_LEN - 1 - strlen(__rec_name); \
+ strncat(__tmp_ptr, __name, __req_name_len); \
+ } \
+ } \
+ } \
+ __rec_id = darshan_core_gen_record_id(__rec_name); \
+ __rec_ref = darshan_lookup_record_ref(hdf5_dataset_runtime->rec_id_hash, &__rec_id, sizeof(darshan_record_id)); \
+ if(!__rec_ref) __rec_ref = hdf5_track_new_dataset_record(__rec_id, __rec_name); \
+ if(!__rec_ref) break; \
+ __rec_ref->dataset_rec->counters[H5D_OPENS] += 1; \
+ __rec_ref->dataset_rec->counters[H5D_USE_DEPRECATED] = __use_depr; \
+ if(__rec_ref->dataset_rec->fcounters[H5D_F_OPEN_START_TIMESTAMP] == 0 || \
+ __rec_ref->dataset_rec->fcounters[H5D_F_OPEN_START_TIMESTAMP] > __tm1) \
+ __rec_ref->dataset_rec->fcounters[H5D_F_OPEN_START_TIMESTAMP] = __tm1; \
+ __rec_ref->dataset_rec->fcounters[H5D_F_OPEN_END_TIMESTAMP] = __tm2; \
+ DARSHAN_TIMER_INC_NO_OVERLAP(__rec_ref->dataset_rec->fcounters[H5D_F_META_TIME], \
+ __tm1, __tm2, __rec_ref->last_meta_end); \
+ __rec_ref->dataset_rec->counters[H5D_DATASPACE_NDIMS] = H5Sget_simple_extent_ndims(__space_id); \
+ __rec_ref->dataset_rec->counters[H5D_DATASPACE_NPOINTS] = H5Sget_simple_extent_npoints(__space_id); \
+ __rec_ref->dataset_rec->counters[H5D_DATATYPE_SIZE] = H5Tget_size(__type_id); \
+ darshan_add_record_ref(&(hdf5_dataset_runtime->hid_hash), &__ret, sizeof(hid_t), __rec_ref); \
+} while(0)
+
+hid_t H5Dcreate1(hid_t loc_id, const char *name, hid_t type_id, hid_t space_id, hid_t dcpl_id)
+{
+ double tm1, tm2;
+ hid_t ret;
+
+ MAP_OR_FAIL(H5Dcreate1);
+
+ tm1 = darshan_core_wtime();
+ ret = __real_H5Dcreate1(loc_id, name, type_id, space_id, dcpl_id);
+ tm2 = darshan_core_wtime();
+
+ H5D_PRE_RECORD();
+ H5D_RECORD_OPEN(ret, loc_id, name, type_id, space_id, 1, tm1, tm2);
+ H5D_POST_RECORD();
+
+ return(ret);
+}
+
+hid_t H5Dcreate2(hid_t loc_id, const char *name, hid_t dtype_id, hid_t space_id,
+ hid_t lcpl_id, hid_t dcpl_id, hid_t dapl_id)
+{
+ double tm1, tm2;
+ hid_t ret;
+
+ MAP_OR_FAIL(H5Dcreate2);
+
+ tm1 = darshan_core_wtime();
+ ret = __real_H5Dcreate2(loc_id, name, dtype_id, space_id, lcpl_id, dcpl_id, dapl_id);
+ tm2 = darshan_core_wtime();
+
+ H5D_PRE_RECORD();
+ H5D_RECORD_OPEN(ret, loc_id, name, dtype_id, space_id, 0, tm1, tm2);
+ H5D_POST_RECORD();
+
+ return(ret);
+}
+
+hid_t H5Dopen1(hid_t loc_id, const char *name)
+{
+ hid_t dtype_id;
+ hid_t space_id;
+ double tm1, tm2;
+ hid_t ret;
+
+ MAP_OR_FAIL(H5Dopen1);
+
+ tm1 = darshan_core_wtime();
+ ret = __real_H5Dopen1(loc_id, name);
+ tm2 = darshan_core_wtime();
+
+ /* query dataset datatype and dataspace */
+ dtype_id = H5Dget_type(ret);
+ if(dtype_id < 0)
+ return(ret);
+ space_id = H5Dget_space(ret);
+ if(space_id < 0)
+ {
+ H5Tclose(dtype_id);
+ return(ret);
+ }
+
+ H5D_PRE_RECORD();
+ H5D_RECORD_OPEN(ret, loc_id, name, dtype_id, space_id, 1, tm1, tm2);
+ H5D_POST_RECORD();
+
+ H5Tclose(dtype_id);
+ H5Sclose(space_id);
+
+ return(ret);
+}
+
+hid_t H5Dopen2(hid_t loc_id, const char *name, hid_t dapl_id)
+{
+ hid_t dtype_id;
+ hid_t space_id;
+ double tm1, tm2;
+ hid_t ret;
+
+ MAP_OR_FAIL(H5Dopen2);
+
+ tm1 = darshan_core_wtime();
+ ret = __real_H5Dopen2(loc_id, name, dapl_id);
+ tm2 = darshan_core_wtime();
+
+ /* query dataset datatype and dataspace */
+ dtype_id = H5Dget_type(ret);
+ if(dtype_id < 0)
+ return(ret);
+ space_id = H5Dget_space(ret);
+ if(space_id < 0)
+ {
+ H5Tclose(dtype_id);
+ return(ret);
+ }
+
+ H5D_PRE_RECORD();
+ H5D_RECORD_OPEN(ret, loc_id, name, dtype_id, space_id, 0, tm1, tm2);
+ H5D_POST_RECORD();
+
+ H5Tclose(dtype_id);
+ H5Sclose(space_id);
+
+ return(ret);
+}
+
+herr_t H5Dread(hid_t dataset_id, hid_t mem_type_id, hid_t mem_space_id,
+ hid_t file_space_id, hid_t xfer_plist_id, void * buf)
+{
+ struct hdf5_dataset_record_ref *rec_ref;
+ size_t access_size;
+ size_t type_size;
+ ssize_t file_sel_npoints;
+ H5S_sel_type file_sel_type;
+ H5FD_mpio_xfer_t xfer_mode;
+ double tm1, tm2, elapsed;
+ herr_t ret;
+ herr_t tmp_ret;
+
+ MAP_OR_FAIL(H5Dread);
+
+ tm1 = darshan_core_wtime();
+ ret = __real_H5Dread(dataset_id, mem_type_id, mem_space_id, file_space_id,
+ xfer_plist_id, buf);
+ tm2 = darshan_core_wtime();
+
+ H5D_PRE_RECORD();
+ rec_ref = darshan_lookup_record_ref(hdf5_dataset_runtime->hid_hash,
+ &dataset_id, sizeof(hid_t));
+ if(rec_ref)
+ {
+ rec_ref->dataset_rec->counters[H5D_READS] += 1;
+ if(rec_ref->last_io_type == DARSHAN_IO_WRITE)
+ rec_ref->dataset_rec->counters[H5D_RW_SWITCHES] += 1;
+ rec_ref->last_io_type = DARSHAN_IO_READ;
+ if(file_space_id == H5S_ALL)
+ {
+ file_sel_npoints = rec_ref->dataset_rec->counters[H5D_DATASPACE_NPOINTS];
+ file_sel_type = H5S_SEL_ALL;
+ }
+ else
+ {
+ file_sel_npoints = H5Sget_select_npoints(file_space_id);
+ file_sel_type = H5Sget_select_type(file_space_id);
+ }
+ if(file_sel_type == H5S_SEL_POINTS)
+ rec_ref->dataset_rec->counters[H5D_POINT_SELECTS] += 1;
+ else if(file_sel_type == H5S_SEL_ALL)
+ rec_ref->dataset_rec->counters[H5D_REGULAR_HYPERSLAB_SELECTS] += 1;
+ else
+ {
+ if(H5Sis_regular_hyperslab(file_space_id))
+ rec_ref->dataset_rec->counters[H5D_REGULAR_HYPERSLAB_SELECTS] += 1;
+ else
+ rec_ref->dataset_rec->counters[H5D_IRREGULAR_HYPERSLAB_SELECTS] += 1;
+ }
+ type_size = rec_ref->dataset_rec->counters[H5D_DATATYPE_SIZE];
+ access_size = file_sel_npoints * type_size;
+ rec_ref->dataset_rec->counters[H5D_BYTES_READ] += access_size;
+ DARSHAN_BUCKET_INC(
+ &(rec_ref->dataset_rec->counters[H5D_SIZE_READ_AGG_0_100]), access_size);
+ darshan_common_val_counter(&rec_ref->access_root, &rec_ref->access_count, access_size,
+ &(rec_ref->dataset_rec->counters[H5D_ACCESS1_ACCESS]),
+ &(rec_ref->dataset_rec->counters[H5D_ACCESS1_COUNT]));
+ tmp_ret = H5Pget_dxpl_mpio(xfer_plist_id, &xfer_mode);
+ if(tmp_ret >= 0 && xfer_mode == H5FD_MPIO_COLLECTIVE)
+ rec_ref->dataset_rec->counters[H5D_USE_MPIIO_COLLECTIVE] = 1;
+ if(rec_ref->dataset_rec->fcounters[H5D_F_READ_START_TIMESTAMP] == 0 ||
+ rec_ref->dataset_rec->fcounters[H5D_F_READ_START_TIMESTAMP] > tm1)
+ rec_ref->dataset_rec->fcounters[H5D_F_READ_START_TIMESTAMP] = tm1;
+ rec_ref->dataset_rec->fcounters[H5D_F_READ_END_TIMESTAMP] = tm2;
+ elapsed = tm2 - tm1;
+ if(rec_ref->dataset_rec->fcounters[H5D_F_MAX_READ_TIME] < elapsed)
+ {
+ rec_ref->dataset_rec->fcounters[H5D_F_MAX_READ_TIME] = elapsed;
+ rec_ref->dataset_rec->counters[H5D_MAX_READ_TIME_SIZE] = access_size;
+ }
+ DARSHAN_TIMER_INC_NO_OVERLAP(
+ rec_ref->dataset_rec->fcounters[H5D_F_READ_TIME],
+ tm1, tm2, rec_ref->last_read_end);
+ }
+ H5D_POST_RECORD();
+
+ return(ret);
+}
+
+herr_t H5Dwrite(hid_t dataset_id, hid_t mem_type_id, hid_t mem_space_id,
+ hid_t file_space_id, hid_t xfer_plist_id, const void * buf)
+{
+ struct hdf5_dataset_record_ref *rec_ref;
+ size_t access_size;
+ size_t type_size;
+ ssize_t file_sel_npoints;
+ H5S_sel_type file_sel_type;
+ H5FD_mpio_xfer_t xfer_mode;
+ double tm1, tm2, elapsed;
+ herr_t ret;
+ herr_t tmp_ret;
+
+ MAP_OR_FAIL(H5Dwrite);
+
+ tm1 = darshan_core_wtime();
+ ret = __real_H5Dwrite(dataset_id, mem_type_id, mem_space_id, file_space_id,
+ xfer_plist_id, buf);
+ tm2 = darshan_core_wtime();
+
+ H5D_PRE_RECORD();
+ rec_ref = darshan_lookup_record_ref(hdf5_dataset_runtime->hid_hash,
+ &dataset_id, sizeof(hid_t));
+ if(rec_ref)
+ {
+ rec_ref->dataset_rec->counters[H5D_WRITES] += 1;
+ if(rec_ref->last_io_type == DARSHAN_IO_READ)
+ rec_ref->dataset_rec->counters[H5D_RW_SWITCHES] += 1;
+ rec_ref->last_io_type = DARSHAN_IO_WRITE;
+ if(file_space_id == H5S_ALL)
+ {
+ file_sel_npoints = rec_ref->dataset_rec->counters[H5D_DATASPACE_NPOINTS];
+ file_sel_type = H5S_SEL_ALL;
+ }
+ else
+ {
+ file_sel_npoints = H5Sget_select_npoints(file_space_id);
+ file_sel_type = H5Sget_select_type(file_space_id);
+ }
+ if(file_sel_type == H5S_SEL_POINTS)
+ rec_ref->dataset_rec->counters[H5D_POINT_SELECTS] += 1;
+ else if(file_sel_type == H5S_SEL_ALL)
+ rec_ref->dataset_rec->counters[H5D_REGULAR_HYPERSLAB_SELECTS] += 1;
+ else
+ {
+ if(H5Sis_regular_hyperslab(file_space_id))
+ rec_ref->dataset_rec->counters[H5D_REGULAR_HYPERSLAB_SELECTS] += 1;
+ else
+ rec_ref->dataset_rec->counters[H5D_IRREGULAR_HYPERSLAB_SELECTS] += 1;
+ }
+ type_size = rec_ref->dataset_rec->counters[H5D_DATATYPE_SIZE];
+ access_size = file_sel_npoints * type_size;
+ rec_ref->dataset_rec->counters[H5D_BYTES_WRITTEN] += access_size;
+ DARSHAN_BUCKET_INC(
+ &(rec_ref->dataset_rec->counters[H5D_SIZE_WRITE_AGG_0_100]), access_size);
+ darshan_common_val_counter(&rec_ref->access_root, &rec_ref->access_count, access_size,
+ &(rec_ref->dataset_rec->counters[H5D_ACCESS1_ACCESS]),
+ &(rec_ref->dataset_rec->counters[H5D_ACCESS1_COUNT]));
+ tmp_ret = H5Pget_dxpl_mpio(xfer_plist_id, &xfer_mode);
+ if(tmp_ret >= 0 && xfer_mode == H5FD_MPIO_COLLECTIVE)
+ rec_ref->dataset_rec->counters[H5D_USE_MPIIO_COLLECTIVE] = 1;
+ if(rec_ref->dataset_rec->fcounters[H5D_F_WRITE_START_TIMESTAMP] == 0 ||
+ rec_ref->dataset_rec->fcounters[H5D_F_WRITE_START_TIMESTAMP] > tm1)
+ rec_ref->dataset_rec->fcounters[H5D_F_WRITE_START_TIMESTAMP] = tm1;
+ rec_ref->dataset_rec->fcounters[H5D_F_WRITE_END_TIMESTAMP] = tm2;
+ elapsed = tm2 - tm1;
+ if(rec_ref->dataset_rec->fcounters[H5D_F_MAX_WRITE_TIME] < elapsed)
+ {
+ rec_ref->dataset_rec->fcounters[H5D_F_MAX_WRITE_TIME] = elapsed;
+ rec_ref->dataset_rec->counters[H5D_MAX_WRITE_TIME_SIZE] = access_size;
+ }
+ DARSHAN_TIMER_INC_NO_OVERLAP(
+ rec_ref->dataset_rec->fcounters[H5D_F_WRITE_TIME],
+ tm1, tm2, rec_ref->last_write_end);
+ }
+ H5D_POST_RECORD();
+
+ return(ret);
+}
+
+herr_t H5Dflush(hid_t dataset_id)
+{
+ struct hdf5_dataset_record_ref *rec_ref;
+ double tm1, tm2;
+ herr_t ret;
+
+ MAP_OR_FAIL(H5Dflush);
+
+ tm1 = darshan_core_wtime();
+ ret = __real_H5Dflush(dataset_id);
+ tm2 = darshan_core_wtime();
+
+ H5D_PRE_RECORD();
+ rec_ref = darshan_lookup_record_ref(hdf5_dataset_runtime->hid_hash,
+ &dataset_id, sizeof(hid_t));
+ if(rec_ref)
+ {
+ rec_ref->dataset_rec->counters[H5D_FLUSHES] += 1;
+ DARSHAN_TIMER_INC_NO_OVERLAP(
+ rec_ref->dataset_rec->fcounters[H5D_F_META_TIME],
+ tm1, tm2, rec_ref->last_meta_end);
+ }
+ H5D_POST_RECORD();
+
+ return(ret);
+}
+
+herr_t H5Dclose(hid_t dataset_id)
+{
+ struct hdf5_dataset_record_ref *rec_ref;
+ double tm1, tm2;
+ herr_t ret;
+
+ MAP_OR_FAIL(H5Dclose);
+
+ tm1 = darshan_core_wtime();
+ ret = __real_H5Dclose(dataset_id);
+ tm2 = darshan_core_wtime();
+
+ H5D_PRE_RECORD();
+ rec_ref = darshan_lookup_record_ref(hdf5_dataset_runtime->hid_hash,
+ &dataset_id, sizeof(hid_t));
+ if(rec_ref)
+ {
+ if(rec_ref->dataset_rec->fcounters[H5D_F_CLOSE_START_TIMESTAMP] == 0 ||
+ rec_ref->dataset_rec->fcounters[H5D_F_CLOSE_START_TIMESTAMP] > tm1)
+ rec_ref->dataset_rec->fcounters[H5D_F_CLOSE_START_TIMESTAMP] = tm1;
+ rec_ref->dataset_rec->fcounters[H5D_F_CLOSE_END_TIMESTAMP] = tm2;
+ DARSHAN_TIMER_INC_NO_OVERLAP(rec_ref->dataset_rec->fcounters[H5D_F_META_TIME],
+ tm1, tm2, rec_ref->last_meta_end);
+ darshan_delete_record_ref(&(hdf5_dataset_runtime->hid_hash), &dataset_id, sizeof(hid_t));
+ }
+ H5D_POST_RECORD();
+
+ return(ret);
+}
+
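For context on what the wrappers above actually see, a minimal HDF5 program such as the following (illustration only, not part of the patch) would produce one H5F record and one H5D record, the latter named roughly "<file path>:<dataset path>" per H5D_RECORD_OPEN:

    #include <hdf5.h>

    int main(void)
    {
        int data[10] = {0};
        hsize_t dims[1] = {10};

        /* H5F wrappers: H5F_OPENS, open timestamps, meta time */
        hid_t fid = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);

        hid_t space = H5Screate_simple(1, dims, NULL);

        /* H5D wrappers: H5D_OPENS, dataspace/datatype counters */
        hid_t dset = H5Dcreate2(fid, "/data", H5T_NATIVE_INT, space,
                                H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

        /* H5D_WRITES, H5D_BYTES_WRITTEN, write timestamps, size buckets */
        H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);

        H5Dclose(dset);   /* H5D close timestamps, meta time */
        H5Sclose(space);
        H5Fclose(fid);    /* H5F close timestamps, meta time */
        return 0;
    }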
/*********************************************************
* Internal functions for manipulating HDF5 module state *
*********************************************************/
/* initialize internal HDF5 module data structures and register with darshan-core */
-static void hdf5_runtime_initialize()
+static void hdf5_file_runtime_initialize()
{
int hdf5_buf_size;
darshan_module_funcs mod_funcs = {
#ifdef HAVE_MPI
- .mod_redux_func = &hdf5_mpi_redux,
+ .mod_redux_func = &hdf5_file_mpi_redux,
#endif
- .mod_shutdown_func = &hdf5_shutdown
+ .mod_shutdown_func = &hdf5_file_shutdown
};
/* try and store the default number of records for this module */
@@ -272,7 +742,7 @@ static void hdf5_runtime_initialize()
/* register hdf5 module with darshan-core */
darshan_core_register_module(
- DARSHAN_HDF5_MOD,
+ DARSHAN_H5F_MOD,
mod_funcs,
&hdf5_buf_size,
&my_rank,
@@ -281,17 +751,56 @@ static void hdf5_runtime_initialize()
/* return if darshan-core does not provide enough module memory */
if(hdf5_buf_size < sizeof(struct darshan_hdf5_file))
{
- darshan_core_unregister_module(DARSHAN_HDF5_MOD);
+ darshan_core_unregister_module(DARSHAN_H5F_MOD);
+ return;
+ }
+
+ hdf5_file_runtime = malloc(sizeof(*hdf5_file_runtime));
+ if(!hdf5_file_runtime)
+ {
+ darshan_core_unregister_module(DARSHAN_H5F_MOD);
+ return;
+ }
+ memset(hdf5_file_runtime, 0, sizeof(*hdf5_file_runtime));
+
+ return;
+}
+
+static void hdf5_dataset_runtime_initialize()
+{
+ int hdf5_buf_size;
+ darshan_module_funcs mod_funcs = {
+#ifdef HAVE_MPI
+ .mod_redux_func = &hdf5_dataset_mpi_redux,
+#endif
+ .mod_shutdown_func = &hdf5_dataset_shutdown
+ };
+
+ /* try and store the default number of records for this module */
+ hdf5_buf_size = DARSHAN_DEF_MOD_REC_COUNT * sizeof(struct darshan_hdf5_dataset);
+
+ /* register hdf5 module with darshan-core */
+ darshan_core_register_module(
+ DARSHAN_H5D_MOD,
+ mod_funcs,
+ &hdf5_buf_size,
+ &my_rank,
+ NULL);
+
+ /* return if darshan-core does not provide enough module memory */
+ if(hdf5_buf_size < sizeof(struct darshan_hdf5_dataset))
+ {
+ darshan_core_unregister_module(DARSHAN_H5D_MOD);
return;
}
- hdf5_runtime = malloc(sizeof(*hdf5_runtime));
- if(!hdf5_runtime)
+ hdf5_dataset_runtime = malloc(sizeof(*hdf5_dataset_runtime));
+ if(!hdf5_dataset_runtime)
{
- darshan_core_unregister_module(DARSHAN_HDF5_MOD);
+ darshan_core_unregister_module(DARSHAN_H5D_MOD);
return;
}
- memset(hdf5_runtime, 0, sizeof(*hdf5_runtime));
+ memset(hdf5_dataset_runtime, 0, sizeof(*hdf5_dataset_runtime));
return;
}
@@ -309,7 +818,7 @@ static struct hdf5_file_record_ref *hdf5_track_new_file_record(
memset(rec_ref, 0, sizeof(*rec_ref));
/* add a reference to this file record based on record id */
- ret = darshan_add_record_ref(&(hdf5_runtime->rec_id_hash), &rec_id,
+ ret = darshan_add_record_ref(&(hdf5_file_runtime->rec_id_hash), &rec_id,
sizeof(darshan_record_id), rec_ref);
if(ret == 0)
{
@@ -323,88 +832,424 @@ static struct hdf5_file_record_ref *hdf5_track_new_file_record(
file_rec = darshan_core_register_record(
rec_id,
path,
- DARSHAN_HDF5_MOD,
+ DARSHAN_H5F_MOD,
sizeof(struct darshan_hdf5_file),
NULL);
if(!file_rec)
{
- darshan_delete_record_ref(&(hdf5_runtime->rec_id_hash),
+ darshan_delete_record_ref(&(hdf5_file_runtime->rec_id_hash),
&rec_id, sizeof(darshan_record_id));
free(rec_ref);
return(NULL);
}
/* registering this file record was successful, so initialize some fields */
file_rec->base_rec.id = rec_id;
file_rec->base_rec.rank = my_rank;
rec_ref->file_rec = file_rec;
- hdf5_runtime->file_rec_count++;
+ hdf5_file_runtime->rec_count++;
+
+ return(rec_ref);
+}
+
+static struct hdf5_dataset_record_ref *hdf5_track_new_dataset_record(
+ darshan_record_id rec_id, const char *path)
+{
+ struct darshan_hdf5_dataset *dataset_rec = NULL;
+ struct hdf5_dataset_record_ref *rec_ref = NULL;
+ int ret;
+
+ rec_ref = malloc(sizeof(*rec_ref));
+ if(!rec_ref)
+ return(NULL);
+ memset(rec_ref, 0, sizeof(*rec_ref));
+
+ /* add a reference to this dataset record based on record id */
+ ret = darshan_add_record_ref(&(hdf5_dataset_runtime->rec_id_hash), &rec_id,
+ sizeof(darshan_record_id), rec_ref);
+ if(ret == 0)
+ {
+ free(rec_ref);
+ return(NULL);
+ }
+
+ /* register the actual dataset record with darshan-core so it is persisted
+ * in the log file
+ */
+ dataset_rec = darshan_core_register_record(
+ rec_id,
+ path,
+ DARSHAN_H5D_MOD,
+ sizeof(struct darshan_hdf5_dataset),
+ NULL);
+
+ if(!dataset_rec)
+ {
+ darshan_delete_record_ref(&(hdf5_dataset_runtime->rec_id_hash),
+ &rec_id, sizeof(darshan_record_id));
+ free(rec_ref);
+ return(NULL);
+ }
+
+ /* registering this dataset record was successful, so initialize some fields */
+ dataset_rec->base_rec.id = rec_id;
+ dataset_rec->base_rec.rank = my_rank;
+ rec_ref->dataset_rec = dataset_rec;
+ hdf5_dataset_runtime->rec_count++;
return(rec_ref);
}
-static void hdf5_cleanup_runtime()
+static void hdf5_finalize_dataset_records(void *rec_ref_p, void *user_ptr)
{
- darshan_clear_record_refs(&(hdf5_runtime->hid_hash), 0);
- darshan_clear_record_refs(&(hdf5_runtime->rec_id_hash), 1);
+ struct hdf5_dataset_record_ref *rec_ref =
+ (struct hdf5_dataset_record_ref *)rec_ref_p;
- free(hdf5_runtime);
- hdf5_runtime = NULL;
+ tdestroy(rec_ref->access_root, free);
+ return;
+}
+
+static void hdf5_cleanup_file_runtime()
+{
+ darshan_clear_record_refs(&(hdf5_file_runtime->hid_hash), 0);
+ darshan_clear_record_refs(&(hdf5_file_runtime->rec_id_hash), 1);
+
+ free(hdf5_file_runtime);
+ hdf5_file_runtime = NULL;
+
+ return;
+}
+
+static void hdf5_cleanup_dataset_runtime()
+{
+ darshan_clear_record_refs(&(hdf5_dataset_runtime->hid_hash), 0);
+ darshan_clear_record_refs(&(hdf5_dataset_runtime->rec_id_hash), 1);
+
+ free(hdf5_dataset_runtime);
+ hdf5_dataset_runtime = NULL;
return;
}
#ifdef HAVE_MPI
-static void hdf5_record_reduction_op(void* infile_v, void* inoutfile_v,
+static void hdf5_file_record_reduction_op(void* inrec_v, void* inoutrec_v,
int *len, MPI_Datatype *datatype)
{
struct darshan_hdf5_file tmp_file;
- struct darshan_hdf5_file *infile = infile_v;
- struct darshan_hdf5_file *inoutfile = inoutfile_v;
+ struct darshan_hdf5_file *inrec = inrec_v;
+ struct darshan_hdf5_file *inoutrec = inoutrec_v;
int i, j;
- assert(hdf5_runtime);
-
for(i=0; i<*len; i++)
{
memset(&tmp_file, 0, sizeof(struct darshan_hdf5_file));
- tmp_file.base_rec.id = infile->base_rec.id;
+ tmp_file.base_rec.id = inrec->base_rec.id;
tmp_file.base_rec.rank = -1;
/* sum */
- for(j=HDF5_OPENS; j<=HDF5_OPENS; j++)
+ for(j=H5F_OPENS; j<=H5F_FLUSHES; j++)
{
- tmp_file.counters[j] = infile->counters[j] + inoutfile->counters[j];
+ tmp_file.counters[j] = inrec->counters[j] + inoutrec->counters[j];
}
+ if(inoutrec->counters[H5F_USE_MPIIO] == 1 || inrec->counters[H5F_USE_MPIIO] == 1)
+ tmp_file.counters[H5F_USE_MPIIO] = 1;
+
/* min non-zero (if available) value */
- for(j=HDF5_F_OPEN_START_TIMESTAMP; j<=HDF5_F_CLOSE_START_TIMESTAMP; j++)
+ for(j=H5F_F_OPEN_START_TIMESTAMP; j<=H5F_F_CLOSE_START_TIMESTAMP; j++)
{
- if((infile->fcounters[j] < inoutfile->fcounters[j] &&
- infile->fcounters[j] > 0) || inoutfile->fcounters[j] == 0)
- tmp_file.fcounters[j] = infile->fcounters[j];
+ if((inrec->fcounters[j] < inoutrec->fcounters[j] &&
+ inrec->fcounters[j] > 0) || inoutrec->fcounters[j] == 0)
+ tmp_file.fcounters[j] = inrec->fcounters[j];
else
- tmp_file.fcounters[j] = inoutfile->fcounters[j];
+ tmp_file.fcounters[j] = inoutrec->fcounters[j];
}
/* max */
- for(j=HDF5_F_OPEN_END_TIMESTAMP; j<=HDF5_F_CLOSE_END_TIMESTAMP; j++)
+ for(j=H5F_F_OPEN_END_TIMESTAMP; j<=H5F_F_CLOSE_END_TIMESTAMP; j++)
{
- if(infile->fcounters[j] > inoutfile->fcounters[j])
- tmp_file.fcounters[j] = infile->fcounters[j];
+ if(inrec->fcounters[j] > inoutrec->fcounters[j])
+ tmp_file.fcounters[j] = inrec->fcounters[j];
else
- tmp_file.fcounters[j] = inoutfile->fcounters[j];
+ tmp_file.fcounters[j] = inoutrec->fcounters[j];
}
+ /* sum */
+ tmp_file.fcounters[H5F_F_META_TIME] =
+ inrec->fcounters[H5F_F_META_TIME] + inoutrec->fcounters[H5F_F_META_TIME];
+
/* update pointers */
- *inoutfile = tmp_file;
- inoutfile++;
- infile++;
+ *inoutrec = tmp_file;
+ inoutrec++;
+ inrec++;
}
return;
}
+
+static void hdf5_dataset_record_reduction_op(void* inrec_v, void* inoutrec_v,
+ int *len, MPI_Datatype *datatype)
+{
+ struct darshan_hdf5_dataset tmp_dataset;
+ struct darshan_hdf5_dataset *inrec = inrec_v;
+ struct darshan_hdf5_dataset *inoutrec = inoutrec_v;
+ int i, j, k;
+
+ for(i=0; i<*len; i++)
+ {
+ memset(&tmp_dataset, 0, sizeof(struct darshan_hdf5_dataset));
+ tmp_dataset.base_rec.id = inrec->base_rec.id;
+ tmp_dataset.base_rec.rank = -1;
+
+ /* sum */
+ for(j=H5D_OPENS; j<=H5D_POINT_SELECTS; j++)
+ {
+ tmp_dataset.counters[j] = inrec->counters[j] + inoutrec->counters[j];
+ }
+
+ /* skip H5D_MAX_*_TIME_SIZE; handled in floating point section */
+
+ for(j=H5D_SIZE_READ_AGG_0_100; j<=H5D_SIZE_WRITE_AGG_1G_PLUS; j++)
+ {
+ tmp_dataset.counters[j] = inrec->counters[j] + inoutrec->counters[j];
+ }
+
+ /* first collapse any duplicates */
+ for(j=H5D_ACCESS1_ACCESS; j<=H5D_ACCESS4_ACCESS; j++)
+ {
+ for(k=H5D_ACCESS1_ACCESS; k<=H5D_ACCESS4_ACCESS; k++)
+ {
+ if(inrec->counters[j] == inoutrec->counters[k])
+ {
+ inrec->counters[j+4] += inoutrec->counters[k+4];
+ inoutrec->counters[k] = 0;
+ inoutrec->counters[k+4] = 0;
+ }
+ }
+ }
+
+ /* first set */
+ for(j=H5D_ACCESS1_ACCESS; j<=H5D_ACCESS4_ACCESS; j++)
+ {
+ DARSHAN_COMMON_VAL_COUNTER_INC(&(tmp_dataset.counters[H5D_ACCESS1_ACCESS]),
+ &(tmp_dataset.counters[H5D_ACCESS1_COUNT]), inrec->counters[j],
+ inrec->counters[j+4], 0);
+ }
+
+ /* second set */
+ for(j=H5D_ACCESS1_ACCESS; j<=H5D_ACCESS4_ACCESS; j++)
+ {
+ DARSHAN_COMMON_VAL_COUNTER_INC(&(tmp_dataset.counters[H5D_ACCESS1_ACCESS]),
+ &(tmp_dataset.counters[H5D_ACCESS1_COUNT]), inoutrec->counters[j],
+ inoutrec->counters[j+4], 0);
+ }
+
+ tmp_dataset.counters[H5D_DATASPACE_NDIMS] = inrec->counters[H5D_DATASPACE_NDIMS];
+ tmp_dataset.counters[H5D_DATASPACE_NPOINTS] = inrec->counters[H5D_DATASPACE_NPOINTS];
+ tmp_dataset.counters[H5D_DATATYPE_SIZE] = inrec->counters[H5D_DATATYPE_SIZE];
+
+ if(inoutrec->counters[H5D_USE_MPIIO_COLLECTIVE] == 1 ||
+ inrec->counters[H5D_USE_MPIIO_COLLECTIVE] == 1)
+ tmp_dataset.counters[H5D_USE_MPIIO_COLLECTIVE] = 1;
+
+ if(inoutrec->counters[H5D_USE_DEPRECATED] == 1 ||
+ inrec->counters[H5D_USE_DEPRECATED] == 1)
+ tmp_dataset.counters[H5D_USE_DEPRECATED] = 1;
+
+ /* min non-zero (if available) value */
+ for(j=H5D_F_OPEN_START_TIMESTAMP; j<=H5D_F_CLOSE_START_TIMESTAMP; j++)
+ {
+ if((inrec->fcounters[j] < inoutrec->fcounters[j] &&
+ inrec->fcounters[j] > 0) || inoutrec->fcounters[j] == 0)
+ tmp_dataset.fcounters[j] = inrec->fcounters[j];
+ else
+ tmp_dataset.fcounters[j] = inoutrec->fcounters[j];
+ }
+
+ /* max */
+ for(j=H5D_F_OPEN_END_TIMESTAMP; j<=H5D_F_CLOSE_END_TIMESTAMP; j++)
+ {
+ if(inrec->fcounters[j] > inoutrec->fcounters[j])
+ tmp_dataset.fcounters[j] = inrec->fcounters[j];
+ else
+ tmp_dataset.fcounters[j] = inoutrec->fcounters[j];
+ }
+
+ /* sum */
+ for(j=H5D_F_READ_TIME; j<=H5D_F_META_TIME; j++)
+ {
+ tmp_dataset.fcounters[j] = inrec->fcounters[j] + inoutrec->fcounters[j];
+ }
+
+ /* max (special case) */
+ if(inrec->fcounters[H5D_F_MAX_READ_TIME] >
+ inoutrec->fcounters[H5D_F_MAX_READ_TIME])
+ {
+ tmp_dataset.fcounters[H5D_F_MAX_READ_TIME] =
+ inrec->fcounters[H5D_F_MAX_READ_TIME];
+ tmp_dataset.counters[H5D_MAX_READ_TIME_SIZE] =
+ inrec->counters[H5D_MAX_READ_TIME_SIZE];
+ }
+ else
+ {
+ tmp_dataset.fcounters[H5D_F_MAX_READ_TIME] =
+ inoutrec->fcounters[H5D_F_MAX_READ_TIME];
+ tmp_dataset.counters[H5D_MAX_READ_TIME_SIZE] =
+ inoutrec->counters[H5D_MAX_READ_TIME_SIZE];
+ }
+
+ /* max (special case) */
+ if(inrec->fcounters[H5D_F_MAX_WRITE_TIME] >
+ inoutrec->fcounters[H5D_F_MAX_WRITE_TIME])
+ {
+ tmp_dataset.fcounters[H5D_F_MAX_WRITE_TIME] =
+ inrec->fcounters[H5D_F_MAX_WRITE_TIME];
+ tmp_dataset.counters[H5D_MAX_WRITE_TIME_SIZE] =
+ inrec->counters[H5D_MAX_WRITE_TIME_SIZE];
+ }
+ else
+ {
+ tmp_dataset.fcounters[H5D_F_MAX_WRITE_TIME] =
+ inoutrec->fcounters[H5D_F_MAX_WRITE_TIME];
+ tmp_dataset.counters[H5D_MAX_WRITE_TIME_SIZE] =
+ inoutrec->counters[H5D_MAX_WRITE_TIME_SIZE];
+ }
+
+ /* min (zeroes are ok here; some procs don't do I/O) */
+ if(inrec->fcounters[H5D_F_FASTEST_RANK_TIME] <
+ inoutrec->fcounters[H5D_F_FASTEST_RANK_TIME])
+ {
+ tmp_dataset.counters[H5D_FASTEST_RANK] =
+ inrec->counters[H5D_FASTEST_RANK];
+ tmp_dataset.counters[H5D_FASTEST_RANK_BYTES] =
+ inrec->counters[H5D_FASTEST_RANK_BYTES];
+ tmp_dataset.fcounters[H5D_F_FASTEST_RANK_TIME] =
+ inrec->fcounters[H5D_F_FASTEST_RANK_TIME];
+ }
+ else
+ {
+ tmp_dataset.counters[H5D_FASTEST_RANK] =
+ inoutrec->counters[H5D_FASTEST_RANK];
+ tmp_dataset.counters[H5D_FASTEST_RANK_BYTES] =
+ inoutrec->counters[H5D_FASTEST_RANK_BYTES];
+ tmp_dataset.fcounters[H5D_F_FASTEST_RANK_TIME] =
+ inoutrec->fcounters[H5D_F_FASTEST_RANK_TIME];
+ }
+
+ /* max */
+ if(inrec->fcounters[H5D_F_SLOWEST_RANK_TIME] >
+ inoutrec->fcounters[H5D_F_SLOWEST_RANK_TIME])
+ {
+ tmp_dataset.counters[H5D_SLOWEST_RANK] =
+ inrec->counters[H5D_SLOWEST_RANK];
+ tmp_dataset.counters[H5D_SLOWEST_RANK_BYTES] =
+ inrec->counters[H5D_SLOWEST_RANK_BYTES];
+ tmp_dataset.fcounters[H5D_F_SLOWEST_RANK_TIME] =
+ inrec->fcounters[H5D_F_SLOWEST_RANK_TIME];
+ }
+ else
+ {
+ tmp_dataset.counters[H5D_SLOWEST_RANK] =
+ inoutrec->counters[H5D_SLOWEST_RANK];
+ tmp_dataset.counters[H5D_SLOWEST_RANK_BYTES] =
+ inoutrec->counters[H5D_SLOWEST_RANK_BYTES];
+ tmp_dataset.fcounters[H5D_F_SLOWEST_RANK_TIME] =
+ inoutrec->fcounters[H5D_F_SLOWEST_RANK_TIME];
+ }
+
+ /* update pointers */
+ *inoutrec = tmp_dataset;
+ inoutrec++;
+ inrec++;
+ }
+
+ return;
+}
+
+static void hdf5_shared_dataset_record_variance(
+ MPI_Comm mod_comm, struct darshan_hdf5_dataset *inrec_array,
+ struct darshan_hdf5_dataset *outrec_array, int shared_rec_count)
+{
+ MPI_Datatype var_dt;
+ MPI_Op var_op;
+ int i;
+ struct darshan_variance_dt *var_send_buf = NULL;
+ struct darshan_variance_dt *var_recv_buf = NULL;
+
+ PMPI_Type_contiguous(sizeof(struct darshan_variance_dt),
+ MPI_BYTE, &var_dt);
+ PMPI_Type_commit(&var_dt);
+
+ PMPI_Op_create(darshan_variance_reduce, 1, &var_op);
+
+ var_send_buf = malloc(shared_rec_count * sizeof(struct darshan_variance_dt));
+ if(!var_send_buf)
+ return;
+
+ if(my_rank == 0)
+ {
+ var_recv_buf = malloc(shared_rec_count * sizeof(struct darshan_variance_dt));
+
+ if(!var_recv_buf)
+ return;
+ }
+
+ /* get total i/o time variances for shared records */
+
+ for(i=0; i<shared_rec_count; i++)
+ {
+ var_send_buf[i].n = 1;
+ var_send_buf[i].S = 0;
+ var_send_buf[i].T = inrec_array[i].fcounters[H5D_F_READ_TIME] +
+ inrec_array[i].fcounters[H5D_F_WRITE_TIME] +
+ inrec_array[i].fcounters[H5D_F_META_TIME];
+ }
+
+ PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
+ var_dt, var_op, 0, mod_comm);
+
+ if(my_rank == 0)
+ {
+ for(i=0; i<shared_rec_count; i++)
+ {
+ outrec_array[i].fcounters[H5D_F_VARIANCE_RANK_TIME] =
+ (var_recv_buf[i].S / var_recv_buf[i].n);
+ }
+ }
+
+ /* get total bytes moved variances for shared records */
+
+ for(i=0; i<shared_rec_count; i++)
+ {
+ var_send_buf[i].n = 1;
+ var_send_buf[i].S = 0;
+ var_send_buf[i].T = (double)
+ inrec_array[i].counters[H5D_BYTES_READ] +
+ inrec_array[i].counters[H5D_BYTES_WRITTEN];
+ }
+
+ PMPI_Reduce(var_send_buf, var_recv_buf, shared_rec_count,
+ var_dt, var_op, 0, mod_comm);
+
+ if(my_rank == 0)
+ {
+ for(i=0; i<shared_rec_count; i++)
+ {
+ outrec_array[i].fcounters[H5D_F_VARIANCE_RANK_BYTES] =
+ (var_recv_buf[i].S / var_recv_buf[i].n);
+ }
+ }
+
+ PMPI_Type_free(&var_dt);
+ PMPI_Op_free(&var_op);
+ free(var_send_buf);
+ free(var_recv_buf);
+
+ return;
+}
#endif
/************************************************************************
@@ -412,13 +1257,13 @@ static void hdf5_record_reduction_op(void* infile_v, void* inoutfile_v,
************************************************************************/
#ifdef HAVE_MPI
-static void hdf5_mpi_redux(
+static void hdf5_file_mpi_redux(
void *hdf5_buf,
MPI_Comm mod_comm,
darshan_record_id *shared_recs,
int shared_rec_count)
{
- int hdf5_rec_count;
+ int rec_count;
struct hdf5_file_record_ref *rec_ref;
struct darshan_hdf5_file *hdf5_rec_buf = (struct darshan_hdf5_file *)hdf5_buf;
struct darshan_hdf5_file *red_send_buf = NULL;
@@ -428,14 +1273,14 @@ static void hdf5_mpi_redux(
int i;
HDF5_LOCK();
- assert(hdf5_runtime);
+ assert(hdf5_file_runtime);
- hdf5_rec_count = hdf5_runtime->file_rec_count;
+ rec_count = hdf5_file_runtime->rec_count;
/* necessary initialization of shared records */
for(i = 0; i < shared_rec_count; i++)
{
- rec_ref = darshan_lookup_record_ref(hdf5_runtime->rec_id_hash,
+ rec_ref = darshan_lookup_record_ref(hdf5_file_runtime->rec_id_hash,
&shared_recs[i], sizeof(darshan_record_id));
assert(rec_ref);
@@ -445,11 +1290,11 @@ static void hdf5_mpi_redux(
/* sort the array of records so we get all of the shared records
* (marked by rank -1) in a contiguous portion at end of the array
*/
- darshan_record_sort(hdf5_rec_buf, hdf5_rec_count,
+ darshan_record_sort(hdf5_rec_buf, rec_count,
sizeof(struct darshan_hdf5_file));
- /* make *send_buf point to the shared files at the end of sorted array */
- red_send_buf = &(hdf5_rec_buf[hdf5_rec_count-shared_rec_count]);
+ /* make *send_buf point to the shared records at the end of sorted array */
+ red_send_buf = &(hdf5_rec_buf[rec_count-shared_rec_count]);
/* allocate memory for the reduction output on rank 0 */
if(my_rank == 0)
@@ -462,31 +1307,143 @@ static void hdf5_mpi_redux(
}
}
/* construct a datatype for a HDF5 file record. This is serving no purpose
* except to make sure we can do a reduction on proper boundaries
*/
- PMPI_Type_contiguous(sizeof(struct darshan_hdf5_file),
- MPI_BYTE, &red_type);
+ PMPI_Type_contiguous(sizeof(struct darshan_hdf5_file), MPI_BYTE, &red_type);
PMPI_Type_commit(&red_type);
/* register a HDF5 file record reduction operator */
- PMPI_Op_create(hdf5_record_reduction_op, 1, &red_op);
+ PMPI_Op_create(hdf5_file_record_reduction_op, 1, &red_op);
/* reduce shared HDF5 file records */
PMPI_Reduce(red_send_buf, red_recv_buf,
shared_rec_count, red_type, red_op, 0, mod_comm);
/* clean up reduction state */
if(my_rank == 0)
{
- int tmp_ndx = hdf5_rec_count - shared_rec_count;
+ int tmp_ndx = rec_count - shared_rec_count;
memcpy(&(hdf5_rec_buf[tmp_ndx]), red_recv_buf,
shared_rec_count * sizeof(struct darshan_hdf5_file));
free(red_recv_buf);
}
else
{
- hdf5_runtime->file_rec_count -= shared_rec_count;
+ hdf5_file_runtime->rec_count -= shared_rec_count;
+ }
+
+ PMPI_Type_free(&red_type);
+ PMPI_Op_free(&red_op);
+
+ HDF5_UNLOCK();
+ return;
+}
+
+static void hdf5_dataset_mpi_redux(
+ void *hdf5_buf,
+ MPI_Comm mod_comm,
+ darshan_record_id *shared_recs,
+ int shared_rec_count)
+{
+ int rec_count;
+ struct hdf5_dataset_record_ref *rec_ref;
+ struct darshan_hdf5_dataset *hdf5_rec_buf = (struct darshan_hdf5_dataset *)hdf5_buf;
+ double hdf5_time;
+ struct darshan_hdf5_dataset *red_send_buf = NULL;
+ struct darshan_hdf5_dataset *red_recv_buf = NULL;
+ MPI_Datatype red_type;
+ MPI_Op red_op;
+ int i;
+
+ HDF5_LOCK();
+ assert(hdf5_dataset_runtime);
+
+ rec_count = hdf5_dataset_runtime->rec_count;
+
+ /* necessary initialization of shared records */
+ for(i = 0; i < shared_rec_count; i++)
+ {
+ rec_ref = darshan_lookup_record_ref(hdf5_dataset_runtime->rec_id_hash,
+ &shared_recs[i], sizeof(darshan_record_id));
+ assert(rec_ref);
+
+ hdf5_time =
+ rec_ref->dataset_rec->fcounters[H5D_F_READ_TIME] +
+ rec_ref->dataset_rec->fcounters[H5D_F_WRITE_TIME] +
+ rec_ref->dataset_rec->fcounters[H5D_F_META_TIME];
+
+ /* until reduction occurs, we assume that this rank is both
+ * the fastest and slowest. It is up to the reduction operator
+ * to find the true min and max.
+ */
+ rec_ref->dataset_rec->counters[H5D_FASTEST_RANK] =
+ rec_ref->dataset_rec->base_rec.rank;
+ rec_ref->dataset_rec->counters[H5D_FASTEST_RANK_BYTES] =
+ rec_ref->dataset_rec->counters[H5D_BYTES_READ] +
+ rec_ref->dataset_rec->counters[H5D_BYTES_WRITTEN];
+ rec_ref->dataset_rec->fcounters[H5D_F_FASTEST_RANK_TIME] =
+ hdf5_time;
+
+ rec_ref->dataset_rec->counters[H5D_SLOWEST_RANK] =
+ rec_ref->dataset_rec->counters[H5D_FASTEST_RANK];
+ rec_ref->dataset_rec->counters[H5D_SLOWEST_RANK_BYTES] =
+ rec_ref->dataset_rec->counters[H5D_FASTEST_RANK_BYTES];
+ rec_ref->dataset_rec->fcounters[H5D_F_SLOWEST_RANK_TIME] =
+ rec_ref->dataset_rec->fcounters[H5D_F_FASTEST_RANK_TIME];
+
+ rec_ref->dataset_rec->base_rec.rank = -1;
+ }
+
+ /* sort the array of records so we get all of the shared records
+ * (marked by rank -1) in a contiguous portion at end of the array
+ */
+ darshan_record_sort(hdf5_rec_buf, rec_count,
+ sizeof(struct darshan_hdf5_dataset));
+
+ /* make *send_buf point to the shared records at the end of sorted array */
+ red_send_buf = &(hdf5_rec_buf[rec_count-shared_rec_count]);
+
+ /* allocate memory for the reduction output on rank 0 */
+ if(my_rank == 0)
+ {
+ red_recv_buf = malloc(shared_rec_count * sizeof(struct darshan_hdf5_dataset));
+ if(!red_recv_buf)
+ {
+ HDF5_UNLOCK();
+ return;
+ }
+ }
+
+ /* construct a datatype for a HDF5 dataset record. This is serving no purpose
+ * except to make sure we can do a reduction on proper boundaries
+ */
+ PMPI_Type_contiguous(sizeof(struct darshan_hdf5_dataset),
+ MPI_BYTE, &red_type);
+ PMPI_Type_commit(&red_type);
+
+ /* register a HDF5 dataset record reduction operator */
+ PMPI_Op_create(hdf5_dataset_record_reduction_op, 1, &red_op);
+
+ /* reduce shared HDF5 dataset records */
+ PMPI_Reduce(red_send_buf, red_recv_buf,
+ shared_rec_count, red_type, red_op, 0, mod_comm);
+
+ /* get the time and byte variances for shared datasets */
+ hdf5_shared_dataset_record_variance(mod_comm, red_send_buf, red_recv_buf,
+ shared_rec_count);
+
+ /* clean up reduction state */
+ if(my_rank == 0)
+ {
+ int tmp_ndx = rec_count - shared_rec_count;
+ memcpy(&(hdf5_rec_buf[tmp_ndx]), red_recv_buf,
+ shared_rec_count * sizeof(struct darshan_hdf5_dataset));
+ free(red_recv_buf);
+ }
+ else
+ {
+ hdf5_dataset_runtime->rec_count -= shared_rec_count;
}
PMPI_Type_free(&red_type);
@@ -497,22 +1454,49 @@ static void hdf5_mpi_redux(
}
#endif
-static void hdf5_shutdown(
+static void hdf5_file_shutdown(
+ void **hdf5_buf,
+ int *hdf5_buf_sz)
+{
+ int rec_count;
+
+ HDF5_LOCK();
+ assert(hdf5_file_runtime);
+
+ rec_count = hdf5_file_runtime->rec_count;
+
+ /* shutdown internal structures used for instrumenting */
+ hdf5_cleanup_file_runtime();
+
+ /* update output buffer size to account for shared file reduction */
+ *hdf5_buf_sz = rec_count * sizeof(struct darshan_hdf5_file);
+
+ HDF5_UNLOCK();
+ return;
+}
+
+static void hdf5_dataset_shutdown(
void **hdf5_buf,
int *hdf5_buf_sz)
{
- int hdf5_rec_count;
+ int rec_count;
HDF5_LOCK();
- assert(hdf5_runtime);
+ assert(hdf5_dataset_runtime);
+
+ rec_count = hdf5_dataset_runtime->rec_count;
- hdf5_rec_count = hdf5_runtime->file_rec_count;
+ /* perform any final transformations on HDF5 dataset records before
+ * writing them out to log file
+ */
+ darshan_iter_record_refs(hdf5_dataset_runtime->rec_id_hash,
+ &hdf5_finalize_dataset_records, NULL);
/* shutdown internal structures used for instrumenting */
- hdf5_cleanup_runtime();
+ hdf5_cleanup_dataset_runtime();
- /* update output buffer size to account for shared file reduction */
- *hdf5_buf_sz = hdf5_rec_count * sizeof(struct darshan_hdf5_file);
+ /* update output buffer size to account for shared dataset reduction */
+ *hdf5_buf_sz = rec_count * sizeof(struct darshan_hdf5_dataset);
HDF5_UNLOCK();
return;
=====================================
darshan-runtime/share/ld-opts/darshan-hdf5-ld-opts
=====================================
@@ -1,4 +1,13 @@
--undefined=__wrap_H5Fcreate
--wrap=H5Fcreate
--wrap=H5Fopen
+--wrap=H5Fflush
--wrap=H5Fclose
+--wrap=H5Dcreate1
+--wrap=H5Dcreate2
+--wrap=H5Dopen1
+--wrap=H5Dopen2
+--wrap=H5Dread
+--wrap=H5Dwrite
+--wrap=H5Dflush
+--wrap=H5Dclose
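Note: each --wrap=SYMBOL option tells GNU ld to resolve references to SYMBOL against __wrap_SYMBOL, with the original routine still reachable as __real_SYMBOL; --undefined=__wrap_H5Fcreate simply forces the wrapper object to be pulled into the link. GNU ld can also read this whole options file via its @file syntax (e.g. -Wl,@/path/to/darshan-hdf5-ld-opts on the compiler driver's link line). A minimal wrapper for one of the newly wrapped calls could look like the sketch below; the timing and record-update steps are placeholders, not Darshan's actual helpers.

#include <hdf5.h>

/* provided by the linker when --wrap=H5Fopen is in effect */
extern hid_t __real_H5Fopen(const char *filename, unsigned flags, hid_t fapl_id);

hid_t __wrap_H5Fopen(const char *filename, unsigned flags, hid_t fapl_id)
{
    hid_t ret;

    /* placeholder: record a start timestamp here */
    ret = __real_H5Fopen(filename, flags, fapl_id);  /* call the real HDF5 routine */
    /* placeholder: record an end timestamp, then look up or create the file
     * record for this filename and bump H5F_OPENS / the open timestamps */

    return ret;
}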
=====================================
darshan-util/darshan-analyzer.c
=====================================
@@ -88,7 +88,7 @@ int process_log(const char *fname, double *io_ratio, int *used_mpio, int *used_p
if (file->mod_map[DARSHAN_MPIIO_MOD].len > 0)
*used_mpio += 1;
- if (file->mod_map[DARSHAN_HDF5_MOD].len > 0)
+ if (file->mod_map[DARSHAN_H5F_MOD].len > 0 || file->mod_map[DARSHAN_H5D_MOD].len > 0)
*used_hdf5 += 1;
if (file->mod_map[DARSHAN_PNETCDF_MOD].len > 0)
*used_pnet += 1;
=====================================
darshan-util/darshan-hdf5-logutils.c
=====================================
@@ -21,36 +21,61 @@
/* counter name strings for the HDF5 module */
#define X(a) #a,
-char *hdf5_counter_names[] = {
- HDF5_COUNTERS
+char *h5f_counter_names[] = {
+ H5F_COUNTERS
+};
+char *h5f_f_counter_names[] = {
+ H5F_F_COUNTERS
};
-char *hdf5_f_counter_names[] = {
- HDF5_F_COUNTERS
+char *h5d_counter_names[] = {
+ H5D_COUNTERS
+};
+char *h5d_f_counter_names[] = {
+ H5D_F_COUNTERS
};
#undef X
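Note: the #define X(a) #a, ... #undef X pairs above are the standard X-macro trick: the same H5F_COUNTERS/H5D_COUNTERS lists from darshan-hdf5-log-format.h expand once into enum indices and once into printable name strings, so the two can never drift apart. A self-contained illustration with a made-up counter list:

#include <stdio.h>

/* made-up counter list in the same style as H5F_COUNTERS/H5D_COUNTERS */
#define TOY_COUNTERS \
    X(TOY_OPENS) \
    X(TOY_READS) \
    X(TOY_NUM_INDICES)

/* expand the list once as enum indices... */
#define X(a) a,
enum toy_counter_ids { TOY_COUNTERS };
#undef X

/* ...and once more as printable name strings */
#define X(a) #a,
char *toy_counter_names[] = { TOY_COUNTERS };
#undef X

int main(void)
{
    int i;
    for(i = 0; i < TOY_NUM_INDICES; i++)
        printf("%d -> %s\n", i, toy_counter_names[i]);
    return 0;
}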
-#define DARSHAN_HDF5_FILE_SIZE_1 40
+#define DARSHAN_H5F_FILE_SIZE_1 40
static int darshan_log_get_hdf5_file(darshan_fd fd, void** hdf5_buf_p);
static int darshan_log_put_hdf5_file(darshan_fd fd, void* hdf5_buf);
-static void darshan_log_print_hdf5_file(void *file_rec,
- char *file_name, char *mnt_pt, char *fs_type);
-static void darshan_log_print_hdf5_description(int ver);
-static void darshan_log_print_hdf5_file_diff(void *file_rec1, char *file_name1,
- void *file_rec2, char *file_name2);
+static void darshan_log_print_hdf5_file(void *ds_rec,
+ char *ds_name, char *mnt_pt, char *fs_type);
+static void darshan_log_print_hdf5_file_description(int ver);
+static void darshan_log_print_hdf5_file_diff(void *ds_rec1, char *ds_name1,
+ void *ds_rec2, char *ds_name2);
static void darshan_log_agg_hdf5_files(void *rec, void *agg_rec, int init_flag);
-struct darshan_mod_logutil_funcs hdf5_logutils =
+static int darshan_log_get_hdf5_dataset(darshan_fd fd, void** hdf5_buf_p);
+static int darshan_log_put_hdf5_dataset(darshan_fd fd, void* hdf5_buf);
+static void darshan_log_print_hdf5_dataset(void *ds_rec,
+ char *ds_name, char *mnt_pt, char *fs_type);
+static void darshan_log_print_hdf5_dataset_description(int ver);
+static void darshan_log_print_hdf5_dataset_diff(void *ds_rec1, char *ds_name1,
+ void *ds_rec2, char *ds_name2);
+static void darshan_log_agg_hdf5_datasets(void *rec, void *agg_rec, int init_flag);
+
+struct darshan_mod_logutil_funcs hdf5_file_logutils =
{
.log_get_record = &darshan_log_get_hdf5_file,
.log_put_record = &darshan_log_put_hdf5_file,
.log_print_record = &darshan_log_print_hdf5_file,
- .log_print_description = &darshan_log_print_hdf5_description,
+ .log_print_description = &darshan_log_print_hdf5_file_description,
.log_print_diff = &darshan_log_print_hdf5_file_diff,
.log_agg_records = &darshan_log_agg_hdf5_files
};
+struct darshan_mod_logutil_funcs hdf5_dataset_logutils =
+{
+ .log_get_record = &darshan_log_get_hdf5_dataset,
+ .log_put_record = &darshan_log_put_hdf5_dataset,
+ .log_print_record = &darshan_log_print_hdf5_dataset,
+ .log_print_description = &darshan_log_print_hdf5_dataset_description,
+ .log_print_diff = &darshan_log_print_hdf5_dataset_diff,
+ .log_agg_records = &darshan_log_agg_hdf5_datasets
+};
+
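Note: splitting the old hdf5_logutils table into hdf5_file_logutils and hdf5_dataset_logutils lets darshan-util tools handle both record types through the same generic function-pointer interface. The sketch below shows how a tool might walk H5D records with the new table; the darshan_log_open()/darshan_log_close() usage and the includes are assumed to follow existing darshan-util tools, and error handling is abbreviated.

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include "darshan-logutils.h"

int dump_h5d_records(const char *logfile)
{
    darshan_fd fd;
    void *rec = NULL;
    struct darshan_hdf5_dataset *ds;
    int ret;

    fd = darshan_log_open(logfile);
    if(!fd)
        return(-1);

    /* log_get_record: 1 = record returned, 0 = no more H5D data, <0 = error;
     * the buffer is allocated on the first call and reused afterwards */
    while((ret = hdf5_dataset_logutils.log_get_record(fd, &rec)) == 1)
    {
        ds = (struct darshan_hdf5_dataset *)rec;
        printf("dataset %" PRIu64 " (rank %" PRId64 "): %" PRId64 " bytes read\n",
            ds->base_rec.id, ds->base_rec.rank, ds->counters[H5D_BYTES_READ]);
    }

    free(rec);
    darshan_log_close(fd);
    return(ret < 0 ? -1 : 0);
}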
static int darshan_log_get_hdf5_file(darshan_fd fd, void** hdf5_buf_p)
{
struct darshan_hdf5_file *file = *((struct darshan_hdf5_file **)hdf5_buf_p);
@@ -58,7 +83,7 @@ static int darshan_log_get_hdf5_file(darshan_fd fd, void** hdf5_buf_p)
int i;
int ret;
- if(fd->mod_map[DARSHAN_HDF5_MOD].len == 0)
+ if(fd->mod_map[DARSHAN_H5F_MOD].len == 0)
return(0);
if(*hdf5_buf_p == NULL)
@@ -68,13 +93,13 @@ static int darshan_log_get_hdf5_file(darshan_fd fd, void** hdf5_buf_p)
return(-1);
}
- if(fd->mod_ver[DARSHAN_HDF5_MOD] == DARSHAN_HDF5_VER)
+ if(fd->mod_ver[DARSHAN_H5F_MOD] == DARSHAN_H5F_VER)
{
/* log format is in current version, so we don't need to do any
* translation of counters while reading
*/
rec_len = sizeof(struct darshan_hdf5_file);
- ret = darshan_log_get_mod(fd, DARSHAN_HDF5_MOD, file, rec_len);
+ ret = darshan_log_get_mod(fd, DARSHAN_H5F_MOD, file, rec_len);
}
else
{
@@ -82,8 +107,8 @@ static int darshan_log_get_hdf5_file(darshan_fd fd, void** hdf5_buf_p)
char *src_p, *dest_p;
int len;
- rec_len = DARSHAN_HDF5_FILE_SIZE_1;
- ret = darshan_log_get_mod(fd, DARSHAN_HDF5_MOD, scratch, rec_len);
+ rec_len = DARSHAN_H5F_FILE_SIZE_1;
+ ret = darshan_log_get_mod(fd, DARSHAN_H5F_MOD, scratch, rec_len);
if(ret != rec_len)
goto exit;
@@ -120,16 +145,16 @@ exit:
{
DARSHAN_BSWAP64(&(file->base_rec.id));
DARSHAN_BSWAP64(&(file->base_rec.rank));
- for(i=0; i<HDF5_NUM_INDICES; i++)
+ for(i=0; i<H5F_NUM_INDICES; i++)
DARSHAN_BSWAP64(&file->counters[i]);
- for(i=0; i<HDF5_F_NUM_INDICES; i++)
+ for(i=0; i<H5F_F_NUM_INDICES; i++)
{
/* skip counters we explicitly set to -1 since they don't
* need to be byte swapped
*/
- if((fd->mod_ver[DARSHAN_HDF5_MOD] == 1) &&
- ((i == HDF5_F_CLOSE_START_TIMESTAMP) ||
- (i == HDF5_F_OPEN_END_TIMESTAMP)))
+ if((fd->mod_ver[DARSHAN_H5F_MOD] == 1) &&
+ ((i == H5F_F_CLOSE_START_TIMESTAMP) ||
+ (i == H5F_F_OPEN_END_TIMESTAMP)))
continue;
DARSHAN_BSWAP64(&file->fcounters[i]);
}
@@ -139,13 +164,82 @@ exit:
}
}
+static int darshan_log_get_hdf5_dataset(darshan_fd fd, void** hdf5_buf_p)
+{
+ struct darshan_hdf5_dataset *ds = *((struct darshan_hdf5_dataset **)hdf5_buf_p);
+ int rec_len;
+ int i;
+ int ret;
+
+ if(fd->mod_map[DARSHAN_H5D_MOD].len == 0)
+ return(0);
+
+ if(*hdf5_buf_p == NULL)
+ {
+ ds = malloc(sizeof(*ds));
+ if(!ds)
+ return(-1);
+ }
+
+ if(fd->mod_ver[DARSHAN_H5D_MOD] == DARSHAN_H5D_VER)
+ {
+ /* log format is in current version, so we don't need to do any
+ * translation of counters while reading
+ */
+ rec_len = sizeof(struct darshan_hdf5_dataset);
+ ret = darshan_log_get_mod(fd, DARSHAN_H5D_MOD, ds, rec_len);
+ }
+ else
+ {
+ /* version 1 is the only H5D log format so far; treat anything else
+ * as an error rather than reading with rec_len/ret uninitialized
+ */
+ rec_len = sizeof(struct darshan_hdf5_dataset);
+ ret = -1;
+ }
+
+exit:
+ if(*hdf5_buf_p == NULL)
+ {
+ if(ret == rec_len)
+ *hdf5_buf_p = ds;
+ else
+ free(ds);
+ }
+
+ if(ret < 0)
+ return(-1);
+ else if(ret < rec_len)
+ return(0);
+ else
+ {
+ /* if the read was successful, do any necessary byte-swapping */
+ if(fd->swap_flag)
+ {
+ DARSHAN_BSWAP64(&(ds->base_rec.id));
+ DARSHAN_BSWAP64(&(ds->base_rec.rank));
+ for(i=0; i<H5D_NUM_INDICES; i++)
+ DARSHAN_BSWAP64(&ds->counters[i]);
+ for(i=0; i<H5D_F_NUM_INDICES; i++)
+ DARSHAN_BSWAP64(&ds->fcounters[i]);
+ }
+
+ return(1);
+ }
+}
+
static int darshan_log_put_hdf5_file(darshan_fd fd, void* hdf5_buf)
{
struct darshan_hdf5_file *file = (struct darshan_hdf5_file *)hdf5_buf;
int ret;
- ret = darshan_log_put_mod(fd, DARSHAN_HDF5_MOD, file,
- sizeof(struct darshan_hdf5_file), DARSHAN_HDF5_VER);
+ ret = darshan_log_put_mod(fd, DARSHAN_H5F_MOD, file,
+ sizeof(struct darshan_hdf5_file), DARSHAN_H5F_VER);
+ if(ret < 0)
+ return(-1);
+
+ return(0);
+}
+
+static int darshan_log_put_hdf5_dataset(darshan_fd fd, void* hdf5_buf)
+{
+ struct darshan_hdf5_dataset *ds = (struct darshan_hdf5_dataset *)hdf5_buf;
+ int ret;
+
+ ret = darshan_log_put_mod(fd, DARSHAN_H5D_MOD, ds,
+ sizeof(struct darshan_hdf5_dataset), DARSHAN_H5D_VER);
if(ret < 0)
return(-1);
@@ -159,42 +253,102 @@ static void darshan_log_print_hdf5_file(void *file_rec, char *file_name,
struct darshan_hdf5_file *hdf5_file_rec =
(struct darshan_hdf5_file *)file_rec;
- for(i=0; i<HDF5_NUM_INDICES; i++)
+ for(i=0; i<H5F_NUM_INDICES; i++)
{
- DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_HDF5_MOD],
+ DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_H5F_MOD],
hdf5_file_rec->base_rec.rank, hdf5_file_rec->base_rec.id,
- hdf5_counter_names[i], hdf5_file_rec->counters[i],
+ h5f_counter_names[i], hdf5_file_rec->counters[i],
file_name, mnt_pt, fs_type);
}
- for(i=0; i<HDF5_F_NUM_INDICES; i++)
+ for(i=0; i<H5F_F_NUM_INDICES; i++)
{
- DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_HDF5_MOD],
+ DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_H5F_MOD],
hdf5_file_rec->base_rec.rank, hdf5_file_rec->base_rec.id,
- hdf5_f_counter_names[i], hdf5_file_rec->fcounters[i],
+ h5f_f_counter_names[i], hdf5_file_rec->fcounters[i],
file_name, mnt_pt, fs_type);
}
return;
}
-static void darshan_log_print_hdf5_description(int ver)
+static void darshan_log_print_hdf5_dataset(void *ds_rec, char *ds_name,
+ char *mnt_pt, char *fs_type)
+{
+ int i;
+ struct darshan_hdf5_dataset *hdf5_ds_rec =
+ (struct darshan_hdf5_dataset *)ds_rec;
+
+ for(i=0; i<H5D_NUM_INDICES; i++)
+ {
+ DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_H5D_MOD],
+ hdf5_ds_rec->base_rec.rank, hdf5_ds_rec->base_rec.id,
+ h5d_counter_names[i], hdf5_ds_rec->counters[i],
+ ds_name, mnt_pt, fs_type);
+ }
+
+ for(i=0; i<H5D_F_NUM_INDICES; i++)
+ {
+ DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_H5D_MOD],
+ hdf5_ds_rec->base_rec.rank, hdf5_ds_rec->base_rec.id,
+ h5d_f_counter_names[i], hdf5_ds_rec->fcounters[i],
+ ds_name, mnt_pt, fs_type);
+ }
+
+ return;
+}
+
+static void darshan_log_print_hdf5_file_description(int ver)
{
printf("\n# description of HDF5 counters:\n");
- printf("# HDF5_OPENS: HDF5 file open operation counts.\n");
- printf("# HDF5_F_*_START_TIMESTAMP: timestamp of first HDF5 file open/close.\n");
- printf("# HDF5_F_*_END_TIMESTAMP: timestamp of last HDF5 file open/close.\n");
+ printf("# H5F_OPENS: HDF5 file open/create operation counts.\n");
+ printf("# H5F_FLUSHES: HDF5 file flush operation counts.\n");
+ printf("# H5F_USE_MPIIO: flag indicating whether MPI-IO was used to access this file.\n");
+ printf("# H5F_F_*_START_TIMESTAMP: timestamp of first HDF5 file open/close.\n");
+ printf("# H5F_F_*_END_TIMESTAMP: timestamp of last HDF5 file open/close.\n");
+ printf("# H5F_F_META_TIME: cumulative time spent in HDF5 metadata operations.\n");
if(ver == 1)
{
printf("\n# WARNING: HDF5 module log format version 1 does not support the following counters:\n");
- printf("# - HDF5_F_CLOSE_START_TIMESTAMP\n");
- printf("# - HDF5_F_OPEN_END_TIMESTAMP\n");
+ printf("# - H5F_F_CLOSE_START_TIMESTAMP\n");
+ printf("# - H5F_F_OPEN_END_TIMESTAMP\n");
}
return;
}
+static void darshan_log_print_hdf5_dataset_description(int ver)
+{
+ printf("\n# description of HDF5 counters:\n");
+ printf("# H5D_OPENS: HDF5 dataset open/create operation counts.\n");
+ printf("# H5D_READS: HDF5 dataset read operation counts.\n");
+ printf("# H5D_WRITES: HDF5 dataset write operation counts.\n");
+ printf("# H5D_FLUSHES: HDF5 dataset flush operation counts.\n");
+ printf("# H5D_BYTES_*: total bytes read and written at HDF5 dataset layer.\n");
+ printf("# H5D_RW_SWITCHES: number of times access alternated between read and write.\n");
+ printf("# H5D_*_SELECTS: number of different access selections (regular/irregular hyperslab or points).\n");
+ printf("# H5D_MAX_*_TIME_SIZE: size of the slowest read and write operations.\n");
+ printf("# H5D_SIZE_*_AGG_*: histogram of H5D total access sizes for read and write operations.\n");
+ printf("# H5D_ACCESS*_ACCESS: the four most common total access sizes.\n");
+ printf("# H5D_ACCESS*_COUNT: count of the four most common total access sizes.\n");
+ printf("# H5D_DATASPACE_NDIMS: number of dimensions in dataset's dataspace.\n");
+ printf("# H5D_DATASPACE_NPOINTS: number of points in dataset's dataspace.\n");
+ printf("# H5D_DATATYPE_SIZE: size of each dataset element.\n");
+ printf("# H5F_USE_MPIIO_COLLECTIVE: flag indicating whether MPI-IO collectives were used to access this file.\n");
+ printf("# H5F_USE_DEPRECATED: flag indicating whether deprecated H5D calls were used.\n");
+ printf("# H5D_*_RANK: rank of the processes that were the fastest and slowest at I/O (for shared files).\n");
+ printf("# H5D_*_RANK_BYTES: total bytes transferred at H5D layer by the fastest and slowest ranks (for shared files).\n");
+ printf("# H5D_F_*_START_TIMESTAMP: timestamp of first HDF5 file open/read/write/close.\n");
+ printf("# H5D_F_*_END_TIMESTAMP: timestamp of last HDF5 file open/read/write/close.\n");
+ printf("# H5D_F_READ/WRITE/META_TIME: cumulative time spent in H5D read, write, or metadata operations.\n");
+ printf("# H5D_F_MAX_*_TIME: duration of the slowest H5D read and write operations.\n");
+ printf("# H5D_F_*_RANK_TIME: fastest and slowest I/O time for a single rank (for shared files).\n");
+ printf("# H5D_F_VARIANCE_RANK_*: variance of total I/O time and bytes moved for all ranks (for shared files).\n");
+
+ return;
+}
+
static void darshan_log_print_hdf5_file_diff(void *file_rec1, char *file_name1,
void *file_rec2, char *file_name2)
{
@@ -204,62 +358,62 @@ static void darshan_log_print_hdf5_file_diff(void *file_rec1, char *file_name1,
/* NOTE: we assume that both input records are the same module format version */
- for(i=0; i<HDF5_NUM_INDICES; i++)
+ for(i=0; i<H5F_NUM_INDICES; i++)
{
if(!file2)
{
printf("- ");
- DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_HDF5_MOD],
- file1->base_rec.rank, file1->base_rec.id, hdf5_counter_names[i],
+ DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_H5F_MOD],
+ file1->base_rec.rank, file1->base_rec.id, h5f_counter_names[i],
file1->counters[i], file_name1, "", "");
}
else if(!file1)
{
printf("+ ");
- DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_HDF5_MOD],
- file2->base_rec.rank, file2->base_rec.id, hdf5_counter_names[i],
+ DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_H5F_MOD],
+ file2->base_rec.rank, file2->base_rec.id, h5f_counter_names[i],
file2->counters[i], file_name2, "", "");
}
else if(file1->counters[i] != file2->counters[i])
{
printf("- ");
- DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_HDF5_MOD],
- file1->base_rec.rank, file1->base_rec.id, hdf5_counter_names[i],
+ DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_H5F_MOD],
+ file1->base_rec.rank, file1->base_rec.id, h5f_counter_names[i],
file1->counters[i], file_name1, "", "");
printf("+ ");
- DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_HDF5_MOD],
- file2->base_rec.rank, file2->base_rec.id, hdf5_counter_names[i],
+ DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_H5F_MOD],
+ file2->base_rec.rank, file2->base_rec.id, h5f_counter_names[i],
file2->counters[i], file_name2, "", "");
}
}
- for(i=0; i<HDF5_F_NUM_INDICES; i++)
+ for(i=0; i<H5F_F_NUM_INDICES; i++)
{
if(!file2)
{
printf("- ");
- DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_HDF5_MOD],
- file1->base_rec.rank, file1->base_rec.id, hdf5_f_counter_names[i],
+ DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_H5F_MOD],
+ file1->base_rec.rank, file1->base_rec.id, h5f_f_counter_names[i],
file1->fcounters[i], file_name1, "", "");
}
else if(!file1)
{
printf("+ ");
- DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_HDF5_MOD],
- file2->base_rec.rank, file2->base_rec.id, hdf5_f_counter_names[i],
+ DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_H5F_MOD],
+ file2->base_rec.rank, file2->base_rec.id, h5f_f_counter_names[i],
file2->fcounters[i], file_name2, "", "");
}
else if(file1->fcounters[i] != file2->fcounters[i])
{
printf("- ");
- DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_HDF5_MOD],
- file1->base_rec.rank, file1->base_rec.id, hdf5_f_counter_names[i],
+ DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_H5F_MOD],
+ file1->base_rec.rank, file1->base_rec.id, h5f_f_counter_names[i],
file1->fcounters[i], file_name1, "", "");
printf("+ ");
- DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_HDF5_MOD],
- file2->base_rec.rank, file2->base_rec.id, hdf5_f_counter_names[i],
+ DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_H5F_MOD],
+ file2->base_rec.rank, file2->base_rec.id, h5f_f_counter_names[i],
file2->fcounters[i], file_name2, "", "");
}
}
@@ -267,32 +421,310 @@ static void darshan_log_print_hdf5_file_diff(void *file_rec1, char *file_name1,
return;
}
+static void darshan_log_print_hdf5_dataset_diff(void *ds_rec1, char *ds_name1,
+ void *ds_rec2, char *ds_name2)
+{
+ struct darshan_hdf5_dataset *ds1 = (struct darshan_hdf5_dataset *)ds_rec1;
+ struct darshan_hdf5_dataset *ds2 = (struct darshan_hdf5_dataset *)ds_rec2;
+ int i;
+
+ /* NOTE: we assume that both input records are the same module format version */
+
+ for(i=0; i<H5D_NUM_INDICES; i++)
+ {
+ if(!ds2)
+ {
+ printf("- ");
+ DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_H5D_MOD],
+ ds1->base_rec.rank, ds1->base_rec.id, h5d_counter_names[i],
+ ds1->counters[i], ds_name1, "", "");
+
+ }
+ else if(!ds1)
+ {
+ printf("+ ");
+ DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_H5D_MOD],
+ ds2->base_rec.rank, ds2->base_rec.id, h5d_counter_names[i],
+ ds2->counters[i], ds_name2, "", "");
+ }
+ else if(ds1->counters[i] != ds2->counters[i])
+ {
+ printf("- ");
+ DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_H5D_MOD],
+ ds1->base_rec.rank, ds1->base_rec.id, h5d_counter_names[i],
+ ds1->counters[i], ds_name1, "", "");
+ printf("+ ");
+ DARSHAN_D_COUNTER_PRINT(darshan_module_names[DARSHAN_H5D_MOD],
+ ds2->base_rec.rank, ds2->base_rec.id, h5d_counter_names[i],
+ ds2->counters[i], ds_name2, "", "");
+ }
+ }
+
+ for(i=0; i<H5D_F_NUM_INDICES; i++)
+ {
+ if(!ds2)
+ {
+ printf("- ");
+ DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_H5D_MOD],
+ ds1->base_rec.rank, ds1->base_rec.id, h5d_f_counter_names[i],
+ ds1->fcounters[i], ds_name1, "", "");
+
+ }
+ else if(!ds1)
+ {
+ printf("+ ");
+ DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_H5D_MOD],
+ ds2->base_rec.rank, ds2->base_rec.id, h5d_f_counter_names[i],
+ ds2->fcounters[i], ds_name2, "", "");
+ }
+ else if(ds1->fcounters[i] != ds2->fcounters[i])
+ {
+ printf("- ");
+ DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_H5D_MOD],
+ ds1->base_rec.rank, ds1->base_rec.id, h5d_f_counter_names[i],
+ ds1->fcounters[i], ds_name1, "", "");
+ printf("+ ");
+ DARSHAN_F_COUNTER_PRINT(darshan_module_names[DARSHAN_H5D_MOD],
+ ds2->base_rec.rank, ds2->base_rec.id, h5d_f_counter_names[i],
+ ds2->fcounters[i], ds_name2, "", "");
+ }
+ }
+
+ return;
+}
+
static void darshan_log_agg_hdf5_files(void *rec, void *agg_rec, int init_flag)
{
struct darshan_hdf5_file *hdf5_rec = (struct darshan_hdf5_file *)rec;
struct darshan_hdf5_file *agg_hdf5_rec = (struct darshan_hdf5_file *)agg_rec;
int i;
- for(i = 0; i < HDF5_NUM_INDICES; i++)
+ for(i = 0; i < H5F_NUM_INDICES; i++)
+ {
+ switch(i)
+ {
+ case H5F_OPENS:
+ case H5F_FLUSHES:
+ /* sum */
+ agg_hdf5_rec->counters[i] += hdf5_rec->counters[i];
+ break;
+ case H5F_USE_MPIIO:
+ if(hdf5_rec->counters[i] > 0)
+ agg_hdf5_rec->counters[i] = 1;
+ break;
+ default:
+ agg_hdf5_rec->counters[i] = -1;
+ break;
+ }
+ }
+
+ for(i = 0; i < H5F_F_NUM_INDICES; i++)
+ {
+ switch(i)
+ {
+ case H5F_F_OPEN_START_TIMESTAMP:
+ case H5F_F_CLOSE_START_TIMESTAMP:
+ /* minimum non-zero */
+ if((hdf5_rec->fcounters[i] > 0) &&
+ ((agg_hdf5_rec->fcounters[i] == 0) ||
+ (hdf5_rec->fcounters[i] < agg_hdf5_rec->fcounters[i])))
+ {
+ agg_hdf5_rec->fcounters[i] = hdf5_rec->fcounters[i];
+ }
+ break;
+ case H5F_F_OPEN_END_TIMESTAMP:
+ case H5F_F_CLOSE_END_TIMESTAMP:
+ /* maximum */
+ if(hdf5_rec->fcounters[i] > agg_hdf5_rec->fcounters[i])
+ {
+ agg_hdf5_rec->fcounters[i] = hdf5_rec->fcounters[i];
+ }
+ break;
+ case H5F_F_META_TIME:
+ /* sum */
+ agg_hdf5_rec->fcounters[i] += hdf5_rec->fcounters[i];
+ break;
+ default:
+ agg_hdf5_rec->fcounters[i] = -1;
+ break;
+ }
+ }
+
+ return;
+}
+
+/* simple helper struct for determining time & byte variances */
+struct var_t
+{
+ double n;
+ double M;
+ double S;
+};
+
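Note: var_t holds the state for a running (Welford-style) variance: n samples seen, running mean M, and running sum of squared deviations S. The H5D_F_VARIANCE_RANK_* cases below update it one rank at a time; a standalone sketch of the same update, with made-up sample values:

#include <stdio.h>

struct toy_var { double n; double M; double S; };

/* one Welford update step, mirroring the init_flag/else logic used below */
static void toy_var_update(struct toy_var *v, double x)
{
    double old_M = v->M;

    v->n++;
    v->M += (x - v->M) / v->n;          /* running mean */
    v->S += (x - v->M) * (x - old_M);   /* running sum of squared deviations */
}

int main(void)
{
    struct toy_var v = { 0.0, 0.0, 0.0 };
    double samples[] = { 1.0, 2.0, 4.0, 8.0 };  /* e.g. per-rank I/O times */
    int i;

    for(i = 0; i < 4; i++)
        toy_var_update(&v, samples[i]);

    /* population variance, matching the S / n stored by the aggregator */
    printf("variance = %f\n", v.S / v.n);
    return 0;
}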
+static void darshan_log_agg_hdf5_datasets(void *rec, void *agg_rec, int init_flag)
+{
+ struct darshan_hdf5_dataset *hdf5_rec = (struct darshan_hdf5_dataset *)rec;
+ struct darshan_hdf5_dataset *agg_hdf5_rec = (struct darshan_hdf5_dataset *)agg_rec;
+ int i, j, k;
+ int total_count;
+ int64_t tmp_val[4];
+ int64_t tmp_cnt[4];
+ int tmp_ndx;
+ double old_M;
+ double hdf5_time = hdf5_rec->fcounters[H5D_F_READ_TIME] +
+ hdf5_rec->fcounters[H5D_F_WRITE_TIME] +
+ hdf5_rec->fcounters[H5D_F_META_TIME];
+ double hdf5_bytes = (double)hdf5_rec->counters[H5D_BYTES_READ] +
+ hdf5_rec->counters[H5D_BYTES_WRITTEN];
+ struct var_t *var_time_p = (struct var_t *)
+ ((char *)rec + sizeof(struct darshan_hdf5_dataset));
+ struct var_t *var_bytes_p = (struct var_t *)
+ ((char *)var_time_p + sizeof(struct var_t));
+
+ for(i = 0; i < H5D_NUM_INDICES; i++)
{
switch(i)
{
- case HDF5_OPENS:
+ case H5D_OPENS:
+ case H5D_READS:
+ case H5D_WRITES:
+ case H5D_FLUSHES:
+ case H5D_BYTES_READ:
+ case H5D_BYTES_WRITTEN:
+ case H5D_RW_SWITCHES:
+ case H5D_REGULAR_HYPERSLAB_SELECTS:
+ case H5D_IRREGULAR_HYPERSLAB_SELECTS:
+ case H5D_POINT_SELECTS:
+ case H5D_SIZE_READ_AGG_0_100:
+ case H5D_SIZE_READ_AGG_100_1K:
+ case H5D_SIZE_READ_AGG_1K_10K:
+ case H5D_SIZE_READ_AGG_10K_100K:
+ case H5D_SIZE_READ_AGG_100K_1M:
+ case H5D_SIZE_READ_AGG_1M_4M:
+ case H5D_SIZE_READ_AGG_4M_10M:
+ case H5D_SIZE_READ_AGG_10M_100M:
+ case H5D_SIZE_READ_AGG_100M_1G:
+ case H5D_SIZE_READ_AGG_1G_PLUS:
+ case H5D_SIZE_WRITE_AGG_0_100:
+ case H5D_SIZE_WRITE_AGG_100_1K:
+ case H5D_SIZE_WRITE_AGG_1K_10K:
+ case H5D_SIZE_WRITE_AGG_10K_100K:
+ case H5D_SIZE_WRITE_AGG_100K_1M:
+ case H5D_SIZE_WRITE_AGG_1M_4M:
+ case H5D_SIZE_WRITE_AGG_4M_10M:
+ case H5D_SIZE_WRITE_AGG_10M_100M:
+ case H5D_SIZE_WRITE_AGG_100M_1G:
+ case H5D_SIZE_WRITE_AGG_1G_PLUS:
/* sum */
agg_hdf5_rec->counters[i] += hdf5_rec->counters[i];
break;
+ case H5D_MAX_READ_TIME_SIZE:
+ case H5D_MAX_WRITE_TIME_SIZE:
+ case H5D_FASTEST_RANK:
+ case H5D_FASTEST_RANK_BYTES:
+ case H5D_SLOWEST_RANK:
+ case H5D_SLOWEST_RANK_BYTES:
+ /* these are set with the FP counters */
+ break;
+ case H5D_ACCESS1_ACCESS:
+ /* increment common value counters */
+ if(hdf5_rec->counters[i] == 0) break;
+
+ /* first, collapse duplicates */
+ for(j = i; j < i + 4; j++)
+ {
+ for(k = 0; k < 4; k++)
+ {
+ if(agg_hdf5_rec->counters[i + k] == hdf5_rec->counters[j])
+ {
+ agg_hdf5_rec->counters[i + k + 4] += hdf5_rec->counters[j + 4];
+ hdf5_rec->counters[j] = hdf5_rec->counters[j + 4] = 0;
+ }
+ }
+ }
+
+ /* second, add new counters */
+ for(j = i; j < i + 4; j++)
+ {
+ tmp_ndx = 0;
+ memset(tmp_val, 0, 4 * sizeof(int64_t));
+ memset(tmp_cnt, 0, 4 * sizeof(int64_t));
+
+ if(hdf5_rec->counters[j] == 0) break;
+ for(k = 0; k < 4; k++)
+ {
+ if(agg_hdf5_rec->counters[i + k] == hdf5_rec->counters[j])
+ {
+ total_count = agg_hdf5_rec->counters[i + k + 4] +
+ hdf5_rec->counters[j + 4];
+ break;
+ }
+ }
+ if(k == 4) total_count = hdf5_rec->counters[j + 4];
+
+ for(k = 0; k < 4; k++)
+ {
+ if((agg_hdf5_rec->counters[i + k + 4] > total_count) ||
+ ((agg_hdf5_rec->counters[i + k + 4] == total_count) &&
+ (agg_hdf5_rec->counters[i + k] > hdf5_rec->counters[j])))
+ {
+ tmp_val[tmp_ndx] = agg_hdf5_rec->counters[i + k];
+ tmp_cnt[tmp_ndx] = agg_hdf5_rec->counters[i + k + 4];
+ tmp_ndx++;
+ }
+ else break;
+ }
+ if(tmp_ndx == 4) break;
+
+ tmp_val[tmp_ndx] = hdf5_rec->counters[j];
+ tmp_cnt[tmp_ndx] = hdf5_rec->counters[j + 4];
+ tmp_ndx++;
+
+ while(tmp_ndx != 4)
+ {
+ if(agg_hdf5_rec->counters[i + k] != hdf5_rec->counters[j])
+ {
+ tmp_val[tmp_ndx] = agg_hdf5_rec->counters[i + k];
+ tmp_cnt[tmp_ndx] = agg_hdf5_rec->counters[i + k + 4];
+ tmp_ndx++;
+ }
+ k++;
+ }
+ memcpy(&(agg_hdf5_rec->counters[i]), tmp_val, 4 * sizeof(int64_t));
+ memcpy(&(agg_hdf5_rec->counters[i + 4]), tmp_cnt, 4 * sizeof(int64_t));
+ }
+ break;
+ case H5D_DATASPACE_NDIMS:
+ case H5D_DATASPACE_NPOINTS:
+ case H5D_DATATYPE_SIZE:
+ /* just set to the input value */
+ agg_hdf5_rec->counters[i] = hdf5_rec->counters[i];
+ break;
+ case H5D_USE_MPIIO_COLLECTIVE:
+ case H5D_USE_DEPRECATED:
+ if(hdf5_rec->counters[i] > 0)
+ agg_hdf5_rec->counters[i] = 1;
+ break;
default:
agg_hdf5_rec->counters[i] = -1;
break;
}
}
- for(i = 0; i < HDF5_F_NUM_INDICES; i++)
+ for(i = 0; i < H5D_F_NUM_INDICES; i++)
{
switch(i)
{
- case HDF5_F_OPEN_START_TIMESTAMP:
- case HDF5_F_CLOSE_START_TIMESTAMP:
+ case H5D_F_READ_TIME:
+ case H5D_F_WRITE_TIME:
+ case H5D_F_META_TIME:
+ /* sum */
+ agg_hdf5_rec->fcounters[i] += hdf5_rec->fcounters[i];
+ break;
+ case H5D_F_OPEN_START_TIMESTAMP:
+ case H5D_F_READ_START_TIMESTAMP:
+ case H5D_F_WRITE_START_TIMESTAMP:
+ case H5D_F_CLOSE_START_TIMESTAMP:
/* minimum non-zero */
if((hdf5_rec->fcounters[i] > 0) &&
((agg_hdf5_rec->fcounters[i] == 0) ||
@@ -301,14 +733,106 @@ static void darshan_log_agg_hdf5_files(void *rec, void *agg_rec, int init_flag)
agg_hdf5_rec->fcounters[i] = hdf5_rec->fcounters[i];
}
break;
- case HDF5_F_OPEN_END_TIMESTAMP:
- case HDF5_F_CLOSE_END_TIMESTAMP:
+ case H5D_F_OPEN_END_TIMESTAMP:
+ case H5D_F_READ_END_TIMESTAMP:
+ case H5D_F_WRITE_END_TIMESTAMP:
+ case H5D_F_CLOSE_END_TIMESTAMP:
/* maximum */
if(hdf5_rec->fcounters[i] > agg_hdf5_rec->fcounters[i])
{
agg_hdf5_rec->fcounters[i] = hdf5_rec->fcounters[i];
}
break;
+ case H5D_F_MAX_READ_TIME:
+ if(hdf5_rec->fcounters[i] > agg_hdf5_rec->fcounters[i])
+ {
+ agg_hdf5_rec->fcounters[i] = hdf5_rec->fcounters[i];
+ agg_hdf5_rec->counters[H5D_MAX_READ_TIME_SIZE] =
+ hdf5_rec->counters[H5D_MAX_READ_TIME_SIZE];
+ }
+ break;
+ case H5D_F_MAX_WRITE_TIME:
+ if(hdf5_rec->fcounters[i] > agg_hdf5_rec->fcounters[i])
+ {
+ agg_hdf5_rec->fcounters[i] = hdf5_rec->fcounters[i];
+ agg_hdf5_rec->counters[H5D_MAX_WRITE_TIME_SIZE] =
+ hdf5_rec->counters[H5D_MAX_WRITE_TIME_SIZE];
+ }
+ break;
+ case H5D_F_FASTEST_RANK_TIME:
+ if(init_flag)
+ {
+ /* set fastest rank counters according to root rank. these counters
+ * will be determined as the aggregation progresses.
+ */
+ agg_hdf5_rec->counters[H5D_FASTEST_RANK] = hdf5_rec->base_rec.rank;
+ agg_hdf5_rec->counters[H5D_FASTEST_RANK_BYTES] = hdf5_bytes;
+ agg_hdf5_rec->fcounters[H5D_F_FASTEST_RANK_TIME] = hdf5_time;
+ }
+
+ if(hdf5_time < agg_hdf5_rec->fcounters[H5D_F_FASTEST_RANK_TIME])
+ {
+ agg_hdf5_rec->counters[H5D_FASTEST_RANK] = hdf5_rec->base_rec.rank;
+ agg_hdf5_rec->counters[H5D_FASTEST_RANK_BYTES] = hdf5_bytes;
+ agg_hdf5_rec->fcounters[H5D_F_FASTEST_RANK_TIME] = hdf5_time;
+ }
+ break;
+ case H5D_F_SLOWEST_RANK_TIME:
+ if(init_flag)
+ {
+ /* set slowest rank counters according to root rank. these counters
+ * will be determined as the aggregation progresses.
+ */
+ agg_hdf5_rec->counters[H5D_SLOWEST_RANK] = hdf5_rec->base_rec.rank;
+ agg_hdf5_rec->counters[H5D_SLOWEST_RANK_BYTES] = hdf5_bytes;
+ agg_hdf5_rec->fcounters[H5D_F_SLOWEST_RANK_TIME] = hdf5_time;
+ }
+
+ if(hdf5_time > agg_hdf5_rec->fcounters[H5D_F_SLOWEST_RANK_TIME])
+ {
+ agg_hdf5_rec->counters[H5D_SLOWEST_RANK] = hdf5_rec->base_rec.rank;
+ agg_hdf5_rec->counters[H5D_SLOWEST_RANK_BYTES] = hdf5_bytes;
+ agg_hdf5_rec->fcounters[H5D_F_SLOWEST_RANK_TIME] = hdf5_time;
+ }
+ break;
+ case H5D_F_VARIANCE_RANK_TIME:
+ if(init_flag)
+ {
+ var_time_p->n = 1;
+ var_time_p->M = hdf5_time;
+ var_time_p->S = 0;
+ }
+ else
+ {
+ old_M = var_time_p->M;
+
+ var_time_p->n++;
+ var_time_p->M += (hdf5_time - var_time_p->M) / var_time_p->n;
+ var_time_p->S += (hdf5_time - var_time_p->M) * (hdf5_time - old_M);
+
+ agg_hdf5_rec->fcounters[H5D_F_VARIANCE_RANK_TIME] =
+ var_time_p->S / var_time_p->n;
+ }
+ break;
+ case H5D_F_VARIANCE_RANK_BYTES:
+ if(init_flag)
+ {
+ var_bytes_p->n = 1;
+ var_bytes_p->M = hdf5_bytes;
+ var_bytes_p->S = 0;
+ }
+ else
+ {
+ old_M = var_bytes_p->M;
+
+ var_bytes_p->n++;
+ var_bytes_p->M += (hdf5_bytes - var_bytes_p->M) / var_bytes_p->n;
+ var_bytes_p->S += (hdf5_bytes - var_bytes_p->M) * (hdf5_bytes - old_M);
+
+ agg_hdf5_rec->fcounters[H5D_F_VARIANCE_RANK_BYTES] =
+ var_bytes_p->S / var_bytes_p->n;
+ }
+ break;
default:
agg_hdf5_rec->fcounters[i] = -1;
break;
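Note: the H5D_ACCESS1_ACCESS case above merges the "four most common access size" value/count pairs of two records: duplicate values are collapsed first, then the incoming pairs are re-inserted in count order. The simplified sketch below rebuilds the top-4 list from scratch instead of merging in place, and skips the tie-breaking on equal counts, so it is illustrative only.

#include <stdio.h>
#include <stdint.h>

#define NCOMMON 4

struct common { int64_t val; int64_t cnt; };

static void merge_common(struct common agg[NCOMMON], const struct common in[NCOMMON])
{
    struct common pool[2 * NCOMMON];
    struct common tmp;
    int i, j, best, n = 0;

    /* pool the aggregate's slots (zeros included), so n is always >= NCOMMON */
    for(i = 0; i < NCOMMON; i++)
        pool[n++] = agg[i];

    /* fold the incoming pairs in, collapsing duplicate access sizes */
    for(i = 0; i < NCOMMON; i++)
    {
        if(in[i].val == 0) continue;   /* 0 marks an unused slot */
        for(j = 0; j < n; j++)
        {
            if(pool[j].val == in[i].val)
            {
                pool[j].cnt += in[i].cnt;
                break;
            }
        }
        if(j == n)
            pool[n++] = in[i];
    }

    /* keep the NCOMMON entries with the highest counts (selection sort) */
    for(i = 0; i < NCOMMON; i++)
    {
        best = i;
        for(j = i + 1; j < n; j++)
            if(pool[j].cnt > pool[best].cnt)
                best = j;
        tmp = pool[i]; pool[i] = pool[best]; pool[best] = tmp;
        agg[i] = pool[i];
    }
}

int main(void)
{
    struct common agg[NCOMMON] = { {4096, 10}, {65536, 3}, {0, 0}, {0, 0} };
    struct common in[NCOMMON]  = { {4096, 5}, {1048576, 7}, {0, 0}, {0, 0} };
    int i;

    merge_common(agg, in);
    for(i = 0; i < NCOMMON; i++)
        printf("access size %lld seen %lld times\n",
            (long long)agg[i].val, (long long)agg[i].cnt);
    return 0;
}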
=====================================
darshan-util/darshan-hdf5-logutils.h
=====================================
@@ -7,9 +7,13 @@
#ifndef __DARSHAN_HDF5_LOG_UTILS_H
#define __DARSHAN_HDF5_LOG_UTILS_H
-extern char *hdf5_counter_names[];
-extern char *hdf5_f_counter_names[];
+extern char *h5f_counter_names[];
+extern char *h5f_f_counter_names[];
-extern struct darshan_mod_logutil_funcs hdf5_logutils;
+extern char *h5d_counter_names[];
+extern char *h5d_f_counter_names[];
+
+extern struct darshan_mod_logutil_funcs hdf5_file_logutils;
+extern struct darshan_mod_logutil_funcs hdf5_dataset_logutils;
#endif
=====================================
darshan-util/darshan-logutils.c
=====================================
@@ -896,6 +896,10 @@ static int darshan_log_get_header(darshan_fd fd)
fd->state->get_namerecs = darshan_log_get_namerecs_3_00;
}
else if(strcmp(fd->version, "3.10") == 0)
+ {
+ /* XXX */
+ }
+ else if(strcmp(fd->version, "3.20") == 0)
{
fd->state->get_namerecs = darshan_log_get_namerecs;
}
View it on GitLab: https://xgitlab.cels.anl.gov/darshan/darshan/compare/d89229b7b1c04aa4900ec7fa66e95bb678a53839...55c469885b7e17d0995c4c67a1e205d0dc4d18ee