[MOAB-dev] r3898 - MOAB/trunk/src/io
kraftche at cae.wisc.edu
Thu May 13 14:31:01 CDT 2010
Author: kraftche
Date: 2010-05-13 14:31:01 -0500 (Thu, 13 May 2010)
New Revision: 3898
Modified:
MOAB/trunk/src/io/ReadHDF5.cpp
MOAB/trunk/src/io/ReadHDF5.hpp
Log:
fix deadlock when one or more procs get no elements, and add more debug output
Modified: MOAB/trunk/src/io/ReadHDF5.cpp
===================================================================
--- MOAB/trunk/src/io/ReadHDF5.cpp 2010-05-13 17:38:16 UTC (rev 3897)
+++ MOAB/trunk/src/io/ReadHDF5.cpp 2010-05-13 19:31:01 UTC (rev 3898)
@@ -94,6 +94,19 @@
result.merge( s, e );
}
+#define debug_barrier() debug_barrier_line(__LINE__)
+void ReadHDF5::debug_barrier_line(int lineno)
+{
+#ifdef USE_MPI
+ const unsigned threshold = 2;
+ static unsigned long count = 0;
+ if (dbgOut.get_verbosity() >= threshold) {
+ dbgOut.printf( threshold, "*********** Debug Barrier %lu (@%d)***********\n", ++count, lineno);
+ MPI_Barrier( myPcomm->proc_config().proc_comm() );
+ }
+#endif
+}
+
ReaderIface* ReadHDF5::factory( Interface* iface )
{ return new ReadHDF5( iface ); }
@@ -144,6 +157,7 @@
idMap.clear();
fileInfo = 0;
debugTrack = false;
+ myPcomm = 0;
return MB_SUCCESS;
}
@@ -185,7 +199,7 @@
bool use_mpio = (MB_SUCCESS == opts.get_null_option("USE_MPIO"));
rval = opts.match_option("PARALLEL", "READ_PART");
bool parallel = (rval != MB_ENTITY_NOT_FOUND);
- bool native_parallel = (rval == MB_SUCCESS);
+ nativeParallel = (rval == MB_SUCCESS);
if (use_mpio && !parallel) {
readUtil->report_error( "'USE_MPIO' option specified w/out 'PARALLEL' option" );
return MB_NOT_IMPLEMENTED;
@@ -207,7 +221,7 @@
if (!dataBuffer)
return error(MB_MEMORY_ALLOCATION_FAILED);
- if (use_mpio || native_parallel) {
+ if (use_mpio || nativeParallel) {
#ifndef HDF5_PARALLEL
readUtil->report_error("MOAB not configured with parallel HDF5 support");
free(dataBuffer);
@@ -219,7 +233,7 @@
More information about the moab-dev
mailing list