[MOAB-dev] r2003 - MOAB/trunk
kraftche at mcs.anl.gov
kraftche at mcs.anl.gov
Wed Jul 9 12:28:02 CDT 2008
Author: kraftche
Date: 2008-07-09 12:28:02 -0500 (Wed, 09 Jul 2008)
New Revision: 2003
Modified:
MOAB/trunk/ReadHDF5.cpp
MOAB/trunk/ReadHDF5.hpp
Log:
o Clean up error handling in ReadHDF5
o If reading same file on all procs in parallel and
'USE_MPIO' option is specified, request collective
IO for all reads from HDF5 tables.
Modified: MOAB/trunk/ReadHDF5.cpp
===================================================================
--- MOAB/trunk/ReadHDF5.cpp 2008-07-09 17:10:15 UTC (rev 2002)
+++ MOAB/trunk/ReadHDF5.cpp 2008-07-09 17:28:02 UTC (rev 2003)
@@ -65,7 +65,8 @@
iFace( iface ),
filePtr( 0 ),
readUtil( 0 ),
- handleType( 0 )
+ handleType( 0 ),
+ ioProp( H5P_DEFAULT )
{
}
@@ -76,6 +77,7 @@
if (readUtil)
return MB_SUCCESS;
+ ioProp = H5P_DEFAULT;
//WriteHDF5::register_known_tag_types( iFace );
handleType = H5Tcopy( H5T_NATIVE_ULONG );
@@ -119,74 +121,132 @@
MBErrorCode ReadHDF5::load_file( const char* filename,
MBEntityHandle& file_set,
- const FileOptions&opts,
- const int*,
+ const FileOptions& opts,
+ const int* p,
const int num_blocks )
{
MBErrorCode rval;
mhdf_Status status;
- std::string tagname;
- int num_tags = 0;
- char** tag_names = NULL;
- char** groups = NULL;
- std::list<ElemSet>::iterator el_itor;
- std::list<ElemSet>::reverse_iterator rel_itor;
- unsigned int i, num_groups;
- bool have_nodes = true;
- file_set = 0;
+ ioProp = H5P_DEFAULT;
- if (num_blocks)
- return MB_FAILURE;
-
if (MB_SUCCESS != init())
return MB_FAILURE;
-DEBUGOUT( "Opening File\n" );
+ bool use_mpio = (MB_SUCCESS == opts.get_null_option("USE_MPIO"));
+ if (use_mpio) {
+#ifndef USE_MPI
+ return MB_NOT_IMPLEMENTED;
+#else
+ int parallel_mode;
+ rval = opts.match_option( "PARALLEL",
+ ReadParallel::parallelOptsNames,
+ parallel_mode );
+ if (MB_FAILURE == rval) {
+ readUtil->report_error("Unexpected value for 'PARALLEL' option\n");
+ return MB_FAILURE;
+ }
+ else if (MB_SUCCESS != rval ||
+ parallel_mode != ReadParallel::POPT_READ_DELETE) {
+ use_mpio = false;
+ }
+#endif
+ }
-#ifdef USE_MPI
- int parallel_mode;
- MBErrorCode result = opts.match_option( "PARALLEL", ReadParallel::parallelOptsNames,
- parallel_mode );
- if (MB_FAILURE == result) {
- readUtil->report_error("Unexpected value for 'PARALLEL' option\n");
- return MB_FAILURE;
- }
- else if (MB_ENTITY_NOT_FOUND == result) {
- parallel_mode = 0;
- }
+ dataBuffer = (char*)malloc( bufferSize );
+ if (!dataBuffer)
+ return MB_MEMORY_ALLOCATION_FAILED;
- bool use_mpio = false;
- result = opts.get_null_option("USE_MPIO");
- if (MB_SUCCESS == result) use_mpio = true;
-
- if (parallel_mode == ReadParallel::POPT_READ_DELETE && use_mpio) {
- hid_t plist_id = H5Pcreate(H5P_FILE_ACCESS);
- H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);
- filePtr = mhdf_openFileWithOpt( filename, 0, NULL, plist_id, &status );
+ rval = iFace->create_meshset( MESHSET_SET, file_set );
+ if (MB_SUCCESS != rval) {
+ free(dataBuffer);
+ return rval;
}
- else filePtr = mhdf_openFile( filename, 0, NULL, &status );
-
-#else
+
// Open the file
- filePtr = mhdf_openFile( filename, 0, NULL, &status );
+ hid_t file_prop = H5P_DEFAULT;
+#ifdef USE_MPI
+ if (use_mpio) {
+ file_prop = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(file_prop, MPI_COMM_WORLD, MPI_INFO_NULL);
+ ioProp = H5Pcreate(H5P_DATASET_XFER);
+ H5Pset_dxpl_mpio(ioProp, H5FD_MPIO_COLLECTIVE);
+ }
#endif
+
+
+ filePtr = mhdf_openFileWithOpt( filename, 0, NULL, file_prop, &status );
+ if (file_prop != H5P_DEFAULT)
+ H5Pclose( file_prop );
if (!filePtr)
{
readUtil->report_error( mhdf_message( &status ));
+ free( dataBuffer );
+ if (ioProp != H5P_DEFAULT)
+ H5Pclose( ioProp );
+ iFace->delete_entities( &file_set, 1 );
return MB_FAILURE;
}
+
- dataBuffer = (char*)malloc( bufferSize );
- if (!dataBuffer)
- goto read_fail;
+ rval = load_file_impl( file_set, p, num_blocks, use_mpio );
+ mhdf_closeFile( filePtr, &status );
+ filePtr = 0;
+ if (ioProp != H5P_DEFAULT)
+ H5Pclose( ioProp );
+ if (mhdf_isError( &status )) {
+ if (MB_SUCCESS == rval)
+ rval = MB_FAILURE;
+ readUtil->report_error( mhdf_message( &status ));
+ }
+
+ // delete everything that was read in if read failed part-way through
+ if (MB_SUCCESS != rval) {
+ iFace->delete_entities( &file_set, 1 );
+ file_set = 0;
+ iFace->delete_entities( setSet.range );
+ for (std::list<ElemSet>::reverse_iterator rel_itor = elemList.rbegin();
+ rel_itor != elemList.rend(); ++rel_itor)
+ iFace->delete_entities( rel_itor->range );
+ iFace->delete_entities( nodeSet.range );
+ }
+
+ elemList.clear();
+ nodeSet.range.clear();
+ setSet.range.clear();
+ free( dataBuffer );
+ return rval;
+}
+
+
+MBErrorCode ReadHDF5::load_file_impl(
+ MBEntityHandle file_set,
+ const int*,
+ const int num_blocks,
+ bool use_mpio )
+{
+ MBErrorCode rval;
+ mhdf_Status status;
+ std::string tagname;
+ int num_tags = 0;
+ char** tag_names = NULL;
+ char** groups = NULL;
+ std::list<ElemSet>::iterator el_itor;
+ unsigned int i, num_groups;
+ bool have_nodes = true;
+
+ if (num_blocks)
+ return MB_FAILURE;
+
DEBUGOUT("Reading Nodes.\n");
- if (read_nodes() != MB_SUCCESS) {
+ rval = read_nodes();
+ if (MB_ENTITY_NOT_FOUND == rval) {
DEBUGOUT("No nodes in file.!\n");
have_nodes = false;
- //goto read_fail;
}
+ else if (MB_SUCCESS != rval)
+ return rval;
DEBUGOUT("Reading element connectivity.\n");
@@ -194,7 +254,7 @@
if (mhdf_isError( &status ))
{
readUtil->report_error( mhdf_message( &status ));
- goto read_fail;
+ return MB_FAILURE;
}
for (i = 0; i < num_groups; ++i)
@@ -203,7 +263,8 @@
if (mhdf_isError( &status ))
{
readUtil->report_error( mhdf_message( &status ));
- goto read_fail;
+ free(groups);
+ return MB_FAILURE;
}
if (poly)
@@ -211,22 +272,34 @@
else
rval = read_elems( groups[i] );
- if (MB_SUCCESS != rval)
- goto read_fail;
+ if (MB_SUCCESS != rval) {
+ free( groups );
+ return rval;
+ }
}
DEBUGOUT("Reading sets.\n");
- if (read_sets() != MB_SUCCESS)
- goto read_fail;
-
+ rval = read_sets();
+ if (rval != MB_SUCCESS) {
+ free( groups );
+ return rval;
+ }
+
DEBUGOUT("Reading adjacencies.\n");
- if (read_adjacencies( nodeSet ) != MB_SUCCESS)
- goto read_fail;
- for (el_itor = elemList.begin(); el_itor != elemList.end(); ++el_itor)
- if (read_adjacencies( *el_itor ) != MB_SUCCESS)
- goto read_fail;
+ rval = read_adjacencies( nodeSet );
+ if (rval != MB_SUCCESS) {
+ free( groups );
+ return rval;
+ }
+ for (el_itor = elemList.begin(); el_itor != elemList.end(); ++el_itor) {
+ rval = read_adjacencies( *el_itor );
+ if (MB_SUCCESS != rval) {
+ free( groups );
+ return rval;
+ }
+ }
DEBUGOUT("Reading tags.\n");
@@ -234,94 +307,43 @@
if (mhdf_isError( &status ))
{
readUtil->report_error( mhdf_message( &status ));
- goto read_fail;
+ free( groups );
+ return MB_FAILURE;
}
for (int t = 0; t < num_tags; ++t)
{
rval = read_tag( tag_names[t] );
free( tag_names[t] );
- tag_names[t] = NULL;
- if (MB_SUCCESS != rval)
- goto read_fail;
+ if (MB_SUCCESS != rval) {
+ for (; t < num_tags; ++t)
+ free( tag_names[t] );
+ free( tag_names );
+ free( groups );
+ return MB_FAILURE;
+ }
}
free( tag_names );
- tag_names = 0;
-
-DEBUGOUT("Creating entity set for file contents\n")
- if (MB_SUCCESS != iFace->create_meshset( MESHSET_SET, file_set ))
- goto read_fail;
- if (MB_SUCCESS != iFace->add_entities( file_set, setSet.range ))
- goto read_fail;
- for (rel_itor = elemList.rbegin(); rel_itor != elemList.rend(); ++rel_itor)
- if (MB_SUCCESS != iFace->add_entities( file_set, rel_itor->range))
- goto read_fail;
- if (MB_SUCCESS != iFace->add_entities( file_set, nodeSet.range ))
- goto read_fail;
-
-DEBUGOUT("Finishing read.\n");
- if (MB_SUCCESS != read_qa( file_set ))
- goto read_fail;
-
- // Clean up and exit.
- free( dataBuffer );
- dataBuffer = 0;
- mhdf_closeFile( filePtr, &status );
- filePtr = 0;
- elemList.clear();
- if (groups) free( groups );
- if (mhdf_isError( &status ))
- {
- readUtil->report_error( mhdf_message( &status ));
- return MB_FAILURE;
+ free( groups );
+ if (file_set) {
+ DEBUGOUT("Creating entity set for file contents\n")
+ rval = iFace->add_entities( file_set, nodeSet.range );
+ if (MB_SUCCESS != rval)
+ return rval;
+ for (el_itor = elemList.begin(); el_itor != elemList.end(); ++el_itor) {
+ rval = iFace->add_entities( file_set, el_itor->range);
+ if (MB_SUCCESS != rval)
+ return rval;
+ }
+ rval = iFace->add_entities( file_set, setSet.range );
+ if (MB_SUCCESS != rval)
+ return rval;
}
- return MB_SUCCESS;
-read_fail:
-
- if (file_set)
- iFace->delete_entities( &file_set, 1 );
-
- if (dataBuffer)
- {
- free( dataBuffer );
- dataBuffer = 0;
- }
-
- if (tag_names)
- {
- for (int tt = 0; tt < num_tags; ++tt)
- if (NULL != tag_names[tt])
- free( tag_names[tt] );
- free( tag_names );
- }
-
- if (groups) free( groups );
-
- mhdf_closeFile( filePtr, &status );
- filePtr = 0;
-
- /* Destroy any mesh that we've read in */
- if (!setSet.range.empty())
- {
- iFace->clear_meshset( setSet.range );
- iFace->delete_entities( setSet.range );
- setSet.range.clear();
- }
- for (el_itor = elemList.begin(); el_itor != elemList.end(); ++el_itor)
- {
- if (!el_itor->range.empty())
- iFace->delete_entities( el_itor->range );
- }
- elemList.clear();
- if (!nodeSet.range.empty())
- {
- iFace->delete_entities( nodeSet.range );
- nodeSet.range.clear();
- }
-
- return MB_FAILURE;
+DEBUGOUT("Finishing read.\n");
+ rval = read_qa( file_set );
+ return rval;
}
MBErrorCode ReadHDF5::read_nodes()
@@ -373,7 +395,7 @@
nodeSet.type2 = mhdf_node_type_handle();
for (int i = 0; i < dim; i++)
{
- mhdf_readNodeCoord( data_id, 0, count, i, arrays[i], &status );
+ mhdf_readNodeCoordWithOpt( data_id, 0, count, i, arrays[i], ioProp, &status );
if (mhdf_isError( &status ))
{
readUtil->report_error( mhdf_message(&status) );
@@ -453,7 +475,7 @@
}
elems.range.insert( handle, handle + count - 1 );
- mhdf_readConnectivity( data_id, 0, count, handleType, array, &status );
+ mhdf_readConnectivityWithOpt( data_id, 0, count, handleType, array, ioProp, &status );
if (mhdf_isError( &status ))
{
readUtil->report_error( mhdf_message( &status ) );
@@ -517,7 +539,7 @@
std::vector<MBEntityHandle> connectivity;
for (long i = 0; i < count; ++i) {
long prevend = connend;
- mhdf_readPolyConnIndices( handles[0], i, 1, H5T_NATIVE_LONG, &connend, &status );
+ mhdf_readPolyConnIndicesWithOpt( handles[0], i, 1, H5T_NATIVE_LONG, &connend, ioProp, &status );
if (mhdf_isError( &status ))
{
readUtil->report_error( mhdf_message( &status ) );
@@ -527,8 +549,8 @@
}
connectivity.resize( connend - prevend );
- mhdf_readPolyConnIDs( handles[1], prevend+1, connectivity.size(), handleType,
- &connectivity[0], &status );
+ mhdf_readPolyConnIDsWithOpt( handles[1], prevend+1, connectivity.size(), handleType,
+ &connectivity[0], ioProp, &status );
if (mhdf_isError( &status ))
{
readUtil->report_error( mhdf_message( &status ) );
@@ -626,14 +648,14 @@
while (sets_remaining) {
// read end indices from meta data
unsigned long set_count = sets_remaining < offset_size ? sets_remaining : offset_size;
- mhdf_readSetContentEndIndices( meta_id, set_offset, set_count,
- H5T_NATIVE_LONG, offsets.get(), &status );
+ mhdf_readSetContentEndIndicesWithOpt( meta_id, set_offset, set_count,
+ H5T_NATIVE_LONG, offsets.get(), ioProp, &status );
if (mhdf_isError( &status )) {
readUtil->report_error( mhdf_message( &status ) );
return MB_FAILURE;
}
- mhdf_readSetFlags( meta_id, set_offset, set_count, H5T_NATIVE_USHORT,
- flags.get(), &status );
+ mhdf_readSetFlagsWithOpt( meta_id, set_offset, set_count, H5T_NATIVE_USHORT,
+ flags.get(), ioProp, &status );
if (mhdf_isError( &status )) {
readUtil->report_error( mhdf_message( &status ) );
return MB_FAILURE;
@@ -659,7 +681,7 @@
{
size_t count = remaining > chunk_size ? chunk_size : remaining;
remaining -= count;
- mhdf_readSetData( data_id, file_offset, count, handleType, buffer, &status );
+ mhdf_readSetDataWithOpt( data_id, file_offset, count, handleType, buffer, ioProp, &status );
if (mhdf_isError( &status )) {
readUtil->report_error( mhdf_message( &status ) );
return MB_FAILURE;
@@ -703,7 +725,7 @@
// read data for sets in [r,i)
size_t count = offsets[i-1] + 1 - file_offset;
- mhdf_readSetData( data_id, file_offset, count, handleType, buffer, &status );
+ mhdf_readSetDataWithOpt( data_id, file_offset, count, handleType, buffer, ioProp, &status );
if (mhdf_isError( &status )) {
readUtil->report_error( mhdf_message( &status ) );
return MB_FAILURE;
@@ -782,11 +804,13 @@
// read end indices from meta data
unsigned long set_count = sets_remaining < offset_size ? sets_remaining : offset_size;
if (parents)
- mhdf_readSetParentEndIndices( meta_id, set_offset, set_count,
- H5T_NATIVE_LONG, offsets.get(), &status );
+ mhdf_readSetParentEndIndicesWithOpt( meta_id, set_offset, set_count,
+ H5T_NATIVE_LONG, offsets.get(),
+ ioProp, &status );
else
- mhdf_readSetChildEndIndices( meta_id, set_offset, set_count,
- H5T_NATIVE_LONG, offsets.get(), &status );
+ mhdf_readSetChildEndIndicesWithOpt( meta_id, set_offset, set_count,
+ H5T_NATIVE_LONG, offsets.get(),
+ ioProp, &status );
if (mhdf_isError( &status )) {
readUtil->report_error( mhdf_message( &status ) );
return MB_FAILURE;
@@ -809,7 +833,8 @@
{
size_t count = remaining > chunk_size ? chunk_size : remaining;
remaining -= count;
- mhdf_readSetParentsChildren( data_id, file_offset, count, handleType, buffer, &status );
+ mhdf_readSetParentsChildrenWithOpt( data_id, file_offset, count, handleType,
+ buffer, ioProp, &status );
if (mhdf_isError( &status )) {
readUtil->report_error( mhdf_message( &status ) );
return MB_FAILURE;
@@ -841,7 +866,8 @@
// read data for sets in [r,i)
size_t count = offsets[i-1] + 1 - file_offset;
- mhdf_readSetParentsChildren( data_id, file_offset, count, handleType, buffer, &status );
+ mhdf_readSetParentsChildrenWithOpt( data_id, file_offset, count, handleType,
+ buffer, ioProp, &status );
if (mhdf_isError( &status )) {
readUtil->report_error( mhdf_message( &status ) );
return MB_FAILURE;
@@ -940,7 +966,8 @@
while (remaining) {
// Get a block of set flags
size_t count = remaining > chunk_size ? chunk_size : remaining;
- mhdf_readSetFlags( meta_id, offset, count, H5T_NATIVE_UINT, buffer, &status );
+ mhdf_readSetFlagsWithOpt( meta_id, offset, count, H5T_NATIVE_UINT,
+ buffer, ioProp, &status );
if (mhdf_isError( &status )) {
readUtil->report_error( mhdf_message( &status ) );
mhdf_closeData( filePtr, meta_id, &status );
@@ -1066,7 +1093,8 @@
count -= leading;
remaining -= count;
- mhdf_readAdjacency( table, offset, count, handleType, buffer + leading, &status );
+ mhdf_readAdjacencyWithOpt( table, offset, count, handleType, buffer + leading,
+ ioProp, &status );
if (mhdf_isError(&status))
{
readUtil->report_error( mhdf_message( &status ) );
@@ -1499,7 +1527,8 @@
subrange.merge( iter, stop );
iter = stop;
- mhdf_readDenseTag( data, offset, count, hdf_read_type, dataBuffer, &status );
+ mhdf_readDenseTagWithOpt( data, offset, count, hdf_read_type, dataBuffer,
+ ioProp, &status );
offset += count;
if (mhdf_isError( &status ))
{
@@ -1593,7 +1622,8 @@
size_t count = remaining > chunk_size ? chunk_size : remaining;
remaining -= count;
- mhdf_readSparseTagEntities( data[0], offset, count, handleType, idbuf, &status );
+ mhdf_readSparseTagEntitiesWithOpt( data[0], offset, count, handleType, idbuf,
+ ioProp, &status );
if (mhdf_isError( &status ))
{
readUtil->report_error( mhdf_message( &status ) );
@@ -1602,7 +1632,8 @@
return MB_FAILURE;
}
- mhdf_readSparseTagValues( data[1], offset, count, hdf_read_type, databuf, &status );
+ mhdf_readSparseTagValuesWithOpt( data[1], offset, count, hdf_read_type,
+ databuf, ioProp, &status );
if (mhdf_isError( &status ))
{
readUtil->report_error( mhdf_message( &status ) );
@@ -1727,7 +1758,8 @@
// read entity IDs
assert_range( handle_buffer, count * sizeof(MBEntityHandle) );
- mhdf_readSparseTagEntities( data[0], offset, count, handleType, handle_buffer, &status );
+ mhdf_readSparseTagEntitiesWithOpt( data[0], offset, count, handleType,
+ handle_buffer, ioProp, &status );
if (mhdf_isError( &status ))
{
readUtil->report_error( mhdf_message( &status ) );
@@ -1747,7 +1779,8 @@
}
// read end index of tag value
assert_range( end_idx_buffer, count * sizeof(long) );
- mhdf_readSparseTagIndices( data[2], offset, count, H5T_NATIVE_LONG, end_idx_buffer, &status );
+ mhdf_readSparseTagIndicesWithOpt( data[2], offset, count, H5T_NATIVE_LONG,
+ end_idx_buffer, ioProp, &status );
if (mhdf_isError( &status ))
{
readUtil->report_error( mhdf_message( &status ) );
@@ -1790,7 +1823,9 @@
// read data
assert( num_val <= data_buffer_size );
assert_range( data_buffer, num_val * elem_size );
- mhdf_readSparseTagValues( data[1], data_offset, num_val, hdf_read_type, data_buffer, &status );
+ mhdf_readSparseTagValuesWithOpt( data[1], data_offset, num_val,
+ hdf_read_type, data_buffer,
+ ioProp, &status );
if (mhdf_isError( &status )) {
readUtil->report_error( mhdf_message( &status ) );
mhdf_closeData( filePtr, data[0], &status );
@@ -1827,7 +1862,9 @@
else {
const int size = (end_idx_buffer[i] - prev_end_idx);
std::vector<char*> tmp_buffer( size * elem_size );
- mhdf_readSparseTagValues( data[1], data_offset, size, hdf_read_type, &tmp_buffer[0], &status );
+ mhdf_readSparseTagValuesWithOpt( data[1], data_offset, size,
+ hdf_read_type, &tmp_buffer[0],
+ ioProp, &status );
if (mhdf_isError( &status )) {
readUtil->report_error( mhdf_message( &status ) );
mhdf_closeData( filePtr, data[0], &status );
Modified: MOAB/trunk/ReadHDF5.hpp
===================================================================
--- MOAB/trunk/ReadHDF5.hpp 2008-07-09 17:10:15 UTC (rev 2002)
+++ MOAB/trunk/ReadHDF5.hpp 2008-07-09 17:28:02 UTC (rev 2003)
@@ -51,7 +51,13 @@
const FileOptions& opts,
const int* material_set_list,
int material_set_count );
+protected:
+ MBErrorCode load_file_impl( MBEntityHandle file_set,
+ const int*,
+ const int num_blocks,
+ bool use_mpio );
+
private:
MBErrorCode init();
@@ -93,6 +99,9 @@
//! The type of an MBEntityHandle
hid_t handleType;
+
+ //! read/write property handle
+ hid_t ioProp;
//! Read node coordinates.
MBErrorCode read_nodes( );
More information about the moab-dev
mailing list