[MOAB-dev] r3061 - in MOAB/trunk: . parallel test/h5file
kraftche at cae.wisc.edu
Mon Jul 27 16:11:27 CDT 2009
Author: kraftche
Date: 2009-07-27 16:11:26 -0500 (Mon, 27 Jul 2009)
New Revision: 3061
Modified:
MOAB/trunk/MBCore.cpp
MOAB/trunk/MBCore.hpp
MOAB/trunk/MBReaderIface.hpp
MOAB/trunk/README.IO
MOAB/trunk/ReadHDF5.cpp
MOAB/trunk/ReadHDF5.hpp
MOAB/trunk/ReadNCDF.cpp
MOAB/trunk/TestUtil.hpp
MOAB/trunk/parallel/MBParallelComm.cpp
MOAB/trunk/parallel/MBParallelComm.hpp
MOAB/trunk/parallel/ReadParallel.cpp
MOAB/trunk/parallel/ReadParallel.hpp
MOAB/trunk/parallel/parallel_hdf5_test.cc
MOAB/trunk/test/h5file/h5partial.cpp
Log:
o Expose more file reader capabilities through MBCore
o Add partition options passed to individual readers for set tags
o Implement the above for ReadHDF5
o Remove 'PARTITION_DISTRIBUTE' option. Specifying the 'PARTITION'
option (with or without a value) now results in a distributed mesh.
o Add 'PARTITION_BY_RANK' option to specify that set IDs should be
mapped to CPU rank (implies a 1-to-1 mapping).
o Add unit tests (and one timing test) for parallel hdf5 read.
Modified: MOAB/trunk/MBCore.cpp
===================================================================
--- MOAB/trunk/MBCore.cpp 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/MBCore.cpp 2009-07-27 21:11:26 UTC (rev 3061)
@@ -212,7 +212,7 @@
#ifdef USE_MPI
std::vector<MBParallelComm*> pc_list;
- MBErrorCode result = MBParallelComm::get_all_pcomm(this, pc_list);
+ MBParallelComm::get_all_pcomm(this, pc_list);
for (std::vector<MBParallelComm*>::iterator vit = pc_list.begin();
vit != pc_list.end(); vit++)
delete *vit;
@@ -372,11 +372,12 @@
MBEntityHandle& file_set,
const char* options,
const char* set_tag_name,
- const int* set_tag_values,
- int num_set_tag_values )
+ const int* set_tag_vals,
+ int num_set_tag_vals )
{
FileOptions opts(options);
MBErrorCode rval;
+ MBReaderIface::IDTag t = { set_tag_name, set_tag_vals, num_set_tag_vals, 0, 0 };
// if reading in parallel, call a different reader
std::string parallel_opt;
@@ -393,9 +394,10 @@
}
else if (rval != MB_ENTITY_NOT_FOUND)
return rval;
- return ReadParallel(this,pcomm).load_file(file_name, file_set, opts,
- set_tag_name, set_tag_values,
- num_set_tag_values);
+ if (set_tag_name && num_set_tag_vals)
+ return ReadParallel(this,pcomm).load_file( file_name, file_set, opts, &t, 1 );
+ else
+ return ReadParallel(this,pcomm).load_file( file_name, file_set, opts );
#else
mError->set_last_error( "PARALLEL option not valid, this instance"
" compiled for serial execution.\n" );
@@ -403,20 +405,21 @@
#endif
}
else {
- return serial_load_file( file_name, file_set,
- opts, set_tag_name,
- set_tag_values, num_set_tag_values );
+ if (set_tag_name && num_set_tag_vals)
+ return serial_load_file( file_name, file_set, opts, &t, 1 );
+ else
+ return serial_load_file( file_name, file_set, opts );
}
}
MBErrorCode MBCore::serial_load_file( const char* file_name,
MBEntityHandle& file_set,
const FileOptions& opts,
- const char* set_tag_name,
- const int* set_tag_values,
- int num_set_tag_values )
+ const MBReaderIface::IDTag* subsets,
+ int num_sets,
+ const MBTag* id_tag )
{
- if (num_set_tag_values < 0)
+ if (num_sets < 0)
return MB_INDEX_OUT_OF_RANGE;
file_set = 0;
@@ -424,23 +427,51 @@
MBErrorCode rval;
const MBReaderWriterSet* set = reader_writer_set();
- MBReaderIface::IDTag subset = { set_tag_name, set_tag_values, num_set_tag_values };
- MBReaderIface::IDTag* subsets;
- int num_sets;
- if (set_tag_name && num_set_tag_values) {
- num_sets = 1;
- subsets = &subset;
+ // otherwise try using the file extension to select a reader
+ MBReaderIface* reader = set->get_file_extension_reader( file_name );
+ if (reader)
+ {
+ rval = reader->load_file( file_name, file_set, opts, subsets, num_sets, id_tag );
+ delete reader;
}
- else {
- num_sets = 0;
- subsets = 0;
+ else
+ {
+ // Try all the readers
+ MBReaderWriterSet::iterator iter;
+ for (iter = set->begin(); iter != set->end(); ++iter)
+ {
+ MBReaderIface* reader = iter->make_reader( this );
+ if (NULL != reader)
+ {
+ rval = reader->load_file( file_name, file_set, opts, subsets, num_sets, id_tag );
+ delete reader;
+ if (MB_SUCCESS == rval)
+ break;
+ }
+ }
}
+
+ return rval;
+}
+MBErrorCode MBCore::serial_read_tag( const char* file_name,
+ const char* tag_name,
+ const FileOptions& opts,
+ std::vector<int>& vals,
+ const MBReaderIface::IDTag* subsets,
+ int num_sets )
+{
+ if (num_sets < 0)
+ return MB_INDEX_OUT_OF_RANGE;
+
+ MBErrorCode rval;
+ const MBReaderWriterSet* set = reader_writer_set();
+
// otherwise try using the file extension to select a reader
MBReaderIface* reader = set->get_file_extension_reader( file_name );
if (reader)
{
- rval = reader->load_file( file_name, file_set, opts, subsets, num_sets );
+ rval = reader->read_tag_values( file_name, tag_name, opts, vals, subsets, num_sets );
delete reader;
}
else
@@ -452,7 +483,7 @@
MBReaderIface* reader = iter->make_reader( this );
if (NULL != reader)
{
- rval = reader->load_file( file_name, file_set, opts, subsets, num_sets );
+ rval = reader->read_tag_values( file_name, tag_name, opts, vals, subsets, num_sets );
delete reader;
if (MB_SUCCESS == rval)
break;
Modified: MOAB/trunk/MBCore.hpp
===================================================================
--- MOAB/trunk/MBCore.hpp 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/MBCore.hpp 2009-07-27 21:11:26 UTC (rev 3061)
@@ -17,6 +17,7 @@
#define MB_IMPL_GENERAL_HPP
#include "MBInterface.hpp"
+#include "MBReaderIface.hpp"
#include <map>
class MBWriteUtil;
@@ -102,12 +103,19 @@
int num_set_tag_values = 0 );
/**Load or import a file. */
- virtual MBErrorCode serial_load_file( const char* file_name,
- MBEntityHandle& file_set,
- const FileOptions& opts,
- const char* set_tag_name = 0,
- const int* set_tag_values = 0,
- int num_set_tag_values = 0 );
+ MBErrorCode serial_load_file( const char* file_name,
+ MBEntityHandle& file_set,
+ const FileOptions& opts,
+ const MBReaderIface::IDTag* subset_list = 0,
+ int subset_list_length = 0,
+ const MBTag* file_id_tag = 0 );
+
+ MBErrorCode serial_read_tag( const char* file_name,
+ const char* tag_name,
+ const FileOptions& opts,
+ std::vector<int>& tag_vals,
+ const MBReaderIface::IDTag* subset_list = 0,
+ int subset_list_length = 0 );
virtual MBErrorCode write_mesh(const char *file_name,
const MBEntityHandle *output_list = NULL,
Modified: MOAB/trunk/MBReaderIface.hpp
===================================================================
--- MOAB/trunk/MBReaderIface.hpp 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/MBReaderIface.hpp 2009-07-27 21:11:26 UTC (rev 3061)
@@ -39,6 +39,8 @@
const char* tag_name; //!< Name of tag containing integer IDs
const int* tag_values; //!< Array of integer ID values
int num_tag_values; //!< Length of tag_values array
+ int num_parts; //!< If non-zero, load 1/num_parts of the matching sets
+ int part_number; //!< If num_parts is non-zero, load part_number-th fraction of the sets
};
/**
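For context, a minimal sketch of how a caller populates the extended IDTag
for a partitioned read (mirroring the PA_READ_PART branch added to
ReadParallel.cpp below; num_cpus and my_rank are placeholder names):

    MBReaderIface::IDTag parts = { "PARTITION", 0, 0, 0, 0 };
    parts.num_parts   = num_cpus;  // split the matching sets into num_cpus pieces
    parts.part_number = my_rank;   // ...and load only the my_rank-th piece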
Modified: MOAB/trunk/README.IO
===================================================================
--- MOAB/trunk/README.IO 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/README.IO 2009-07-27 21:11:26 UTC (rev 3061)
@@ -98,6 +98,7 @@
I/O if available (this option is not supported for all
file formats.)
- FORMAT - deprecated (use WRITE_PART)
+
PARTITION
PARTITION=<tag_name>
@@ -106,14 +107,25 @@
in a tag. If the tag name is not specified, the default ("PARTITION")
is used.
- PARTITION_VALS=<int_list>
-Specify, for each processor, the list of partition IDs.
+ PARTITION_VAL=<int_list>
+If a tag name is specified for the 'PARTITION' option, then treat as
+partitions only those sets for which the tag *value* is a single integer
+matching one of the integers in the specified list.
+
+
PARTITION_DISTRIBUTE
-?
+Deprecated. Implied by the "PARTITION" option.
+
+ PARTITION_BY_RANK
+
+Assume a 1-1 mapping between MPI rank and part ID. Assign parts
+to processors for which rank == part ID.
+
+
MPI_IO_RANK=<RANK>
For IO modes in which a single processor handles all disk access, the
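Putting the options above together, a minimal caller (following the pattern
used in parallel_hdf5_test.cc later in this commit; the file name is a
placeholder):

    MBCore moab;
    MBInterface& mb = moab;
    MBEntityHandle file_set;
    // Each rank loads only its own part of the sets tagged "PARTITION".
    MBErrorCode rval = mb.load_file( "mesh.h5m", file_set,
                       "PARALLEL=READ_PART;PARTITION=PARTITION;PARTITION_BY_RANK" );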
Modified: MOAB/trunk/ReadHDF5.cpp
===================================================================
--- MOAB/trunk/ReadHDF5.cpp 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/ReadHDF5.cpp 2009-07-27 21:11:26 UTC (rev 3061)
@@ -462,13 +462,46 @@
return error(rval);
MBRange tmp_file_ids;
- std::vector<int> ids( subset_list[i].tag_values,
- subset_list[i].tag_values + subset_list[i].num_tag_values );
- std::sort( ids.begin(), ids.end() );
- rval = search_tag_values( tag_index, ids, tmp_file_ids );
- if (MB_SUCCESS != rval)
- return error(rval);
+ if (!subset_list[i].num_tag_values) {
+ rval = get_tagged_entities( tag_index, tmp_file_ids );
+ }
+ else {
+ std::vector<int> ids( subset_list[i].tag_values,
+ subset_list[i].tag_values + subset_list[i].num_tag_values );
+ std::sort( ids.begin(), ids.end() );
+ rval = search_tag_values( tag_index, ids, tmp_file_ids );
+ if (MB_SUCCESS != rval)
+ return error(rval);
+ }
+ if (tmp_file_ids.empty())
+ return error(MB_ENTITY_NOT_FOUND);
+
+ if (subset_list[i].num_parts) {
+ // check that the tag only identified sets
+ if ((unsigned long)fileInfo->sets.start_id > tmp_file_ids.front() ||
+ tmp_file_ids.back() >= (unsigned long)fileInfo->sets.start_id + fileInfo->sets.count) {
+ return error(MB_TYPE_OUT_OF_RANGE);
+ }
+
+ MBRange::iterator s = tmp_file_ids.begin();
+ size_t num_per_proc = tmp_file_ids.size() / subset_list[i].num_parts;
+ size_t num_extra = tmp_file_ids.size() % subset_list[i].num_parts;
+ MBRange::iterator e;
+ if (subset_list[i].part_number < (long)num_extra) {
+ s += (num_per_proc+1) * subset_list[i].part_number;
+ e = s;
+ e += (num_per_proc+1);
+ }
+ else {
+ s += num_per_proc * subset_list[i].part_number + num_extra;
+ e = s;
+ e += num_per_proc;
+ }
+ tmp_file_ids.erase(e, tmp_file_ids.end());
+ tmp_file_ids.erase(tmp_file_ids.begin(), s);
+ }
+
if (i == 0)
file_ids.swap( tmp_file_ids );
else
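The block-partition arithmetic in the hunk above gives each of the first
N%P parts one extra set; a standalone sketch of the same computation
(hypothetical helper, not part of the commit):

    // Split n items into p parts; part k gets the half-open range [begin, end).
    void part_range( size_t n, size_t p, size_t k, size_t& begin, size_t& end )
    {
      size_t per   = n / p;  // minimum items per part
      size_t extra = n % p;  // leftovers, one each to the first 'extra' parts
      if (k < extra) {
        begin = (per + 1) * k;
        end   = begin + per + 1;
      }
      else {
        begin = per * k + extra;
        end   = begin + per;
      }
    }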
@@ -766,8 +799,8 @@
while (iter != ranges.end()) {
long begin = *iter; ++iter;
long end = *iter; ++iter;
- mhdf_readSparseTagEntities( tables[0], begin, end - begin + 1,
- handleType, &indices[offset], &status );
+ mhdf_readSparseTagEntitiesWithOpt( tables[0], begin, end - begin + 1,
+ handleType, &indices[offset], indepIO, &status );
if (is_error(status)) {
mhdf_closeData( filePtr, tables[0], &status );
return error(MB_FAILURE);
@@ -784,6 +817,75 @@
return MB_SUCCESS;
}
+MBErrorCode ReadHDF5::get_tagged_entities( int tag_index, MBRange& file_ids )
+{
+ const mhdf_TagDesc& tag = fileInfo->tags[tag_index];
+
+ // do dense data
+ MBRange::iterator hint = file_ids.begin();
+ for (int i = 0; i < tag.num_dense_indices; ++i)
+ {
+ int idx = tag.dense_elem_indices[i];
+ mhdf_EntDesc* ents;
+ if (idx == -2)
+ ents = &fileInfo->sets;
+ else if (idx == -1)
+ ents = &fileInfo->nodes;
+ else {
+ if (idx < 0 || idx >= fileInfo->num_elem_desc)
+ return error(MB_FAILURE);
+ ents = &(fileInfo->elems[idx].desc);
+ }
+
+ MBEntityHandle h = (MBEntityHandle)ents->start_id;
+ hint = file_ids.insert( hint, h, h + ents->count );
+ }
+
+ if (!tag.have_sparse)
+ return MB_SUCCESS;
+
+ // do sparse data
+
+ mhdf_Status status;
+ hid_t tables[2];
+ long size, junk;
+ mhdf_openSparseTagData( filePtr, tag.name, &size, &junk, tables, &status );
+ if (is_error(status))
+ return error(MB_FAILURE);
+ mhdf_closeData( filePtr, tables[1], &status );
+ if (is_error(status)) {
+ mhdf_closeData( filePtr, tables[0], &status );
+ return error(MB_FAILURE);
+ }
+
+ hint = file_ids.begin();
+ MBEntityHandle* buffer = reinterpret_cast<MBEntityHandle*>(dataBuffer);
+ const long buffer_size = bufferSize / sizeof(MBEntityHandle);
+ long remaining = size, offset = 0;
+ while (remaining) {
+ long count = std::min( buffer_size, remaining );
+ mhdf_readSparseTagEntitiesWithOpt( *tables, offset, count,
+ handleType, buffer, collIO, &status );
+ if (is_error(status)) {
+ mhdf_closeData( filePtr, *tables, &status );
+ return error(MB_FAILURE);
+ }
+
+ std::sort( buffer, buffer + count );
+ for (long i = 0; i < count; ++i)
+ hint = file_ids.insert( hint, buffer[i], buffer[i] );
+
+ remaining -= count;
+ offset += count;
+ }
+
+ mhdf_closeData( filePtr, *tables, &status );
+ if (is_error(status))
+ return error(MB_FAILURE);
+
+ return MB_SUCCESS;
+}
+
MBErrorCode ReadHDF5::search_tag_values( hid_t tag_table,
unsigned long table_size,
const std::vector<int>& sorted_values,
Modified: MOAB/trunk/ReadHDF5.hpp
===================================================================
--- MOAB/trunk/ReadHDF5.hpp 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/ReadHDF5.hpp 2009-07-27 21:11:26 UTC (rev 3061)
@@ -252,6 +252,15 @@
MBErrorCode search_tag_values( int tag_index,
const std::vector<int>& sorted_values,
MBRange& file_ids_out );
+
+ /**\brief Search for entities with specified tag
+ *
+ *\NOTE For parallel reads, this function does collective IO.
+ *
+ *\param tag_index Index into info->tags specifying which tag to search.
+ *\param file_ids_out File IDs for entities with specified tag values.
+ */
+ MBErrorCode get_tagged_entities( int tag_index, MBRange& file_ids_out );
/**\brief Search a table of tag data for a specified set of values.
*
Modified: MOAB/trunk/ReadNCDF.cpp
===================================================================
--- MOAB/trunk/ReadNCDF.cpp 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/ReadNCDF.cpp 2009-07-27 21:11:26 UTC (rev 3061)
@@ -508,6 +508,10 @@
readMeshIface->report_error( "ExodusII reader supports subset read only by material ID." );
return MB_UNSUPPORTED_OPERATION;
}
+ if (subset_list[0].num_parts) {
+ readMeshIface->report_error( "ExodusII reader does not support mesh partitioning");
+ return MB_UNSUPPORTED_OPERATION;
+ }
blocks_to_load = subset_list[0].tag_values;
num_blocks = subset_list[0].num_tag_values;
}
Modified: MOAB/trunk/TestUtil.hpp
===================================================================
--- MOAB/trunk/TestUtil.hpp 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/TestUtil.hpp 2009-07-27 21:11:26 UTC (rev 3061)
@@ -535,6 +535,23 @@
check_array_equal( &A[0], A.size(), &B[0], B.size(), sA, sB, line, file );
}
+#ifdef MB_RANGE_HPP
+
+void check_equal( const MBRange& A, const MBRange& B, const char* sA, const char* sB, int line, const char* file )
+{
+ if (A == B)
+ return;
+
+ std::cout << "MBErrorCode Test Failed: " << sA << " == " << sB << std::endl;
+ std::cout << " at line " << line << " of '" << file << "'" << std::endl;
+ std::cout << " Expected: " << A << std::endl;
+ std::cout << " Actual : " << B << std::endl;
+ std::cout << std::endl;
+ flag_error();
+}
+
+#endif
+
#endif
#endif
Modified: MOAB/trunk/parallel/MBParallelComm.cpp
===================================================================
--- MOAB/trunk/parallel/MBParallelComm.cpp 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/parallel/MBParallelComm.cpp 2009-07-27 21:11:26 UTC (rev 3061)
@@ -2518,7 +2518,7 @@
MBErrorCode MBParallelComm::resolve_shared_ents(MBEntityHandle this_set,
int resolve_dim,
int shared_dim,
- MBTag* id_tag)
+ const MBTag* id_tag)
{
MBErrorCode result;
MBRange proc_ents;
@@ -2555,7 +2555,7 @@
MBRange &proc_ents,
int resolve_dim,
int shared_dim,
- MBTag* id_tag)
+ const MBTag* id_tag)
{
#ifdef DEBUG_MPE
define_mpe();
@@ -4608,8 +4608,8 @@
MBErrorCode MBParallelComm::update_shared_mesh()
{
- MBErrorCode result;
- int success;
+// MBErrorCode result;
+// int success;
// ...
/*
Modified: MOAB/trunk/parallel/MBParallelComm.hpp
===================================================================
--- MOAB/trunk/parallel/MBParallelComm.hpp 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/parallel/MBParallelComm.hpp 2009-07-27 21:11:26 UTC (rev 3061)
@@ -268,7 +268,7 @@
MBRange &proc_ents,
int resolve_dim = -1,
int shared_dim = -1,
- MBTag* id_tag = 0);
+ const MBTag* id_tag = 0);
/** \brief Resolve shared entities between processors
*
@@ -284,7 +284,7 @@
MBErrorCode resolve_shared_ents(MBEntityHandle this_set,
int resolve_dim = 3,
int shared_dim = -1,
- MBTag* id_tag = 0);
+ const MBTag* id_tag = 0);
static MBErrorCode resolve_shared_ents(MBParallelComm **pc,
const unsigned int np,
@@ -400,6 +400,10 @@
//! Get proc config for this communication object
MBProcConfig &proc_config() {return procConfig;}
+ unsigned rank() const { return proc_config().proc_rank(); }
+ unsigned size() const { return proc_config().proc_size(); }
+ MPI_Comm comm() const { return proc_config().proc_comm(); }
+
//! return the tags used to indicate shared procs and handles
MBErrorCode get_shared_proc_tags(MBTag &sharedp_tag,
MBTag &sharedps_tag,
Modified: MOAB/trunk/parallel/ReadParallel.cpp
===================================================================
--- MOAB/trunk/parallel/ReadParallel.cpp 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/parallel/ReadParallel.cpp 2009-07-27 21:11:26 UTC (rev 3061)
@@ -10,6 +10,8 @@
#include "MBCN.hpp"
#include <iostream>
+#include <iomanip>
+#include <iterator>
#include <sstream>
#include <algorithm>
#include <assert.h>
@@ -20,12 +22,18 @@
dynamic_cast<MBCore*>(mbImpl)->get_error_handler()->set_last_error(a); \
return result;}
-enum ParallelActions {PA_READ=0, PA_BROADCAST, PA_DELETE_NONLOCAL,
- PA_CHECK_GIDS_SERIAL, PA_GET_FILESET_ENTS,
+enum ParallelActions {PA_READ=0,
+ PA_READ_PART,
+ PA_BROADCAST,
+ PA_DELETE_NONLOCAL,
+ PA_CHECK_GIDS_SERIAL,
+ PA_GET_FILESET_ENTS,
PA_RESOLVE_SHARED_ENTS,
- PA_EXCHANGE_GHOSTS, PA_PRINT_PARALLEL};
+ PA_EXCHANGE_GHOSTS,
+ PA_PRINT_PARALLEL};
const char *ParallelActionsNames[] = {
"PARALLEL READ",
+ "PARALLEL READ PART",
"PARALLEL BROADCAST",
"PARALLEL DELETE NONLOCAL",
"PARALLEL CHECK_GIDS_SERIAL",
@@ -35,8 +43,13 @@
"PARALLEL PRINT_PARALLEL"
};
-const char* ReadParallel::parallelOptsNames[] = { "NONE", "BCAST", "BCAST_DELETE",
- "READ_DELETE", "READ_PART", "", 0 };
+const char* ReadParallel::parallelOptsNames[] = { "NONE",
+ "BCAST",
+ "BCAST_DELETE",
+ "READ_DELETE",
+ "READ_PART",
+ "",
+ 0 };
ReadParallel::ReadParallel(MBInterface* impl,
MBParallelComm *pc)
@@ -52,9 +65,9 @@
const int num_files,
MBEntityHandle& file_set,
const FileOptions &opts,
- const char* set_tag_name,
- const int* set_tag_values,
- const int num_tag_values )
+ const MBReaderIface::IDTag* subset_list,
+ int subset_list_length,
+ const MBTag* file_id_tag )
{
MBError *merror = ((MBCore*)mbImpl)->get_error_handler();
@@ -70,18 +83,23 @@
parallel_mode = 0;
}
// Get partition setting
+ bool distrib;
std::string partition_tag_name;
result = opts.get_option("PARTITION", partition_tag_name);
- if (MB_ENTITY_NOT_FOUND == result || partition_tag_name.empty())
- partition_tag_name = PARALLEL_PARTITION_TAG_NAME;
+ if (MB_ENTITY_NOT_FOUND == result) {
+ distrib = false;
+ partition_tag_name = "";
+ }
+ else {
+ distrib = true;
+ if (partition_tag_name.empty())
+ partition_tag_name = PARALLEL_PARTITION_TAG_NAME;
+ }
// Get partition tag value(s), if any, and whether they're to be
// distributed or assigned
std::vector<int> partition_tag_vals;
result = opts.get_ints_option("PARTITION_VAL", partition_tag_vals);
- bool distrib = false;
- result = opts.get_null_option("PARTITION_DISTRIBUTE");
- if (MB_SUCCESS == result) distrib = true;
// see if we need to report times
bool cputime = false;
@@ -143,6 +161,15 @@
std::vector<int> pa_vec;
bool is_reader = (reader_rank == (int) myPcomm->proc_config().proc_rank());
+ bool partition_by_rank = false;
+ if (MB_SUCCESS == opts.get_null_option("PARTITION_BY_RANK")) {
+ partition_by_rank = true;
+ if (!partition_tag_vals.empty()) {
+ merror->set_last_error("Cannot specify both PARTITION_VALS and PARTITION_BY_RANK");
+ return MB_FAILURE;
+ }
+ }
+
switch (parallel_mode) {
case POPT_BCAST:
if (is_reader) {
@@ -175,8 +202,8 @@
break;
case POPT_READ_PART:
- merror->set_last_error( "Access to format-specific parallel read not implemented.\n");
- return MB_NOT_IMPLEMENTED;
+ pa_vec.push_back(PA_READ_PART);
+ break;
default:
return MB_FAILURE;
}
@@ -189,8 +216,9 @@
return load_file(file_names, num_files, file_set, parallel_mode,
partition_tag_name,
- partition_tag_vals, distrib, pa_vec, opts,
- set_tag_name, set_tag_values, num_tag_values,
+ partition_tag_vals, distrib,
+ partition_by_rank, pa_vec, opts,
+ subset_list, subset_list_length, file_id_tag,
reader_rank, cputime,
resolve_dim, shared_dim,
ghost_dim, bridge_dim, num_layers);
@@ -203,11 +231,12 @@
std::string &partition_tag_name,
std::vector<int> &partition_tag_vals,
bool distrib,
+ bool partition_by_rank,
std::vector<int> &pa_vec,
const FileOptions &opts,
- const char* set_tag_name,
- const int* set_tag_values,
- const int num_tag_values,
+ const MBReaderIface::IDTag* subset_list,
+ int subset_list_length,
+ const MBTag* file_id_tag,
const int reader_rank,
const bool cputime,
const int resolve_dim,
@@ -220,6 +249,8 @@
if (myPcomm == NULL)
myPcomm = new MBParallelComm(mbImpl);
+ MBError *merror = ((MBCore*)mbImpl)->get_error_handler();
+
MBRange entities;
MBTag file_set_tag = 0;
int other_sets = 0;
@@ -238,6 +269,8 @@
result = mbImpl->create_meshset(MESHSET_SET, file_set);
if (MB_SUCCESS != result) return result;
bool i_read = false;
+ MBTag id_tag = 0;
+ bool use_id_tag = false;
for (i = 1, vit = pa_vec.begin();
vit != pa_vec.end(); vit++, i++) {
@@ -256,9 +289,9 @@
tmp_result = impl->serial_load_file( file_names[j],
new_file_set,
opts,
- set_tag_name,
- set_tag_values,
- num_tag_values );
+ subset_list,
+ subset_list_length,
+ file_id_tag );
if (MB_SUCCESS != tmp_result) break;
// put the contents of each file set for the reader into the
@@ -283,6 +316,47 @@
tmp_result = mbImpl->tag_set_data(file_set_tag, &file_set, 1,
&other_sets);
break;
+//==================
+ case PA_READ_PART: {
+ i_read = true;
+ if (num_files != 1) {
+ merror->set_last_error("Multiple file read not supported for READ_PART");
+ return MB_NOT_IMPLEMENTED;
+ }
+
+ use_id_tag = true;
+ if (!file_id_tag) {
+ tmp_result = mbImpl->tag_create( "", sizeof(int), MB_TAG_DENSE, MB_TYPE_INTEGER, id_tag, 0 );
+ if (MB_SUCCESS != tmp_result)
+ break;
+ file_id_tag = &id_tag;
+ }
+
+ MBReaderIface::IDTag parts = { partition_tag_name.c_str(),
+ 0, 0, 0, 0 };
+ int rank = myPcomm->rank();
+ if (partition_by_rank) {
+ assert(partition_tag_vals.empty());
+ parts.tag_values = &rank;
+ parts.num_tag_values = 1;
+ }
+ else {
+ parts.num_parts = myPcomm->size();
+ parts.part_number = myPcomm->rank();
+ if (!partition_tag_vals.empty()) {
+ parts.tag_values = &partition_tag_vals[0];
+ parts.num_tag_values = partition_tag_vals.size();
+ }
+ }
+ std::vector<MBReaderIface::IDTag> subset( subset_list,
+ subset_list + subset_list_length );
+ subset.push_back( parts );
+ tmp_result = impl->serial_load_file( *file_names, file_set, opts,
+ &subset[0], subset.size(), file_id_tag );
+
+ if (MB_SUCCESS == tmp_result)
+ tmp_result = create_partition_sets( partition_tag_name, file_set );
+ } break;
//==================
case PA_GET_FILESET_ENTS:
@@ -333,6 +407,10 @@
std::cerr << "Delete nonlocal done; entities:" << std::endl;
mbImpl->list_entities(0, 0);
}
+
+ if (MB_SUCCESS == tmp_result)
+ tmp_result = create_partition_sets( partition_tag_name, file_set );
+
break;
//==================
@@ -348,7 +426,8 @@
if (debug)
std::cout << "Resolving shared entities." << std::endl;
- tmp_result = myPcomm->resolve_shared_ents(file_set, resolve_dim, shared_dim);
+ tmp_result = myPcomm->resolve_shared_ents(file_set, resolve_dim, shared_dim,
+ use_id_tag ? file_id_tag : 0);
break;
//==================
@@ -388,6 +467,12 @@
if (cputime) act_times[i] = MPI_Wtime();
}
+ if (id_tag) {
+ MBErrorCode tmp_result = mbImpl->tag_delete( id_tag );
+ if (MB_SUCCESS != tmp_result && MB_SUCCESS == result)
+ result = tmp_result;
+ }
+
if (cputime && 0 == myPcomm->proc_config().proc_rank()) {
std::cout << "Read times: ";
for (i = 1, vit = pa_vec.begin();
@@ -474,7 +559,29 @@
result = delete_nonlocal_entities(file_set); RR(" ");
- if (ptag_name != PARALLEL_PARTITION_TAG_NAME) {
+ return result;
+}
+
+MBErrorCode ReadParallel::create_partition_sets( std::string &ptag_name,
+ MBEntityHandle file_set )
+{
+ if (ptag_name == PARALLEL_PARTITION_TAG_NAME)
+ return MB_SUCCESS;
+
+ int proc_rk = myPcomm->proc_config().proc_rank();
+ MBRange partition_sets;
+ MBErrorCode result;
+
+ MBTag ptag;
+ result = mbImpl->tag_get_handle(ptag_name.c_str(), ptag);
+ RR("Failed getting tag handle in create_partition_sets.");
+
+ result = mbImpl->get_entities_by_type_and_tag(file_set, MBENTITYSET,
+ &ptag, NULL, 1,
+ myPcomm->partition_sets());
+ RR("Failed to get sets with partition-type tag.");
+
+
// tag the partition sets with a standard tag name
result = mbImpl->tag_create(PARALLEL_PARTITION_TAG_NAME, sizeof(int),
MB_TAG_SPARSE,
@@ -499,8 +606,8 @@
for (unsigned int i = 0; i < myPcomm->partition_sets().size(); i++)
values[i] = proc_rk;
result = mbImpl->tag_set_data(ptag, myPcomm->partition_sets(), &values[0]); RR(" ");
- }
+
return result;
}
Modified: MOAB/trunk/parallel/ReadParallel.hpp
===================================================================
--- MOAB/trunk/parallel/ReadParallel.hpp 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/parallel/ReadParallel.hpp 2009-07-27 21:11:26 UTC (rev 3061)
@@ -20,18 +20,18 @@
MBErrorCode load_file(const char *file_name,
MBEntityHandle& file_set,
const FileOptions &opts,
- const char* set_tag_name,
- const int* set_tag_values,
- const int num_tag_values );
+ const MBReaderIface::IDTag* subset_list = 0,
+ int subset_list_length = 0,
+ const MBTag* file_id_tag = 0 );
//! load multiple files
MBErrorCode load_file(const char **file_names,
const int num_files,
MBEntityHandle& file_set,
const FileOptions &opts,
- const char* set_tag_name,
- const int* set_tag_values,
- const int num_tag_values );
+ const MBReaderIface::IDTag* subset_list = 0,
+ int subset_list_length = 0,
+ const MBTag* file_id_tag = 0 );
MBErrorCode load_file(const char **file_names,
const int num_files,
@@ -40,11 +40,12 @@
std::string &partition_tag_name,
std::vector<int> &partition_tag_vals,
bool distrib,
+ bool partition_by_rank,
std::vector<int> &pa_vec,
const FileOptions &opts,
- const char* set_tag_name,
- const int* set_tag_values,
- const int num_tag_values,
+ const MBReaderIface::IDTag* subset_list,
+ int subset_list_length,
+ const MBTag* file_id_tag,
const int reader_rank,
const bool cputime,
const int resolve_dim,
@@ -60,8 +61,12 @@
static const char *parallelOptsNames[];
- enum ParallelOpts {POPT_NONE=0, POPT_BCAST, POPT_BCAST_DELETE,
- POPT_READ_DELETE, POPT_READ_PART, POPT_DEFAULT};
+ enum ParallelOpts { POPT_NONE=0,
+ POPT_BCAST,
+ POPT_BCAST_DELETE,
+ POPT_READ_DELETE,
+ POPT_READ_PART,
+ POPT_DEFAULT};
//! PUBLIC TO ALLOW TESTING
MBErrorCode delete_nonlocal_entities(std::string &ptag_name,
@@ -72,6 +77,8 @@
MBErrorCode delete_nonlocal_entities(MBEntityHandle file_set);
protected:
+ MBErrorCode create_partition_sets( std::string &ptag_name,
+ MBEntityHandle file_set );
private:
@@ -84,12 +91,12 @@
inline MBErrorCode ReadParallel::load_file(const char *file_name,
MBEntityHandle& file_set,
const FileOptions &opts,
- const char* set_tag_name,
- const int* set_tag_values,
- const int num_tag_values )
+ const MBReaderIface::IDTag* subset_list,
+ int subset_list_length,
+ const MBTag* file_id_tag )
{
return load_file(&file_name, 1, file_set, opts,
- set_tag_name, set_tag_values, num_tag_values);
+ subset_list, subset_list_length, file_id_tag);
}
#endif
Modified: MOAB/trunk/parallel/parallel_hdf5_test.cc
===================================================================
--- MOAB/trunk/parallel/parallel_hdf5_test.cc 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/parallel/parallel_hdf5_test.cc 2009-07-27 21:11:26 UTC (rev 3061)
@@ -1,3 +1,4 @@
+#include "MBRange.hpp"
#include "TestUtil.hpp"
#include "MBCore.hpp"
@@ -8,8 +9,11 @@
#include <iostream>
#include <sstream>
+#include <algorithm>
#include <mpi.h>
#include <unistd.h>
+#include <float.h>
+#include <stdio.h>
#define STRINGIFY_(X) #X
#define STRINGIFY(X) STRINGIFY_(X)
@@ -30,8 +34,20 @@
void test_write_shared_sets();
void test_var_length_parallel();
+void test_read_elements_common( bool by_rank, int intervals, bool print_time );
+
+int ReadIntervals = 0;
+void test_read_elements() { test_read_elements_common( false, ReadIntervals, false ); }
+void test_read_elements_by_rank() { test_read_elements_common( true, ReadIntervals, false ); }
+void test_read_time();
+
+void test_read_tags();
+void test_read_global_tags();
+void test_read_sets();
+
bool KeepTmpFiles = false;
bool PauseOnStart = false;
+const int DefaultReadIntervals = 2;
int main( int argc, char* argv[] )
{
@@ -43,8 +59,14 @@
KeepTmpFiles = true;
else if (!strcmp( argv[i], "-p"))
PauseOnStart = true;
+ else if (!strcmp( argv[i], "-r")) {
+ ++i;
+ CHECK( i < argc );
+ ReadIntervals = atoi( argv[i] );
+ CHECK( ReadIntervals > 0 );
+ }
else {
- std::cerr << "Usage: " << argv[0] << " [-k] [-p]" << std::endl;
+ std::cerr << "Usage: " << argv[0] << " [-k] [-p] [-r <int>]" << std::endl;
return 1;
}
}
@@ -57,9 +79,28 @@
}
int result = 0;
- result += RUN_TEST( test_write_elements );
- result += RUN_TEST( test_write_shared_sets );
- result += RUN_TEST( test_var_length_parallel );
+ if (ReadIntervals) {
+ result = RUN_TEST( test_read_time );
+ }
+ else {
+ ReadIntervals = DefaultReadIntervals;
+ result += RUN_TEST( test_write_elements );
+ MPI_Barrier(MPI_COMM_WORLD);
+ result += RUN_TEST( test_write_shared_sets );
+ MPI_Barrier(MPI_COMM_WORLD);
+ result += RUN_TEST( test_var_length_parallel );
+ MPI_Barrier(MPI_COMM_WORLD);
+ result += RUN_TEST( test_read_elements );
+ MPI_Barrier(MPI_COMM_WORLD);
+ result += RUN_TEST( test_read_elements_by_rank );
+ MPI_Barrier(MPI_COMM_WORLD);
+ result += RUN_TEST( test_read_tags );
+ MPI_Barrier(MPI_COMM_WORLD);
+ result += RUN_TEST( test_read_global_tags );
+ MPI_Barrier(MPI_COMM_WORLD);
+ result += RUN_TEST( test_read_sets );
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
MPI_Finalize();
return result;
@@ -358,7 +399,7 @@
// load and partition a .cub file
MBCore moab_instance;
MBInterface& moab = moab_instance;
- load_and_partition( moab, InputFile, true );
+ load_and_partition( moab, InputFile, false );
// count number of owned entities of each type and sum over all procs
count_owned_entities( moab, proc_counts );
@@ -587,3 +628,447 @@
CHECK_EQUAL( 0, vtx_counts[j] );
}
}
+
+// create row of cubes of mesh
+void create_input_file( const char* file_name,
+ int intervals,
+ int num_cpu,
+ const char* ijk_vert_tag_name = 0,
+ const char* ij_set_tag_name = 0,
+ const char* global_tag_name = 0,
+ const int* global_mesh_value = 0,
+ const int* global_default_value = 0 )
+{
+ MBCore moab;
+ MBInterface& mb = moab;
+ MBErrorCode rval;
+
+ MBTag ijk_vert_tag = 0, ij_set_tag = 0, global_tag = 0;
+ if (ijk_vert_tag_name) {
+ rval = mb.tag_create( ijk_vert_tag_name, 3*sizeof(int), MB_TAG_DENSE,
+ MB_TYPE_INTEGER, ijk_vert_tag, 0 );
+ CHECK_ERR(rval);
+ }
+ if (ij_set_tag_name) {
+ rval = mb.tag_create( ij_set_tag_name, 2*sizeof(int), MB_TAG_SPARSE,
+ MB_TYPE_INTEGER, ij_set_tag, 0 );
+ CHECK_ERR(rval);
+ }
+ if (global_tag_name) {
+ rval = mb.tag_create( global_tag_name, sizeof(int), MB_TAG_DENSE,
+ MB_TYPE_INTEGER, global_tag, global_default_value );
+ CHECK_ERR(rval);
+ if (global_mesh_value) {
+ rval = mb.tag_set_data( global_tag, 0, 0, global_mesh_value );
+ CHECK_ERR(rval);
+ }
+ }
+
+
+ int iv = intervals+1, ii = num_cpu*intervals+1;
+ std::vector<MBEntityHandle> verts(iv*iv*ii);
+ int idx = 0;
+ for (int i = 0; i < ii; ++i) {
+ for (int j = 0; j < iv; ++j) {
+ int start = idx;
+ for (int k = 0; k < iv; ++k) {
+ const double coords[3] = {i, j, k};
+ rval = mb.create_vertex( coords, verts[idx] );
+ CHECK_ERR(rval);
+ if (ijk_vert_tag) {
+ int vals[] = {i,j,k};
+ rval = mb.tag_set_data( ijk_vert_tag, &verts[idx], 1, vals );
+ CHECK_ERR(rval);
+ }
+ ++idx;
+ }
+
+ if (ij_set_tag) {
+ MBEntityHandle set;
+ rval = mb.create_meshset( MESHSET_SET, set );
+ CHECK_ERR(rval);
+ rval = mb.add_entities( set, &verts[start], idx - start );
+ CHECK_ERR(rval);
+ int vals[] = { i, j };
+ rval = mb.tag_set_data( ij_set_tag, &set, 1, vals );
+ CHECK_ERR(rval);
+ }
+ }
+ }
+
+ const int eb = intervals*intervals*intervals;
+ std::vector<MBEntityHandle> elems(num_cpu*eb);
+ idx = 0;
+ for (int c = 0; c < num_cpu; ++c) {
+ for (int i = c*intervals; i < (c+1)*intervals; ++i) {
+ for (int j = 0; j < intervals; ++j) {
+ for (int k = 0; k < intervals; ++k) {
+ MBEntityHandle conn[8] = { verts[iv*(iv* i + j ) + k ],
+ verts[iv*(iv*(i + 1) + j ) + k ],
+ verts[iv*(iv*(i + 1) + j + 1) + k ],
+ verts[iv*(iv* i + j + 1) + k ],
+ verts[iv*(iv* i + j ) + k + 1],
+ verts[iv*(iv*(i + 1) + j ) + k + 1],
+ verts[iv*(iv*(i + 1) + j + 1) + k + 1],
+ verts[iv*(iv* i + j + 1) + k + 1] };
+
+ rval = mb.create_element( MBHEX, conn, 8, elems[idx++] );
+ CHECK_ERR(rval);
+ }
+ }
+ }
+ }
+
+ MBTag part_tag;
+ rval = mb.tag_create( "PARTITION", sizeof(int), MB_TAG_SPARSE, MB_TYPE_INTEGER, part_tag, 0 );
+ CHECK_ERR(rval);
+
+ std::vector<MBEntityHandle> parts(num_cpu);
+ for (int i = 0; i < num_cpu; ++i) {
+ rval = mb.create_meshset( MESHSET_SET, parts[i] );
+ CHECK_ERR(rval);
+ rval = mb.add_entities( parts[i], &elems[i*eb], eb );
+ CHECK_ERR(rval);
+ rval = mb.tag_set_data( part_tag, &parts[i], 1, &i );
+ CHECK_ERR(rval);
+ }
+
+ rval = mb.write_file( file_name, "MOAB" );
+ CHECK_ERR(rval);
+}
+
+void test_read_elements_common( bool by_rank, int intervals, bool print_time )
+{
+ const char *file_name = by_rank ? "test_read_rank.h5m" : "test_read.h5m";
+ int numproc, rank;
+ MPI_Comm_size( MPI_COMM_WORLD, &numproc );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MBCore moab;
+ MBInterface &mb = moab;
+ MBErrorCode rval;
+ MBEntityHandle file_set;
+
+ // if root processor, create hdf5 file for use in testing
+ if (0 == rank)
+ create_input_file( file_name, intervals, numproc );
+ MPI_Barrier(MPI_COMM_WORLD); // make sure root has completed writing the file
+
+ // do parallel read unless only one processor
+ const char opt1[] = "PARALLEL=READ_PART;PARTITION=PARTITION";
+ const char opt2[] = "PARALLEL=READ_PART;PARTITION=PARTITION;PARTITION_BY_RANK";
+ const char* opt = numproc == 1 ? 0 : by_rank ? opt2 : opt1;
+ rval = mb.load_file( file_name, file_set, opt );
+ MPI_Barrier(MPI_COMM_WORLD); // make sure all procs complete before removing file
+ if (0 == rank && !KeepTmpFiles) remove( file_name );
+ CHECK_ERR(rval);
+
+
+ MBTag part_tag;
+ rval = mb.tag_get_handle( "PARTITION", part_tag );
+ CHECK_ERR(rval);
+
+ MBRange parts;
+ rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &part_tag, 0, 1, parts );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( 1, (int)parts.size() );
+ MBEntityHandle part = parts.front();
+ int id;
+ rval = mb.tag_get_data( part_tag, &part, 1, &id );
+ CHECK_ERR(rval);
+ if (by_rank) {
+ CHECK_EQUAL( rank, id );
+ }
+
+ // check that all of the elements in the mesh are in the part
+ int npart, nall;
+ rval = mb.get_number_entities_by_dimension( part, 3, npart );
+ CHECK_ERR(rval);
+ rval = mb.get_number_entities_by_dimension( 0, 3, nall );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( npart, nall );
+
+ // check that we have the correct vertices
+ const double x_min = intervals*rank;
+ const double x_max = intervals*(rank+1);
+ MBRange verts;
+ rval = mb.get_entities_by_type( 0, MBVERTEX, verts );
+ CHECK_ERR(rval);
+ std::vector<double> coords(verts.size());
+ rval = mb.get_coords( verts, &coords[0], 0, 0 );
+ CHECK_ERR(rval);
+ const double act_x_min = *std::min_element( coords.begin(), coords.end() );
+ const double act_x_max = *std::max_element( coords.begin(), coords.end() );
+ CHECK_REAL_EQUAL( x_min, act_x_min, DBL_EPSILON );
+ CHECK_REAL_EQUAL( x_max, act_x_max, DBL_EPSILON );
+}
+
+void test_read_time()
+{
+ const char file_name[] = "read_time.h5m";
+ int numproc, rank;
+ MPI_Comm_size( MPI_COMM_WORLD, &numproc );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MBErrorCode rval;
+ MBEntityHandle file_set;
+
+ // if root processor, create hdf5 file for use in testing
+ if (0 == rank)
+ create_input_file( file_name, ReadIntervals, numproc );
+ MPI_Barrier( MPI_COMM_WORLD );
+
+ // CPU time for true parallel read, wall time for true parallel read,
+ // CPU time for read and delete, wall time for read and delete
+ double times[6];
+ clock_t tmp_t;
+
+ // Time true parallel read
+ MBCore moab;
+ MBInterface &mb = moab;
+ times[0] = MPI_Wtime();
+ tmp_t = clock();
+ const char opt[] = "PARALLEL=READ_PART;PARTITION=PARTITION;PARTITION_BY_RANK";
+ rval = mb.load_file( file_name, file_set, opt );
+ CHECK_ERR(rval);
+ times[0] = MPI_Wtime() - times[0];
+ times[1] = double(clock() - tmp_t) / CLOCKS_PER_SEC;
+
+ // Time read and delete
+ mb.delete_mesh();
+ times[2] = MPI_Wtime();
+ tmp_t = clock();
+ const char opt2[] = "PARALLEL=READ_DELETE;PARTITION=PARTITION;PARTITION_BY_RANK";
+ rval = mb.load_file( file_name, file_set, opt2 );
+ CHECK_ERR(rval);
+ times[2] = MPI_Wtime() - times[2];
+ times[3] = double(clock() - tmp_t) / CLOCKS_PER_SEC;
+
+ // Time broadcast and delete
+ mb.delete_mesh();
+ times[4] = MPI_Wtime();
+ tmp_t = clock();
+ const char opt3[] = "PARALLEL=BCAST_DELETE;PARTITION=PARTITION;PARTITION_BY_RANK";
+ rval = mb.load_file( file_name, file_set, opt3 );
+ CHECK_ERR(rval);
+ times[4] = MPI_Wtime() - times[4];
+ times[5] = double(clock() - tmp_t) / CLOCKS_PER_SEC;
+
+ double max_times[6] = {0,0,0,0,0,0}, sum_times[6] = {0,0,0,0,0,0};
+ MPI_Reduce( &times, &max_times, 6, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD );
+ MPI_Reduce( &times, &sum_times, 6, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD );
+ MPI_Barrier( MPI_COMM_WORLD );
+ if (0 == rank) {
+ printf( "%12s %12s %12s %12s\n", "", "READ_PART", "READ_DELETE", "BCAST_DELETE" );
+ printf( "%12s %12g %12g %12g\n", "Max Wall", max_times[0], max_times[2], max_times[4] );
+ printf( "%12s %12g %12g %12g\n", "Total Wall", sum_times[0], sum_times[2], sum_times[4] );
+ printf( "%12s %12g %12g %12g\n", "Max CPU", max_times[1], max_times[3], max_times[5] );
+ printf( "%12s %12g %12g %12g\n", "Total CPU", sum_times[1], sum_times[3], sum_times[5] );
+ }
+
+ MPI_Barrier( MPI_COMM_WORLD );
+ if (0 == rank && !KeepTmpFiles) remove( file_name );
+}
+
+void test_read_tags()
+{
+ const char tag_name[] = "test_tag_xx";
+ const char file_name[] = "test_read_tags.h5m";
+ int numproc, rank;
+ MPI_Comm_size( MPI_COMM_WORLD, &numproc );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MBCore moab;
+ MBInterface &mb = moab;
+ MBErrorCode rval;
+ MBEntityHandle file_set;
+
+ // if root processor, create hdf5 file for use in testing
+ if (0 == rank)
+ create_input_file( file_name, DefaultReadIntervals, numproc, tag_name );
+ MPI_Barrier(MPI_COMM_WORLD); // make sure root has completed writing the file
+
+ // do parallel read unless only one processor
+ const char opt1[] = "PARALLEL=READ_PART;PARTITION=PARTITION";
+ const char* opt = numproc == 1 ? 0 : opt1;
+ rval = mb.load_file( file_name, file_set, opt );
+ MPI_Barrier(MPI_COMM_WORLD); // make sure all procs complete before removing file
+ if (0 == rank && !KeepTmpFiles) remove( file_name );
+ CHECK_ERR(rval);
+
+ MBTag tag;
+ rval = mb.tag_get_handle( tag_name, tag );
+ CHECK_ERR(rval);
+
+ int size = -1;
+ rval = mb.tag_get_size( tag, size );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( 3*(int)sizeof(int), size );
+
+ MBTagType storage;
+ rval = mb.tag_get_type( tag, storage );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( MB_TAG_DENSE, storage );
+
+ MBDataType type;
+ rval = mb.tag_get_data_type( tag, type );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( MB_TYPE_INTEGER, type );
+
+ MBRange verts, tagged;
+ rval = mb.get_entities_by_type( 0, MBVERTEX, verts );
+ CHECK_ERR(rval);
+ rval = mb.get_entities_by_type_and_tag( 0, MBVERTEX, &tag, 0, 1, tagged );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( verts, tagged );
+
+ for (MBRange::iterator i = verts.begin(); i != verts.end(); ++i) {
+ double coords[3];
+ rval = mb.get_coords( &*i, 1, coords );
+ CHECK_ERR(rval);
+ int ijk[3];
+ rval = mb.tag_get_data( tag, &*i, 1, ijk );
+ CHECK_ERR(rval);
+
+ CHECK( ijk[0] >= DefaultReadIntervals * rank );
+ CHECK( ijk[0] <= DefaultReadIntervals * (rank+1) );
+ CHECK( ijk[1] >= 0 );
+ CHECK( ijk[1] <= DefaultReadIntervals );
+ CHECK( ijk[2] >= 0 );
+ CHECK( ijk[2] <= DefaultReadIntervals );
+
+ CHECK_REAL_EQUAL( coords[0], (double)ijk[0], 1e-100 );
+ CHECK_REAL_EQUAL( coords[1], (double)ijk[1], 1e-100 );
+ CHECK_REAL_EQUAL( coords[2], (double)ijk[2], 1e-100 );
+ }
+}
+
+void test_read_global_tags()
+{
+ const char tag_name[] = "test_tag_g";
+ const char file_name[] = "test_read_global_tags.h5m";
+ int numproc, rank;
+ MPI_Comm_size( MPI_COMM_WORLD, &numproc );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MBCore moab;
+ MBInterface &mb = moab;
+ MBErrorCode rval;
+ MBEntityHandle file_set;
+ const int def_val = 0xdeadcad;
+ const int global_val = -11;
+
+ // if root processor, create hdf5 file for use in testing
+ if (0 == rank)
+ create_input_file( file_name, 1, numproc, 0, 0, tag_name, &global_val, &def_val );
+ MPI_Barrier(MPI_COMM_WORLD); // make sure root has completed writing the file
+
+ // do parallel read unless only one processor
+ const char opt1[] = "PARALLEL=READ_PART;PARTITION=PARTITION";
+ const char* opt = numproc == 1 ? 0 : opt1;
+ rval = mb.load_file( file_name, file_set, opt );
+ MPI_Barrier(MPI_COMM_WORLD); // make sure all procs complete before removing file
+ if (0 == rank && !KeepTmpFiles) remove( file_name );
+ CHECK_ERR(rval);
+
+ MBTag tag;
+ rval = mb.tag_get_handle( tag_name, tag );
+ CHECK_ERR(rval);
+
+ int size = -1;
+ rval = mb.tag_get_size( tag, size );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( (int)sizeof(int), size );
+
+ MBTagType storage;
+ rval = mb.tag_get_type( tag, storage );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( MB_TAG_DENSE, storage );
+
+ MBDataType type;
+ rval = mb.tag_get_data_type( tag, type );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( MB_TYPE_INTEGER, type );
+
+ int mesh_def_val, mesh_gbl_val;
+ rval = mb.tag_get_default_value( tag, &mesh_def_val );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( def_val, mesh_def_val );
+ rval = mb.tag_get_data( tag, 0, 0, &mesh_gbl_val );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( global_val, mesh_gbl_val );
+}
+
+void test_read_sets()
+{
+ const char tag_name[] = "test_tag_s";
+ const char file_name[] = "test_read_sets.h5m";
+ int numproc, rank;
+ MPI_Comm_size( MPI_COMM_WORLD, &numproc );
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MBCore moab;
+ MBInterface &mb = moab;
+ MBErrorCode rval;
+ MBEntityHandle file_set;
+
+ // if root processor, create hdf5 file for use in testing
+ if (0 == rank)
+ create_input_file( file_name, DefaultReadIntervals, numproc, 0, tag_name );
+ MPI_Barrier(MPI_COMM_WORLD); // make sure root has completed writing the file
+
+ // do parallel read unless only one processor
+ const char opt1[] = "PARALLEL=READ_PART;PARTITION=PARTITION";
+ const char* opt = numproc == 1 ? 0 : opt1;
+ rval = mb.load_file( file_name, file_set, opt );
+ MPI_Barrier(MPI_COMM_WORLD); // make sure all procs complete before removing file
+ if (0 == rank && !KeepTmpFiles) remove( file_name );
+ CHECK_ERR(rval);
+
+ MBTag tag;
+ rval = mb.tag_get_handle( tag_name, tag );
+ CHECK_ERR(rval);
+
+ int size = -1;
+ rval = mb.tag_get_size( tag, size );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( 2*(int)sizeof(int), size );
+
+ MBTagType storage;
+ rval = mb.tag_get_type( tag, storage );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( MB_TAG_SPARSE, storage );
+
+ MBDataType type;
+ rval = mb.tag_get_data_type( tag, type );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( MB_TYPE_INTEGER, type );
+
+ const int iv = DefaultReadIntervals + 1;
+ MBRange sets;
+ rval = mb.get_entities_by_type_and_tag( 0, MBENTITYSET, &tag, 0, 1, sets );
+ CHECK_ERR(rval);
+ CHECK_EQUAL( (MBEntityHandle)(iv*iv), sets.size() );
+
+ for (MBRange::iterator i = sets.begin(); i != sets.end(); ++i) {
+ int ij[2];
+ rval = mb.tag_get_data( tag, &*i, 1, &ij );
+ CHECK_ERR(rval);
+
+ CHECK( ij[0] >= DefaultReadIntervals * rank );
+ CHECK( ij[0] <= DefaultReadIntervals * (rank+1) );
+ CHECK( ij[1] >= 0 );
+ CHECK( ij[1] <= DefaultReadIntervals );
+
+ MBRange contents;
+ rval = mb.get_entities_by_handle( *i, contents );
+ CHECK_ERR(rval);
+ CHECK(contents.all_of_type(MBVERTEX));
+ CHECK_EQUAL( (MBEntityHandle)iv, contents.size() );
+
+ for (MBRange::iterator v = contents.begin(); v != contents.end(); ++v) {
+ double coords[3];
+ rval = mb.get_coords( &*v, 1, coords );
+ CHECK_ERR(rval);
+ CHECK_REAL_EQUAL( coords[0], (double)ij[0], 1e-100 );
+ CHECK_REAL_EQUAL( coords[1], (double)ij[1], 1e-100 );
+ }
+ }
+}
+
+
Modified: MOAB/trunk/test/h5file/h5partial.cpp
===================================================================
--- MOAB/trunk/test/h5file/h5partial.cpp 2009-07-27 18:05:42 UTC (rev 3060)
+++ MOAB/trunk/test/h5file/h5partial.cpp 2009-07-27 21:11:26 UTC (rev 3061)
@@ -228,7 +228,12 @@
MBEntityHandle file_set;
int id = non_existant ? 8 : 7;
rval = mb.load_file( TEST_FILE, file_set, READ_OPTS, ID_TAG_NAME, &id, 1 );
- CHECK_ERR( rval );
+ if (non_existant) {
+ CHECK_EQUAL( MB_ENTITY_NOT_FOUND, rval );
+ return;
+ }
+ else
+ CHECK_ERR( rval );
// the file should contain exactly two sets (the specified one and the new
// file set, and nothing else.)