[MOAB-dev] r1988 - MOAB/trunk/parallel

kraftche at mcs.anl.gov
Mon Jul 7 17:03:19 CDT 2008


Author: kraftche
Date: 2008-07-07 17:03:19 -0500 (Mon, 07 Jul 2008)
New Revision: 1988

Added:
   MOAB/trunk/parallel/parallel_hdf5_test.cc
   MOAB/trunk/parallel/ptest.cub
Modified:
   MOAB/trunk/parallel/Makefile.am
   MOAB/trunk/parallel/WriteHDF5Parallel.cpp
   MOAB/trunk/parallel/WriteHDF5Parallel.hpp
Log:
o re-implement communication of file ids using MBParallelComm::exchange_tags
   (not working yet)
o fix 64-bit bug (int vs long) in MPI communication call
o fix build errors in debug code
o add new test for parallel hdf5 writer

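The first log item replaces the hand-rolled allgather of ID ranges with
MOAB's generic tag exchange. A minimal sketch of the pattern, assuming an
initialized MBInterface* iFace and MBParallelComm* pcomm, with owned_ents
and remote_ents (MBRange) plus my_ids and their_ids
(std::vector<MBEntityHandle>) as illustrative placeholders; the real
implementation is exchange_file_ids() in the diff below:

    // Create a dense tag to carry file IDs across the partition
    // interface; the default value 0 marks "no ID assigned yet".
    MBEntityHandle default_val = 0;
    MBTag tag = 0;
    MBErrorCode rval = iFace->tag_create( "__example_fileid",
                                          sizeof(MBEntityHandle),
                                          MB_TAG_DENSE, MB_TYPE_HANDLE,
                                          tag, &default_val );
    if (MB_SUCCESS != rval) return rval;

    // Store locally assigned file IDs on the shared entities owned here,
    // let MBParallelComm propagate the tag to sharing processors, then
    // read back the IDs that the remote owners assigned.
    rval = iFace->tag_set_data( tag, owned_ents, &my_ids[0] );
    if (MB_SUCCESS == rval)
      rval = pcomm->exchange_tags( tag );
    if (MB_SUCCESS == rval)
      rval = iFace->tag_get_data( tag, remote_ents, &their_ids[0] );
    iFace->tag_delete( tag );
    return rval;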

Modified: MOAB/trunk/parallel/Makefile.am
===================================================================
--- MOAB/trunk/parallel/Makefile.am	2008-07-07 21:12:48 UTC (rev 1987)
+++ MOAB/trunk/parallel/Makefile.am	2008-07-07 22:03:19 UTC (rev 1988)
@@ -42,6 +42,8 @@
   INCLUDES += -I$(top_srcdir)/mhdf/include
   MOAB_PARALLEL_SRCS += WriteHDF5Parallel.cpp 
   MOAB_PARALLEL_HDRS += WriteHDF5Parallel.hpp
+  
+  MOAB_PARALLEL_TEST += parallel_hdf5_test
 endif
 
   MOAB_PARALLEL_CHECK += mbparallelcomm_test
@@ -62,9 +64,11 @@
 
 # Tests and such
 
-
 check_PROGRAMS = $(MOAB_PARALLEL_CHECK) $(MOAB_PARALLEL_TEST)
 TESTS = $(MOAB_PARALLEL_TEST)
 pcomm_unit_SOURCES = pcomm_unit.cpp
 pcomm_unit_LDADD = $(top_builddir)/libMOAB.la
+parallel_hdf5_test_SOURCES = parallel_hdf5_test.cc
+parallel_hdf5_test_LDADD = ../libMOAB.la
+parallel_hdf5_test_CPPFLAGS = -DSRCDIR=$(srcdir)
 

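Note that the new _CPPFLAGS entry passes the source directory as a bare,
unquoted token. The test turns it into a string literal with a two-level
stringify so it can locate ptest.cub in out-of-source builds; the pattern,
quoted from parallel_hdf5_test.cc below with comments added:

    #define STRINGIFY_(X) #X           /* stringize the expanded token */
    #define STRINGIFY(X) STRINGIFY_(X) /* extra level expands SRCDIR first */

    #ifdef SRCDIR
    const char* InputFile = STRINGIFY(SRCDIR) "/ptest.cub";
    #else
    const char* InputFile = "ptest.cub";
    #endif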
Modified: MOAB/trunk/parallel/WriteHDF5Parallel.cpp
===================================================================
--- MOAB/trunk/parallel/WriteHDF5Parallel.cpp	2008-07-07 21:12:48 UTC (rev 1987)
+++ MOAB/trunk/parallel/WriteHDF5Parallel.cpp	2008-07-07 22:03:19 UTC (rev 1988)
@@ -41,7 +41,7 @@
 
 #ifdef DEBUG
 #  define START_SERIAL                     \
-     for (int _x = 0; _x < myPcomm->proc_config().proc_size(); ++_x) {\
+     for (unsigned _x = 0; _x < myPcomm->proc_config().proc_size(); ++_x) {\
        MPI_Barrier( MPI_COMM_WORLD );      \
        if (_x != myPcomm->proc_config().proc_rank()) continue     
 #  define END_SERIAL                       \
@@ -122,20 +122,19 @@
 #ifndef DEBUG
 static void print_type_sets( MBInterface* , int , int , MBRange& ) {}
 #else
-static void print_type_sets( MBInterface* iFace, int myPcomm->proc_config().proc_rank(), int myPcomm->proc_config().proc_size(), MBRange& sets )
+static void print_type_sets( MBInterface* iFace, int rank, int size, MBRange& sets )
 {
-  MBTag gid, did, bid, sid, nid, iid;
+  MBTag gid, did, bid, sid, nid;
   iFace->tag_get_handle( GLOBAL_ID_TAG_NAME, gid ); 
   iFace->tag_get_handle( GEOM_DIMENSION_TAG_NAME, did );
   iFace->tag_get_handle( MATERIAL_SET_TAG_NAME, bid );
   iFace->tag_get_handle( DIRICHLET_SET_TAG_NAME, nid );
   iFace->tag_get_handle( NEUMANN_SET_TAG_NAME, sid );
-  iFace->tag_get_handle( PARALLEL_PARTITION_TAG_NAME, iid );
   MBRange typesets[10];
-  const char* typenames[] = {"Block", "Sideset", "NodeSet", "Vertex", "Curve", "Surface", "Volume", "Body", "Partition", "Other"};
+  const char* typenames[] = {"Block", "Sideset", "NodeSet", "Vertex", "Curve", "Surface", "Volume", "Body", "Other"};
   for (MBRange::iterator riter = sets.begin(); riter != sets.end(); ++riter)
   {
-    unsigned dim, id, proc[2], oldsize;
+    unsigned dim, id, oldsize;
     if (MB_SUCCESS == iFace->tag_get_data(bid, &*riter, 1, &id)) 
       dim = 0;
     else if (MB_SUCCESS == iFace->tag_get_data(sid, &*riter, 1, &id))
@@ -147,11 +146,6 @@
       iFace->tag_get_data(gid, &*riter, 1, &id);
       dim += 3;
     }
-    else if (MB_SUCCESS == iFace->tag_get_data(iid, &*riter, 1, proc)) {
-      assert(proc[0] == (unsigned)myPcomm->proc_config().proc_rank() || proc[1] == (unsigned)myPcomm->proc_config().proc_rank());
-      id = proc[proc[0] == (unsigned)myPcomm->proc_config().proc_rank()];
-      dim = 8;
-    }
     else {
       id = *riter;
       dim = 9;
@@ -161,7 +155,7 @@
     typesets[dim].insert( id );
     assert( typesets[dim].size() - oldsize == 1 );  
   }
-  for (int ii = 0; ii < 10; ++ii)
+  for (int ii = 0; ii < 9; ++ii)
   {
     char num[16];
     std::string line(typenames[ii]);
@@ -415,8 +409,6 @@
  
   rval = create_node_table( dimension );
   if (MB_SUCCESS != rval) return rval;
-  rval = communicate_remote_ids( MBVERTEX );
-  if (MB_SUCCESS != rval) return rval;
   
     /**************** Create element tables ***************/
 
@@ -424,14 +416,14 @@
   if (MB_SUCCESS != rval) return rval;
   rval = create_element_tables();
   if (MB_SUCCESS != rval) return rval;
-  for (std::list<ExportSet>::iterator ex_itor = exportList.begin(); 
-       ex_itor != exportList.end(); ++ex_itor)
-  {
-    rval = communicate_remote_ids( ex_itor->type );
-    assert(MB_SUCCESS == rval);
-  }
   
   
+    /*************** Exchange file IDs *****************/
+
+  rval = exchange_file_ids();
+  if (MB_SUCCESS != rval) return rval;
+ 
+
     /**************** Create adjacency tables *********************/
   
   rval = create_adjacency_tables();
@@ -616,7 +608,7 @@
       return MB_FAILURE;
     }
     mhdf_closeData( filePtr, handle, &status );
- }
+  }
     
     // send id offset to every proc
   result = MPI_Bcast( &first_id, 1, MPI_LONG, 0, MPI_COMM_WORLD );
@@ -637,13 +629,13 @@
   }
   
    // send each proc its offset in the node table
-  int offset;
-  result = MPI_Scatter( &node_counts[0], 1, MPI_INT, 
-                        &offset, 1, MPI_INT,
+  long offset;
+  result = MPI_Scatter( &node_counts[0], 1, MPI_LONG, 
+                        &offset, 1, MPI_LONG,
                         0, MPI_COMM_WORLD );
   assert(MPI_SUCCESS == result);
   nodeSet.offset = offset;
-  
+
   return assign_ids( nodeSet.range, nodeSet.first_id + nodeSet.offset );
 }
 
@@ -1903,129 +1895,72 @@
 }
 
 
-MBErrorCode WriteHDF5Parallel::communicate_remote_ids( MBEntityType type )
+MBErrorCode WriteHDF5Parallel::exchange_file_ids()
 {
-  int result;
-  MBErrorCode rval;
+    // create tag to store file IDs
+  MBEntityHandle default_val = 0;
+  MBTag file_id_tag = 0;
+  MBErrorCode rval = iFace->tag_create( "__hdf5_ll_fileid", 
+                                        sizeof(MBEntityHandle),
+                                        MB_TAG_DENSE,
+                                        MB_TYPE_HANDLE,
+                                        file_id_tag,
+                                        &default_val );
+  if (MB_SUCCESS != rval)
+    return rval;
 
-    // Get the export set for the specified type
-  ExportSet* export_set = 0;
-  if (type == MBVERTEX)
-    export_set = &nodeSet;
-  else if(type == MBENTITYSET)
-    export_set = &setSet;
-  else
-  {
-    for (std::list<ExportSet>::iterator esiter = exportList.begin();
-         esiter != exportList.end(); ++esiter)
-      if (esiter->type == type)
-      {
-        export_set = &*esiter;
-        break;
-      }
+    // get file ids for my interface entities
+  MBRange::const_iterator i;
+  const MBRange& imesh = remoteMesh[myPcomm->proc_config().proc_rank()];
+  std::vector<MBEntityHandle> file_id_vect( imesh.size() );
+  std::vector<MBEntityHandle>::iterator j = file_id_vect.begin();
+  for (i = imesh.begin(); i != imesh.end(); ++i, ++j) {
+    *j = idMap.find( *i );
+    if (!*j) {
+      iFace->tag_delete( file_id_tag );
+      return MB_FAILURE;
+    }
   }
-  assert(export_set != NULL);
   
-    // Get the ranges in the set
-  std::vector<unsigned long> myranges;
-  MBRange::const_pair_iterator p_iter = export_set->range.const_pair_begin();
-  const MBRange::const_pair_iterator p_end = export_set->range.const_pair_end();
-  for ( ; p_iter != p_end; ++p_iter)
-  {
-    myranges.push_back( (*p_iter).first );
-    myranges.push_back( (*p_iter).second );
+    // store file IDs in tag
+  rval = iFace->tag_set_data( file_id_tag, imesh, &file_id_vect[0] );
+  if (MB_SUCCESS != rval) {
+    iFace->tag_delete( file_id_tag );
+    return rval;
   }
-
-  START_SERIAL;
-  printdebug("%s ranges to communicate:\n", MBCN::EntityTypeName(type));
-  for (unsigned long xx = 0; xx != myranges.size(); xx+=2)
-    printdebug("  %lu - %lu\n", myranges[xx], myranges[xx+1] );
-  END_SERIAL;
   
-    // Communicate the number of ranges and the start_id for
-    // each processor.
-  std::vector<int> counts(myPcomm->proc_config().proc_size()), offsets(myPcomm->proc_config().proc_size()), displs(myPcomm->proc_config().proc_size());
-  int mycount = myranges.size();
-  int mystart = export_set->first_id + export_set->offset;
-  result = MPI_Allgather( &mycount, 1, MPI_INT, &counts[0], 1, MPI_INT, MPI_COMM_WORLD );
-  assert(MPI_SUCCESS == result);
-  result = MPI_Allgather( &mystart, 1, MPI_INT, &offsets[0], 1, MPI_INT, MPI_COMM_WORLD );
-  assert(MPI_SUCCESS == result);
+    // do communication
+  rval = myPcomm->exchange_tags( file_id_tag );
+  if (MB_SUCCESS != rval) {
+    iFace->tag_delete( file_id_tag );
+    return rval;
+  }
   
-    // Communicate the ranges 
-  displs[0] = 0;
-  for (unsigned int i = 1; i < myPcomm->proc_config().proc_size(); ++i)
-    displs[i] = displs[i-1] + counts[i-1];
-  std::vector<unsigned long> allranges( displs[myPcomm->proc_config().proc_size()-1] + counts[myPcomm->proc_config().proc_size()-1] );
-  result = MPI_Allgatherv( &myranges[0], myranges.size(), MPI_UNSIGNED_LONG,
-                           &allranges[0], &counts[0], &displs[0],
-                           MPI_UNSIGNED_LONG, MPI_COMM_WORLD );
-  assert(MPI_SUCCESS == result);
-  
-  MBTag global_id_tag;
-  rval = iFace->tag_get_handle( PARALLEL_GID_TAG_NAME, global_id_tag );
-  assert(MB_SUCCESS == rval);
-  
-    // Set file IDs for each communicated entity
-    
-    // For each processor
-  for (unsigned int proc = 0; proc < myPcomm->proc_config().proc_size(); ++proc)
-  {
-    if (proc == myPcomm->proc_config().proc_rank())
+    // store file IDs for remote entities
+  for (unsigned p = 0; p < myPcomm->proc_config().proc_size(); ++p) {
+    if (p == myPcomm->proc_config().proc_rank())
       continue;
     
-      // Get data for corresponding processor
-    const int offset = offsets[proc];
-    const int count = counts[proc];
-    const unsigned long* const ranges = &allranges[displs[proc]];
+    file_id_vect.resize( remoteMesh[p].size() );
+    rval = iFace->tag_get_data( file_id_tag, remoteMesh[p], &file_id_vect[0] );
+    if (MB_SUCCESS != rval) {
+      iFace->tag_delete( file_id_tag );
+      return rval;
+    }
     
-      // For each geometry meshset in the interface
-    MBRange::iterator r_iter = MBRange::lower_bound( remoteMesh[proc].begin(),
-                                                     remoteMesh[proc].end(),
-                                                     CREATE_HANDLE(type,0,result) );
-    MBRange::iterator r_stop = MBRange::lower_bound( r_iter,
-                                                     remoteMesh[proc].end(),
-                                                     CREATE_HANDLE(type+1,0,result) );
-    for ( ; r_iter != r_stop; ++r_iter)
-    {
-      MBEntityHandle entity = *r_iter;
-
-        // Get handle on other processor
-      MBEntityHandle global;
-      rval = iFace->tag_get_data( global_id_tag, &entity, 1, &global );
-      assert(MB_SUCCESS == rval);
-
-        // Find corresponding fileid on other processor.
-        // This could potentially be n**2, but we will assume that
-        // the range list from each processor is short (typically 1).
-      int j, steps = 0;
-      unsigned long low, high;
-      for (j = 0; j < count; j += 2)
-      {
-        low = ranges[j];
-        high = ranges[j+1];
-        if (low <= global && high >= global)
-          break;
-        steps += (high - low) + 1;
+    j = file_id_vect.begin();
+    for (i = remoteMesh[p].begin(); i != remoteMesh[p].end(); ++i, ++j) {
+      if (*j == 0 || idMap.insert( *i, *j, 1 ) == idMap.end()) {
+         iFace->tag_delete( file_id_tag );
+         return MB_FAILURE;
       }
-      if (j >= count) {
-      printdebug("*** handle = %u, type = %d, id = %d, proc = %d\n",
-      (unsigned)global, (int)(iFace->type_from_handle(global)), (int)(iFace->id_from_handle(global)), proc);
-      for (int ii = 0; ii < count; ii+=2) 
-      printdebug("***  %u to %u\n", (unsigned)ranges[ii], (unsigned)ranges[ii+1] );
-      MBRange junk; junk.insert( global );
-      print_type_sets( iFace, myPcomm->proc_config().proc_rank(), myPcomm->proc_config().proc_size(), junk );
-      }
-      assert(j < count);
-      int fileid = offset + steps + (global - low);
-      RangeMap<MBEntityHandle,id_t>::iterator ri = idMap.insert( entity, fileid, 1 );
-      assert( ri != idMap.end() );
-    } // for(r_iter->range)
-  } // for(each processor)
+    }
+  }
   
-  return MB_SUCCESS;
+  return iFace->tag_delete( file_id_tag );
 }
 
+
 MBErrorCode WriteHDF5Parallel::get_sharedset_tags() 
 {
     // get all the sets

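The 64-bit fix is the MPI_Scatter hunk above: the counts buffer holds long
values, but each rank previously received its entry into an int typed as
MPI_INT, which reads the wrong bytes on LP64 platforms where int is 4 bytes
and long is 8. Both sides of the call must agree, as the patch makes them:

    // One 8-byte long per rank, scattered from a long buffer on the
    // root and received into a long, all typed MPI_LONG.
    long offset;
    int result = MPI_Scatter( &node_counts[0], 1, MPI_LONG,
                              &offset, 1, MPI_LONG,
                              0, MPI_COMM_WORLD );
    assert( MPI_SUCCESS == result );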
Modified: MOAB/trunk/parallel/WriteHDF5Parallel.hpp
===================================================================
--- MOAB/trunk/parallel/WriteHDF5Parallel.hpp	2008-07-07 21:12:48 UTC (rev 1987)
+++ MOAB/trunk/parallel/WriteHDF5Parallel.hpp	2008-07-07 22:03:19 UTC (rev 1988)
@@ -112,10 +112,11 @@
     MBErrorCode gather_interface_meshes();
     
       //! For entities that will be written by another 
+      //! processor but are referenced by entities on this
       //! processor, get the file Ids that will be assigned
       //! to those so they can be referenced by
       //! entities to be written on this processor.
-    MBErrorCode communicate_remote_ids(MBEntityType type);
+    MBErrorCode exchange_file_ids();
     
       //! Sort the list of tag information in the parent
       //! class by name so all procs have them in the same

Added: MOAB/trunk/parallel/parallel_hdf5_test.cc
===================================================================
--- MOAB/trunk/parallel/parallel_hdf5_test.cc	                        (rev 0)
+++ MOAB/trunk/parallel/parallel_hdf5_test.cc	2008-07-07 22:03:19 UTC (rev 1988)
@@ -0,0 +1,294 @@
+#include "TestUtil.hpp"
+
+#include "MBCore.hpp"
+#include "MBParallelComm.hpp"
+#include "MBTagConventions.hpp"
+#include "MBCN.hpp"
+
+#include <iostream>
+#include <cstring>   // strcmp
+#include <cstdio>    // remove
+#include <map>       // std::map in check_identical_mesh
+#include <mpi.h>
+
+#define STRINGIFY_(X) #X
+#define STRINGIFY(X) STRINGIFY_(X)
+
+#ifdef SRCDIR
+const char* InputFile = STRINGIFY(SRCDIR) "/ptest.cub";
+#else
+const char* InputFile = "ptest.cub";
+#endif
+
+void load_and_partition( MBInterface& moab, const char* filename );
+
+void save_and_load_on_root( MBInterface& moab, const char* tmp_filename );
+
+void check_identical_mesh( MBInterface& moab1, MBInterface& moab2 );
+
+void test_write_elements();
+
+void test_write_shared_sets();
+
+bool KeepTmpFiles = false;
+
+int main( int argc, char* argv[] )
+{
+  int err = MPI_Init( &argc, &argv );
+  CHECK(!err);
+
+  if (argc == 2 && !strcmp(argv[1],"-k"))
+    KeepTmpFiles = true;
+  else if (argc != 1) {
+    std::cerr << "Usage: " << argv[0] << " [-k]" << std::endl;
+    return 1;
+  }
+  
+  int result = 0;
+  result += RUN_TEST( test_write_elements );
+  result += RUN_TEST( test_write_shared_sets );
+  
+  MPI_Finalize();
+  return result;
+}
+
+void load_and_partition( MBInterface& moab, const char* filename )
+{
+  MBErrorCode rval;
+  MBEntityHandle set;
+  
+  rval = moab.load_file( filename, set, 
+                         "PARALLEL=READ_DELETE;"
+                         "PARTITION=GEOM_DIMENSION;PARTITION_VAL=3;"
+                         "PARTITION_DISTRIBUTE;"
+                         "PARALLEL_RESOLVE_SHARED_ENTS" );
+  CHECK_ERR(rval);
+}
+
+void save_and_load_on_root( MBInterface& moab, const char* tmp_filename )
+{
+  MBErrorCode rval;
+  MBEntityHandle set;
+  int procnum;
+  MPI_Comm_rank( MPI_COMM_WORLD, &procnum );
+  
+  rval = moab.write_file( tmp_filename, 0, "PARALLEL=FORMAT" );
+  if (MB_SUCCESS != rval) {
+    std::cerr << "Parallel write failed on processor " << procnum << std::endl;
+    if (procnum == 0 && !KeepTmpFiles)
+      remove( tmp_filename );
+    CHECK_ERR(rval);
+  }
+  
+  moab.delete_mesh();
+  if (procnum == 0) {
+    rval = moab.load_file( tmp_filename, set );
+    remove( tmp_filename );
+    CHECK_ERR(rval);
+  }
+}
+
+void count_owned_entities( MBInterface& moab, int counts[MBENTITYSET] )
+{
+  MBErrorCode rval;
+  MBParallelComm* pcomm = MBParallelComm::get_pcomm( &moab, 0 );
+  CHECK(0 != pcomm);
+  std::fill( counts, counts+MBENTITYSET, 0u );
+  
+  for (MBEntityType t = MBVERTEX; t < MBENTITYSET; ++t) {
+    MBRange range;
+    rval = moab.get_entities_by_type( 0, t, range );
+    CHECK_ERR(rval);
+    rval = pcomm->remove_nonowned_shared( range, -1, true, false );
+    CHECK_ERR(rval);
+    counts[t] = range.size();
+  }
+}
+
+void check_identical_mesh( MBInterface& mb1, MBInterface& mb2 )
+{
+  MBErrorCode rval;
+  std::map<MBEntityHandle,MBEntityHandle> entmap;
+  
+    // match vertices by coordinate
+  MBRange r1, r2;
+  MBRange::iterator i1, i2;
+  rval = mb1.get_entities_by_type( 0, MBVERTEX, r1 );
+  CHECK_ERR(rval);
+  rval = mb2.get_entities_by_type( 0, MBVERTEX, r2 );
+  CHECK_ERR(rval);
+  CHECK_EQUAL( r1.size(), r2.size() );
+  for (i1 = r1.begin(); i1 != r1.end(); ++i1) {
+    double coords1[3];
+    rval = mb1.get_coords( &*i1, 1, coords1 );
+    CHECK_ERR(rval);
+    for (i2 = r2.begin(); i2 != r2.end(); ++i2) {
+      double coords2[3];
+      rval = mb2.get_coords( &*i2, 1, coords2 );
+      CHECK_ERR(rval);
+      coords2[0] -= coords1[0];
+      coords2[1] -= coords1[1];
+      coords2[2] -= coords1[2];
+      double lensqr = coords2[0]*coords2[0] + coords2[1]*coords2[1] + coords2[2]*coords2[2];
+      if (lensqr < 1e-12)
+        break;
+    }
+    CHECK( i2 != r2.end() );
+    entmap[*i2] = *i1;
+    r2.erase( i2 );
+  }
+  
+    // match element connectivity
+  std::vector<MBEntityHandle> conn1, conn2;
+  for (MBEntityType t = MBEDGE; t < MBENTITYSET; ++t) {
+    r1.clear();
+    rval = mb1.get_entities_by_type( 0, t, r1 );
+    CHECK_ERR(rval);
+    r2.clear();
+    rval = mb2.get_entities_by_type( 0, t, r2 );
+    CHECK_ERR(rval);
+    CHECK_EQUAL( r1.size(), r2.size() );
+    
+    for (i1 = r1.begin(); i1 != r1.end(); ++i1) {
+      conn1.clear();
+      rval = mb1.get_connectivity( &*i1, 1, conn1 );
+      CHECK_ERR(rval);
+      for (i2 = r2.begin(); i2 != r2.end(); ++i2) {
+        conn2.clear();
+        rval = mb2.get_connectivity( &*i2, 1, conn2 );
+        CHECK_ERR(rval);
+        if (conn1.size() != conn2.size())
+          continue;
+        for (std::vector<MBEntityHandle>::iterator j = conn2.begin(); j != conn2.end(); ++j)
+          *j = entmap[*j];
+        if (conn1 == conn2)
+          break;
+      }
+
+      CHECK( i2 != r2.end() );
+      entmap[*i2] = *i1;
+      r2.erase( i2 );
+    }
+  }
+}
+
+void test_write_elements()
+{
+  int proc_counts[MBENTITYSET], all_counts[MBENTITYSET], file_counts[MBENTITYSET];
+  int err, rank, size;
+  MBErrorCode rval;
+  err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+  CHECK(!err);
+  err = MPI_Comm_size( MPI_COMM_WORLD, &size );
+  CHECK(!err);
+  
+  MBCore moab_instance;
+  MBInterface& moab = moab_instance;
+  load_and_partition( moab, InputFile );
+  
+  count_owned_entities( moab, proc_counts );
+  std::fill( all_counts, all_counts+MBENTITYSET, 0u );
+  err = MPI_Allreduce( proc_counts, all_counts, MBENTITYSET, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+  CHECK(!err);
+  
+  save_and_load_on_root( moab, "test_write_elements.h5m" );
+  if (rank == 0) {
+    for (MBEntityType t = MBVERTEX; t < MBENTITYSET; ++t) {
+      rval = moab.get_number_entities_by_type( 0, t, file_counts[t] );
+      CHECK_ERR(rval);
+    }
+  }
+  
+  err = MPI_Bcast( file_counts, MBENTITYSET, MPI_INT, 0, MPI_COMM_WORLD );
+  CHECK(!err);
+  
+  bool all_equal = true;
+  for (MBEntityType t = MBVERTEX; t < MBENTITYSET; ++t) 
+    if (file_counts[t] != all_counts[t])
+      all_equal = false;
+    
+  if (rank == 0 && !all_equal) {
+    std::cerr << "Type\tPartnd\tWritten" << std::endl;
+    for (MBEntityType t = MBVERTEX; t < MBENTITYSET; ++t) 
+      std::cerr << MBCN::EntityTypeName(t) << '\t' << all_counts[t] << '\t' << file_counts[t] << std::endl;
+  }
+  
+  CHECK(all_equal);
+  
+  if (rank == 0) {
+    MBCore moab2;
+    MBEntityHandle set;
+    rval = moab2.load_file( InputFile, set );
+    CHECK_ERR(rval);
+    check_identical_mesh( moab, moab2 );
+  }
+}
+
+bool check_sets_sizes( MBInterface& mb1, MBEntityHandle set1,
+                       MBInterface& mb2, MBEntityHandle set2 )
+{
+  MBErrorCode rval;
+  bool result = true;
+  for (MBEntityType t = MBVERTEX; t < MBMAXTYPE; ++t) {
+    int count1, count2;
+    rval = mb1.get_number_entities_by_type( set1, t, count1 );
+    CHECK_ERR(rval);
+    rval = mb2.get_number_entities_by_type( set2, t, count2 );
+    CHECK_ERR(rval);
+    if (count1 != count2) {
+      std::cerr << "Sets differ in number of " << MBCN::EntityTypeName(t)
+                << " : " << count1 << " vs. " << count2 << std::endl;
+      result = false;
+    }
+  }
+  return result;
+}
+
+void test_write_shared_sets()
+{
+  int err, rank, size;
+  MBErrorCode rval;
+  err = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+  CHECK(!err);
+  err = MPI_Comm_size( MPI_COMM_WORLD, &size );
+  CHECK(!err);
+  
+  MBCore moab_instance;
+  MBInterface& moab = moab_instance;
+  load_and_partition( moab, InputFile );
+  save_and_load_on_root( moab, "test_write_shared_sets.h5m" );
+
+  if (rank != 0)
+    return;
+  
+  MBCore moab2_instance;
+  MBInterface& moab2 = moab2_instance;
+  MBEntityHandle set;
+  rval = moab2.load_file( InputFile, set );
+  CHECK_ERR(rval);
+  
+  MBTag mattag1, mattag2;
+  rval = moab.tag_get_handle( MATERIAL_SET_TAG_NAME, mattag1 );
+  CHECK_ERR(rval);
+  rval = moab2.tag_get_handle( MATERIAL_SET_TAG_NAME, mattag2 );
+  CHECK_ERR(rval);
+  
+  MBRange matsets;
+  rval = moab2.get_entities_by_type_and_tag( 0, MBENTITYSET, &mattag2, 0, 1, matsets );
+  CHECK_ERR(rval);
+  for (MBRange::iterator i = matsets.begin(); i != matsets.end(); ++i) {
+    int block_id;
+    rval = moab2.tag_get_data( mattag2, &*i, 1, &block_id );
+    CHECK_ERR(rval);
+    
+    MBRange tmpents;
+    void* tagdata[] = {&block_id};
+    rval = moab.get_entities_by_type_and_tag( 0, MBENTITYSET, &mattag1, tagdata, 1, tmpents );
+    if (tmpents.size() != 1) 
+      std::cerr << tmpents.size() << " sets with material set id " << block_id << std::endl;
+    CHECK_EQUAL( (int)tmpents.size(), 1 );
+  
+    CHECK( check_sets_sizes( moab2, *i, moab, tmpents.front() ) );
+  }
+}

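Since the test drives MPI itself and is registered through
MOAB_PARALLEL_TEST, it runs under "make check" but can also be launched by
hand; a typical invocation (the launcher name and process count are
assumptions, not part of this commit) would be something like:

    mpiexec -np 2 ./parallel_hdf5_test

Pass -k to keep the temporary .h5m files for inspection after a failure.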
Added: MOAB/trunk/parallel/ptest.cub
===================================================================
(Binary files differ)


Property changes on: MOAB/trunk/parallel/ptest.cub
___________________________________________________________________
Name: svn:mime-type
   + application/octet-stream
