[MOAB-dev] r2873 - in MOAB/trunk: . test/h5file

kraftche at cae.wisc.edu
Fri May 1 17:15:23 CDT 2009


Author: kraftche
Date: 2009-05-01 17:15:22 -0500 (Fri, 01 May 2009)
New Revision: 2873

Modified:
   MOAB/trunk/README.IO
   MOAB/trunk/ReadHDF5.cpp
   MOAB/trunk/ReadHDF5.hpp
   MOAB/trunk/test/h5file/h5partial.cpp
Log:
o Fix bugs
o Read recursively contained sets and their contents
o Add tests for recursively contained sets
o Add misc tests for sets stored in ranged format.


Modified: MOAB/trunk/README.IO
===================================================================
--- MOAB/trunk/README.IO	2009-05-01 21:55:05 UTC (rev 2872)
+++ MOAB/trunk/README.IO	2009-05-01 22:15:22 UTC (rev 2873)
@@ -144,9 +144,11 @@
 case NODES will be used.
 
    CHILDREN={NONE|SETS|CONTENTS}
+   SETS={NONE|SETS|CONTENTS}
 
-If reading only part of a file, specify which whether or not child sets
-of input sets are to be read.  The options are:
+If reading only part of a file, specify whether or not child or contained
+sets (CHILDREN and SETS, respectively) of input sets are to be read.  
+The options are:
    NONE     - Do not read sets because they are children of designated sets.
    SETS     - Read all child sets of designated input sets.
    CONTENTS - (Default).  Read all child sets and any entities contained

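A rough sketch of combining these options in a partial read follows; the file
name, tag name, ID value, and helper function are placeholders, and the
load_file call mirrors the one exercised in test/h5file/h5partial.cpp below.

    #include "MBInterface.hpp"

    // Partial read: load only the sets tagged with the given ID value,
    // pull in sets contained in them (SETS=SETS), and skip child sets
    // entirely (CHILDREN=NONE).
    MBErrorCode load_tagged_sets( MBInterface& mb, MBEntityHandle& file_set )
    {
      const int set_id = 3;
      return mb.load_file( "partial.h5m", file_set,
                           "CHILDREN=NONE;SETS=SETS",
                           "test_id_tag", &set_id, 1 );
    }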
Modified: MOAB/trunk/ReadHDF5.cpp
===================================================================
--- MOAB/trunk/ReadHDF5.cpp	2009-05-01 21:55:05 UTC (rev 2872)
+++ MOAB/trunk/ReadHDF5.cpp	2009-05-01 22:15:22 UTC (rev 2873)
@@ -392,20 +392,31 @@
   
 DEBUGOUT( "GATHERING ADDITIONAL ENTITIES" );
   
-  int set_mode;
   const char* const set_opts[] = { "NONE", "SETS", "CONTENTS" };
-  rval = opts.match_option( "CHILDREN", set_opts, set_mode );
+  int child_mode;
+  rval = opts.match_option( "CHILDREN", set_opts, child_mode );
   if (MB_ENTITY_NOT_FOUND == rval)
-    set_mode = 2;
+    child_mode = 2;
   else if (MB_SUCCESS != rval) {
     readUtil->report_error( "Invalid value for 'CHILDREN' option" );
     return error(rval);
   }
+  int content_mode;
+  rval = opts.match_option( "SETS", set_opts, content_mode );
+  if (MB_ENTITY_NOT_FOUND == rval)
+    content_mode = 2;
+  else if (MB_SUCCESS != rval) {
+    readUtil->report_error( "Invalid value for 'SETS' option" );
+    return error(rval);
+  }
   
+    // If we want the contents of contained/child sets, 
+    // search for them now (before gathering the non-set contents
+    // of the sets.)
   MBRange sets;
   intersect( fileInfo->sets, file_ids, sets );
-  if (set_mode == 2) {
-    rval = read_child_sets( sets );
+  if (content_mode == 2 || child_mode == 2) {
+    rval = read_set_ids_recursive( sets, content_mode == 2, child_mode == 2 );
     if (MB_SUCCESS != rval)
       return error(rval);
   }
@@ -516,10 +527,11 @@
   
 DEBUGOUT( "READING SETS" );
     
-    // If reading child sets, but not all contents of child sets,
-    // need to get child sets after having read other entity types.
-  if (set_mode == 1) {
-    rval = read_child_sets( sets );
+    // If reading contained/child sets but not their contents then find
+    // them now. If we were also reading their contents we would
+    // have found them already.
+  if (content_mode == 1 || child_mode == 1) {
+    rval = read_set_ids_recursive( sets, content_mode != 0, child_mode != 0 );
     if (MB_SUCCESS != rval)
       return error(rval);
   }
@@ -1228,9 +1240,15 @@
   return is_error(status) ? error(MB_FAILURE) : MB_SUCCESS;
 }
 
-MBErrorCode ReadHDF5::read_child_sets( MBRange& sets_in_out )
+MBErrorCode ReadHDF5::read_set_ids_recursive( MBRange& sets_in_out,
+                                              bool contained_sets,
+                                              bool child_sets )
 {
   if (!fileInfo->have_set_children)
+    child_sets = false;
+  if (!fileInfo->have_set_contents)
+    contained_sets = false;
+  if (!child_sets && !contained_sets)
     return MB_SUCCESS;
 
     // open data tables
@@ -1239,23 +1257,64 @@
     return MB_SUCCESS;
   }
   
+  if (!contained_sets && !child_sets)
+    return MB_SUCCESS;
+  
+  hid_t meta_handle, content_handle = 0, child_handle = 0;
+  
   mhdf_Status status;
-  hid_t meta_handle = mhdf_openSetMetaSimple( filePtr, &status );
+  meta_handle = mhdf_openSetMetaSimple( filePtr, &status );
   if (is_error(status))
     return error(MB_FAILURE);
   
-  long child_len = 0;
-  hid_t child_handle = mhdf_openSetChildren( filePtr, &child_len, &status );
-  if (is_error(status)) {
-    mhdf_closeData( filePtr, meta_handle, &status );
-    return error(MB_FAILURE);
+  if (contained_sets) {
+    long content_len = 0;
+    content_handle = mhdf_openSetData( filePtr, &content_len, &status );
+    if (is_error(status)) {
+      mhdf_closeData( filePtr, meta_handle, &status );
+      return error(MB_FAILURE);
+    }
   }
   
-  MBErrorCode rval = read_child_ids_recursive( sets_in_out, meta_handle, child_handle );
+  if (child_sets) {
+    long child_len = 0;
+    child_handle = mhdf_openSetChildren( filePtr, &child_len, &status );
+    if (is_error(status)) {
+      if (contained_sets)
+        mhdf_closeData( filePtr, content_handle, &status );
+      mhdf_closeData( filePtr, meta_handle, &status );
+      return error(MB_FAILURE);
+    }
+  }
   
-  mhdf_closeData( filePtr, child_handle, &status );
-  if (MB_SUCCESS == rval && is_error(status))
-    rval = error(MB_FAILURE);
+  MBErrorCode rval = MB_SUCCESS;
+  MBRange children, new_children(sets_in_out);
+  do {
+    children.clear();
+    if (child_sets) {
+      rval = read_child_ids( new_children, meta_handle, child_handle, children );
+      if (MB_SUCCESS != rval)
+        break;
+    }
+    if (contained_sets) {
+      rval = read_contained_set_ids( new_children, meta_handle, content_handle, children );
+      if (MB_SUCCESS != rval)
+        break;
+    }
+    new_children = children.subtract( sets_in_out );
+    sets_in_out.merge( new_children );
+  } while (!new_children.empty());
+  
+  if (child_sets) {
+    mhdf_closeData( filePtr, child_handle, &status );
+    if (MB_SUCCESS == rval && is_error(status))
+      rval = error(MB_FAILURE);
+  }
+  if (contained_sets) {
+    mhdf_closeData( filePtr, content_handle, &status );
+    if (MB_SUCCESS == rval && is_error(status))
+      rval = error(MB_FAILURE);
+  }
   mhdf_closeData( filePtr, meta_handle, &status );
   if (MB_SUCCESS == rval && is_error(status))
     rval = error(MB_FAILURE);
@@ -1395,8 +1454,8 @@
         
         long* buff_iter = content_buffer;
         for (long i = 0; i < sets_count; ++i) {
-          long set_size = offset_buffer[i] - prev_idx;
-          prev_idx = offset_buffer[i];
+          long set_size = offset_buffer[i+sets_offset] - prev_idx;
+          prev_idx += set_size;
           if (set_map_intersect( flag_buffer[sets_offset+i],
                                  buff_iter, set_size, idMap )) {
             long id = fileInfo->sets.start_id + offset + sets_offset + i;
@@ -1417,24 +1476,6 @@
   return MB_SUCCESS;
 }
 
-MBErrorCode ReadHDF5::read_child_ids_recursive( MBRange& file_ids,
-                                                hid_t meta_handle,
-                                                hid_t child_handle )
-{
-  MBErrorCode rval = MB_SUCCESS;
-  MBRange children, new_children(file_ids);
-  do {
-    children.clear();
-    rval = read_child_ids( new_children, meta_handle, child_handle, children );
-    if (MB_SUCCESS != rval)
-      break;
-    
-    new_children = children.subtract( file_ids );
-    file_ids.merge( new_children );
-  } while (!new_children.empty());
-  return rval;
-}
-
 MBErrorCode ReadHDF5::read_child_ids( const MBRange& input_file_ids,
                                       hid_t meta_handle,
                                       hid_t child_handle,
@@ -1491,6 +1532,106 @@
   return MB_SUCCESS;
 }
 
+MBErrorCode ReadHDF5::read_contained_set_ids( const MBRange& input_file_ids,
+                                              hid_t meta_handle,
+                                              hid_t content_handle,
+                                              MBRange& contained_set_file_ids )
+{
+  mhdf_Status status;
+  long buffer_size = bufferSize / (sizeof(long) + sizeof(short));
+    // don't want to worry about reading half of a range pair later
+  if (buffer_size % 2) --buffer_size;
+  long* content_buffer = reinterpret_cast<long*>(dataBuffer);
+  unsigned short* flag_buffer = reinterpret_cast<unsigned short*>(content_buffer + buffer_size);
+  long first, range[2], count, remaining, sets_offset;
+
+  MBRange sets(input_file_ids);
+  MBRange::iterator hint;
+  while (!sets.empty()) {
+    count = (long)sets.const_pair_begin()->second - sets.front() + 1;
+    first = (long)sets.front() - fileInfo->sets.start_id;
+    if (count > buffer_size)
+      count = buffer_size;
+    sets.erase( sets.begin(), sets.begin() + count );
+    
+    mhdf_readSetFlags( meta_handle, first, count, H5T_NATIVE_USHORT, flag_buffer, &status );
+    if (is_error(status))
+      return error(MB_FAILURE);
+    
+    sets_offset = 0;
+    while (sets_offset < count) {
+        // Find block of sets with same value for ranged flag
+      long start_idx = sets_offset;
+      unsigned short ranged = flag_buffer[start_idx] & mhdf_SET_RANGE_BIT;
+      for (++sets_offset; sets_offset < count; ++sets_offset)
+        if ((flag_buffer[sets_offset] & mhdf_SET_RANGE_BIT) != ranged)
+          break;
+          
+      if (!first && !start_idx) { // first set
+        range[0] = -1;
+        mhdf_readSetContentEndIndices( meta_handle, first+sets_offset-1, 1, 
+                                       H5T_NATIVE_LONG, range+1, &status );
+        if (is_error(status))
+          return error(MB_FAILURE);
+      }
+      else if (count == 1) {
+        mhdf_readSetContentEndIndices( meta_handle, first+start_idx-1, 2, 
+                                       H5T_NATIVE_LONG, range, &status );
+        if (is_error(status))
+          return error(MB_FAILURE);
+      }
+      else {
+        mhdf_readSetContentEndIndices( meta_handle, first+start_idx-1, 1, 
+                                       H5T_NATIVE_LONG, range, &status );
+        if (is_error(status))
+          return error(MB_FAILURE);
+        mhdf_readSetContentEndIndices( meta_handle, first+sets_offset-1, 1, 
+                                       H5T_NATIVE_LONG, range+1, &status );
+        if (is_error(status))
+          return error(MB_FAILURE);
+      }
+    
+      remaining = range[1] - range[0];
+      long offset = range[0] + 1;
+      while (remaining) {
+        assert( !ranged || !(remaining % 2) );
+        long content_count = std::min( buffer_size, remaining );
+        remaining -= content_count;
+        mhdf_readSetData( content_handle, offset, content_count, H5T_NATIVE_LONG, content_buffer, &status );
+        if (is_error(status))
+          return error(MB_FAILURE);
+        offset += content_count;
+  
+        if (ranged) {
+          hint = contained_set_file_ids.begin();
+          for (long i = 0; i < content_count; i += 2) {
+            MBEntityHandle s = (MBEntityHandle)content_buffer[i];
+            MBEntityHandle e = s + content_buffer[i+1];
+            if ((long)s < fileInfo->sets.start_id)
+              s = fileInfo->sets.start_id;
+            if ((long)e > fileInfo->sets.start_id + fileInfo->sets.count)
+              e = fileInfo->sets.start_id + fileInfo->sets.count;
+            if (s < e) 
+              hint = contained_set_file_ids.insert( hint, s, e - 1 );
+          }
+        }
+        else {
+          std::sort( content_buffer, content_buffer + content_count );
+          long* s = std::lower_bound( content_buffer, content_buffer + content_count,
+                                      fileInfo->sets.start_id );
+          long* e = std::lower_bound( s, content_buffer + content_count, 
+                                      fileInfo->sets.start_id + fileInfo->sets.count );
+          e = std::unique( s, e );
+          hint = contained_set_file_ids.begin();
+          for ( ; s != e; ++s) {
+            MBEntityHandle h = *s;
+            hint = contained_set_file_ids.insert( hint, h, h );
+          }
+        }
+      }
+    }
+  }
+  
+  return MB_SUCCESS;
+}
+
 MBErrorCode ReadHDF5::read_sets( const MBRange& file_ids,
                                  hid_t meta_handle, 
                                  MBRange& ranged_file_ids,

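The core of read_set_ids_recursive above is a fixed-point iteration over set
file IDs.  A schematic, stand-alone version of the same idea is sketched
below; IdSet, gather_set_ids, and the referenced_sets callback are
illustrative stand-ins for MBRange and read_child_ids/read_contained_set_ids,
not the actual MOAB types.

    #include <set>
    #include <algorithm>
    #include <iterator>

    typedef std::set<long> IdSet;

    // Starting from the explicitly requested sets, repeatedly look up the
    // sets referenced by the newly discovered ones (children and/or
    // contained sets) until no new IDs appear.
    void gather_set_ids( IdSet& sets_in_out,
                         IdSet (*referenced_sets)( const IdSet& ) )
    {
      IdSet new_sets( sets_in_out );
      do {
        IdSet found = referenced_sets( new_sets );
        new_sets.clear();
          // keep only IDs we have not seen before
        std::set_difference( found.begin(), found.end(),
                             sets_in_out.begin(), sets_in_out.end(),
                             std::inserter( new_sets, new_sets.begin() ) );
        sets_in_out.insert( new_sets.begin(), new_sets.end() );
      } while (!new_sets.empty());
    }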
Modified: MOAB/trunk/ReadHDF5.hpp
===================================================================
--- MOAB/trunk/ReadHDF5.hpp	2009-05-01 21:55:05 UTC (rev 2872)
+++ MOAB/trunk/ReadHDF5.hpp	2009-05-01 22:15:22 UTC (rev 2873)
@@ -246,7 +246,9 @@
   /** Given a list of file IDs for entity sets, find all child sets
    *  (at any depth) and append them to the MBRange of file IDs.
    */
-  MBErrorCode read_child_sets( MBRange& sets_in_out );
+  MBErrorCode read_set_ids_recursive( MBRange& sets_in_out,
+                                      bool contained_sets,
+                                      bool child_sets );
   
   /** Find all sets containing one or more entities read from the file
    *  and added to idMap 
@@ -269,11 +271,6 @@
                                     hid_t content_handle, 
                                     long content_len,
                                     MBRange& file_ids );  
-  
-  /** Recursively find all child sets contained in file */
-  MBErrorCode read_child_ids_recursive( MBRange& file_ids,
-                                        hid_t meta_handle,
-                                        hid_t child_handle );
  
   /** Given a list of file IDs for entity sets, read the list of 
    *  file IDs for all child entity sets.
@@ -282,6 +279,14 @@
                               hid_t meta_handle,
                               hid_t child_handle,
                               MBRange& child_file_ids );
+ 
+  /** Given a list of file IDs for entity sets, read the list of 
+   *  file IDs for all contained entity sets.
+   */
+  MBErrorCode read_contained_set_ids( const MBRange& set_file_ids,
+                                      hid_t meta_handle,
+                                      hid_t contents_handle,
+                                      MBRange& contained_set_file_ids );
     
     /**\brief Create sets 
      *

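The new read_contained_set_ids declared above has to cope with both on-disk
formats for set contents: a flat list of IDs, or, when mhdf_SET_RANGE_BIT is
set in the set flags, (start, count) pairs.  A rough illustration of the
filtering it performs on one buffer of contents data follows; the function
name, the plain-vector output, and the start_id/num_sets parameters are
illustrative only.

    #include <vector>
    #include <algorithm>

    // Filter one buffer of set-contents data down to the set file IDs it
    // references.  start_id and num_sets describe the block of set file IDs
    // in the file (fileInfo->sets.start_id / fileInfo->sets.count above).
    void contained_set_ids( const long* buffer, long count, bool ranged,
                            long start_id, long num_sets,
                            std::vector<long>& set_ids_out )
    {
      if (ranged) {
          // (start, count) pairs: clamp each range to the block of set IDs
        for (long i = 0; i < count; i += 2) {
          long s = std::max( buffer[i], start_id );
          long e = std::min( buffer[i] + buffer[i+1], start_id + num_sets );
          for (long id = s; id < e; ++id)
            set_ids_out.push_back( id );
        }
      }
      else {
          // flat list of IDs: keep only those that refer to sets
        for (long i = 0; i < count; ++i)
          if (buffer[i] >= start_id && buffer[i] < start_id + num_sets)
            set_ids_out.push_back( buffer[i] );
      }
    }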
Modified: MOAB/trunk/test/h5file/h5partial.cpp
===================================================================
--- MOAB/trunk/test/h5file/h5partial.cpp	2009-05-01 21:55:05 UTC (rev 2872)
+++ MOAB/trunk/test/h5file/h5partial.cpp	2009-05-01 22:15:22 UTC (rev 2873)
@@ -8,7 +8,7 @@
 #include <limits>
 
 const char TEST_FILE[] = "partial.h5m";
-const char READ_OPTS[] = "BUFFER_SIZE=256";
+#define READ_OPTS "BUFFER_SIZE=256"
 const char ID_TAG_NAME[] = "test_id_tag";
 
 
@@ -43,8 +43,9 @@
                         MBDataType type,
                         int size );
 
-enum ChildTestMode { CHILD_SETS, CHILD_CONTENTS, CHILD_NONE };
-void test_read_children_common( ChildTestMode mode );
+enum GatherTestMode { GATHER_SETS, GATHER_CONTENTS, GATHER_NONE };
+void test_gather_sets_common( bool contained_sets, GatherTestMode mode );
+void test_gather_sets_ranged( bool contained_sets, GatherTestMode mode );
 
 
 //! Read a set containing no entities
@@ -82,12 +83,28 @@
 //! containing read entities, or contained in an explicitly designated
 //! set, any child sets are also read.  Check that here.
 void test_read_child_sets_only()
-{ test_read_children_common( CHILD_SETS ); }
+{ test_gather_sets_common( false, GATHER_SETS );
+  test_gather_sets_ranged( false, GATHER_SETS ); }
 void test_read_child_set_contents()
-{ test_read_children_common( CHILD_CONTENTS ); }
+{ test_gather_sets_common( false, GATHER_CONTENTS ); 
+  test_gather_sets_ranged( false, GATHER_CONTENTS ); }
 void test_read_no_child_sets()
-{ test_read_children_common( CHILD_NONE ); }
+{ test_gather_sets_common( false, GATHER_NONE );
+  test_gather_sets_ranged( false, GATHER_NONE ); }
 
+//! For any set selected to be read by either explicit designation,
+//! containing read entities, or contained in an explicitly designated
+//! set, any contained sets are also read.  Check that here.
+void test_read_contained_sets_only()
+{ test_gather_sets_common( true, GATHER_SETS );
+  test_gather_sets_ranged( true, GATHER_SETS ); }
+void test_read_contained_set_contents()
+{ test_gather_sets_common( true, GATHER_CONTENTS );
+  test_gather_sets_ranged( true, GATHER_CONTENTS ); }
+void test_read_no_contained_sets()
+{ test_gather_sets_common( true, GATHER_NONE );
+  test_gather_sets_ranged( true, GATHER_NONE ); }
+
 //! Read in the sets contained in a set.  
 //! Should read all sets containing read elements or nodes
 //! and all sets that are contained in the specified "read"
@@ -139,6 +156,9 @@
   result += RUN_TEST(test_read_child_sets_only);
   result += RUN_TEST(test_read_child_set_contents);
   result += RUN_TEST(test_read_no_child_sets);
+  result += RUN_TEST(test_read_contained_sets_only);
+  result += RUN_TEST(test_read_contained_set_contents);
+  result += RUN_TEST(test_read_no_contained_sets);
   result += RUN_TEST(test_read_containing_sets);
   result += RUN_TEST(test_read_double_tag);
   result += RUN_TEST(test_read_opaque_tag);
@@ -932,7 +952,7 @@
     
     MBEntityHandle file;
     int id = i+1;
-    rval = mb.load_file( TEST_FILE, file, READ_OPTS, ID_TAG_NAME, &id, 1 );
+    rval = mb.load_file( TEST_FILE, file, READ_OPTS ";SETS=NONE", ID_TAG_NAME, &id, 1 );
     CHECK_ERR(rval);
     
       // check that the total number of sets read is as expected
@@ -970,10 +990,12 @@
   }
 }
 
-static void check_children( ChildTestMode mode, MBInterface& mb, int id, MBTag id_tag, MBEntityHandle file )
+static void check_children( bool contents, GatherTestMode mode, MBInterface& mb, int id, MBTag id_tag, MBEntityHandle file )
 {
-  const int exp_num_sets = (mode == CHILD_NONE) ? 1 : id;
-  const int exp_num_edges = (mode == CHILD_CONTENTS) ? id : 1;
+    // Increase number of expected sets by one if contents is true because
+    // we always read immediately contained (depth 1) sets.
+  const int exp_num_sets = (mode == GATHER_NONE) ? 1+contents : id;
+  const int exp_num_edges = (mode == GATHER_CONTENTS) ? id : 1;
   
   MBErrorCode rval;
   MBRange range;
@@ -996,7 +1018,7 @@
   CHECK_EQUAL( 1, (int)range.size() );
   set = range.front();
   
-  if (mode == CHILD_NONE) {
+  if (mode == GATHER_NONE) {
     range.clear();
     rval = mb.get_entities_by_type( set, MBEDGE , range );
     CHECK_ERR(rval);
@@ -1013,7 +1035,7 @@
     range.clear();
     rval = mb.get_entities_by_type( set, MBEDGE, range );
     CHECK_ERR(rval);
-    if (mode == CHILD_CONTENTS || i == id) {
+    if (mode == GATHER_CONTENTS || i == id) {
       CHECK_EQUAL( 1, (int)range.size() );
       const MBEntityHandle* conn;
       int len;
@@ -1028,9 +1050,12 @@
     else {
       CHECK( range.empty() );
     }
-      
+    
     std::vector<MBEntityHandle> children;
-    rval = mb.get_child_meshsets( set, children );
+    if (contents)
+      rval = mb.get_entities_by_type( set, MBENTITYSET, children );
+    else
+      rval = mb.get_child_meshsets( set, children );
     CHECK_ERR(rval);
     if (i == 1) {
       CHECK( children.empty() );
@@ -1044,7 +1069,7 @@
 
 
 const char* set_read_opts[] = { "SETS", "CONTENTS", "NONE" };
-void test_read_children_common( ChildTestMode mode )
+void test_gather_sets_common( bool contents, GatherTestMode mode )
 {
   MBErrorCode rval;
   MBCore instance;
@@ -1078,7 +1103,10 @@
     rval = mb.tag_set_data( id_tag, sets + i, 1, &id );
     CHECK_ERR(rval);
     if (i > 0) {
-      rval = mb.add_child_meshset( sets[i], sets[i-1] );
+      if (contents) 
+        rval = mb.add_entities( sets[i], sets + (i-1), 1 );
+      else
+        rval = mb.add_child_meshset( sets[i], sets[i-1] );
       CHECK_ERR(rval);
     }
   }
@@ -1089,7 +1117,10 @@
  
   MBEntityHandle file;
   std::string opt( READ_OPTS );
-  opt += ";CHILDREN=";
+  if (contents)
+    opt += ";CHILDREN=NONE;SETS=";
+  else
+    opt += ";SETS=NONE;CHILDREN=";
   opt += set_read_opts[mode];
 
   const int test_ids[] = { 2, 7, INT/3-1, INT/2+1, INT-3 };
@@ -1105,10 +1136,114 @@
     rval = mb.tag_get_handle( ID_TAG_NAME, id_tag );
     CHECK_ERR(rval);
     
-    check_children( mode, mb, test_ids[i], id_tag, file );
+    check_children( contents, mode, mb, test_ids[i], id_tag, file );
   }
 }
 
+
+void test_gather_sets_ranged( bool contents, GatherTestMode mode )
+{
+  MBErrorCode rval;
+  MBCore instance;
+  MBInterface& mb = instance;
+  
+  MBRange verts;
+  MBTag id_tag;
+  rval = mb.tag_create( ID_TAG_NAME, sizeof(int), MB_TAG_SPARSE, MB_TYPE_INTEGER, id_tag, 0 );
+  CHECK_ERR(rval);
+  
+    // create four groups of vertices, where all vertices in the same group
+    // have the same x-coordinate
+  const int NUM_GRP_VTX = 20;
+  const int NUM_GRP = 4;
+  MBEntityHandle sets[NUM_GRP];
+  for (int i = 0; i < NUM_GRP; ++i) {
+    double coords[3*NUM_GRP_VTX];
+    for (int j = 0; j < NUM_GRP_VTX; ++j) {
+      coords[3*j  ] = i;
+      coords[3*j+1] = j;
+      coords[3*j+2] = 0;
+    }
+    rval = mb.create_vertices( coords, NUM_GRP_VTX, verts );
+    CHECK_ERR(rval);
+    
+    rval = mb.create_meshset( MESHSET_SET, sets[i] );
+    CHECK_ERR(rval);
+    rval = mb.add_entities( sets[i], verts );
+    CHECK_ERR(rval);
+    int id = i + 1;
+    rval = mb.tag_set_data( id_tag, sets+i, 1, &id );
+    CHECK_ERR(rval);
+  }
+  
+    // link two of the sets into two of the others (as contents or as children)
+  if (contents) {
+    rval = mb.add_entities( sets[0], &sets[1], 1 ); CHECK_ERR(rval);
+    rval = mb.add_entities( sets[2], &sets[3], 1 ); CHECK_ERR(rval);
+  }
+  else {
+    rval = mb.add_child_meshset( sets[0], sets[1] ); CHECK_ERR(rval);
+    rval = mb.add_child_meshset( sets[2], sets[3] ); CHECK_ERR(rval);
+  }
+    
+    // Write the data
+  rval = mb.write_file( TEST_FILE, "MOAB" );
+  CHECK_ERR(rval);
+ 
+    // Read the data
+  std::string opt( READ_OPTS );
+  if (contents)
+    opt += ";CHILDREN=NONE;SETS=";
+  else
+    opt += ";SETS=NONE;CHILDREN=";
+  opt += set_read_opts[mode];
+
+  MBEntityHandle file;
+  const int read_id = 3;
+  rval = mb.delete_mesh(); CHECK_ERR(rval);
+  rval = mb.load_file( TEST_FILE, file, opt.c_str(), ID_TAG_NAME, &read_id, 1 );
+  CHECK_ERR(rval);
+  
+    // get any sets that were read in
+  MBRange read_sets;
+  rval = mb.get_entities_by_type( file, MBENTITYSET, read_sets );
+  CHECK_ERR(rval);
+  
+    // count number of vertices in each group
+  int counts[NUM_GRP];
+  memset( counts, 0, sizeof(counts) );
+  verts.clear();
+  rval = mb.get_entities_by_type( 0, MBVERTEX, verts );
+  CHECK_ERR(rval);
+  for (MBRange::iterator it = verts.begin(); it != verts.end(); ++it) {
+    double coords[3];
+    rval = mb.get_coords( &*it, 1, coords );
+    CHECK_ERR(rval);
+    int i = (int)(coords[0]+1e-12);
+    CHECK( i >= 0 && i < NUM_GRP );
+    counts[i]++;
+  }
+  
+    // check expected counts
+  CHECK_EQUAL( 0, counts[0] );
+  CHECK_EQUAL( 0, counts[1] );
+  CHECK_EQUAL( NUM_GRP_VTX, counts[2] );
+  switch (mode) {
+    case GATHER_NONE:
+      CHECK_EQUAL( 0, counts[3] );
+      CHECK_EQUAL( 1+contents, (int)read_sets.size() );
+      break;
+    case GATHER_SETS:
+      CHECK_EQUAL( 0, counts[3] );
+      CHECK_EQUAL( 2, (int)read_sets.size() );
+      break;
+    case GATHER_CONTENTS:
+      CHECK_EQUAL( NUM_GRP_VTX, counts[3] );
+      CHECK_EQUAL( 2, (int)read_sets.size() );
+      break;
+  }
+}
+
 static void check_num_verts( MBInterface& mb, MBTag tag, int id, int num_vtx )
 {
   MBErrorCode rval;


