[MOAB-dev] r1972 - in MOAB/trunk: . parallel

kraftche at mcs.anl.gov
Tue Jul 1 16:31:20 CDT 2008


Author: kraftche
Date: 2008-07-01 16:31:19 -0500 (Tue, 01 Jul 2008)
New Revision: 1972

Modified:
   MOAB/trunk/README.CONFIGURE
   MOAB/trunk/TestTypeSequenceManager.cpp
   MOAB/trunk/TypeSequenceManager.cpp
   MOAB/trunk/moab.make.in
   MOAB/trunk/parallel/MBParallelComm.hpp
   MOAB/trunk/parallel/Makefile.am
   MOAB/trunk/parallel/WriteHDF5Parallel.cpp
   MOAB/trunk/parallel/WriteHDF5Parallel.hpp
Log:
remove unnecessary std::min (the start_handle of a SequenceData is less than or equal to the start_handle of every sequence it contains)
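
The TypeSequenceManager.cpp hunk below relies on that invariant: a sequence is
always allocated inside its SequenceData, so the data's start handle can never
exceed the sequence's.  A minimal illustration, reusing the handle values from
the new regression_svn1952 test (simplified sketch, not code from this commit):

    // A SequenceData spanning handles [41673, 50000] ...
    SequenceData* data = new SequenceData( 0, 41673, 50000 );
    // ... holding a sequence 'seq' that starts later, at handle 41686.  Then:
    //   seq->data()->start_handle() == 41673
    //   seq->start_handle()         == 41686
    // so std::min( seq->start_handle(), seq->data()->start_handle() ) always
    // evaluates to seq->data()->start_handle(), and the std::min call can go.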

Modified: MOAB/trunk/README.CONFIGURE
===================================================================
--- MOAB/trunk/README.CONFIGURE	2008-07-01 06:31:18 UTC (rev 1971)
+++ MOAB/trunk/README.CONFIGURE	2008-07-01 21:31:19 UTC (rev 1972)
@@ -11,9 +11,9 @@
     
     autoheader
     aclocal -I m4
-    libtoolize
+    libtoolize -f
+    autoconf 
     automake -a
-    autoconf 
     
 
 Why aren't the configure script and other generated files in the

Modified: MOAB/trunk/TestTypeSequenceManager.cpp
===================================================================
--- MOAB/trunk/TestTypeSequenceManager.cpp	2008-07-01 06:31:18 UTC (rev 1971)
+++ MOAB/trunk/TestTypeSequenceManager.cpp	2008-07-01 21:31:19 UTC (rev 1972)
@@ -19,6 +19,10 @@
 void test_is_free_sequence();
 void test_is_free_handle();
 
+void regression_svn1952();
+void regression_svn1958();
+void regression_svn1960();
+
 /* Construct sequence of vertex handles containing 
    the ID ranges: { [3,7], [100,111], [1001] }
    referencing a SequenceData with the range [1,22000]
@@ -66,6 +70,10 @@
   error_count += RUN_TEST( test_find_free_handle );
   error_count += RUN_TEST( test_find_free_sequence );
   error_count += RUN_TEST( test_is_free_sequence );
+
+  error_count += RUN_TEST( regression_svn1952 );
+  error_count += RUN_TEST( regression_svn1958 );
+  error_count += RUN_TEST( regression_svn1960 );
   
   if (!error_count) 
     printf( "ALL TESTS PASSED\n");
@@ -972,3 +980,80 @@
   CHECK_EQUAL( (MBEntityHandle)3001, first );
   CHECK_EQUAL( (MBEntityHandle)MB_END_ID, last );
 }
+
+// Regression test for bug fixed in SVN revision 1952.  
+// SVN checkin message:
+//  Fixing a bug in allocation of sequences.  Before this commit, when:
+//  - Existing sequence starts at one place, e.g. 41686, but its
+//  sequenceData starts earlier, e.g. 41673, and
+//  - You try to allocate a new sequence at a lower handle but with a
+//  range which is more than the room available in the sequenceData,
+//  
+//  then
+//  - the code wants to allocate right up against the lower end of the
+//  existing sequence, and sizes the new sequenceData accordingly
+//  (i.e. with a max of 41685).
+//  
+//  In this case, that new sequenceData will overlap the existing
+//  sequenceData (which starts at 41673, even though it has an empty chunk
+//  in front).
+//  
+//  So, this change passes back a sequence data size from the code which
+//  computes where to put the start handle when allocating the new
+//  sequence.
+void regression_svn1952()
+{
+  const MBEntityHandle last_handle = 50000;
+  TypeSequenceManager seqman;
+  SequenceData* data = new SequenceData( 0, 41673, last_handle );
+  CHECK( insert_seq( seqman, 41686, 1000, data, true  ) );
+  
+  SequenceData* result_data;
+  MBEntityID result_size;
+  MBEntityHandle result_handle;
+  result_handle = seqman.find_free_sequence( 1686, 4000, 2*last_handle, result_data, result_size );
+  CHECK( result_handle > last_handle );
+  CHECK( result_handle + result_size <= 2*last_handle );
+}
+
+
+// Regression test for bug fixed in SVN revision 1958.  
+// SVN checkin message:
+//  Another fix to SequenceManager.  This time:
+//  - there was no sequence data allocated for the target range
+//  - there was exactly enough room in unallocated sequence data for the target range
+//  - the next data sequence up had a couple of empty slots, so the first occupied handle was a couple spaces from the start of the sequence data
+//  - the upper end of the range of the allocated sequence data was computed based on the first handle in the next sequence data
+//  
+//  Fixed by setting the size using the parameter I added before.
+void regression_svn1958()
+{
+  const int data_size = 100;
+  const MBEntityHandle data_start = 100;
+  const MBEntityHandle data_end = data_start + data_size - 1;
+  const int seq_offset = 2;
+
+  TypeSequenceManager seqman;
+  SequenceData* data = new SequenceData( 0, data_start, data_end );
+  CHECK( insert_seq( seqman, data_start+seq_offset, data_size-seq_offset, data, true  ) );
+  
+  SequenceData* result_data;
+  MBEntityID result_size;
+  MBEntityHandle result_handle;
+  result_handle = seqman.find_free_sequence( data_start-1, 1, 100000, result_data, result_size );
+  CHECK( result_handle > data_end || result_handle + result_size <= data_start );
+}
+
+
+// Regression test for bug fixed in SVN revision 1960.  
+// SVN checkin message:
+//  Third time pays for all.  This time, TSM::is_free_sequence returned true, 
+//  so there was a free sequence; SM::create_entity_sequence then called 
+//  SM::new_sequence_size, which used the first handle of the next higher 
+//  sequence to compute the size, when the start handle of the data on 
+//  which that first handle was allocated was smaller.  Sigh.
+void regression_svn1960()
+{
+  
+}
+
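
The regression_svn1960() body above is still empty.  A sketch of what such a
test might check, modeled on the two tests before it and assuming the same
insert_seq() helper, find_free_sequence() argument order, and non-overlap
check (hypothetical, not part of this commit):

    void regression_svn1960_sketch()
    {
        // SequenceData covering [900, 1999], with the only sequence in it
        // starting later, at 1000; the gap [900, 999] belongs to the data,
        // not to free space.
      TypeSequenceManager seqman;
      SequenceData* data = new SequenceData( 0, 900, 1999 );
      CHECK( insert_seq( seqman, 1000, 1000, data, true ) );

      SequenceData* result_data;
      MBEntityID result_size;
      MBEntityHandle result_handle;
      result_handle = seqman.find_free_sequence( 899, 1, 100000,
                                                 result_data, result_size );
        // Whatever gets allocated must not reach into the existing data.
      CHECK( result_handle > 1999 || result_handle + result_size <= 900 );
    }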

Modified: MOAB/trunk/TypeSequenceManager.cpp
===================================================================
--- MOAB/trunk/TypeSequenceManager.cpp	2008-07-01 06:31:18 UTC (rev 1971)
+++ MOAB/trunk/TypeSequenceManager.cpp	2008-07-01 21:31:19 UTC (rev 1972)
@@ -578,7 +578,7 @@
     return CREATE_HANDLE( TYPE_FROM_HANDLE(after_this), MB_END_ID, junk );
   else if ((*it)->start_handle() > after_this) {
       // need to check against the sequence data first
-    MBEntityHandle rhandle = std::min((*it)->start_handle(), (*it)->data()->start_handle());
+    MBEntityHandle rhandle = (*it)->data()->start_handle();
     return rhandle - 1;
   }
   else

Modified: MOAB/trunk/moab.make.in
===================================================================
--- MOAB/trunk/moab.make.in	2008-07-01 06:31:18 UTC (rev 1971)
+++ MOAB/trunk/moab.make.in	2008-07-01 21:31:19 UTC (rev 1972)
@@ -7,7 +7,7 @@
 
 MOAB_INCLUDES = -I$(MOAB_INCLUDEDIR)
 
-MOAB_LIBS_LINK = ${MOAB_LDFLAGS} -L${MOAB_LIBDIR} -lMOAB @LIBS@ @HDF5_LIBS@ @NETCDF_LIBS@
+MOAB_LIBS_LINK = ${MOAB_LDFLAGS} -L${MOAB_LIBDIR} -lMOAB @HDF5_LIBS@ @NETCDF_LIBS@ @LIBS@
 
 DAGMC_CONFIG_OPTIONS = @DAGMC_CONFIG_OPTIONS@
 

Modified: MOAB/trunk/parallel/MBParallelComm.hpp
===================================================================
--- MOAB/trunk/parallel/MBParallelComm.hpp	2008-07-01 06:31:18 UTC (rev 1971)
+++ MOAB/trunk/parallel/MBParallelComm.hpp	2008-07-01 21:31:19 UTC (rev 1972)
@@ -490,7 +490,8 @@
   MBErrorCode update_iface_sets(MBRange &sent_ents,
                                 std::vector<MBEntityHandle> &remote_handles, 
                                 int from_proc);
-  
+
+public:  
     //! replace handles in from_vec with corresponding handles on
     //! to_proc (by checking shared[p/h]_tag and shared[p/h]s_tag;
     //! if no remote handle and new_ents is non-null, substitute
@@ -516,7 +517,8 @@
                                  MBEntityHandle *to_vec,
                                  int to_proc,
                                  const MBRange &new_ents);
-  
+
+private:  
     //! goes through from_vec, and for any with type MBMAXTYPE, replaces with
     //! new_ents value at index corresponding to id of entity in from_vec
   MBErrorCode get_local_handles(MBEntityHandle *from_vec, 

Modified: MOAB/trunk/parallel/Makefile.am
===================================================================
--- MOAB/trunk/parallel/Makefile.am	2008-07-01 06:31:18 UTC (rev 1971)
+++ MOAB/trunk/parallel/Makefile.am	2008-07-01 21:31:19 UTC (rev 1972)
@@ -11,6 +11,10 @@
 DEFS = $(DEFINES) -DIS_BUILDING_MB
 INCLUDES += -I$(top_srcdir) -I$(top_builddir) 
 
+# Let tests know where the source directory is in case they 
+# need to load input files
+CPPFLAGS += '-DSRCDIR=$(srcdir)'
+
 # The directory in which to install headers
 libMOABpar_la_includedir = $(includedir)
 
@@ -19,6 +23,7 @@
 MOAB_PARALLEL_SRCS =
 MOAB_PARALLEL_HDRS =
 MOAB_PARALLEL_TEST = 
+HDF5_PARALLEL_TEST = 
 if PARALLEL
   MOAB_PARALLEL_SRCS += \
      MBParallelComm.cpp \
@@ -41,13 +46,10 @@
   INCLUDES += -I$(top_srcdir)/mhdf/include
   MOAB_PARALLEL_SRCS += WriteHDF5Parallel.cpp 
   MOAB_PARALLEL_HDRS += WriteHDF5Parallel.hpp
+  HDF5_PARALLEL_TEST = parallel_hdf5_test
 endif
 
-  bin_PROGRAMS = mbparallelcomm_test
-  mbparallelcomm_test_SOURCES = mbparallelcomm_test.cpp
-  mbparallelcomm_test_LDADD = $(top_builddir)/libMOAB.la
-  mbparallelcomm_test_DEPENDENCIES = $(mbparallelcomm_test_LDADD)
-
+  MOAB_PARALLEL_TEST += mbparallelcomm_test
   MOAB_PARALLEL_TEST += pcomm_unit
 endif
 
@@ -62,8 +64,14 @@
 # Tests and such
 
 
-check_PROGRAMS = $(MOAB_PARALLEL_TEST)
-TESTS = $(MOAB_PARALLEL_TEST)
+check_PROGRAMS = $(MOAB_PARALLEL_TEST) $(HDF5_PARALLEL_TEST)
+TESTS = $(check_PROGRAMS)
 pcomm_unit_SOURCES = pcomm_unit.cpp
 pcomm_unit_LDADD = $(top_builddir)/libMOAB.la
+parallel_hdf5_test_SOURCES = parallel_hdf5_test.cc
+parallel_hdf5_test_LDADD = $(top_builddir)/libMOAB.la
+parallel_hdf5_test_DEPENDENCIES = $(mbparallelcomm_test_LDADD)
+mbparallelcomm_test_SOURCES = mbparallelcomm_test.cpp
+mbparallelcomm_test_LDADD = $(top_builddir)/libMOAB.la
+mbparallelcomm_test_DEPENDENCIES = $(mbparallelcomm_test_LDADD)
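
The new CPPFLAGS line above passes the source directory to the test programs
as an unquoted SRCDIR macro.  A hypothetical way a test could consume it (the
input file name is made up for illustration; this is not code from the commit):

    // Stringize the unquoted SRCDIR value handed in via -DSRCDIR=$(srcdir).
    #define SRCDIR_TO_STR_( X ) #X
    #define SRCDIR_TO_STR( X )  SRCDIR_TO_STR_( X )

    #ifdef SRCDIR
      // e.g. "/path/to/MOAB/parallel" + "/input.h5m"
      static const char* const input_file = SRCDIR_TO_STR(SRCDIR) "/input.h5m";
    #else
      static const char* const input_file = "input.h5m";
    #endif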
 

Modified: MOAB/trunk/parallel/WriteHDF5Parallel.cpp
===================================================================
--- MOAB/trunk/parallel/WriteHDF5Parallel.cpp	2008-07-01 06:31:18 UTC (rev 1971)
+++ MOAB/trunk/parallel/WriteHDF5Parallel.cpp	2008-07-01 21:31:19 UTC (rev 1972)
@@ -1,5 +1,5 @@
 
-#undef DEBUG
+#define DEBUG
 
 #ifdef DEBUG
 #  include <stdio.h>
@@ -41,7 +41,7 @@
 
 #ifdef DEBUG
 #  define START_SERIAL                     \
-     for (int _x = 0; _x < myPcomm->proc_config().proc_size(); ++_x) {\
+     for (unsigned _x = 0; _x < myPcomm->proc_config().proc_size(); ++_x) {\
        MPI_Barrier( MPI_COMM_WORLD );      \
        if (_x != myPcomm->proc_config().proc_rank()) continue     
 #  define END_SERIAL                       \
@@ -122,20 +122,19 @@
 #ifndef DEBUG
 static void print_type_sets( MBInterface* , int , int , MBRange& ) {}
 #else
-static void print_type_sets( MBInterface* iFace, int myPcomm->proc_config().proc_rank(), int myPcomm->proc_config().proc_size(), MBRange& sets )
+static void print_type_sets( MBInterface* iFace, int rank, int size, MBRange& sets )
 {
-  MBTag gid, did, bid, sid, nid, iid;
+  MBTag gid, did, bid, sid, nid;
   iFace->tag_get_handle( GLOBAL_ID_TAG_NAME, gid ); 
   iFace->tag_get_handle( GEOM_DIMENSION_TAG_NAME, did );
   iFace->tag_get_handle( MATERIAL_SET_TAG_NAME, bid );
   iFace->tag_get_handle( DIRICHLET_SET_TAG_NAME, nid );
   iFace->tag_get_handle( NEUMANN_SET_TAG_NAME, sid );
-  iFace->tag_get_handle( PARALLEL_PARTITION_TAG_NAME, iid );
   MBRange typesets[10];
-  const char* typenames[] = {"Block", "Sideset", "NodeSet", "Vertex", "Curve", "Surface", "Volume", "Body", "Partition", "Other"};
+  const char* typenames[] = {"Block", "Sideset", "NodeSet", "Vertex", "Curve", "Surface", "Volume", "Body", "Other"};
   for (MBRange::iterator riter = sets.begin(); riter != sets.end(); ++riter)
   {
-    unsigned dim, id, proc[2], oldsize;
+    unsigned dim, id, oldsize;
     if (MB_SUCCESS == iFace->tag_get_data(bid, &*riter, 1, &id)) 
       dim = 0;
     else if (MB_SUCCESS == iFace->tag_get_data(sid, &*riter, 1, &id))
@@ -147,11 +146,6 @@
       iFace->tag_get_data(gid, &*riter, 1, &id);
       dim += 3;
     }
-    else if (MB_SUCCESS == iFace->tag_get_data(iid, &*riter, 1, proc)) {
-      assert(proc[0] == (unsigned)myPcomm->proc_config().proc_rank() || proc[1] == (unsigned)myPcomm->proc_config().proc_rank());
-      id = proc[proc[0] == (unsigned)myPcomm->proc_config().proc_rank()];
-      dim = 8;
-    }
     else {
       id = *riter;
       dim = 9;
@@ -161,7 +155,7 @@
     typesets[dim].insert( id );
     assert( typesets[dim].size() - oldsize == 1 );  
   }
-  for (int ii = 0; ii < 10; ++ii)
+  for (int ii = 0; ii < 9; ++ii)
   {
     char num[16];
     std::string line(typenames[ii]);
@@ -329,7 +323,7 @@
   
     // For the 'remoteMesh' list for this processor, just remove
     // entities we aren't writing.
-  MBRange& my_remote_mesh = remoteMesh[myPcomm->proc_config().proc_size()];
+  MBRange& my_remote_mesh = remoteMesh[myPcomm->proc_config().proc_rank()];
   tmpset = my_remote_mesh.subtract( nodeSet.range );
   if (!tmpset.empty())
     my_remote_mesh = my_remote_mesh.subtract( tmpset );
@@ -616,7 +610,7 @@
       return MB_FAILURE;
     }
     mhdf_closeData( filePtr, handle, &status );
- }
+  }
     
     // send id offset to every proc
   result = MPI_Bcast( &first_id, 1, MPI_LONG, 0, MPI_COMM_WORLD );
@@ -637,13 +631,13 @@
   }
   
     // send each proc it's offset in the node table
-  int offset;
-  result = MPI_Scatter( &node_counts[0], 1, MPI_INT, 
-                        &offset, 1, MPI_INT,
+  long offset;
+  result = MPI_Scatter( &node_counts[0], 1, MPI_LONG, 
+                        &offset, 1, MPI_LONG,
                         0, MPI_COMM_WORLD );
   assert(MPI_SUCCESS == result);
   nodeSet.offset = offset;
-  
+
   return assign_ids( nodeSet.range, nodeSet.first_id + nodeSet.offset );
 }
 
@@ -1905,27 +1899,35 @@
 
 MBErrorCode WriteHDF5Parallel::communicate_remote_ids( MBEntityType type )
 {
-  int result;
-  MBErrorCode rval;
-
     // Get the export set for the specified type
-  ExportSet* export_set = 0;
   if (type == MBVERTEX)
-    export_set = &nodeSet;
+    return communicate_remote_ids( &nodeSet );
   else if(type == MBENTITYSET)
-    export_set = &setSet;
+    return communicate_remote_ids( &setSet );
   else
   {
     for (std::list<ExportSet>::iterator esiter = exportList.begin();
          esiter != exportList.end(); ++esiter)
       if (esiter->type == type)
       {
-        export_set = &*esiter;
-        break;
+        MBErrorCode rval = communicate_remote_ids( &*esiter );
+        if (MB_SUCCESS != rval)
+          return rval;
       }
+    return MB_SUCCESS;
   }
-  assert(export_set != NULL);
+}
   
+
+MBErrorCode WriteHDF5Parallel::communicate_remote_ids( ExportSet* export_set )
+{
+  RangeMap<MBEntityHandle,id_t>::iterator ri;
+  std::vector<MBEntityHandle> remote_handles;
+  MBErrorCode rval;
+  int result;
+  const MBEntityType type = export_set->type;
+  assert(export_set != NULL);
+
     // Get the ranges in the set
   std::vector<unsigned long> myranges;
   MBRange::const_pair_iterator p_iter = export_set->range.const_pair_begin();
@@ -1969,60 +1971,56 @@
     // Set file IDs for each communicated entity
     
     // For each processor
-  for (unsigned int proc = 0; proc < myPcomm->proc_config().proc_size(); ++proc)
-  {
-    if (proc == myPcomm->proc_config().proc_rank())
+  for (unsigned int proc = 0; proc < myPcomm->proc_config().proc_size(); ++proc) {
+    if (proc == myPcomm->proc_config().proc_rank() || remoteMesh[proc].empty())
       continue;
     
-      // Get data for corresponding processor
-    const int offset = offsets[proc];
-    const int count = counts[proc];
-    const unsigned long* const ranges = &allranges[displs[proc]];
+      // construct RangeMap to map from handles on remote processor
+      // to file IDs
+    RangeMap<MBEntityHandle,id_t> remote_ids;
+    unsigned long* i = &allranges[displs[proc]];
+    unsigned long* const e = i + counts[proc];
+    unsigned long file_id = offsets[proc];
+    while (i < e) {
+      MBEntityHandle start_handle = (MBEntityHandle)*i; ++i;
+      MBEntityHandle end_handle = (MBEntityHandle)*i; ++i;
+      MBEntityHandle count = end_handle - start_handle + 1;
+      ri = remote_ids.insert( (MBEntityHandle)start_handle, file_id, count );
+      assert( ri != remote_ids.end() );
+      file_id += count;
+    }
     
-      // For each geometry meshset in the interface
-    MBRange::iterator r_iter = MBRange::lower_bound( remoteMesh[proc].begin(),
-                                                     remoteMesh[proc].end(),
-                                                     CREATE_HANDLE(type,0,result) );
-    MBRange::iterator r_stop = MBRange::lower_bound( r_iter,
-                                                     remoteMesh[proc].end(),
-                                                     CREATE_HANDLE(type+1,0,result) );
-    for ( ; r_iter != r_stop; ++r_iter)
-    {
-      MBEntityHandle entity = *r_iter;
-
-        // Get handle on other processor
-      MBEntityHandle global;
-      rval = iFace->tag_get_data( global_id_tag, &entity, 1, &global );
-      assert(MB_SUCCESS == rval);
-
-        // Find corresponding fileid on other processor.
-        // This could potentially be n**2, but we will assume that
-        // the range list from each processor is short (typically 1).
-      int j, steps = 0;
-      unsigned long low, high;
-      for (j = 0; j < count; j += 2)
-      {
-        low = ranges[j];
-        high = ranges[j+1];
-        if (low <= global && high >= global)
-          break;
-        steps += (high - low) + 1;
+      // for entities on other processor that we must reference from this
+      // processor, get the handle on the remote proc and from there 
+      // look up the file id in the map constructed above
+    std::pair<MBRange::iterator,MBRange::iterator> iters = remoteMesh[proc].equal_range(type);
+    MBRange subrange, empty;
+    subrange.merge( iters.first, iters.second );
+    remote_handles.resize( subrange.size() );
+    rval = myPcomm->get_remote_handles( true, subrange, &remote_handles[0], proc, empty );
+    if (MB_SUCCESS != rval) {
+      printdebug( "Query for remote handles for type %s on proc %d failed with error code %d\n",
+                   MBCN::EntityTypeName(type), (int)proc, (int)rval );
+      return rval;
+    }
+    std::vector<MBEntityHandle>::const_iterator rem = remote_handles.begin();
+    MBRange::const_iterator loc = subrange.begin();
+    for( ; rem != remote_handles.end(); ++rem, ++loc) {
+      id_t id = remote_ids.find( *rem );
+      if (!id) {
+        printdebug( "Invalid remote handle (0x%lx, %s) on proc %d.\n",
+                     (unsigned long)*rem, MBCN::EntityTypeName(type), (int)proc );
+        return MB_FAILURE;
       }
-      if (j >= count) {
-      printdebug("*** handle = %u, type = %d, id = %d, proc = %d\n",
-      (unsigned)global, (int)(iFace->type_from_handle(global)), (int)(iFace->id_from_handle(global)), proc);
-      for (int ii = 0; ii < count; ii+=2) 
-      printdebug("***  %u to %u\n", (unsigned)ranges[ii], (unsigned)ranges[ii+1] );
-      MBRange junk; junk.insert( global );
-      print_type_sets( iFace, myPcomm->proc_config().proc_rank(), myPcomm->proc_config().proc_size(), junk );
+      
+      ri = idMap.insert( *loc, id, 1 );
+      if (ri == idMap.end()) {
+        printdebug( "Conflicting file ID (%lu) for remote handle (0x%lx, %s) on proc %d.\n",
+                     (unsigned long)id, (unsigned long)*rem, MBCN::EntityTypeName(type), (int)proc );
+        return MB_FAILURE;
       }
-      assert(j < count);
-      int fileid = offset + steps + (global - low);
-      RangeMap<MBEntityHandle,id_t>::iterator ri = idMap.insert( entity, fileid, 1 );
-      assert( ri != idMap.end() );
-    } // for(r_iter->range)
-  } // for(each processor)
-  
+    }
+  }
   return MB_SUCCESS;
 }
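
To make the data flow of the rewritten communicate_remote_ids() concrete: every
processor gathers every other processor's exported handle ranges together with
its file-ID offset, and the while-loop above turns them into a RangeMap from
remote handles to contiguous file IDs.  A worked example with made-up numbers:

    // Suppose proc 2 exported the handle ranges [100,104] and [300,301] and
    // was assigned file-ID offset 40 (offsets[2] == 40).  The loop performs:
    //   remote_ids.insert( 100, 40, 5 );  // handles 100..104 -> file IDs 40..44
    //   remote_ids.insert( 300, 45, 2 );  // handles 300..301 -> file IDs 45..46
    // A remote handle of 301 returned by get_remote_handles() therefore maps
    // to file ID 46, which is what gets recorded in idMap for the local copy
    // of that entity.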
 

Modified: MOAB/trunk/parallel/WriteHDF5Parallel.hpp
===================================================================
--- MOAB/trunk/parallel/WriteHDF5Parallel.hpp	2008-07-01 06:31:18 UTC (rev 1971)
+++ MOAB/trunk/parallel/WriteHDF5Parallel.hpp	2008-07-01 21:31:19 UTC (rev 1972)
@@ -117,6 +117,12 @@
       //! entities to be written on this processor.
     MBErrorCode communicate_remote_ids(MBEntityType type);
     
+      //! For entities that will be written by another 
+      //! processor, get the file Ids that will be assigned
+      //! to those so they can be referenced by
+      //! entities to be written on this processor.
+    MBErrorCode communicate_remote_ids(ExportSet* ents);
+    
       //! Sort the list of tag information in the parent
       //! class by name so all procs have them in the same
       //! order.



