[MOAB-dev] r1973 - in MOAB/trunk: . parallel
kraftche at mcs.anl.gov
Tue Jul 1 16:34:03 CDT 2008
Author: kraftche
Date: 2008-07-01 16:34:03 -0500 (Tue, 01 Jul 2008)
New Revision: 1973
Modified:
MOAB/trunk/TestTypeSequenceManager.cpp
MOAB/trunk/moab.make.in
MOAB/trunk/parallel/Makefile.am
MOAB/trunk/parallel/WriteHDF5Parallel.cpp
MOAB/trunk/parallel/WriteHDF5Parallel.hpp
Log:
back out a whole bunch of stuff I didn't intend to check in yet. Sorry about the noise
Modified: MOAB/trunk/TestTypeSequenceManager.cpp
===================================================================
--- MOAB/trunk/TestTypeSequenceManager.cpp 2008-07-01 21:31:19 UTC (rev 1972)
+++ MOAB/trunk/TestTypeSequenceManager.cpp 2008-07-01 21:34:03 UTC (rev 1973)
@@ -19,10 +19,6 @@
void test_is_free_sequence();
void test_is_free_handle();
-void regression_svn1952();
-void regression_svn1958();
-void regression_svn1960();
-
/* Construct sequence of vertex handles containing
the ID ranges: { [3,7], [100,111], [1001] }
referencing a SequnceData with the range [1,22000]
@@ -70,10 +66,6 @@
error_count += RUN_TEST( test_find_free_handle );
error_count += RUN_TEST( test_find_free_sequence );
error_count += RUN_TEST( test_is_free_sequence );
-
- error_count += RUN_TEST( regression_svn1952 );
- error_count += RUN_TEST( regression_svn1958 );
- error_count += RUN_TEST( regression_svn1960 );
if (!error_count)
printf( "ALL TESTS PASSED\n");
@@ -980,80 +972,3 @@
CHECK_EQUAL( (MBEntityHandle)3001, first );
CHECK_EQUAL( (MBEntityHandle)MB_END_ID, last );
}
-
-// Regression test for bug fixed in SVN revision 1952.
-// SVN checkin message:
-// Fixing a bug in allocation of sequences. Before this commit, when:
-// - Existing sequence starts at one place, e.g. 41686, but its
-// sequenceData starts earlier, e.g. 41673, and
-// - You try to allocate a new sequence at a lower handle but with a
-// range which is more than the room available in the sequenceData,
-//
-// then
-// - the code wants to allocate right up against the lower end of the
-// existing sequence, and sizes the new sequenceData accordingly
-// (i.e. with a max of 41685).
-//
-// In this case, that new sequenceData will overlap the existing
-// sequenceData (which starts at 41673, even though it has an empty chunk
-// in front).
-//
-// So, this change passes back a sequence data size from the code which
-// computes where to put the start handle when allocating the new
-// sequence.
-void regression_svn1952()
-{
- const MBEntityHandle last_handle = 50000;
- TypeSequenceManager seqman;
- SequenceData* data = new SequenceData( 0, 41673, last_handle );
- CHECK( insert_seq( seqman, 41686, 1000, data, true ) );
-
- SequenceData* result_data;
- MBEntityID result_size;
- MBEntityHandle result_handle;
- result_handle = seqman.find_free_sequence( 1686, 4000, 2*last_handle, result_data, result_size );
- CHECK( result_handle > last_handle );
- CHECK( result_handle + result_size <= 2*last_handle );
-}
-
-
-// Regression test for bug fixed in SVN revision 1958.
-// SVN checkin message:
-// Another fix to SequenceManager. This time:
-// - there was no sequence data allocated for the target range
-// - there was exactly enough room in unallocated sequence data for the target range
-// - the next data sequence up had a couple of empty slots, so the first occupied handle was a couple spaces from the start of the sequence data
-// - the upper end of the range of the allocated sequence data was computed based on the first handle in the next sequence data
-//
-// Fixed by setting the size using the parameter I added before.
-void regression_svn1958()
-{
- const int data_size = 100;
- const MBEntityHandle data_start = 100;
- const MBEntityHandle data_end = data_start + data_size - 1;
- const int seq_offset = 2;
-
- TypeSequenceManager seqman;
- SequenceData* data = new SequenceData( 0, data_start, data_end );
- CHECK( insert_seq( seqman, data_start+seq_offset, data_size-seq_offset, data, true ) );
-
- SequenceData* result_data;
- MBEntityID result_size;
- MBEntityHandle result_handle;
- result_handle = seqman.find_free_sequence( data_start-1, 1, 100000, result_data, result_size );
- CHECK( result_handle > data_end || result_handle + result_size <= data_start );
-}
-
-
-// Regression test for bug fixed in SVN revision 1960.
-// SVN checkin message:
-// Third time pays for all. This time, in TSM::is_free_sequence was true,
-// so there was a free sequence; SM::create_entity_sequence then called
-// SM::new_sequence_size, which used the first handle of the next higher
-// sequence to compute the size, when the start handle of the data on
-// which that first handle was allocated was smaller. Sigh.
-void regression_svn1960()
-{
-
-}
-
Modified: MOAB/trunk/moab.make.in
===================================================================
--- MOAB/trunk/moab.make.in 2008-07-01 21:31:19 UTC (rev 1972)
+++ MOAB/trunk/moab.make.in 2008-07-01 21:34:03 UTC (rev 1973)
@@ -7,7 +7,7 @@
MOAB_INCLUDES = -I$(MOAB_INCLUDEDIR)
-MOAB_LIBS_LINK = ${MOAB_LDFLAGS} -L${MOAB_LIBDIR} -lMOAB @HDF5_LIBS@ @NETCDF_LIBS@ @LIBS@
+MOAB_LIBS_LINK = ${MOAB_LDFLAGS} -L${MOAB_LIBDIR} -lMOAB @LIBS@ @HDF5_LIBS@ @NETCDF_LIBS@
DAGMC_CONFIG_OPTIONS = @DAGMC_CONFIG_OPTIONS@
Modified: MOAB/trunk/parallel/Makefile.am
===================================================================
--- MOAB/trunk/parallel/Makefile.am 2008-07-01 21:31:19 UTC (rev 1972)
+++ MOAB/trunk/parallel/Makefile.am 2008-07-01 21:34:03 UTC (rev 1973)
@@ -11,10 +11,6 @@
DEFS = $(DEFINES) -DIS_BUILDING_MB
INCLUDES += -I$(top_srcdir) -I$(top_builddir)
-# Let tests know where the source directory is in case they
-# need to load input files
-CPPFLAGS += '-DSRCDIR=$(srcdir)'
-
# The directory in which to install headers
libMOABpar_la_includedir = $(includedir)
@@ -23,7 +19,6 @@
MOAB_PARALLEL_SRCS =
MOAB_PARALLEL_HDRS =
MOAB_PARALLEL_TEST =
-HDF5_PARALLEL_TEST =
if PARALLEL
MOAB_PARALLEL_SRCS += \
MBParallelComm.cpp \
@@ -46,10 +41,13 @@
INCLUDES += -I$(top_srcdir)/mhdf/include
MOAB_PARALLEL_SRCS += WriteHDF5Parallel.cpp
MOAB_PARALLEL_HDRS += WriteHDF5Parallel.hpp
- HDF5_PARALLEL_TEST = parallel_hdf5_test
endif
- MOAB_PARALLEL_TEST += mbparallelcomm_test
+ bin_PROGRAMS = mbparallelcomm_test
+ mbparallelcomm_test_SOURCES = mbparallelcomm_test.cpp
+ mbparallelcomm_test_LDADD = $(top_builddir)/libMOAB.la
+ mbparallelcomm_test_DEPENDENCIES = $(mbparallelcomm_test_LDADD)
+
MOAB_PARALLEL_TEST += pcomm_unit
endif
@@ -64,14 +62,8 @@
# Tests and such
-check_PROGRAMS = $(MOAB_PARALLEL_TEST) $(HDF5_PARALLEL_TEST)
-TESTS = $(check_PROGRAMS)
+check_PROGRAMS = $(MOAB_PARALLEL_TEST)
+TESTS = $(MOAB_PARALLEL_TEST)
pcomm_unit_SOURCES = pcomm_unit.cpp
pcomm_unit_LDADD = $(top_builddir)/libMOAB.la
-parallel_hdf5_test_SOURCES = parallel_hdf5_test.cc
-parallel_hdf5_test_LDADD = $(top_builddir)/libMOAB.la
-parallel_hdf5_test_DEPENDENCIES = $(mbparallelcomm_test_LDADD)
-mbparallelcomm_test_SOURCES = mbparallelcomm_test.cpp
-mbparallelcomm_test_LDADD = $(top_builddir)/libMOAB.la
-mbparallelcomm_test_DEPENDENCIES = $(mbparallelcomm_test_LDADD)
Modified: MOAB/trunk/parallel/WriteHDF5Parallel.cpp
===================================================================
--- MOAB/trunk/parallel/WriteHDF5Parallel.cpp 2008-07-01 21:31:19 UTC (rev 1972)
+++ MOAB/trunk/parallel/WriteHDF5Parallel.cpp 2008-07-01 21:34:03 UTC (rev 1973)
@@ -1,5 +1,5 @@
-#define DEBUG
+#undef DEBUG
#ifdef DEBUG
# include <stdio.h>
@@ -41,7 +41,7 @@
#ifdef DEBUG
# define START_SERIAL \
- for (unsigned _x = 0; _x < myPcomm->proc_config().proc_size(); ++_x) {\
+ for (int _x = 0; _x < myPcomm->proc_config().proc_size(); ++_x) {\
MPI_Barrier( MPI_COMM_WORLD ); \
if (_x != myPcomm->proc_config().proc_rank()) continue
# define END_SERIAL \
@@ -122,19 +122,20 @@
#ifndef DEBUG
static void print_type_sets( MBInterface* , int , int , MBRange& ) {}
#else
-static void print_type_sets( MBInterface* iFace, int rank, int size, MBRange& sets )
+static void print_type_sets( MBInterface* iFace, int myPcomm->proc_config().proc_rank(), int myPcomm->proc_config().proc_size(), MBRange& sets )
{
- MBTag gid, did, bid, sid, nid;
+ MBTag gid, did, bid, sid, nid, iid;
iFace->tag_get_handle( GLOBAL_ID_TAG_NAME, gid );
iFace->tag_get_handle( GEOM_DIMENSION_TAG_NAME, did );
iFace->tag_get_handle( MATERIAL_SET_TAG_NAME, bid );
iFace->tag_get_handle( DIRICHLET_SET_TAG_NAME, nid );
iFace->tag_get_handle( NEUMANN_SET_TAG_NAME, sid );
+ iFace->tag_get_handle( PARALLEL_PARTITION_TAG_NAME, iid );
MBRange typesets[10];
- const char* typenames[] = {"Block", "Sideset", "NodeSet", "Vertex", "Curve", "Surface", "Volume", "Body", "Other"};
+ const char* typenames[] = {"Block", "Sideset", "NodeSet", "Vertex", "Curve", "Surface", "Volume", "Body", "Partition", "Other"};
for (MBRange::iterator riter = sets.begin(); riter != sets.end(); ++riter)
{
- unsigned dim, id, oldsize;
+ unsigned dim, id, proc[2], oldsize;
if (MB_SUCCESS == iFace->tag_get_data(bid, &*riter, 1, &id))
dim = 0;
else if (MB_SUCCESS == iFace->tag_get_data(sid, &*riter, 1, &id))
@@ -146,6 +147,11 @@
iFace->tag_get_data(gid, &*riter, 1, &id);
dim += 3;
}
+ else if (MB_SUCCESS == iFace->tag_get_data(iid, &*riter, 1, proc)) {
+ assert(proc[0] == (unsigned)myPcomm->proc_config().proc_rank() || proc[1] == (unsigned)myPcomm->proc_config().proc_rank());
+ id = proc[proc[0] == (unsigned)myPcomm->proc_config().proc_rank()];
+ dim = 8;
+ }
else {
id = *riter;
dim = 9;
@@ -155,7 +161,7 @@
typesets[dim].insert( id );
assert( typesets[dim].size() - oldsize == 1 );
}
- for (int ii = 0; ii < 9; ++ii)
+ for (int ii = 0; ii < 10; ++ii)
{
char num[16];
std::string line(typenames[ii]);
@@ -323,7 +329,7 @@
// For the 'remoteMesh' list for this processor, just remove
// entities we aren't writing.
- MBRange& my_remote_mesh = remoteMesh[myPcomm->proc_config().proc_rank()];
+ MBRange& my_remote_mesh = remoteMesh[myPcomm->proc_config().proc_size()];
tmpset = my_remote_mesh.subtract( nodeSet.range );
if (!tmpset.empty())
my_remote_mesh = my_remote_mesh.subtract( tmpset );
@@ -610,7 +616,7 @@
return MB_FAILURE;
}
mhdf_closeData( filePtr, handle, &status );
- }
+ }
// send id offset to every proc
result = MPI_Bcast( &first_id, 1, MPI_LONG, 0, MPI_COMM_WORLD );
@@ -631,13 +637,13 @@
}
// send each proc it's offset in the node table
- long offset;
- result = MPI_Scatter( &node_counts[0], 1, MPI_LONG,
- &offset, 1, MPI_LONG,
+ int offset;
+ result = MPI_Scatter( &node_counts[0], 1, MPI_INT,
+ &offset, 1, MPI_INT,
0, MPI_COMM_WORLD );
assert(MPI_SUCCESS == result);
nodeSet.offset = offset;
-
+
return assign_ids( nodeSet.range, nodeSet.first_id + nodeSet.offset );
}
@@ -1899,35 +1905,27 @@
MBErrorCode WriteHDF5Parallel::communicate_remote_ids( MBEntityType type )
{
+ int result;
+ MBErrorCode rval;
+
// Get the export set for the specified type
+ ExportSet* export_set = 0;
if (type == MBVERTEX)
- return communicate_remote_ids( &nodeSet );
+ export_set = &nodeSet;
else if(type == MBENTITYSET)
- return communicate_remote_ids( &setSet );
+ export_set = &setSet;
else
{
for (std::list<ExportSet>::iterator esiter = exportList.begin();
esiter != exportList.end(); ++esiter)
if (esiter->type == type)
{
- MBErrorCode rval = communicate_remote_ids( &*esiter );
- if (MB_SUCCESS != rval)
- return rval;
+ export_set = &*esiter;
+ break;
}
- return MB_SUCCESS;
}
-}
+ assert(export_set != NULL);
-
-MBErrorCode WriteHDF5Parallel::communicate_remote_ids( ExportSet* export_set )
-{
- RangeMap<MBEntityHandle,id_t>::iterator ri;
- std::vector<MBEntityHandle> remote_handles;
- MBErrorCode rval;
- int result;
- const MBEntityType type = export_set->type;
- assert(export_set != NULL);
-
// Get the ranges in the set
std::vector<unsigned long> myranges;
MBRange::const_pair_iterator p_iter = export_set->range.const_pair_begin();
@@ -1971,56 +1969,60 @@
// Set file IDs for each communicated entity
// For each processor
- for (unsigned int proc = 0; proc < myPcomm->proc_config().proc_size(); ++proc) {
- if (proc == myPcomm->proc_config().proc_rank() || remoteMesh[proc].empty())
+ for (unsigned int proc = 0; proc < myPcomm->proc_config().proc_size(); ++proc)
+ {
+ if (proc == myPcomm->proc_config().proc_rank())
continue;
- // constuct RangeMap to map from handles on remote processor
- // to file IDs
- RangeMap<MBEntityHandle,id_t> remote_ids;
- unsigned long* i = &allranges[displs[proc]];
- unsigned long* const e = i + counts[proc];
- unsigned long file_id = offsets[proc];
- while (i < e) {
- MBEntityHandle start_handle = (MBEntityHandle)*i; ++i;
- MBEntityHandle end_handle = (MBEntityHandle)*i; ++i;
- MBEntityHandle count = end_handle - start_handle + 1;
- ri = remote_ids.insert( (MBEntityHandle)start_handle, file_id, count );
- assert( ri != remote_ids.end() );
- file_id += count;
- }
+ // Get data for corresponding processor
+ const int offset = offsets[proc];
+ const int count = counts[proc];
+ const unsigned long* const ranges = &allranges[displs[proc]];
- // for entities on other processor that we must reference from this
- // processor, get the handle on the remote proc and from there
- // look up the file id in the map constructed above
- std::pair<MBRange::iterator,MBRange::iterator> iters = remoteMesh[proc].equal_range(type);
- MBRange subrange, empty;
- subrange.merge( iters.first, iters.second );
- remote_handles.resize( subrange.size() );
- rval = myPcomm->get_remote_handles( true, subrange, &remote_handles[0], proc, empty );
- if (MB_SUCCESS != rval) {
- printdebug( "Query for remote handles for type %s on proc %d failed with error code %d\n",
- MBCN::EntityTypeName(type), (int)proc, (int)rval );
- return rval;
- }
- std::vector<MBEntityHandle>::const_iterator rem = remote_handles.begin();
- MBRange::const_iterator loc = subrange.begin();
- for( ; rem != remote_handles.end(); ++rem, ++loc) {
- id_t id = remote_ids.find( *rem );
- if (!id) {
- printdebug( "Invalid remote handle (0x%lx, %s) on proc %d.\n",
- (unsigned long)*rem, MBCN::EntityTypeName(type), (int)proc );
- return MB_FAILURE;
+ // For each geometry meshset in the interface
+ MBRange::iterator r_iter = MBRange::lower_bound( remoteMesh[proc].begin(),
+ remoteMesh[proc].end(),
+ CREATE_HANDLE(type,0,result) );
+ MBRange::iterator r_stop = MBRange::lower_bound( r_iter,
+ remoteMesh[proc].end(),
+ CREATE_HANDLE(type+1,0,result) );
+ for ( ; r_iter != r_stop; ++r_iter)
+ {
+ MBEntityHandle entity = *r_iter;
+
+ // Get handle on other processor
+ MBEntityHandle global;
+ rval = iFace->tag_get_data( global_id_tag, &entity, 1, &global );
+ assert(MB_SUCCESS == rval);
+
+ // Find corresponding fileid on other processor.
+ // This could potentially be n**2, but we will assume that
+ // the range list from each processor is short (typically 1).
+ int j, steps = 0;
+ unsigned long low, high;
+ for (j = 0; j < count; j += 2)
+ {
+ low = ranges[j];
+ high = ranges[j+1];
+ if (low <= global && high >= global)
+ break;
+ steps += (high - low) + 1;
}
-
- ri = idMap.insert( *loc, id, 1 );
- if (ri == idMap.end()) {
- printdebug( "Conflicting file ID (%ul) for remote handle (0x%lx, %s) on proc %d.\n",
- (unsigned long)id, (unsigned long)*rem, MBCN::EntityTypeName(type), (int)proc );
- return MB_FAILURE;
+ if (j >= count) {
+ printdebug("*** handle = %u, type = %d, id = %d, proc = %d\n",
+ (unsigned)global, (int)(iFace->type_from_handle(global)), (int)(iFace->id_from_handle(global)), proc);
+ for (int ii = 0; ii < count; ii+=2)
+ printdebug("*** %u to %u\n", (unsigned)ranges[ii], (unsigned)ranges[ii+1] );
+ MBRange junk; junk.insert( global );
+ print_type_sets( iFace, myPcomm->proc_config().proc_rank(), myPcomm->proc_config().proc_size(), junk );
}
- }
- }
+ assert(j < count);
+ int fileid = offset + steps + (global - low);
+ RangeMap<MBEntityHandle,id_t>::iterator ri = idMap.insert( entity, fileid, 1 );
+ assert( ri != idMap.end() );
+ } // for(r_iter->range)
+ } // for(each processor)
+
return MB_SUCCESS;
}
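
A note on the restored lookup loop in communicate_remote_ids (the large hunk above): it finds the file ID of an entity owned by another processor by walking that processor's gathered list of (start, end) handle pairs, accumulating a running count of preceding entities ("steps"), and computing offset + steps + (global - low) for the pair that contains the handle. A minimal standalone sketch of that search, assuming the same flat layout of the gathered range list (the function name and the -1 sentinel are illustrative, not MOAB API):

// Given one remote processor's (start_handle, end_handle) pairs packed
// flat in 'ranges' ('count' unsigned longs total) and that processor's
// starting file-ID 'offset', return the file ID assigned to 'global',
// or -1 if no pair covers it.  Hypothetical helper, not part of MOAB.
long lookup_remote_file_id( const unsigned long* ranges, int count,
                            long offset, unsigned long global )
{
  long steps = 0;                      // entities in the pairs already skipped
  for (int j = 0; j < count; j += 2) {
    unsigned long low  = ranges[j];
    unsigned long high = ranges[j+1];
    if (low <= global && global <= high)
      return offset + steps + (long)(global - low);
    steps += (long)(high - low) + 1;   // this pair holds (high - low + 1) IDs
  }
  return -1;                           // handle not exported by that processor
}

Each pair accounts for high - low + 1 consecutive file IDs, so the search is linear in the number of pairs; as the comment in the hunk notes, the per-processor range list is expected to be very short (typically one pair).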
Modified: MOAB/trunk/parallel/WriteHDF5Parallel.hpp
===================================================================
--- MOAB/trunk/parallel/WriteHDF5Parallel.hpp 2008-07-01 21:31:19 UTC (rev 1972)
+++ MOAB/trunk/parallel/WriteHDF5Parallel.hpp 2008-07-01 21:34:03 UTC (rev 1973)
@@ -117,12 +117,6 @@
//! entities to be written on this processor.
MBErrorCode communicate_remote_ids(MBEntityType type);
- //! For entities that will be written by another
- //! processor, get the file Ids that will be assigned
- //! to those so they can be referenced by
- //! entities to be written on this processor.
- MBErrorCode communicate_remote_ids(ExportSet* ents);
-
//! Sort the list of tag information in the parent
//! class by name so all procs have them in the same
//! order.
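
As a side note on the DEBUG machinery touched near the top of the WriteHDF5Parallel.cpp diff: START_SERIAL / END_SERIAL implement the usual "take turns" idiom for serializing debug output across MPI ranks, where every rank iterates over all ranks, hits a barrier on each iteration, and only prints on its own turn. A rough standalone sketch of that idiom (illustrative only; MOAB's actual macros differ in detail and expand in place rather than calling a function):

#include <mpi.h>
#include <cstdio>

// Print 'msg' once per rank, in rank order.  Hypothetical helper
// showing the same pattern as START_SERIAL/END_SERIAL.
void print_in_rank_order( MPI_Comm comm, const char* msg )
{
  int rank, size;
  MPI_Comm_rank( comm, &rank );
  MPI_Comm_size( comm, &size );
  for (int turn = 0; turn < size; ++turn) {
    MPI_Barrier( comm );               // one barrier per turn keeps ranks in step
    if (turn == rank)
      printf( "[%d/%d] %s\n", rank, size, msg );
  }
  MPI_Barrier( comm );                 // wait for the last rank to finish
}

The barriers make the ordering deterministic at the cost of synchronizing every rank, which is why the macros are only enabled when DEBUG is defined.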