[MOAB-dev] r2010 - MOAB/trunk/refiner
dcthomp at mcs.anl.gov
Mon Jul 14 21:36:25 CDT 2008
Author: dcthomp
Date: 2008-07-14 21:36:25 -0500 (Mon, 14 Jul 2008)
New Revision: 2010
Modified:
MOAB/trunk/refiner/MBMeshOutputFunctor.hpp
MOAB/trunk/refiner/MBMeshRefiner.cpp
Log:
ENH: Progress on the parallelized tet refiner. The communication
required for each process to assign global IDs to all
new entities is now in place (with lots of debug printouts
for the moment).
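
For reference, the end goal of this exchange is simple: once each
process knows how many new entities every partition contains, the
starting global ID for its own new entities is an exclusive prefix sum
over those counts. A minimal sketch of that final step (illustrative
names; this is a simplification, not the committed code):

  // Sketch: compute the first global ID for this process's new
  // entities from the local count. Assumes MPI is initialized.
  #include <mpi.h>

  long first_global_id( long local_new_entities, MPI_Comm comm )
  {
    long offset = 0;
    // Exclusive prefix sum of counts over ranks; rank 0's output is
    // undefined by MPI_Exscan, so force it to zero.
    MPI_Exscan( &local_new_entities, &offset, 1, MPI_LONG, MPI_SUM, comm );
    int rank;
    MPI_Comm_rank( comm, &rank );
    if ( rank == 0 )
      offset = 0;
    return offset; // IDs [offset, offset + local_new_entities) are ours
  }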
Modified: MOAB/trunk/refiner/MBMeshOutputFunctor.hpp
===================================================================
--- MOAB/trunk/refiner/MBMeshOutputFunctor.hpp 2008-07-15 02:33:21 UTC (rev 2009)
+++ MOAB/trunk/refiner/MBMeshOutputFunctor.hpp 2008-07-15 02:36:25 UTC (rev 2010)
@@ -11,6 +11,111 @@
#include <iterator>
#include <algorithm>
+#include <string.h>
+
+/**\brief Represent a set of processes using a bit vector.
+ *
+ * This is used by the mesh refiner when determining where to record
+ * split vertices so that labeling can be inferred across process
+ * boundaries without communicating anything other than the number of
+ * entities in a given partition.
+ */
+class MBProcessSet
+{
+public:
+ enum
+ {
+ SHARED_PROC_BYTES = (MAX_SHARING_PROCS / 8 + (MAX_SHARING_PROCS % 8 ? 1 : 0))
+ };
+
+ MBProcessSet()
+ {
+ this->clear();
+ }
+
+ MBProcessSet( const unsigned char* psetbits )
+ {
+ for ( int i = 0; i < SHARED_PROC_BYTES; ++ i )
+ this->processes[i] = psetbits[i];
+ }
+
+ ~MBProcessSet()
+ {
+ }
+
+ void unite( const MBProcessSet& other )
+ {
+ for ( int i = 0; i < SHARED_PROC_BYTES; ++ i )
+ {
+ this->processes[i] |= other.processes[i];
+ }
+ }
+
+ void intersect( const MBProcessSet& other )
+ {
+ for ( int i = 0; i < SHARED_PROC_BYTES; ++ i )
+ {
+ this->processes[i] &= other.processes[i];
+ }
+ }
+
+ void clear()
+ {
+ memset( this->processes, 0, SHARED_PROC_BYTES );
+ }
+
+ void set_process_member( int i )
+ {
+ int byte = i / 8;
+ int bitmask = 1 << ( i % 8 );
+ this->processes[byte] |= bitmask;
+ }
+
+ void set_process_members( const std::vector<int>& procs )
+ {
+ for ( std::vector<int>::const_iterator it = procs.begin(); it != procs.end() && *it != -1; ++ it )
+ {
+ this->set_process_member( *it );
+ }
+ }
+
+ bool is_process_member( int i ) const
+ {
+ int byte = i / 8;
+ int bitmask = 1 << ( i % 8 );
+ return ( this->processes[byte] & bitmask ) ? true : false;
+ }
+
+ const unsigned char* data() const
+ {
+ return this->processes;
+ }
+
+ bool operator < ( const MBProcessSet& other ) const
+ {
+ for ( int i = 0; i < SHARED_PROC_BYTES; ++ i )
+ {
+ if ( this->processes[i] < other.processes[i] )
+ return true;
+ else if ( this->processes[i] > other.processes[i] )
+ return false;
+ }
+ return false; // equality
+ }
+
+ friend std::ostream& operator << ( std::ostream& os, const MBProcessSet& pset )
+ {
+ for ( int i = 0; i < MAX_SHARING_PROCS; ++ i )
+ {
+ os << ( pset.is_process_member( i ) ? "1" : "0" );
+ }
+ return os;
+ }
+
+protected:
+ unsigned char processes[SHARED_PROC_BYTES];
+};
+
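
Usage is what you would expect from a fixed-width bit set keyed by
process rank. For example, assuming the class above is in scope:

  MBProcessSet a;
  MBProcessSet b;
  a.set_process_member( 2 );
  a.set_process_member( 5 );
  b.set_process_member( 5 );
  b.set_process_member( 9 );
  a.intersect( b );        // a now contains only process 5
  std::cout << a << "\n";  // one '0'/'1' per possible sharing process

Because operator< compares the underlying bytes lexicographically, it
defines a strict weak ordering, so the class can serve directly as a
std::map key; that is how proc_partition_counts uses it below.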
template< int _n >
class MBSplitVertexIndex
{
@@ -49,61 +154,65 @@
this->rank = ipcomm ? ipcomm->proc_config().proc_rank() : 0;
}
virtual ~MBSplitVerticesBase() { }
- virtual bool find_or_create( const MBEntityHandle* split_src, const double* coords, MBEntityHandle& vert_handle ) = 0;
+ virtual bool find_or_create(
+ const MBEntityHandle* split_src, const double* coords, MBEntityHandle& vert_handle,
+ std::map<MBProcessSet,int>& proc_partition_counts ) = 0;
+ /// Determine which processes will contain an output vertex given the split vertices defining it.
+ void update_partition_counts( int num, const MBEntityHandle* split_src, std::map<MBProcessSet,int>& proc_partition_counts )
+ {
+ this->begin_vertex_procs();
+ for ( int i = 0; i < num; ++ i )
+ {
+ this->add_vertex_procs( split_src[i] );
+ }
+ this->end_vertex_procs();
+ proc_partition_counts[this->common_shared_procs]++;
+ }
+
/// Prepare to compute the processes on which a new split-vertex will live.
void begin_vertex_procs()
{
this->first_vertex = true;
+ this->common_shared_procs.clear();
}
/// Call this for each existing corner vertex used to define a split-vertex.
void add_vertex_procs( MBEntityHandle vert_in )
{
- std::set<int> current_shared_procs;
- if ( ! this->first_vertex )
- {
- current_shared_procs = this->common_shared_procs;
- }
- this->common_shared_procs.clear();
int stat;
bool got = false;
+ this->current_shared_procs.clear();
stat = this->mesh_in->tag_get_data(
this->tag_manager->shared_proc(), &vert_in, 1, &this->shared_procs_val[0] );
- std::cout << "sstat: " << stat;
if ( stat == MB_SUCCESS && this->shared_procs_val[0] != -1 )
{
got = true;
std::cout << " s" << this->rank << " s" << this->shared_procs_val[0] << " | ";
- this->shared_procs_val.resize( 1 );
+ this->shared_procs_val[1] = -1;
}
stat = this->mesh_in->tag_get_data(
this->tag_manager->shared_procs(), &vert_in, 1, &this->shared_procs_val[0] );
- std::cout << "mstat: " << stat;
if ( stat == MB_SUCCESS && this->shared_procs_val[0] != -1 )
{
got = true;
int i;
for ( i = 0; i < MAX_SHARING_PROCS && this->shared_procs_val[i] != -1; ++ i )
std::cout << " m" << this->shared_procs_val[i];
- this->shared_procs_val.resize( i );
std::cout << " | ";
}
if ( got )
{
+ this->current_shared_procs.set_process_members( this->shared_procs_val );
+ this->current_shared_procs.set_process_member( this->rank );
if ( this->first_vertex )
{
- std::copy(
- this->shared_procs_val.begin(), this->shared_procs_val.end(),
- std::insert_iterator< std::set<int> >( this->common_shared_procs, this->common_shared_procs.begin() ) );
+ this->common_shared_procs.unite( this->current_shared_procs );
this->first_vertex = false;
}
else
{
- std::set_intersection(
- this->shared_procs_val.begin(), this->shared_procs_val.end(),
- current_shared_procs.begin(), current_shared_procs.end(),
- std::insert_iterator< std::set<int> >( this->common_shared_procs, this->common_shared_procs.begin() ) );
+ this->common_shared_procs.intersect( this->current_shared_procs );
}
}
else
@@ -115,11 +224,7 @@
/// Call this once after all the add_vertex_procs() calls for a split-vertex to prepare queues for the second stage MPI send.
void end_vertex_procs()
{
- std::cout << " Common procs ";
- std::copy(
- this->common_shared_procs.begin(), this->common_shared_procs.end(),
- std::ostream_iterator<int>( std::cout, " " ) );
- std::cout << " " << this->rank;
+ std::cout << " Common procs " << this->common_shared_procs;
std::cout << "\n";
// FIXME: Here is where we add the vertex to the appropriate queues.
}
@@ -128,7 +233,8 @@
MBInterface* mesh_out; // Output mesh. Needed for new vertex set in vert_handle
MBRefinerTagManager* tag_manager;
std::vector<int> shared_procs_val; // Used to hold procs sharing an input vert.
- std::set<int> common_shared_procs; // Holds intersection of several shared_procs_vals.
+ MBProcessSet current_shared_procs; // Holds process list as it is being accumulated
+ MBProcessSet common_shared_procs; // Holds intersection of several shared_procs_vals.
int rank; // This process' rank.
bool first_vertex; // True just after begin_vertex_procs() is called.
};
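
The accumulation above boils down to: seed common_shared_procs from the
first corner vertex's sharing list, then intersect with each subsequent
corner. Whatever survives is the set of processes that see every
corner, and therefore the only processes that can see the split vertex.
As a sketch (sharing_set_of() is a hypothetical accessor, not MOAB API):

  MBProcessSet common;
  bool first = true;
  for ( int i = 0; i < ncorners; ++ i )
  {
    MBProcessSet procs = sharing_set_of( corner[i] ); // hypothetical
    if ( first )
    {
      common.unite( procs );
      first = false;
    }
    else
    {
      common.intersect( procs );
    }
  }
  // 'common' now holds every process sharing all of the corners.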
@@ -146,7 +252,9 @@
this->shared_procs_val.resize( _n * MAX_SHARING_PROCS );
}
virtual ~MBSplitVertices() { }
- virtual bool find_or_create( const MBEntityHandle* split_src, const double* coords, MBEntityHandle& vert_handle )
+ virtual bool find_or_create(
+ const MBEntityHandle* split_src, const double* coords, MBEntityHandle& vert_handle,
+ std::map<MBProcessSet,int>& proc_partition_counts )
{
MBSplitVertexIndex<_n> key( split_src );
MapIteratorType it = this->find( key );
@@ -157,16 +265,7 @@
return false;
}
(*this)[key] = vert_handle;
- std::cout << "New vertex " << vert_handle << " shared with ";
- this->begin_vertex_procs();
- for ( int i = 0; i < _n; ++ i )
- {
- this->add_vertex_procs( split_src[i] );
- }
- std::cout << "\n";
- this->end_vertex_procs();
- // Decide whether local process owns new vert.
- // Add to the appropriate queues for transmitting handles.
+ this->update_partition_counts( _n, split_src, proc_partition_counts );
return true;
}
vert_handle = it->second;
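
find_or_create() is map-backed memoization: the corner handles form the
key, so each split vertex is created exactly once per process no matter
how many elements reference it. A condensed sketch of the pattern,
stripped of the template machinery (find_or_create_sketch and
split_verts are illustrative names):

  std::map< std::vector<MBEntityHandle>, MBEntityHandle > split_verts;

  bool find_or_create_sketch(
    std::vector<MBEntityHandle> corners, const double* coords,
    MBInterface* mesh_out, MBEntityHandle& vert )
  {
    std::sort( corners.begin(), corners.end() ); // canonical key
    std::map< std::vector<MBEntityHandle>, MBEntityHandle >::iterator it
      = split_verts.find( corners );
    if ( it != split_verts.end() )
    {
      vert = it->second; // created earlier by a neighboring element
      return false;
    }
    if ( mesh_out->create_vertex( coords, vert ) != MB_SUCCESS )
      return false;
    split_verts[corners] = vert;
    return true; // newly created; caller assigns tags and updates counts
  }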
@@ -184,6 +283,7 @@
std::vector<MBEntityHandle> elem_vert;
MBRefinerTagManager* tag_manager;
MBEntityHandle destination_set;
+ std::map<MBProcessSet,int> proc_partition_counts;
MBMeshOutputFunctor( MBRefinerTagManager* tag_mgr )
{
@@ -230,6 +330,75 @@
std::cout << " >\n";
}
+ void print_partition_counts( MBParallelComm* comm )
+ {
+ int lnparts = this->proc_partition_counts.size();
+ std::vector<unsigned char> lpdefns;
+ std::vector<int> lpsizes;
+ lpdefns.resize( MBProcessSet::SHARED_PROC_BYTES * lnparts );
+ lpsizes.resize( lnparts );
+ std::cout << "**** Partition Counts ****\n";
+ int i = 0;
+ std::map<MBProcessSet,int>::iterator it;
+ for ( it = this->proc_partition_counts.begin(); it != this->proc_partition_counts.end(); ++ it, ++ i )
+ {
+ for ( int j = 0; j < MBProcessSet::SHARED_PROC_BYTES; ++ j )
+ lpdefns[MBProcessSet::SHARED_PROC_BYTES * i + j] = it->first.data()[j];
+ lpsizes[i] = it->second;
+ std::cout << "Partition " << it->first << ": " << it->second << "\n";
+ }
+
+ if ( ! comm )
+ return;
+
+ std::vector<int> nparts;
+ std::vector<int> dparts;
+ unsigned long prank = comm->proc_config().proc_rank();
+ unsigned long psize = comm->proc_config().proc_size();
+ nparts.resize( psize );
+ dparts.resize( psize + 1 );
+ MPI_Allgather( &lnparts, 1, MPI_INT, &nparts[0], 1, MPI_INT, comm->proc_config().proc_comm() );
+ unsigned long ndefs = 0;
+ for ( int rank = 1; rank <= psize; ++ rank )
+ {
+ dparts[rank] = nparts[rank - 1] + dparts[rank - 1];
+ std::cout << "Proc " << rank << ": " << nparts[rank-1] << " partitions, offset: " << dparts[rank] << "\n";
+ }
+ std::vector<unsigned char> part_defns;
+ std::vector<int> part_sizes;
+ part_defns.resize( MBProcessSet::SHARED_PROC_BYTES * dparts[psize] );
+ part_sizes.resize( dparts[psize] );
+ MPI_Allgatherv(
+ &lpsizes[0], lnparts, MPI_INT,
+ &part_sizes[0], &nparts[0], &dparts[0], MPI_INT, comm->proc_config().proc_comm() );
+ for ( int rank = 0; rank < psize; ++ rank )
+ {
+ nparts[rank] *= MBProcessSet::SHARED_PROC_BYTES;
+ dparts[rank] *= MBProcessSet::SHARED_PROC_BYTES;
+ }
+ MPI_Allgatherv(
+ &lpdefns[0], MBProcessSet::SHARED_PROC_BYTES * lnparts, MPI_UNSIGNED_CHAR,
+ &part_defns[0], &nparts[0], &dparts[0], MPI_UNSIGNED_CHAR, comm->proc_config().proc_comm() );
+ for ( int i = 0; i < dparts[psize]; ++ i )
+ {
+ MBProcessSet pset( &part_defns[MBProcessSet::SHARED_PROC_BYTES * i] );
+ std::map<MBProcessSet,int>::iterator it = this->proc_partition_counts.find( pset );
+ if ( it != this->proc_partition_counts.end() )
+ {
+ std::cout << "Partition " << pset << ( it->second == part_sizes[i] ? " matches" : " broken" ) << ".\n";
+ }
+ else
+ {
+ this->proc_partition_counts[pset] = part_sizes[i];
+ }
+ }
+ std::map<MBProcessSet,int>::iterator pcit;
+ for ( pcit = this->proc_partition_counts.begin(); pcit != this->proc_partition_counts.end(); ++ pcit )
+ {
+ std::cout << "Partition " << pcit->first << ": " << pcit->second << " #\n";
+ }
+ }
+
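
print_partition_counts() follows the standard two-phase idiom for
exchanging variable-length data: MPI_Allgather the per-process counts,
prefix-sum them into displacements, then MPI_Allgatherv the payload. A
self-contained sketch of just that idiom (allgather_variable is an
illustrative name; it assumes at least one element is contributed
overall):

  #include <mpi.h>
  #include <vector>

  void allgather_variable( const std::vector<int>& local,
                           std::vector<int>& global, MPI_Comm comm )
  {
    int psize;
    MPI_Comm_size( comm, &psize );
    int lcount = (int) local.size();
    std::vector<int> counts( psize );
    std::vector<int> displs( psize + 1, 0 );
    // Phase 1: everyone learns how much each rank will send.
    MPI_Allgather( &lcount, 1, MPI_INT, &counts[0], 1, MPI_INT, comm );
    for ( int r = 1; r <= psize; ++ r )
      displs[r] = displs[r - 1] + counts[r - 1];
    global.resize( displs[psize] );
    // Phase 2: gather the variable-length payloads at their offsets.
    MPI_Allgatherv( lcount ? const_cast<int*>( &local[0] ) : 0, lcount,
                    MPI_INT, &global[0], &counts[0], &displs[0], MPI_INT,
                    comm );
  }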
void assign_tags( MBEntityHandle vhandle, const void* vtags )
{
if ( ! vhandle )
@@ -271,7 +440,8 @@
}
else if ( nvhash < 4 )
{
- bool newly_created = this->split_vertices[nvhash]->find_or_create( vhash, vcoords, vertex_handle );
+ bool newly_created = this->split_vertices[nvhash]->find_or_create(
+ vhash, vcoords, vertex_handle, this->proc_partition_counts );
if ( newly_created )
{
this->assign_tags( vertex_handle, vtags );
Modified: MOAB/trunk/refiner/MBMeshRefiner.cpp
===================================================================
--- MOAB/trunk/refiner/MBMeshRefiner.cpp 2008-07-15 02:33:21 UTC (rev 2009)
+++ MOAB/trunk/refiner/MBMeshRefiner.cpp 2008-07-15 02:36:25 UTC (rev 2010)
@@ -26,7 +26,7 @@
this->tag_manager = new MBRefinerTagManager( this->mesh_in, this->mesh_out );
this->output_functor = new MBMeshOutputFunctor( this->tag_manager );
this->entity_refiner = 0;
- this->comm = 0;
+ this->comm = MBParallelComm::get_pcomm( this->mesh_out, 0 );
}
/**\brief Destroy a mesh refiner.
@@ -125,6 +125,7 @@
}
}
}
+ this->output_functor->print_partition_counts( this->comm );
return true;
}