[MOAB-dev] commit/MOAB: tautges: Reverting change from yesterday that broke tests.
commits-noreply at bitbucket.org
Wed Sep 4 09:34:05 CDT 2013
1 new commit in MOAB:
https://bitbucket.org/fathomteam/moab/commits/f0b744730e34/
Changeset: f0b744730e34
Branch: master
User: tautges
Date: 2013-09-04 16:33:58
Summary: Reverting change from yesterday that broke tests.
Affected #: 5 files
diff --git a/src/ScdInterface.cpp b/src/ScdInterface.cpp
index 5413e3e..9747af1 100644
--- a/src/ScdInterface.cpp
+++ b/src/ScdInterface.cpp
@@ -659,7 +659,7 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, EntityHandle se
if (MB_SUCCESS != rval) return rval;
// create interface sets
- rval = pcomm->create_interface_sets(proc_nvecs);
+ rval = pcomm->create_interface_sets(proc_nvecs, 3, 2);
if (MB_SUCCESS != rval) return rval;
// add the box to the PComm's partitionSets
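
Worth noting on this hunk: the restored overload takes the interface dimensions explicitly. The structured boxes here are 3-dimensional, so the sets shared between parts are 2-dimensional faces, hence the (3, 2). A minimal, standalone sketch of the map the call consumes, assuming the key/value convention visible in tag_shared_vertices (the handle value is made up):

    #include <map>
    #include <vector>

    typedef unsigned long EntityHandle;   // stand-in for moab::EntityHandle

    int main() {
      // key: sorted ranks of the processors sharing a group of entities;
      // value: the local handles shared with exactly those ranks
      std::map<std::vector<int>, std::vector<EntityHandle> > proc_nvecs;
      std::vector<int> sharing_ranks;
      sharing_ranks.push_back(1);
      sharing_ranks.push_back(3);
      proc_nvecs[sharing_ranks].push_back(42);   // 42: hypothetical handle
      return 0;
    }
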
diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index 20fc5da..4977f91 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -250,7 +250,7 @@ namespace moab {
if (tag < MB_MESG_REMOTEH_ACK) myDebug->print(3, ", recv_ent_reqs=");
else if (tag < MB_MESG_TAGS_ACK) myDebug->print(3, ", recv_remoteh_reqs=");
else myDebug->print(3, ", recv_tag_reqs=");
- for (unsigned int i = 0; i < reqs.size(); i++) myDebug->printf(3, " %p", (void*)reqs[i]);
+ for (unsigned int i = 0; i < reqs.size(); i++) myDebug->printf(3, " %x", reqs[i]);
myDebug->print(3, "\n");
}
}
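
A portability note on this hunk: the restored line prints the request with %x, which assumes the handle converts to unsigned int; the removed line cast to void* and used %p, which assumes a pointer-sized handle. Neither holds everywhere (MPICH defines MPI_Request as an int, Open MPI as a struct pointer). A sketch that sidesteps both assumptions by widening through uintptr_t; illustrative only, not what the commit does:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <mpi.h>

    // Print an opaque MPI_Request whether the implementation defines it
    // as an integer (MPICH) or as a pointer (Open MPI).
    void print_request(MPI_Request req) {
      std::printf(" %" PRIxPTR, (uintptr_t)req);
    }
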
@@ -409,6 +409,7 @@ namespace moab {
const bool owned_only)
{
Range entities[4];
+ int local_num_elements[4];
ErrorCode result;
std::vector<unsigned char> pstatus;
for (int dim = 0; dim <= dimension; dim++) {
@@ -429,23 +430,7 @@ namespace moab {
if (pstatus[i] & PSTATUS_NOT_OWNED)
dum_range.insert(*rit);
entities[dim] = subtract( entities[dim], dum_range);
- }
-
- return assign_global_ids(entities, dimension, start_id, parallel, owned_only);
- }
- //! assign a global id space, for largest-dimension or all entities (and
- //! in either case for vertices too)
- ErrorCode ParallelComm::assign_global_ids( Range entities[],
- const int dimension,
- const int start_id,
- const bool parallel,
- const bool owned_only)
- {
- int local_num_elements[4];
- ErrorCode result;
- std::vector<unsigned char> pstatus;
- for (int dim = 0; dim <= dimension; dim++) {
local_num_elements[dim] = entities[dim].size();
}
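
For context, the surviving function first counts local entities per dimension (local_num_elements above); the parallel id assignment then amounts to an exclusive prefix sum over those counts, so each rank learns how many entities precede it. A minimal sketch of that idea, under hypothetical names rather than MOAB's internals:

    #include <mpi.h>

    // Each rank's first global id = start_id + (sum of counts on lower ranks).
    int first_global_id(int local_count, int start_id, MPI_Comm comm) {
      int offset = 0;
      MPI_Exscan(&local_count, &offset, 1, MPI_INT, MPI_SUM, comm);
      int rank;
      MPI_Comm_rank(comm, &rank);
      if (rank == 0) offset = 0;  // MPI_Exscan leaves rank 0's recvbuf undefined
      return start_id + offset;
    }
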
@@ -3542,9 +3527,9 @@ ErrorCode ParallelComm::reduce_void(int tag_data_type, const MPI_Op mpi_op, int
}
ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
- int resolve_dim,
- int shared_dim,
- const Tag* id_tag)
+ int resolve_dim,
+ int shared_dim,
+ const Tag* id_tag)
{
ErrorCode result;
Range proc_ents;
@@ -3556,7 +3541,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
result = scdi->tag_shared_vertices(this, this_set);
if (MB_SUCCESS == result) {
myDebug->tprintf(1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size());
- if (shared_dim == 0) return result;
+ return result;
}
}
@@ -3587,14 +3572,13 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
// must call even if we don't have any entities, to make sure
// collective comm'n works
- return resolve_shared_ents(this_set, proc_ents, resolve_dim, shared_dim, NULL, id_tag);
+ return resolve_shared_ents(this_set, proc_ents, resolve_dim, shared_dim, id_tag);
}
ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
Range &proc_ents,
int resolve_dim,
int shared_dim,
- Range *skin_ents,
const Tag* id_tag)
{
#ifdef USE_MPE
@@ -3607,13 +3591,12 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
ErrorCode result;
myDebug->tprintf(1, "Resolving shared entities.\n");
- if (resolve_dim < shared_dim) {
- result = MB_FAILURE;
- RRA("MOAB does not support vertex-based partitions, only element-based ones.");
- }
-
if (-1 == shared_dim) {
- if (!proc_ents.empty())
+ if (0 == resolve_dim) {
+ result = mbImpl->get_dimension(shared_dim);
+ RRA("Couldn't get dimension.");
+ }
+ else if (!proc_ents.empty())
shared_dim = mbImpl->dimension_from_handle(*proc_ents.begin())-1;
else if (resolve_dim == 3)
shared_dim = 2;
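
The restored branch covers vertex-based partitions: when shared_dim comes in as -1 and resolve_dim is 0, the mesh dimension decides what can be shared; otherwise shared entities sit one dimension below the partitioned ones. The rule, restated as a standalone helper (illustrative only):

    // shared_dim == -1 means "infer it".
    int infer_shared_dim(int resolve_dim, int mesh_dim, int part_ents_dim) {
      if (resolve_dim == 0) return mesh_dim;  // vertex-based partition
      return part_ents_dim - 1;               // element-based: share faces/edges
    }
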
@@ -3622,37 +3605,60 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
return MB_FAILURE;
}
}
- assert(shared_dim >= 0 && resolve_dim >= shared_dim);
+ assert(shared_dim >= 0 && resolve_dim >= 0);
// get the skin entities by dimension
- Range tmp_skin_ents[4];
+ Range skin_ents[4];
+ std::vector<int> gid_data;
std::vector<EntityHandle> handle_vec;
int skin_dim;
- if (!skin_ents) {
- // need to compute the skin here
- skin_ents = tmp_skin_ents;
-
- // get the entities to be skinned
+ // get the entities to be skinned
+ if (resolve_dim < shared_dim) {
+ // for vertex-based partition, it's the elements adj to the vertices
+ result = mbImpl->get_adjacencies(proc_ents, shared_dim,
+ false, skin_ents[resolve_dim],
+ Interface::UNION);
+ RRA("Failed getting skinned entities.");
+ skin_dim = shared_dim-1;
+ }
+ else {
+ // for element-based partition, it's just the elements
skin_ents[resolve_dim] = proc_ents;
skin_dim = resolve_dim-1;
+ }
- // find the skin
- Skinner skinner(mbImpl);
- result = skinner.find_skin(this_set, skin_ents[resolve_dim], false, skin_ents[skin_dim],
- NULL, true, true, true);
- RRA("Failed to find skin.");
- myDebug->tprintf(1, "Found skin, now resolving.\n");
-
- // get entities adjacent to skin ents from shared_dim down to zero
- for (int this_dim = skin_dim-1; this_dim >= 0; this_dim--) {
- result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
- true, skin_ents[this_dim],
- Interface::UNION);
- RRA("Failed getting skin adjacencies.");
- }
+ // find the skin
+ Skinner skinner(mbImpl);
+ result = skinner.find_skin(this_set, skin_ents[skin_dim+1], false, skin_ents[skin_dim],
+ NULL, true, true, true);
+ RRA("Failed to find skin.");
+ myDebug->tprintf(1, "Found skin, now resolving.\n");
+
+ // get entities adjacent to skin ents from shared_dim down to zero
+ for (int this_dim = skin_dim-1; this_dim >= 0; this_dim--) {
+ result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
+ true, skin_ents[this_dim],
+ Interface::UNION);
+ RRA("Failed getting skin adjacencies.");
}
-
+
+ return resolve_shared_ents(this_set, proc_ents, skin_ents,
+ resolve_dim, shared_dim, id_tag);
+ }
+
+ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
+ Range &proc_ents,
+ Range skin_ents[],
+ int resolve_dim,
+ int shared_dim,
+ const Tag* id_tag)
+ { // resolve shared vertices first
+ ErrorCode result;
+ std::vector<int> gid_data;
+ std::vector<EntityHandle> handle_vec;
+ int skin_dim = resolve_dim-1;
+
// global id tag
Tag gid_tag; int def_val = -1;
if (id_tag)
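
The three-Range-array overload introduced in this hunk expects the caller to hand over the skin by dimension: skin_ents[resolve_dim] holds the partitioned elements, skin_ents[resolve_dim-1] their skin, and skin_ents[0] the skin vertices on which resolution is based. A sketch of the element-based path, with the skinning call copied from the hunk above (MOAB headers assumed, error handling trimmed):

    #include "moab/Core.hpp"
    #include "moab/Skinner.hpp"
    using namespace moab;

    ErrorCode build_skin(Interface *mbImpl, EntityHandle this_set,
                         Range &proc_ents, int resolve_dim) {
      Range skin_ents[4];                  // indexed by entity dimension
      int skin_dim = resolve_dim - 1;
      skin_ents[resolve_dim] = proc_ents;  // element-based partition
      Skinner skinner(mbImpl);
      return skinner.find_skin(this_set, skin_ents[skin_dim+1], false,
                               skin_ents[skin_dim], NULL, true, true, true);
    }
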
@@ -3666,17 +3672,25 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
else if (tag_created) {
// just created it, so we need global ids
- Range tmp_ents[4];
- tmp_ents[resolve_dim] = proc_ents.subset_by_dimension(resolve_dim);
- result = mbImpl->get_adjacencies(tmp_ents[resolve_dim], 0, false, tmp_ents[0]);
- RRA("Failed to get adjacent vertices.");
- result = assign_global_ids(tmp_ents, resolve_dim, 1, true, true);
+ result = assign_global_ids(this_set, skin_dim+1,true,true,true);
RRA("Failed assigning global ids.");
}
}
+ // store index in temp tag; reuse gid_data
+ gid_data.resize(2*skin_ents[0].size());
+ int idx = 0;
+ for (Range::iterator rit = skin_ents[0].begin();
+ rit != skin_ents[0].end(); rit++)
+ gid_data[idx] = idx, idx++;
+ Tag idx_tag;
+ result = mbImpl->tag_get_handle("__idx_tag", 1, MB_TYPE_INTEGER,
+ idx_tag, MB_TAG_DENSE|MB_TAG_CREAT, &def_val );
+ if (MB_SUCCESS != result) return result;
+ result = mbImpl->tag_set_data(idx_tag, skin_ents[0], &gid_data[0]);
+ RRA("Couldn't assign index tag.");
+
// get gids for skin ents in a vector, to pass to gs
- std::vector<int> gid_data(skin_ents[0].size());
result = mbImpl->tag_get_data(gid_tag, skin_ents[0], &gid_data[0]);
RRA("Couldn't get gid tag for skin vertices.");
@@ -3693,6 +3707,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
// get a crystal router
gs_data::crystal_data *cd = procConfig.crystal_router();
+
/*
// get total number of entities; will overshoot highest global id, but
// that's ok
@@ -3764,7 +3779,8 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
Interface::UNION);
RRA("Couldn't get proc_verts.");
- result = tag_shared_verts(shared_verts, skin_ents, proc_nvecs, proc_verts);
+ result = tag_shared_verts(shared_verts, skin_ents,
+ proc_nvecs, proc_verts);
RRA("Trouble tagging shared verts.");
#ifdef USE_MPE
@@ -3774,7 +3790,8 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
#endif
// get entities shared by 1 or n procs
- result = get_proc_nvecs(resolve_dim, shared_dim, skin_ents, proc_nvecs);
+ result = tag_shared_ents(resolve_dim, shared_dim, skin_ents,
+ proc_nvecs);
RRA("Trouble tagging shared entities.");
shared_verts.reset();
@@ -3792,7 +3809,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
// create the sets for each interface; store them as tags on
// the interface instance
Range iface_sets;
- result = create_interface_sets(proc_nvecs);
+ result = create_interface_sets(proc_nvecs, resolve_dim, shared_dim);
RRA("Trouble creating iface sets.");
// establish comm procs and buffers for them
@@ -3805,7 +3822,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
RRA("Shared handle check failed after iface vertex exchange.");
#endif
- // resolve shared non-vertex remote handles; implemented in ghost cell exchange
+ // resolve shared entity remote handles; implemented in ghost cell exchange
// code because it's so similar
result = exchange_ghost_cells(-1, -1, 0, 0, true, true);
RRA("Trouble resolving shared entity remote handles.");
@@ -4351,12 +4368,15 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
RRA("");
}
- result = get_proc_nvecs(resolve_dim, shared_dim, skin_ents, proc_nvecs);
+ result = tag_shared_ents(resolve_dim, shared_dim, skin_ents,
+ proc_nvecs);
- return create_interface_sets(proc_nvecs);
+ return create_interface_sets(proc_nvecs, resolve_dim, shared_dim);
}
- ErrorCode ParallelComm::create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs)
+ ErrorCode ParallelComm::create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
+ int /*resolve_dim*/,
+ int /*shared_dim*/)
{
if (proc_nvecs.empty()) return MB_SUCCESS;
@@ -4510,10 +4530,10 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
return MB_SUCCESS;
}
- ErrorCode ParallelComm::get_proc_nvecs(int resolve_dim,
- int shared_dim,
- Range *skin_ents,
- std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs)
+ ErrorCode ParallelComm::tag_shared_ents(int resolve_dim,
+ int shared_dim,
+ Range *skin_ents,
+ std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs)
{
// set sharing procs tags on other skin ents
ErrorCode result;
@@ -6227,7 +6247,7 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
}
// create interface sets from shared entities
- result = create_interface_sets(proc_nvecs);
+ result = create_interface_sets(proc_nvecs, 3, 2);
RRA("Trouble creating iface sets.");
return MB_SUCCESS;
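
Taken together, the ParallelComm.cpp hunks restore the pre-change names and explicit dimensions along the whole resolve path. Condensed, with error checks elided and every name taken from the hunks above:

    result = tag_shared_verts(shared_verts, skin_ents, proc_nvecs, proc_verts);
    result = tag_shared_ents(resolve_dim, shared_dim, skin_ents, proc_nvecs);
    result = create_interface_sets(proc_nvecs, resolve_dim, shared_dim);
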
diff --git a/src/parallel/ParallelMergeMesh.cpp b/src/parallel/ParallelMergeMesh.cpp
index 138e32c..f0b99c6 100644
--- a/src/parallel/ParallelMergeMesh.cpp
+++ b/src/parallel/ParallelMergeMesh.cpp
@@ -559,7 +559,7 @@ namespace moab{
}
// get entities shared by 1 or n procs
- rval = myPcomm->get_proc_nvecs(dim,dim-1, &mySkinEnts[0],proc_nranges);
+ rval = myPcomm->tag_shared_ents(dim,dim-1, &mySkinEnts[0],proc_nranges);
if(rval != MB_SUCCESS){
return rval;
}
@@ -567,7 +567,7 @@ namespace moab{
// create the sets for each interface; store them as tags on
// the interface instance
Range iface_sets;
- rval = myPcomm->create_interface_sets(proc_nranges);
+ rval = myPcomm->create_interface_sets(proc_nranges, dim, dim-1);
if(rval != MB_SUCCESS){
return rval;
}
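
ParallelMergeMesh calls both renamed entry points, so it tracks the revert: tag_shared_ents in place of get_proc_nvecs, and the dimensions passed explicitly. For a mesh of dimension dim the shared entities are sought at dim-1, matching the element-based rule above; condensed from the two hunks:

    rval = myPcomm->tag_shared_ents(dim, dim-1, &mySkinEnts[0], proc_nranges);
    rval = myPcomm->create_interface_sets(proc_nranges, dim, dim-1);
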
diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index 5468de1..90cbad6 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -96,47 +96,17 @@ namespace moab {
// \section GLOBAL IDS
// ==================================
- /* \brief Assign a global id space for entities
- *
- * Spaces are separate (and overlapping) for each dimension (i.e. global id space
- * is not unique over all dimension entities)
- *
- * \param this_set Assign ids for entities in this set
- * \param max_dim Assign for entities up to max_dim
- * \param start_id Starting id for entities, defaults to 1
- * \param largest_dim_only Assign only for max_dim and vertices, otherwise for edges/faces too
- * \param parallel If true, use MPI_Scan-like function to properly assign over all processors,
- * otherwise gids are assigned locally only
- * \param owned_only If false, exchanged gids with sharing processors so gids are valid for
- * non-owned entities too
- */
+ //! assign a global id space, for largest-dimension or all entities (and
+ //! in either case for vertices too)
+ //!\param owned_only If true, do not get global IDs for non-owned entities
+ //! from remote processors.
ErrorCode assign_global_ids(EntityHandle this_set,
- const int max_dim,
+ const int dimension,
const int start_id = 1,
const bool largest_dim_only = true,
const bool parallel = true,
const bool owned_only = false);
- /* \brief Assign a global id space for entities
- *
- * Same as set-based variant of this function, except entity Range vectors are input directly
- * (by dimension, so entities[0] contains vertices, etc.)
- *
- * \param entities Assign ids for entities in the vector by dimension
- * \param max_dim Assign for entities up to max_dim
- * \param start_id Starting id for entities, defaults to 1
- * \param largest_dim_only Assign only for max_dim and vertices, otherwise for edges/faces too
- * \param parallel If true, use MPI_Scan-like function to properly assign over all processors,
- * otherwise gids are assigned locally only
- * \param owned_only If false, exchanged gids with sharing processors so gids are valid for
- * non-owned entities too
- */
- ErrorCode assign_global_ids( Range entities[],
- const int dimension,
- const int start_id,
- const bool parallel = true,
- const bool owned_only = true);
-
//! check for global ids; based only on tag handle being there or not;
//! if it's not there, create them for the specified dimensions
//!\param owned_only If true, do not get global IDs for non-owned entities
@@ -426,51 +396,54 @@ namespace moab {
*
* Resolve shared entities between processors for entities in proc_ents,
* by comparing global id tag values on vertices on skin of elements in
- * proc_ents.
+ * proc_ents. Shared entities are assigned a tag that's either
+ * PARALLEL_SHARED_PROC_TAG_NAME, which is 1 integer in length, or
+ * PARALLEL_SHARED_PROCS_TAG_NAME, whose length depends on the maximum
+ * number of sharing processors. Values in these tags denote the ranks
+ * of sharing processors, and the list ends with the value -1.
+ *
+ * If shared_dim is input as -1 or not input, a value one less than the
+ * maximum dimension of entities in proc_ents is used.
*
- * \param this_set Set from which entities for proc_ents are taken
* \param proc_ents Entities for which to resolve shared entities
- * \param resolve_dim Dimension of entities in part/partition
- * \param shared_dim Maximum dimension of shared entities to look for; if -1,
- * resolve_dim-1 is used
- * \param id_tag If non-NULL, use this tag to get global id on which vertex resolution
- * is based
+ * \param shared_dim Maximum dimension of shared entities to look for
*/
ErrorCode resolve_shared_ents(EntityHandle this_set,
- int resolve_dim = 3,
+ Range &proc_ents,
+ int resolve_dim = -1,
int shared_dim = -1,
const Tag* id_tag = 0);
-
+
/** \brief Resolve shared entities between processors
*
- * Same as resolve_shared_ents with entity set as first argument, except instead
- * a range is input containing entities in the part/partition.
+ * Same as resolve_shared_ents(Range&), except works for
+ * all entities in instance of dimension dim.
*
- * \param this_set In this function variant, set is used to speed up skinning
- * \param proc_ents Entities for which to resolve shared entities
- * \param resolve_dim Dimension of entities in part/partition
- * \param shared_dim Maximum dimension of shared entities to look for; if -1,
- * resolve_dim-1 is used
- * \param id_tag If non-NULL, use this tag to get global id on which vertex resolution
- * is based
+ * If shared_dim is input as -1 or not input, a value one less than the
+ * maximum dimension of entities is used.
+
+ * \param dim Dimension of entities in the partition
+ * \param shared_dim Maximum dimension of shared entities to look for
*/
ErrorCode resolve_shared_ents(EntityHandle this_set,
- Range &proc_ents,
- int resolve_dim = -1,
+ int resolve_dim = 3,
int shared_dim = -1,
- Range *skin_ents = NULL,
const Tag* id_tag = 0);
-
+
/** \brief Resolve shared entities between processors
*
- * This version can be used statically, with messages exchanged via direct calls to buffer
- * pack/unpack functions instead of MPI
- *
- * \param pc Array of ParallelComm instances
- * \param np Number of ParallelComm objects in previous argument
- * \param this_set Set of entities in parts/partition
- * \param to_dim Maximum dimension of shared entities to look for
+ * Entity skin array is offered by user not by skinner
+ * It is used by other resolve_shared_ents functions above
+
+ * \param skin_ents[] entity skin array by user
*/
+ ErrorCode resolve_shared_ents(EntityHandle this_set,
+ Range &proc_ents,
+ Range skin_ents[],
+ int resolve_dim = 3,
+ int shared_dim = -1,
+ const Tag* id_tag = 0);
+
static ErrorCode resolve_shared_ents(ParallelComm **pc,
const unsigned int np,
EntityHandle this_set,
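
After this header change, ParallelComm carries three member overloads of resolve_shared_ents plus the static multi-instance form. The member signatures, collected from the hunk above:

    ErrorCode resolve_shared_ents(EntityHandle this_set,
                                  int resolve_dim = 3, int shared_dim = -1,
                                  const Tag* id_tag = 0);      // whole instance
    ErrorCode resolve_shared_ents(EntityHandle this_set, Range &proc_ents,
                                  int resolve_dim = -1, int shared_dim = -1,
                                  const Tag* id_tag = 0);      // given entities
    ErrorCode resolve_shared_ents(EntityHandle this_set, Range &proc_ents,
                                  Range skin_ents[],
                                  int resolve_dim = 3, int shared_dim = -1,
                                  const Tag* id_tag = 0);      // caller-supplied skin
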
@@ -890,7 +863,8 @@ namespace moab {
// and tags the set with the procs sharing it; interface sets are optionally
// returned; NOTE: a subsequent step is used to verify entities on the interface
// and remove them if they're not shared
- ErrorCode create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);
+ ErrorCode create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
+ int resolve_dim, int shared_dim);
// do the same but working straight from sharedEnts
ErrorCode create_interface_sets(EntityHandle this_set, int resolve_dim, int shared_dim);
@@ -1256,10 +1230,10 @@ namespace moab {
std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
Range &proc_verts);
- ErrorCode get_proc_nvecs(int resolve_dim,
- int shared_dim,
- Range *skin_ents,
- std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);
+ ErrorCode tag_shared_ents(int resolve_dim,
+ int shared_dim,
+ Range *skin_ents,
+ std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);
// after verifying shared entities, now parent/child links between sets can be established
ErrorCode create_iface_pc_links();
diff --git a/test/parallel/parallel_hdf5_test.cc b/test/parallel/parallel_hdf5_test.cc
index 355e99e..2b2a1c8 100644
--- a/test/parallel/parallel_hdf5_test.cc
+++ b/test/parallel/parallel_hdf5_test.cc
@@ -1491,7 +1491,7 @@ void test_write_unbalanced()
ParallelComm* pcomm = ParallelComm::get_pcomm( &mb, 0 );
if (0 == pcomm)
pcomm = new ParallelComm( &mb, MPI_COMM_WORLD );
- rval = pcomm->resolve_shared_ents( 0, entities, 2, 0, NULL, &idtag );
+ rval = pcomm->resolve_shared_ents( 0, entities, 2, 0, &idtag );
CHECK_ERR(rval);
rval = pcomm->resolve_shared_sets( sets, idtag );
CHECK_ERR(rval);
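
The test adapts to the dropped skin_ents parameter: callers that do not precompute a skin now use the five-argument overload, and resolve_dim 2 with shared_dim 0 requests vertex-only sharing for a 2-dimensional partition.
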
Repository URL: https://bitbucket.org/fathomteam/moab/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.