[MOAB-dev] commit/MOAB: 3 new changesets
commits-noreply at bitbucket.org
Tue Sep 3 15:44:58 CDT 2013
3 new commits in MOAB:
https://bitbucket.org/fathomteam/moab/commits/452ee983e8a9/
Changeset: 452ee983e8a9
Branch: None
User: tautges
Date: 2013-09-03 20:03:31
Summary: Cleaning up some of the ParallelComm resolve_shared functionality, in preparation for
more extensive bug fixing regarding shared non-vertices.
Affected #: 6 files
diff --git a/src/ScdInterface.cpp b/src/ScdInterface.cpp
index 054b8dd..21a81dd 100644
--- a/src/ScdInterface.cpp
+++ b/src/ScdInterface.cpp
@@ -659,7 +659,7 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, EntityHandle se
if (MB_SUCCESS != rval) return rval;
// create interface sets
- rval = pcomm->create_interface_sets(proc_nvecs, -1, -1);
+ rval = pcomm->create_interface_sets(proc_nvecs);
if (MB_SUCCESS != rval) return rval;
// make sure buffers are allocated for communicating procs
diff --git a/src/Skinner.cpp b/src/Skinner.cpp
index 57b3d51..b5fd24e 100644
--- a/src/Skinner.cpp
+++ b/src/Skinner.cpp
@@ -264,66 +264,13 @@ ErrorCode Skinner::find_skin( const EntityHandle meshset,
!this_core->a_entity_factory()->vert_elem_adjacencies())
this_core->a_entity_factory()->create_vert_elem_adjacencies();
- if (this_core && this_core->a_entity_factory()->vert_elem_adjacencies())
- return find_skin_vertices( meshset,
- source_entities,
- get_vertices ? &output_handles : 0,
- get_vertices ? 0 : &output_handles,
- output_reverse_handles,
- create_skin_elements );
+ return find_skin_vertices( meshset,
+ source_entities,
+ get_vertices ? &output_handles : 0,
+ get_vertices ? 0 : &output_handles,
+ output_reverse_handles,
+ create_skin_elements );
- Range forward, reverse;
- Range prev;
- const int d = CN::Dimension(TYPE_FROM_HANDLE(source_entities.front()));
- if (!source_entities.all_of_dimension(d))
- return MB_TYPE_OUT_OF_RANGE;
-
- rval = thisMB->get_entities_by_dimension( meshset, d-1, prev );
- if (MB_SUCCESS != rval)
- return rval;
-
- rval = find_skin_noadj( source_entities, forward, reverse );
- if (MB_SUCCESS != rval)
- return rval;
-
- if (get_vertices && !output_reverse_handles) {
- forward.merge( reverse );
- reverse.clear();
- }
-
- if (get_vertices) {
- rval = thisMB->get_connectivity( forward, output_handles );
- if (MB_SUCCESS != rval)
- return rval;
- }
-
- if (!create_skin_elements) {
- Range new_skin;
- rval = thisMB->get_entities_by_dimension( meshset, d-1, new_skin);
- if (MB_SUCCESS != rval)
- return rval;
- new_skin = subtract( new_skin, prev );
- forward = subtract( forward, new_skin );
- reverse = subtract( reverse, new_skin );
- rval = thisMB->delete_entities( new_skin );
- if (MB_SUCCESS != rval)
- return rval;
- }
-
- if (!get_vertices) {
- if (output_handles.empty())
- output_handles.swap( forward );
- else
- output_handles.merge( forward );
- if (!output_reverse_handles)
- output_handles.merge( reverse );
- else if (output_reverse_handles->empty())
- output_reverse_handles->swap( reverse );
- else
- output_reverse_handles->merge( reverse );
- }
-
- return MB_SUCCESS;
}
ErrorCode Skinner::find_skin_scd(const Range& source_entities,
diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index 4977f91..114a3d9 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -250,7 +250,7 @@ namespace moab {
if (tag < MB_MESG_REMOTEH_ACK) myDebug->print(3, ", recv_ent_reqs=");
else if (tag < MB_MESG_TAGS_ACK) myDebug->print(3, ", recv_remoteh_reqs=");
else myDebug->print(3, ", recv_tag_reqs=");
- for (unsigned int i = 0; i < reqs.size(); i++) myDebug->printf(3, " %x", reqs[i]);
+ for (unsigned int i = 0; i < reqs.size(); i++) myDebug->printf(3, " %p", (void*)reqs[i]);
myDebug->print(3, "\n");
}
}
@@ -409,7 +409,6 @@ namespace moab {
const bool owned_only)
{
Range entities[4];
- int local_num_elements[4];
ErrorCode result;
std::vector<unsigned char> pstatus;
for (int dim = 0; dim <= dimension; dim++) {
@@ -430,7 +429,23 @@ namespace moab {
if (pstatus[i] & PSTATUS_NOT_OWNED)
dum_range.insert(*rit);
entities[dim] = subtract( entities[dim], dum_range);
+ }
+
+ return assign_global_ids(entities, dimension, start_id, parallel, owned_only);
+ }
+ //! assign a global id space, for largest-dimension or all entities (and
+ //! in either case for vertices too)
+ ErrorCode ParallelComm::assign_global_ids( Range entities[],
+ const int dimension,
+ const int start_id,
+ const bool parallel,
+ const bool owned_only)
+ {
+ int local_num_elements[4];
+ ErrorCode result;
+ std::vector<unsigned char> pstatus;
+ for (int dim = 0; dim <= dimension; dim++) {
local_num_elements[dim] = entities[dim].size();
}
@@ -3572,13 +3587,14 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
// must call even if we don't have any entities, to make sure
// collective comm'n works
- return resolve_shared_ents(this_set, proc_ents, resolve_dim, shared_dim, id_tag);
+ return resolve_shared_ents(this_set, proc_ents, resolve_dim, shared_dim, NULL, id_tag);
}
ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
Range &proc_ents,
int resolve_dim,
int shared_dim,
+ Range *skin_ents,
const Tag* id_tag)
{
#ifdef USE_MPE
@@ -3591,12 +3607,13 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
ErrorCode result;
myDebug->tprintf(1, "Resolving shared entities.\n");
+ if (resolve_dim < shared_dim) {
+ result = MB_FAILURE;
+ RRA("MOAB does not support vertex-based partitions, only element-based ones.");
+ }
+
if (-1 == shared_dim) {
- if (0 == resolve_dim) {
- result = mbImpl->get_dimension(shared_dim);
- RRA("Couldn't get dimension.");
- }
- else if (!proc_ents.empty())
+ if (!proc_ents.empty())
shared_dim = mbImpl->dimension_from_handle(*proc_ents.begin())-1;
else if (resolve_dim == 3)
shared_dim = 2;
@@ -3605,60 +3622,38 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
return MB_FAILURE;
}
}
- assert(shared_dim >= 0 && resolve_dim >= 0);
+ assert(shared_dim >= 0 && resolve_dim >= shared_dim);
// get the skin entities by dimension
- Range skin_ents[4];
+ Range tmp_skin_ents[4];
std::vector<int> gid_data;
std::vector<EntityHandle> handle_vec;
int skin_dim;
- // get the entities to be skinned
- if (resolve_dim < shared_dim) {
- // for vertex-based partition, it's the elements adj to the vertices
- result = mbImpl->get_adjacencies(proc_ents, shared_dim,
- false, skin_ents[resolve_dim],
- Interface::UNION);
- RRA("Failed getting skinned entities.");
- skin_dim = shared_dim-1;
- }
- else {
- // for element-based partition, it's just the elements
+ if (!skin_ents) {
+ // need to compute the skin here
+ skin_ents = tmp_skin_ents;
+
+ // get the entities to be skinned
skin_ents[resolve_dim] = proc_ents;
skin_dim = resolve_dim-1;
- }
- // find the skin
- Skinner skinner(mbImpl);
- result = skinner.find_skin(this_set, skin_ents[skin_dim+1], false, skin_ents[skin_dim],
- NULL, true, true, true);
- RRA("Failed to find skin.");
- myDebug->tprintf(1, "Found skin, now resolving.\n");
-
- // get entities adjacent to skin ents from shared_dim down to zero
- for (int this_dim = skin_dim-1; this_dim >= 0; this_dim--) {
- result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
- true, skin_ents[this_dim],
- Interface::UNION);
- RRA("Failed getting skin adjacencies.");
+ // find the skin
+ Skinner skinner(mbImpl);
+ result = skinner.find_skin(this_set, skin_ents[skin_dim+1], false, skin_ents[skin_dim],
+ NULL, true, true, true);
+ RRA("Failed to find skin.");
+ myDebug->tprintf(1, "Found skin, now resolving.\n");
+
+ // get entities adjacent to skin ents from shared_dim down to zero
+ for (int this_dim = skin_dim-1; this_dim >= 0; this_dim--) {
+ result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
+ true, skin_ents[this_dim],
+ Interface::UNION);
+ RRA("Failed getting skin adjacencies.");
+ }
}
-
- return resolve_shared_ents(this_set, proc_ents, skin_ents,
- resolve_dim, shared_dim, id_tag);
- }
-
- ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
- Range &proc_ents,
- Range skin_ents[],
- int resolve_dim,
- int shared_dim,
- const Tag* id_tag)
- { // resolve shared vertices first
- ErrorCode result;
- std::vector<int> gid_data;
- std::vector<EntityHandle> handle_vec;
- int skin_dim = resolve_dim-1;
-
+
// global id tag
Tag gid_tag; int def_val = -1;
if (id_tag)
@@ -3672,7 +3667,11 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
else if (tag_created) {
// just created it, so we need global ids
- result = assign_global_ids(this_set, skin_dim+1,true,true,true);
+ Range tmp_ents[4];
+ tmp_ents[resolve_dim] = proc_ents.subset_by_dimension(resolve_dim);
+ result = mbImpl->get_adjacencies(tmp_ents[resolve_dim], 0, false, tmp_ents[0]);
+ RRA("Failed to get adjacent vertices.");
+ result = assign_global_ids(tmp_ents, resolve_dim, 1, true, true);
RRA("Failed assigning global ids.");
}
}
@@ -3707,7 +3706,6 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
// get a crystal router
gs_data::crystal_data *cd = procConfig.crystal_router();
-
/*
// get total number of entities; will overshoot highest global id, but
// that's ok
@@ -3790,8 +3788,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
#endif
// get entities shared by 1 or n procs
- result = tag_shared_ents(resolve_dim, shared_dim, skin_ents,
- proc_nvecs);
+ result = get_proc_nvecs(resolve_dim, shared_dim, skin_ents, proc_nvecs);
RRA("Trouble tagging shared entities.");
shared_verts.reset();
@@ -3809,7 +3806,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
// create the sets for each interface; store them as tags on
// the interface instance
Range iface_sets;
- result = create_interface_sets(proc_nvecs, resolve_dim, shared_dim);
+ result = create_interface_sets(proc_nvecs);
RRA("Trouble creating iface sets.");
// establish comm procs and buffers for them
@@ -4368,15 +4365,12 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
RRA("");
}
- result = tag_shared_ents(resolve_dim, shared_dim, skin_ents,
- proc_nvecs);
+ result = get_proc_nvecs(resolve_dim, shared_dim, skin_ents, proc_nvecs);
- return create_interface_sets(proc_nvecs, resolve_dim, shared_dim);
+ return create_interface_sets(proc_nvecs);
}
- ErrorCode ParallelComm::create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
- int /*resolve_dim*/,
- int /*shared_dim*/)
+ ErrorCode ParallelComm::create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs)
{
if (proc_nvecs.empty()) return MB_SUCCESS;
@@ -4530,10 +4524,10 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
return MB_SUCCESS;
}
- ErrorCode ParallelComm::tag_shared_ents(int resolve_dim,
- int shared_dim,
- Range *skin_ents,
- std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs)
+ ErrorCode ParallelComm::get_proc_nvecs(int resolve_dim,
+ int shared_dim,
+ Range *skin_ents,
+ std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs)
{
// set sharing procs tags on other skin ents
ErrorCode result;
@@ -6247,7 +6241,7 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
}
// create interface sets from shared entities
- result = create_interface_sets(proc_nvecs, 3, 2);
+ result = create_interface_sets(proc_nvecs);
RRA("Trouble creating iface sets.");
return MB_SUCCESS;
diff --git a/src/parallel/ParallelMergeMesh.cpp b/src/parallel/ParallelMergeMesh.cpp
index f0b99c6..138e32c 100644
--- a/src/parallel/ParallelMergeMesh.cpp
+++ b/src/parallel/ParallelMergeMesh.cpp
@@ -559,7 +559,7 @@ namespace moab{
}
// get entities shared by 1 or n procs
- rval = myPcomm->tag_shared_ents(dim,dim-1, &mySkinEnts[0],proc_nranges);
+ rval = myPcomm->get_proc_nvecs(dim,dim-1, &mySkinEnts[0],proc_nranges);
if(rval != MB_SUCCESS){
return rval;
}
@@ -567,7 +567,7 @@ namespace moab{
// create the sets for each interface; store them as tags on
// the interface instance
Range iface_sets;
- rval = myPcomm->create_interface_sets(proc_nranges, dim, dim-1);
+ rval = myPcomm->create_interface_sets(proc_nranges);
if(rval != MB_SUCCESS){
return rval;
}
diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index 90cbad6..5468de1 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -96,17 +96,47 @@ namespace moab {
// \section GLOBAL IDS
// ==================================
- //! assign a global id space, for largest-dimension or all entities (and
- //! in either case for vertices too)
- //!\param owned_only If true, do not get global IDs for non-owned entities
- //! from remote processors.
+ /* \brief Assign a global id space for entities
+ *
+ * Spaces are separate (and overlapping) for each dimension (i.e. global id space
+ * is not unique over all dimension entities)
+ *
+ * \param this_set Assign ids for entities in this set
+ * \param max_dim Assign for entities up to max_dim
+ * \param start_id Starting id for entities, defaults to 1
+ * \param largest_dim_only Assign only for max_dim and vertices, otherwise for edges/faces too
+ * \param parallel If true, use MPI_Scan-like function to properly assign over all processors,
+ * otherwise gids are assigned locally only
+ * \param owned_only If false, exchanged gids with sharing processors so gids are valid for
+ * non-owned entities too
+ */
ErrorCode assign_global_ids(EntityHandle this_set,
- const int dimension,
+ const int max_dim,
const int start_id = 1,
const bool largest_dim_only = true,
const bool parallel = true,
const bool owned_only = false);
+ /* \brief Assign a global id space for entities
+ *
+ * Same as set-based variant of this function, except entity Range vectors are input directly
+ * (by dimension, so entities[0] contains vertices, etc.)
+ *
+ * \param entities Assign ids for entities in the vector by dimension
+ * \param max_dim Assign for entities up to max_dim
+ * \param start_id Starting id for entities, defaults to 1
+ * \param largest_dim_only Assign only for max_dim and vertices, otherwise for edges/faces too
+ * \param parallel If true, use MPI_Scan-like function to properly assign over all processors,
+ * otherwise gids are assigned locally only
+ * \param owned_only If false, exchanged gids with sharing processors so gids are valid for
+ * non-owned entities too
+ */
+ ErrorCode assign_global_ids( Range entities[],
+ const int dimension,
+ const int start_id,
+ const bool parallel = true,
+ const bool owned_only = true);
+
//! check for global ids; based only on tag handle being there or not;
//! if it's not there, create them for the specified dimensions
//!\param owned_only If true, do not get global IDs for non-owned entities
@@ -396,54 +426,51 @@ namespace moab {
*
* Resolve shared entities between processors for entities in proc_ents,
* by comparing global id tag values on vertices on skin of elements in
- * proc_ents. Shared entities are assigned a tag that's either
- * PARALLEL_SHARED_PROC_TAG_NAME, which is 1 integer in length, or
- * PARALLEL_SHARED_PROCS_TAG_NAME, whose length depends on the maximum
- * number of sharing processors. Values in these tags denote the ranks
- * of sharing processors, and the list ends with the value -1.
- *
- * If shared_dim is input as -1 or not input, a value one less than the
- * maximum dimension of entities in proc_ents is used.
+ * proc_ents.
*
+ * \param this_set Set from which entities for proc_ents are taken
* \param proc_ents Entities for which to resolve shared entities
- * \param shared_dim Maximum dimension of shared entities to look for
+ * \param resolve_dim Dimension of entities in part/partition
+ * \param shared_dim Maximum dimension of shared entities to look for; if -1,
+ * resolve_dim-1 is used
+ * \param id_tag If non-NULL, use this tag to get global id on which vertex resolution
+ * is based
*/
ErrorCode resolve_shared_ents(EntityHandle this_set,
- Range &proc_ents,
- int resolve_dim = -1,
+ int resolve_dim = 3,
int shared_dim = -1,
const Tag* id_tag = 0);
-
+
/** \brief Resolve shared entities between processors
*
- * Same as resolve_shared_ents(Range&), except works for
- * all entities in instance of dimension dim.
+ * Same as resolve_shared_ents with entity set as first argument, except instead
+ * a range is input containing entities in the part/partition.
*
- * If shared_dim is input as -1 or not input, a value one less than the
- * maximum dimension of entities is used.
-
- * \param dim Dimension of entities in the partition
- * \param shared_dim Maximum dimension of shared entities to look for
+ * \param this_set In this function variant, set is used to speed up skinning
+ * \param proc_ents Entities for which to resolve shared entities
+ * \param resolve_dim Dimension of entities in part/partition
+ * \param shared_dim Maximum dimension of shared entities to look for; if -1,
+ * resolve_dim-1 is used
+ * \param id_tag If non-NULL, use this tag to get global id on which vertex resolution
+ * is based
*/
ErrorCode resolve_shared_ents(EntityHandle this_set,
- int resolve_dim = 3,
+ Range &proc_ents,
+ int resolve_dim = -1,
int shared_dim = -1,
+ Range *skin_ents = NULL,
const Tag* id_tag = 0);
-
+
/** \brief Resolve shared entities between processors
*
- * Entity skin array is offered by user not by skinner
- * It is used by other resolve_shared_ents functions above
-
- * \param skin_ents[] entity skin array by user
+ * This version can be used statically, with messages exchanged via direct calls to buffer
+ * pack/unpack functions instead of MPI
+ *
+ * \param pc Array of ParallelComm instances
+ * \param np Number of ParallelComm objects in previous argument
+ * \param this_set Set of entities in parts/partition
+ * \param to_dim Maximum dimension of shared entities to look for
*/
- ErrorCode resolve_shared_ents(EntityHandle this_set,
- Range &proc_ents,
- Range skin_ents[],
- int resolve_dim = 3,
- int shared_dim = -1,
- const Tag* id_tag = 0);
-
static ErrorCode resolve_shared_ents(ParallelComm **pc,
const unsigned int np,
EntityHandle this_set,
@@ -863,8 +890,7 @@ namespace moab {
// and tags the set with the procs sharing it; interface sets are optionally
// returned; NOTE: a subsequent step is used to verify entities on the interface
// and remove them if they're not shared
- ErrorCode create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
- int resolve_dim, int shared_dim);
+ ErrorCode create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);
// do the same but working straight from sharedEnts
ErrorCode create_interface_sets(EntityHandle this_set, int resolve_dim, int shared_dim);
@@ -1230,10 +1256,10 @@ namespace moab {
std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
Range &proc_verts);
- ErrorCode tag_shared_ents(int resolve_dim,
- int shared_dim,
- Range *skin_ents,
- std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);
+ ErrorCode get_proc_nvecs(int resolve_dim,
+ int shared_dim,
+ Range *skin_ents,
+ std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);
// after verifying shared entities, now parent/child links between sets can be established
ErrorCode create_iface_pc_links();
diff --git a/test/parallel/parallel_hdf5_test.cc b/test/parallel/parallel_hdf5_test.cc
index 2b2a1c8..355e99e 100644
--- a/test/parallel/parallel_hdf5_test.cc
+++ b/test/parallel/parallel_hdf5_test.cc
@@ -1491,7 +1491,7 @@ void test_write_unbalanced()
ParallelComm* pcomm = ParallelComm::get_pcomm( &mb, 0 );
if (0 == pcomm)
pcomm = new ParallelComm( &mb, MPI_COMM_WORLD );
- rval = pcomm->resolve_shared_ents( 0, entities, 2, 0, &idtag );
+ rval = pcomm->resolve_shared_ents( 0, entities, 2, 0, NULL, &idtag );
CHECK_ERR(rval);
rval = pcomm->resolve_shared_sets( sets, idtag );
CHECK_ERR(rval);
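For downstream code, the net effect of this changeset is a set of signature changes rather than new behavior: resolve_shared_ents gains an optional Range *skin_ents argument (pass NULL to have MOAB skin proc_ents itself, as in the parallel_hdf5_test.cc update above), tag_shared_ents is renamed get_proc_nvecs, create_interface_sets drops its unused dimension arguments, and a Range-array overload of assign_global_ids is added. A minimal sketch of how a caller might adapt follows; the helper name and the variables mb, pcomm, part_set, and part_ents are hypothetical, not taken from the commit:

#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"
using namespace moab;

// Hypothetical helper illustrating the post-change call sequence.
ErrorCode resolve_my_partition(Interface &mb, ParallelComm *pcomm,
                               EntityHandle part_set, Range &part_ents,
                               const Tag *id_tag)
{
  // New overload: global ids assigned from a by-dimension Range array
  // (entities[0] = vertices, entities[3] = regions); start_id has no default here.
  Range ents_by_dim[4];
  ents_by_dim[3] = part_ents;
  ErrorCode rval = mb.get_adjacencies(ents_by_dim[3], 0, false, ents_by_dim[0],
                                      Interface::UNION);
  if (MB_SUCCESS != rval) return rval;
  rval = pcomm->assign_global_ids(ents_by_dim, 3 /*dimension*/, 1 /*start_id*/);
  if (MB_SUCCESS != rval) return rval;

  // New argument order: (set, entities, resolve_dim, shared_dim, skin_ents, id_tag).
  // NULL skin_ents asks ParallelComm to compute the skin internally.
  return pcomm->resolve_shared_ents(part_set, part_ents,
                                    3 /*resolve_dim*/, -1 /*shared_dim*/,
                                    NULL /*skin_ents*/, id_tag);
}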
https://bitbucket.org/fathomteam/moab/commits/0284b32f1e73/
Changeset: 0284b32f1e73
Branch: None
User: tautges
Date: 2013-09-03 20:53:23
Summary: Removing the idx_tag code; it's not clear why it was there in the first place.
Affected #: 1 file
diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index 114a3d9..d00d55a 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -3676,19 +3676,6 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
}
}
- // store index in temp tag; reuse gid_data
- gid_data.resize(2*skin_ents[0].size());
- int idx = 0;
- for (Range::iterator rit = skin_ents[0].begin();
- rit != skin_ents[0].end(); rit++)
- gid_data[idx] = idx, idx++;
- Tag idx_tag;
- result = mbImpl->tag_get_handle("__idx_tag", 1, MB_TYPE_INTEGER,
- idx_tag, MB_TAG_DENSE|MB_TAG_CREAT, &def_val );
- if (MB_SUCCESS != result) return result;
- result = mbImpl->tag_set_data(idx_tag, skin_ents[0], &gid_data[0]);
- RRA("Couldn't assign index tag.");
-
// get gids for skin ents in a vector, to pass to gs
result = mbImpl->tag_get_data(gid_tag, skin_ents[0], &gid_data[0]);
RRA("Couldn't get gid tag for skin vertices.");
https://bitbucket.org/fathomteam/moab/commits/7c1987e21051/
Changeset: 7c1987e21051
Branch: master
User: tautges
Date: 2013-09-03 22:30:05
Summary: Forgot to init the gid array that receives tags; also putting the scd box set in
partition sets.
make check passes, except for scdpart, which relies on file id tags that
aren't being set consistently in all the NC readers (reported to Danqing,
who's working on it).
Affected #: 2 files
diff --git a/src/ScdInterface.cpp b/src/ScdInterface.cpp
index 21a81dd..5413e3e 100644
--- a/src/ScdInterface.cpp
+++ b/src/ScdInterface.cpp
@@ -662,6 +662,9 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, EntityHandle se
rval = pcomm->create_interface_sets(proc_nvecs);
if (MB_SUCCESS != rval) return rval;
+ // add the box to the PComm's partitionSets
+ pcomm->partition_sets().insert(box->box_set());
+
// make sure buffers are allocated for communicating procs
for (std::vector<int>::iterator pit = procs.begin(); pit != procs.end(); pit++)
pcomm->get_buffers(*pit);
diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index d00d55a..20fc5da 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -3542,9 +3542,9 @@ ErrorCode ParallelComm::reduce_void(int tag_data_type, const MPI_Op mpi_op, int
}
ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
- int resolve_dim,
- int shared_dim,
- const Tag* id_tag)
+ int resolve_dim,
+ int shared_dim,
+ const Tag* id_tag)
{
ErrorCode result;
Range proc_ents;
@@ -3556,7 +3556,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
result = scdi->tag_shared_vertices(this, this_set);
if (MB_SUCCESS == result) {
myDebug->tprintf(1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size());
- return result;
+ if (shared_dim == 0) return result;
}
}
@@ -3626,7 +3626,6 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
// get the skin entities by dimension
Range tmp_skin_ents[4];
- std::vector<int> gid_data;
std::vector<EntityHandle> handle_vec;
int skin_dim;
@@ -3640,7 +3639,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
// find the skin
Skinner skinner(mbImpl);
- result = skinner.find_skin(this_set, skin_ents[skin_dim+1], false, skin_ents[skin_dim],
+ result = skinner.find_skin(this_set, skin_ents[resolve_dim], false, skin_ents[skin_dim],
NULL, true, true, true);
RRA("Failed to find skin.");
myDebug->tprintf(1, "Found skin, now resolving.\n");
@@ -3677,6 +3676,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
}
// get gids for skin ents in a vector, to pass to gs
+ std::vector<int> gid_data(skin_ents[0].size());
result = mbImpl->tag_get_data(gid_tag, skin_ents[0], &gid_data[0]);
RRA("Couldn't get gid tag for skin vertices.");
@@ -3764,8 +3764,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
Interface::UNION);
RRA("Couldn't get proc_verts.");
- result = tag_shared_verts(shared_verts, skin_ents,
- proc_nvecs, proc_verts);
+ result = tag_shared_verts(shared_verts, skin_ents, proc_nvecs, proc_verts);
RRA("Trouble tagging shared verts.");
#ifdef USE_MPE
@@ -3806,7 +3805,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
RRA("Shared handle check failed after iface vertex exchange.");
#endif
- // resolve shared entity remote handles; implemented in ghost cell exchange
+ // resolve shared non-vertex remote handles; implemented in ghost cell exchange
// code because it's so similar
result = exchange_ghost_cells(-1, -1, 0, 0, true, true);
RRA("Trouble resolving shared entity remote handles.");
Repository URL: https://bitbucket.org/fathomteam/moab/