[MOAB-dev] commit/MOAB: 2 new changesets
commits-noreply at bitbucket.org
Tue Sep 3 16:48:07 CDT 2013
2 new commits in MOAB:
https://bitbucket.org/fathomteam/moab/commits/ef97993ceca7/
Changeset: ef97993ceca7
Branch: None
User: danwu
Date: 2013-09-03 23:44:28
Summary: Create file id tag (duplicating the global id data) for Euler and FV meshes, to be used later by resolve_shared_ents.
Affected #: 1 file
diff --git a/src/io/NCHelper.cpp b/src/io/NCHelper.cpp
index c3b6b8b..f4fd618 100644
--- a/src/io/NCHelper.cpp
+++ b/src/io/NCHelper.cpp
@@ -812,6 +812,7 @@ ErrorCode ScdNCHelper::create_mesh(Range& faces)
{
Interface*& mbImpl = _readNC->mbImpl;
Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
+ const Tag*& mpFileIdTag = _readNC->mpFileIdTag;
DebugOutput& dbgOut = _readNC->dbgOut;
ScdInterface* scdi = _readNC->scdi;
ScdParData& parData = _readNC->parData;
@@ -842,12 +843,21 @@ ErrorCode ScdNCHelper::create_mesh(Range& faces)
assert(count == scd_box->num_vertices());
int* gid_data = (int*) data;
+ // Duplicate global id data, which will be used to resolve sharing
+ int* fid_data;
+ if (mpFileIdTag) {
+ rval = mbImpl->tag_iterate(*mpFileIdTag, tmp_range.begin(), topv, count, data);
+ ERRORR(rval, "Failed to get tag iterator on file id tag.");
+ assert(count == scd_box->num_vertices());
+ fid_data = (int*) data;
+ }
+
// Set the vertex coordinates
double *xc, *yc, *zc;
rval = scd_box->get_coordinate_arrays(xc, yc, zc);
ERRORR(rval, "Couldn't get vertex coordinate arrays.");
- int i, j, k, il, jl, kl, itmp;
+ int i, j, k, il, jl, kl, itmp, id;
int dil = lDims[3] - lDims[0] + 1;
int djl = lDims[4] - lDims[1] + 1;
int di = gDims[3] - gDims[0] + 1;
@@ -866,8 +876,13 @@ ErrorCode ScdNCHelper::create_mesh(Range& faces)
yc[pos] = jlVals[j];
zc[pos] = (-1 == lDims[2] ? 0.0 : levVals[k]);
itmp = (!locallyPeriodic[0] && globallyPeriodic[0] && il == gDims[3] ? gDims[0] : il);
- *gid_data = (-1 != kl ? kl * di * dj : 0) + jl * di + itmp + 1;
+ id = (-1 != kl ? kl * di * dj : 0) + jl * di + itmp + 1;
+ *gid_data = id;
gid_data++;
+ if (mpFileIdTag) {
+ *fid_data = id;
+ fid_data++;
+ }
}
}
}
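
For context on the pattern this changeset introduces, here is a minimal sketch (not MOAB source code) of copying global-id values into a second "file id" tag block by block with tag_iterate. The names mb, verts, gid_tag and fid_tag are illustrative assumptions, not identifiers from the commit; both tags are assumed to be dense integer tags already defined on the vertices.

    #include "moab/Core.hpp"
    #include <cstring>

    // Sketch only: duplicate integer global-id data into a second tag,
    // iterating over contiguous blocks of tag storage.
    moab::ErrorCode copy_gid_to_fid(moab::Interface* mb, moab::Range& verts,
                                    moab::Tag gid_tag, moab::Tag fid_tag)
    {
      moab::Range::iterator it = verts.begin();
      while (it != verts.end()) {
        int count = 0;
        void *gdata = NULL, *fdata = NULL;
        // tag_iterate exposes a pointer to a contiguous block of tag storage
        moab::ErrorCode rval = mb->tag_iterate(gid_tag, it, verts.end(), count, gdata);
        if (moab::MB_SUCCESS != rval) return rval;
        rval = mb->tag_iterate(fid_tag, it, verts.end(), count, fdata);
        if (moab::MB_SUCCESS != rval) return rval;
        std::memcpy(fdata, gdata, count * sizeof(int));  // same block, same length
        it += count;
      }
      return moab::MB_SUCCESS;
    }
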
https://bitbucket.org/fathomteam/moab/commits/b1fdd22d8ba9/
Changeset: b1fdd22d8ba9
Branch: master
User: danwu
Date: 2013-09-03 23:46:25
Summary: Merge branch 'master' of https://bitbucket.org/fathomteam/moab
Affected #: 9 files
diff --git a/.gitignore b/.gitignore
index 6d29b5e..967aaa7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -131,10 +131,12 @@ test/obb/obb_time
test/obb/obb_tree_tool
test/obb_test
test/oldinc/test_oldinc
+test/read_mpas_nc
test/parallel/*.h5m
test/parallel/*.vtk
test/parallel/mbparallelcomm_test
test/parallel/mhdf_parallel
+test/parallel/mpastrvpart
test/parallel/par_coupler_test
test/parallel/par_intx_sph
test/parallel/parallel_hdf5_test
@@ -158,6 +160,7 @@ test/perf/tstt_perf_binding
test/range_test
test/reorder_test
test/scdseq_test
+test/scd_test_partn
test/seq_man_test
test/tag_test
test/test_adj
diff --git a/itaps/imesh/iMesh_MOAB.cpp b/itaps/imesh/iMesh_MOAB.cpp
index 6d55492..d09fec1 100644
--- a/itaps/imesh/iMesh_MOAB.cpp
+++ b/itaps/imesh/iMesh_MOAB.cpp
@@ -3424,7 +3424,7 @@ void iMesh_createStructuredMesh(iMesh_Instance instance,
}
void iMesh_freeMemory(
- iMesh_Instance instance,
+ iMesh_Instance /*instance*/,
/**< [in] iMesh instance handle */
void ** ptrToMem)
{
diff --git a/src/ScdInterface.cpp b/src/ScdInterface.cpp
index 054b8dd..5413e3e 100644
--- a/src/ScdInterface.cpp
+++ b/src/ScdInterface.cpp
@@ -659,9 +659,12 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, EntityHandle se
if (MB_SUCCESS != rval) return rval;
// create interface sets
- rval = pcomm->create_interface_sets(proc_nvecs, -1, -1);
+ rval = pcomm->create_interface_sets(proc_nvecs);
if (MB_SUCCESS != rval) return rval;
+ // add the box to the PComm's partitionSets
+ pcomm->partition_sets().insert(box->box_set());
+
// make sure buffers are allocated for communicating procs
for (std::vector<int>::iterator pit = procs.begin(); pit != procs.end(); pit++)
pcomm->get_buffers(*pit);
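
A short, hedged usage sketch of the behavior added here: after tag_shared_vertices runs, the structured box's set is now registered as a partition set on the ParallelComm instance. The names scdi, pcomm and file_set are assumed from earlier setup and are not part of the commit.

    #include "moab/Core.hpp"
    #include "moab/ScdInterface.hpp"
    #include "moab/ParallelComm.hpp"
    #include <iostream>

    // Sketch only: check that the ScdBox set was added to the partition sets.
    void report_partition_sets(moab::ScdInterface* scdi, moab::ParallelComm* pcomm,
                               moab::EntityHandle file_set)
    {
      moab::ErrorCode rval = scdi->tag_shared_vertices(pcomm, file_set);
      if (moab::MB_SUCCESS == rval)
        std::cout << "partition sets now holds " << pcomm->partition_sets().size()
                  << " set(s)" << std::endl;
    }
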
diff --git a/src/Skinner.cpp b/src/Skinner.cpp
index 57b3d51..b5fd24e 100644
--- a/src/Skinner.cpp
+++ b/src/Skinner.cpp
@@ -264,66 +264,13 @@ ErrorCode Skinner::find_skin( const EntityHandle meshset,
!this_core->a_entity_factory()->vert_elem_adjacencies())
this_core->a_entity_factory()->create_vert_elem_adjacencies();
- if (this_core && this_core->a_entity_factory()->vert_elem_adjacencies())
- return find_skin_vertices( meshset,
- source_entities,
- get_vertices ? &output_handles : 0,
- get_vertices ? 0 : &output_handles,
- output_reverse_handles,
- create_skin_elements );
+ return find_skin_vertices( meshset,
+ source_entities,
+ get_vertices ? &output_handles : 0,
+ get_vertices ? 0 : &output_handles,
+ output_reverse_handles,
+ create_skin_elements );
- Range forward, reverse;
- Range prev;
- const int d = CN::Dimension(TYPE_FROM_HANDLE(source_entities.front()));
- if (!source_entities.all_of_dimension(d))
- return MB_TYPE_OUT_OF_RANGE;
-
- rval = thisMB->get_entities_by_dimension( meshset, d-1, prev );
- if (MB_SUCCESS != rval)
- return rval;
-
- rval = find_skin_noadj( source_entities, forward, reverse );
- if (MB_SUCCESS != rval)
- return rval;
-
- if (get_vertices && !output_reverse_handles) {
- forward.merge( reverse );
- reverse.clear();
- }
-
- if (get_vertices) {
- rval = thisMB->get_connectivity( forward, output_handles );
- if (MB_SUCCESS != rval)
- return rval;
- }
-
- if (!create_skin_elements) {
- Range new_skin;
- rval = thisMB->get_entities_by_dimension( meshset, d-1, new_skin);
- if (MB_SUCCESS != rval)
- return rval;
- new_skin = subtract( new_skin, prev );
- forward = subtract( forward, new_skin );
- reverse = subtract( reverse, new_skin );
- rval = thisMB->delete_entities( new_skin );
- if (MB_SUCCESS != rval)
- return rval;
- }
-
- if (!get_vertices) {
- if (output_handles.empty())
- output_handles.swap( forward );
- else
- output_handles.merge( forward );
- if (!output_reverse_handles)
- output_handles.merge( reverse );
- else if (output_reverse_handles->empty())
- output_reverse_handles->swap( reverse );
- else
- output_reverse_handles->merge( reverse );
- }
-
- return MB_SUCCESS;
}
ErrorCode Skinner::find_skin_scd(const Range& source_entities,
diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index 4977f91..20fc5da 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -250,7 +250,7 @@ namespace moab {
if (tag < MB_MESG_REMOTEH_ACK) myDebug->print(3, ", recv_ent_reqs=");
else if (tag < MB_MESG_TAGS_ACK) myDebug->print(3, ", recv_remoteh_reqs=");
else myDebug->print(3, ", recv_tag_reqs=");
- for (unsigned int i = 0; i < reqs.size(); i++) myDebug->printf(3, " %x", reqs[i]);
+ for (unsigned int i = 0; i < reqs.size(); i++) myDebug->printf(3, " %p", (void*)reqs[i]);
myDebug->print(3, "\n");
}
}
@@ -409,7 +409,6 @@ namespace moab {
const bool owned_only)
{
Range entities[4];
- int local_num_elements[4];
ErrorCode result;
std::vector<unsigned char> pstatus;
for (int dim = 0; dim <= dimension; dim++) {
@@ -430,7 +429,23 @@ namespace moab {
if (pstatus[i] & PSTATUS_NOT_OWNED)
dum_range.insert(*rit);
entities[dim] = subtract( entities[dim], dum_range);
+ }
+
+ return assign_global_ids(entities, dimension, start_id, parallel, owned_only);
+ }
+ //! assign a global id space, for largest-dimension or all entities (and
+ //! in either case for vertices too)
+ ErrorCode ParallelComm::assign_global_ids( Range entities[],
+ const int dimension,
+ const int start_id,
+ const bool parallel,
+ const bool owned_only)
+ {
+ int local_num_elements[4];
+ ErrorCode result;
+ std::vector<unsigned char> pstatus;
+ for (int dim = 0; dim <= dimension; dim++) {
local_num_elements[dim] = entities[dim].size();
}
@@ -3527,9 +3542,9 @@ ErrorCode ParallelComm::reduce_void(int tag_data_type, const MPI_Op mpi_op, int
}
ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
- int resolve_dim,
- int shared_dim,
- const Tag* id_tag)
+ int resolve_dim,
+ int shared_dim,
+ const Tag* id_tag)
{
ErrorCode result;
Range proc_ents;
@@ -3541,7 +3556,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
result = scdi->tag_shared_vertices(this, this_set);
if (MB_SUCCESS == result) {
myDebug->tprintf(1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size());
- return result;
+ if (shared_dim == 0) return result;
}
}
@@ -3572,13 +3587,14 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
// must call even if we don't have any entities, to make sure
// collective comm'n works
- return resolve_shared_ents(this_set, proc_ents, resolve_dim, shared_dim, id_tag);
+ return resolve_shared_ents(this_set, proc_ents, resolve_dim, shared_dim, NULL, id_tag);
}
ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
Range &proc_ents,
int resolve_dim,
int shared_dim,
+ Range *skin_ents,
const Tag* id_tag)
{
#ifdef USE_MPE
@@ -3591,12 +3607,13 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
ErrorCode result;
myDebug->tprintf(1, "Resolving shared entities.\n");
+ if (resolve_dim < shared_dim) {
+ result = MB_FAILURE;
+ RRA("MOAB does not support vertex-based partitions, only element-based ones.");
+ }
+
if (-1 == shared_dim) {
- if (0 == resolve_dim) {
- result = mbImpl->get_dimension(shared_dim);
- RRA("Couldn't get dimension.");
- }
- else if (!proc_ents.empty())
+ if (!proc_ents.empty())
shared_dim = mbImpl->dimension_from_handle(*proc_ents.begin())-1;
else if (resolve_dim == 3)
shared_dim = 2;
@@ -3605,60 +3622,37 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
return MB_FAILURE;
}
}
- assert(shared_dim >= 0 && resolve_dim >= 0);
+ assert(shared_dim >= 0 && resolve_dim >= shared_dim);
// get the skin entities by dimension
- Range skin_ents[4];
- std::vector<int> gid_data;
+ Range tmp_skin_ents[4];
std::vector<EntityHandle> handle_vec;
int skin_dim;
- // get the entities to be skinned
- if (resolve_dim < shared_dim) {
- // for vertex-based partition, it's the elements adj to the vertices
- result = mbImpl->get_adjacencies(proc_ents, shared_dim,
- false, skin_ents[resolve_dim],
- Interface::UNION);
- RRA("Failed getting skinned entities.");
- skin_dim = shared_dim-1;
- }
- else {
- // for element-based partition, it's just the elements
+ if (!skin_ents) {
+ // need to compute the skin here
+ skin_ents = tmp_skin_ents;
+
+ // get the entities to be skinned
skin_ents[resolve_dim] = proc_ents;
skin_dim = resolve_dim-1;
- }
-
- // find the skin
- Skinner skinner(mbImpl);
- result = skinner.find_skin(this_set, skin_ents[skin_dim+1], false, skin_ents[skin_dim],
- NULL, true, true, true);
- RRA("Failed to find skin.");
- myDebug->tprintf(1, "Found skin, now resolving.\n");
- // get entities adjacent to skin ents from shared_dim down to zero
- for (int this_dim = skin_dim-1; this_dim >= 0; this_dim--) {
- result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
- true, skin_ents[this_dim],
- Interface::UNION);
- RRA("Failed getting skin adjacencies.");
+ // find the skin
+ Skinner skinner(mbImpl);
+ result = skinner.find_skin(this_set, skin_ents[resolve_dim], false, skin_ents[skin_dim],
+ NULL, true, true, true);
+ RRA("Failed to find skin.");
+ myDebug->tprintf(1, "Found skin, now resolving.\n");
+
+ // get entities adjacent to skin ents from shared_dim down to zero
+ for (int this_dim = skin_dim-1; this_dim >= 0; this_dim--) {
+ result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
+ true, skin_ents[this_dim],
+ Interface::UNION);
+ RRA("Failed getting skin adjacencies.");
+ }
}
-
- return resolve_shared_ents(this_set, proc_ents, skin_ents,
- resolve_dim, shared_dim, id_tag);
- }
-
- ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
- Range &proc_ents,
- Range skin_ents[],
- int resolve_dim,
- int shared_dim,
- const Tag* id_tag)
- { // resolve shared vertices first
- ErrorCode result;
- std::vector<int> gid_data;
- std::vector<EntityHandle> handle_vec;
- int skin_dim = resolve_dim-1;
-
+
// global id tag
Tag gid_tag; int def_val = -1;
if (id_tag)
@@ -3672,25 +3666,17 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
else if (tag_created) {
// just created it, so we need global ids
- result = assign_global_ids(this_set, skin_dim+1,true,true,true);
+ Range tmp_ents[4];
+ tmp_ents[resolve_dim] = proc_ents.subset_by_dimension(resolve_dim);
+ result = mbImpl->get_adjacencies(tmp_ents[resolve_dim], 0, false, tmp_ents[0]);
+ RRA("Failed to get adjacent vertices.");
+ result = assign_global_ids(tmp_ents, resolve_dim, 1, true, true);
RRA("Failed assigning global ids.");
}
}
- // store index in temp tag; reuse gid_data
- gid_data.resize(2*skin_ents[0].size());
- int idx = 0;
- for (Range::iterator rit = skin_ents[0].begin();
- rit != skin_ents[0].end(); rit++)
- gid_data[idx] = idx, idx++;
- Tag idx_tag;
- result = mbImpl->tag_get_handle("__idx_tag", 1, MB_TYPE_INTEGER,
- idx_tag, MB_TAG_DENSE|MB_TAG_CREAT, &def_val );
- if (MB_SUCCESS != result) return result;
- result = mbImpl->tag_set_data(idx_tag, skin_ents[0], &gid_data[0]);
- RRA("Couldn't assign index tag.");
-
// get gids for skin ents in a vector, to pass to gs
+ std::vector<int> gid_data(skin_ents[0].size());
result = mbImpl->tag_get_data(gid_tag, skin_ents[0], &gid_data[0]);
RRA("Couldn't get gid tag for skin vertices.");
@@ -3707,7 +3693,6 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
// get a crystal router
gs_data::crystal_data *cd = procConfig.crystal_router();
-
/*
// get total number of entities; will overshoot highest global id, but
// that's ok
@@ -3779,8 +3764,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
Interface::UNION);
RRA("Couldn't get proc_verts.");
- result = tag_shared_verts(shared_verts, skin_ents,
- proc_nvecs, proc_verts);
+ result = tag_shared_verts(shared_verts, skin_ents, proc_nvecs, proc_verts);
RRA("Trouble tagging shared verts.");
#ifdef USE_MPE
@@ -3790,8 +3774,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
#endif
// get entities shared by 1 or n procs
- result = tag_shared_ents(resolve_dim, shared_dim, skin_ents,
- proc_nvecs);
+ result = get_proc_nvecs(resolve_dim, shared_dim, skin_ents, proc_nvecs);
RRA("Trouble tagging shared entities.");
shared_verts.reset();
@@ -3809,7 +3792,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
// create the sets for each interface; store them as tags on
// the interface instance
Range iface_sets;
- result = create_interface_sets(proc_nvecs, resolve_dim, shared_dim);
+ result = create_interface_sets(proc_nvecs);
RRA("Trouble creating iface sets.");
// establish comm procs and buffers for them
@@ -3822,7 +3805,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
RRA("Shared handle check failed after iface vertex exchange.");
#endif
- // resolve shared entity remote handles; implemented in ghost cell exchange
+ // resolve shared non-vertex remote handles; implemented in ghost cell exchange
// code because it's so similar
result = exchange_ghost_cells(-1, -1, 0, 0, true, true);
RRA("Trouble resolving shared entity remote handles.");
@@ -4368,15 +4351,12 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
RRA("");
}
- result = tag_shared_ents(resolve_dim, shared_dim, skin_ents,
- proc_nvecs);
+ result = get_proc_nvecs(resolve_dim, shared_dim, skin_ents, proc_nvecs);
- return create_interface_sets(proc_nvecs, resolve_dim, shared_dim);
+ return create_interface_sets(proc_nvecs);
}
- ErrorCode ParallelComm::create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
- int /*resolve_dim*/,
- int /*shared_dim*/)
+ ErrorCode ParallelComm::create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs)
{
if (proc_nvecs.empty()) return MB_SUCCESS;
@@ -4530,10 +4510,10 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
return MB_SUCCESS;
}
- ErrorCode ParallelComm::tag_shared_ents(int resolve_dim,
- int shared_dim,
- Range *skin_ents,
- std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs)
+ ErrorCode ParallelComm::get_proc_nvecs(int resolve_dim,
+ int shared_dim,
+ Range *skin_ents,
+ std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs)
{
// set sharing procs tags on other skin ents
ErrorCode result;
@@ -6247,7 +6227,7 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
}
// create interface sets from shared entities
- result = create_interface_sets(proc_nvecs, 3, 2);
+ result = create_interface_sets(proc_nvecs);
RRA("Trouble creating iface sets.");
return MB_SUCCESS;
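
To illustrate the new array-based assign_global_ids overload introduced in this file, here is a minimal sketch, assuming an existing Interface* mb, ParallelComm* pcomm and a Range of owned 3D elements (owned_hexes); these names are assumptions for illustration, not part of the commit.

    #include "moab/Core.hpp"
    #include "moab/ParallelComm.hpp"

    // Sketch only: number owned elements and their vertices; id spaces are
    // separate per dimension, so element and vertex ids overlap.
    moab::ErrorCode number_owned(moab::Interface* mb, moab::ParallelComm* pcomm,
                                 const moab::Range& owned_hexes)
    {
      moab::Range ents_by_dim[4];
      ents_by_dim[3] = owned_hexes;
      moab::ErrorCode rval = mb->get_adjacencies(ents_by_dim[3], 0, false,
                                                 ents_by_dim[0], moab::Interface::UNION);
      if (moab::MB_SUCCESS != rval) return rval;
      // start ids at 1, parallel (MPI_Scan-style) assignment, owned entities only
      return pcomm->assign_global_ids(ents_by_dim, 3, 1, true, true);
    }
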
diff --git a/src/parallel/ParallelMergeMesh.cpp b/src/parallel/ParallelMergeMesh.cpp
index f0b99c6..138e32c 100644
--- a/src/parallel/ParallelMergeMesh.cpp
+++ b/src/parallel/ParallelMergeMesh.cpp
@@ -559,7 +559,7 @@ namespace moab{
}
// get entities shared by 1 or n procs
- rval = myPcomm->tag_shared_ents(dim,dim-1, &mySkinEnts[0],proc_nranges);
+ rval = myPcomm->get_proc_nvecs(dim,dim-1, &mySkinEnts[0],proc_nranges);
if(rval != MB_SUCCESS){
return rval;
}
@@ -567,7 +567,7 @@ namespace moab{
// create the sets for each interface; store them as tags on
// the interface instance
Range iface_sets;
- rval = myPcomm->create_interface_sets(proc_nranges, dim, dim-1);
+ rval = myPcomm->create_interface_sets(proc_nranges);
if(rval != MB_SUCCESS){
return rval;
}
diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index 90cbad6..5468de1 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -96,17 +96,47 @@ namespace moab {
// \section GLOBAL IDS
// ==================================
- //! assign a global id space, for largest-dimension or all entities (and
- //! in either case for vertices too)
- //!\param owned_only If true, do not get global IDs for non-owned entities
- //! from remote processors.
+ /* \brief Assign a global id space for entities
+ *
+ * Spaces are separate (and overlapping) for each dimension (i.e. global id space
+ * is not unique over all dimension entities)
+ *
+ * \param this_set Assign ids for entities in this set
+ * \param max_dim Assign for entities up to max_dim
+ * \param start_id Starting id for entities, defaults to 1
+ * \param largest_dim_only Assign only for max_dim and vertices, otherwise for edges/faces too
+ * \param parallel If true, use MPI_Scan-like function to properly assign over all processors,
+ * otherwise gids are assigned locally only
+ * \param owned_only If false, exchanged gids with sharing processors so gids are valid for
+ * non-owned entities too
+ */
ErrorCode assign_global_ids(EntityHandle this_set,
- const int dimension,
+ const int max_dim,
const int start_id = 1,
const bool largest_dim_only = true,
const bool parallel = true,
const bool owned_only = false);
+ /* \brief Assign a global id space for entities
+ *
+ * Same as set-based variant of this function, except entity Range vectors are input directly
+ * (by dimension, so entities[0] contains vertices, etc.)
+ *
+ * \param entities Assign ids for entities in the vector by dimension
+ * \param max_dim Assign for entities up to max_dim
+ * \param start_id Starting id for entities, defaults to 1
+ * \param largest_dim_only Assign only for max_dim and vertices, otherwise for edges/faces too
+ * \param parallel If true, use MPI_Scan-like function to properly assign over all processors,
+ * otherwise gids are assigned locally only
+ * \param owned_only If false, exchanged gids with sharing processors so gids are valid for
+ * non-owned entities too
+ */
+ ErrorCode assign_global_ids( Range entities[],
+ const int dimension,
+ const int start_id,
+ const bool parallel = true,
+ const bool owned_only = true);
+
//! check for global ids; based only on tag handle being there or not;
//! if it's not there, create them for the specified dimensions
//!\param owned_only If true, do not get global IDs for non-owned entities
@@ -396,54 +426,51 @@ namespace moab {
*
* Resolve shared entities between processors for entities in proc_ents,
* by comparing global id tag values on vertices on skin of elements in
- * proc_ents. Shared entities are assigned a tag that's either
- * PARALLEL_SHARED_PROC_TAG_NAME, which is 1 integer in length, or
- * PARALLEL_SHARED_PROCS_TAG_NAME, whose length depends on the maximum
- * number of sharing processors. Values in these tags denote the ranks
- * of sharing processors, and the list ends with the value -1.
- *
- * If shared_dim is input as -1 or not input, a value one less than the
- * maximum dimension of entities in proc_ents is used.
+ * proc_ents.
*
+ * \param this_set Set from which entities for proc_ents are taken
* \param proc_ents Entities for which to resolve shared entities
- * \param shared_dim Maximum dimension of shared entities to look for
+ * \param resolve_dim Dimension of entities in part/partition
+ * \param shared_dim Maximum dimension of shared entities to look for; if -1,
+ * resolve_dim-1 is used
+ * \param id_tag If non-NULL, use this tag to get global id on which vertex resolution
+ * is based
*/
ErrorCode resolve_shared_ents(EntityHandle this_set,
- Range &proc_ents,
- int resolve_dim = -1,
+ int resolve_dim = 3,
int shared_dim = -1,
const Tag* id_tag = 0);
-
+
/** \brief Resolve shared entities between processors
*
- * Same as resolve_shared_ents(Range&), except works for
- * all entities in instance of dimension dim.
+ * Same as resolve_shared_ents with entity set as first argument, except instead
+ * a range is input containing entities in the part/partition.
*
- * If shared_dim is input as -1 or not input, a value one less than the
- * maximum dimension of entities is used.
-
- * \param dim Dimension of entities in the partition
- * \param shared_dim Maximum dimension of shared entities to look for
+ * \param this_set In this function variant, set is used to speed up skinning
+ * \param proc_ents Entities for which to resolve shared entities
+ * \param resolve_dim Dimension of entities in part/partition
+ * \param shared_dim Maximum dimension of shared entities to look for; if -1,
+ * resolve_dim-1 is used
+ * \param id_tag If non-NULL, use this tag to get global id on which vertex resolution
+ * is based
*/
ErrorCode resolve_shared_ents(EntityHandle this_set,
- int resolve_dim = 3,
+ Range &proc_ents,
+ int resolve_dim = -1,
int shared_dim = -1,
+ Range *skin_ents = NULL,
const Tag* id_tag = 0);
-
+
/** \brief Resolve shared entities between processors
*
- * Entity skin array is offered by user not by skinner
- * It is used by other resolve_shared_ents functions above
-
- * \param skin_ents[] entity skin array by user
+ * This version can be used statically, with messages exchanged via direct calls to buffer
+ * pack/unpack functions instead of MPI
+ *
+ * \param pc Array of ParallelComm instances
+ * \param np Number of ParallelComm objects in previous argument
+ * \param this_set Set of entities in parts/partition
+ * \param to_dim Maximum dimension of shared entities to look for
*/
- ErrorCode resolve_shared_ents(EntityHandle this_set,
- Range &proc_ents,
- Range skin_ents[],
- int resolve_dim = 3,
- int shared_dim = -1,
- const Tag* id_tag = 0);
-
static ErrorCode resolve_shared_ents(ParallelComm **pc,
const unsigned int np,
EntityHandle this_set,
@@ -863,8 +890,7 @@ namespace moab {
// and tags the set with the procs sharing it; interface sets are optionally
// returned; NOTE: a subsequent step is used to verify entities on the interface
// and remove them if they're not shared
- ErrorCode create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
- int resolve_dim, int shared_dim);
+ ErrorCode create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);
// do the same but working straight from sharedEnts
ErrorCode create_interface_sets(EntityHandle this_set, int resolve_dim, int shared_dim);
@@ -1230,10 +1256,10 @@ namespace moab {
std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
Range &proc_verts);
- ErrorCode tag_shared_ents(int resolve_dim,
- int shared_dim,
- Range *skin_ents,
- std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);
+ ErrorCode get_proc_nvecs(int resolve_dim,
+ int shared_dim,
+ Range *skin_ents,
+ std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);
// after verifying shared entities, now parent/child links between sets can be established
ErrorCode create_iface_pc_links();
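
A hedged sketch of calling the range-based resolve_shared_ents with the new optional skin_ents argument documented above. Passing NULL keeps the previous behavior of computing the skin internally; the names pcomm, local_elems and id_tag are assumed from earlier setup, not taken from the commit.

    #include "moab/Core.hpp"
    #include "moab/ParallelComm.hpp"

    // Sketch only: resolve shared entities for a range of local 3D elements.
    moab::ErrorCode resolve_locals(moab::ParallelComm* pcomm, moab::Range& local_elems,
                                   const moab::Tag* id_tag)
    {
      moab::ErrorCode rval = pcomm->resolve_shared_ents(0, local_elems, 3, -1,
                                                        NULL, id_tag);
      if (moab::MB_SUCCESS != rval) return rval;
      // Alternative: supply a precomputed skin, ordered by dimension:
      // moab::Range skin[4];
      // ... fill skin[2], skin[1], skin[0] ...
      // rval = pcomm->resolve_shared_ents(0, local_elems, 3, -1, skin, id_tag);
      return rval;
    }
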
diff --git a/test/parallel/parallel_hdf5_test.cc b/test/parallel/parallel_hdf5_test.cc
index 2b2a1c8..355e99e 100644
--- a/test/parallel/parallel_hdf5_test.cc
+++ b/test/parallel/parallel_hdf5_test.cc
@@ -1491,7 +1491,7 @@ void test_write_unbalanced()
ParallelComm* pcomm = ParallelComm::get_pcomm( &mb, 0 );
if (0 == pcomm)
pcomm = new ParallelComm( &mb, MPI_COMM_WORLD );
- rval = pcomm->resolve_shared_ents( 0, entities, 2, 0, &idtag );
+ rval = pcomm->resolve_shared_ents( 0, entities, 2, 0, NULL, &idtag );
CHECK_ERR(rval);
rval = pcomm->resolve_shared_sets( sets, idtag );
CHECK_ERR(rval);
diff --git a/test/scdseq_test.cpp b/test/scdseq_test.cpp
index b241bb1..6393ddc 100644
--- a/test/scdseq_test.cpp
+++ b/test/scdseq_test.cpp
@@ -584,7 +584,7 @@ ErrorCode eseq_test2d(ScdInterface *scdi)
// tri-valent shared edge between the three blocks
// interval settings: only 3 of them
- int int1 = 100, int2 = 100, int3 = 100, int4 = 100;
+ int int1 = 10, int2 = 10, int3 = 10, int4 = 10;
ScdBox *ebox[3], *vbox[3];
ErrorCode result = create_3dtri_3_sequences(scdi, int1, int2, int3, int4, vbox, ebox);
CHECK_ERR(result);
Repository URL: https://bitbucket.org/fathomteam/moab/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.