[MOAB-dev] r1795 - in MOAB/trunk: . parallel
tautges at mcs.anl.gov
Mon May 5 20:05:46 CDT 2008
Author: tautges
Date: 2008-05-05 20:05:46 -0500 (Mon, 05 May 2008)
New Revision: 1795
Modified:
MOAB/trunk/MBParallelConventions.h
MOAB/trunk/parallel/MBParallelComm.cpp
MOAB/trunk/parallel/MBParallelComm.hpp
MOAB/trunk/parallel/ReadParallel.cpp
Log:
Backing up some work on vertex-based partitions. Element-based partitions work as before, as near as I can tell.
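
The user-visible change is that resolve_shared_ents now takes the partition
dimension (resolve_dim) separately from the maximum shared-entity dimension
(shared_dim), with -1 meaning "infer from the partition sets". A minimal
calling sketch for a vertex-based partition (include paths and variable names
are illustrative; assumes a mesh already loaded into an MBCore instance):

    #include <mpi.h>
    #include "MBCore.hpp"
    #include "MBParallelComm.hpp"

    int main(int argc, char **argv) {
      MPI_Init(&argc, &argv);
      MBCore moab;
      MBParallelComm pcomm(&moab, MPI_COMM_WORLD);
      // ... load a mesh whose partition sets contain vertices ...
      // resolve_dim 0 = vertex-based partition; shared entities up
      // to dimension 3 are found and tagged
      MBErrorCode rval = pcomm.resolve_shared_ents(0, 3);
      MPI_Finalize();
      return (MB_SUCCESS == rval) ? 0 : 1;
    }
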
Modified: MOAB/trunk/MBParallelConventions.h
===================================================================
--- MOAB/trunk/MBParallelConventions.h 2008-05-05 22:13:14 UTC (rev 1794)
+++ MOAB/trunk/MBParallelConventions.h 2008-05-06 01:05:46 UTC (rev 1795)
@@ -58,11 +58,13 @@
*
* bit 0: shared (0=not shared, 1=shared)
* bit 1: !owned (0=owned, 1=not owned)
+ * bit 2: ghost (0=not ghost, 1=ghost)
*/
#define PARALLEL_STATUS_TAG_NAME "PARALLEL_STATUS"
#define PSTATUS_SHARED 0x1
#define PSTATUS_NOT_OWNED 0x2
+#define PSTATUS_GHOST 0x4
/** \brief Tag storing interface sets
*
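
Since PARALLEL_STATUS is a bit field, the new ghost bit composes with the
existing ones; an entity can be shared, not owned, and a ghost all at once.
A short sketch of testing the flags on one entity (pstatus_tag obtained via
tag_get_handle on PARALLEL_STATUS_TAG_NAME; error handling elided):

    unsigned char pstatus;
    MBErrorCode rval = mb->tag_get_data(pstatus_tag, &ent, 1, &pstatus);
    if (MB_SUCCESS == rval) {
      bool is_shared = (pstatus & PSTATUS_SHARED);
      bool is_owned  = !(pstatus & PSTATUS_NOT_OWNED); // bit 1 stores !owned
      bool is_ghost  = (pstatus & PSTATUS_GHOST);      // new in this revision
    }
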
Modified: MOAB/trunk/parallel/MBParallelComm.cpp
===================================================================
--- MOAB/trunk/parallel/MBParallelComm.cpp 2008-05-05 22:13:14 UTC (rev 1794)
+++ MOAB/trunk/parallel/MBParallelComm.cpp 2008-05-06 01:05:46 UTC (rev 1795)
@@ -116,7 +116,8 @@
MBParallelComm::MBParallelComm(MBInterface *impl, MPI_Comm comm)
: mbImpl(impl), procConfig(comm), sharedpTag(0), sharedpsTag(0),
- sharedhTag(0), sharedhsTag(0), pstatusTag(0), ifaceSetsTag(0)
+ sharedhTag(0), sharedhsTag(0), pstatusTag(0), ifaceSetsTag(0),
+ partitionTag(0)
{
myBuffer.resize(INITIAL_BUFF_SIZE);
@@ -128,7 +129,8 @@
std::vector<unsigned char> &tmp_buff,
MPI_Comm comm)
: mbImpl(impl), procConfig(comm), sharedpTag(0), sharedpsTag(0),
- sharedhTag(0), sharedhsTag(0), pstatusTag(0), ifaceSetsTag(0)
+ sharedhTag(0), sharedhsTag(0), pstatusTag(0), ifaceSetsTag(0),
+ partitionTag(0)
{
myBuffer.swap(tmp_buff);
}
@@ -2007,134 +2009,147 @@
return MB_SUCCESS;
}
-MBErrorCode MBParallelComm::resolve_shared_ents(int dim,
+MBErrorCode MBParallelComm::resolve_shared_ents(int resolve_dim,
int shared_dim)
{
MBErrorCode result;
MBRange proc_ents;
- if (-1 == dim) {
- int this_dim = 3;
- while (proc_ents.empty() && this_dim >= 0) {
- result = mbImpl->get_entities_by_dimension(0, this_dim, proc_ents);
- if (MB_SUCCESS != result) return result;
- this_dim--;
- }
- }
- else {
- result = mbImpl->get_entities_by_dimension(0, dim, proc_ents);
+ // get the entities in the partition sets
+ MBRange part_sets;
+ MBTag part_tag = partition_tag();
+ result = mbImpl->get_entities_by_type_and_tag(0, MBENTITYSET, &part_tag,
+ NULL, 1, part_sets);
+ if (MB_SUCCESS != result) return result;
+ for (MBRange::iterator rit = part_sets.begin(); rit != part_sets.end(); rit++) {
+ MBRange tmp_ents;
+ result = mbImpl->get_entities_by_handle(*rit, tmp_ents, true);
if (MB_SUCCESS != result) return result;
+ proc_ents.merge(tmp_ents);
}
+ // resolve dim is maximal dim of entities in proc_ents
+ if (-1 == resolve_dim) {
+ resolve_dim = mbImpl->dimension_from_handle(*proc_ents.rbegin());
+ RR("Couldn't get dimension.");
+
+ }
+
+ // proc_ents should all be of same dimension
+ if (resolve_dim > shared_dim &&
+ mbImpl->dimension_from_handle(*proc_ents.rbegin()) !=
+ mbImpl->dimension_from_handle(*proc_ents.begin())) {
+ MBRange::iterator lower = proc_ents.lower_bound(MBCN::TypeDimensionMap[0].first),
+ upper = proc_ents.upper_bound(MBCN::TypeDimensionMap[resolve_dim-1].second);
+ proc_ents.erase(lower, upper);
+ }
+
// must call even if we don't have any entities, to make sure
// collective comm'n works
- return resolve_shared_ents(proc_ents, shared_dim);
+ return resolve_shared_ents(proc_ents, resolve_dim, shared_dim);
}
MBErrorCode MBParallelComm::resolve_shared_ents(MBRange &proc_ents,
+ int resolve_dim,
int shared_dim)
{
+ MBErrorCode result;
if (debug) std::cerr << "Resolving shared entities." << std::endl;
- if (proc_ents.empty()) return MB_SUCCESS;
+ if (-1 == shared_dim) {
+ if (0 == resolve_dim) {
+ result = mbImpl->get_dimension(shared_dim);
+ RR("Couldn't get dimension.");
+ }
+ else shared_dim = mbImpl->dimension_from_handle(*proc_ents.begin())-1;
+ }
+ assert(shared_dim >= 0 && resolve_dim >= 0);
- if (-1 == shared_dim)
- shared_dim = mbImpl->dimension_from_handle(*proc_ents.begin())-1;
-
// get the skin entities by dimension
MBRange skin_ents[4];
- MBErrorCode result;
std::vector<int> gid_data;
std::vector<MBEntityHandle> handle_vec;
+ int skin_dim;
- if (!proc_ents.empty()) {
- // find the skin entities
- int upper_dim = MBCN::Dimension(TYPE_FROM_HANDLE(*proc_ents.begin()));
+ // get the entities to be skinned
+ if (resolve_dim < shared_dim) {
+ // for vertex-based partition, it's the elements adj to the vertices
+ result = mbImpl->get_adjacencies(proc_ents, shared_dim,
+ false, skin_ents[resolve_dim],
+ MBInterface::UNION);
+ RR("Failed getting skinned entities.");
+ skin_dim = shared_dim-1;
+ }
+ else {
+ // for element-based partition, it's just the elements
+ skin_ents[resolve_dim] = proc_ents;
+ skin_dim = resolve_dim-1;
+ }
- MBRange::iterator rit;
- MBSkinner skinner(mbImpl);
-
- int skin_dim;
- if (shared_dim < upper_dim) {
- // if shared entity dimension is less than maximal dimension,
- // start with skin entities
- skin_dim = upper_dim-1;
- result = skinner.find_skin(proc_ents, skin_ents[skin_dim],
- skin_ents[skin_dim], true);
- RRA("Failed to find skin.");
- if (debug) std::cerr << "Found skin, now resolving." << std::endl;
- }
- else {
- // otherwise start with original entities
- skin_ents[upper_dim] = proc_ents;
- skin_dim = upper_dim;
- }
+ // find the skin
+ MBSkinner skinner(mbImpl);
+ result = skinner.find_skin(skin_ents[skin_dim+1], skin_ents[skin_dim],
+ skin_ents[skin_dim], true);
+ RRA("Failed to find skin.");
+ if (debug) std::cerr << "Found skin, now resolving." << std::endl;
- // get entities adjacent to skin ents from shared_dim down to
- // zero; don't create them if they don't exist already
- for (int this_dim = shared_dim; this_dim >= 0; this_dim--) {
+ // get entities adjacent to skin ents from shared_dim down to
+ // zero; don't create them if they don't exist already
+ for (int this_dim = skin_dim-1; this_dim >= 0; this_dim--) {
+ result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
+ false, skin_ents[this_dim],
+ MBInterface::UNION);
+ RR("Failed getting skin adjacencies.");
+ }
- if (this_dim == skin_dim) continue;
-
- result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
- false, skin_ents[this_dim],
- MBInterface::UNION);
- RR("Failed getting skin adjacencies.");
- }
-
- // global id tag
- MBTag gid_tag; int def_val = -1;
- result = mbImpl->tag_create(GLOBAL_ID_TAG_NAME, sizeof(int),
- MB_TAG_DENSE, MB_TYPE_INTEGER, gid_tag,
- &def_val, true);
- if (MB_FAILURE == result) return result;
+ // resolve shared vertices first
- else if (MB_ALREADY_ALLOCATED != result) {
- // just created it, so we need global ids
- result = assign_global_ids(0, upper_dim);
- RRA("Failed assigning global ids.");
- }
+ // global id tag
+ MBTag gid_tag; int def_val = -1;
+ result = mbImpl->tag_create(GLOBAL_ID_TAG_NAME, sizeof(int),
+ MB_TAG_DENSE, MB_TYPE_INTEGER, gid_tag,
+ &def_val, true);
+ if (MB_FAILURE == result) return result;
- // store index in temp tag; reuse gid_data
- gid_data.resize(2*skin_ents[0].size());
- int idx = 0;
- for (rit = skin_ents[0].begin();
- rit != skin_ents[0].end(); rit++)
- gid_data[idx] = idx, idx++;
- MBTag idx_tag;
- result = mbImpl->tag_create("__idx_tag", sizeof(int), MB_TAG_DENSE,
- MB_TYPE_INTEGER, idx_tag, &def_val, true);
- if (MB_SUCCESS != result && MB_ALREADY_ALLOCATED != result) return result;
- result = mbImpl->tag_set_data(idx_tag, skin_ents[0], &gid_data[0]);
- RR("Couldn't assign index tag.");
+ else if (MB_ALREADY_ALLOCATED != result) {
+ // just created it, so we need global ids
+ result = assign_global_ids(0, skin_dim+1);
+ RRA("Failed assigning global ids.");
+ }
- // get gids for skin verts in a vector, to pass to gs
- result = mbImpl->tag_get_data(gid_tag, skin_ents[0], &gid_data[0]);
- RR("Couldn't get gid tag for skin vertices.");
+ // store index in temp tag; reuse gid_data
+ gid_data.resize(2*skin_ents[0].size());
+ int idx = 0;
+ for (MBRange::iterator rit = skin_ents[0].begin();
+ rit != skin_ents[0].end(); rit++)
+ gid_data[idx] = idx, idx++;
+ MBTag idx_tag;
+ result = mbImpl->tag_create("__idx_tag", sizeof(int), MB_TAG_DENSE,
+ MB_TYPE_INTEGER, idx_tag, &def_val, true);
+ if (MB_SUCCESS != result && MB_ALREADY_ALLOCATED != result) return result;
+ result = mbImpl->tag_set_data(idx_tag, skin_ents[0], &gid_data[0]);
+ RR("Couldn't assign index tag.");
- // put handles in vector for passing to gs setup
- std::copy(skin_ents[0].begin(), skin_ents[0].end(),
- std::back_inserter(handle_vec));
+ // get gids for skin ents in a vector, to pass to gs
+ result = mbImpl->tag_get_data(gid_tag, skin_ents[0], &gid_data[0]);
+ RR("Couldn't get gid tag for skin vertices.");
- }
- else {
- // need to have at least one position so we can get a ptr to it
- gid_data.resize(1);
- handle_vec.resize(1);
- }
+ // put handles in vector for passing to gs setup
+ std::copy(skin_ents[0].begin(), skin_ents[0].end(),
+ std::back_inserter(handle_vec));
// get a crystal router
crystal_data *cd = procConfig.crystal_router();
- // get total number of verts; will overshoot highest global id, but
+ // get total number of entities; will overshoot highest global id, but
// that's ok
- int nverts_total, nverts_local;
- result = mbImpl->get_number_entities_by_dimension(0, 0, nverts_local);
+ int num_total, num_local;
+ result = mbImpl->get_number_entities_by_dimension(0, 0, num_local);
if (MB_SUCCESS != result) return result;
- int failure = MPI_Allreduce(&nverts_local, &nverts_total, 1,
+ int failure = MPI_Allreduce(&num_local, &num_total, 1,
MPI_INT, MPI_SUM, procConfig.proc_comm());
if (failure) {
result = MB_FAILURE;
- RR("Allreduce for total number of vertices failed.");
+ RR("Allreduce for total number of shared ents failed.");
}
// call gather-scatter to get shared ids & procs
@@ -2156,16 +2171,13 @@
RR("Couldn't create gs data.");
}
- // if no entities, no more communication after this, so just return
- if (proc_ents.empty()) return MB_SUCCESS;
-
// get shared proc tags
MBTag sharedp_tag, sharedps_tag, sharedh_tag, sharedhs_tag, pstatus_tag;
result = get_shared_proc_tags(sharedp_tag, sharedps_tag,
sharedh_tag, sharedhs_tag, pstatus_tag);
RRA("Couldn't get shared proc tags.");
- // load shared vertices into a tuple, then sort by index
+ // load shared verts into a tuple, then sort by index
tuple_list shared_verts;
tuple_list_init_max(&shared_verts, 2, 0, 1, 0,
skin_ents[0].size()*(MAX_SHARING_PROCS+1));
@@ -2183,27 +2195,33 @@
std::vector<int> sort_buffer(max_size);
tuple_list_sort(&shared_verts, 0,(buffer*)&sort_buffer[0]);
- // set sharing procs and handles tags on skin vertices
+ // set sharing procs and handles tags on skin ents
int maxp = -1;
std::vector<int> sharing_procs(MAX_SHARING_PROCS);
std::fill(sharing_procs.begin(), sharing_procs.end(), maxp);
j = 0; i = 0;
- // get vertices shared by 1 or n procs
+ // get ents shared by 1 or n procs
std::map<std::vector<int>, MBRange> proc_nranges;
+ MBRange proc_verts;
+ result = mbImpl->get_adjacencies(proc_ents, 0, false, proc_verts,
+ MBInterface::UNION);
+ RR("Couldn't get proc_verts.");
+
result = tag_shared_verts(shared_verts, skin_ents,
- proc_nranges);
- RRA("Trouble tagging shared vertices.");
-
+ proc_nranges, proc_verts);
+ RRA("Trouble tagging shared verts.");
+
// get entities shared by 1 or n procs
- result = tag_shared_ents(shared_dim, shared_verts, skin_ents,
+ result = tag_shared_ents(resolve_dim, shared_dim, shared_verts, skin_ents,
proc_nranges);
RRA("Trouble tagging shared entities.");
// create the sets for each interface; store them as tags on
// the interface instance
MBRange iface_sets;
- result = create_interface_sets(proc_nranges, &iface_sets);
+ result = create_interface_sets(proc_nranges, resolve_dim, shared_dim,
+ &iface_sets);
RRA("Trouble creating iface sets.");
// resolve shared ent remote handles
@@ -2260,6 +2278,7 @@
}
MBErrorCode MBParallelComm::create_interface_sets(std::map<std::vector<int>, MBRange> &proc_nranges,
+ int resolve_dim, int shared_dim,
MBRange *iface_sets_ptr)
{
if (proc_nranges.empty()) return MB_SUCCESS;
@@ -2280,27 +2299,29 @@
MBRange psets;
if (!iface_sets_ptr) iface_sets_ptr = &psets;
-
+
// get all partition sets and mark contents with iface set tag;
// pre-use iface_sets
- MBTag pset_tag;
+ MBTag pset_tag = partition_tag();
MBRange tmp_ents, tmp_ents2;
-;
- result = mbImpl->tag_get_handle(PARALLEL_PARTITION_TAG_NAME, pset_tag);
- RRA("Couldn't get PARALLEL_PARTITION tag, needed to create iface sets.");
+
result = mbImpl->get_entities_by_type_and_tag(0, MBENTITYSET, &pset_tag, NULL, 1,
psets);
RRA("Couldn't get PARALLEL_PARTITION sets.");
- for (rit = psets.begin(); rit != psets.end(); rit++) {
- result = mbImpl->get_entities_by_handle(*rit, tmp_ents, true);
- RR("Failed to get entities in partition set.");
- std::vector<MBEntityHandle> tag_vals(tmp_ents.size());
- std::fill(tag_vals.begin(), tag_vals.end(), *rit);
- result = mbImpl->tag_set_data(tmp_iface_tag, tmp_ents, &tag_vals[0]);
- RR("Failed to set iface tag on partition ents.");
- tmp_ents.clear();
+
+ if (resolve_dim < shared_dim) {
+
+ for (rit = psets.begin(); rit != psets.end(); rit++) {
+ result = mbImpl->get_entities_by_handle(*rit, tmp_ents, true);
+ RR("Failed to get entities in partition set.");
+ std::vector<MBEntityHandle> tag_vals(tmp_ents.size());
+ std::fill(tag_vals.begin(), tag_vals.end(), *rit);
+ result = mbImpl->tag_set_data(tmp_iface_tag, tmp_ents, &tag_vals[0]);
+ RR("Failed to set iface tag on partition ents.");
+ tmp_ents.clear();
+ }
}
-
+
// create interface sets, tag them, and tag their contents with iface set tag
std::vector<MBEntityHandle> tag_vals;
for (std::map<std::vector<int>,MBRange>::iterator mit = proc_nranges.begin();
@@ -2342,22 +2363,29 @@
// now go back through interface sets and add parent/child links
for (int d = 2; d >= 0; d--) {
+ if (resolve_dim < shared_dim) {
+ tag_vals.clear();
+ std::copy(psets.begin(), psets.end(), std::back_inserter(tag_vals));
+ }
+
for (rit = iface_sets_ptr->begin(); rit != iface_sets_ptr->end();
rit++) {
tmp_ents.clear();
result = mbImpl->get_entities_by_handle(*rit, tmp_ents, true);
RR("Couldn't get entities by dimension.");
- if (tmp_ents.empty() ||
- mbImpl->dimension_from_handle(*tmp_ents.rbegin()) != d) continue;
+ if (tmp_ents.empty() || (resolve_dim > shared_dim &&
+ mbImpl->dimension_from_handle(*tmp_ents.rbegin()) != d)) continue;
- // get higher-dimensional entities and their interface sets
- result = mbImpl->get_adjacencies(&(*tmp_ents.begin()), 1, d+1,
- false, tmp_ents2);
- RR("Couldn't get adjacencies for interface sets.");
- tag_vals.resize(tmp_ents2.size());
- result = mbImpl->tag_get_data(tmp_iface_tag, tmp_ents2, &tag_vals[0]);
- RR("Couldn't get iface set tag for interface sets.");
+ if (resolve_dim > shared_dim) {
+ // get higher-dimensional entities and their interface sets
+ result = mbImpl->get_adjacencies(&(*tmp_ents.begin()), 1, d+1,
+ false, tmp_ents2);
+ RR("Couldn't get adjacencies for interface sets.");
+ tag_vals.resize(tmp_ents2.size());
+ result = mbImpl->tag_get_data(tmp_iface_tag, tmp_ents2, &tag_vals[0]);
+ RR("Couldn't get iface set tag for interface sets.");
+ }
// go through and for any on interface make it a parent
MBEntityHandle last_set = 0;
@@ -2390,7 +2418,8 @@
return MB_SUCCESS;
}
-MBErrorCode MBParallelComm::tag_shared_ents(int shared_dim,
+MBErrorCode MBParallelComm::tag_shared_ents(int resolve_dim,
+ int shared_dim,
tuple_list &shared_verts,
MBRange *skin_ents,
std::map<std::vector<int>, MBRange> &proc_nranges)
@@ -2405,7 +2434,9 @@
std::fill(sharing_procs.begin(), sharing_procs.end(), -1);
std::vector<unsigned char> pstatus_flags(MB_MAX_SUB_ENTITIES);
- for (int d = shared_dim; d > 0; d--) {
+ for (int d = 3; d > 0; d--) {
+ if (resolve_dim == d) continue;
+
for (MBRange::iterator rit = skin_ents[d].begin();
rit != skin_ents[d].end(); rit++) {
// get connectivity
@@ -2444,15 +2475,20 @@
vp_range.insert(sharing_procs[p]), p++;
assert(p < MAX_SHARING_PROCS);
// intersect with range for this skin ent
- if (0 != nc) sp_range = sp_range.intersect(vp_range);
- else sp_range = vp_range;
+ if (0 == nc) sp_range = vp_range;
+ else if (resolve_dim < shared_dim)
+ sp_range.merge(vp_range);
+ else
+ sp_range = sp_range.intersect(vp_range);
// need to also save rank zero, since ranges don't handle that
if (sharing_procs[0] == 0) and_zero = true;
}
+ if (sp_range.empty() && resolve_dim < shared_dim) continue;
+
// intersection is the owning proc(s) for this skin ent; should
- // not be empty
+ // not be empty unless we're using a vertex-based partition
assert(!sp_range.empty() || and_zero);
MBRange::iterator rit2;
// set tag for this ent
@@ -2483,22 +2519,13 @@
}
}
- // build range for each sharing proc
- std::map<int, MBRange> proc_ranges;
- for (std::map<std::vector<int>, MBRange>::iterator mit = proc_nranges.begin();
- mit != proc_nranges.end(); mit++) {
- for (unsigned int i = 0; i < (*mit).first.size(); i++)
- proc_ranges[(*mit).first[i]].merge((*mit).second);
- }
-
- // for each sharing proc, send handles, then post receive to get sharing handles back
-
return MB_SUCCESS;
}
-MBErrorCode MBParallelComm::tag_shared_verts(tuple_list &shared_verts,
+MBErrorCode MBParallelComm::tag_shared_verts(tuple_list &shared_ents,
MBRange *skin_ents,
- std::map<std::vector<int>, MBRange> &proc_nranges)
+ std::map<std::vector<int>, MBRange> &proc_nranges,
+ MBRange &proc_verts)
{
MBTag sharedp_tag, sharedps_tag, sharedh_tag, sharedhs_tag, pstatus_tag;
MBErrorCode result = get_shared_proc_tags(sharedp_tag, sharedps_tag,
@@ -2511,15 +2538,15 @@
std::fill(sharing_procs.begin(), sharing_procs.end(), -1);
std::fill(sharing_handles, sharing_handles+MAX_SHARING_PROCS, 0);
- while (j < 2*shared_verts.n) {
+ while (j < 2*shared_ents.n) {
// count & accumulate sharing procs
unsigned int nump = 0;
- int this_idx = shared_verts.vi[j];
+ int this_idx = shared_ents.vi[j];
MBEntityHandle this_ent = skin_ents[0][this_idx];
- while (shared_verts.vi[j] == this_idx) {
+ while (shared_ents.vi[j] == this_idx) {
j++;
- sharing_procs[nump] = shared_verts.vi[j++];
- sharing_handles[nump++] = shared_verts.vul[i++];
+ sharing_procs[nump] = shared_ents.vi[j++];
+ sharing_handles[nump++] = shared_ents.vul[i++];
}
std::sort(&sharing_procs[0], &sharing_procs[nump]);
@@ -2540,8 +2567,12 @@
RR("Failed setting shared_procs tag on skin vertices.");
unsigned char share_flag = PSTATUS_SHARED;
- if ((int) procConfig.proc_rank() > sharing_procs[0])
+ if (!proc_verts.empty() && proc_verts.find(this_ent) == proc_verts.end())
+ share_flag |= (PSTATUS_NOT_OWNED | PSTATUS_GHOST);
+
+ else if (proc_verts.empty() && (int) procConfig.proc_rank() > sharing_procs[0])
share_flag |= PSTATUS_NOT_OWNED;
+
result = mbImpl->tag_set_data(pstatus_tag, &this_ent, 1, &share_flag);
RRA("Couldn't set shared tag on shared vertex.");
@@ -3002,6 +3033,22 @@
return ifaceSetsTag;
}
+ //! return partition set tag
+MBTag MBParallelComm::partition_tag()
+{
+ if (!partitionTag) {
+ MBErrorCode result = mbImpl->tag_create(PARALLEL_PARTITION_TAG_NAME,
+ MAX_SHARING_PROCS*sizeof(MBEntityHandle),
+ MB_TAG_SPARSE,
+ MB_TYPE_HANDLE, partitionTag,
+ NULL, true);
+ if (MB_SUCCESS != result && MB_ALREADY_ALLOCATED != result)
+ return 0;
+ }
+
+ return partitionTag;
+}
+
#ifdef TEST_PARALLELCOMM
#include <iostream>
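
The ownership rule added to tag_shared_verts above distills to the following
(a paraphrase of that hunk, not a drop-in function: proc_verts holds the
vertices adjacent to this processor's partition entities and is left empty
for element-based partitions):

    unsigned char share_flag = PSTATUS_SHARED;
    if (!proc_verts.empty() && proc_verts.find(this_ent) == proc_verts.end())
      // vertex-based partition: a shared vertex outside our own
      // partition is a ghost this processor does not own
      share_flag |= (PSTATUS_NOT_OWNED | PSTATUS_GHOST);
    else if (proc_verts.empty() &&
             (int) procConfig.proc_rank() > sharing_procs[0])
      // element-based partition: the lowest-rank sharing processor
      // owns the vertex
      share_flag |= PSTATUS_NOT_OWNED;
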
Modified: MOAB/trunk/parallel/MBParallelComm.hpp
===================================================================
--- MOAB/trunk/parallel/MBParallelComm.hpp 2008-05-05 22:13:14 UTC (rev 1794)
+++ MOAB/trunk/parallel/MBParallelComm.hpp 2008-05-06 01:05:46 UTC (rev 1795)
@@ -163,6 +163,7 @@
* \param shared_dim Maximum dimension of shared entities to look for
*/
MBErrorCode resolve_shared_ents(MBRange &proc_ents,
+ int resolve_dim = -1,
int shared_dim = -1);
/** \brief Resolve shared entities between processors
@@ -176,7 +177,7 @@
* \param dim Dimension of entities in the partition
* \param shared_dim Maximum dimension of shared entities to look for
*/
- MBErrorCode resolve_shared_ents(int dim = 3,
+ MBErrorCode resolve_shared_ents(int resolve_dim = 3,
int shared_dim = -1);
/** \brief Get entities shared with other processors, based on
@@ -234,6 +235,9 @@
//! return iface_set tag
MBTag iface_sets_tag();
+ //! return partitions set tag
+ MBTag partition_tag();
+
private:
int num_subranges(const MBRange &this_range);
@@ -342,14 +346,17 @@
MBErrorCode tag_shared_verts(tuple_list &shared_verts,
MBRange *skin_ents,
- std::map<std::vector<int>, MBRange> &proc_nranges);
+ std::map<std::vector<int>, MBRange> &proc_nranges,
+ MBRange &proc_verts);
- MBErrorCode tag_shared_ents(int shared_dim,
+ MBErrorCode tag_shared_ents(int resolve_dim,
+ int shared_dim,
tuple_list &shared_verts,
MBRange *skin_ents,
std::map<std::vector<int>, MBRange> &proc_nranges);
MBErrorCode create_interface_sets(std::map<std::vector<int>, MBRange> &proc_nranges,
+ int resolve_dim, int shared_dim,
MBRange *iface_sets_ptr = NULL);
//! resolve remote handles for shared non-vertex ents, assuming
@@ -456,7 +463,7 @@
//! tags used to save sharing procs and handles
MBTag sharedpTag, sharedpsTag, sharedhTag, sharedhsTag, pstatusTag,
- ifaceSetsTag;
+ ifaceSetsTag, partitionTag;
//! interface sets, one set per unique combination of procs
MBRange ifaceSets;
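
With the lazily created partition tag, gathering this processor's partition
sets becomes a single tagged-set query, which is exactly what the new
resolve_shared_ents overload does internally (sketch; mbImpl is the
MBInterface that the MBParallelComm instance wraps):

    MBRange part_sets;
    MBTag part_tag = pcomm.partition_tag();   // created on first use
    MBErrorCode rval = mbImpl->get_entities_by_type_and_tag(
        0, MBENTITYSET, &part_tag, NULL, 1, part_sets);
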
Modified: MOAB/trunk/parallel/ReadParallel.cpp
===================================================================
--- MOAB/trunk/parallel/ReadParallel.cpp 2008-05-05 22:13:14 UTC (rev 1794)
+++ MOAB/trunk/parallel/ReadParallel.cpp 2008-05-06 01:05:46 UTC (rev 1795)
@@ -98,10 +98,10 @@
// get resolve_shared_ents option
std::string shared_str;
- int resolve_dim = 3, shared_dim = -1;
+ int resolve_dim = -1, shared_dim = -1;
result = opts.get_str_option("PARALLEL_RESOLVE_SHARED_ENTS", shared_str);
if (MB_TYPE_OUT_OF_RANGE == result) {
- resolve_dim = 3;
+ resolve_dim = -1;
shared_dim = -1;
}
else if (MB_SUCCESS == result) {
@@ -168,7 +168,7 @@
return MB_FAILURE;
}
- if (-1 != resolve_dim) pa_vec.push_back(PA_RESOLVE_SHARED_ENTS);
+ if (-2 != resolve_dim) pa_vec.push_back(PA_RESOLVE_SHARED_ENTS);
if (-1 != ghost_dim) pa_vec.push_back(PA_EXCHANGE_GHOSTS);
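
The reader changes follow from the new defaults: resolve_dim = -1 now means
"infer the dimension from the partition contents", so the skip-resolution
sentinel moves to -2 (set in an option-parsing branch not shown in this
hunk). Condensed control flow:

    int resolve_dim = -1, shared_dim = -1;   // new defaults: infer both
    // ... PARALLEL_RESOLVE_SHARED_ENTS option parsing may override
    // these, or set resolve_dim = -2 to disable the step entirely ...
    if (-2 != resolve_dim)
      pa_vec.push_back(PA_RESOLVE_SHARED_ENTS);
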