[MOAB-dev] r2925 - in MOAB/branches/parallel_ghosting: . parallel tools/iMesh tools/mbcoupler
tautges at mcs.anl.gov
Tue Jun 2 21:42:46 CDT 2009
Author: tautges
Date: 2009-06-02 21:42:45 -0500 (Tue, 02 Jun 2009)
New Revision: 2925
Modified:
MOAB/branches/parallel_ghosting/MBParallelConventions.h
MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp
MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp
MOAB/branches/parallel_ghosting/parallel/MBProcConfig.hpp
MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp
MOAB/branches/parallel_ghosting/tools/iMesh/iMeshP_MOAB.cpp
MOAB/branches/parallel_ghosting/tools/mbcoupler/mbcoupler_test.cpp
Log:
- changed the interface of a few MBParallelComm tag communication functions
to consolidate repeated code
- added operation types for PSTATUS filtering (and, or, not)
- reordered some of the data in buffers passed with MPI
- implemented functions allowing the application to set rank/size
(used for testing packing/unpacking)
- wrote unit tests for packing/unpacking with store_remote_handles=true
Pack/unpack tests work for interface cases; moving on to ghost cases.
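
The new PSTATUS operation types are consumed by filter_pstatus; a minimal
sketch of the intended usage, condensed from the filter_pstatus call in the
MBParallelComm.cpp hunk below (pcomm, sent_ents, and to_proc are assumed to
be set up already, and filter_pstatus is assumed callable on the pcomm
instance):

    MBRange already_shared;
    // keep in already_shared only entities whose pstatus has the
    // PSTATUS_SHARED bit set (PSTATUS_AND = all given bits must be set),
    // restricted to entities shared with to_proc
    MBErrorCode result = pcomm->filter_pstatus(sent_ents, PSTATUS_SHARED,
                                               PSTATUS_AND, to_proc,
                                               &already_shared);
    // entities already shared with to_proc need not be sent again
    if (MB_SUCCESS == result && !already_shared.empty())
      sent_ents = sent_ents.subtract(already_shared);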
Modified: MOAB/branches/parallel_ghosting/MBParallelConventions.h
===================================================================
--- MOAB/branches/parallel_ghosting/MBParallelConventions.h 2009-06-02 23:10:03 UTC (rev 2924)
+++ MOAB/branches/parallel_ghosting/MBParallelConventions.h 2009-06-03 02:42:45 UTC (rev 2925)
@@ -83,6 +83,9 @@
#define PSTATUS_SHARED 0x2
#define PSTATUS_MULTISHARED 0x4
#define PSTATUS_INTERFACE 0x8
-#define PSTATUS_GHOST 0x16
+#define PSTATUS_GHOST 0x10
+#define PSTATUS_AND 0x1
+#define PSTATUS_OR 0x2
+#define PSTATUS_NOT 0x3
#endif
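
A note on the PSTATUS_GHOST fix above: pstatus values are tested as bit
flags, so each must occupy a distinct bit. The old value 0x16 (binary 10110)
overlapped the SHARED (0x2) and MULTISHARED (0x4) bits, so any multishared
entity would also have tested as a ghost; 0x10 is the next free bit. The new
PSTATUS_AND/OR/NOT values are operation codes passed as a separate argument,
not bits in the pstatus byte, so they can safely reuse small integers. A
minimal illustration:

    unsigned char pstat = PSTATUS_SHARED | PSTATUS_MULTISHARED;  // 0x06
    // old define: (pstat & 0x16) != 0 -- shared entities looked ghosted
    // new define: (pstat & 0x10) == 0 -- correct
    bool is_ghost = (pstat & PSTATUS_GHOST);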
Modified: MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp 2009-06-02 23:10:03 UTC (rev 2924)
+++ MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp 2009-06-03 02:42:45 UTC (rev 2925)
@@ -101,11 +101,12 @@
#define UNPACK_RANGE(buff, rng) {int num_subs; UNPACK_INTS(buff, &num_subs, 1); UPC(num_subs, "-subranged range"); MBEntityHandle _eh[2]; \
for (int i = 0; i < num_subs; i++) { UNPACK_EH(buff, _eh, 2); rng.insert(_eh[0], _eh[1]);}}
#define CHECK_BUFF_SPACE(buff_vec, buff_ptr, addl_space) { \
- unsigned int _new_size = buff_ptr - &buff_vec[0] + (addl_space); \
- if (_new_size > buff_vec.capacity()) { \
- buff_vec.reserve(1.5*_new_size); \
- buff_vec.resize(_new_size - (addl_space)); \
- buff_ptr = &buff_vec[_new_size-(addl_space)];}}
+ unsigned int _old_size = buff_ptr - &buff_vec[0], \
+ _new_size = _old_size + (addl_space); \
+ if (_new_size > buff_vec.size()) { \
+ buff_vec.resize(1.5*_new_size); \
+ buff_ptr = &buff_vec[_new_size-(addl_space)];} }
+
#define RANGE_SIZE(rng) (2*sizeof(MBEntityHandle)*num_subranges(rng)+sizeof(int))
#define RR(a) if (MB_SUCCESS != result) {\
@@ -468,9 +469,9 @@
// get an estimate of the buffer size
buff_size = estimate_ents_buffer_size(orig_ents, store_remote_handles);
buff.clear();
- buff.resize(buff_size);
-
+ buff.reserve(1);
unsigned char *buff_ptr = &buff[0];
+ CHECK_BUFF_SPACE(buff, buff_ptr, buff_size);
// entities
result = pack_entities(orig_ents, buff, buff_ptr,
@@ -661,9 +662,10 @@
// others (with zero handles)
if (store_remote_handles) {
- // buff space is at least proc+handle for each entity
+ // buff space is at least proc+handle for each entity; use avg of 4 other procs
+ // to estimate buff size, but check later
CHECK_BUFF_SPACE(buff, buff_ptr,
- sizeof(int) + (sizeof(int) + sizeof(MBEntityHandle))*entities.size());
+ sizeof(int) + (5*sizeof(int) + sizeof(MBEntityHandle))*entities.size());
// 1. # entities = E
PACK_INT(buff_ptr, entities.size());
@@ -678,80 +680,25 @@
result = mbImpl->tag_get_data(pstatus_tag(), entities, &pstatus_vals[0]);
RRA("Failed to get sharedp_tag.");
- unsigned int i, j;
+ unsigned int i;
std::vector<std::set<unsigned int> >::iterator vit;
int tmp_procs[MAX_SHARING_PROCS];
- MBEntityHandle zeroh = 0, tmp_handles[MAX_SHARING_PROCS];
+ MBEntityHandle tmp_handles[MAX_SHARING_PROCS];
+ std::set<unsigned int> dumprocs;
// 2. for e in E
for (rit = entities.begin(), i = 0;
rit != entities.end(); rit++, i++) {
- // number of entities starts with # others (besides to_proc) being sent
- // this entity in this message exchange, but only if not iface exchange
- if (!is_iface && entprocs) num_ents = entprocs[i].size()-1;
- else num_ents = 0;
+ result = build_sharedhps_list(*rit, pstatus_vals[i], sharedp_vals[i],
+ (entprocs ? (*entprocs)[i] : dumprocs),
+ num_ents, tmp_procs, tmp_handles);
+ RRA("Failed to build sharedhps.");
- // add number of procs/handles in shared handle list
- if (!(pstatus_vals[i] & PSTATUS_SHARED))
- // just sender and receiver
- num_ents += 2;
- else if (-1 != sharedp_vals[i])
- // sender, receiver, and one other
- num_ents += 3;
- else {
- // variable # already-sharing procs
- result = mbImpl->tag_get_data(sharedps_tag(), entities, tmp_procs);
- RRA("Failed to get sharedps tag.");
- result = mbImpl->tag_get_data(sharedhs_tag(), entities, tmp_handles);
- RRA("Failed to get sharedhs tag.");
- j = 0;
- // scan forward to first unoccupied shared proc or end of shared proc tag
- while (j < MAX_SHARING_PROCS && tmp_procs[j] != -1)
- j++;
- num_ents += j;
- assert(num_ents < MAX_SHARING_PROCS);
- }
-
- CHECK_BUFF_SPACE(buff, buff_ptr,
- num_ents*(sizeof(int)+sizeof(MBEntityHandle)) + sizeof(int));
- // a. # procs sharing e, incl. sender and receiver = P
+ // now pack them
+ CHECK_BUFF_SPACE(buff, buff_ptr, (num_ents+1)*sizeof(int) + sizeof(MBEntityHandle));
PACK_INT(buff_ptr, num_ents);
-
- // pack procs/handles on sender/receiver first
- PACK_INT(buff_ptr, procConfig.proc_rank());
- PACK_EH(buff_ptr, &(*rit), 1);
- PACK_INT(buff_ptr, to_proc);
- PACK_EH(buff_ptr, &zeroh, 1);
-
- // now other procs to which this ent is being sent, with zero handle for now,
- // only if not on iface
- if (!is_iface && entprocs) {
- for (std::set<unsigned int>::iterator sit = (*entprocs)[i].begin();
- sit != (*entprocs)[i].end(); sit++) {
- if (to_proc == (int)*sit) continue;
- PACK_INT(buff_ptr, *sit);
- PACK_EH(buff_ptr, &zeroh, 1);
- }
- }
-
- // now other sharing procs, again only if not iface
- if (!is_iface) {
- num_ents -= 2;
- if (entprocs) num_ents -= entprocs[i].size();
- }
- else num_ents = 0;
-
- for (j = 0; j < num_ents; j++) {
- // only need to test for sending proc here, receiving proc
- // won't be in that list, by def (otherwise we wouldn't be sending
- // the entity there)
- if (tmp_procs[j] == (int)procConfig.proc_rank()) continue;
- assert(-1 != tmp_procs[j] && 0 != tmp_handles[j]);
- // b. for p in P (procs sharing e)
- PACK_INT(buff_ptr, tmp_procs[j]);
- // c. for p in P (handle for e on p) (Note1)
- PACK_EH(buff_ptr, tmp_handles+j, 1);
- }
+ PACK_INTS(buff_ptr, tmp_procs, num_ents);
+ PACK_EH(buff_ptr, tmp_handles, num_ents);
}
}
@@ -768,12 +715,8 @@
PACK_INT(buff_ptr, ((int) MBVERTEX));
PACK_INT(buff_ptr, ((int) num_ents));
- std::vector<double*> coords(3);
- for (int i = 0; i < 3; i++)
- coords[i] = reinterpret_cast<double*>(buff_ptr + i * num_ents * sizeof(double));
- assert(NULL != wu);
- result = wu->get_node_arrays(3, num_ents, these_ents, 0, 0, coords);
- RRA("Couldn't allocate node space.");
+ result = mbImpl->get_coords(these_ents, (double*)buff_ptr);
+ RRA("Couldn't get vertex coordinates.");
PC(3*num_ents, " doubles");
buff_ptr += 3 * num_ents * sizeof(double);
@@ -845,6 +788,72 @@
return MB_SUCCESS;
}
+MBErrorCode MBParallelComm::build_sharedhps_list(const MBEntityHandle entity,
+ const unsigned char pstatus,
+ const int sharedp,
+ const std::set<unsigned int> &entprocs,
+ unsigned int &num_ents,
+ int *tmp_procs,
+ MBEntityHandle *tmp_handles)
+{
+ MBErrorCode result = MB_SUCCESS;
+ MBEntityHandle zeroh = 0, sharedh;
+
+ // build shared proc/handle lists
+ num_ents = 0;
+ // start with multi-shared, since if it is the owner will be first
+ if (pstatus & PSTATUS_MULTISHARED) {
+ result = mbImpl->tag_get_data(sharedps_tag(), &entity, 1, tmp_procs);
+ RRA("Failed to get sharedps tag.");
+ result = mbImpl->tag_get_data(sharedhs_tag(), &entity, 1, tmp_handles);
+ RRA("Failed to get sharedhs tag.");
+ assert(-1 != tmp_procs[2] && 0 != tmp_handles[2] &&
+ "Shared procs/handles should have at least 3 non-zeros for multi-shared ents");
+ int j = 3;
+ // scan forward to first unoccupied shared proc or end of shared proc tag
+ while (j < MAX_SHARING_PROCS && tmp_procs[j] != -1)
+ j++;
+ num_ents += j;
+ assert(num_ents < MAX_SHARING_PROCS);
+ }
+ else if (pstatus & PSTATUS_NOT_OWNED) {
+ // if not multishared and not owned, other sharing proc is owner, put that
+ // one first
+ assert("If not owned, I should be shared too" &&
+ pstatus & PSTATUS_SHARED);
+ tmp_procs[0] = sharedp;
+ tmp_procs[1] = procConfig.proc_rank();
+ tmp_handles[0] = sharedh;
+ tmp_handles[1] = entity;
+ num_ents = 2;
+ }
+ else if (pstatus & PSTATUS_SHARED) {
+ // if not multishared and owned, I'm owner
+ tmp_procs[0] = procConfig.proc_rank();
+ tmp_procs[1] = sharedp;
+ tmp_handles[0] = entity;
+ result = mbImpl->tag_get_data(sharedh_tag(), &entity, 1, tmp_handles+1);
+ RRA("Failed to get sharedh tag.");
+ num_ents = 2;
+ }
+ else {
+ // not shared yet, just add owner (me)
+ tmp_procs[0] = procConfig.proc_rank();
+ tmp_handles[0] = entity;
+ num_ents = 1;
+ }
+
+ // now add others, with zero handle for now
+ for (std::set<unsigned int>::iterator sit = entprocs.begin();
+ sit != entprocs.end(); sit++) {
+ tmp_procs[num_ents] = *sit;
+ tmp_handles[num_ents] = zeroh;
+ num_ents++;
+ }
+
+ return MB_SUCCESS;
+}
+
MBErrorCode MBParallelComm::pack_entity_seq(const int nodes_per_entity,
const bool store_remote_handles,
const int to_proc,
@@ -1126,7 +1135,6 @@
int num_ents;
unsigned char *buff_save = buff_ptr;
int i, j;
- unsigned int k;
if (store_remote_handles) {
UNPACK_INT(buff_ptr, num_ents);
@@ -1167,49 +1175,57 @@
MBEntityHandle new_h = 0;
MBEntityHandle *connect;
- if (this_type != MBVERTEX) connect = (MBEntityHandle*) buff_ptr;
+ double *coords;
+ int num_ps = -1;
- int num_ps = -1;
- int minproc = -1;
-
+ //=======================================
+ // unpack all the data at once, to make sure the buffer pointers
+ // are tracked correctly
+ //=======================================
if (store_remote_handles) {
// pointers to other procs/handles
UNPACK_INT(buff_save, num_ps);
- for (i = 0; i < num_ps; i++) {
- UNPACK_INT(buff_save, ps[i]);
- UNPACK_EH(buff_save, &hs[i], 1);
- }
-
- // if multi-shared entity, need to see if we have received it from
- // another proc already; look up by owner proc and handle in L2 lists
- if (num_ps > 2) {
- minproc = std::min_element(&ps[0], &ps[num_ps]) - &ps[0];
- assert(minproc < num_ps);
- int idx = get_buffers(ps[minproc]);
- if (idx == (int)L1h.size()) L1h.resize(idx+1);
- for (k = 0; k < L2hrem.size(); k++) {
- if (L2hrem[k] == hs[minproc] && ps[minproc] == (int) L2p[k])
- new_h = L2hloc[k];
- }
- }
+ UNPACK_INTS(buff_save, &ps[0], num_ps);
+ UNPACK_EH(buff_save, &hs[0], num_ps);
}
-
+
+ if (MBVERTEX == this_type) {
+ coords = (double*) buff_ptr;
+ buff_ptr += 3*sizeof(double);
+ }
+ else {
+ connect = (MBEntityHandle*) buff_ptr;
+ buff_ptr += verts_per_entity * sizeof(MBEntityHandle);
+
+ // update connectivity to local handles
+ result = get_local_handles(connect, verts_per_entity, new_ents);
+ RRA("Couldn't get local handles.");
+ }
+
+ //=======================================
+ // now, process that data; begin by finding an identical
+ // entity, if there is one
+ //=======================================
+ if (store_remote_handles) {
+ result = find_existing_entity(is_iface, &ps[0], &hs[0], num_ps,
+ connect, verts_per_entity,
+ this_type,
+ L2hloc, L2hrem, L2p,
+ new_h);
+ RRA("Trouble getting existing entity.");
+ }
+
+ //=======================================
+ // if we didn't find one, we'll have to create one
+ //=======================================
if (!new_h) {
- if (is_iface)
- new_h = find_existing_entity(connect, verts_per_entity,
- this_type);
- if (!new_h && MBVERTEX == this_type) {
- // make new vertex
- double xyz[3];
- for (k = 0; k < 3; k++) xyz[k] = ((double*)buff_ptr)[k*num_ents2];
- result = mbImpl->create_vertex(xyz, new_h);
+
+ if (MBVERTEX == this_type) {
+ // create a vertex
+ result = mbImpl->create_vertex(coords, new_h);
RRA("Couldn't make new vertex.");
- buff_ptr += sizeof(double);
}
- else if (!new_h) {
- // update connectivity to local handles
- result = get_local_handles(connect, verts_per_entity, new_ents);
- RRA("Couldn't get local handles.");
+ else {
// create the element
result = mbImpl->create_element(this_type, connect, verts_per_entity, new_h);
RRA("Couldn't make new vertex.");
@@ -1218,24 +1234,28 @@
result = ru->update_adjacencies(new_h, num_ents,
verts_per_entity, connect);
RRA("Failed to update adjacencies.");
-
- buff_ptr += verts_per_entity * sizeof(MBEntityHandle);
}
+ // should have a new handle now
+ assert(new_h);
+
// if a new multi-shared entity, save owner for subsequent lookup in L2 lists
if (store_remote_handles && !is_iface && num_ps > 2) {
- assert(-1 != minproc);
- L2hrem.push_back(hs[minproc]);
+ L2hrem.push_back(hs[0]);
L2hloc.push_back(new_h);
- L2p.push_back(ps[minproc]);
+ L2p.push_back(ps[0]);
}
if (!is_iface) {
- assert(is_iface || new_h > *new_ents.rbegin());
+ assert("New entity shouldn't be in new_ents list already" &&
+ new_ents.find(new_h) == new_ents.end());
new_ents.insert(new_h);
}
}
+ //=======================================
+ // take care of sharing data
+ //=======================================
if (store_remote_handles) {
// update sharing data and pstatus, adjusting order if iface
@@ -1245,24 +1265,24 @@
// need to send this new handle to all sharing procs
if (!is_iface) {
for (j = 0; j < num_ps; j++) {
+ if (ps[j] == (int)procConfig.proc_rank()) continue;
int idx = get_buffers(ps[j]);
if (idx == (int)L1h.size()) L1h.resize(idx+1);
+ assert("Returned handles should always be non-zero" &&
+ hs[j] && new_h);
L1h[idx].push_back(hs[j]);
L1h[idx].push_back(new_h);
}
}
- }
- if (store_remote_handles) {
- assert(-1 != num_ps);
+ assert("Shouldn't be here for non-shared entities" &&
+ -1 != num_ps);
std::fill(&ps[0], &ps[num_ps], -1);
std::fill(&hs[0], &hs[num_ps], 0);
}
}
- if (MBVERTEX == this_type) buff_ptr += 2*num_ents2*sizeof(double);
-
#ifdef DEBUG_PACKING
std::cerr << "Unpacked " << num_ents << " ents of type "
<< MBCN::EntityTypeName(TYPE_FROM_HANDLE(this_type)) << std::endl;
@@ -1277,95 +1297,173 @@
return MB_SUCCESS;
}
+MBErrorCode MBParallelComm::print_buffer(unsigned char *buff_ptr)
+{
+ // 1. # entities = E
+ int num_ents;
+ int i, j, k;
+ std::vector<int> ps;
+ std::vector<MBEntityHandle> hs;
+
+ UNPACK_INT(buff_ptr, num_ents);
+ std::cout << num_ents << " entities..." << std::endl;
+
+ // save place where remote handle info starts, then scan forward to ents
+ for (i = 0; i < num_ents; i++) {
+ UNPACK_INT(buff_ptr, j);
+ ps.resize(j);
+ hs.resize(j);
+ std::cout << "Entity " << i << ": # procs = " << j << std::endl;
+ UNPACK_INTS(buff_ptr, &ps[0], j);
+ UNPACK_EH(buff_ptr, &hs[0], j);
+ std::cout << " Procs: ";
+ for (k = 0; k < j; k++) std::cout << ps[k] << " ";
+ std::cout << std::endl;
+ std::cout << " Handles: ";
+ for (k = 0; k < j; k++) std::cout << hs[k] << " ";
+ std::cout << std::endl;
+ }
+
+ while (true) {
+ MBEntityType this_type = MBMAXTYPE;
+ UNPACK_INT(buff_ptr, this_type);
+ assert(this_type >= MBVERTEX &&
+ (this_type == MBMAXTYPE || this_type < MBENTITYSET));
+
+ // MBMAXTYPE signifies end of entities data
+ if (MBMAXTYPE == this_type) break;
+
+ // get the number of ents
+ int num_ents2, verts_per_entity;
+ UNPACK_INT(buff_ptr, num_ents2);
+
+ // unpack the nodes per entity
+ if (MBVERTEX != this_type) {
+ UNPACK_INT(buff_ptr, verts_per_entity);
+ }
+
+ std::cout << "Type: " << MBCN::EntityTypeName(this_type)
+ << "; num_ents = " << num_ents2;
+ if (MBVERTEX != this_type) std::cout << "; verts_per_ent = " << verts_per_entity;
+ std::cout << std::endl;
+
+ for (int e = 0; e < num_ents2; e++) {
+ // check for existing entity, otherwise make new one
+ MBEntityHandle *connect;
+ double *coords;
+
+ if (MBVERTEX == this_type) {
+ coords = (double*) buff_ptr;
+ buff_ptr += 3*sizeof(double);
+ std::cout << "xyz = " << coords[0] << ", " << coords[1] << ", "
+ << coords[2] << std::endl;
+ }
+ else {
+ connect = (MBEntityHandle*) buff_ptr;
+ buff_ptr += verts_per_entity * sizeof(MBEntityHandle);
+
+ // update connectivity to local handles
+ std::cout << "Connectivity: ";
+ for (k = 0; k < verts_per_entity; k++) std::cout << connect[k] << " ";
+ std::cout << std::endl;
+ }
+ }
+ }
+
+ return MB_SUCCESS;
+}
+
MBErrorCode MBParallelComm::update_remote_data(MBEntityHandle new_h,
int *ps,
MBEntityHandle *hs,
int num_ps,
const bool is_iface)
{
- MBEntityHandle tmp_hs[MAX_SHARING_PROCS];
- int tmp_ps[MAX_SHARING_PROCS];
+ MBEntityHandle tag_hs[MAX_SHARING_PROCS];
+ int tag_ps[MAX_SHARING_PROCS];
unsigned char pstat;
- // get initial sharing data; tmp_ps and tmp_hs get terminated with -1 and 0
+ // get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
// in this function, so no need to initialize
- MBErrorCode result = get_sharing_data(new_h, tmp_ps, tmp_hs, pstat);
+ MBErrorCode result = get_sharing_data(new_h, tag_ps, tag_hs, pstat);
RRA("");
// add any new sharing data
- int num_exist = std::find(tmp_ps, tmp_ps+MAX_SHARING_PROCS, -1) - tmp_ps;
+ int num_exist = std::find(tag_ps, tag_ps+MAX_SHARING_PROCS, -1) - tag_ps;
bool changed = false;
int idx;
if (!num_exist) {
- memcpy(tmp_ps, ps, num_ps*sizeof(int));
- memcpy(tmp_hs, hs, num_ps*sizeof(MBEntityHandle));
+ memcpy(tag_ps, ps, num_ps*sizeof(int));
+ memcpy(tag_hs, hs, num_ps*sizeof(MBEntityHandle));
num_exist = num_ps;
changed = true;
}
else {
for (int i = 0; i < num_ps; i++) {
- idx = std::find(tmp_ps, tmp_ps+num_exist, ps[i]) - tmp_ps;
+ idx = std::find(tag_ps, tag_ps+num_exist, ps[i]) - tag_ps;
if (idx == num_exist) {
- tmp_ps[num_exist] = ps[i];
- tmp_hs[num_exist] = hs[i];
+ tag_ps[num_exist] = ps[i];
+ tag_hs[num_exist] = hs[i];
num_exist++;
changed = true;
}
- else if (0 == tmp_hs[idx]) {
- tmp_hs[idx] = hs[i];
+ else if (0 == tag_hs[idx]) {
+ tag_hs[idx] = hs[i];
changed = true;
}
- else {
- assert(hs[i] == tmp_hs[idx]);
+ else if (0 != hs[i]) {
+ assert(hs[i] == tag_hs[idx]);
}
}
}
// adjust for interface layer if necessary
if (is_iface) {
- idx = std::min_element(tmp_ps, tmp_ps+num_exist) - tmp_ps;
+ idx = std::min_element(tag_ps, tag_ps+num_exist) - tag_ps;
if (idx) {
- int tmp_proc = tmp_ps[idx];
- tmp_ps[idx] = tmp_ps[0];
- tmp_ps[0] = tmp_proc;
- MBEntityHandle tmp_h = tmp_hs[idx];
- tmp_hs[idx] = tmp_hs[0];
- tmp_hs[0] = tmp_h;
+ int tag_proc = tag_ps[idx];
+ tag_ps[idx] = tag_ps[0];
+ tag_ps[0] = tag_proc;
+ MBEntityHandle tag_h = tag_hs[idx];
+ tag_hs[idx] = tag_hs[0];
+ tag_hs[0] = tag_h;
changed = true;
- if (tmp_ps[0] != (int)procConfig.proc_rank()) pstat |= PSTATUS_NOT_OWNED;
+ if (tag_ps[0] != (int)procConfig.proc_rank()) pstat |= PSTATUS_NOT_OWNED;
}
}
if (changed) {
- int tmp_p;
- MBEntityHandle tmp_h;
+ int tag_p;
+ MBEntityHandle tag_h;
if (num_exist > 2 && !(pstat & PSTATUS_MULTISHARED) &&
(pstat & PSTATUS_SHARED)) {
// must remove sharedp/h first, which really means set to default value
- tmp_p = -1;
- result = mbImpl->tag_set_data(sharedp_tag(), &new_h, 1, &tmp_p);
+ tag_p = -1;
+ result = mbImpl->tag_set_data(sharedp_tag(), &new_h, 1, &tag_p);
RRA("Couldn't set sharedp tag.");
- tmp_h = 0;
- result = mbImpl->tag_set_data(sharedh_tag(), &new_h, 1, &tmp_h);
+ tag_h = 0;
+ result = mbImpl->tag_set_data(sharedh_tag(), &new_h, 1, &tag_h);
RRA("Couldn't set sharedh tag.");
}
if (num_exist > 2) {
- result = mbImpl->tag_set_data(sharedps_tag(), &new_h, 1, tmp_ps);
+ std::fill(tag_ps+num_exist, tag_ps+MAX_SHARING_PROCS, -1);
+ std::fill(tag_hs+num_exist, tag_hs+MAX_SHARING_PROCS, 0);
+ result = mbImpl->tag_set_data(sharedps_tag(), &new_h, 1, tag_ps);
RRA("Couldn't set sharedps tag.");
- result = mbImpl->tag_set_data(sharedhs_tag(), &new_h, 1, tmp_hs);
+ result = mbImpl->tag_set_data(sharedhs_tag(), &new_h, 1, tag_hs);
RRA("Couldn't set sharedhs tag.");
- pstat |= PSTATUS_MULTISHARED;
+ pstat |= (PSTATUS_MULTISHARED | PSTATUS_SHARED);
}
else if (num_exist == 2) {
- tmp_p = (tmp_ps[0] == (int) procConfig.proc_rank() ? tmp_ps[1] : tmp_ps[0]);
- tmp_h = (tmp_ps[0] == (int) procConfig.proc_rank() ? tmp_hs[1] : tmp_hs[0]);
- result = mbImpl->tag_set_data(sharedp_tag(), &new_h, 1, &tmp_p);
+ tag_p = (tag_ps[0] == (int) procConfig.proc_rank() ? tag_ps[1] : tag_ps[0]);
+ tag_h = (tag_ps[0] == (int) procConfig.proc_rank() ? tag_hs[1] : tag_hs[0]);
+ result = mbImpl->tag_set_data(sharedp_tag(), &new_h, 1, &tag_p);
RRA("Couldn't set sharedps tag.");
- result = mbImpl->tag_set_data(sharedh_tag(), &new_h, 1, &tmp_h);
+ result = mbImpl->tag_set_data(sharedh_tag(), &new_h, 1, &tag_h);
RRA("Couldn't set sharedhs tag.");
pstat |= PSTATUS_SHARED;
}
- if (!is_iface && num_exist > 1)
+ if (is_iface && num_exist > 1)
pstat |= PSTATUS_INTERFACE;
// now set new pstatus
@@ -1408,23 +1506,44 @@
return MB_SUCCESS;
}
-MBEntityHandle MBParallelComm::find_existing_entity(const MBEntityHandle *connect,
- const int num_connect,
- const MBEntityType this_type)
+MBErrorCode MBParallelComm::find_existing_entity(const bool is_iface,
+ const int *ps,
+ const MBEntityHandle *hs,
+ const int num_ps,
+ const MBEntityHandle *connect,
+ const int num_connect,
+ const MBEntityType this_type,
+ std::vector<MBEntityHandle> &L2hloc,
+ std::vector<MBEntityHandle> &L2hrem,
+ std::vector<MBEntityHandle> &L2p,
+ MBEntityHandle &new_h)
{
+ if (!is_iface && num_ps > 2) {
+ for (unsigned int i = 0; i < L2hrem.size(); i++) {
+ if (L2hrem[i] == hs[0] && ps[0] == (int) L2p[i]) {
+ new_h = L2hloc[i];
+ return MB_SUCCESS;
+ }
+ }
+ }
+
+ // if we got here and it's a vertex, we don't need to look further
+ if (MBVERTEX == this_type) return MB_SUCCESS;
+
MBRange tmp_range;
MBErrorCode result = mbImpl->get_adjacencies(connect, num_connect,
MBCN::Dimension(this_type), false,
tmp_range);
- if (MB_MULTIPLE_ENTITIES_FOUND == result) {
- std::cerr << "Multiple entities found.";
- return 0;
+ RRA("Problem getting existing entity.");
+ if (!tmp_range.empty()) {
+ // found a corresponding entity - return target
+ new_h = *tmp_range.begin();
+ }
+ else {
+ new_h = 0;
}
- if (!tmp_range.empty())
- // found a corresponding entity - return target
- return *tmp_range.begin();
-
- else return 0;
+
+ return MB_SUCCESS;
}
MBErrorCode MBParallelComm::get_local_handles(const MBRange &remote_handles,
@@ -3210,7 +3329,7 @@
// done in a separate loop over procs because sometimes later procs
// need to add info to earlier procs' messages
- MBRange sent_ents[MAX_SHARING_PROCS], allsent;
+ MBRange sent_ents[MAX_SHARING_PROCS], allsent, tmp_range;
for (ind = 0, proc_it = buffProcs.begin();
proc_it != buffProcs.end(); proc_it++, ind++) {
if (!is_iface) {
@@ -3222,11 +3341,22 @@
result = get_iface_entities(buffProcs[ind], -1, sent_ents[ind]);
RRA("Failed to get interface layers.");
+/*
// remove vertices, since we know they're already shared
std::pair<MBRange::const_iterator,MBRange::const_iterator> vert_it =
sent_ents[ind].equal_range(MBVERTEX);
sent_ents[ind].erase(vert_it.first, vert_it.second);
+*/
}
+
+ // filter out entities already shared with destination
+ tmp_range.clear();
+ result = filter_pstatus(sent_ents[ind], PSTATUS_SHARED, PSTATUS_AND,
+ buffProcs[ind], &tmp_range);
+ RRA("Couldn't filter on owner.");
+ if (!tmp_range.empty())
+ sent_ents[ind] = sent_ents[ind].subtract(tmp_range);
+
allsent.merge(sent_ents[ind]);
}
@@ -3237,8 +3367,11 @@
std::vector<std::set<unsigned int> > entprocs(allsent.size());
for (ind = 0, proc_it = buffProcs.begin();
proc_it != buffProcs.end(); proc_it++, ind++) {
- for (rit = sent_ents[ind].begin(); rit != sent_ents[ind].end(); rit++)
- entprocs[allsent.index(*rit)].insert(*proc_it);
+ for (rit = sent_ents[ind].begin(); rit != sent_ents[ind].end(); rit++) {
+ int rind = allsent.index(*rit);
+ assert(rind < (int) allsent.size() && rind >= 0);
+ entprocs[rind].insert(*proc_it);
+ }
}
//===========================================
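
The reordered shared-handle layout implied by the PACK_INT/PACK_INTS/PACK_EH
calls above is, per entity: a count P, then P proc ranks, then P entity
handles, with a zero handle where the remote handle is not yet known. A
sketch of reading one such record from a raw buffer, assuming the same byte
layout the macros use (names here are illustrative only):

    // one record: [int P][int procs[P]][MBEntityHandle handles[P]]
    unsigned char *p = buff_ptr;   // assumed to point at a record
    int num_ps;
    memcpy(&num_ps, p, sizeof(int));           p += sizeof(int);
    std::vector<int> procs(num_ps);
    memcpy(&procs[0], p, num_ps*sizeof(int));  p += num_ps*sizeof(int);
    std::vector<MBEntityHandle> handles(num_ps);
    memcpy(&handles[0], p, num_ps*sizeof(MBEntityHandle));
    p += num_ps*sizeof(MBEntityHandle);
    // handles[i] == 0: remote handle on procs[i] not yet known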
Modified: MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp 2009-06-02 23:10:03 UTC (rev 2924)
+++ MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp 2009-06-03 02:42:45 UTC (rev 2925)
@@ -449,9 +449,31 @@
std::vector<unsigned int> &L2p,
MBRange &new_ents);
+ MBErrorCode pack_entities(MBRange &entities,
+ std::vector<unsigned char> &buff,
+ unsigned char *&buff_ptr,
+ const bool store_remote_handles,
+ const int to_proc,
+ const bool is_iface,
+ std::vector<std::set<unsigned int> > *entprocs = NULL);
+
+ //! unpack entities in buff_ptr
+ MBErrorCode unpack_entities(unsigned char *&buff_ptr,
+ const bool store_remote_handles,
+ const int from_ind,
+ const bool is_iface,
+ std::vector<std::vector<MBEntityHandle> > &L1h,
+ std::vector<MBEntityHandle> &L2hloc,
+ std::vector<MBEntityHandle> &L2hrem,
+ std::vector<unsigned int> &L2p,
+ MBRange &new_ents);
+
//! Call exchange_all_shared_handles, then compare the results with tag data
//! on local shared entities.
MBErrorCode check_all_shared_handles();
+
+ //! set rank for this pcomm; USED FOR TESTING ONLY!
+ void set_rank(unsigned int r);
private:
@@ -517,14 +539,6 @@
MPI_Request &recv_req,
int mesg_tag);
- MBErrorCode pack_entities(MBRange &entities,
- std::vector<unsigned char> &buff,
- unsigned char *&buff_ptr,
- const bool store_remote_handles,
- const int to_proc,
- const bool is_iface,
- std::vector<std::set<unsigned int> > *entprocs = NULL);
-
//! pack a range of entities with equal # verts per entity, along with
//! the range on the sending proc
MBErrorCode pack_entity_seq(const int nodes_per_entity,
@@ -535,16 +549,7 @@
std::vector<unsigned char> &buff,
unsigned char *&buff_ptr);
- //! unpack entities in buff_ptr
- MBErrorCode unpack_entities(unsigned char *&buff_ptr,
- const bool store_remote_handles,
- const int from_ind,
- const bool is_iface,
- std::vector<std::vector<MBEntityHandle> > &L1h,
- std::vector<MBEntityHandle> &L2hloc,
- std::vector<MBEntityHandle> &L2hrem,
- std::vector<unsigned int> &L2p,
- MBRange &new_ents);
+ MBErrorCode print_buffer(unsigned char *buff_ptr);
//! for all the entities in the received buffer; for each, save
//! entities in this instance which match connectivity, or zero if none found
@@ -608,10 +613,26 @@
const int ind);
//! given connectivity and type, find an existing entity, if there is one
- MBEntityHandle find_existing_entity(const MBEntityHandle *connect,
- const int num_connect,
- const MBEntityType this_type);
+ MBErrorCode find_existing_entity(const bool is_iface,
+ const int *ps,
+ const MBEntityHandle *hs,
+ const int num_ents,
+ const MBEntityHandle *connect,
+ const int num_connect,
+ const MBEntityType this_type,
+ std::vector<MBEntityHandle> &L2hloc,
+ std::vector<MBEntityHandle> &L2hrem,
+ std::vector<MBEntityHandle> &L2p,
+ MBEntityHandle &new_h);
+ MBErrorCode build_sharedhps_list(const MBEntityHandle entity,
+ const unsigned char pstatus,
+ const int sharedp,
+ const std::set<unsigned int> &entprocs,
+ unsigned int &num_ents,
+ int *tmp_procs,
+ MBEntityHandle *tmp_handles);
+
/**\brief Get list of tags for which to exchange data
*
* Get tags and entities for which to exchange tag data. This function
@@ -1005,5 +1026,11 @@
unsigned char *tmp_buff = const_cast<unsigned char*>(buff_ptr);
return unpack_remote_handles(from_proc, tmp_buff, is_iface, ind);
}
-
+
+inline void MBParallelComm::set_rank(unsigned int r)
+{
+ procConfig.proc_rank(r);
+ if (procConfig.proc_size() < r) procConfig.proc_size(r+1);
+}
+
#endif
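
The set_rank hook above exists only so unit tests can impersonate different
ranks within a single serial process. A minimal sketch of the pattern,
condensed from test_pack_shared_entities in pcomm_unit.cpp below:

    #include "MBCore.hpp"
    #include "MBParallelComm.hpp"
    #include <cassert>

    int main()
    {
      MBCore moab;
      MBParallelComm pcomm(&moab);

      // impersonate rank 2; proc_size is bumped to rank+1 when needed
      // so the rank stays in range
      pcomm.set_rank(2);
      assert(2 == pcomm.proc_config().proc_rank());
      assert(3 <= pcomm.proc_config().proc_size());

      // a test then packs as one rank, calls set_rank again, and
      // unpacks as the receiving rank
      return 0;
    }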
Modified: MOAB/branches/parallel_ghosting/parallel/MBProcConfig.hpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/MBProcConfig.hpp 2009-06-02 23:10:03 UTC (rev 2924)
+++ MOAB/branches/parallel_ghosting/parallel/MBProcConfig.hpp 2009-06-03 02:42:45 UTC (rev 2925)
@@ -79,7 +79,11 @@
//! get/set the communicator for this proc config
const MPI_Comm proc_comm() const {return procComm;}
void proc_comm(MPI_Comm this_comm) {procComm = this_comm;}
-
+
+ //! set rank/size; USED FOR TESTING ONLY!
+ void proc_rank(unsigned r) {procRank = r;}
+ void proc_size(unsigned s) {procSize = s;}
+
private:
//! MPI communicator set for this instance
Modified: MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp 2009-06-02 23:10:03 UTC (rev 2924)
+++ MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp 2009-06-03 02:42:45 UTC (rev 2925)
@@ -1,8 +1,11 @@
#include "MBParallelComm.hpp"
#include "MBParallelConventions.h"
+#include "MBTagConventions.hpp"
#include "MBCore.hpp"
#include "TestUtil.hpp"
#include <algorithm>
+#include <vector>
+#include <set>
#ifdef USE_MPI
# include <mpi.h>
@@ -34,6 +37,8 @@
void test_pack_variable_length_tag();
/** Test pack/unpack tag values*/
void test_pack_tag_handle_data();
+/** Test pack/unpack of shared entities*/
+void test_pack_shared_entities();
/** Test filter_pstatus function*/
void test_filter_pstatus();
@@ -58,6 +63,7 @@
//num_err += RUN_TEST( test_pack_bit_tag_data );
num_err += RUN_TEST( test_pack_variable_length_tag );
num_err += RUN_TEST( test_pack_tag_handle_data );
+ num_err += RUN_TEST( test_pack_shared_entities );
num_err += RUN_TEST( test_filter_pstatus );
#ifdef USE_MPI
@@ -67,7 +73,7 @@
}
/* Utility method: pack mesh data, clear moab instance, and unpack */
-void pack_unpack_mesh( MBCore& moab, MBRange& entities )
+void pack_unpack_noremoteh( MBCore& moab, MBRange& entities )
{
MBErrorCode rval;
if (entities.empty()) {
@@ -107,10 +113,10 @@
delete pcomm;
}
-void pack_unpack_mesh( MBCore& moab )
+void pack_unpack_noremoteh( MBCore& moab )
{
MBRange empty;
- pack_unpack_mesh( moab, empty );
+ pack_unpack_noremoteh( moab, empty );
}
/* Utility method -- check expected sizes */
@@ -213,7 +219,286 @@
delete [] elems;
}
+#define NVERTS 25
+#define NQUADS 16
+MBErrorCode create_shared_grid(MBInterface &moab, MBRange &verts, MBRange &quads)
+{
+//
+// P2______
+// /__/__/ /|P1
+// y: /__/__/ /||
+// 1 _____ ||/ _____ 1
+// | | | |/|-1 | | |
+// .5 |__|__| ||/ |__|__| .5
+// |P0| | |/-.5 |P3| |
+// 0 |__|__| z:0 |__|__| 0
+// x:-1 -.5 0 0 .5 1
+//
+// nodes: P2
+// 18 16 14 P1
+// 17 15 13 14
+// 6 7 8 13 12
+// 8 11 10
+// 6 7 8 5 9 8 23 24 P3
+// 3 4 5 2 5 21 22
+// P0 0 1 2 2 19 20
+ int connecti[] = {
+ 0, 1, 4, 3, 1, 2, 5, 4, 3, 4, 7, 6, 4, 5, 8, 7, // P0
+ 2, 9, 11, 5, 9, 10, 12, 11, 5, 11, 13, 8, 11, 12, 14, 13, // P1
+ 6, 7, 15, 17, 7, 8, 13, 15, 17, 15, 16, 18, 15, 13, 14, 16, // P2
+ 2, 19, 21, 5, 19, 20, 22, 21, 5, 21, 23, 8, 21, 22, 24, 23 // P3
+ };
+ double xyz[] = {
+ -1.0, 0.0, 0.0, -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.5, 0.0,
+ -0.5, 0.5, 0.0, 0.0, 0.5, 0.0, -1.0, 1.0, 0.0, -0.5, 1.0, 0.0,
+ 0.0, 1.0, 0.0, // n0-8
+ 0.0, 0.0, -0.5, 0.0, 0.0, -1.0, 0.0, 0.5, -0.5,
+ 0.0, 0.5, -1.0, 0.0, 1.0, -0.5, 0.0, 1.0, -1.0, // n9-14
+ -0.5, 1.0, -0.5, -0.5, 1.0, -1.0, -1.0, 1.0, -0.5, -1.0, 1.0, -1.0, // n15-18
+ 0.5, 0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.5, 0.0, 1.0, 0.5, 0.0,
+ 0.5, 1.0, 0.0, 1.0, 1.0, 0.0, // n19-24
+ };
+
+ // create vertices/quads
+ MBErrorCode result = moab.create_vertices(xyz, NVERTS, verts);
+ if (MB_SUCCESS != result) return result;
+ MBEntityHandle connect[4*NQUADS];
+ for (unsigned int i = 0; i < 4*NQUADS; i++) connect[i] = verts[connecti[i]];
+ for (unsigned int i = 0; i < NQUADS; i++) {
+ MBEntityHandle dum_quad;
+ result = moab.create_element(MBQUAD, connect+4*i, 4, dum_quad);
+ if (MB_SUCCESS != result) return result;
+ quads.insert(dum_quad);
+ }
+
+ // global ids
+ int gids[4*NQUADS];
+ for (unsigned int i = 0; i < 4*NQUADS; i++) gids[i] = i;
+ MBTag gid_tag;
+ int dum_default = -1;
+ result = moab.tag_create(GLOBAL_ID_TAG_NAME, sizeof(int), MB_TAG_DENSE,
+ MB_TYPE_INTEGER, gid_tag, &dum_default, true);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(gid_tag, verts, gids);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(gid_tag, quads, gids);
+ if (MB_SUCCESS != result) return result;
+
+ return result;
+}
+
+MBErrorCode set_owners(MBInterface &moab,
+ MBParallelComm &pcomm,
+ MBRange &verts,
+ bool edges_too)
+{
+ unsigned int owner = pcomm.proc_config().proc_rank();
+
+ // vert owners
+ MBEntityHandle dum_verts[9];
+ int dum_owners[MAX_SHARING_PROCS];
+ MBEntityHandle dum_handles[MAX_SHARING_PROCS];
+ unsigned char pstatus[MAX_SHARING_PROCS];
+ unsigned char dum_status;
+
+ MBErrorCode result = MB_FAILURE;
+
+ std::fill(dum_owners, dum_owners+MAX_SHARING_PROCS, -1);
+ std::fill(dum_handles, dum_handles+MAX_SHARING_PROCS, 0);
+ std::fill(pstatus, pstatus+MAX_SHARING_PROCS, 0x0);
+
+ // multi-shared vertices, P0-P1-P3
+ if (owner == 0)
+ dum_status = (PSTATUS_INTERFACE | PSTATUS_SHARED | PSTATUS_MULTISHARED);
+ else
+ dum_status = (PSTATUS_INTERFACE | PSTATUS_NOT_OWNED | PSTATUS_SHARED | PSTATUS_MULTISHARED);
+ dum_owners[0] = 0; dum_owners[1] = 1; dum_owners[2] = 2; dum_owners[3] = 3;
+ dum_handles[0] = verts[8]; dum_handles[1] = verts[8]; dum_handles[2] = verts[8]; dum_handles[3] = verts[8];
+ result = moab.tag_set_data(pcomm.sharedps_tag(), dum_handles, 1, dum_owners);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.sharedhs_tag(), dum_handles, 1, dum_handles);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.pstatus_tag(), dum_handles, 1, &dum_status);
+ if (MB_SUCCESS != result) return result;
+ dum_owners[3] = -1;
+ dum_handles[3] = 0;
+ dum_handles[0] = verts[5]; dum_handles[1] = verts[5]; dum_handles[2] = verts[5];
+ result = moab.tag_set_data(pcomm.sharedps_tag(), dum_handles, 1, dum_owners);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.sharedhs_tag(), dum_handles, 1, dum_handles);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.pstatus_tag(), dum_handles, 1, &dum_status);
+ if (MB_SUCCESS != result) return result;
+ dum_handles[0] = verts[2]; dum_handles[1] = verts[2]; dum_handles[2] = verts[2];
+ result = moab.tag_set_data(pcomm.sharedps_tag(), dum_handles, 1, dum_owners);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.sharedhs_tag(), dum_handles, 1, dum_handles);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.pstatus_tag(), dum_handles, 1, &dum_status);
+ if (MB_SUCCESS != result) return result;
+
+ // shared, P0-P2
+ dum_verts[0] = verts[6]; dum_verts[1] = verts[7]; dum_verts[2] = 0; dum_verts[3] = 0;
+ if (owner == 0) {
+ dum_status = (PSTATUS_INTERFACE | PSTATUS_SHARED);
+ dum_owners[0] = 2; dum_owners[1] = 2; dum_owners[2] = -1;
+ }
+ else {
+ dum_owners[0] = 0; dum_owners[1] = 0; dum_owners[2] = -1;
+ dum_status = (PSTATUS_INTERFACE | PSTATUS_SHARED | PSTATUS_NOT_OWNED);
+ }
+ result = moab.tag_set_data(pcomm.sharedp_tag(), dum_verts, 2, dum_owners);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.sharedh_tag(), dum_verts, 2, dum_verts);
+ if (MB_SUCCESS != result) return result;
+
+ // shared, P1-P2
+ dum_verts[0] = verts[13]; dum_verts[1] = verts[14]; dum_verts[2] = 0;
+ if (owner == 1) {
+ dum_status = (PSTATUS_INTERFACE | PSTATUS_SHARED);
+ dum_owners[0] = 2; dum_owners[1] = 2; dum_owners[2] = -1;
+ }
+ else {
+ dum_status = (PSTATUS_INTERFACE | PSTATUS_SHARED | PSTATUS_NOT_OWNED);
+ dum_owners[0] = 1; dum_owners[1] = 1; dum_owners[2] = -1;
+ }
+ result = moab.tag_set_data(pcomm.sharedp_tag(), dum_verts, 2, dum_owners);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.sharedh_tag(), dum_verts, 2, dum_verts);
+ if (MB_SUCCESS != result) return result;
+
+ if (edges_too) {
+ MBRange tmpr;
+ // P0 - P1 - P3
+ pstatus[0] = (PSTATUS_INTERFACE | PSTATUS_SHARED | PSTATUS_MULTISHARED);
+ dum_verts[0] = verts[8]; dum_verts[1] = verts[5];
+ result = moab.get_adjacencies(verts, 1, true, tmpr);
+ if (MB_SUCCESS != result || tmpr.size() != 1) return MB_FAILURE;
+ dum_handles[0] = dum_handles[1] = dum_handles[2] = *tmpr.begin(); tmpr.clear();
+ dum_handles[3] = 0;
+ if (owner != 0)
+ pstatus[0] |= PSTATUS_NOT_OWNED;
+ dum_owners[0] = 0; dum_owners[1] = 1; dum_owners[2] = 3;
+ result = moab.tag_set_data(pcomm.sharedps_tag(), dum_handles, 1, dum_owners);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.sharedhs_tag(), dum_handles, 1, dum_handles);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.pstatus_tag(), dum_handles, 1, pstatus);
+ if (MB_SUCCESS != result) return result;
+
+ dum_verts[0] = verts[5]; dum_verts[1] = verts[2];
+ result = moab.get_adjacencies(verts, 1, true, tmpr);
+ if (MB_SUCCESS != result || tmpr.size() != 1) return MB_FAILURE;
+ dum_handles[0] = dum_handles[1] = dum_handles[2] = *tmpr.begin(); tmpr.clear();
+ result = moab.tag_set_data(pcomm.sharedps_tag(), dum_handles, 1, dum_owners);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.sharedhs_tag(), dum_handles, 1, dum_handles);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.pstatus_tag(), dum_handles, 1, pstatus);
+ if (MB_SUCCESS != result) return result;
+
+ // P0 - P2
+ pstatus[0] = (PSTATUS_INTERFACE | PSTATUS_SHARED);
+ dum_verts[0] = verts[6]; dum_verts[1] = verts[7];
+ result = moab.get_adjacencies(verts, 1, true, tmpr);
+ if (MB_SUCCESS != result || tmpr.size() != 1) return MB_FAILURE;
+ dum_handles[0] = *tmpr.begin(); tmpr.clear();
+ dum_verts[0] = verts[7]; dum_verts[1] = verts[8];
+ result = moab.get_adjacencies(verts, 1, true, tmpr);
+ if (MB_SUCCESS != result || tmpr.size() != 1) return MB_FAILURE;
+ dum_handles[1] = *tmpr.begin(); tmpr.clear();
+ dum_handles[2] = 0;
+ if (owner == 0) {
+ dum_owners[0] = 2; dum_owners[1] = 2; dum_owners[2] = -1;
+ }
+ else {
+ pstatus[0] |= PSTATUS_NOT_OWNED;
+ dum_owners[0] = 0; dum_owners[1] = 0; dum_owners[2] = -1;
+ }
+ result = moab.tag_set_data(pcomm.sharedp_tag(), dum_handles, 2, dum_owners);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.sharedh_tag(), dum_handles, 2, dum_handles);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.pstatus_tag(), dum_handles, 2, pstatus);
+ if (MB_SUCCESS != result) return result;
+
+ // P1 - P2
+ pstatus[0] = (PSTATUS_INTERFACE | PSTATUS_SHARED);
+ dum_verts[0] = verts[8]; dum_verts[1] = verts[13];
+ result = moab.get_adjacencies(verts, 1, true, tmpr);
+ if (MB_SUCCESS != result || tmpr.size() != 1) return MB_FAILURE;
+ dum_handles[0] = *tmpr.begin(); tmpr.clear();
+ dum_verts[0] = verts[13]; dum_verts[1] = verts[14];
+ result = moab.get_adjacencies(verts, 1, true, tmpr);
+ if (MB_SUCCESS != result || tmpr.size() != 1) return MB_FAILURE;
+ dum_handles[1] = *tmpr.begin(); tmpr.clear();
+ dum_handles[2] = 0;
+ if (owner == 1) {
+ dum_owners[0] = 2; dum_owners[1] = 2; dum_owners[2] = -1;
+ }
+ else {
+ pstatus[0] |= PSTATUS_NOT_OWNED;
+ dum_owners[0] = 1; dum_owners[1] = 1; dum_owners[2] = -1;
+ }
+ result = moab.tag_set_data(pcomm.sharedp_tag(), dum_handles, 2, dum_owners);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.sharedh_tag(), dum_handles, 2, dum_handles);
+ if (MB_SUCCESS != result) return result;
+ result = moab.tag_set_data(pcomm.pstatus_tag(), dum_handles, 2, pstatus);
+ if (MB_SUCCESS != result) return result;
+ }
+
+ return result;
+}
+
+MBErrorCode set_owners(MBInterface &moab,
+ MBParallelComm &pcomm,
+ MBRange &ents,
+ unsigned char pstat,
+ std::vector<int> &procs,
+ std::vector<MBEntityHandle> &handles)
+{
+ int owners[MAX_SHARING_PROCS];
+ MBEntityHandle tmp_handles[MAX_SHARING_PROCS];
+ int negone = -1;
+ MBEntityHandle zeroh = 0;
+ assert(handles.size() == ents.size()*procs.size());
+
+ MBErrorCode result = MB_SUCCESS;
+
+ std::fill(owners, owners+MAX_SHARING_PROCS, -1);
+ std::fill(tmp_handles, tmp_handles+MAX_SHARING_PROCS, 0);
+ std::copy(procs.begin(), procs.end(), owners);
+ std::vector<MBEntityHandle>::iterator vit = handles.begin();
+ for (MBRange::iterator rit = ents.begin(); rit != ents.end(); rit++) {
+ if (procs.size() > 1 || procs.empty()) {
+ result = moab.tag_set_data(pcomm.sharedp_tag(), &(*rit), 1, &negone);
+ result = moab.tag_set_data(pcomm.sharedh_tag(), &(*rit), 1, &zeroh);
+ }
+ if (procs.size() > 1) {
+ result = moab.tag_set_data(pcomm.sharedps_tag(), &(*rit), 1, owners);
+ for (unsigned int i = 0; i < procs.size(); i++) {
+ assert(vit != handles.end());
+ tmp_handles[i] = *vit++;
+ }
+ result = moab.tag_set_data(pcomm.sharedhs_tag(), &(*rit), 1, tmp_handles);
+ }
+ else {
+ result = moab.tag_set_data(pcomm.sharedp_tag(), &(*rit), 1, owners);
+ if (vit != handles.end()) tmp_handles[0] = *vit++;
+ result = moab.tag_set_data(pcomm.sharedh_tag(), &(*rit), 1, tmp_handles);
+ result = moab.tag_delete_data(pcomm.sharedps_tag(), &(*rit), 1);
+ result = moab.tag_delete_data(pcomm.sharedhs_tag(), &(*rit), 1);
+ }
+ result = moab.tag_set_data(pcomm.pstatus_tag(), &(*rit), 1, &pstat);
+ std::fill(owners, owners+procs.size(), -1);
+ std::fill(tmp_handles, tmp_handles+procs.size(), 0);
+ }
+
+ return result;
+}
+
void test_pack_vertices()
{
MBCore moab;
@@ -229,7 +514,7 @@
rval = moab.create_vertices( coords, num_verts, verts );
CHECK_ERR(rval);
- pack_unpack_mesh( moab, verts );
+ pack_unpack_noremoteh( moab, verts );
CHECK_EQUAL( num_verts, verts.size() );
double coords2[3*num_verts];
@@ -359,7 +644,7 @@
elems.insert( pyr );
// pack and unpack mesh
- pack_unpack_mesh( moab, elems );
+ pack_unpack_noremoteh( moab, elems );
// check_counts
check_sizes( moab, num_verts, 0, num_tri, num_quad, 0,
@@ -471,7 +756,7 @@
elems.insert( tri );
// pack and unpack mesh
- pack_unpack_mesh( moab, elems );
+ pack_unpack_noremoteh( moab, elems );
// check_counts
check_sizes( moab, num_vert, 0, num_tri, 0, 0, num_tet, 0, 0, 0, 0, 0 );
@@ -599,7 +884,7 @@
elems.insert( polyhedron );
elems.insert( octagon );
std::copy( tri, tri+num_tri, mb_range_inserter(elems) );
- pack_unpack_mesh( moab, elems );
+ pack_unpack_noremoteh( moab, elems );
// check counts
check_sizes( moab, num_vert, 0, num_tri, 0, num_polygon,
@@ -722,7 +1007,7 @@
CHECK_ERR(rval);
// do pack and unpack
- pack_unpack_mesh( moab, entities );
+ pack_unpack_noremoteh( moab, entities );
// get entities by type
verts.clear();
@@ -823,7 +1108,7 @@
// Will fail unless we also pass in set contents explicitly
MBRange entities( vertices );
entities.insert( set );
- pack_unpack_mesh( moab, entities );
+ pack_unpack_noremoteh( moab, entities );
// expect single set in mesh
entities.clear();
@@ -878,7 +1163,7 @@
sets.insert( set3 );
// pack and unpack
- pack_unpack_mesh( moab, sets );
+ pack_unpack_noremoteh( moab, sets );
// get sets
sets.clear();
@@ -980,7 +1265,7 @@
CHECK( MB_SUCCESS == rval && 0 == count );
// pack and unpack
- pack_unpack_mesh( moab, sets );
+ pack_unpack_noremoteh( moab, sets );
// get sets
sets.clear();
@@ -1069,7 +1354,7 @@
// pack and unpack
MBRange ents;
- pack_unpack_mesh( moab, ents );
+ pack_unpack_noremoteh( moab, ents );
elems.clear();
rval = mb.get_entities_by_type( 0, MBHEX, elems );
CHECK_ERR(rval);
@@ -1154,7 +1439,7 @@
// pack and unpack
MBRange ents;
- pack_unpack_mesh( moab, ents );
+ pack_unpack_noremoteh( moab, ents );
verts.clear();
rval = mb.get_entities_by_type( 0, MBVERTEX, verts );
CHECK_ERR(rval);
@@ -1227,7 +1512,7 @@
// pack and unpack
MBRange ents;
- pack_unpack_mesh( moab, ents );
+ pack_unpack_noremoteh( moab, ents );
elems.clear();
verts.clear();
sets.clear();
@@ -1322,7 +1607,7 @@
// pack and unpack
MBRange ents;
- pack_unpack_mesh( moab, ents );
+ pack_unpack_noremoteh( moab, ents );
verts.clear();
rval = mb.get_entities_by_type( 0, MBVERTEX, verts );
CHECK_ERR(rval);
@@ -1403,7 +1688,7 @@
}
// pack and unpack
- pack_unpack_mesh( moab );
+ pack_unpack_noremoteh( moab );
verts.clear();
rval = mb.get_entities_by_type( 0, MBVERTEX, verts );
CHECK_ERR(rval);
@@ -1509,7 +1794,7 @@
}
// pack and unpack
- pack_unpack_mesh( moab );
+ pack_unpack_noremoteh( moab );
verts.clear();
rval = mb.get_entities_by_type( 0, MBVERTEX, verts );
CHECK_ERR(rval);
@@ -1572,6 +1857,159 @@
}
}
+MBErrorCode get_entities(MBInterface &mb,
+ std::vector<MBEntityHandle> &ent_verts,
+ int verts_per_entity, int dim,
+ MBRange &ents)
+{
+ assert(!(ent_verts.size()%verts_per_entity));
+ unsigned int num_ents = ent_verts.size() / verts_per_entity;
+ MBRange dum_ents;
+ MBErrorCode result;
+ for (unsigned int i = 0; i < num_ents; i++) {
+ result = mb.get_adjacencies(&ent_verts[verts_per_entity*i], verts_per_entity,
+ dim, true, dum_ents);
+ CHECK_ERR(result);
+ assert(dum_ents.size() == 1);
+ ents.merge(dum_ents);
+ dum_ents.clear();
+ }
+ return MB_SUCCESS;
+}
+
+void test_pack_shared_entities()
+{
+ MBRange::iterator i;
+ MBCore moab;
+ MBInterface& mb = moab;
+ MBParallelComm *pcomm = new MBParallelComm( &moab );
+ MBErrorCode rval;
+ std::vector<std::vector<MBEntityHandle> > L1h;
+ std::vector<MBEntityHandle> L2hloc;
+ std::vector<MBEntityHandle> L2hrem;
+ std::vector<unsigned int> L2p;
+ MBRange new_ents;
+
+ // create some mesh, from proc 0's perspective
+ MBRange verts, quads;
+ create_shared_grid(mb, verts, quads);
+ MBRange sent_ents;
+ std::vector<MBEntityHandle> ent_verts, dum_handles;
+ std::vector<int> dum_procs;
+ std::vector<std::set<unsigned int> > entprocs(4);
+ std::vector<unsigned char> buffer;
+ buffer.reserve(1);
+ unsigned char *buff_ptr = &buffer[0];
+
+ //========================
+ // interface, shared, 0->2
+ pcomm->set_rank(0);
+ rval = set_owners(moab, *pcomm, verts, false);
+ CHECK_ERR(rval);
+ // get edges that we want
+ ent_verts.push_back(verts[6]); ent_verts.push_back(verts[7]);
+ ent_verts.push_back(verts[7]); ent_verts.push_back(verts[8]);
+ rval = get_entities(mb, ent_verts, 2, 1, sent_ents);
+ CHECK_ERR(rval);
+ assert(2 == sent_ents.size());
+ // set entprocs
+ entprocs[0].insert(1); entprocs[1].insert(1);
+
+ rval = pcomm->pack_entities(sent_ents, buffer, buff_ptr, true, 2,
+ true, &entprocs);
+ CHECK_ERR(rval);
+
+ // now unpack the buffer
+ pcomm->set_rank(2);
+ rval = set_owners(moab, *pcomm, verts, false);
+ CHECK_ERR(rval);
+ buff_ptr = &buffer[0];
+ rval = pcomm->unpack_entities(buff_ptr, true, 0, true,
+ L1h, L2hloc, L2hrem, L2p, new_ents);
+ if (!L1h.empty() || !L2p.empty() || !new_ents.empty()) rval = MB_FAILURE;
+ CHECK_ERR(rval);
+
+ ent_verts.clear(); sent_ents.clear();
+ entprocs[0].clear(); entprocs[1].clear();
+ buffer.clear(); buff_ptr = &buffer[0];
+
+ //========================
+ // interface, multishared, 1st message, 0->1
+ pcomm->set_rank(0);
+ ent_verts.push_back(verts[8]); ent_verts.push_back(verts[5]);
+ ent_verts.push_back(verts[5]); ent_verts.push_back(verts[2]);
+ rval = get_entities(mb, ent_verts, 2, 1, sent_ents);
+ CHECK_ERR(rval);
+ assert(2 == sent_ents.size());
+ // sending these edges to 1 and 3
+ entprocs[0].insert(1); entprocs[0].insert(3);
+ entprocs[1].insert(1); entprocs[1].insert(3);
+
+ rval = pcomm->pack_entities(sent_ents, buffer, buff_ptr, true, 1,
+ true, &entprocs);
+ CHECK_ERR(rval);
+
+ // now unpack the buffer
+ pcomm->set_rank(1);
+ rval = set_owners(moab, *pcomm, verts, false);
+ CHECK_ERR(rval);
+ buff_ptr = &buffer[0];
+ rval = pcomm->unpack_entities(buff_ptr, true, 0, true,
+ L1h, L2hloc, L2hrem, L2p, new_ents);
+ if (!L1h.empty() || !L2p.empty() || !new_ents.empty()) rval = MB_FAILURE;
+ CHECK_ERR(rval);
+
+ entprocs[0].clear(); entprocs[1].clear();
+ buffer.clear(); buff_ptr = &buffer[0];
+
+ //========================
+ // interface, multishared, 2nd message, 3->1
+ pcomm->set_rank(3);
+ // sending these edges to 0 and 1
+ entprocs[0].insert(0); entprocs[0].insert(1);
+ entprocs[1].insert(0); entprocs[1].insert(1);
+ // need to reset sharing data on edges, since it was set in prev unpack
+ rval = set_owners(moab, *pcomm, sent_ents, 0x0, dum_procs, dum_handles);
+ rval = pcomm->pack_entities(sent_ents, buffer, buff_ptr, true, 1,
+ true, &entprocs);
+ CHECK_ERR(rval);
+
+ // now unpack the buffer
+ pcomm->set_rank(1);
+ rval = set_owners(moab, *pcomm, verts, false);
+ dum_procs.push_back(0); dum_procs.push_back(1); dum_procs.push_back(3);
+ dum_handles.push_back(*sent_ents.begin()); dum_handles.push_back(*sent_ents.begin()); dum_handles.push_back(0);
+ dum_handles.push_back(*sent_ents.rbegin()); dum_handles.push_back(*sent_ents.rbegin()); dum_handles.push_back(0);
+ unsigned char pstat =
+ PSTATUS_SHARED | PSTATUS_MULTISHARED | PSTATUS_INTERFACE | PSTATUS_NOT_OWNED;
+ rval = set_owners(moab, *pcomm, sent_ents, pstat, dum_procs, dum_handles);
+ CHECK_ERR(rval);
+ buff_ptr = &buffer[0];
+ rval = pcomm->unpack_entities(buff_ptr, true, 3, true,
+ L1h, L2hloc, L2hrem, L2p, new_ents);
+ if (!L1h.empty()) rval = MB_FAILURE;
+ CHECK_ERR(rval);
+
+ ent_verts.clear(); sent_ents.clear(); dum_procs.clear(); dum_handles.clear();
+ entprocs[0].clear(); entprocs[1].clear();
+ buffer.clear(); buff_ptr = &buffer[0];
+
+
+ //========================
+ // ghost, unshared, 2->1
+
+ //========================
+ // ghost, multishared, 1st message, 0->2
+ //
+ //========================
+ // ghost, multishared, 2nd message, 1->2
+
+ //========================
+ // pack and unpack
+
+
+}
+
void test_filter_pstatus()
{
MBRange::iterator i;
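
For reference, the sharing-tag convention the set_owners helpers above write
out, condensed into one sketch (values mirror the P0-P1-P3 multishared case;
h_on_other is a hypothetical remote handle):

    // two-way sharing: scalar sharedp/sharedh tags
    int sharedp = 2;                     // the one other sharing proc
    MBEntityHandle sharedh = h_on_other; // handle on that proc

    // three or more sharers: sharedps/sharedhs arrays, padded with -1/0
    int sharedps[MAX_SHARING_PROCS];
    MBEntityHandle sharedhs[MAX_SHARING_PROCS];
    std::fill(sharedps, sharedps + MAX_SHARING_PROCS, -1);
    std::fill(sharedhs, sharedhs + MAX_SHARING_PROCS, MBEntityHandle(0));
    sharedps[0] = 0; sharedps[1] = 1; sharedps[2] = 3; // owner (rank 0) first

    // pstatus records the kind of sharing; PSTATUS_NOT_OWNED is added
    // on every proc except the owner
    unsigned char pstat =
      PSTATUS_INTERFACE | PSTATUS_SHARED | PSTATUS_MULTISHARED;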
Modified: MOAB/branches/parallel_ghosting/tools/iMesh/iMeshP_MOAB.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/tools/iMesh/iMeshP_MOAB.cpp 2009-06-02 23:10:03 UTC (rev 2924)
+++ MOAB/branches/parallel_ghosting/tools/iMesh/iMeshP_MOAB.cpp 2009-06-03 02:42:45 UTC (rev 2925)
@@ -1472,16 +1472,18 @@
--types.second;
}
- MBTag src_handle = itaps_cast<MBTag>(source_tag);
- MBTag dst_handle = itaps_cast<MBTag>(dest_tag);
+ std::vector<MBTag> src_tags(1, itaps_cast<MBTag>(source_tag));
+ std::vector<MBTag> dst_tags(1, itaps_cast<MBTag>(dest_tag));
+
MBErrorCode rval;
MBRange entities;
for (MBEntityType t = types.first; t <= types.second; ++t) {
- rval = MBI->get_entities_by_type_and_tag( 0, t, &src_handle, 0, 1, entities, MBInterface::UNION );
+ rval = MBI->get_entities_by_type_and_tag( 0, t, &src_tags[0], 0, 1,
+ entities, MBInterface::UNION );
CHKERR(rval);
}
- rval = pcomm->exchange_tags( src_handle, dst_handle, entities );
+ rval = pcomm->exchange_tags( src_tags, dst_tags, entities );
CHKERR(rval);
RETURN (iBase_SUCCESS);
}
@@ -1499,10 +1501,10 @@
const MBEntityHandle* ents = itaps_cast<const MBEntityHandle*>(entities);
std::copy( ents, ents+entities_size, mb_range_inserter(range) );
- MBTag src_handle = itaps_cast<MBTag>(source_tag);
- MBTag dst_handle = itaps_cast<MBTag>(dest_tag);
+ std::vector<MBTag> src_tags(1, itaps_cast<MBTag>(source_tag));
+ std::vector<MBTag> dst_tags(1, itaps_cast<MBTag>(dest_tag));
MBParallelComm* pcomm = PCOMM;
- MBErrorCode rval = pcomm->exchange_tags( src_handle, dst_handle, range );
+ MBErrorCode rval = pcomm->exchange_tags( src_tags, dst_tags, range );
CHKERR(rval);
RETURN (iBase_SUCCESS);
}
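
exchange_tags now takes parallel vectors of source and destination tags, so
several tag pairs can be exchanged in one call. A minimal sketch under that
interface (temperature_tag and pressure_tag are hypothetical handles,
assumed already created; entities is the range to exchange on):

    std::vector<MBTag> src_tags, dst_tags;
    src_tags.push_back(temperature_tag); dst_tags.push_back(temperature_tag);
    src_tags.push_back(pressure_tag);    dst_tags.push_back(pressure_tag);

    MBRange entities;  // filled with entities owning the tags
    MBErrorCode rval = pcomm->exchange_tags(src_tags, dst_tags, entities);
    CHKERR(rval);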
Modified: MOAB/branches/parallel_ghosting/tools/mbcoupler/mbcoupler_test.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/tools/mbcoupler/mbcoupler_test.cpp 2009-06-02 23:10:03 UTC (rev 2924)
+++ MOAB/branches/parallel_ghosting/tools/mbcoupler/mbcoupler_test.cpp 2009-06-03 02:42:45 UTC (rev 2925)
@@ -151,7 +151,7 @@
// now figure out which vertices are shared
for (unsigned int p = 0; p < pcs.size(); p++) {
for (int i = 0; i < 4; i++) {
- tmp_result = pcs[p]->get_iface_entities(-1, iface_ents[i], i);
+ tmp_result = pcs[p]->get_iface_entities(-1, i, iface_ents[i]);
if (MB_SUCCESS != tmp_result) {
std::cerr << "get_iface_entities returned error on proc "