[MOAB-dev] r2959 - MOAB/branches/parallel_ghosting/parallel

tautges at mcs.anl.gov
Mon Jun 22 12:55:39 CDT 2009


Author: tautges
Date: 2009-06-22 12:55:39 -0500 (Mon, 22 Jun 2009)
New Revision: 2959

Modified:
   MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp
   MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp
   MOAB/branches/parallel_ghosting/parallel/parallel_unit_tests.cpp
   MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp
Log:
One major bug fix in unpacking (listed first); lots of other code for
convenience and testing.  The 2d test in pcomm_unit now runs; the 3d
test does not yet.


- in unpack_entities, keep ALL entities, even those matching an
  existing entity, in a vector (msg_ents); later entities in the same
  message reference them by position, and this vector is distinct from
  the list of new entities created from this message (see the first
  sketch after this list)
- for sending back remote handles, added L1p, L1hloc and L1hrem in
  place of the single L1h, because sometimes we're sending to a proc
  for which we don't know the handle on that proc; in that case we put
  the owning proc in L1p and the owning proc's handle in L1hrem (see
  the second sketch after this list)
- lots of debugging of the processing of sharing data
- wrote variants of several functions in MBPC (MBParallelComm),
  allowing a program to test initialization (through
  exchange_ghost_cells and resolve_shared_ents) on a vector of MBPC's
  rather than from within a parallel program; this will also
  facilitate future debugging of parallel communication (see the third
  sketch after this list)
- created 2d and 3d tests and call them from pcomm_unit
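A minimal sketch (not code from this commit) of the first item above:
during unpacking, every entity handle is appended to msg_ents in
message order, whether it matched an existing entity or was created
here, while only the created ones go into new_ents; later connectivity
entries that encode "entity #k of this message" are resolved against
msg_ents.  The EntityHandle typedef and the helper struct are
stand-ins, not MOAB API.

#include <cstddef>
#include <vector>

typedef unsigned long EntityHandle;     // stand-in for MBEntityHandle

struct UnpackLists {
  std::vector<EntityHandle> msg_ents;   // ALL entities of this message, in message order
  std::vector<EntityHandle> new_ents;   // only the entities created by this unpack

  void record(EntityHandle h, bool created_here) {
    msg_ents.push_back(h);                 // always, so later message indices stay valid
    if (created_here) new_ents.push_back(h);
  }
  EntityHandle resolve(std::size_t msg_index) const {
    return msg_ents[msg_index];            // indexing new_ents here was the old bug
  }
};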

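A minimal sketch of the L1 bookkeeping from the second item, again
with stand-in names rather than the actual MBParallelComm members: for
destination index idx, L1hloc[idx][k] is the local handle, and
L1hrem[idx][k] is either the destination proc's own handle
(L1p[idx][k] == -1) or, when that handle isn't known yet, the owning
proc's handle with the owner's rank stored in L1p[idx][k].

#include <cassert>
#include <vector>

typedef unsigned long EntityHandle;     // stand-in for MBEntityHandle

struct L1Lists {
  std::vector<std::vector<EntityHandle> > hloc;  // local handles, one list per destination proc
  std::vector<std::vector<EntityHandle> > hrem;  // matching remote or owner handles
  std::vector<std::vector<int> >          p;     // -1: hrem entry is the destination's handle;
                                                 // otherwise: rank of the owner whose handle is stored

  void record(int idx, EntityHandle local_h, EntityHandle remote_h,
              EntityHandle owner_h, int owner_rank) {
    if ((int)hloc.size() <= idx) {
      hloc.resize(idx+1); hrem.resize(idx+1); p.resize(idx+1);
    }
    if (remote_h) {                 // we already know the handle on the destination proc
      p[idx].push_back(-1);
      hrem[idx].push_back(remote_h);
    }
    else {                          // otherwise fall back to the owner's handle
      assert(-1 != owner_rank);
      p[idx].push_back(owner_rank);
      hrem[idx].push_back(owner_h);
    }
    hloc[idx].push_back(local_h);
  }
};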

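Finally, a hedged usage sketch of the new static variants described in
the fourth item; the signatures come from the MBParallelComm.hpp
changes below, but the include paths, the 2d dimension choices, and
the driver function itself are illustrative assumptions, not part of
this commit.

#include <vector>
#include "MBInterface.hpp"
#include "MBParallelComm.hpp"

MBErrorCode serial_ghost_test(MBInterface **instances, unsigned int np)
{
    // one MBParallelComm per "processor", all living in this one program;
    // caller is responsible for deleting the pcomm objects afterwards
  std::vector<MBParallelComm*> pcs(np);
  for (unsigned int p = 0; p < np; p++)
    pcs[p] = new MBParallelComm(instances[p]);

    // resolve shared entities across the instances (part_dim = 2 for a 2d mesh)
  MBErrorCode rval = MBParallelComm::resolve_shared_ents(&pcs[0], np, 2);
  if (MB_SUCCESS != rval) return rval;

    // exchange one layer of 2d ghosts bridged by vertices, storing remote handles
  return MBParallelComm::exchange_ghost_cells(&pcs[0], np, 2, 0, 1, true);
}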

Modified: MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp	2009-06-22 17:13:38 UTC (rev 2958)
+++ MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp	2009-06-22 17:55:39 UTC (rev 2959)
@@ -119,6 +119,12 @@
       dynamic_cast<MBCore*>(mbImpl)->get_error_handler()->set_last_error(tmp_str.c_str()); \
       return result;}
 
+#define RRAI(i, a) if (MB_SUCCESS != result) {                \
+      std::string tmp_str; i->get_last_error(tmp_str);\
+      tmp_str.append("\n"); tmp_str.append(a);\
+      dynamic_cast<MBCore*>(i)->get_error_handler()->set_last_error(tmp_str.c_str()); \
+      return result;}
+
 /** Name of tag used to store MBParallelComm Index on mesh paritioning sets */
 const char* PARTITIONING_PCOMM_TAG_NAME = "__PRTN_PCOMM";
  
@@ -430,12 +436,12 @@
   }
 
   if ((int)procConfig.proc_rank() != from_proc) {
-    std::vector<std::vector<MBEntityHandle> > dum1;
+    std::vector<std::vector<MBEntityHandle> > dum1a, dum1b;
     std::vector<std::vector<int> > dum1p;
     std::vector<MBEntityHandle> dum2;
     std::vector<unsigned int> dum3;
     result = unpack_buffer(&buff[0], false, from_proc, -1, 
-                           dum1, dum1p, dum2, dum2, dum3, entities);
+                           dum1a, dum1b, dum1p, dum2, dum2, dum3, entities);
     RRA("Failed to unpack buffer in broadcast_entities.");
   }
 
@@ -501,7 +507,8 @@
                                           const bool store_remote_handles,
                                           const int from_proc,
                                           const int ind,
-                                          std::vector<std::vector<MBEntityHandle> > &L1h,
+                                          std::vector<std::vector<MBEntityHandle> > &L1hloc,
+                                          std::vector<std::vector<MBEntityHandle> > &L1hrem,
                                           std::vector<std::vector<int> > &L1p,
                                           std::vector<MBEntityHandle> &L2hloc, 
                                           std::vector<MBEntityHandle> &L2hrem,
@@ -515,7 +522,7 @@
 #endif  
     MBErrorCode result;
     result = unpack_entities(buff_ptr, store_remote_handles,
-                             ind, false, L1h, L1p, L2hloc, L2hrem, L2p, new_ents);
+                             ind, false, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents);
   RRA("Unpacking entities failed.");
 #ifdef DEBUG_PACKING
     std::cerr << "unpack_entities buffer space: " << buff_ptr - tmp_buff << " bytes." << std::endl;
@@ -643,7 +650,8 @@
                                           const bool store_remote_handles,
                                           const int to_proc,
                                           const bool is_iface,
-                                          std::vector<std::set<unsigned int> > *entprocs) 
+                                          std::vector<std::set<unsigned int> > *entprocs,
+                                          MBRange *allsent) 
 {
     // packed information:
     // 1. # entities = E
@@ -653,6 +661,11 @@
     //   c. for p in P (handle for e on p) (Note1)
     // 3. vertex/entity info
 
+    // get an estimate of the buffer size & pre-allocate buffer size
+  unsigned int buff_size = estimate_ents_buffer_size(entities, 
+                                                     store_remote_handles);
+  CHECK_BUFF_SPACE(buff, buff_ptr, buff_size);
+  
   MBWriteUtilIface *wu;
   MBErrorCode result = mbImpl->query_interface(std::string("MBWriteUtilIface"), 
                                                reinterpret_cast<void**>(&wu));
@@ -692,7 +705,7 @@
     for (rit = entities.begin(), i = 0; 
          rit != entities.end(); rit++, i++) {
       result = build_sharedhps_list(*rit, pstatus_vals[i], sharedp_vals[i],
-                                    (entprocs ? (*entprocs)[i] : dumprocs),
+                                    (entprocs ? (*entprocs)[allsent->index(*rit)] : dumprocs),
                                     num_ents, tmp_procs, tmp_handles);
       RRA("Failed to build sharedhps.");
 
@@ -707,7 +720,6 @@
     // pack vertices
   MBRange these_ents = entities.subset_by_type(MBVERTEX);
   num_ents = these_ents.size();
-  int buff_size;
 
   if (num_ents) {
     buff_size = 2*sizeof(int) + 3*num_ents*sizeof(double);
@@ -1085,7 +1097,8 @@
                                             const bool store_remote_handles,
                                             const int from_ind,
                                             const bool is_iface,
-                                            std::vector<std::vector<MBEntityHandle> > &L1h,
+                                            std::vector<std::vector<MBEntityHandle> > &L1hloc,
+                                            std::vector<std::vector<MBEntityHandle> > &L1hrem,
                                             std::vector<std::vector<int> > &L1p,
                                             std::vector<MBEntityHandle> &L2hloc, 
                                             std::vector<MBEntityHandle> &L2hrem,
@@ -1111,8 +1124,8 @@
     //      o if !iface, save new handle on L1 for all sharing procs
 
     // lists of handles/procs to return to sending/other procs
-    // L1h[p]: handle pairs [h, h'], where h' is the local proc handle
-    //         and h is either the remote proc handle (if that is known) or
+    // L1hloc[p], L1hrem[p]: handle pairs [h, h'], where h is the local proc handle
+    //         and h' is either the remote proc handle (if that is known) or
     //         the owner proc handle (otherwise);
     // L1p[p]: indicates whether h is remote handle (= -1) or owner (rank of owner)
     // L2hloc, L2hrem: local/remote handles for entities shared by > 2 procs;
@@ -1146,6 +1159,8 @@
       buff_ptr += j * (sizeof(int)+sizeof(MBEntityHandle));
     }
   }
+
+  std::vector<MBEntityHandle> msg_ents;
   
   while (!done) {
     MBEntityType this_type = MBMAXTYPE;
@@ -1197,7 +1212,7 @@
         buff_ptr += verts_per_entity * sizeof(MBEntityHandle);
 
           // update connectivity to local handles
-        result = get_local_handles(connect, verts_per_entity, new_ents);
+        result = get_local_handles(connect, verts_per_entity, msg_ents);
         RRA("Couldn't get local handles.");
       }
 
@@ -1247,18 +1262,19 @@
           L2p.push_back(ps[0]);
         }
 
-        if (!is_iface) {
-          assert("New entity shouldn't be in new_ents list already" &&
-                 new_ents.find(new_h) == new_ents.end());
-          new_ents.insert(new_h);
-        }
-
         created_here = true;
       }
 
         //=======================================
         // take care of sharing data
         //=======================================
+
+        // need to save entities found in order, for interpretation of
+        // later parts of this message
+      if (!is_iface) msg_ents.push_back(new_h);
+
+      if (created_here) new_ents.insert(new_h);
+
       if (store_remote_handles) {
         
           // update sharing data and pstatus, adjusting order if iface
@@ -1272,21 +1288,41 @@
           for (j = 0; j < num_ps; j++) {
             if (ps[j] == (int)procConfig.proc_rank()) continue;
             int idx = get_buffers(ps[j]);
-            if (idx == (int)L1h.size()) {
-              L1h.resize(idx+1);
+            if (idx == (int)L1hloc.size()) {
+              L1hloc.resize(idx+1);
+              L1hrem.resize(idx+1);
               L1p.resize(idx+1);
             }
             
-            if (!hs[j]) {
-              assert(-1 != ps[0] && num_ps > 2);
-              L1p[idx].push_back(ps[0]);
-              L1h[idx].push_back(hs[0]);
+              // don't bother adding if it's already in the list
+            std::vector<MBEntityHandle>::iterator vit = 
+                std::find(L1hloc[idx].begin(), L1hloc[idx].end(), new_h);
+            if (vit != L1hloc[idx].end()) {
+                // if it's in the list but remote handle isn't known but we know
+                // it, replace in the list
+              if (L1p[idx][vit-L1hloc[idx].begin()] != -1 && hs[j]) {
+                L1hrem[idx][vit-L1hloc[idx].begin()] = hs[j];
+                L1p[idx][vit-L1hloc[idx].begin()] = -1;
+              }
+              else continue;
             }
             else {
-              L1p[idx].push_back(-1);
-              L1h[idx].push_back(hs[j]);
+              if (!hs[j]) {
+                assert(-1 != ps[0] && num_ps > 2);
+                L1p[idx].push_back(ps[0]);
+                L1hrem[idx].push_back(hs[0]);
+              }
+              else {
+                assert("either this remote handle isn't in the remote list, or it's for another proc" &&
+                       (std::find(L1hrem[idx].begin(), L1hrem[idx].end(), hs[j]) == 
+                        L1hrem[idx].end() ||
+                        L1p[idx][std::find(L1hrem[idx].begin(), L1hrem[idx].end(), hs[j]) - 
+                                 L1hrem[idx].begin()] != -1));
+                L1p[idx].push_back(-1);
+                L1hrem[idx].push_back(hs[j]);
+              }
+              L1hloc[idx].push_back(new_h);
             }
-            L1h[idx].push_back(new_h);
           }
         }
 
@@ -1391,13 +1427,11 @@
 MBErrorCode MBParallelComm::list_entities(const MBEntityHandle *ents, int num_ents) 
 {
   if (NULL == ents && 0 == num_ents) {
-    return mbImpl->list_entities(0, 0);
+    return list_entities(NULL, 0);
   }
   
   else if (NULL == ents || 0 == num_ents) {
-    MBRange dum_ents;
-    mbImpl->get_entities_by_handle(0, dum_ents);
-    return list_entities(dum_ents);
+    return list_entities(sharedEnts);
   }
     
   unsigned char pstat;
@@ -1484,7 +1518,7 @@
     num_exist = num_ps;
       // if it's only one, hopefully I'm not there yet...
     assert("I shouldn't be the only proc there." &&
-           (1 != num_exist || ps[0] != procConfig.proc_rank()));
+           (1 != num_exist || ps[0] != (int)procConfig.proc_rank()));
     changed = true;
   }
   else {
@@ -1530,7 +1564,7 @@
   
     // if it's multi-shared and we created the entity in this unpack,
     // local handle probably isn't in handle list yet
-  if (add_pstat & PSTATUS_GHOST && num_exist > 2) {
+  if (num_exist > 2) {
     idx = std::find(tag_ps, tag_ps+num_exist, procConfig.proc_rank()) - tag_ps;
     assert(idx < (int) num_exist);
     if (!tag_hs[idx])
@@ -1571,6 +1605,7 @@
       tag_ps[0] = tag_ps[1];
       tag_hs[0] = tag_hs[1];
     }
+    assert(tag_ps[0] != -1 && tag_hs[0] != 0);
     result = mbImpl->tag_set_data(sharedp_tag(), &new_h, 1, tag_ps);
     RRA("Couldn't set sharedp tag.");
     result = mbImpl->tag_set_data(sharedh_tag(), &new_h, 1, tag_hs);
@@ -1598,23 +1633,27 @@
   if (pstat & PSTATUS_MULTISHARED) {
     result = mbImpl->tag_get_data(sharedps_tag(), &entity, 1, ps);
     RRA("Couldn't get sharedps tag.");
-    result = mbImpl->tag_get_data(sharedhs_tag(), &entity, 1, hs);
-    RRA("Couldn't get sharedhs tag.");
+    if (hs) {
+      result = mbImpl->tag_get_data(sharedhs_tag(), &entity, 1, hs);
+      RRA("Couldn't get sharedhs tag.");
+    }
     num_ps = std::find(ps, ps+MAX_SHARING_PROCS, -1) - ps;
   }
   else if (pstat & PSTATUS_SHARED) {
     result = mbImpl->tag_get_data(sharedp_tag(), &entity, 1, ps);
     RRA("Couldn't get sharedp tag.");
-    result = mbImpl->tag_get_data(sharedh_tag(), &entity, 1, hs);
-    RRA("Couldn't get sharedh tag.");
+    if (hs) {
+      result = mbImpl->tag_get_data(sharedh_tag(), &entity, 1, hs);
+      RRA("Couldn't get sharedh tag.");
+      hs[1] = 0;
+    }
       // initialize past end of data
     ps[1] = -1;
-    hs[1] = 0;
     num_ps = 1;
   }
   else {
     ps[0] = -1;
-    hs[0] = 0;
+    if (hs) hs[0] = 0;
     num_ps = 0;
   }
 
@@ -1635,6 +1674,7 @@
                                                  std::vector<unsigned int> &L2p,
                                                  MBEntityHandle &new_h) 
 {
+  new_h = 0;
   if (!is_iface && num_ps > 2) {
     for (unsigned int i = 0; i < L2hrem.size(); i++) {
       if (L2hrem[i] == owner_h && owner_p == (int) L2p[i]) {
@@ -1677,16 +1717,21 @@
   
 MBErrorCode MBParallelComm::get_local_handles(MBEntityHandle *from_vec, 
                                               int num_ents,
-                                              const MBRange &new_ents,
-                                              std::vector<MBEntityHandle> *no_ents) 
+                                              const MBRange &new_ents) 
 {
+  std::vector<MBEntityHandle> tmp_ents;
+  std::copy(new_ents.begin(), new_ents.end(), std::back_inserter(tmp_ents));
+  return get_local_handles(from_vec, num_ents, tmp_ents);
+}
+
+MBErrorCode MBParallelComm::get_local_handles(MBEntityHandle *from_vec,
+                                              int num_ents,
+                                              const std::vector<MBEntityHandle> &new_ents) 
+{
   for (int i = 0; i < num_ents; i++) {
     if (TYPE_FROM_HANDLE(from_vec[i]) == MBMAXTYPE) {
       assert(ID_FROM_HANDLE(from_vec[i]) < (int) new_ents.size());
-      if (no_ents && i < (int)no_ents->size() && (*no_ents)[i])
-        from_vec[i] = (*no_ents)[i];
-      else
-        from_vec[i] = new_ents[ID_FROM_HANDLE(from_vec[i])];
+      from_vec[i] = new_ents[ID_FROM_HANDLE(from_vec[i])];
     }
   }
   
@@ -2542,7 +2587,7 @@
   RRA("Trouble tagging shared verts.");
 
     // get entities shared by 1 or n procs
-  result = tag_shared_ents(resolve_dim, shared_dim, shared_verts, skin_ents,
+  result = tag_shared_ents(resolve_dim, shared_dim, skin_ents,
                            proc_nranges);
   RRA("Trouble tagging shared entities.");
 
@@ -2561,7 +2606,7 @@
     // create the sets for each interface; store them as tags on
     // the interface instance
   MBRange iface_sets;
-  result = create_interface_sets(proc_nranges, this_set, resolve_dim, shared_dim);
+  result = create_interface_sets(proc_nranges, resolve_dim, shared_dim);
   RRA("Trouble creating iface sets.");
 
     // establish comm procs and buffers for them
@@ -2590,6 +2635,99 @@
   return result;
 }
 
+MBErrorCode MBParallelComm::resolve_shared_ents(MBParallelComm **pc, 
+                                                const unsigned int np, 
+                                                const int part_dim) 
+{
+  std::vector<MBRange> verts(np);
+  int tot_verts = 0;
+  unsigned int p, i, j, v, vtot;
+  MBErrorCode rval;
+  for (p = 0; p < np; p++) {
+    MBSkinner skinner(pc[p]->get_moab());
+    MBRange part_ents, skin_ents;
+    rval = pc[p]->get_moab()->get_entities_by_dimension(0, part_dim, part_ents);
+    if (MB_SUCCESS != rval) return rval;
+    rval = skinner.find_skin(part_ents, skin_ents, skin_ents, true);
+    if (MB_SUCCESS != rval) return rval;
+    rval = pc[p]->get_moab()->get_adjacencies(skin_ents, 0, true, verts[p],
+                                              MBInterface::UNION);
+    if (MB_SUCCESS != rval) return rval;
+    tot_verts += verts[p].size();
+  }
+  
+  tuple_list shared_ents;
+  tuple_list_init_max(&shared_ents, 2, 0, 1, 0, tot_verts);
+
+  i = 0; j = 0;
+  std::vector<int> gids;
+  MBRange::iterator rit;
+  MBTag gid_tag;
+  int dum_default = -1;
+  for (p = 0; p < np; p++) {
+    rval = pc[p]->get_moab()->tag_create(GLOBAL_ID_TAG_NAME, 
+                                         sizeof(int), MB_TAG_DENSE,
+                                         MB_TYPE_INTEGER, gid_tag, 
+                                         &dum_default, true);
+    gids.resize(verts[p].size());
+    rval = pc[p]->get_moab()->tag_get_data(gid_tag, verts[p], &gids[0]);
+    if (MB_SUCCESS != rval) return rval;
+    
+    for (v = 0, rit = verts[p].begin(); v < gids.size(); v++, rit++) {
+      shared_ents.vi[i++] = gids[v];
+      shared_ents.vi[i++] = p;
+      shared_ents.vul[j] = *rit;
+      j++;
+      shared_ents.n++;
+    }
+  }
+  
+  buffer sort_buffer;
+  buffer_init(&sort_buffer, vtot);
+  tuple_list_sort(&shared_ents, 0, &sort_buffer);
+  buffer_free(&sort_buffer);
+
+  j = 0; i = 0;
+  std::vector<MBEntityHandle> handles;
+  std::vector<int> procs;
+  
+  while (i < shared_ents.n) {
+    handles.clear();
+    procs.clear();
+    
+      // count & accumulate sharing procs
+    int this_gid = shared_ents.vi[j];
+    while (i < shared_ents.n && shared_ents.vi[j] == this_gid) {
+      j++;
+      procs.push_back( shared_ents.vi[j++] );
+      handles.push_back( shared_ents.vul[i++] );
+    }
+    if (1 == procs.size()) continue;
+    
+    for (v = 0; v < procs.size(); v++) {
+      rval = pc[procs[v]]->update_remote_data(handles[v], 
+                                              &procs[0], &handles[0], procs.size(),
+                                              PSTATUS_INTERFACE);
+      if (MB_SUCCESS != rval) return rval;
+    }
+  }
+
+  std::set<unsigned int> psets;
+  for (p = 0; p < np; p++) {
+    rval = pc[p]->create_interface_sets(part_dim, part_dim-1);
+    if (MB_SUCCESS != rval) return rval;
+      // establish comm procs and buffers for them
+    psets.clear();
+    rval = pc[p]->get_interface_procs(psets);
+    if (MB_SUCCESS != rval) return rval;
+    for (std::set<unsigned int>::iterator sit = psets.begin(); sit != psets.end(); sit++)
+      pc[p]->get_buffers(*sit);
+
+  }
+  
+  return MB_SUCCESS;
+}
+
 MBErrorCode MBParallelComm::tag_iface_entities() 
 {
   MBErrorCode result = MB_SUCCESS;
@@ -2685,8 +2823,46 @@
   return MB_SUCCESS;
 }
   
+MBErrorCode MBParallelComm::create_interface_sets(int resolve_dim, int shared_dim) 
+{
+  std::map<std::vector<int>, MBRange> proc_nranges;
+  
+    // build up the list of shared entities
+  int procs[MAX_SHARING_PROCS];
+  MBEntityHandle handles[MAX_SHARING_PROCS];
+  MBErrorCode result;
+  int nprocs;
+  unsigned char pstat;
+  for (MBRange::iterator rit = sharedEnts.begin(); rit != sharedEnts.end(); rit++) {
+    if (shared_dim != -1 && mbImpl->dimension_from_handle(*rit) > shared_dim)
+      continue;
+    result = get_sharing_data(*rit, procs, handles, pstat, nprocs);
+    RRA("");
+    std::sort(procs, procs+nprocs);
+    std::vector<int> tmp_procs(procs, procs + nprocs);
+    proc_nranges[tmp_procs].insert(*rit);
+  }
+                                                  
+  MBSkinner skinner(mbImpl);
+  MBRange skin_ents[4];
+  result = mbImpl->get_entities_by_dimension(0, resolve_dim, skin_ents[resolve_dim]);
+  RRA("");
+  result = skinner.find_skin(skin_ents[resolve_dim], skin_ents[resolve_dim-1], 
+                             skin_ents[resolve_dim-1], true);
+  RRA("Failed to find skin.");
+  if (shared_dim > 1) {
+    result = mbImpl->get_adjacencies(skin_ents[resolve_dim-1], resolve_dim-2, true,
+                                     skin_ents[resolve_dim-2], MBInterface::UNION);
+    RRA("");
+  }
+
+  result = tag_shared_ents(resolve_dim, shared_dim, skin_ents,
+                           proc_nranges);
+    
+  return create_interface_sets(proc_nranges, resolve_dim, shared_dim);
+}
+  
 MBErrorCode MBParallelComm::create_interface_sets(std::map<std::vector<int>, MBRange> &proc_nranges,
-                                                  MBEntityHandle this_set,
                                                   int resolve_dim, int shared_dim) 
 {
   if (proc_nranges.empty()) return MB_SUCCESS;
@@ -2730,6 +2906,7 @@
     int min_proc = (mit->first)[0];
     unsigned char pval = (PSTATUS_SHARED | PSTATUS_INTERFACE);
     if (min_proc < (int) procConfig.proc_rank()) pval |= PSTATUS_NOT_OWNED;
+    if (mit->first.size() > 1) pval |= PSTATUS_MULTISHARED;
     result = mbImpl->tag_set_data(pstatus_tag, &new_set, 1, &pval); 
     RRA("Failed to tag interface set with pstatus.");
 
@@ -2816,7 +2993,6 @@
 
 MBErrorCode MBParallelComm::tag_shared_ents(int resolve_dim,
                                             int shared_dim,
-                                            tuple_list &shared_verts,
                                             MBRange *skin_ents,
                                             std::map<std::vector<int>, MBRange> &proc_nranges) 
 {
@@ -2956,7 +3132,8 @@
     
     proc_nranges[sharing_procs].insert(this_ent);
 
-    unsigned char share_flag = PSTATUS_SHARED, ms_flag = PSTATUS_MULTISHARED;
+    unsigned char share_flag = PSTATUS_SHARED, 
+        ms_flag = (PSTATUS_SHARED | PSTATUS_MULTISHARED);
     if (sharing_procs.size() == 1) {
       result = mbImpl->tag_set_data(sharedp_tag, &this_ent, 1,
                                     &sharing_procs[0]);
@@ -3016,7 +3193,7 @@
       for (j = 0; j < MAX_SHARING_PROCS; j++) {
         if (-1 != tmp_iface_procs[j] && tmp_iface_procs[j] != (int)procConfig.proc_rank()) 
           procs_set.insert((unsigned int) tmp_iface_procs[j]);
-        else {
+        else if (-1 == tmp_iface_procs[j]) {
           std::fill(tmp_iface_procs, tmp_iface_procs+j, -1);
           break;
         }
@@ -3124,7 +3301,7 @@
 {
   MBRange tmp_ents;
 
-  if (ents.empty()) ents = sharedEnts;
+  assert(!ents.empty());
 
     // Put into tmp_ents any entities which are not owned locally or
     // who are already shared with to_proc
@@ -3250,53 +3427,12 @@
     // get entities to be sent to neighbors
     //===========================================
 
-    // done in a separate loop over procs because sometimes later procs 
-    // need to add info to earlier procs' messages
   MBRange sent_ents[MAX_SHARING_PROCS], allsent, tmp_range;
-  for (ind = 0, proc_it = buffProcs.begin(); 
-       proc_it != buffProcs.end(); proc_it++, ind++) {
-    if (!is_iface) {
-      result = get_ghosted_entities(bridge_dim, ghost_dim, buffProcs[ind],
-                                    num_layers, sent_ents[ind]);
-      RRA("Failed to get ghost layers.");
-    }
-    else {
-      result = get_iface_entities(buffProcs[ind], -1, sent_ents[ind]);
-      RRA("Failed to get interface layers.");
-
-/*
-        // remove vertices, since we know they're already shared
-      std::pair<MBRange::const_iterator,MBRange::const_iterator> vert_it =
-          sent_ents[ind].equal_range(MBVERTEX);
-      sent_ents[ind].erase(vert_it.first, vert_it.second);
-*/
-    }
-
-      // filter out entities already shared with destination
-    tmp_range.clear();
-    result = filter_pstatus(sent_ents[ind], PSTATUS_SHARED, PSTATUS_AND,
-                            buffProcs[ind], &tmp_range);
-    RRA("Couldn't filter on owner.");
-    if (!tmp_range.empty()) 
-      sent_ents[ind] = sent_ents[ind].subtract(tmp_range);
-
-    allsent.merge(sent_ents[ind]);
-  }
-
-    //===========================================
-    // need to get procs each entity is sent to
-    //===========================================
-  MBRange::iterator rit;
   std::vector<std::set<unsigned int> > entprocs(allsent.size());
-  for (ind = 0, proc_it = buffProcs.begin(); 
-       proc_it != buffProcs.end(); proc_it++, ind++) {
-    for (rit = sent_ents[ind].begin(); rit != sent_ents[ind].end(); rit++) {
-      int rind = allsent.index(*rit);
-      assert(rind < (int) allsent.size() && rind >= 0);
-      entprocs[rind].insert(*proc_it);
-    }
-  }
-    
+  result = get_sent_ents(is_iface, bridge_dim, ghost_dim, num_layers,
+                         sent_ents, allsent, entprocs);
+  RRA("get_sent_ents failed.");
+  
     //===========================================
     // pack and send ents from this proc to others
     //===========================================
@@ -3304,19 +3440,13 @@
   for (ind = 0, proc_it = buffProcs.begin(); 
        proc_it != buffProcs.end(); proc_it++, ind++) {
 
-      // get an estimate of the buffer size & pre-allocate buffer size
-    unsigned int buff_size = estimate_ents_buffer_size(sent_ents[ind], 
-                                                       store_remote_handles);
-    ownerSBuffs[ind].clear();
-    ownerSBuffs[ind].reserve(buff_size);
-    
       // buff_ptr points to the END (one past last occupied byte) of buffer
     buff_ptr = &ownerSBuffs[ind][0];
 
       // entities
     result = pack_entities(sent_ents[ind], ownerSBuffs[ind], buff_ptr,
                            store_remote_handles, buffProcs[ind], is_iface,
-                           &entprocs); 
+                           &entprocs, &allsent); 
     RRA("Packing entities failed.");
 
       // now we're ready to send the buffer
@@ -3334,7 +3464,7 @@
   int num_incoming = buffProcs.size();
   std::vector<MPI_Status> status(buffProcs.size());
   std::vector<std::vector<MBEntityHandle> > recd_ents(num_incoming);
-  std::vector<std::vector<MBEntityHandle> > L1h(buffProcs.size());
+  std::vector<std::vector<MBEntityHandle> > L1hloc(buffProcs.size()), L1hrem(buffProcs.size());
   std::vector<std::vector<int> > L1p(buffProcs.size());
   std::vector<MBEntityHandle> L2hloc, L2hrem;
   std::vector<unsigned int> L2p;
@@ -3370,7 +3500,7 @@
       unsigned char *buff_ptr = &ghostRBuffs[ind][0];
       result = unpack_entities(buff_ptr,
                                store_remote_handles, ind, is_iface,
-                               L1h, L1p, L2hloc, L2hrem, L2p, new_ents);
+                               L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents);
       RRA("Failed to unpack entities.");
     }
     else {
@@ -3419,7 +3549,7 @@
        proc_it != buffProcs.end(); proc_it++, ind++) {
       // skip if iface layer and higher-rank proc
     buff_ptr = &ghostSBuffs[ind][0];
-    result = pack_remote_handles(L1h[ind], L1p[ind], *proc_it,
+    result = pack_remote_handles(L1hloc[ind], L1hrem[ind], L1p[ind], *proc_it,
                                    ghostSBuffs[ind], buff_ptr);
     RRA("Failed to pack remote handles.");
     result = send_buffer(buffProcs[ind], &ghostSBuffs[ind][0], 
@@ -3472,6 +3602,211 @@
   return MB_SUCCESS;
 }
 
+MBErrorCode MBParallelComm::get_sent_ents(const bool is_iface, 
+                                          const int bridge_dim, const int ghost_dim,
+                                          const int num_layers,
+                                          MBRange *sent_ents, MBRange &allsent,
+                                          std::vector<std::set<unsigned int> > &entprocs) 
+{
+  MBErrorCode result;
+  int ind;
+  std::vector<unsigned int>::iterator proc_it;
+  MBRange tmp_range;
+  
+    // done in a separate loop over procs because sometimes later procs 
+    // need to add info to earlier procs' messages
+  for (ind = 0, proc_it = buffProcs.begin(); 
+       proc_it != buffProcs.end(); proc_it++, ind++) {
+    if (!is_iface) {
+      result = get_ghosted_entities(bridge_dim, ghost_dim, buffProcs[ind],
+                                    num_layers, sent_ents[ind]);
+      RRA("Failed to get ghost layers.");
+    }
+    else {
+      result = get_iface_entities(buffProcs[ind], -1, sent_ents[ind]);
+      RRA("Failed to get interface layers.");
+    }
+
+      // filter out entities already shared with destination
+    tmp_range.clear();
+    result = filter_pstatus(sent_ents[ind], PSTATUS_SHARED, PSTATUS_AND,
+                            buffProcs[ind], &tmp_range);
+    RRA("Couldn't filter on owner.");
+    if (!tmp_range.empty()) 
+      sent_ents[ind] = sent_ents[ind].subtract(tmp_range);
+
+    allsent.merge(sent_ents[ind]);
+  }
+
+    //===========================================
+    // need to get procs each entity is sent to
+    //===========================================
+  MBRange::iterator rit;
+  entprocs.resize(allsent.size());
+  for (ind = 0, proc_it = buffProcs.begin(); 
+       proc_it != buffProcs.end(); proc_it++, ind++) {
+    for (rit = sent_ents[ind].begin(); rit != sent_ents[ind].end(); rit++) {
+      int rind = allsent.index(*rit);
+      assert(rind < (int) allsent.size() && rind >= 0);
+      entprocs[rind].insert(*proc_it);
+    }
+  }
+
+  return MB_SUCCESS;
+}
+
+MBErrorCode MBParallelComm::exchange_ghost_cells(MBParallelComm **pcs,
+                                                 unsigned int num_procs,
+                                                 int ghost_dim, int bridge_dim,
+                                                 int num_layers,
+                                                 bool store_remote_handles)
+{
+    // static version of function, exchanging info through buffers rather 
+    // than through messages
+
+    // if we're only finding out about existing ents, we have to be storing
+    // remote handles too
+  assert(num_layers > 0 || store_remote_handles);
+  
+  const bool is_iface = !num_layers;
+  
+  unsigned int ind;
+  unsigned char *buff_ptr;
+  MBParallelComm *pc;
+  MBErrorCode result = MB_SUCCESS;
+
+    // when this function is called, buffProcs should already have any 
+    // communicating procs
+
+    //===========================================
+    // get entities to be sent to neighbors
+    //===========================================
+
+    // done in a separate loop over procs because sometimes later procs 
+    // need to add info to earlier procs' messages
+  MBRange sent_ents[MAX_SHARING_PROCS][MAX_SHARING_PROCS], 
+      allsent[MAX_SHARING_PROCS];
+
+    //===========================================
+    // get entities to be sent to neighbors
+    //===========================================
+
+  std::vector<std::set<unsigned int> > entprocs[MAX_SHARING_PROCS];
+  for (unsigned int p = 0; p < num_procs; p++) {
+    pc = pcs[p];
+    result = pc->get_sent_ents(is_iface, bridge_dim, ghost_dim, num_layers,
+                               sent_ents[p], allsent[p], entprocs[p]);
+    RRAI(pc->get_moab(), "get_sent_ents failed.");
+  
+    //===========================================
+    // pack entities into buffers
+    //===========================================
+
+    for (ind = 0; ind < pc->buffProcs.size(); ind++) {
+      
+        // buff_ptr points to the END (one past last occupied byte) of buffer
+      buff_ptr = &pc->ownerSBuffs[ind][0];
+
+        // entities
+      result = pc->pack_entities(sent_ents[p][ind], pc->ownerSBuffs[ind], buff_ptr,
+                                 store_remote_handles, pc->buffProcs[ind], is_iface,
+                                 &entprocs[p], &allsent[p]); 
+      RRAI(pc->get_moab(), "Packing entities failed.");
+    }
+  }
+
+    //===========================================
+    // receive/unpack new entities
+    //===========================================
+    // number of incoming messages for ghosts is the number of procs we 
+    // communicate with; for iface, it's the number of those with lower rank
+  std::vector<std::vector<MBEntityHandle> > L1hloc[MAX_SHARING_PROCS], L1hrem[MAX_SHARING_PROCS];
+  std::vector<std::vector<int> > L1p[MAX_SHARING_PROCS];
+  std::vector<MBEntityHandle> L2hloc[MAX_SHARING_PROCS], L2hrem[MAX_SHARING_PROCS];
+  std::vector<unsigned int> L2p[MAX_SHARING_PROCS];
+  MBRange new_ents[MAX_SHARING_PROCS];
+  
+  for (unsigned int p = 0; p < num_procs; p++) {
+    L1hloc[p].resize(pcs[p]->buffProcs.size());
+    L1hrem[p].resize(pcs[p]->buffProcs.size());
+    L1p[p].resize(pcs[p]->buffProcs.size());
+  }
+  
+  for (unsigned int p = 0; p < num_procs; p++) {
+  
+    MBParallelComm *pc = pcs[p];
+    
+    for (ind = 0; ind < pc->buffProcs.size(); ind++) {
+        // incoming ghost entities; unpack; returns entities received
+        // both from sending proc and from owning proc (which may be different)
+      unsigned int to_p = pc->buffProcs[ind];
+      unsigned char *buff_ptr = &pc->ownerSBuffs[ind][0];
+      result = pcs[to_p]->unpack_entities(buff_ptr,
+                                          store_remote_handles, ind, is_iface,
+                                          L1hloc[to_p], L1hrem[to_p], L1p[to_p], L2hloc[to_p], 
+                                          L2hrem[to_p], L2p[to_p], new_ents[to_p]);
+      RRAI(pc->get_moab(), "Failed to unpack entities.");
+    }
+  }
+
+  if (is_iface) {
+#ifdef NDEBUG
+    for (unsigned int p = 0; p < num_procs; p++) {
+      result = pcs[p]->check_sent_ents(allsent[p]);
+      RRAI(pcs[p]->get_moab(), "Failed check on shared entities.");
+      result = pcs[p]->check_all_shared_handles();
+      RRAI(pcs[p]->get_moab(), "Failed check on all shared handles.");
+    }
+#endif
+    return MB_SUCCESS;
+  }
+  
+      //===========================================
+      // send local handles for new ghosts to owner, then add
+      // those to ghost list for that owner
+      //===========================================
+  std::vector<unsigned int>::iterator proc_it;
+  for (unsigned int p = 0; p < num_procs; p++) {
+    pc = pcs[p];
+  
+    for (ind = 0, proc_it = pc->buffProcs.begin(); 
+         proc_it != pc->buffProcs.end(); proc_it++, ind++) {
+        // skip if iface layer and higher-rank proc
+      unsigned char *buff_ptr = &pc->ghostSBuffs[ind][0];
+      result = pc->pack_remote_handles(L1hloc[p][ind], L1hrem[p][ind], L1p[p][ind], *proc_it,
+                                       pc->ghostSBuffs[ind], buff_ptr);
+      RRAI(pc->get_moab(), "Failed to pack remote handles.");
+    }
+  }
+  
+    //===========================================
+    // process remote handles of my ghosteds
+    //===========================================
+  for (unsigned int p = 0; p < num_procs; p++) {
+    pc = pcs[p];
+  
+    for (ind = 0, proc_it = pc->buffProcs.begin(); 
+         proc_it != pc->buffProcs.end(); proc_it++, ind++) {
+        // incoming remote handles
+      unsigned int to_p = pc->buffProcs[ind];
+      result = pcs[to_p]->unpack_remote_handles(p, &pc->ghostSBuffs[ind][0],
+                                                L2hloc[to_p], L2hrem[to_p], L2p[to_p]);
+      RRAI(pc->get_moab(), "Failed to unpack remote handles.");
+    }
+  }
+    
+#ifdef NDEBUG
+  for (unsigned int p = 0; p < num_procs; p++) {
+    result = pcs[p]->check_sent_ents(allsent[p]);
+    RRAI(pcs[p]->get_moab(), "Failed check on shared entities.");
+    result = pcs[p]->check_all_shared_handles();
+    RRAI(pcs[p]->get_moab(), "Failed check on all shared handles.");
+  }
+#endif
+
+  return MB_SUCCESS;
+}
+
 MBErrorCode MBParallelComm::get_iface_entities(int other_proc,
                                                int dim,
                                                MBRange &iface_ents) 
@@ -3536,7 +3871,8 @@
   return MB_SUCCESS;
 }
 
-MBErrorCode MBParallelComm::pack_remote_handles(std::vector<MBEntityHandle> &L1h,
+MBErrorCode MBParallelComm::pack_remote_handles(std::vector<MBEntityHandle> &L1hloc,
+                                                std::vector<MBEntityHandle> &L1hrem,
                                                 std::vector<int> &L1p,
                                                 unsigned int to_proc,
                                                 std::vector<unsigned char> &buff,
@@ -3544,13 +3880,13 @@
 {
     // 2 vectors of handles plus ints
   CHECK_BUFF_SPACE(buff, buff_ptr, ((L1p.size()+1)*sizeof(int) + 
-                                    L1h.size()*sizeof(MBEntityHandle)));
+                                    L1hloc.size()*sizeof(MBEntityHandle)));
   
     // should be in pairs of handles
-  assert(!(L1h.size()%2));
-  PACK_INT(buff_ptr, L1h.size()/2);
+  PACK_INT(buff_ptr, L1hloc.size());
   PACK_INTS(buff_ptr, &L1p[0], L1p.size());
-  PACK_EH(buff_ptr, &L1h[0], L1h.size());
+  PACK_EH(buff_ptr, &L1hrem[0], L1hrem.size());
+  PACK_EH(buff_ptr, &L1hloc[0], L1hloc.size());
   
   return MB_SUCCESS;
 }
@@ -3567,20 +3903,22 @@
 
   unsigned char *buff_proc = buff_ptr;
   buff_ptr += num_eh * sizeof(int);
+  unsigned char *buff_rem = buff_ptr + num_eh * sizeof(MBEntityHandle);
   MBErrorCode result;
-  MBEntityHandle hpair[2];
+  MBEntityHandle hpair[2], dum_h;
   int proc;
   for (int i = 0; i < num_eh; i++) {
     UNPACK_INT(buff_proc, proc);
-    UNPACK_EH(buff_ptr, hpair, 2);
+    UNPACK_EH(buff_ptr, hpair, 1);
+    UNPACK_EH(buff_rem, hpair+1, 1);
 
     if (-1 != proc) {
-      MBEntityHandle dum_h;
       result = find_existing_entity(false, proc, hpair[0], 3, NULL, 0,
                                     mbImpl->type_from_handle(hpair[1]),
                                     L2hloc, L2hrem, L2p, dum_h);
       RRA("Didn't get existing entity.");
       if (dum_h) hpair[0] = dum_h;
+      else hpair[0] = 0;
     }
     assert(hpair[0] && hpair[1]);
     int this_proc = from_proc;
@@ -4350,44 +4688,40 @@
                                              int &owner,
                                              MBEntityHandle &handle) 
 {
-    // I'm sure there's a much more efficient logic to this,
+  unsigned char pstat;
+  int sharing_procs[MAX_SHARING_PROCS];
+  MBEntityHandle sharing_handles[MAX_SHARING_PROCS];
 
-    // but I'm tired...
-  unsigned char pstat;
   MBErrorCode result = mbImpl->tag_get_data(pstatus_tag(), &entity, 1,
                                             &pstat);
   if (!(pstat & PSTATUS_NOT_OWNED)) {
     owner = proc_config().proc_rank();
     handle = entity;
-    return MB_SUCCESS;
   }
   
-  int sharing_procs[MAX_SHARING_PROCS];
-  MBEntityHandle sharing_handles[MAX_SHARING_PROCS];
-  result = mbImpl->tag_get_data(sharedp_tag(), &entity, 1,
-                                sharing_procs);
-  RRA(" ");
-  if (-1 != sharing_procs[0]) {
+  else if (pstat & PSTATUS_MULTISHARED) {
+    result = mbImpl->tag_get_data(sharedps_tag(), &entity, 1,
+                                  sharing_procs);
     owner = sharing_procs[0];
-    result = mbImpl->tag_get_data(sharedh_tag(), &entity, 1,
+    result = mbImpl->tag_get_data(sharedhs_tag(), &entity, 1,
                                   sharing_handles);
     handle = sharing_handles[0];
-    return MB_SUCCESS;
   }
-  
-  result = mbImpl->tag_get_data(sharedps_tag(), &entity, 1,
-                                sharing_procs);
-  if (MB_SUCCESS == result && -1 != sharing_procs[0]) {
+  else if (pstat & PSTATUS_SHARED) {
+    result = mbImpl->tag_get_data(sharedp_tag(), &entity, 1,
+                                  sharing_procs);
+    RRA(" ");
     owner = sharing_procs[0];
-    result = mbImpl->tag_get_data(sharedhs_tag(), &entity, 1,
+    result = mbImpl->tag_get_data(sharedh_tag(), &entity, 1,
                                   sharing_handles);
     handle = sharing_handles[0];
-    return MB_SUCCESS;
   }
-
-  owner = -1;
-  handle = 0;
-  return MB_FAILURE;
+  else {
+    owner = -1;
+    handle = 0;
+  }
+  
+  return MB_SUCCESS;
 }
 
 MBErrorCode MBParallelComm::get_global_part_count( int& count_out ) const
@@ -4487,7 +4821,8 @@
   int tmp[MAX_SHARING_PROCS], curr[MAX_SHARING_PROCS];
   int *parts[2] = { neighbors_out, tmp };
   for (MBRange::iterator i = iface.begin(); i != iface.end(); ++i) {
-    rval = get_sharing_parts( *i, curr, n );
+    unsigned char pstat;
+    rval = get_sharing_data( *i, curr, NULL, pstat, n);
     if (MB_SUCCESS != rval)
       return rval;
     std::sort( curr, curr+n );
@@ -4522,7 +4857,8 @@
     int part_ids[MAX_SHARING_PROCS], num_parts;
     MBRange::iterator i = iface_sets_out.begin();
     while (i != iface_sets_out.end()) {
-      MBErrorCode rval = get_sharing_parts( *i, part_ids, num_parts );
+      unsigned char pstat;
+      MBErrorCode rval = get_sharing_data( *i, part_ids, NULL, pstat, num_parts );
       if (MB_SUCCESS != rval)
         return rval;
       
@@ -4590,78 +4926,15 @@
   return MB_SUCCESS;
 }    
 
-MBErrorCode MBParallelComm::get_sharing_parts( MBEntityHandle entity,
-                                               int part_ids_out[MAX_SHARING_PROCS],
-                                               int& num_part_ids_out,
-                                               MBEntityHandle remote_handles[MAX_SHARING_PROCS] )
+MBErrorCode MBParallelComm::exchange_all_shared_handles(std::vector<std::vector<SharedEntityData> > &result)
 {
-
-  // FIXME : assumes one part per proc, and therefore part_id == rank
-  
-    // If entity is not shared, then we're the owner.
-  unsigned char pstat;
-  MBErrorCode result = mbImpl->tag_get_data(pstatus_tag(), &entity, 1,
-                                            &pstat);
-  if (!(pstat & PSTATUS_SHARED)) {
-    part_ids_out[0] = proc_config().proc_rank();
-    if (remote_handles)
-      remote_handles[0] = entity;
-    num_part_ids_out = 1;
-    return MB_SUCCESS;
-  }
-  
-    // If entity is shared with one other proc, then
-    // sharedp_tag will contain a positive value.
-  result = mbImpl->tag_get_data( sharedp_tag(), &entity, 1, part_ids_out );
-  if (MB_SUCCESS != result)
-    return result;
-  if (part_ids_out[0] != -1) {
-    
-    num_part_ids_out = 2;
-    part_ids_out[1] = proc_config().proc_rank();
-
-      // done?
-    if (!remote_handles)
-      return MB_SUCCESS;
-      
-      // get handles on remote processors (and this one)
-    remote_handles[1] = entity;
-    return mbImpl->tag_get_data( sharedh_tag(), &entity, 1, remote_handles );
-  }
-  
-    // If here, then the entity is shared with at least two other processors.
-    // Get the list from the sharedps_tag
-  result = mbImpl->tag_get_data( sharedps_tag(), &entity, 1, part_ids_out );
-  if (MB_SUCCESS != result)
-    return result;
-    // Count number of valid (positive) entries in sharedps_tag
-  for (num_part_ids_out = 0; num_part_ids_out < MAX_SHARING_PROCS &&
-       part_ids_out[num_part_ids_out] >= 0; ++num_part_ids_out);
-  part_ids_out[num_part_ids_out++] = proc_config().proc_rank();
-  
-    // done?
-  if (!remote_handles)
-    return MB_SUCCESS;
-  
-    // get remote handles
-  result = mbImpl->tag_get_data( sharedhs_tag(), &entity, 1, remote_handles );
-  remote_handles[num_part_ids_out-1] = entity;
-  return result;
-}
-
-MBErrorCode MBParallelComm::exchange_all_shared_handles( shared_entity_map& result )
-{
   MBErrorCode rval;
   int ierr;
   const int tag = 0x4A41534E;
   const MPI_Comm comm = procConfig.proc_comm();
-  std::set<unsigned int> exch_procs;
-  rval = get_comm_procs(exch_procs);  
-  if (MB_SUCCESS != rval)
-    return rval;
-  const int num_proc = exch_procs.size();
+  const int num_proc = buffProcs.size();
   std::vector<MPI_Request> send_req(num_proc), recv_req(num_proc);
-  const std::vector<int> procs( exch_procs.begin(), exch_procs.end() );
+  const std::vector<int> procs( buffProcs.begin(), buffProcs.end() );
   
     // get all shared entities
   MBRange all_shared, dum_range;
@@ -4673,9 +4946,10 @@
     return rval;
   all_shared = all_shared.subtract(dum_range);
   all_shared.erase(all_shared.upper_bound(MBPOLYHEDRON), all_shared.end());
+  assert(sharedEnts == all_shared);
 
     // build up send buffers
-  shared_entity_map send_data;
+  std::vector<std::vector<SharedEntityData> > send_data(buffProcs.size());
   int ent_procs[MAX_SHARING_PROCS];
   MBEntityHandle handles[MAX_SHARING_PROCS];
   int num_sharing;
@@ -4685,13 +4959,16 @@
     rval = get_owner( *i, tmp.owner );
     if (MB_SUCCESS != rval)
       return rval;
-    
-    rval = get_sharing_parts( *i, ent_procs, num_sharing, handles );
+
+    unsigned char pstat;
+    rval = get_sharing_data( *i, ent_procs, handles, pstat, num_sharing );
     for (int j = 0; j < num_sharing; ++j) {
       if (ent_procs[j] == (int)proc_config().proc_rank())
         continue;
       tmp.local = handles[j];
-      send_data[ent_procs[j]].push_back( tmp );
+      int ind = get_buffers(ent_procs[j]);
+      assert(-1 != ind);
+      send_data[ind].push_back( tmp );
     }
   }
 
@@ -4705,8 +4982,8 @@
   
     // send sizes
   for (int i = 0; i < num_proc; ++i) {
-    sizes_send[i] = send_data[procs[i]].size();
-    ierr = MPI_Isend( &sizes_send[i], 1, MPI_INT, procs[i], tag, comm, &send_req[i] );
+    sizes_send[i] = send_data[i].size();
+    ierr = MPI_Isend( &sizes_send[i], 1, MPI_INT, buffProcs[i], tag, comm, &send_req[i] );
     if (ierr) 
       return MB_FILE_WRITE_ERROR;
   }
@@ -4724,21 +5001,21 @@
   
     // set up to receive data
   for (int i = 0; i < num_proc; ++i) {
-    result[procs[i]].resize( sizes_recv[i] );
-    ierr = MPI_Irecv( &result[procs[i]][0], 
+    result[i].resize( sizes_recv[i] );
+    ierr = MPI_Irecv( &result[i][0], 
                       sizeof(SharedEntityData)*sizes_recv[i], 
                       MPI_UNSIGNED_CHAR, 
-                      procs[i], tag, comm, &recv_req[i] );
+                      buffProcs[i], tag, comm, &recv_req[i] );
     if (ierr) 
       return MB_FILE_WRITE_ERROR;
   }
   
     // send data
   for (int i = 0; i < num_proc; ++i) {
-    ierr = MPI_Isend( &send_data[procs[i]][0], 
+    ierr = MPI_Isend( &send_data[i][0], 
                       sizeof(SharedEntityData)*sizes_send[i], 
                       MPI_UNSIGNED_CHAR, 
-                      procs[i], tag, comm, &send_req[i] );
+                      buffProcs[i], tag, comm, &send_req[i] );
     if (ierr) 
       return MB_FILE_WRITE_ERROR;
   }
@@ -4759,7 +5036,7 @@
 MBErrorCode MBParallelComm::check_all_shared_handles() 
 {
     // get all shared ent data from other procs
-  shared_entity_map shents;
+  std::vector<std::vector<SharedEntityData> > shents(buffProcs.size());
   MBErrorCode result;
   result = exchange_all_shared_handles(shents);
   if (MB_SUCCESS != result)
@@ -4780,13 +5057,11 @@
   all_shared.erase(all_shared.upper_bound(MBPOLYHEDRON), all_shared.end());
 
   MBRange bad_ents, local_shared;
-  for (shared_entity_map::iterator mit = shents.begin(); mit != shents.end(); mit++) {
-    int other_proc = (*mit).first;
-    int ind = get_buffers(other_proc);
-    if (-1 == ind) return MB_FAILURE;
+  std::vector<SharedEntityData>::iterator vit;
+  for (unsigned int i = 0; i < shents.size(); i++) {
+    int other_proc = buffProcs[i];
     local_shared = all_shared;
-    shared_entity_vec &shvec = (*mit).second;
-    for (shared_entity_vec::iterator vit = shvec.begin(); vit != shvec.end(); vit++) {
+    for (vit = shents[i].begin(); vit != shents[i].end(); vit++) {
       MBEntityHandle localh = vit->local, remoteh = vit->remote, dumh;
       local_shared.erase(localh);
       result = get_remote_handles(true, &localh, &dumh, 1, other_proc, dum_range);

Modified: MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp	2009-06-22 17:13:38 UTC (rev 2958)
+++ MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp	2009-06-22 17:55:39 UTC (rev 2959)
@@ -169,6 +169,15 @@
                                    bool store_remote_handles,
                                    bool wait_all = true);
 
+    /** \brief Static version of exchange_ghost_cells, exchanging info through
+     * buffers rather than messages
+     */
+  static MBErrorCode exchange_ghost_cells(MBParallelComm **pc,
+                                          unsigned int num_procs,
+                                          int ghost_dim, int bridge_dim,
+                                          int num_layers,
+                                          bool store_remote_handles);
+  
     /** \brief Exchange tags for all shared and ghosted entities
      * This function should be called collectively over the communicator for this MBParallelComm.
      * If this version is called, all ghosted/shared entities should have a value for this
@@ -246,6 +255,10 @@
                                   int resolve_dim = 3, 
                                   int shared_dim = -1);
   
+  static MBErrorCode resolve_shared_ents(MBParallelComm **pc, 
+                                         const unsigned int np, 
+                                         const int to_dim);
+  
     // ==================================
     // \section GET PARALLEL DATA (shared/owned/iface entities, etc.)
     // ==================================
@@ -390,11 +403,6 @@
   MBErrorCode get_owning_part( MBEntityHandle entity, 
                                int& owning_part_id_out,
                                MBEntityHandle* owning_handle = 0 );
-  MBErrorCode get_sharing_parts( MBEntityHandle entity, 
-                                 int part_ids_out[MAX_SHARING_PROCS],
-                                 int& num_part_ids_out,
-                                 MBEntityHandle remote_handles[MAX_SHARING_PROCS] = 0 );
-
     // Propogate mesh modification amongst shared entities
     // from the onwing processor to any procs with copies.
   MBErrorCode update_shared_mesh();
@@ -450,7 +458,8 @@
                             const bool store_remote_handles,
                             const int from_proc,
                             const int ind,
-                            std::vector<std::vector<MBEntityHandle> > &L1h,
+                            std::vector<std::vector<MBEntityHandle> > &L1hloc,
+                            std::vector<std::vector<MBEntityHandle> > &L1hrem,
                             std::vector<std::vector<int> > &L1p,
                             std::vector<MBEntityHandle> &L2hloc, 
                             std::vector<MBEntityHandle> &L2hrem,
@@ -463,14 +472,16 @@
                             const bool store_remote_handles,
                             const int to_proc,
                             const bool is_iface,
-                            std::vector<std::set<unsigned int> > *entprocs = NULL);
+                            std::vector<std::set<unsigned int> > *entprocs = NULL,
+                            MBRange *allsent = NULL);
 
     //! unpack entities in buff_ptr
   MBErrorCode unpack_entities(unsigned char *&buff_ptr,
                               const bool store_remote_handles,
                               const int from_ind,
                               const bool is_iface,
-                              std::vector<std::vector<MBEntityHandle> > &L1h,
+                              std::vector<std::vector<MBEntityHandle> > &L1hloc,
+                              std::vector<std::vector<MBEntityHandle> > &L1hrem,
                               std::vector<std::vector<int> > &L1p,
                               std::vector<MBEntityHandle> &L2hloc, 
                               std::vector<MBEntityHandle> &L2hrem,
@@ -502,7 +513,8 @@
     /* \brief Pack message with remote handles
      * PUBLIC ONLY FOR TESTING!
      */
-  MBErrorCode pack_remote_handles(std::vector<MBEntityHandle> &entities,
+  MBErrorCode pack_remote_handles(std::vector<MBEntityHandle> &L1hloc,
+                                  std::vector<MBEntityHandle> &L1hrem,
                                   std::vector<int> &procs,
                                   unsigned int to_proc,
                                   std::vector<unsigned char> &buff,
@@ -514,6 +526,12 @@
   
 private:
 
+  MBErrorCode get_sent_ents(const bool is_iface,
+                            const int bridge_dim, const int ghost_dim,
+                            const int num_layers,
+                            MBRange *sent_ents, MBRange &allsent,
+                            std::vector<std::set<unsigned int> > &entprocs);
+  
     /** \brief Set pstatus values on entities
      *
      * \param pstatus_ents Entities to be set
@@ -771,7 +789,6 @@
   
   MBErrorCode tag_shared_ents(int resolve_dim,
                               int shared_dim,
-                              tuple_list &shared_verts,
                               MBRange *skin_ents,
                               std::map<std::vector<int>, MBRange> &proc_nranges);
 
@@ -781,9 +798,11 @@
     // returned; NOTE: a subsequent step is used to verify entities on the interface
     // and remove them if they're not shared
   MBErrorCode create_interface_sets(std::map<std::vector<int>, MBRange> &proc_nranges,
-                                    MBEntityHandle this_set,
                                     int resolve_dim, int shared_dim);
 
+    // do the same but working straight from sharedEnts
+  MBErrorCode create_interface_sets(int resolve_dim, int shared_dim);
+
     // after verifying shared entities, now parent/child links between sets can be established
   MBErrorCode create_iface_pc_links();
   
@@ -819,19 +838,11 @@
     int owner;
   };
 
-  typedef std::vector< SharedEntityData > shared_entity_vec;
-
-  //! Map indexed by processor ID and containing, for each processor ID,
-  //! a list of <local,remote> handle pairs, where the local handle is
-  //! the handle on this processor and the remove handle is the handle on
-  //! the processor ID indicated by the map index.
-  typedef std::map< int, shared_entity_vec > shared_entity_map;
-
   //! Every processor sends shared entity handle data to every other processor
   //! that it shares entities with.  Passed back map is all received data,
   //! indexed by processor ID. This function is intended to be used for 
   //! debugging.
-  MBErrorCode exchange_all_shared_handles( shared_entity_map& result );
+  MBErrorCode exchange_all_shared_handles(std::vector<std::vector<SharedEntityData> > &result);
   
     //! replace handles in from_vec with corresponding handles on
     //! to_proc (by checking shared[p/h]_tag and shared[p/h]s_tag;
@@ -865,14 +876,18 @@
     //! new_ents value at index corresponding to id of entity in from_vec
   MBErrorCode get_local_handles(MBEntityHandle *from_vec, 
                                 int num_ents,
-                                const MBRange &new_ents,
-                                std::vector<MBEntityHandle> *no_ents = NULL);
+                                const MBRange &new_ents);
 
     //! same as above except puts results in range
   MBErrorCode get_local_handles(const MBRange &remote_handles,
                                 MBRange &local_handles,
                                 const MBRange &new_ents);
   
+    //! same as above except gets new_ents from vector
+  MBErrorCode get_local_handles(MBEntityHandle *from_vec,
+                                int num_ents,
+                                const std::vector<MBEntityHandle> &new_ents);
+  
   MBErrorCode update_remote_data(MBRange &local_range,
                                  MBRange &remote_range,
                                  int other_proc,

Modified: MOAB/branches/parallel_ghosting/parallel/parallel_unit_tests.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/parallel_unit_tests.cpp	2009-06-22 17:13:38 UTC (rev 2958)
+++ MOAB/branches/parallel_ghosting/parallel/parallel_unit_tests.cpp	2009-06-22 17:55:39 UTC (rev 2959)
@@ -484,6 +484,22 @@
                          "PARALLEL_RESOLVE_SHARED_ENTS" );
   CHKERR(rval);
   
+    // test contents of interface sets against sharedEnts structure in pcomm;
+  int my_error = 0;
+  MBParallelComm* pcomm = MBParallelComm::get_pcomm(&moab, 0);
+  rval = pcomm->check_all_shared_handles();
+  if (MB_SUCCESS != rval) {
+    my_error = 1;
+    std::cerr << "check_all_shared_handles test failed on proc " 
+              << pcomm->proc_config().proc_rank() << std::endl;
+  }
+  PCHECK(!my_error);
+  
+    // check adjacencies just to make sure they're consistent
+  rval = mb_instance.check_adjacencies();
+  if (MB_SUCCESS != rval) my_error = 1;
+  PCHECK(!my_error);
+
   MBTag geom_tag, id_tag;
   rval = moab.tag_get_handle( GEOM_DIMENSION_TAG_NAME, geom_tag ); CHKERR(rval);
   rval = moab.tag_get_handle( GLOBAL_ID_TAG_NAME, id_tag ); CHKERR(rval);  
@@ -514,7 +530,6 @@
     // if the vertices owned by any geometric entity do not
     // have consistent shared processor ids, list the geometric
     // entities and return failure.
-  int my_error = 0;
   if (!invalid_proc_ents.empty()) {
     my_error = 1;
     std::cerr << "Vertices owned by a single geometric entity are "
@@ -582,21 +597,6 @@
   }
   PCHECK(!my_error);
 
-    // test contents of interface sets against sharedEnts structure in pcomm;
-  MBParallelComm* pcomm = MBParallelComm::get_pcomm(&moab, 0);
-  rval = pcomm->check_all_shared_handles();
-  if (MB_SUCCESS != rval) {
-    my_error = 1;
-    std::cerr << "check_all_shared_handles test failed on proc " 
-              << pcomm->proc_config().proc_rank() << std::endl;
-  }
-  PCHECK(!my_error);
-  
-    // finally, check adjacencies just to make sure they're consistent
-  rval = mb_instance.check_adjacencies();
-  if (MB_SUCCESS != rval) my_error = 1;
-  PCHECK(!my_error);
-
   return MB_SUCCESS;
 }
 
@@ -1337,8 +1337,9 @@
   MBRange::iterator it = vertices.begin();
   for (int idx = 0; it != vertices.end(); ++it, ++idx) {
     int n;
-    rval = pcomm.get_sharing_parts( *it, &vert_shared[idx*MAX_SHARING_PROCS],
-                                    n, &vert_handles[idx*MAX_SHARING_PROCS] );
+    unsigned char pstat;
+    rval = pcomm.get_sharing_data( *it, &vert_shared[idx*MAX_SHARING_PROCS],
+                                   &vert_handles[idx*MAX_SHARING_PROCS], pstat, n );
     if (MB_SUCCESS != rval)
       break;
     std::fill( vert_shared.begin() + idx*MAX_SHARING_PROCS + n,

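[Editorial note] For reference, the per-entity call pattern used in the hunk above, pulled out on its own. The parameter meanings (sharing ranks, corresponding remote handles, pstatus bits, count) are inferred from that call site; `pcomm` and `vert` are placeholders.

    // query sharing data for a single entity; output arrays are bounded
    // by MAX_SHARING_PROCS
  int sharing_procs[MAX_SHARING_PROCS];
  MBEntityHandle sharing_handles[MAX_SHARING_PROCS];
  unsigned char pstat;
  int num_sharing;
  MBErrorCode rval = pcomm.get_sharing_data( vert, sharing_procs,
                                             sharing_handles, pstat, num_sharing );
  if (MB_SUCCESS == rval) {
      // sharing_procs[0..num_sharing-1] are the ranks sharing vert,
      // sharing_handles[n] is vert's handle on sharing_procs[n], and
      // pstat holds the entity's PSTATUS_* bits
  }
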
Modified: MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp	2009-06-22 17:13:38 UTC (rev 2958)
+++ MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp	2009-06-22 17:55:39 UTC (rev 2959)
@@ -38,8 +38,10 @@
 void test_pack_variable_length_tag();
 /** Test pack/unpack tag values*/
 void test_pack_tag_handle_data();
-/** Test pack/unpack of shared entities*/
-void test_pack_shared_entities();
+/** Test pack/unpack of shared entities in 2d*/
+void test_pack_shared_entities_2d();
+/** Test pack/unpack of shared entities in 3d*/
+void test_pack_shared_entities_3d();
 /** Test filter_pstatus function*/
 void test_filter_pstatus();
 
@@ -64,7 +66,8 @@
   //num_err += RUN_TEST( test_pack_bit_tag_data );
   num_err += RUN_TEST( test_pack_variable_length_tag );
   num_err += RUN_TEST( test_pack_tag_handle_data );
-  num_err += RUN_TEST( test_pack_shared_entities );
+  num_err += RUN_TEST( test_pack_shared_entities_2d );
+  num_err += RUN_TEST( test_pack_shared_entities_3d );
   num_err += RUN_TEST( test_filter_pstatus );
   
 #ifdef USE_MPI
@@ -105,11 +108,11 @@
   pcomm = new MBParallelComm( &moab);
   
   entities.clear();
-  std::vector<std::vector<MBEntityHandle> > L1h;
+  std::vector<std::vector<MBEntityHandle> > L1hloc, L1hrem;
   std::vector<std::vector<int> > L1p;
   std::vector<MBEntityHandle> L2hloc, L2hrem;
   std::vector<unsigned int> L2p;
-  rval = pcomm->unpack_buffer( &buff[0], false, -1, -1, L1h, L1p, L2hloc, 
+  rval = pcomm->unpack_buffer( &buff[0], false, -1, -1, L1hloc, L1hrem, L1p, L2hloc, 
                                L2hrem, L2p, entities);
   CHECK_ERR(rval);
 
@@ -310,7 +313,7 @@
   int dum_default = -1;
   result = moab->tag_create(GLOBAL_ID_TAG_NAME, sizeof(int), MB_TAG_DENSE,
                            MB_TYPE_INTEGER, gid_tag, &dum_default, true);
-  if (MB_SUCCESS != result) return result;
+  if (MB_SUCCESS != result && MB_ALREADY_ALLOCATED != result) return result;
   result = moab->tag_set_data(gid_tag, verts, gids);
   if (MB_SUCCESS != result) return result;
 
@@ -391,7 +394,7 @@
   return MB_SUCCESS;
 }
 
-MBErrorCode create_shared_grid(MBParallelComm **pc, MBRange *verts, MBRange *quads) 
+MBErrorCode create_shared_grid_2d(MBParallelComm **pc, MBRange *verts, MBRange *quads) 
 {
 //          
 //        P2______
@@ -442,11 +445,105 @@
     create_patch(pc[i]->get_moab(), verts[i], quads[i], 3, xyztmp, &gids[9*i]);
   }
 
-  MBErrorCode rval = set_owners(pc, verts, quads, false);
+  MBErrorCode rval = MBParallelComm::resolve_shared_ents(pc, 4, 2);
   CHECK_ERR(rval);
+
   return rval;
 }
 
+MBErrorCode create_shared_grid_3d(MBParallelComm **pc, MBRange *verts, MBRange *hexes) 
+{
+//    
+//   4 _____   _____ 
+//    |  |  | |  |  |
+//   3|__|__| |__|__|
+//    |P1|  | |P2|  |
+//    |__|__| |__|__|
+//    2 ___________         P3 - k = 2..4
+//     |  |  |  |  | 
+// /  1|__|__|__|__|         
+// |   |  |P0|  |  |         
+// J  0|__|__|__|__|        
+// I-> 0  1  2  3  4
+  
+    // create structured meshes
+    // ijkmin[p][ijk], ijkmax[p][ijk]
+#define P 4
+  int ijkmin[P][3] = { {0, 0, 0}, {0, 2, 0}, {2, 2, 0}, {0, 0, 2}};
+  int ijkmax[P][3] = { {4, 2, 2}, {2, 4, 2}, {4, 4, 2}, {4, 4, 4}};
+
+  int nijk[P][3];
+  int NIJK[3] = {0, 0, 0};
+#define INDEX(i, j, k) ((k) * NIJK[1] * NIJK[0] + (j) * NIJK[0] + (i))
+    // index of vertex (i, j, k) within proc p's locally created vertex block
+#define LINDEX(p, i, j, k) (((k) - ijkmin[p][2]) * nijk[p][1] * nijk[p][0] + ((j) - ijkmin[p][1]) * nijk[p][0] + ((i) - ijkmin[p][0]))
+
+  int p, i, j, k;
+  for (p = 0; p < P; p++) {
+    for (i = 0; i < 3; i++) {
+      nijk[p][i] =  ijkmax[p][i] - ijkmin[p][i] + 1;
+      NIJK[i] = std::max(NIJK[i], nijk[p][i]);
+    }
+  }
+    
+  std::vector<int> gids;
+  std::vector<double> xyz;
+  MBErrorCode rval;
+  MBTag gid_tag;
+  int dum_default = -1;
+
+  for (p = 0; p < P; p++) {
+    rval = pc[p]->get_moab()->tag_create(GLOBAL_ID_TAG_NAME, 
+                                         sizeof(int), MB_TAG_DENSE,
+                                         MB_TYPE_INTEGER, gid_tag, 
+                                         &dum_default, true);
+    if (MB_SUCCESS != rval && MB_ALREADY_ALLOCATED != rval) return rval;
+
+      // make vertices; coordinates are just (i, j, k)
+    int nverts = nijk[p][0] * nijk[p][1] * nijk[p][2];
+    xyz.resize(3*nverts);
+    gids.resize(nverts);
+
+      // fill coordinates and gids in the same (k, j, i) order used for
+      // local indexing below
+    int nv = 0;
+    for (k = ijkmin[p][2]; k <= ijkmax[p][2]; k++) 
+      for (j = ijkmin[p][1]; j <= ijkmax[p][1]; j++) 
+        for (i = ijkmin[p][0]; i <= ijkmax[p][0]; i++) {
+          xyz[3*nv] = i; xyz[3*nv+1] = j; xyz[3*nv+2] = k;
+          gids[nv++] = INDEX(i, j, k);
+        }
+
+    rval = pc[p]->get_moab()->create_vertices(&xyz[0], nverts, verts[p]);
+    CHECK_ERR(rval);
+
+      // set vertex gids
+    rval = pc[p]->get_moab()->tag_set_data(gid_tag, verts[p], &gids[0]);
+    if (MB_SUCCESS != rval) return rval;
+
+      // make elements; connectivity indexes this proc's local vertex block
+    nv = 0;
+    MBEntityHandle connect[8], dum_hex;
+    for (k = ijkmin[p][2]; k < ijkmax[p][2]; k++) 
+      for (j = ijkmin[p][1]; j < ijkmax[p][1]; j++) 
+        for (i = ijkmin[p][0]; i < ijkmax[p][0]; i++) {
+          connect[0] = verts[p][LINDEX(p, i, j, k)];
+          connect[1] = verts[p][LINDEX(p, i+1, j, k)];
+          connect[2] = verts[p][LINDEX(p, i+1, j+1, k)];
+          connect[3] = verts[p][LINDEX(p, i, j+1, k)];
+          connect[4] = verts[p][LINDEX(p, i, j, k+1)];
+          connect[5] = verts[p][LINDEX(p, i+1, j, k+1)];
+          connect[6] = verts[p][LINDEX(p, i+1, j+1, k+1)];
+          connect[7] = verts[p][LINDEX(p, i, j+1, k+1)];
+          rval = pc[p]->get_moab()->create_element(MBHEX, connect, 8, dum_hex);
+          if (MB_SUCCESS != rval) return rval;
+          hexes[p].insert(dum_hex);
+            // gid of the hex is the gid of its (i, j, k) corner vertex
+          gids[nv++] = INDEX(i, j, k);
+        }
+      // set hex gids
+    rval = pc[p]->get_moab()->tag_set_data(gid_tag, hexes[p], &gids[0]);
+    if (MB_SUCCESS != rval) return rval;
+  }
+  
+  rval = MBParallelComm::resolve_shared_ents(pc, 4, 3);
+  CHECK_ERR(rval);
+  return rval;
+}
+
 void test_pack_vertices()
 {
   MBCore moab;
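[Editorial note] A note on the structured-grid index arithmetic in create_shared_grid_3d above: global ids are computed over the full grid, while connectivity must index each processor's locally created vertex block. Spelled out as standalone helpers (names here are illustrative only, mirroring the INDEX/LINDEX macros):

    // global id of vertex (i, j, k), with NIJK[] the largest per-processor
    // block dimensions used as the id stride
  inline int global_id(int i, int j, int k, const int NIJK[3])
    { return k * NIJK[1] * NIJK[0] + j * NIJK[0] + i; }

    // index of vertex (i, j, k) within one processor's block, whose lower
    // corner is ijkmin[] and whose per-dimension vertex counts are nijk[]
  inline int local_index(int i, int j, int k,
                         const int ijkmin[3], const int nijk[3])
    { return (k - ijkmin[2]) * nijk[1] * nijk[0]
           + (j - ijkmin[1]) * nijk[0]
           + (i - ijkmin[0]); }
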
@@ -1282,6 +1379,7 @@
                         MB_TYPE_INTEGER,
                         sparse_2_int_tag,
                         0 );
+  if (MB_ALREADY_ALLOCATED == rval) rval = MB_SUCCESS;
   CHECK_ERR(rval);
   bool skip = false;
   for (i = elems.begin(); i != elems.end(); ++i, skip = !skip) {
@@ -1825,275 +1923,50 @@
   return MB_SUCCESS;
 }
   
-void test_pack_shared_entities()
+void test_pack_shared_entities_2d()
 {
   MBCore moab[4];
-  MBInterface *mb[] = {&moab[0], &moab[1], &moab[2], &moab[3]};
   MBParallelComm *pc[4];
   for (unsigned int i = 0; i < 4; i++) {
     pc[i] = new MBParallelComm(&moab[i]);
     pc[i]->set_rank(i);
-    for (unsigned int j = 0; j < 4; j++) {
-      if (j == i) continue;
-      else pc[i]->get_buffers(j);
-    }
   }
 
   MBRange verts[4], quads[4];
-  MBErrorCode rval = create_shared_grid(pc, verts, quads);
+  MBErrorCode rval = create_shared_grid_2d(pc, verts, quads);
+  CHECK_ERR(rval);
 
-  std::vector<std::vector<MBEntityHandle> > L1h[4];
-  std::vector<std::vector<int> > L1p[4];
-  for (unsigned int i = 0; i < 4; i++) {
-    L1h[i].resize(4);
-    L1p[i].resize(4);
-  }
-  
-  std::vector<MBEntityHandle> L2hloc[4];
-  std::vector<MBEntityHandle> L2hrem[4];
-  std::vector<unsigned int> L2p[4];
-  MBRange new_ents[4];
-  MBRange sent_ents;
-  std::vector<MBEntityHandle> ent_verts, dum_handles;
-  std::vector<int> dum_procs;
-  std::vector<std::set<unsigned int> > entprocs(4);
-  std::vector<unsigned char> buffer;
-  buffer.reserve(1);
-  unsigned char *buff_ptr = &buffer[0];
-
-    //========================
-    // interface, shared, 0->2
-    // get edges that we want
-  ent_verts.push_back(verts[0][6]); ent_verts.push_back(verts[0][7]);
-  ent_verts.push_back(verts[0][7]); ent_verts.push_back(verts[0][8]);
-  rval = get_entities(mb[0], ent_verts, 2, 1, sent_ents);
+    // exchange interface cells
+  rval = MBParallelComm::exchange_ghost_cells(pc, 4, -1, -1, 0, true);
   CHECK_ERR(rval);
-  assert(2 == sent_ents.size());
-    // set entprocs
-  entprocs[0].insert(2); entprocs[1].insert(2);
   
-  rval = pc[0]->pack_entities(sent_ents, buffer, buff_ptr, true, 2,
-                              true, &entprocs);
+    // now 1 layer of quad ghosts
+  rval = MBParallelComm::exchange_ghost_cells(pc, 4, 2, 0, 1, true);
   CHECK_ERR(rval);
+}
 
-    // now unpack the buffer
-  buff_ptr = &buffer[0];
-  rval = pc[2]->unpack_entities(buff_ptr, true, 0, true,
-                                L1h[2], L1p[2], L2hloc[2], L2hrem[2], L2p[2], new_ents[2]);
-    // all L1h lists should be empty, since we're dealing with iface
-  if (!L1h[2][0].empty() || !L1h[2][1].empty() || 
-      !L1h[2][2].empty() || !L1h[2][3].empty() || 
-      !L2p[2].empty() || !new_ents[2].empty()) rval = MB_FAILURE;
-  CHECK_ERR(rval);
+void test_pack_shared_entities_3d()
+{
+  MBCore moab[4];
+  MBParallelComm *pc[4];
+  for (unsigned int i = 0; i < 4; i++) {
+    pc[i] = new MBParallelComm(&moab[i]);
+    pc[i]->set_rank(i);
+    for (unsigned int j = 0; j < 4; j++) {
+      if (j == i) continue;
+      else pc[i]->get_buffers(j);
+    }
+  }
 
-  buffer.clear(); buff_ptr = &buffer[0];
-  ent_verts.clear(); sent_ents.clear();
+  MBRange verts[4], hexes[4];
+  MBErrorCode rval = create_shared_grid_3d(pc, verts, hexes);
+  CHECK_ERR(rval);
 
-    //========================
-    // interface, multishared, 1st message, 0->1
-  ent_verts.push_back(verts[0][8]); ent_verts.push_back(verts[0][5]);
-  ent_verts.push_back(verts[0][5]); ent_verts.push_back(verts[0][2]);
-  rval = get_entities(mb[0], ent_verts, 2, 1, sent_ents);
+    // exchange interface cells
+  rval = MBParallelComm::exchange_ghost_cells(pc, 4, -1, -1, 0, true);
   CHECK_ERR(rval);
-  assert(2 == sent_ents.size());
-    // sending these edges to 1 and 3
-  entprocs[0].insert(1); entprocs[0].insert(3);
-  entprocs[1].insert(1); entprocs[1].insert(3);
-
-  rval = pc[0]->pack_entities(sent_ents, buffer, buff_ptr, true, 1,
-                              true, &entprocs);
-  CHECK_ERR(rval);
-
-    // now unpack the buffer
-  buff_ptr = &buffer[0];
-  rval = pc[1]->unpack_entities(buff_ptr, true, 0, true,
-                                L1h[1], L1p[1], L2hloc[1], L2hrem[1], L2p[1], new_ents[1]);
-    // all L1h lists should be empty, since we're dealing with iface
-  if (!L1h[1][0].empty() || !L1h[1][1].empty() || 
-      !L1h[1][2].empty() || !L1h[1][3].empty() || 
-      !L2p[1].empty() || !new_ents[1].empty()) rval = MB_FAILURE;
-  CHECK_ERR(rval);
-
-  entprocs[0].clear(); entprocs[1].clear();
-  buffer.clear(); buff_ptr = &buffer[0];
-  ent_verts.clear(); sent_ents.clear();
   
-    //========================
-    // interface, multishared, 2nd message, 3->1
-    // sending these edges to 0 and 1
-  ent_verts.push_back(verts[3][6]); ent_verts.push_back(verts[3][3]);
-  ent_verts.push_back(verts[3][3]); ent_verts.push_back(verts[3][0]);
-  rval = get_entities(mb[3], ent_verts, 2, 1, sent_ents);
+    // now 1 layer of hex ghosts
+  rval = MBParallelComm::exchange_ghost_cells(pc, 4, 3, 0, 1, true);
   CHECK_ERR(rval);
-  assert(2 == sent_ents.size());
-  entprocs[0].insert(0); entprocs[0].insert(1);
-  entprocs[1].insert(0); entprocs[1].insert(1);
-  rval = pc[3]->pack_entities(sent_ents, buffer, buff_ptr, true, 1,
-                              true, &entprocs);
-  CHECK_ERR(rval);
-
-    // now unpack the buffer
-  buff_ptr = &buffer[0];
-  rval = pc[1]->unpack_entities(buff_ptr, true, 3, true,
-                                L1h[1], L1p[1], L2hloc[1], L2hrem[1], L2p[1], new_ents[1]);
-    // all L1h lists should be empty, since we're dealing with iface
-  if (!L1h[1][0].empty() || !L1h[1][1].empty() || 
-      !L1h[1][2].empty() || !L1h[1][3].empty())
-    rval = MB_FAILURE;
-  CHECK_ERR(rval);
-
-  ent_verts.clear(); sent_ents.clear();
-  entprocs[0].clear(); entprocs[1].clear();
-  buffer.clear(); buff_ptr = &buffer[0];
-  
-    //========================================================================
-    // prepare for ghost communication; set all iface entities ownership
-    // stuff, so we don't need to do it explicitly using pack/unpack;
-    // passing true makes it happen for edges too
-  rval = set_owners(pc, verts, quads, true);
-  CHECK_ERR(rval);
-
-    //========================
-    // ghost, unshared, 2->1
-  sent_ents.insert(quads[2][3]);  
-    // add vertices not already shared
-  sent_ents.insert(verts[2][4]); 
-  sent_ents.insert(verts[2][7]); 
-    // entprocs lists are all just 1
-  entprocs.resize(sent_ents.size());
-  entprocs[0].insert(1); 
-  entprocs[1].insert(1); 
-  entprocs[2].insert(1); 
-
-  rval = pc[2]->pack_entities(sent_ents, buffer, buff_ptr, true, 1,
-                              true, &entprocs);
-  CHECK_ERR(rval);
-
-    // now unpack the buffer
-  buff_ptr = &buffer[0];
-  rval = pc[1]->unpack_entities(buff_ptr, true, 2, false,
-                                L1h[1], L1p[1], L2hloc[1], L2hrem[1], L2p[1], new_ents[1]);
-  if (
-        // to P2: 2 handles per 3 entities = 6
-      L1h[1][1].size() != 6 || 
-        // to all others, 0
-      !L1h[1][0].empty() || !L1h[1][2].empty() || !L1h[1][3].empty()) 
-    rval = MB_FAILURE;
-  CHECK_ERR(rval);
-
-  buffer.clear(); buff_ptr = &buffer[0];
-  rval = pc[1]->pack_remote_handles(L1h[1][1], L1p[1][1], 2, buffer, buff_ptr);
-  CHECK_ERR(rval);
-
-  buff_ptr = &buffer[0];
-  rval = pc[2]->unpack_remote_handles(1, buff_ptr, L2hloc[2], L2hrem[2], L2p[2]);
-  CHECK_ERR(rval);
-
-  ent_verts.clear(); sent_ents.clear();
-  entprocs[0].clear(); entprocs[1].clear(); entprocs[2].clear();
-  buffer.clear(); buff_ptr = &buffer[0]; L1h[1][1].clear();
-
-    //========================
-    // ghost, multishared, 1st message, 2->0
-    // sent: v3, v4, v5, q0, q1
-  sent_ents.insert(quads[2][0]); sent_ents.insert(quads[2][1]);  
-    // add vertices not already shared
-  sent_ents.insert(verts[2][3]); sent_ents.insert(verts[2][4]); 
-  sent_ents.insert(verts[2][5]); 
-    // entprocs lists are entity-dependent
-    // v3, v4, v5, q1: only to 0 (v4, v5 already on 1)
-  entprocs.resize(sent_ents.size());
-  entprocs[0].insert(0); 
-  entprocs[1].insert(0); 
-  entprocs[2].insert(0); 
-  entprocs[3].insert(0); 
-    // q2: P0 and P1
-  entprocs[4].insert(0); entprocs[4].insert(1); 
-
-  rval = pc[2]->pack_entities(sent_ents, buffer, buff_ptr, true, 0,
-                              true, &entprocs);
-  CHECK_ERR(rval);
-
-    // now unpack the buffer
-  buff_ptr = &buffer[0];
-  rval = pc[0]->unpack_entities(buff_ptr, true, 2, false,
-                                L1h[0], L1p[0], L2hloc[0], L2hrem[0], L2p[0], new_ents[0]);
-  if (
-        // 2 handles per 5 entities = 10
-      L1h[0][1].size() != 10 || 
-        // 2 handles per 3 entities
-      L1h[0][0].size() != 6 || 
-        // none received from 3, no 4th proc
-      !L1h[0][2].empty() || !L1h[0][3].empty()) 
-    rval = MB_FAILURE;
-
-  CHECK_ERR(rval);
-
-  ent_verts.clear(); sent_ents.clear();
-  for (int i = 0; i < 5; i++) entprocs[i].clear();
-  L1p[0][1].clear();  L1h[0][1].clear();
-  L1p[0][0].clear();  L1h[0][0].clear();
-  buffer.clear(); buff_ptr = &buffer[0];
-
-    //========================
-    // ghost, multishared, 2nd message, 1->0
-    // sent: v1, v4, v7, q0, q2
-  sent_ents.insert(quads[1][0]); sent_ents.insert(quads[1][2]);
-    // add vertices not already shared
-  sent_ents.insert(verts[1][1]); sent_ents.insert(verts[1][4]); 
-  sent_ents.insert(verts[1][7]); 
-    // entprocs lists are entity-dependent
-    // v1, q0: only to 0
-  entprocs.resize(sent_ents.size());
-  entprocs[0].insert(0); 
-  entprocs[2].insert(0);
-  entprocs[3].insert(0); 
-    // v4, v7, q2: P0 and P2
-  entprocs[1].insert(0); entprocs[1].insert(2); 
-  entprocs[4].insert(0); entprocs[4].insert(2); 
-
-  rval = pc[1]->pack_entities(sent_ents, buffer, buff_ptr, true, 0,
-                              true, &entprocs);
-  CHECK_ERR(rval);
-
-    // now unpack the buffer
-  buff_ptr = &buffer[0];
-  rval = pc[0]->unpack_entities(buff_ptr, true, 1, false,
-                                L1h[0], L1p[0], L2hloc[0], L2hrem[0], L2p[0], new_ents[0]);
-  if (
-        // 2 handles per 5 entities = 10
-      L1h[0][0].size() != 10 || 
-        // 2 handles per 3 entities
-      L1h[0][1].size() != 6 || 
-        // none received from 3, no 4th proc
-      !L1h[0][2].empty() || !L1h[0][3].empty()) 
-    rval = MB_FAILURE;
-  CHECK_ERR(rval);
-
-    //========================
-    // now pack/unpack the handles
-    // P0 -> P2
-    // xxx moved pack/unpack handles to after P2, P1 send
-  buffer.clear(); buff_ptr = &buffer[0];
-  rval = pc[0]->pack_remote_handles(L1h[0][1], L1p[0][1], 2, buffer, buff_ptr);
-  CHECK_ERR(rval);
-
-  rval = pc[2]->unpack_remote_handles(0, buff_ptr, L2hloc[2], L2hrem[2], L2p[2]);
-  CHECK_ERR(rval);
-
-    // P0 -> P1
-  buffer.clear(); buff_ptr = &buffer[0];
-  rval = pc[0]->pack_remote_handles(L1h[0][0], L1p[0][0], 1, buffer, buff_ptr);
-  CHECK_ERR(rval);
-
-  rval = pc[1]->unpack_remote_handles(0, buff_ptr, L2hloc[1], L2hrem[1], L2p[1]);
-  CHECK_ERR(rval);
-
-  ent_verts.clear(); sent_ents.clear();
-  for (int i = 0; i < 5; i++) entprocs[i].clear();
-  L1p[0][1].clear();  L1h[0][1].clear();
-  buffer.clear(); buff_ptr = &buffer[0];
-
 }
 
 void test_filter_pstatus()


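[Editorial note] Finally, the serial driver pattern exercised by the new 2d/3d tests, reduced to its essentials. The parameter order assumed here for the static variants (number of pcomm's, ghost dimension, bridge dimension, number of layers, store remote handles) follows the calls in the tests above, with ghost dimension -1 and 0 layers meaning an interface-only exchange; the two-instance setup is a sketch, not part of the patch.

    // two in-memory "procs"; mesh creation omitted
  MBCore moab[2];
  MBParallelComm *pc[2] = { new MBParallelComm(&moab[0]),
                            new MBParallelComm(&moab[1]) };
  pc[0]->set_rank(0); pc[1]->set_rank(1);

    // ... create each instance's local mesh and global ids here ...

    // resolve shared entities across the two instances (2d mesh assumed)
  MBErrorCode rval = MBParallelComm::resolve_shared_ents(pc, 2, 2);

    // exchange interface entities only, then one layer of face ghosts
    // bridged through vertices
  if (MB_SUCCESS == rval)
    rval = MBParallelComm::exchange_ghost_cells(pc, 2, -1, -1, 0, true);
  if (MB_SUCCESS == rval)
    rval = MBParallelComm::exchange_ghost_cells(pc, 2, 2, 0, 1, true);

  delete pc[0]; delete pc[1];
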
