[MOAB-dev] r2938 - MOAB/branches/parallel_ghosting/parallel

tautges at mcs.anl.gov tautges at mcs.anl.gov
Tue Jun 9 08:55:33 CDT 2009


Author: tautges
Date: 2009-06-09 08:55:32 -0500 (Tue, 09 Jun 2009)
New Revision: 2938

Modified:
   MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp
   MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp
   MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp
Log:
These changes make pcomm_unit run correctly, even for tricky ghosting
cases.

- added L1p list to unpack args, for cases where proc doesn't yet know
handle on destination proc (e.g. when one proc sends a new entity to 
2 or more procs, and those procs need to exchange handles for the new entity)
- added MBParallelComm::list_entities, which calls the MOAB version of the
function then also prints sharing info for the entities
- implemented ghost exchange function testing to pcomm_unit, including
packing/unpacking of multishared entities and remote handles
- consolidated some code to use get_sharing_data then process the data
- added new handle and ghost status to sharing data for new multishared entities
- find_existing_entity changed to take single proc/handle instead of lists
- corrected pack/unpack_remote_handles to also pass the proc list; also
implemented unpacking for cases indexed by owner handle



Modified: MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp	2009-06-09 02:00:08 UTC (rev 2937)
+++ MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp	2009-06-09 13:55:32 UTC (rev 2938)
@@ -431,10 +431,11 @@
 
   if ((int)procConfig.proc_rank() != from_proc) {
     std::vector<std::vector<MBEntityHandle> > dum1;
+    std::vector<std::vector<int> > dum1p;
     std::vector<MBEntityHandle> dum2;
     std::vector<unsigned int> dum3;
     result = unpack_buffer(&buff[0], false, from_proc, -1, 
-                           dum1, dum2, dum2, dum3, entities);
+                           dum1, dum1p, dum2, dum2, dum3, entities);
     RRA("Failed to unpack buffer in broadcast_entities.");
   }
 
@@ -501,6 +502,7 @@
                                           const int from_proc,
                                           const int ind,
                                           std::vector<std::vector<MBEntityHandle> > &L1h,
+                                          std::vector<std::vector<int> > &L1p,
                                           std::vector<MBEntityHandle> &L2hloc, 
                                           std::vector<MBEntityHandle> &L2hrem,
                                           std::vector<unsigned int> &L2p,
@@ -513,7 +515,7 @@
 #endif  
     MBErrorCode result;
     result = unpack_entities(buff_ptr, store_remote_handles,
-                             ind, false, L1h, L2hloc, L2hrem, L2p, new_ents);
+                             ind, false, L1h, L1p, L2hloc, L2hrem, L2p, new_ents);
   RRA("Unpacking entities failed.");
 #ifdef DEBUG_PACKING
     std::cerr << "unpack_entities buffer space: " << buff_ptr - tmp_buff << " bytes." << std::endl;
@@ -796,44 +798,35 @@
                                                  int *tmp_procs,
                                                  MBEntityHandle *tmp_handles)
 {
-  MBErrorCode result = MB_SUCCESS;
-  MBEntityHandle zeroh = 0, sharedh;
+  num_ents = 0;
+  unsigned char pstat;
+  MBErrorCode result = get_sharing_data(entity, tmp_procs, tmp_handles,
+                                        pstat, num_ents);
+  RRA("Failed in get_sharing_data.");
+  assert(pstat == pstatus);
   
     // build shared proc/handle lists
-  num_ents = 0;
     // start with multi-shared, since if it is the owner will be first
   if (pstatus & PSTATUS_MULTISHARED) {
-    result = mbImpl->tag_get_data(sharedps_tag(), &entity, 1, tmp_procs);
-    RRA("Failed to get sharedps tag.");
-    result = mbImpl->tag_get_data(sharedhs_tag(), &entity, 1, tmp_handles);
-    RRA("Failed to get sharedhs tag.");
-    assert(-1 != tmp_procs[2] && 0 != tmp_handles[2] &&
-           "Shared procs/handles should have at least 3 non-zeros for multi-shared ents");
-    int j = 3;
-      // scan forward to first unoccupied shared proc or end of shared proc tag
-    while (j < MAX_SHARING_PROCS && tmp_procs[j] != -1)
-      j++;
-    num_ents += j;
-    assert(num_ents < MAX_SHARING_PROCS);
   }
   else if (pstatus & PSTATUS_NOT_OWNED) {
       // if not multishared and not owned, other sharing proc is owner, put that
       // one first
     assert("If not owned, I should be shared too" &&
-           pstatus & PSTATUS_SHARED);
-    tmp_procs[0] = sharedp; 
+           pstatus & PSTATUS_SHARED &&
+           num_ents == 1);
     tmp_procs[1] = procConfig.proc_rank();
-    tmp_handles[0] = sharedh;
     tmp_handles[1] = entity;
     num_ents = 2;
   }
   else if (pstatus & PSTATUS_SHARED) {
       // if not multishared and owned, I'm owner
+    assert("shared and owned, should be only 1 sharing proc" &&
+           1 == num_ents);
+    tmp_procs[1] = tmp_procs[0];
     tmp_procs[0] = procConfig.proc_rank();
-    tmp_procs[1] = sharedp; 
+    tmp_handles[1] = tmp_handles[0];
     tmp_handles[0] = entity;
-    result = mbImpl->tag_get_data(sharedh_tag(), &entity, 1, tmp_handles+1);
-    RRA("Failed to get sharedh tag.");
     num_ents = 2;
   }
   else {
@@ -843,11 +836,15 @@
     num_ents = 1;
   }
 
+  int tmp_ps = num_ents;
+  
     // now add others, with zero handle for now
   for (std::set<unsigned int>::iterator sit = entprocs.begin();
        sit != entprocs.end(); sit++) {
+    assert("these procs shouldn't already be in the shared list" &&
+           std::find(tmp_procs, tmp_procs+tmp_ps, *sit) == tmp_procs+tmp_ps);
     tmp_procs[num_ents] = *sit;
-    tmp_handles[num_ents] = zeroh;
+    tmp_handles[num_ents] = 0;
     num_ents++;
   }
 
@@ -1089,6 +1086,7 @@
                                             const int from_ind,
                                             const bool is_iface,
                                             std::vector<std::vector<MBEntityHandle> > &L1h,
+                                            std::vector<std::vector<int> > &L1p,
                                             std::vector<MBEntityHandle> &L2hloc, 
                                             std::vector<MBEntityHandle> &L2hrem,
                                             std::vector<unsigned int> &L2p,
@@ -1113,9 +1111,10 @@
     //      o if !iface, save new handle on L1 for all sharing procs
 
     // lists of handles/procs to return to sending/other procs
-    // L1h[p]: handle pairs [h, h'], where h is the sending proc handle
-    //         and h' is the receiving proc handle; this list only for
-    //         entities shared only between 2 procs, i.e. new shared entities
+    // L1h[p]: handle pairs [h, h'], where h' is the local proc handle
+    //         and h is either the remote proc handle (if that is known) or
+    //         the owner proc handle (otherwise);
+    // L1p[p]: indicates whether h is remote handle (= -1) or owner (rank of owner)
     // L2hloc, L2hrem: local/remote handles for entities shared by > 2 procs;
     //         remote handles are on owning proc
     // L2p: owning procs for handles in L2hrem
@@ -1207,7 +1206,7 @@
         // entity, if there is one
         //=======================================
       if (store_remote_handles) {
-        result = find_existing_entity(is_iface, &ps[0], &hs[0], num_ps, 
+        result = find_existing_entity(is_iface, ps[0], hs[0], num_ps, 
                                       connect, verts_per_entity,
                                       this_type,
                                       L2hloc, L2hrem, L2p,
@@ -1218,6 +1217,8 @@
         //=======================================
         // if we didn't find one, we'll have to create one
         //=======================================
+
+      bool created_here = false;
       if (!new_h) {
         
         if (MBVERTEX == this_type) {
@@ -1251,6 +1252,8 @@
                  new_ents.find(new_h) == new_ents.end());
           new_ents.insert(new_h);
         }
+
+        created_here = true;
       }
 
         //=======================================
@@ -1259,7 +1262,8 @@
       if (store_remote_handles) {
         
           // update sharing data and pstatus, adjusting order if iface
-        result = update_remote_data(new_h, &ps[0], &hs[0], num_ps, is_iface);
+        result = update_remote_data(new_h, &ps[0], &hs[0], num_ps, is_iface,
+                                    created_here);
         RRA("");
       
           // need to send this new handle to all sharing procs
@@ -1267,10 +1271,20 @@
           for (j = 0; j < num_ps; j++) {
             if (ps[j] == (int)procConfig.proc_rank()) continue;
             int idx = get_buffers(ps[j]);
-            if (idx == (int)L1h.size()) L1h.resize(idx+1);
-            assert("Returned handles should always be non-zero" &&
-                   hs[j] && new_h);
-            L1h[idx].push_back(hs[j]);
+            if (idx == (int)L1h.size()) {
+              L1h.resize(idx+1);
+              L1p.resize(idx+1);
+            }
+            
+            if (!hs[j]) {
+              assert(-1 != ps[0] && num_ps > 2);
+              L1p[idx].push_back(ps[0]);
+              L1h[idx].push_back(hs[0]);
+            }
+            else {
+              L1p[idx].push_back(-1);
+              L1h[idx].push_back(hs[j]);
+            }
             L1h[idx].push_back(new_h);
           }
         }
@@ -1373,22 +1387,65 @@
   return MB_SUCCESS;
 }
 
+MBErrorCode MBParallelComm::list_entities(const MBEntityHandle *ents, int num_ents) 
+{
+  unsigned char pstat;
+  MBEntityHandle tmp_handles[MAX_SHARING_PROCS];
+  int tmp_procs[MAX_SHARING_PROCS];
+  unsigned int num_ps;
+  MBErrorCode result;
+
+  for (int i = 0; i < num_ents; i++) {
+    result = get_sharing_data(ents[i], tmp_procs, tmp_handles, pstat, num_ps);
+    RRA("Failed to get sharing data.");
+
+    result = mbImpl->list_entities(ents+i, 1);
+    std::cout << "Pstatus: ";
+    if (!num_ps)
+      std::cout << "local " << std::endl;
+    else {
+      if (pstat & PSTATUS_NOT_OWNED) std::cout << "NOT_OWNED; ";
+      if (pstat & PSTATUS_SHARED) std::cout << "SHARED; ";
+      if (pstat & PSTATUS_MULTISHARED) std::cout << "MULTISHARED; ";
+      if (pstat & PSTATUS_INTERFACE) std::cout << "INTERFACE; ";
+      if (pstat & PSTATUS_GHOST) std::cout << "GHOST; ";
+      std::cout << std::endl;
+      for (unsigned int j = 0; j < num_ps; j++) {
+        std::cout << "  proc " << tmp_procs[j] << " id (handle) " 
+                  << mbImpl->id_from_handle(tmp_handles[j]) 
+                  << "(" << tmp_handles[j] << ")" << std::endl;
+      }
+    }
+    std::cout << std::endl;
+  }
+
+  return MB_SUCCESS;
+}
+  
+MBErrorCode MBParallelComm::list_entities(const MBRange &ents) 
+{
+  for (MBRange::iterator rit = ents.begin(); rit != ents.end(); rit++)
+    list_entities(&(*rit), 1);
+  return MB_SUCCESS;
+}
+
 MBErrorCode MBParallelComm::update_remote_data(MBEntityHandle new_h,
                                                int *ps,
                                                MBEntityHandle *hs,
                                                int num_ps,
-                                               const bool is_iface) 
+                                               const bool is_iface,
+                                               const bool created_here) 
 {
   MBEntityHandle tag_hs[MAX_SHARING_PROCS];
   int tag_ps[MAX_SHARING_PROCS];
   unsigned char pstat;
     // get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
     // in this function, so no need to initialize
-  MBErrorCode result = get_sharing_data(new_h, tag_ps, tag_hs, pstat);
+  unsigned int num_exist;
+  MBErrorCode result = get_sharing_data(new_h, tag_ps, tag_hs, pstat, num_exist);
   RRA("");
   
     // add any new sharing data
-  int num_exist = std::find(tag_ps, tag_ps+MAX_SHARING_PROCS, -1) - tag_ps;
   bool changed = false;
   int idx;
   if (!num_exist) {
@@ -1400,7 +1457,7 @@
   else {
     for (int i = 0; i < num_ps; i++) {
       idx = std::find(tag_ps, tag_ps+num_exist, ps[i]) - tag_ps;
-      if (idx == num_exist) {
+      if (idx == (int) num_exist) {
         tag_ps[num_exist] = ps[i];
         tag_hs[num_exist] = hs[i];
         num_exist++;
@@ -1432,6 +1489,22 @@
   }
     
   if (changed) {
+    if (is_iface && num_exist > 1) 
+      pstat |= PSTATUS_INTERFACE;
+      // if we created the entity in this unpack and it's shared,
+      // that means it's a ghost and we don't own it
+    else if (!is_iface && created_here && num_exist > 1)
+      pstat |= (PSTATUS_GHOST | PSTATUS_NOT_OWNED);
+
+      // if it's multi-shared and we created the entity in this unpack,
+      // local handle probably isn't in handle list yet
+    if (created_here && num_exist > 2) {
+      idx = std::find(tag_ps, tag_ps+num_exist, procConfig.proc_rank()) - tag_ps;
+      assert(idx < (int) num_exist);
+      if (!tag_hs[idx])
+        tag_hs[idx] = new_h;
+    }
+      
     int tag_p;
     MBEntityHandle tag_h;
     if (num_exist > 2 && !(pstat & PSTATUS_MULTISHARED) &&
@@ -1463,9 +1536,6 @@
       pstat |= PSTATUS_SHARED;
     }
 
-    if (is_iface && num_exist > 1) 
-      pstat |= PSTATUS_INTERFACE;
-
       // now set new pstatus
     result = mbImpl->tag_set_data(pstatus_tag(), &new_h, 1, &pstat);
     RRA("Couldn't set pstatus tag.");
@@ -1479,7 +1549,8 @@
 MBErrorCode MBParallelComm::get_sharing_data(MBEntityHandle entity,
                                              int *ps, 
                                              MBEntityHandle *hs,
-                                             unsigned char &pstat) 
+                                             unsigned char &pstat,
+                                             unsigned int &num_ps)
 {
   MBErrorCode result = mbImpl->tag_get_data(pstatus_tag(), &entity, 1, &pstat);
   RRA("Couldn't get pstatus tag.");
@@ -1488,6 +1559,7 @@
     RRA("Couldn't get sharedps tag.");
     result = mbImpl->tag_get_data(sharedhs_tag(), &entity, 1, hs);
     RRA("Couldn't get sharedhs tag.");
+    num_ps = std::find(ps, ps+MAX_SHARING_PROCS, -1) - ps;
   }
   else if (pstat & PSTATUS_SHARED) {
     result = mbImpl->tag_get_data(sharedp_tag(), &entity, 1, ps);
@@ -1497,30 +1569,34 @@
       // initialize past end of data
     ps[1] = -1;
     hs[1] = 0;
+    num_ps = 1;
   }
   else {
     ps[0] = -1;
     hs[0] = 0;
+    num_ps = 0;
   }
 
+  assert(0 <= num_ps && MAX_SHARING_PROCS >= num_ps);
+  
   return MB_SUCCESS;
 }
   
 MBErrorCode MBParallelComm::find_existing_entity(const bool is_iface,
-                                                 const int *ps,
-                                                 const MBEntityHandle *hs,
+                                                 const int owner_p,
+                                                 const MBEntityHandle owner_h,
                                                  const int num_ps,
                                                  const MBEntityHandle *connect,
                                                  const int num_connect,
                                                  const MBEntityType this_type,
                                                  std::vector<MBEntityHandle> &L2hloc,
                                                  std::vector<MBEntityHandle> &L2hrem,
-                                                 std::vector<MBEntityHandle> &L2p,
+                                                 std::vector<unsigned int> &L2p,
                                                  MBEntityHandle &new_h) 
 {
   if (!is_iface && num_ps > 2) {
     for (unsigned int i = 0; i < L2hrem.size(); i++) {
-      if (L2hrem[i] == hs[0] && ps[0] == (int) L2p[i]) {
+      if (L2hrem[i] == owner_h && owner_p == (int) L2p[i]) {
         new_h = L2hloc[i];
         return MB_SUCCESS;
       }
@@ -1528,7 +1604,7 @@
   }
 
     // if we got here and it's a vertex, we don't need to look further
-  if (MBVERTEX == this_type) return MB_SUCCESS;
+  if (MBVERTEX == this_type || !connect || !num_connect) return MB_SUCCESS;
   
   MBRange tmp_range;
   MBErrorCode result = mbImpl->get_adjacencies(connect, num_connect, 
@@ -3412,6 +3488,7 @@
   std::vector<MPI_Status> status(buffProcs.size());
   std::vector<std::vector<MBEntityHandle> > recd_ents(num_incoming);
   std::vector<std::vector<MBEntityHandle> > L1h(buffProcs.size());
+  std::vector<std::vector<int> > L1p(buffProcs.size());
   std::vector<MBEntityHandle> L2hloc, L2hrem;
   std::vector<unsigned int> L2p;
   MBRange new_ents;
@@ -3446,7 +3523,7 @@
       unsigned char *buff_ptr = &ghostRBuffs[ind][0];
       result = unpack_entities(buff_ptr,
                                store_remote_handles, ind, is_iface,
-                               L1h, L2hloc, L2hrem, L2p, new_ents);
+                               L1h, L1p, L2hloc, L2hrem, L2p, new_ents);
       RRA("Failed to unpack entities.");
     }
     else {
@@ -3495,7 +3572,7 @@
        proc_it != buffProcs.end(); proc_it++, ind++) {
       // skip if iface layer and higher-rank proc
     buff_ptr = &ghostSBuffs[ind][0];
-      result = pack_remote_handles(L1h[ind], *proc_it,
+    result = pack_remote_handles(L1h[ind], L1p[ind], *proc_it,
                                    ghostSBuffs[ind], buff_ptr);
     RRA("Failed to pack remote handles.");
     result = send_buffer(buffProcs[ind], &ghostSBuffs[ind][0], 
@@ -3531,7 +3608,8 @@
     }
     else if (MB_MESG_REMOTE_HANDLES == status[0].MPI_TAG) {
         // incoming remote handles
-      result = unpack_remote_handles(buffProcs[ind], &ghostRBuffs[ind][0], is_iface, ind);
+      result = unpack_remote_handles(buffProcs[ind], &ghostRBuffs[ind][0], is_iface, 
+                                     L2hloc, L2hrem, L2p);
       RRA("Failed to unpack remote handles.");
     }
     else assert(false);
@@ -3612,16 +3690,19 @@
 }
 
 MBErrorCode MBParallelComm::pack_remote_handles(std::vector<MBEntityHandle> &L1h,
+                                                std::vector<int> &L1p,
                                                 unsigned int to_proc,
                                                 std::vector<unsigned char> &buff,
                                                 unsigned char *&buff_ptr) 
 {
-    // 2 vectors of handles
-  CHECK_BUFF_SPACE(buff, buff_ptr, sizeof(int) + L1h.size()*sizeof(MBEntityHandle));
+    // 2 vectors of handles plus ints
+  CHECK_BUFF_SPACE(buff, buff_ptr, ((L1p.size()+1)*sizeof(int) + 
+                                    L1h.size()*sizeof(MBEntityHandle)));
   
     // should be in pairs of handles
   assert(!(L1h.size()%2));
   PACK_INT(buff_ptr, L1h.size()/2);
+  PACK_INTS(buff_ptr, &L1p[0], L1p.size());
   PACK_EH(buff_ptr, &L1h[0], L1h.size());
   
   return MB_SUCCESS;
@@ -3630,16 +3711,31 @@
 MBErrorCode MBParallelComm::unpack_remote_handles(unsigned int from_proc,
                                                   unsigned char *&buff_ptr,
                                                   const bool is_iface,
-                                                  const int ind) 
+                                                  std::vector<MBEntityHandle> &L2hloc,
+                                                  std::vector<MBEntityHandle> &L2hrem,
+                                                  std::vector<unsigned int> &L2p)
 {
     // incoming remote handles; use to set remote handles
   int num_eh;
   UNPACK_INT(buff_ptr, num_eh);
 
+  unsigned char *buff_proc = buff_ptr;
+  buff_ptr += num_eh * sizeof(int);
   MBErrorCode result;
   MBEntityHandle hpair[2];
+  int proc;
   for (int i = 0; i < num_eh; i++) {
+    UNPACK_INT(buff_proc, proc);
     UNPACK_EH(buff_ptr, hpair, 2);
+
+    if (-1 != proc) {
+      MBEntityHandle dum_h;
+      result = find_existing_entity(is_iface, proc, hpair[0], 3, NULL, 0,
+                                    mbImpl->type_from_handle(hpair[1]),
+                                    L2hloc, L2hrem, L2p, dum_h);
+      RRA("Didn't get existing entity.");
+      if (dum_h) hpair[0] = dum_h;
+    }
     assert(hpair[0] && hpair[1]);
     result = add_remote_data(hpair[0], from_proc, hpair[1]);
     RRA("Trouble setting remote data range on sent entities in ghost exchange.");

Modified: MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp	2009-06-09 02:00:08 UTC (rev 2937)
+++ MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp	2009-06-09 13:55:32 UTC (rev 2938)
@@ -283,7 +283,14 @@
   MBErrorCode get_sharing_data(MBEntityHandle entity,
                                int *ps, 
                                MBEntityHandle *hs,
-                               unsigned char &pstat);
+                               unsigned char &pstat,
+                               unsigned int &num_ps);
+
+  MBErrorCode get_sharing_data(MBEntityHandle entity,
+                               int *ps, 
+                               MBEntityHandle *hs,
+                               unsigned char &pstat,
+                               int &num_ps);
   
     /** \brief Get entities on an inter-processor interface and of specified dimension
      * If other_proc is -1, any interface entities are returned.  If dim is -1,
@@ -444,6 +451,7 @@
                             const int from_proc,
                             const int ind,
                             std::vector<std::vector<MBEntityHandle> > &L1h,
+                            std::vector<std::vector<int> > &L1p,
                             std::vector<MBEntityHandle> &L2hloc, 
                             std::vector<MBEntityHandle> &L2hrem,
                             std::vector<unsigned int> &L2p,
@@ -463,6 +471,7 @@
                               const int from_ind,
                               const bool is_iface,
                               std::vector<std::vector<MBEntityHandle> > &L1h,
+                              std::vector<std::vector<int> > &L1p,
                               std::vector<MBEntityHandle> &L2hloc, 
                               std::vector<MBEntityHandle> &L2hrem,
                               std::vector<unsigned int> &L2p,
@@ -475,6 +484,35 @@
     //! set rank for this pcomm; USED FOR TESTING ONLY!
   void set_rank(unsigned int r);
   
+    //! get (and possibly allocate) buffers for messages to/from to_proc; returns
+    //! index of to_proc in buffProcs vector; if is_new is non-NULL, sets to
+    //! whether new buffer was allocated
+    //! PUBLIC ONLY FOR TESTING!
+  int get_buffers(int to_proc, bool *is_new = NULL);
+
+    /* \brief Unpack message with remote handles
+     * PUBLIC ONLY FOR TESTING!
+     */
+  MBErrorCode unpack_remote_handles(unsigned int from_proc,
+                                    unsigned char *&buff_ptr,
+                                    const bool is_iface,
+                                    std::vector<MBEntityHandle> &L2hloc,
+                                    std::vector<MBEntityHandle> &L2hrem,
+                                    std::vector<unsigned int> &L2p);
+  
+    /* \brief Pack message with remote handles
+     * PUBLIC ONLY FOR TESTING!
+     */
+  MBErrorCode pack_remote_handles(std::vector<MBEntityHandle> &entities,
+                                  std::vector<int> &procs,
+                                  unsigned int to_proc,
+                                  std::vector<unsigned char> &buff,
+                                  unsigned char *&buff_ptr);
+  
+  MBErrorCode list_entities(const MBEntityHandle *ents, int num_ents);
+  
+  MBErrorCode list_entities(const MBRange &ents);
+  
 private:
 
     /** \brief Set pstatus values on entities
@@ -520,11 +558,6 @@
   int estimate_sets_buffer_size(MBRange &entities,
                                 const bool store_remote_handles);
   
-    //! get (and possibly allocate) buffers for messages to/from to_proc; returns
-    //! index of to_proc in buffProcs vector; if is_new is non-NULL, sets to
-    //! whether new buffer was allocated
-  int get_buffers(int to_proc, bool *is_new = NULL);
-
     //! send the indicated buffer, possibly sending size first
   MBErrorCode send_buffer(const unsigned int to_proc,
                           const unsigned char *send_buff,
@@ -584,38 +617,19 @@
                                  const int from_proc);
   
 
-    /* \brief Pack message with remote handles
-     */
-  MBErrorCode pack_remote_handles(MBRange &entities,
-                                  unsigned int to_proc,
-                                  std::vector<unsigned char> &buff,
-                                  unsigned char *&buff_ptr);
-  
-    /* \brief Pack message with remote handles
-     */
-  MBErrorCode pack_remote_handles(std::vector<MBEntityHandle> &entities,
-                                  unsigned int to_proc,
-                                  std::vector<unsigned char> &buff,
-                                  unsigned char *&buff_ptr);
-  
-    /* \brief Unpack message with remote handles
-     */
-  MBErrorCode unpack_remote_handles(unsigned int from_proc,
-                                    unsigned char *&buff_ptr,
-                                    const bool is_iface,
-                                    const int ind);
-  
     /* \brief Unpack message with remote handles (const pointer to buffer)
      */
   MBErrorCode unpack_remote_handles(unsigned int from_proc,
                                     const unsigned char *buff_ptr,
                                     const bool is_iface,
-                                    const int ind);
+                                    std::vector<MBEntityHandle> &L2hloc,
+                                    std::vector<MBEntityHandle> &L2hrem,
+                                    std::vector<unsigned int> &L2p);
   
     //! given connectivity and type, find an existing entity, if there is one
   MBErrorCode find_existing_entity(const bool is_iface,
-                                   const int *ps,
-                                   const MBEntityHandle *hs,
+                                   const int owner_p,
+                                   const MBEntityHandle owner_h,
                                    const int num_ents,
                                    const MBEntityHandle *connect,
                                    const int num_connect,
@@ -882,7 +896,8 @@
                                  int *ps,
                                  MBEntityHandle *hs,
                                  int num_ps,
-                                 const bool is_iface);
+                                 const bool is_iface,
+                                 const bool created_here);
   
   MBErrorCode add_remote_data(MBEntityHandle this_h,
                               int other_proc,
@@ -1020,11 +1035,13 @@
 inline MBErrorCode MBParallelComm::unpack_remote_handles(unsigned int from_proc,
                                                          const unsigned char *buff_ptr,
                                                          const bool is_iface,
-                                                         const int ind) 
+                                                         std::vector<MBEntityHandle> &L2hloc,
+                                                         std::vector<MBEntityHandle> &L2hrem,
+                                                         std::vector<unsigned int> &L2p) 
 {
     // cast away const-ness, we won't be passing back a modified ptr
   unsigned char *tmp_buff = const_cast<unsigned char*>(buff_ptr);
-  return unpack_remote_handles(from_proc, tmp_buff, is_iface, ind);
+  return unpack_remote_handles(from_proc, tmp_buff, is_iface, L2hloc, L2hrem, L2p);
 }
 
 inline void MBParallelComm::set_rank(unsigned int r) 
@@ -1033,4 +1050,16 @@
   if (procConfig.proc_size() < r) procConfig.proc_size(r+1);
 }
 
+inline MBErrorCode MBParallelComm::get_sharing_data(MBEntityHandle entity,
+                                                    int *ps, 
+                                                    MBEntityHandle *hs,
+                                                    unsigned char &pstat,
+                                                    int &num_ps) 
+{
+  unsigned int dum_int;
+  MBErrorCode result = get_sharing_data(entity, ps, hs, pstat, dum_int);
+  num_ps = dum_int;
+  return result;
+}
+
 #endif

Modified: MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp	2009-06-09 02:00:08 UTC (rev 2937)
+++ MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp	2009-06-09 13:55:32 UTC (rev 2938)
@@ -2,6 +2,7 @@
 #include "MBParallelConventions.h"
 #include "MBTagConventions.hpp"
 #include "MBCore.hpp"
+#include "MeshTopoUtil.hpp"
 #include "TestUtil.hpp"
 #include <algorithm>
 #include <vector>
@@ -105,9 +106,10 @@
   
   entities.clear();
   std::vector<std::vector<MBEntityHandle> > L1h;
+  std::vector<std::vector<int> > L1p;
   std::vector<MBEntityHandle> L2hloc, L2hrem;
   std::vector<unsigned int> L2p;
-  rval = pcomm->unpack_buffer( &buff[0], false, -1, -1, L1h, L2hloc, 
+  rval = pcomm->unpack_buffer( &buff[0], false, -1, -1, L1h, L1p, L2hloc, 
                                L2hrem, L2p, entities);
   CHECK_ERR(rval);
 
@@ -315,6 +317,80 @@
   return result;
 }
 
+MBErrorCode set_owners(MBParallelComm **pc, MBRange *verts, MBRange *quads,
+                       const bool edges_too) 
+{
+    // P0-P2
+  unsigned char pstat = PSTATUS_SHARED | PSTATUS_INTERFACE;
+  MBErrorCode rval = set_owners(pstat, pc[0], verts[0][6], pc[2], verts[2][0]);
+  rval = set_owners(pstat, pc[0], verts[0][7], pc[2], verts[2][1]); CHECK_ERR(rval);
+
+    // P1-P2
+  rval = set_owners(pstat, pc[1], verts[1][7], pc[2], verts[2][5]); CHECK_ERR(rval);
+  rval = set_owners(pstat, pc[1], verts[1][8], pc[2], verts[2][8]); CHECK_ERR(rval);
+    // P0-P1-P3
+  pstat |= PSTATUS_MULTISHARED;
+  rval = set_owners(pstat, pc[0], verts[0][2], pc[1], verts[1][0], pc[3], verts[3][0]); CHECK_ERR(rval);
+  rval = set_owners(pstat, pc[0], verts[0][5], pc[1], verts[1][3], pc[3], verts[3][3]); CHECK_ERR(rval);
+    // P0-P1-P2-P3
+  rval = set_owners(pstat, pc[0], verts[0][8], pc[1], verts[1][6], 
+                    pc[2], verts[2][2], pc[3], verts[3][6]); CHECK_ERR(rval);
+
+  if (edges_too) {
+    MeshTopoUtil *mtu[4];
+    MBInterface *mb[4];
+    MBRange dum_range;
+      // create mtu's and explicit edges
+    for (unsigned int i = 0; i < 4; i++) {
+      mb[i] = pc[i]->get_moab();
+      assert(mb[i]);
+      mtu[i] = new MeshTopoUtil(mb[i]);
+      rval = mb[i]->get_adjacencies(quads[i], 1, true, dum_range, 
+                                    MBInterface::UNION);
+      CHECK_ERR(rval);
+      dum_range.clear();
+    }
+    
+    MBEntityHandle edge1, edge2, edge3;
+    pstat = PSTATUS_SHARED | PSTATUS_INTERFACE;
+      // P0-P2
+    edge1 = mtu[0]->common_entity(verts[0][6], verts[0][7], 1);
+    edge2 = mtu[2]->common_entity(verts[2][0], verts[2][1], 1);
+    assert(edge1 && edge2);
+    rval = set_owners(pstat, pc[0], edge1, pc[2], edge2); CHECK_ERR(rval);
+    edge1 = mtu[0]->common_entity(verts[0][7], verts[0][8], 1);
+    edge2 = mtu[2]->common_entity(verts[2][1], verts[2][2], 1);
+    assert(edge1 && edge2);
+    rval = set_owners(pstat, pc[0], edge1, pc[2], edge2); CHECK_ERR(rval);
+      // P1-P2
+    edge1 = mtu[1]->common_entity(verts[1][6], verts[1][7], 1);
+    edge2 = mtu[2]->common_entity(verts[2][2], verts[2][5], 1);
+    assert(edge1 && edge2);
+    rval = set_owners(pstat, pc[1], edge1, pc[2], edge2); CHECK_ERR(rval);
+    edge1 = mtu[1]->common_entity(verts[1][7], verts[1][8], 1);
+    edge2 = mtu[2]->common_entity(verts[2][5], verts[2][8], 1);
+    assert(edge1 && edge2);
+    rval = set_owners(pstat, pc[1], edge1, pc[2], edge2); CHECK_ERR(rval);
+      // P0-P1-P3
+    pstat |= PSTATUS_MULTISHARED;
+    edge1 = mtu[0]->common_entity(verts[0][2], verts[0][5], 1);
+    edge2 = mtu[1]->common_entity(verts[1][0], verts[1][3], 1);
+    edge3 = mtu[3]->common_entity(verts[3][0], verts[3][3], 1);
+    assert(edge1 && edge2 && edge3);
+    rval = set_owners(pstat, pc[0], edge1, pc[1], edge2, pc[3], edge3); CHECK_ERR(rval);
+    edge1 = mtu[0]->common_entity(verts[0][5], verts[0][8], 1);
+    edge2 = mtu[1]->common_entity(verts[1][3], verts[1][6], 1);
+    edge3 = mtu[3]->common_entity(verts[3][3], verts[3][6], 1);
+    assert(edge1 && edge2 && edge3);
+    rval = set_owners(pstat, pc[0], edge1, pc[1], edge2, pc[3], edge3); CHECK_ERR(rval);
+
+    for (unsigned int i = 0; i < 4; i++)
+      delete mtu[i];
+  }
+    
+  return MB_SUCCESS;
+}
+
 MBErrorCode create_shared_grid(MBParallelComm **pc, MBRange *verts, MBRange *quads) 
 {
 //          
@@ -366,22 +442,9 @@
     create_patch(pc[i]->get_moab(), verts[i], quads[i], 3, xyztmp, &gids[9*i]);
   }
 
-    // P0-P2
-  unsigned char pstat = PSTATUS_SHARED | PSTATUS_INTERFACE;
-  MBErrorCode rval = set_owners(pstat, pc[0], verts[0][6], pc[2], verts[2][0]);
-  rval = set_owners(pstat, pc[0], verts[0][7], pc[2], verts[2][1]);
-    // P1-P2
-  rval = set_owners(pstat, pc[1], verts[1][7], pc[2], verts[2][5]);
-  rval = set_owners(pstat, pc[1], verts[1][8], pc[2], verts[2][8]);
-    // P0-P1-P3
-  pstat |= PSTATUS_MULTISHARED;
-  rval = set_owners(pstat, pc[0], verts[0][2], pc[1], verts[1][0], pc[3], verts[3][0]);
-  rval = set_owners(pstat, pc[0], verts[0][5], pc[1], verts[1][3], pc[3], verts[3][3]);
-    // P0-P1-P2-P3
-  rval = set_owners(pstat, pc[0], verts[0][8], pc[1], verts[1][6], 
-                    pc[2], verts[3][2], pc[3], verts[0][6]);
-
-  return MB_SUCCESS;
+  MBErrorCode rval = set_owners(pc, verts, quads, false);
+  CHECK_ERR(rval);
+  return rval;
 }
 
 void test_pack_vertices()
@@ -1770,12 +1833,22 @@
   for (unsigned int i = 0; i < 4; i++) {
     pc[i] = new MBParallelComm(&moab[i]);
     pc[i]->set_rank(i);
+    for (unsigned int j = 0; j < 4; j++) {
+      if (j == i) continue;
+      else pc[i]->get_buffers(j);
+    }
   }
 
   MBRange verts[4], quads[4];
   MBErrorCode rval = create_shared_grid(pc, verts, quads);
 
   std::vector<std::vector<MBEntityHandle> > L1h[4];
+  std::vector<std::vector<int> > L1p[4];
+  for (unsigned int i = 0; i < 4; i++) {
+    L1h[i].resize(4);
+    L1p[i].resize(4);
+  }
+  
   std::vector<MBEntityHandle> L2hloc[4];
   std::vector<MBEntityHandle> L2hrem[4];
   std::vector<unsigned int> L2p[4];
@@ -1797,7 +1870,7 @@
   CHECK_ERR(rval);
   assert(2 == sent_ents.size());
     // set entprocs
-  entprocs[0].insert(1); entprocs[1].insert(1);
+  entprocs[0].insert(2); entprocs[1].insert(2);
   
   rval = pc[0]->pack_entities(sent_ents, buffer, buff_ptr, true, 2,
                               true, &entprocs);
@@ -1806,8 +1879,11 @@
     // now unpack the buffer
   buff_ptr = &buffer[0];
   rval = pc[2]->unpack_entities(buff_ptr, true, 0, true,
-                                L1h[2], L2hloc[2], L2hrem[2], L2p[2], new_ents[2]);
-  if (!L1h[2].empty() || !L2p[2].empty() || !new_ents[2].empty()) rval = MB_FAILURE;
+                                L1h[2], L1p[2], L2hloc[2], L2hrem[2], L2p[2], new_ents[2]);
+    // all L1h lists should be empty, since we're dealing with iface
+  if (!L1h[2][0].empty() || !L1h[2][1].empty() || 
+      !L1h[2][2].empty() || !L1h[2][3].empty() || 
+      !L2p[2].empty() || !new_ents[2].empty()) rval = MB_FAILURE;
   CHECK_ERR(rval);
 
   buffer.clear(); buff_ptr = &buffer[0];
@@ -1831,8 +1907,11 @@
     // now unpack the buffer
   buff_ptr = &buffer[0];
   rval = pc[1]->unpack_entities(buff_ptr, true, 0, true,
-                                L1h[1], L2hloc[1], L2hrem[1], L2p[1], new_ents[1]);
-  if (!L1h[1].empty() || !L2p[1].empty() || !new_ents[1].empty()) rval = MB_FAILURE;
+                                L1h[1], L1p[1], L2hloc[1], L2hrem[1], L2p[1], new_ents[1]);
+    // all L1h lists should be empty, since we're dealing with iface
+  if (!L1h[1][0].empty() || !L1h[1][1].empty() || 
+      !L1h[1][2].empty() || !L1h[1][3].empty() || 
+      !L2p[1].empty() || !new_ents[1].empty()) rval = MB_FAILURE;
   CHECK_ERR(rval);
 
   entprocs[0].clear(); entprocs[1].clear();
@@ -1856,28 +1935,164 @@
     // now unpack the buffer
   buff_ptr = &buffer[0];
   rval = pc[1]->unpack_entities(buff_ptr, true, 3, true,
-                                L1h[1], L2hloc[1], L2hrem[1], L2p[1], new_ents[1]);
-  if (!L1h[1].empty()) rval = MB_FAILURE;
+                                L1h[1], L1p[1], L2hloc[1], L2hrem[1], L2p[1], new_ents[1]);
+    // all L1h lists should be empty, since we're dealing with iface
+  if (!L1h[1][0].empty() || !L1h[1][1].empty() || 
+      !L1h[1][2].empty() || !L1h[1][3].empty())
+    rval = MB_FAILURE;
   CHECK_ERR(rval);
 
   ent_verts.clear(); sent_ents.clear();
   entprocs[0].clear(); entprocs[1].clear();
   buffer.clear(); buff_ptr = &buffer[0];
   
+    //========================================================================
+    // prepare for ghost communication; set all iface entities ownership
+    // stuff, so we don't need to do it explicitly using pack/unpack;
+    // passing true makes it happen for edges too
+  rval = set_owners(pc, verts, quads, true);
+  CHECK_ERR(rval);
 
     //========================
     // ghost, unshared, 2->1
+  sent_ents.insert(quads[2][3]);  
+    // add vertices not already shared
+  sent_ents.insert(verts[2][4]); 
+  sent_ents.insert(verts[2][7]); 
+    // entprocs lists are all just 1
+  entprocs.resize(sent_ents.size());
+  entprocs[0].insert(1); 
+  entprocs[1].insert(1); 
+  entprocs[2].insert(1); 
 
+  rval = pc[2]->pack_entities(sent_ents, buffer, buff_ptr, true, 1,
+                              true, &entprocs);
+  CHECK_ERR(rval);
+
+    // now unpack the buffer
+  buff_ptr = &buffer[0];
+  rval = pc[1]->unpack_entities(buff_ptr, true, 2, false,
+                                L1h[1], L1p[1], L2hloc[1], L2hrem[1], L2p[1], new_ents[1]);
+  if (
+        // to P2: 2 handles per 3 entities = 6
+      L1h[1][1].size() != 6 || 
+        // to all others, 0
+      !L1h[1][0].empty() || !L1h[1][2].empty() || !L1h[1][3].empty()) 
+    rval = MB_FAILURE;
+  CHECK_ERR(rval);
+
+  buffer.clear(); buff_ptr = &buffer[0];
+  rval = pc[1]->pack_remote_handles(L1h[1][1], L1p[1][1], 2, buffer, buff_ptr);
+  CHECK_ERR(rval);
+
+  buff_ptr = &buffer[0];
+  rval = pc[2]->unpack_remote_handles(1, buff_ptr, false, L2hloc[2], L2hrem[2], L2p[2]);
+  CHECK_ERR(rval);
+
+  ent_verts.clear(); sent_ents.clear();
+  entprocs[0].clear(); entprocs[1].clear(); entprocs[2].clear();
+  buffer.clear(); buff_ptr = &buffer[0]; L1h[1][1].clear();
+
     //========================
-    // ghost, multishared, 1st message, 0->2
-    // 
+    // ghost, multishared, 1st message, 2->0
+    // sent: v3, v4, v5, q0, q1
+  sent_ents.insert(quads[2][0]); sent_ents.insert(quads[2][1]);  
+    // add vertices not already shared
+  sent_ents.insert(verts[2][3]); sent_ents.insert(verts[2][4]); 
+  sent_ents.insert(verts[2][5]); 
+    // entprocs lists are entity-dependent
+    // v3, v4, v5, q0: only to 0 (v4, v5 already on 1)
+  entprocs.resize(sent_ents.size());
+  entprocs[0].insert(0); 
+  entprocs[1].insert(0); 
+  entprocs[2].insert(0); 
+  entprocs[3].insert(0); 
+    // q1: P0 and P1
+  entprocs[4].insert(0); entprocs[4].insert(1); 
+
+  rval = pc[2]->pack_entities(sent_ents, buffer, buff_ptr, true, 0,
+                              true, &entprocs);
+  CHECK_ERR(rval);
+
+    // now unpack the buffer
+  buff_ptr = &buffer[0];
+  rval = pc[0]->unpack_entities(buff_ptr, true, 2, false,
+                                L1h[0], L1p[0], L2hloc[0], L2hrem[0], L2p[0], new_ents[0]);
+  if (
+        // 2 handles per 5 entities = 10
+      L1h[0][1].size() != 10 || 
+        // 2 handles per 3 entities
+      L1h[0][0].size() != 6 || 
+        // none received from 3, no 4th proc
+      !L1h[0][2].empty() || !L1h[0][3].empty()) 
+    rval = MB_FAILURE;
+  CHECK_ERR(rval);
+
+  ent_verts.clear(); sent_ents.clear();
+  for (int i = 0; i < 5; i++) entprocs[i].clear();
+  L1p[0][1].clear();  L1h[0][1].clear();
+  L1p[0][0].clear();  L1h[0][0].clear();
+  buffer.clear(); buff_ptr = &buffer[0];
+
     //========================
-    // ghost, multishared, 2nd message, 1->2
+    // ghost, multishared, 2nd message, 1->0
+    // sent: v1, v4, v7, q0, q2
+  sent_ents.insert(quads[1][0]); sent_ents.insert(quads[1][2]);
+    // add vertices not already shared
+  sent_ents.insert(verts[1][1]); sent_ents.insert(verts[1][4]); 
+  sent_ents.insert(verts[1][7]); 
+    // entprocs lists are entity-dependent
+    // v1, v7, q0: only to 0
+  entprocs.resize(sent_ents.size());
+  entprocs[0].insert(0); 
+  entprocs[2].insert(0);
+  entprocs[3].insert(0); 
+    // v4, q2: P0 and P2
+  entprocs[1].insert(0); entprocs[1].insert(2); 
+  entprocs[4].insert(0); entprocs[4].insert(2); 
 
+  rval = pc[1]->pack_entities(sent_ents, buffer, buff_ptr, true, 0,
+                              true, &entprocs);
+  CHECK_ERR(rval);
+
+    // now unpack the buffer
+  buff_ptr = &buffer[0];
+  rval = pc[0]->unpack_entities(buff_ptr, true, 1, false,
+                                L1h[0], L1p[0], L2hloc[0], L2hrem[0], L2p[0], new_ents[0]);
+  if (
+        // 2 handles per 5 entities = 10
+      L1h[0][0].size() != 10 || 
+        // 2 handles per 3 entities
+      L1h[0][1].size() != 6 || 
+        // none received from 3, no 4th proc
+      !L1h[0][2].empty() || !L1h[0][3].empty()) 
+    rval = MB_FAILURE;
+  CHECK_ERR(rval);
+
     //========================
-    // pack and unpack
+    // now pack/unpack the handles
+    // P0 -> P2
+    // NOTE: pack/unpack of remote handles was moved to after the P2 and P1 sends
+  buffer.clear(); buff_ptr = &buffer[0];
+  rval = pc[0]->pack_remote_handles(L1h[0][1], L1p[0][1], 2, buffer, buff_ptr);
+  CHECK_ERR(rval);
 
+  rval = pc[2]->unpack_remote_handles(0, buff_ptr, false, L2hloc[2], L2hrem[2], L2p[2]);
+  CHECK_ERR(rval);
 
+    // P0 -> P1
+  buffer.clear(); buff_ptr = &buffer[0];
+  rval = pc[0]->pack_remote_handles(L1h[0][0], L1p[0][0], 1, buffer, buff_ptr);
+  CHECK_ERR(rval);
+
+  rval = pc[1]->unpack_remote_handles(0, buff_ptr, false, L2hloc[1], L2hrem[1], L2p[1]);
+  CHECK_ERR(rval);
+
+  ent_verts.clear(); sent_ents.clear();
+  for (int i = 0; i < 5; i++) entprocs[i].clear();
+  L1p[0][1].clear();  L1h[0][1].clear();
+  buffer.clear(); buff_ptr = &buffer[0];
+
 }
 
 void test_filter_pstatus()



More information about the moab-dev mailing list