[MOAB-dev] r2876 - MOAB/branches/parallel_ghosting/parallel

tautges at mcs.anl.gov
Mon May 4 17:18:32 CDT 2009


Author: tautges
Date: 2009-05-04 17:18:32 -0500 (Mon, 04 May 2009)
New Revision: 2876

Modified:
   MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp
   MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp
   MOAB/branches/parallel_ghosting/parallel/WriteHDF5Parallel.cpp
   MOAB/branches/parallel_ghosting/parallel/mbparallelcomm_test.cpp
   MOAB/branches/parallel_ghosting/parallel/parallel_hdf5_test.cc
   MOAB/branches/parallel_ghosting/parallel/parallel_unit_tests.cpp
   MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp
Log:
- added a filter_pstatus function, which is more general than the filter_owned_shared function it replaces
- reworked the exchange_tags functions (source/destination tag vectors, explicit entity range) for more code reuse
- modified the tests to account for the above changes
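
For reference, a minimal usage sketch (hypothetical helper name, assuming an initialized MBParallelComm *pcomm and the MOAB parallel headers) of the filtering pattern the new function supports; it mirrors the calls used in exchange_tags and parallel_hdf5_test.cc below:

  #include "MBParallelComm.hpp"
  #include "MBParallelConventions.h"
  #include "MBRange.hpp"

  MBErrorCode keep_owned_shared(MBParallelComm *pcomm, MBRange &ents)
  {
      // keep only entities shared with at least one other processor
    MBErrorCode result = pcomm->filter_pstatus(ents, PSTATUS_SHARED, PSTATUS_AND);
    if (MB_SUCCESS != result) return result;

      // then drop entities this processor does not own
    return pcomm->filter_pstatus(ents, PSTATUS_NOT_OWNED, PSTATUS_NOT);
  }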



Modified: MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp	2009-05-04 21:02:13 UTC (rev 2875)
+++ MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp	2009-05-04 22:18:32 UTC (rev 2876)
@@ -487,7 +487,7 @@
   if (tags) {
     result = get_tag_send_list(orig_ents, all_tags, tag_ranges );
     RRA("Failed to get tagged entities.");
-    result = pack_tags(orig_ents, all_tags, tag_ranges, 
+    result = pack_tags(orig_ents, all_tags, all_tags, tag_ranges, 
                        buff, buff_ptr, store_remote_handles, to_proc);
     RRA("Packing tags (count) failed.");
   }
@@ -1353,6 +1353,8 @@
       // now set new pstatus
     result = mbImpl->tag_set_data(pstatus_tag(), &new_h, 1, &pstat);
     RRA("Couldn't set pstatus tag.");
+
+    if (pstat & PSTATUS_SHARED) sharedEnts.insert(new_h);
   }
   
   return MB_SUCCESS;
@@ -1907,7 +1909,8 @@
 }
 
 MBErrorCode MBParallelComm::pack_tags(MBRange &entities,
-                                      const std::vector<MBTag> &all_tags,
+                                      const std::vector<MBTag> &src_tags,
+                                      const std::vector<MBTag> &dst_tags,
                                       const std::vector<MBRange> &tag_ranges,
                                       std::vector<unsigned char> &buff,
                                       unsigned char *&buff_ptr,
@@ -1917,12 +1920,12 @@
   
 
   MBErrorCode result;
-  std::vector<MBTag>::const_iterator tag_it;
+  std::vector<MBTag>::const_iterator tag_it, dst_it;
   std::vector<MBRange>::const_iterator rit;
   int count = 0;
   
-  for (tag_it = all_tags.begin(), rit = tag_ranges.begin(); 
-       tag_it != all_tags.end(); tag_it++, rit++) {
+  for (tag_it = src_tags.begin(), rit = tag_ranges.begin(); 
+       tag_it != src_tags.end(); tag_it++, rit++) {
 
     result = packed_tag_size( *tag_it, *rit, count );
     if (MB_SUCCESS != result)
@@ -1934,12 +1937,12 @@
 
   CHECK_BUFF_SPACE(buff, buff_ptr, count);
   
-  PACK_INT(buff_ptr, all_tags.size());
+  PACK_INT(buff_ptr, src_tags.size());
     
-  for (tag_it = all_tags.begin(), rit = tag_ranges.begin(); 
-       tag_it != all_tags.end(); tag_it++, rit++) {
+  for (tag_it = src_tags.begin(), dst_it = dst_tags.begin(), rit = tag_ranges.begin(); 
+       tag_it != src_tags.end(); tag_it++, dst_it++, rit++) {
     
-    result = pack_tag( *tag_it, *tag_it, *rit, entities, buff, buff_ptr, 
+    result = pack_tag( *tag_it, *dst_it, *rit, entities, buff, buff_ptr, 
                        store_remote_handles, to_proc );
     if (MB_SUCCESS != result)
       return result;
@@ -2915,24 +2918,6 @@
   return MB_SUCCESS;
 }
   
-MBErrorCode MBParallelComm::get_iface_entities(int other_proc,
-                                               MBRange &iface_ents,
-                                               int dim) 
-{
-  MBRange iface_sets;
-  MBErrorCode result = MB_SUCCESS;
-  
-  for (MBRange::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); rit++) {
-    if (-1 != other_proc && !is_iface_proc(*rit, other_proc)) continue;
-    
-    if (-1 == dim) result = mbImpl->get_entities_by_handle(*rit, iface_ents);
-    else result = mbImpl->get_entities_by_dimension(*rit, dim, iface_ents);
-    RRA(" Failed to get entities in iface set.");
-  }
-  
-  return MB_SUCCESS;
-}
-
   //! get processors with which this processor communicates; sets are sorted by processor
 MBErrorCode MBParallelComm::get_interface_procs(std::set<unsigned int> &procs_set)
 {
@@ -3060,67 +3045,88 @@
   return false;
 }
 
-MBErrorCode MBParallelComm::filter_owned_shared( MBRange &ents,
-                                                 bool owned_test,
-                                                 bool owned_val,
-                                                 bool shared_test,
-                                                 bool shared_val,
-                                                 int to_proc,
-                                                 MBRange *returned_ents)
+MBErrorCode MBParallelComm::filter_pstatus( MBRange &ents,
+                                            unsigned char pstat,
+                                            unsigned char op,
+                                            int to_proc,
+                                            MBRange *returned_ents)
 {
-  if (!owned_test && !shared_test) return MB_FAILURE;
-
   MBRange tmp_ents;
 
+  if (ents.empty()) ents = sharedEnts;
+
     // Put into tmp_ents any entities which are not owned locally or
     // who are already shared with to_proc
-  std::vector<unsigned char> shared_flags(ents.size());
+  std::vector<unsigned char> shared_flags(ents.size()), shared_flags2;
   MBErrorCode result = mbImpl->tag_get_data(pstatus_tag(), ents,
                                             &shared_flags[0]);
   RRA("Failed to get pstatus flag.");
   MBRange::const_iterator rit;
-  int sharing_procs[MAX_SHARING_PROCS];
-  std::fill(sharing_procs, sharing_procs+MAX_SHARING_PROCS, -1);
   int i;
-  for (rit = ents.begin(), i = 0; rit != ents.end(); rit++, i++) {
-    bool owned = !(PSTATUS_NOT_OWNED & shared_flags[i]),
-        shared = (PSTATUS_SHARED & shared_flags[i]);
+  if (op == PSTATUS_OR) {
+    for (rit = ents.begin(), i = 0; rit != ents.end(); rit++, i++) 
+      if (((shared_flags[i] & ~pstat)^shared_flags[i]) & pstat) {
+        tmp_ents.insert(*rit);
+        if (-1 != to_proc) shared_flags2.push_back(shared_flags[i]);
+      }
+  }
+  else if (op == PSTATUS_AND) {
+    for (rit = ents.begin(), i = 0; rit != ents.end(); rit++, i++)
+      if ((shared_flags[i] & pstat) == pstat) {
+        tmp_ents.insert(*rit);
+        if (-1 != to_proc) shared_flags2.push_back(shared_flags[i]);
+      }
+  }
+  else if (op == PSTATUS_NOT) {
+    for (rit = ents.begin(), i = 0; rit != ents.end(); rit++, i++)
+      if (!(shared_flags[i] & pstat)) {
+        tmp_ents.insert(*rit);
+        if (-1 != to_proc) shared_flags2.push_back(shared_flags[i]);
+      }
+  }
+  else {
+    assert(false);
+    return MB_FAILURE;
+  }
 
-    bool owned_passed = !owned_test ||
-        (owned_test && (owned_val == owned));
-    bool shared_passed = !shared_test ||
-        (shared_val == shared && (!shared_val || -1 == to_proc));
-    
-    if (owned_passed && shared_passed)
-      tmp_ents.insert(*rit);
-      
-    else if (owned_passed && shared_test && -1 != to_proc &&
-             shared_val == shared) {
-      // we need to check sharing procs
-      result = mbImpl->tag_get_data(sharedp_tag(), &(*rit), 1,
-                                    sharing_procs);
-      RRA(" ");
-      if (-1 == sharing_procs[0]) {
+  if (-1 != to_proc) {
+
+    int sharing_procs[MAX_SHARING_PROCS];
+    std::fill(sharing_procs, sharing_procs+MAX_SHARING_PROCS, -1);
+    MBRange tmp_ents2;
+
+    for (rit = tmp_ents.begin(), i = 0; rit != tmp_ents.end(); rit++, i++) {
+        // we need to check sharing procs
+      if (shared_flags2[i] & PSTATUS_MULTISHARED) {
         result = mbImpl->tag_get_data(sharedps_tag(), &(*rit), 1,
                                       sharing_procs);
         assert(-1 != sharing_procs[0]);
         RRA(" ");
+        for (unsigned int j = 0; j < MAX_SHARING_PROCS; j++) {
+            // if to_proc shares this entity, add it to list
+          if (sharing_procs[j] == to_proc) {
+            tmp_ents2.insert(*rit);
+          }
+          else if (sharing_procs[j] == -1) break;
+
+          sharing_procs[j] = -1;
+        }
       }
-      unsigned int j;
-      for (j = 0; j < MAX_SHARING_PROCS; j++) {
-          // if to_proc shares this entity, add it to list
-        if (-1 != to_proc && sharing_procs[j] == to_proc) 
-          tmp_ents.insert(*rit);
-        
-          // if we get here, no more sharing procs, and it's not shared
-          // with to_proc
-        else if (-1 == sharing_procs[j])
-          break;
+      else if (shared_flags2[i] & PSTATUS_SHARED) {
+        result = mbImpl->tag_get_data(sharedp_tag(), &(*rit), 1,
+                                      sharing_procs);
+        RRA(" ");
+        assert(-1 != sharing_procs[0]);
+        if (sharing_procs[0] == to_proc) tmp_ents2.insert(*rit);
+        sharing_procs[0] = -1;
       }
-      std::fill(sharing_procs, sharing_procs+j, -1);
+      else
+        assert("should never get here" && false);
     }
+
+    tmp_ents.swap(tmp_ents2);
   }
-
+  
   if (returned_ents)
     returned_ents->swap(tmp_ents);
   else
@@ -3186,7 +3192,7 @@
       RRA("Failed to get ghost layers.");
     }
     else {
-      result = get_iface_entities(buffProcs[ind], sent_ents[ind]);
+      result = get_iface_entities(buffProcs[ind], -1, sent_ents[ind]);
       RRA("Failed to get interface layers.");
 
         // remove vertices, since we know they're already shared
@@ -3377,6 +3383,24 @@
   return MB_SUCCESS;
 }
 
+MBErrorCode MBParallelComm::get_iface_entities(int other_proc,
+                                               int dim,
+                                               MBRange &iface_ents) 
+{
+  MBRange iface_sets;
+  MBErrorCode result = MB_SUCCESS;
+  
+  for (MBRange::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); rit++) {
+    if (-1 != other_proc && !is_iface_proc(*rit, other_proc)) continue;
+    
+    if (-1 == dim) result = mbImpl->get_entities_by_handle(*rit, iface_ents);
+    else result = mbImpl->get_entities_by_dimension(*rit, dim, iface_ents);
+    RRA(" Failed to get entities in iface set.");
+  }
+  
+  return MB_SUCCESS;
+}
+
 MBErrorCode MBParallelComm::check_sent_ents(MBRange &allsent) 
 {
     // check entities to make sure there are no zero-valued remote handles
@@ -3519,7 +3543,9 @@
 }
 
 
-MBErrorCode MBParallelComm::exchange_tags(std::vector<MBTag> &tags)
+MBErrorCode MBParallelComm::exchange_tags(std::vector<MBTag> &src_tags,
+                                          std::vector<MBTag> &dst_tags,
+                                          const MBRange &entities)
 {
   MBErrorCode result;
   int success;
@@ -3551,30 +3577,19 @@
   
   for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); sit++, ind++) {
     
-    MBRange tag_ents;
+    MBRange tag_ents = entities;
     
-      // get bridge ents on interface(s)
-    for (MBRange::iterator rit = interfaceSets.begin(); rit != interfaceSets.end();
-         rit++) {
-      if (!is_iface_proc(*rit, *sit)) continue;
-
-      int owner;
-      result = get_owner(*rit, owner);
-      if (MB_SUCCESS != result || owner != (int)proc_config().proc_rank()) 
-        continue;
-      
-      result = mbImpl->get_entities_by_handle(*rit, tag_ents);
-      RRA("Failed to get tag ents for exchange.");
-    }
-
-      // also get ghosted entities for this proc
-      //if (!sharedEnts[ind].ownedShared.empty())
-      // tag_ents.merge(sharedEnts[ind].ownedShared);
-    assert(false);
-
+      // get ents shared by proc *sit
+    result = filter_pstatus(tag_ents, PSTATUS_SHARED, PSTATUS_AND, *sit);
+    RRA("Failed pstatus AND check.");
+    
+      // remote nonowned entities
+    result = filter_pstatus(tag_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT);
+    RRA("Failed pstatus NOT check.");
+    
       // pack-send; this also posts receives if store_remote_handles is true
     std::vector<MBRange> tag_ranges;
-    for (std::vector<MBTag>::iterator vit = tags.begin(); vit != tags.end(); vit++) {
+    for (std::vector<MBTag>::iterator vit = src_tags.begin(); vit != src_tags.end(); vit++) {
       const void* ptr;
       int size;
       if (tagServer->get_default_data_ref( *vit, ptr, size ) != MB_SUCCESS) {
@@ -3590,7 +3605,7 @@
       // pack the data
     unsigned char *buff_ptr = &ownerSBuffs[ind][0];
     result = pack_tags(tag_ents,
-                       tags, tag_ranges, 
+                       src_tags, dst_tags, tag_ranges, 
                        ownerSBuffs[ind], buff_ptr, true, *sit);
     RRA("Failed to count buffer in pack_send_tag.");
 
@@ -3659,6 +3674,7 @@
   return MB_SUCCESS;
 }
 
+/*
 MBErrorCode MBParallelComm::exchange_tags( MBTag src_tag, 
                                            MBTag dst_tag, 
                                            const MBRange& entities )
@@ -3809,6 +3825,7 @@
   
   return MB_SUCCESS;
 }
+*/
 
 MBErrorCode MBParallelComm::update_shared_mesh()
 {
@@ -4674,7 +4691,7 @@
   if (!bad_ents.empty() || !local_shared.empty()) return MB_FAILURE;
   else return MB_SUCCESS;
 }
-    
+
 #ifdef TEST_PARALLELCOMM
 
 #include <iostream>
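
A sketch of calling the reworked exchange_tags interface (hypothetical helper name; assumes pcomm is an initialized MBParallelComm and the tag handles already exist). Passing an empty range asks the exchange to cover all shared entities, since filter_pstatus substitutes sharedEnts when its input range is empty; the two-vector form is intended to read src_tags on the sender and store the values into dst_tags on the receiver, covering what the removed two-tag overload did:

  #include "MBParallelComm.hpp"
  #include "MBRange.hpp"
  #include <vector>

  MBErrorCode exchange_examples(MBParallelComm *pcomm, MBTag gid_tag,
                                MBTag src_tag, MBTag dst_tag)
  {
    MBRange all_shared;   // left empty: operate on every shared entity

      // same-tag exchange through the convenience overload
    MBErrorCode result = pcomm->exchange_tags(gid_tag, all_shared);
    if (MB_SUCCESS != result) return result;

      // read src_tag values locally, store them in dst_tag on receiving procs
    std::vector<MBTag> src_tags(1, src_tag), dst_tags(1, dst_tag);
    return pcomm->exchange_tags(src_tags, dst_tags, all_shared);
  }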

Modified: MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp	2009-05-04 21:02:13 UTC (rev 2875)
+++ MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp	2009-05-04 22:18:32 UTC (rev 2876)
@@ -50,6 +50,10 @@
 {
 public:
 
+    // ==================================
+    // \section CONSTRUCTORS/DESTRUCTORS/PCOMM MANAGEMENT
+    // ==================================
+
     //! constructor
   MBParallelComm(MBInterface *impl,
                  MPI_Comm comm = MPI_COMM_WORLD,
@@ -82,6 +86,10 @@
   
   static unsigned char PROC_SHARED, PROC_OWNER;
   
+    // ==================================
+    // \section GLOBAL IDS
+    // ==================================
+
     //! assign a global id space, for largest-dimension or all entities (and
     //! in either case for vertices too)
   MBErrorCode assign_global_ids(MBEntityHandle this_set,
@@ -98,6 +106,9 @@
                                const bool largest_dim_only = true,
                                const bool parallel = true);
   
+    // ==================================
+    // \section HIGH-LEVEL COMMUNICATION (send/recv/bcast ents, exchange tags)
+    // ==================================
 
     /** \brief send entities to another processor, optionally waiting until it's done
      *
@@ -164,24 +175,24 @@
      * tag (or the tag should have a default value).
      * \param tags Vector of tag handles to be exchanged
      */
-  MBErrorCode exchange_tags(std::vector<MBTag> &tags);
+  MBErrorCode exchange_tags(std::vector<MBTag> &src_tags,
+                            std::vector<MBTag> &dst_tags,
+                            const MBRange &entities);
   
     /** \brief Exchange tags for all shared and ghosted entities
      * This function should be called collectively over the communicator for this MBParallelComm
      * \param tag_name Name of tag to be exchanged
      */
-  MBErrorCode exchange_tags(const char *tag_name);
+  MBErrorCode exchange_tags(const char *tag_name,
+                            const MBRange &entities);
   
     /** \brief Exchange tags for all shared and ghosted entities
      * This function should be called collectively over the communicator for this MBParallelComm
      * \param tagh Handle of tag to be exchanged
      */
-  MBErrorCode exchange_tags(MBTag tagh);
+  MBErrorCode exchange_tags(MBTag tagh,
+                            const MBRange &entities);
   
-  MBErrorCode exchange_tags( MBTag src_tag, 
-                             MBTag dst_tag, 
-                             const MBRange& entities );
-
     /** \brief Broadcast all entities resident on from_proc to other processors
      * This function assumes remote handles are *not* being stored, since (usually)
      * every processor will know about the whole mesh.
@@ -195,6 +206,10 @@
                                  const bool adjacencies = false,
                                  const bool tags = true );
 
+    // ==================================
+    // \section INITIALIZATION OF PARALLEL DATA (resolve_shared_ents, etc.)
+    // ==================================
+
     /** \brief Resolve shared entities between processors
      *
      * Resolve shared entities between processors for entities in proc_ents,
@@ -231,6 +246,10 @@
                                   int resolve_dim = 3, 
                                   int shared_dim = -1);
   
+    // ==================================
+    // \section GET PARALLEL DATA (shared/owned/iface entities, etc.)
+    // ==================================
+
     /** \brief Get entities with the given pstatus bit(s) set
      * Returns any entities whose pstatus tag value v satisfies (v & pstatus_val)
      *
@@ -242,39 +261,6 @@
                                    unsigned char pstatus_val,
                                    MBRange &pstatus_ents);
   
-    /** \brief Set pstatus values on entities
-     *
-     * \param pstatus_ents Entities to be set
-     * \param pstatus_val Pstatus value to be set
-     * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
-     *        (and created if they don't exist)
-     * \param verts_too If true, vertices also set
-     * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
-     *        existing value is over-written
-     */
-  MBErrorCode set_pstatus_entities(MBRange &pstatus_ents,
-                                   unsigned char pstatus_val,
-                                   bool lower_dim_ents = false,
-                                   bool verts_too = true,
-                                   int operation = MBInterface::UNION);
-
-    /** \brief Set pstatus values on entities (vector-based function)
-     *
-     * \param pstatus_ents Entities to be set
-     * \param pstatus_val Pstatus value to be set
-     * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
-     *        (and created if they don't exist)
-     * \param verts_too If true, vertices also set
-     * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
-     *        existing value is over-written
-     */
-  MBErrorCode set_pstatus_entities(MBEntityHandle *pstatus_ents,
-                                   int num_ents,
-                                   unsigned char pstatus_val,
-                                   bool lower_dim_ents = false,
-                                   bool verts_too = true,
-                                   int operation = MBInterface::UNION);
-
     /** \brief Return the rank of the entity owner
      */
   MBErrorCode get_owner(MBEntityHandle entity,
@@ -303,12 +289,15 @@
      * If other_proc is -1, any interface entities are returned.  If dim is -1,
      * entities of all dimensions on interface are returned.
      * \param other_proc Rank of processor for which interface entities are requested
+     * \param shared_ents Entities returned from function
      * \param dim Dimension of interface entities requested
-     * \param iface_ents Entities returned from function
+     * \param iface If true, return only entities on the interface
      */
-  MBErrorCode get_iface_entities(int other_proc,
-                                 MBRange &iface_ents,
-                                 int dim = -1);
+  MBErrorCode get_shared_entities(int other_proc,
+                                  MBRange &shared_ents,
+                                  int dim = -1,
+                                  const bool iface = false,
+                                  const bool owned_filter = false);
 /*  
     //! return partition sets; if tag_name is input, gets sets with
     //! that tag name, otherwise uses PARALLEL_PARTITION tag
@@ -322,6 +311,10 @@
     //! get processors with which this processor communicates
   MBErrorCode get_comm_procs(std::set<unsigned int> &procs);
   
+    // ==================================
+    // \section LOW-LEVEL DATA (tags, sets on interface/partition, etc.)
+    // ==================================
+
     //! Get proc config for this communication object
   const MBProcConfig &proc_config() const {return procConfig;}
   
@@ -365,31 +358,13 @@
   MBTag partition_tag();
   MBTag part_tag() { return partition_tag(); }
 
+    // ==================================
+    // \section IMESHP-RELATED FUNCTIONS
+    // ==================================
+
     //! return all the entities in parts owned locally
   MBErrorCode get_part_entities(MBRange &ents, int dim = -1);
   
-    /** Filter the entities by ownership and sharing value(s); if testing
-     * for sharing value, optional sharing processor can be specified.  Results
-     * returned in input list, unless result_ents is passed in non-null.
-     *\param ents       Input entities to filter
-     *\param owned_test Whether to perform ownership test
-     *\param owned_val Ownership value required to remain in list
-     *\param shared_test Whether to perform shared test
-     *\param shared_val Shared value required to remain in list (also subject
-     *                  to to_proc
-     *\param to_proc If non-negative and shared test is requested, proc
-     *               with which this entity must be shared to remain in list
-     *\param result_ents If non-null, results of filter are put in the 
-     *       pointed-to range
-     */
-  MBErrorCode filter_owned_shared( MBRange &ents,
-                                   bool owned_test,
-                                   bool owned_val,
-                                   bool shared_test,
-                                   bool shared_val,
-                                   int to_proc = -1,
-                                   MBRange *returned_ents = NULL);
-
   MBEntityHandle get_partitioning() const { return partitioningSet; }
   MBErrorCode set_partitioning( MBEntityHandle h );
   MBErrorCode get_global_part_count( int& count_out ) const;
@@ -417,6 +392,45 @@
     // from the onwing processor to any procs with copies.
   MBErrorCode update_shared_mesh();
 
+    /** Filter the entities by pstatus tag.  
+     * op is one of PSTATUS_ AND, OR, NOT; an entity is output if:
+     * AND: all bits set in pstatus_val are also set on entity
+     * OR: any bits set in pstatus_val also set on entity
+     * NOT: any bits set in pstatus_val are not set on entity
+     *
+     * Results returned in input list, unless result_ents is passed in non-null,
+     * in which case results are returned in result_ents.
+     *
+     * If ents is passed in empty, filter is done on shared entities in this
+     * pcomm instance, i.e. contents of sharedEnts.
+     *
+     *\param ents       Input entities to filter
+     *\param pstatus_val pstatus value to which entities are compared
+     *\param op Bitwise operation performed between pstatus values
+     *\param to_proc If non-negative and PSTATUS_SHARED is set on pstatus_val,
+     *               only entities shared with to_proc are returned
+     *\param result_ents If non-null, results of filter are put in the 
+     *       pointed-to range
+     */
+  MBErrorCode filter_pstatus( MBRange &ents,
+                              const unsigned char pstatus_val,
+                              const unsigned char op,
+                              int to_proc = -1,
+                              MBRange *returned_ents = NULL);
+
+    /** \brief Get entities on interfaces shared with another proc
+     *
+     * \param other_proc Other proc sharing the interface
+     * \param dim Dimension of entities to return, -1 if all dims
+     * \param iface_ents Returned entities
+     */
+  MBErrorCode get_iface_entities(int other_proc,
+                                 int dim,
+                                 MBRange &iface_ents);
+  
+  MBInterface* get_moab() const { return mbImpl; }
+
+    //! public 'cuz we want to unit test these externally
   MBErrorCode pack_buffer(MBRange &orig_ents, 
                           const bool adjacencies,
                           const bool tags,
@@ -435,8 +449,45 @@
                             std::vector<unsigned int> &L2p,
                             MBRange &new_ents);
   
+    //! Call exchange_all_shared_handles, then compare the results with tag data
+    //! on local shared entities.
+  MBErrorCode check_all_shared_handles();
+  
 private:
 
+    /** \brief Set pstatus values on entities
+     *
+     * \param pstatus_ents Entities to be set
+     * \param pstatus_val Pstatus value to be set
+     * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
+     *        (and created if they don't exist)
+     * \param verts_too If true, vertices also set
+     * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
+     *        existing value is over-written
+     */
+  MBErrorCode set_pstatus_entities(MBRange &pstatus_ents,
+                                   unsigned char pstatus_val,
+                                   bool lower_dim_ents = false,
+                                   bool verts_too = true,
+                                   int operation = MBInterface::UNION);
+
+    /** \brief Set pstatus values on entities (vector-based function)
+     *
+     * \param pstatus_ents Entities to be set
+     * \param pstatus_val Pstatus value to be set
+     * \param lower_dim_ents If true, lower-dimensional ents (incl. vertices) set too
+     *        (and created if they don't exist)
+     * \param verts_too If true, vertices also set
+     * \param operation If UNION, pstatus_val is OR-d with existing value, otherwise
+     *        existing value is over-written
+     */
+  MBErrorCode set_pstatus_entities(MBEntityHandle *pstatus_ents,
+                                   int num_ents,
+                                   unsigned char pstatus_val,
+                                   bool lower_dim_ents = false,
+                                   bool verts_too = true,
+                                   int operation = MBInterface::UNION);
+
   int num_subranges(const MBRange &this_range);
 
     //! estimate size required to pack entities
@@ -466,24 +517,6 @@
                              MPI_Request &recv_req,
                              int mesg_tag);
   
-    //! process contents of receive buffer to get new entities; if store_remote_handles
-    //! is true, also Isend (using send_buff) handles for these entities back to 
-    //! source proc, returning request handle in &send_req; if iface_layer is true,
-    //! don't instantiate the entities, just check to see if they correspond to 
-    //! existing entities, and if not, set corresponding recd_ents handle to zero
-  MBErrorCode recv_unpack_entities(const int from_proc,
-                                   const bool store_remote_handles,
-                                   const bool iface_layer,
-                                   std::vector<unsigned char> &recv_buff,
-                                   MBRange &recd_ents);
-  
-    //! for all the entities in the received buffer; for each, save
-    //! entities in this instance which match connectivity, or zero if none found
-  MBErrorCode unpack_iface_entities(unsigned char *&buff_ptr, 
-                                    const int from_proc,
-                                    const int ind,
-                                    std::vector<MBEntityHandle> &recd_ents);
-  
   MBErrorCode pack_entities(MBRange &entities,
                             std::vector<unsigned char> &buff,
                             unsigned char *&buff_ptr,
@@ -513,10 +546,12 @@
                               std::vector<unsigned int> &L2p,
                               MBRange &new_ents);
   
-    //! given connectivity and type, find an existing entity, if there is one
-  MBEntityHandle find_existing_entity(const MBEntityHandle *connect,
-                                      const int num_connect,
-                                      const MBEntityType this_type);
+    //! for all the entities in the received buffer; for each, save
+    //! entities in this instance which match connectivity, or zero if none found
+  MBErrorCode unpack_iface_entities(unsigned char *&buff_ptr, 
+                                    const int from_proc,
+                                    const int ind,
+                                    std::vector<MBEntityHandle> &recd_ents);
   
   MBErrorCode pack_sets(MBRange &entities,
                         std::vector<unsigned char> &buff,
@@ -572,6 +607,11 @@
                                     const bool is_iface,
                                     const int ind);
   
+    //! given connectivity and type, find an existing entity, if there is one
+  MBEntityHandle find_existing_entity(const MBEntityHandle *connect,
+                                      const int num_connect,
+                                      const MBEntityType this_type);
+  
   /**\brief Get list of tags for which to exchange data
    *
    * Get tags and entities for which to exchange tag data.  This function
@@ -635,7 +675,8 @@
    *                            for each corresponding tag handle in 'all_tags.
    */
   MBErrorCode pack_tags(MBRange &entities,
-                        const std::vector<MBTag> &all_tags,
+                        const std::vector<MBTag> &src_tags,
+                        const std::vector<MBTag> &dst_tags,
                         const std::vector<MBRange> &tag_ranges,
                         std::vector<unsigned char> &buff,
                         unsigned char *&buff_ptr,
@@ -713,10 +754,6 @@
     // after verifying shared entities, now parent/child links between sets can be established
   MBErrorCode create_iface_pc_links();
   
-    //! resolve remote handles for shared non-vertex ents, assuming
-    //! this has already been done for vertices
-  MBErrorCode resolve_ent_remote_handles();
-  
     //! pack a range map with keys in this_range and values a contiguous series
     //! of handles starting at actual_start
   MBErrorCode pack_range_map(MBRange &this_range, MBEntityHandle actual_start,
@@ -740,7 +777,6 @@
                                    int num_layers,
                                    MBRange &ghosted_ents);
   
-public:  
     //! add vertices adjacent to entities in this list
   MBErrorCode add_verts(MBRange &sent_ents);
   
@@ -764,10 +800,6 @@
   //! debugging.
   MBErrorCode exchange_all_shared_handles( shared_entity_map& result );
   
-    //! Call exchange_all_shared_handles, then compare the results with tag data
-    //! on local shared entities.
-  MBErrorCode check_all_shared_handles();
-  
     //! replace handles in from_vec with corresponding handles on
     //! to_proc (by checking shared[p/h]_tag and shared[p/h]s_tag;
     //! if no remote handle and new_ents is non-null, substitute
@@ -794,11 +826,8 @@
                                  int to_proc,
                                  const MBRange &new_ents);
 
-  MBInterface* get_moab() const { return mbImpl; }
-
   std::vector<unsigned int> &buff_procs();
 
-private:  
     //! goes through from_vec, and for any with type MBMAXTYPE, replaces with
     //! new_ents value at index corresponding to id of entity in from_vec
   MBErrorCode get_local_handles(MBEntityHandle *from_vec, 
@@ -926,7 +955,8 @@
   return MB_SUCCESS;
 }
 
-inline MBErrorCode MBParallelComm::exchange_tags(const char *tag_name)
+inline MBErrorCode MBParallelComm::exchange_tags(const char *tag_name,
+                                                 const MBRange &entities)
 {
     // get the tag handle
   std::vector<MBTag> tags(1);
@@ -934,16 +964,17 @@
   if (MB_SUCCESS != result) return result;
   else if (!tags[0]) return MB_TAG_NOT_FOUND;
   
-  return exchange_tags(tags);
+  return exchange_tags(tags, tags, entities);
 }
   
-inline MBErrorCode MBParallelComm::exchange_tags(MBTag tagh)
+inline MBErrorCode MBParallelComm::exchange_tags(MBTag tagh,
+                                                 const MBRange &entities)
 {
     // get the tag handle
   std::vector<MBTag> tags;
   tags.push_back(tagh);
   
-  return exchange_tags(tags);
+  return exchange_tags(tags, tags, entities);
 }
   
 inline MBErrorCode MBParallelComm::get_comm_procs(std::set<unsigned int> &procs) 
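
A small sketch, based on the filter_pstatus documentation above, of the non-destructive form: with a non-null result range the input range is left untouched, and with a to_proc only entities shared with that processor are returned (helper name hypothetical):

  #include "MBParallelComm.hpp"
  #include "MBParallelConventions.h"
  #include "MBRange.hpp"

  MBErrorCode shared_with_proc(MBParallelComm *pcomm, MBRange &ents,
                               int other_proc, MBRange &result_ents)
  {
      // ents is not modified; entities shared with other_proc go to result_ents
    return pcomm->filter_pstatus(ents, PSTATUS_SHARED, PSTATUS_AND,
                                 other_proc, &result_ents);
  }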

Modified: MOAB/branches/parallel_ghosting/parallel/WriteHDF5Parallel.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/WriteHDF5Parallel.cpp	2009-05-04 21:02:13 UTC (rev 2875)
+++ MOAB/branches/parallel_ghosting/parallel/WriteHDF5Parallel.cpp	2009-05-04 22:18:32 UTC (rev 2876)
@@ -299,23 +299,19 @@
     // that this processor will write to the 'nonowned' list.
     
   MBRange nonowned;
-  tmpset.clear();
-  result = myPcomm->filter_owned_shared( nodeSet.range, 
-                                         true, true, false, false, -1, &tmpset );
+  result = myPcomm->filter_pstatus( nodeSet.range, PSTATUS_NOT_OWNED, PSTATUS_AND, -1, &nonowned);
   if (MB_SUCCESS != result)
     return result;
-  nodeSet.range.swap( tmpset );
-  nonowned.merge( tmpset.subtract( nodeSet.range ) );
+  nodeSet.range = nodeSet.range.subtract(nonowned);
   
   for (std::list<ExportSet>::iterator eiter = exportList.begin();
        eiter != exportList.end(); ++eiter ) {
     tmpset.clear();
-    result = myPcomm->filter_owned_shared( eiter->range, 
-                                           true, true, false, false, -1, &tmpset);
+    result = myPcomm->filter_pstatus( eiter->range, PSTATUS_NOT_OWNED, PSTATUS_AND, -1, &tmpset);
     if (MB_SUCCESS != result)
       return result;
-    eiter->range.swap( tmpset );
-    nonowned.merge( tmpset.subtract( eiter->range ) );
+    eiter->range = eiter->range.subtract( tmpset );
+    nonowned.merge(tmpset);
   }
   
     // Now remove from interfaceMesh any entities that are not
@@ -1917,7 +1913,8 @@
   }
   
     // do communication
-  rval = myPcomm->exchange_tags( file_id_tag );
+  MBRange dum_range;
+  rval = myPcomm->exchange_tags( file_id_tag, dum_range );
   if (MB_SUCCESS != rval) {
     iFace->tag_delete( file_id_tag );
     return rval;

Modified: MOAB/branches/parallel_ghosting/parallel/mbparallelcomm_test.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/mbparallelcomm_test.cpp	2009-05-04 21:02:13 UTC (rev 2875)
+++ MOAB/branches/parallel_ghosting/parallel/mbparallelcomm_test.cpp	2009-05-04 22:18:32 UTC (rev 2876)
@@ -349,7 +349,8 @@
       }
 
         // exchange tag
-      result = pcs[i]->exchange_tags("GLOBAL_ID");
+      MBRange tmp_range;
+      result = pcs[i]->exchange_tags("GLOBAL_ID", tmp_range);
       if (MB_SUCCESS != result) {
         std::cerr << "Tag exchange didn't work." << std::endl;
         break;
@@ -388,10 +389,11 @@
   ents.insert(file_set);
   
   MBParallelComm *pcomm = new MBParallelComm(mbImpl);
+
   std::vector<unsigned char> buff(1024);
   int buff_size;
   result = pcomm->pack_buffer(ents, false, true, false, -1,
-                              buff, buff_size);
+                                   buff, buff_size);
   RRA("Packing buffer count (non-stored handles) failed.");
 
   std::vector<std::vector<MBEntityHandle> > L1h;
@@ -399,7 +401,7 @@
   std::vector<unsigned int> L2p;
   
   result = pcomm->unpack_buffer(&buff[0], false, -1, -1, L1h, L2hloc, 
-                                L2hrem, L2p, new_ents);
+                         L2hrem, L2p, new_ents);
   RRA("Unpacking buffer (non-stored handles) failed.");
 
   return MB_SUCCESS;
@@ -422,7 +424,7 @@
     }
 
     for (int i = 0; i < 4; i++) {
-      tmp_result = pcs[p]->get_iface_entities(-1, iface_ents[i], i);
+      tmp_result = pcs[p]->get_iface_entities(-1, i, iface_ents[i]);
       
       if (MB_SUCCESS != tmp_result) {
         std::cerr << "get_iface_entities returned error on proc " 

Modified: MOAB/branches/parallel_ghosting/parallel/parallel_hdf5_test.cc
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/parallel_hdf5_test.cc	2009-05-04 21:02:13 UTC (rev 2875)
+++ MOAB/branches/parallel_ghosting/parallel/parallel_hdf5_test.cc	2009-05-04 22:18:32 UTC (rev 2876)
@@ -258,13 +258,14 @@
   std::fill( counts, counts+MBENTITYSET, 0u );
   
   for (MBEntityType t = MBVERTEX; t < MBENTITYSET; ++t) {
-    MBRange range, r2;
+    MBRange range;
     rval = moab.get_entities_by_type( 0, t, range );
     CHECK_ERR(rval);
-    rval = pcomm->filter_owned_shared(range, true, true, false, false, 
-                                      -1, &r2);
+    rval = pcomm->filter_pstatus(range, PSTATUS_SHARED, PSTATUS_AND);
     CHECK_ERR(rval);
-    counts[t] = r2.size();
+    rval = pcomm->filter_pstatus(range, PSTATUS_NOT_OWNED, PSTATUS_NOT);
+    CHECK_ERR(rval);
+    counts[t] = range.size();
   }
 }
 

Modified: MOAB/branches/parallel_ghosting/parallel/parallel_unit_tests.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/parallel_unit_tests.cpp	2009-05-04 21:02:13 UTC (rev 2875)
+++ MOAB/branches/parallel_ghosting/parallel/parallel_unit_tests.cpp	2009-05-04 22:18:32 UTC (rev 2876)
@@ -750,7 +750,8 @@
   PCHECK( !partition_geom[3].empty() );
 
     // exchange id tags to allow comparison by id
-  rval = pcomm->exchange_tags(id_tag);
+  MBRange tmp_ents;
+  rval = pcomm->exchange_tags(id_tag, tmp_ents);
   CHKERR(rval);
   
     // Get geometric surfaces
@@ -1031,7 +1032,8 @@
   rval = moab.tag_set_data( dense_test_tag, local, &handles[0] ); CHKERR(rval);
   
     // exchange tag data
-  rval = pcomm->exchange_tags( dense_test_tag ); CHKERR(rval);
+  MBRange tmp_range;
+  rval = pcomm->exchange_tags( dense_test_tag, tmp_range ); CHKERR(rval);
   
     // make sure local values are unchanged
   handles2.resize( local.size() );
@@ -1056,7 +1058,8 @@
   rval = moab.tag_set_data( sparse_test_tag, local, &procs1[0] ); CHKERR(rval);
   
     // exchange tag data
-  rval = pcomm->exchange_tags( sparse_test_tag ); 
+  tmp_range.clear();
+  rval = pcomm->exchange_tags( sparse_test_tag, tmp_range ); 
   PCHECK( MB_SUCCESS == rval );
   
     // make sure local values are unchanged
@@ -1100,7 +1103,8 @@
   
     // exchange tag data
   MBParallelComm* pcomm = MBParallelComm::get_pcomm(&moab, 0);
-  rval = pcomm->exchange_tags( dense_test_tag ); 
+  MBRange tmp_range;
+  rval = pcomm->exchange_tags( dense_test_tag, tmp_range ); 
   PCHECK(MB_SUCCESS == rval);
   
   return MB_SUCCESS;
@@ -1391,7 +1395,8 @@
   rval = mb.tag_get_handle( GLOBAL_ID_TAG_NAME, id_tag );
   PCHECK(MB_SUCCESS == rval);
 
-  rval = pcomm.exchange_tags(id_tag);
+  MBRange tmp_range;
+  rval = pcomm.exchange_tags(id_tag, tmp_range);
   PCHECK(MB_SUCCESS == rval);
 
     // get all vertices

Modified: MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp	2009-05-04 21:02:13 UTC (rev 2875)
+++ MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp	2009-05-04 22:18:32 UTC (rev 2876)
@@ -1,4 +1,5 @@
 #include "MBParallelComm.hpp"
+#include "MBParallelConventions.h"
 #include "MBCore.hpp"
 #include "TestUtil.hpp"
 #include <algorithm>
@@ -33,6 +34,8 @@
 void test_pack_variable_length_tag();
 /** Test pack/unpack tag values*/
 void test_pack_tag_handle_data();
+/** Test filter_pstatus function*/
+void test_filter_pstatus();
 
 int main( int argc, char* argv[] )
 {
@@ -55,6 +58,7 @@
   //num_err += RUN_TEST( test_pack_bit_tag_data );
   num_err += RUN_TEST( test_pack_variable_length_tag );
   num_err += RUN_TEST( test_pack_tag_handle_data );
+  num_err += RUN_TEST( test_filter_pstatus );
   
 #ifdef USE_MPI
   MPI_Finalize();
@@ -77,10 +81,12 @@
   std::vector<int> addl_procs;
 
     // get the necessary vertices too
-  rval = pcomm->add_verts(entities);
+  MBRange tmp_range = entities.subset_by_type(MBENTITYSET);
+  entities = entities.subtract(tmp_range);
+  rval = moab.get_adjacencies(entities, 0, false, entities, MBInterface::UNION);
   CHECK_ERR(rval);
+  entities.merge(tmp_range);
   
-  MBRange tmp_range;
   rval = pcomm->pack_buffer( entities, false, true, false, 
                              -1, buff, size);
   CHECK_ERR(rval);
@@ -1451,7 +1457,6 @@
   }
 }
 
-  
 void test_pack_tag_handle_data()
 {
   MBRange::iterator i;
@@ -1566,3 +1571,105 @@
     }
   }
 }
+  
+void test_filter_pstatus()
+{
+  MBRange::iterator i;
+  MBCore moab;
+  MBInterface& mb = moab;
+  MBErrorCode rval;
+  
+    // create some mesh
+  create_simple_grid( mb, 3 );  
+  std::vector<MBEntityHandle> verts;
+  MBRange dum_vertsr, vertsr;
+  rval = mb.get_entities_by_type( 0, MBVERTEX, dum_vertsr );
+  CHECK_ERR(rval);
+  vertsr.insert(dum_vertsr[0], dum_vertsr[8]);
+  for (unsigned int i = 0; i < 9; i++) verts.push_back(vertsr[i]);
+
+  CHECK( !verts.empty() );
+ 
+  MBParallelComm *pcomm = new MBParallelComm( &moab );
+
+  std::vector<int> procs(70, -1);
+  for (unsigned int i = 0; i < 6; i++) procs[i] = i;
+
+  std::vector<unsigned char> pvals(verts.size(), 0);
+    // interface, owned
+  pvals[0] = (PSTATUS_INTERFACE | PSTATUS_SHARED); // p0
+  rval = moab.tag_set_data(pcomm->sharedp_tag(), &verts[0], 1, &procs[0]); CHECK_ERR(rval);  
+    // interface, not owned
+  pvals[1] = (PSTATUS_NOT_OWNED | PSTATUS_INTERFACE | PSTATUS_SHARED); // p1
+  rval = moab.tag_set_data(pcomm->sharedp_tag(), &verts[1], 1, &procs[1]); CHECK_ERR(rval);  
+    // interface, multi-shared, owned
+  pvals[2] = (PSTATUS_INTERFACE | PSTATUS_SHARED | PSTATUS_MULTISHARED); // p0, p1
+  rval = moab.tag_set_data(pcomm->sharedps_tag(), &verts[2], 1, &procs[0]); CHECK_ERR(rval);  
+    // interface, multi-shared, not owned
+  pvals[3] = (PSTATUS_INTERFACE | PSTATUS_MULTISHARED | PSTATUS_NOT_OWNED | PSTATUS_SHARED); // p1, p2
+  rval = moab.tag_set_data(pcomm->sharedps_tag(), &verts[3], 1, &procs[1]); CHECK_ERR(rval);  
+    // ghost, shared
+  pvals[4] = (PSTATUS_GHOST | PSTATUS_SHARED | PSTATUS_NOT_OWNED); // p2
+  rval = moab.tag_set_data(pcomm->sharedp_tag(), &verts[4], 1, &procs[2]); CHECK_ERR(rval);  
+    // ghost, multi-shared
+  pvals[5] = (PSTATUS_GHOST | PSTATUS_MULTISHARED | PSTATUS_NOT_OWNED | PSTATUS_SHARED); // p2, p3
+  rval = moab.tag_set_data(pcomm->sharedps_tag(), &verts[5], 1, &procs[2]); CHECK_ERR(rval);  
+    // owned, shared
+  pvals[6] = (PSTATUS_SHARED); // p4
+  rval = moab.tag_set_data(pcomm->sharedp_tag(), &verts[6], 1, &procs[4]); CHECK_ERR(rval);  
+    // owned, multi-shared
+  pvals[7] = (PSTATUS_MULTISHARED | PSTATUS_SHARED); // p4, p5
+  rval = moab.tag_set_data(pcomm->sharedps_tag(), &verts[7], 1, &procs[4]); CHECK_ERR(rval);  
+    // not shared, owned
+  pvals[8] = 0x0;
+
+  rval = moab.tag_set_data(pcomm->pstatus_tag(), &verts[0], 9, &pvals[0]);
+  CHECK_ERR(rval);
+  
+
+  MBRange tmp_range = vertsr;
+
+    // interface ents
+  rval = pcomm->filter_pstatus(tmp_range, PSTATUS_INTERFACE, PSTATUS_AND);
+  CHECK_ERR(rval);
+  CHECK(tmp_range.size() == 4 && *tmp_range.begin() == verts[0] && 
+        *tmp_range.rbegin() == verts[3]);
+    // not interface
+  tmp_range = vertsr;
+  rval = pcomm->filter_pstatus(tmp_range, PSTATUS_INTERFACE, PSTATUS_NOT);
+  CHECK_ERR(rval);
+  CHECK(tmp_range.size() == 5 && *tmp_range.begin() == verts[4] && 
+        *tmp_range.rbegin() == verts[8]);
+    // interface not owned
+  tmp_range = vertsr;
+  rval = pcomm->filter_pstatus(tmp_range, PSTATUS_INTERFACE | PSTATUS_NOT_OWNED, PSTATUS_AND);
+  CHECK_ERR(rval);
+  CHECK(tmp_range.size() == 2 && *tmp_range.begin() == verts[1] && 
+        *tmp_range.rbegin() == verts[3]);
+    // ghost
+  tmp_range = vertsr;
+  rval = pcomm->filter_pstatus(tmp_range, PSTATUS_GHOST, PSTATUS_AND);
+  CHECK_ERR(rval);
+  CHECK(tmp_range.size() == 2 && *tmp_range.begin() == verts[4] && 
+        *tmp_range.rbegin() == verts[5]);
+    // shared not multi-shared
+  tmp_range = vertsr;
+  rval = pcomm->filter_pstatus(tmp_range, PSTATUS_SHARED, PSTATUS_AND);
+  CHECK_ERR(rval);
+  rval = pcomm->filter_pstatus(tmp_range, PSTATUS_MULTISHARED, PSTATUS_NOT);
+  CHECK_ERR(rval);
+  CHECK(tmp_range.size() == 4 && tmp_range[0] == verts[0] && 
+        tmp_range[1] == verts[1] && tmp_range[2] == verts[4] && tmp_range[3] == verts[6]);
+    // shared w/ p0
+  tmp_range = vertsr;
+  rval = pcomm->filter_pstatus(tmp_range, PSTATUS_SHARED, PSTATUS_AND, 0);
+  CHECK_ERR(rval);
+  CHECK(tmp_range.size() == 2 && tmp_range[1] == verts[2]);
+    // shared w/ p2 && not owned
+  tmp_range = vertsr;
+  rval = pcomm->filter_pstatus(tmp_range, PSTATUS_SHARED | PSTATUS_NOT_OWNED, PSTATUS_AND, 2);
+  CHECK_ERR(rval);
+  CHECK(tmp_range.size() == 3 && tmp_range[0] == verts[3] && 
+        tmp_range[1] == verts[4] && tmp_range[2] == verts[5]);
+  
+}


