[MOAB-dev] commit/MOAB: 9 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Wed Sep 4 12:42:31 CDT 2013


9 new commits in MOAB:

https://bitbucket.org/fathomteam/moab/commits/6be936a95b94/
Changeset:   6be936a95b94
Branch:      None
User:        tautges
Date:        2013-09-04 16:39:24
Summary:     Removing unnecessary arguments from create_interface_sets variant that takes proc_nvecs arg.

Passes parallel tests.

Affected #:  4 files

diff --git a/src/ScdInterface.cpp b/src/ScdInterface.cpp
index 9747af1..5413e3e 100644
--- a/src/ScdInterface.cpp
+++ b/src/ScdInterface.cpp
@@ -659,7 +659,7 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, EntityHandle se
   if (MB_SUCCESS != rval) return rval;
   
     // create interface sets
-  rval = pcomm->create_interface_sets(proc_nvecs, 3, 2);
+  rval = pcomm->create_interface_sets(proc_nvecs);
   if (MB_SUCCESS != rval) return rval;
 
     // add the box to the PComm's partitionSets

diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index 4977f91..86d63c3 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -3809,7 +3809,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
     // create the sets for each interface; store them as tags on
     // the interface instance
     Range iface_sets;
-    result = create_interface_sets(proc_nvecs, resolve_dim, shared_dim);
+    result = create_interface_sets(proc_nvecs);
     RRA("Trouble creating iface sets.");
 
     // establish comm procs and buffers for them
@@ -4371,12 +4371,10 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
     result = tag_shared_ents(resolve_dim, shared_dim, skin_ents,
                              proc_nvecs);
     
-    return create_interface_sets(proc_nvecs, resolve_dim, shared_dim);
+    return create_interface_sets(proc_nvecs);
   }
   
-  ErrorCode ParallelComm::create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
-                                                int /*resolve_dim*/, 
-                                                int /*shared_dim*/) 
+  ErrorCode ParallelComm::create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs) 
   {
     if (proc_nvecs.empty()) return MB_SUCCESS;
   
@@ -6247,7 +6245,7 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
     }
 
     // create interface sets from shared entities
-    result = create_interface_sets(proc_nvecs, 3, 2);
+    result = create_interface_sets(proc_nvecs);
     RRA("Trouble creating iface sets.");
 
     return MB_SUCCESS;

diff --git a/src/parallel/ParallelMergeMesh.cpp b/src/parallel/ParallelMergeMesh.cpp
index f0b99c6..863e8b2 100644
--- a/src/parallel/ParallelMergeMesh.cpp
+++ b/src/parallel/ParallelMergeMesh.cpp
@@ -567,7 +567,7 @@ namespace moab{
     // create the sets for each interface; store them as tags on
     // the interface instance
     Range iface_sets;
-    rval = myPcomm->create_interface_sets(proc_nranges, dim, dim-1);
+    rval = myPcomm->create_interface_sets(proc_nranges);
     if(rval != MB_SUCCESS){
       return rval;
     }

diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index 90cbad6..19a68e6 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -863,8 +863,7 @@ namespace moab {
     // and tags the set with the procs sharing it; interface sets are optionally
     // returned; NOTE: a subsequent step is used to verify entities on the interface
     // and remove them if they're not shared
-    ErrorCode create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
-                                    int resolve_dim, int shared_dim);
+    ErrorCode create_interface_sets(std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);
 
     // do the same but working straight from sharedEnts
     ErrorCode create_interface_sets(EntityHandle this_set, int resolve_dim, int shared_dim);


https://bitbucket.org/fathomteam/moab/commits/84a12c3768d6/
Changeset:   84a12c3768d6
Branch:      None
User:        tautges
Date:        2013-09-04 16:40:41
Summary:     Removing compile warning.

Passes tests.

Affected #:  1 file

diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index 86d63c3..939d410 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -250,7 +250,7 @@ namespace moab {
       if (tag < MB_MESG_REMOTEH_ACK) myDebug->print(3, ", recv_ent_reqs=");
       else if (tag < MB_MESG_TAGS_ACK) myDebug->print(3, ", recv_remoteh_reqs=");
       else myDebug->print(3, ", recv_tag_reqs=");
-      for (unsigned int i = 0; i < reqs.size(); i++) myDebug->printf(3, " %x", reqs[i]);
+      for (unsigned int i = 0; i < reqs.size(); i++) myDebug->printf(3, " %p", (void*)reqs[i]);
       myDebug->print(3, "\n");
     }
   }


https://bitbucket.org/fathomteam/moab/commits/8dfad41ab8ca/
Changeset:   8dfad41ab8ca
Branch:      None
User:        tautges
Date:        2013-09-04 16:45:15
Summary:     Remove vestigial code purporting to support vertex-based partitions.

Passes tests.

Affected #:  1 file

diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index 939d410..94dd5ff 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -3591,12 +3591,13 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
     ErrorCode result;
     myDebug->tprintf(1, "Resolving shared entities.\n");
 
+    if (resolve_dim < shared_dim) {
+      result = MB_FAILURE;
+      RRA("MOAB does not support vertex-based partitions, only element-based ones.");
+    }
+    
     if (-1 == shared_dim) {
-      if (0 == resolve_dim) {
-        result = mbImpl->get_dimension(shared_dim); 
-        RRA("Couldn't get dimension.");
-      }
-      else if (!proc_ents.empty())
+      if (!proc_ents.empty())
         shared_dim = mbImpl->dimension_from_handle(*proc_ents.begin())-1;
       else if (resolve_dim == 3)
         shared_dim = 2;


https://bitbucket.org/fathomteam/moab/commits/88384fe103aa/
Changeset:   88384fe103aa
Branch:      None
User:        tautges
Date:        2013-09-04 16:52:01
Summary:     Rename tag_shared_ents to get_proc_nvecs.

Passes tests.

Affected #:  3 files

diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index 94dd5ff..e8a31f9 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -3791,8 +3791,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
 #endif
 
     // get entities shared by 1 or n procs
-    result = tag_shared_ents(resolve_dim, shared_dim, skin_ents,
-                             proc_nvecs);
+    result = get_proc_nvecs(resolve_dim, shared_dim, skin_ents, proc_nvecs);
     RRA("Trouble tagging shared entities.");
 
     shared_verts.reset();
@@ -4369,8 +4368,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
       RRA("");
     }
 
-    result = tag_shared_ents(resolve_dim, shared_dim, skin_ents,
-                             proc_nvecs);
+    result = get_proc_nvecs(resolve_dim, shared_dim, skin_ents, proc_nvecs);
     
     return create_interface_sets(proc_nvecs);
   }
@@ -4529,10 +4527,10 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
     return MB_SUCCESS;
   }
 
-  ErrorCode ParallelComm::tag_shared_ents(int resolve_dim,
-                                          int shared_dim,
-                                          Range *skin_ents,
-                                          std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs) 
+  ErrorCode ParallelComm::get_proc_nvecs(int resolve_dim,
+                                         int shared_dim,
+                                         Range *skin_ents,
+                                         std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs) 
   {
     // set sharing procs tags on other skin ents
     ErrorCode result;
@@ -4553,7 +4551,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
  
         int op = (resolve_dim < shared_dim ? Interface::UNION : Interface::INTERSECT);      
         result = get_sharing_data(connect, num_connect, sharing_procs, op);
-        RRA("Failed to get sharing data in tag_shared_ents");
+        RRA("Failed to get sharing data in get_proc_nvecs");
         if (sharing_procs.empty() ||
             (sharing_procs.size() == 1 && *sharing_procs.begin() == (int)procConfig.proc_rank())) continue;
 

diff --git a/src/parallel/ParallelMergeMesh.cpp b/src/parallel/ParallelMergeMesh.cpp
index 863e8b2..138e32c 100644
--- a/src/parallel/ParallelMergeMesh.cpp
+++ b/src/parallel/ParallelMergeMesh.cpp
@@ -559,7 +559,7 @@ namespace moab{
     }
     
     // get entities shared by 1 or n procs
-    rval = myPcomm->tag_shared_ents(dim,dim-1, &mySkinEnts[0],proc_nranges);
+    rval = myPcomm->get_proc_nvecs(dim,dim-1, &mySkinEnts[0],proc_nranges);
     if(rval != MB_SUCCESS){
       return rval;
     }

diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index 19a68e6..1955093 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -1229,10 +1229,10 @@ namespace moab {
                                std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs,
                                Range &proc_verts);
   
-    ErrorCode tag_shared_ents(int resolve_dim,
-                              int shared_dim,
-                              Range *skin_ents,
-                              std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);
+    ErrorCode get_proc_nvecs(int resolve_dim,
+                             int shared_dim,
+                             Range *skin_ents,
+                             std::map<std::vector<int>, std::vector<EntityHandle> > &proc_nvecs);
 
     // after verifying shared entities, now parent/child links between sets can be established
     ErrorCode create_iface_pc_links();


https://bitbucket.org/fathomteam/moab/commits/709cbe93b71a/
Changeset:   709cbe93b71a
Branch:      None
User:        tautges
Date:        2013-09-04 17:00:19
Summary:     Slight modification to assign_global_ids, with another variant.

Passes tests.

Affected #:  2 files

diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index e8a31f9..d71f03c 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -409,7 +409,6 @@ namespace moab {
                                              const bool owned_only) 
   {
     Range entities[4];
-    int local_num_elements[4];
     ErrorCode result;
     std::vector<unsigned char> pstatus;
     for (int dim = 0; dim <= dimension; dim++) {
@@ -430,7 +429,23 @@ namespace moab {
         if (pstatus[i] & PSTATUS_NOT_OWNED)
           dum_range.insert(*rit);
       entities[dim] = subtract( entities[dim], dum_range);
+    }
+    
+    return assign_global_ids(entities, dimension, start_id, parallel, owned_only);
+  }
     
+  //! assign a global id space, for largest-dimension or all entities (and
+  //! in either case for vertices too)
+  ErrorCode ParallelComm::assign_global_ids( Range entities[],
+                                             const int dimension, 
+                                             const int start_id,
+                                             const bool parallel,
+                                             const bool owned_only) 
+  {
+    int local_num_elements[4];
+    ErrorCode result;
+    std::vector<unsigned char> pstatus;
+    for (int dim = 0; dim <= dimension; dim++) {
       local_num_elements[dim] = entities[dim].size();
     }
   

diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index 1955093..402b264 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -107,6 +107,14 @@ namespace moab {
                                 const bool parallel = true,
                                 const bool owned_only = false);
 
+  //! assign a global id space, for largest-dimension or all entities (and
+  //! in either case for vertices too)
+  ErrorCode assign_global_ids( Range entities[],
+                               const int dimension, 
+                               const int start_id,
+                               const bool parallel,
+                               const bool owned_only);
+    
     //! check for global ids; based only on tag handle being there or not;
     //! if it's not there, create them for the specified dimensions
     //!\param owned_only If true, do not get global IDs for non-owned entities


https://bitbucket.org/fathomteam/moab/commits/16ed6b268825/
Changeset:   16ed6b268825
Branch:      None
User:        tautges
Date:        2013-09-04 17:10:46
Summary:     Eliminating unused idx_tag processing.

Passes tests.

Affected #:  1 file

diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index d71f03c..ec3b4ab 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -3671,16 +3671,16 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
                                               const Tag* id_tag)
   { // resolve shared vertices first
     ErrorCode result;
-    std::vector<int> gid_data;
     std::vector<EntityHandle> handle_vec;
     int skin_dim = resolve_dim-1;
 
     // global id tag
-    Tag gid_tag; int def_val = -1;
+    Tag gid_tag; 
     if (id_tag)
       gid_tag = *id_tag;
     else {
       bool tag_created = false;
+      int def_val = -1;
       result = mbImpl->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER,
                                       gid_tag, MB_TAG_DENSE|MB_TAG_CREAT, 
                                       &def_val, &tag_created );
@@ -3693,20 +3693,8 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
       }
     }
   
-    // store index in temp tag; reuse gid_data 
-    gid_data.resize(2*skin_ents[0].size());
-    int idx = 0;
-    for (Range::iterator rit = skin_ents[0].begin(); 
-         rit != skin_ents[0].end(); rit++) 
-      gid_data[idx] = idx, idx++;
-    Tag idx_tag;
-    result = mbImpl->tag_get_handle("__idx_tag", 1, MB_TYPE_INTEGER,
-                                    idx_tag, MB_TAG_DENSE|MB_TAG_CREAT, &def_val ); 
-    if (MB_SUCCESS != result) return result;
-    result = mbImpl->tag_set_data(idx_tag, skin_ents[0], &gid_data[0]);
-    RRA("Couldn't assign index tag.");
-
     // get gids for skin ents in a vector, to pass to gs
+    std::vector<int> gid_data(skin_ents[0].size());
     result = mbImpl->tag_get_data(gid_tag, skin_ents[0], &gid_data[0]);
     RRA("Couldn't get gid tag for skin vertices.");
 


https://bitbucket.org/fathomteam/moab/commits/75633d95440d/
Changeset:   75633d95440d
Branch:      None
User:        tautges
Date:        2013-09-04 17:29:47
Summary:     On my way to eliminating one variant of resolve_shared_ents.

Passes tests.

Affected #:  3 files

diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index ec3b4ab..fbf4662 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -3587,13 +3587,14 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
   
     // must call even if we don't have any entities, to make sure
     // collective comm'n works
-    return resolve_shared_ents(this_set, proc_ents, resolve_dim, shared_dim, id_tag);
+    return resolve_shared_ents(this_set, proc_ents, resolve_dim, shared_dim, NULL, id_tag);
   }
   
   ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
                                               Range &proc_ents,
                                               int resolve_dim,
                                               int shared_dim,
+                                              Range *skin_ents,
                                               const Tag* id_tag) 
   {
 #ifdef USE_MPE
@@ -3616,49 +3617,38 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
         shared_dim = mbImpl->dimension_from_handle(*proc_ents.begin())-1;
       else if (resolve_dim == 3)
         shared_dim = 2;
-      else {
-        assert(false && "Unable to guess shared_dim.");
-        return MB_FAILURE;
-      }
     }
-    assert(shared_dim >= 0 && resolve_dim >= 0);
+    
+    if (shared_dim < 0 || resolve_dim < 0) {
+      result = MB_FAILURE;
+      RRA("Unable to guess shared_dim or resolve_dim.");
+    }
   
     // get the skin entities by dimension
-    Range skin_ents[4];
-    std::vector<int> gid_data;
-    std::vector<EntityHandle> handle_vec;
-    int skin_dim;
+    Range tmp_skin_ents[4];
 
     // get the entities to be skinned
-    if (resolve_dim < shared_dim) {
-      // for vertex-based partition, it's the elements adj to the vertices
-      result = mbImpl->get_adjacencies(proc_ents, shared_dim,
-                                       false, skin_ents[resolve_dim],
-                                       Interface::UNION);
-      RRA("Failed getting skinned entities.");
-      skin_dim = shared_dim-1;
-    }
-    else {
-      // for element-based partition, it's just the elements
-      skin_ents[resolve_dim] = proc_ents;
-      skin_dim = resolve_dim-1;
-    }
-
     // find the skin
-    Skinner skinner(mbImpl);
-    result = skinner.find_skin(this_set, skin_ents[skin_dim+1], false, skin_ents[skin_dim],
-                               NULL, true, true, true);
-    RRA("Failed to find skin.");
-    myDebug->tprintf(1, "Found skin, now resolving.\n");
-
-    // get entities adjacent to skin ents from shared_dim down to zero
-    for (int this_dim = skin_dim-1; this_dim >= 0; this_dim--) {
-      result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
-                                       true, skin_ents[this_dim],
-                                       Interface::UNION);
-      RRA("Failed getting skin adjacencies.");
+    int skin_dim = resolve_dim-1;
+    if (!skin_ents) {
+      skin_ents = tmp_skin_ents;
+      skin_ents[resolve_dim] = proc_ents;
+      Skinner skinner(mbImpl);
+      result = skinner.find_skin(this_set, skin_ents[skin_dim+1], false, skin_ents[skin_dim],
+                                 NULL, true, true, true);
+      RRA("Failed to find skin.");
+      myDebug->tprintf(1, "Found skin, now resolving.\n");
+
+        // get entities adjacent to skin ents from shared_dim down to zero
+      for (int this_dim = skin_dim-1; this_dim >= 0; this_dim--) {
+        result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
+                                         true, skin_ents[this_dim],
+                                         Interface::UNION);
+        RRA("Failed getting skin adjacencies.");
+      }
     }
-
+    else if (skin_ents[resolve_dim].empty()) skin_ents[resolve_dim] = proc_ents;
+    
     return resolve_shared_ents(this_set, proc_ents, skin_ents,
                                resolve_dim, shared_dim, id_tag);
   }
@@ -3673,6 +3663,11 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
     ErrorCode result;
     std::vector<EntityHandle> handle_vec;
     int skin_dim = resolve_dim-1;
+    assert(resolve_dim >= shared_dim);
+    if (resolve_dim < shared_dim) {
+      result = MB_FAILURE;
+      RRA("Resolve dim must be >= shared_dim.");
+    }
 
     // global id tag
     Tag gid_tag; 

diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index 402b264..c21f37f 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -420,6 +420,7 @@ namespace moab {
                                   Range &proc_ents, 
                                   int resolve_dim = -1,
                                   int shared_dim = -1,
+                                  Range *skin_ents = NULL,
                                   const Tag* id_tag = 0);
   
     /** \brief Resolve shared entities between processors

diff --git a/test/parallel/parallel_hdf5_test.cc b/test/parallel/parallel_hdf5_test.cc
index 2b2a1c8..355e99e 100644
--- a/test/parallel/parallel_hdf5_test.cc
+++ b/test/parallel/parallel_hdf5_test.cc
@@ -1491,7 +1491,7 @@ void test_write_unbalanced()
   ParallelComm* pcomm = ParallelComm::get_pcomm( &mb, 0 );
   if (0 == pcomm)
     pcomm = new ParallelComm( &mb, MPI_COMM_WORLD );
-  rval = pcomm->resolve_shared_ents( 0, entities, 2, 0, &idtag );
+  rval = pcomm->resolve_shared_ents( 0, entities, 2, 0, NULL, &idtag );
   CHECK_ERR(rval);
   rval = pcomm->resolve_shared_sets( sets, idtag );
   CHECK_ERR(rval);


https://bitbucket.org/fathomteam/moab/commits/0604b05a689b/
Changeset:   0604b05a689b
Branch:      None
User:        tautges
Date:        2013-09-04 17:34:35
Summary:     Eliminated one variant of resolve_shared_ents.

Tests all pass.

Affected #:  2 files

diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index fbf4662..aecc6f2 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -3649,26 +3649,6 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
     }
     else if (skin_ents[resolve_dim].empty()) skin_ents[resolve_dim] = proc_ents;
     
-    return resolve_shared_ents(this_set, proc_ents, skin_ents,
-                               resolve_dim, shared_dim, id_tag);
-  }
-
-  ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
-                                              Range &proc_ents,
-                                              Range skin_ents[],
-                                              int resolve_dim,
-                                              int shared_dim,
-                                              const Tag* id_tag)
-  { // resolve shared vertices first
-    ErrorCode result;
-    std::vector<EntityHandle> handle_vec;
-    int skin_dim = resolve_dim-1;
-    assert(resolve_dim >= shared_dim);
-    if (resolve_dim < shared_dim) {
-      result = MB_FAILURE;
-      RRA("Resolve dim must be >= shared_dim.");
-    }
-
     // global id tag
     Tag gid_tag; 
     if (id_tag)
@@ -3694,6 +3674,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
     RRA("Couldn't get gid tag for skin vertices.");
 
     // put handles in vector for passing to gs setup
+    std::vector<EntityHandle> handle_vec;
     std::copy(skin_ents[0].begin(), skin_ents[0].end(), 
               std::back_inserter(handle_vec));
 
@@ -3706,7 +3687,6 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
     // get a crystal router
     gs_data::crystal_data *cd = procConfig.crystal_router();
 
-
     /*  
     // get total number of entities; will overshoot highest global id, but
     // that's ok

diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index c21f37f..93dc716 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -439,20 +439,6 @@ namespace moab {
                                   int shared_dim = -1,
                                   const Tag* id_tag = 0);
 
-    /** \brief Resolve shared entities between processors
-     *
-     * Entity skin array is offered by user not by skinner
-     * It is used by other resolve_shared_ents functions above 
-
-     * \param skin_ents[] entity skin array by user
-     */
-    ErrorCode resolve_shared_ents(EntityHandle this_set,
-                                  Range &proc_ents,
-				  Range skin_ents[],
-				  int resolve_dim = 3,
-				  int shared_dim = -1,
-				  const Tag* id_tag = 0);
-    
     static ErrorCode resolve_shared_ents(ParallelComm **pc, 
                                          const unsigned int np, 
                                          EntityHandle this_set,


https://bitbucket.org/fathomteam/moab/commits/00648a6bdabe/
Changeset:   00648a6bdabe
Branch:      master
User:        tautges
Date:        2013-09-04 19:42:14
Summary:     ScdInterface: add a function to assign global ids, called from some climate readers;
  also added a variant of tag_shared_vertices that works on Box objects in addition to entity sets;
  also removed Core* from public functions, in favor of Interface*.
NCHelper: use ScdInterface assign_global_ids function instead of implementing its own
HomXform: add operator>>

Passes all tests.

Affected #:  4 files

diff --git a/src/ScdInterface.cpp b/src/ScdInterface.cpp
index 5413e3e..6382871 100644
--- a/src/ScdInterface.cpp
+++ b/src/ScdInterface.cpp
@@ -5,6 +5,7 @@
 #include "StructuredElementSeq.hpp"
 #include "VertexSequence.hpp"
 #include "ScdVertexData.hpp"
+#include "MBTagConventions.hpp"
 #ifdef USE_MPI
 #  include "moab/ParallelComm.hpp"
 #  include "moab/TupleList.hpp"
@@ -23,7 +24,7 @@ namespace moab
 
 const char *ScdParData::PartitionMethodNames[] = {"alljorkori", "alljkbal", "sqij", "sqjk", "sqijk", "trivial", "nopart"};
 
-ScdInterface::ScdInterface(Core *imp, bool boxes) 
+ScdInterface::ScdInterface(Interface *imp, bool boxes) 
         : mbImpl(imp), 
           searchedBoxes(false),
           boxPeriodicTag(0),
@@ -102,7 +103,8 @@ ScdBox *ScdInterface::get_scd_box(EntityHandle eh)
 }
 
 ErrorCode ScdInterface::construct_box(HomCoord low, HomCoord high, const double * const coords, unsigned int num_coords,
-                                      ScdBox *& new_box, int * const lperiodic, ScdParData *par_data)
+                                      ScdBox *& new_box, int * const lperiodic, ScdParData *par_data,
+                                      bool assign_gids)
 {
     // create a rectangular structured mesh block
   ErrorCode rval;
@@ -137,7 +139,8 @@ ErrorCode ScdInterface::construct_box(HomCoord low, HomCoord high, const double
   }
 
     // create element sequence
-  SequenceManager *seq_mgr = mbImpl->sequence_manager();
+  Core *mbcore = dynamic_cast<Core*>(mbImpl);
+  SequenceManager *seq_mgr = mbcore->sequence_manager();
 
   EntitySequence *tmp_seq;
   EntityHandle start_ent;
@@ -172,9 +175,45 @@ ErrorCode ScdInterface::construct_box(HomCoord low, HomCoord high, const double
 
   if (par_data) new_box->par_data(*par_data);
   
+
+  if (assign_gids) {
+    rval = assign_global_ids(new_box);
+    ERRORR(rval, "Trouble assigning global ids");
+  }
+
   return MB_SUCCESS;
 }
 
+ErrorCode ScdInterface::assign_global_ids(ScdBox *box)
+{
+  // Get a ptr to global id memory
+  void* data;
+  int count = 0;
+  Tag gid_tag;
+  ErrorCode rval = mbImpl->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid_tag, 
+                                          MB_TAG_CREAT & MB_TAG_DENSE, &count);
+  ERRORR(rval, "Trouble getting global id tag handle.");
+  Range tmp_range(box->start_vertex(), box->start_vertex() + box->num_vertices());
+  rval = mbImpl->tag_iterate(gid_tag, tmp_range.begin(), tmp_range.end(), count, data);
+  ERRORR(rval, "Failed to get tag iterator.");
+  assert(count == box->num_vertices());
+  int* gid_data = (int*) data;
+  int di = box->par_data().gDims[3] - box->par_data().gDims[0] + 1;
+  int dj = box->par_data().gDims[4] - box->par_data().gDims[1] + 1;
+
+  for (int kl = box->box_dims()[2]; kl <= box->box_dims()[5]; kl++) {
+    for (int jl = box->box_dims()[1]; jl <= box->box_dims()[4]; jl++) {
+      for (int il = box->box_dims()[0]; il <= box->box_dims()[3]; il++) {
+        int itmp = (!box->locally_periodic()[0] && box->par_data().gPeriodic[0] && il == box->par_data().gDims[3] ? 
+                    box->par_data().gDims[0] : il);
+        *gid_data = (-1 != kl ? kl * di * dj : 0) + jl * di + itmp + 1;
+        gid_data++;
+      }
+    }
+  }
+
+  return MB_SUCCESS;
+}
 
 ErrorCode ScdInterface::create_scd_sequence(HomCoord low, HomCoord high, EntityType tp,
                                             int starting_id, ScdBox *&new_box,
@@ -186,7 +225,8 @@ ErrorCode ScdInterface::create_scd_sequence(HomCoord low, HomCoord high, EntityT
       (tp == MBEDGE && 1 >= tmp_size[0]))
     return MB_TYPE_OUT_OF_RANGE;
 
-  SequenceManager *seq_mgr = mbImpl->sequence_manager();
+  Core *mbcore = dynamic_cast<Core*>(mbImpl);
+  SequenceManager *seq_mgr = mbcore->sequence_manager();
 
   EntitySequence *tmp_seq;
   EntityHandle start_ent, scd_set;
@@ -551,32 +591,18 @@ ErrorCode ScdBox::get_adj_edge_or_face(int dim, int i, int j, int k, int dir, En
 }
     
 #ifndef USE_MPI
-ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *, EntityHandle ) 
+ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *, ScdBox *) 
 {
   return MB_FAILURE;
 #else
-ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, EntityHandle seth) 
+ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, ScdBox *box) 
 {
-    // first, look for box data on the set
-  ScdBox *box = get_scd_box(seth);
-  Range tmp_range;
-  ErrorCode rval;
-  if (!box) {
-      // look for contained boxes
-    rval = mbImpl->get_entities_by_type(seth, MBENTITYSET, tmp_range);
-    if (MB_SUCCESS != rval) return rval;
-    for (Range::iterator rit = tmp_range.begin(); rit != tmp_range.end(); rit++) {
-      box = get_scd_box(*rit);
-      if (box) break;
-    }
-  }
-  
-  if (!box) return MB_FAILURE;
+  EntityHandle seth = box->box_set();
 
     // check the # ents in the box against the num in the set, to make sure it's only 1 box;
     // reuse tmp_range
-  tmp_range.clear();
-  rval = mbImpl->get_entities_by_dimension(seth, box->box_dimension(), tmp_range);
+  Range tmp_range;
+  ErrorCode rval = mbImpl->get_entities_by_dimension(seth, box->box_dimension(), tmp_range);
   if (MB_SUCCESS != rval) return rval;
   if (box->num_elements() != (int)tmp_range.size()) return MB_FAILURE;
     
@@ -670,7 +696,6 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, EntityHandle se
     pcomm->get_buffers(*pit);
 
 
-  shared_data.reset();  
 #ifndef NDEBUG
   rval = pcomm->check_all_shared_handles();
   if (MB_SUCCESS != rval) return rval;

diff --git a/src/io/NCHelper.cpp b/src/io/NCHelper.cpp
index f4fd618..0ad7e19 100644
--- a/src/io/NCHelper.cpp
+++ b/src/io/NCHelper.cpp
@@ -820,48 +820,44 @@ ErrorCode ScdNCHelper::create_mesh(Range& faces)
   Range tmp_range;
   ScdBox *scd_box;
 
-  ErrorCode rval = scdi->construct_box(HomCoord(lDims[0], lDims[1], lDims[2], 1), HomCoord(lDims[3], lDims[4], lDims[5], 1), NULL,
-      0, scd_box, locallyPeriodic, &parData);
+  ErrorCode rval = scdi->construct_box(HomCoord(lDims[0], lDims[1], lDims[2], 1), HomCoord(lDims[3], lDims[4], lDims[5], 1), 
+                                       NULL, 0, scd_box, locallyPeriodic, &parData, true);
   ERRORR(rval, "Trouble creating scd vertex sequence.");
 
-  // Add box set and new vertices, elements to the file set
+    // add verts to tmp_range first, so we can duplicate global ids in vertex ids
   tmp_range.insert(scd_box->start_vertex(), scd_box->start_vertex() + scd_box->num_vertices() - 1);
-  tmp_range.insert(scd_box->start_element(), scd_box->start_element() + scd_box->num_elements() - 1);
-  tmp_range.insert(scd_box->box_set());
-  rval = mbImpl->add_entities(_fileSet, tmp_range);
-  ERRORR(rval, "Couldn't add new vertices to file set.");
 
-  dbgOut.tprintf(1, "scdbox %d quads, %d vertices\n", scd_box->num_elements(), scd_box->num_vertices());
-
-  // Get a ptr to global id memory
-  void* data;
-  int count;
-  const Range::iterator topv = tmp_range.upper_bound(tmp_range.begin(), tmp_range.end(), scd_box->start_vertex()
-      + scd_box->num_vertices());
-  rval = mbImpl->tag_iterate(mGlobalIdTag, tmp_range.begin(), topv, count, data);
-  ERRORR(rval, "Failed to get tag iterator.");
-  assert(count == scd_box->num_vertices());
-  int* gid_data = (int*) data;
-
-  // Duplicate global id data, which will be used to resolve sharing
-  int* fid_data;
   if (mpFileIdTag) {
+    Range::iterator topv = tmp_range.end();
+    int count;
+    void *data;
     rval = mbImpl->tag_iterate(*mpFileIdTag, tmp_range.begin(), topv, count, data);
     ERRORR(rval, "Failed to get tag iterator on file id tag.");
     assert(count == scd_box->num_vertices());
-    fid_data = (int*) data;
+    int *fid_data = (int*) data;
+    rval = mbImpl->tag_iterate(mGlobalIdTag, tmp_range.begin(), topv, count, data);
+    ERRORR(rval, "Failed to get tag iterator on file id tag.");
+    assert(count == scd_box->num_vertices());
+    int *gid_data = (int*) data;
+    for (int i = 0; i < count; i++) fid_data[i] = gid_data[i];
   }
 
+    // Then add box set and elements to the range, then to the file set
+  tmp_range.insert(scd_box->start_element(), scd_box->start_element() + scd_box->num_elements() - 1);
+  tmp_range.insert(scd_box->box_set());
+  rval = mbImpl->add_entities(_fileSet, tmp_range);
+  ERRORR(rval, "Couldn't add new vertices to file set.");
+
+  dbgOut.tprintf(1, "scdbox %d quads, %d vertices\n", scd_box->num_elements(), scd_box->num_vertices());
+
   // Set the vertex coordinates
   double *xc, *yc, *zc;
   rval = scd_box->get_coordinate_arrays(xc, yc, zc);
   ERRORR(rval, "Couldn't get vertex coordinate arrays.");
 
-  int i, j, k, il, jl, kl, itmp, id;
+  int i, j, k, il, jl, kl;
   int dil = lDims[3] - lDims[0] + 1;
   int djl = lDims[4] - lDims[1] + 1;
-  int di = gDims[3] - gDims[0] + 1;
-  int dj = gDims[4] - gDims[1] + 1;
   assert(dil == (int)ilVals.size() && djl == (int)jlVals.size() &&
       (-1 == lDims[2] || lDims[5]-lDims[2] + 1 == (int)levVals.size()));
 #define INDEX(i, j, k) ()
@@ -875,14 +871,6 @@ ErrorCode ScdNCHelper::create_mesh(Range& faces)
         xc[pos] = ilVals[i];
         yc[pos] = jlVals[j];
         zc[pos] = (-1 == lDims[2] ? 0.0 : levVals[k]);
-        itmp = (!locallyPeriodic[0] && globallyPeriodic[0] && il == gDims[3] ? gDims[0] : il);
-        id = (-1 != kl ? kl * di * dj : 0) + jl * di + itmp + 1;
-        *gid_data = id;
-        gid_data++;
-        if (mpFileIdTag) {
-          *fid_data = id;
-          fid_data++;
-        }
       }
     }
   }

diff --git a/src/moab/HomXform.hpp b/src/moab/HomXform.hpp
index 8110afa..8f3a1a7 100644
--- a/src/moab/HomXform.hpp
+++ b/src/moab/HomXform.hpp
@@ -31,6 +31,7 @@
 #define XFORM_INDEX(a,b) 4*a+b
 
 #include <math.h>
+#include <ostream>
 
 namespace moab {
 
@@ -473,6 +474,12 @@ inline int &HomCoord::operator[](const int &param)
   return homCoord[param];
 }
 
+inline std::ostream &operator<<(std::ostream &str, const HomCoord &hc)
+{
+  str << "(" << hc.i() << "," << hc.j() << "," << hc.k() << ")";
+  return str;
+}
+
 inline HomXform::HomXform(const int matrix[16]) 
 {
   for (int i = 0; i < 16; i++)

diff --git a/src/moab/ScdInterface.hpp b/src/moab/ScdInterface.hpp
index f8a4df8..bb24835 100644
--- a/src/moab/ScdInterface.hpp
+++ b/src/moab/ScdInterface.hpp
@@ -17,7 +17,6 @@ class EntitySequence;
 class ScdVertexData;
 class EntitySequence;
 class ScdBox;
-class Core;
 class ParallelComm;
 
 /** \class ScdInterface ScdInterface.hpp "moab/ScdInterface.hpp"
@@ -145,7 +144,7 @@ public:
      * \param impl MOAB instance
      * \param find_boxes If true, search all the entity sets, caching the structured mesh blocks
      */
-  ScdInterface(Core *impl, bool find_boxes = false);
+  ScdInterface(Interface *impl, bool find_boxes = false);
   
     // Destructor
   ~ScdInterface();
@@ -168,10 +167,12 @@ public:
      * \param lperiodic[2] If lperiodic[s] != 0, direction s is locally periodic
      * \param par_data If non-NULL, this will get stored on the ScdBox once created, contains info
      *                 about global parallel nature of ScdBox across procs
+     * \param assign_global_ids If true, assigns 1-based global ids to vertices using GLOBAL_ID_TAG_NAME
      */
   ErrorCode construct_box(HomCoord low, HomCoord high, const double * const coords, unsigned int num_coords,
                           ScdBox *& new_box, int * const lperiodic = NULL, 
-                          ScdParData * const par_data = NULL);
+                          ScdParData * const par_data = NULL,
+                          bool assign_global_ids = false);
 
     //! Create a structured sequence of vertices, quads, or hexes
     /** Starting handle for the sequence is available from the returned ScdBox.  
@@ -281,6 +282,11 @@ public:
      */
   ErrorCode tag_shared_vertices(ParallelComm *pcomm, EntityHandle seth);
   
+    //! Tag vertices with sharing data for parallel representations
+    /** Given the ParallelComm object to use, tag the vertices shared with other processors
+     */
+  ErrorCode tag_shared_vertices(ParallelComm *pcomm, ScdBox *box);
+  
 protected:
     //! Remove the box from the list on ScdInterface
   ErrorCode remove_box(ScdBox *box);
@@ -372,8 +378,11 @@ private:
   
   static int gtol(const int *gijk, int i, int j, int k);
 
+    //! assign global ids to vertices in this box
+  ErrorCode assign_global_ids(ScdBox *box);
+  
   //! interface instance
-  Core *mbImpl;
+  Interface *mbImpl;
 
     //! whether we've searched the database for boxes yet
   bool searchedBoxes;
@@ -619,12 +628,18 @@ public:
      */
   bool locally_periodic_k() const;
   
-    //! Return whether box is locally periodic in i and j
-    /** Return whether box is locally periodic in i and j
-     * \param lperiodic Non-zero if locally periodic in i [0] or j [1]
-     */
-  void locally_periodic(bool lperiodic[3]) const;
+    //! Set local periodicity
+    /** 
+     * \param lperiodic Vector of ijk periodicities to set this box to
+      */
+  void locally_periodic(bool lperiodic[3]);
 
+    //! Get local periodicity
+    /** 
+     * \return Vector of ijk periodicities for this box
+     */
+  const int *locally_periodic() const;
+ 
     //! Return parallel data 
     /** Return parallel data, if there is any
      * \return par_data Parallel data set on this box 
@@ -1214,6 +1229,25 @@ inline ErrorCode ScdInterface::get_neighbor(int np, int pfrom, const ScdParData
   return MB_FAILURE;
 }
 
+inline ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, EntityHandle seth) 
+{
+  ScdBox *box = get_scd_box(seth);
+  if (!box) {
+      // look for contained boxes
+    Range tmp_range;
+    ErrorCode rval = mbImpl->get_entities_by_type(seth, MBENTITYSET, tmp_range);
+    if (MB_SUCCESS != rval) return rval;
+    for (Range::iterator rit = tmp_range.begin(); rit != tmp_range.end(); rit++) {
+      box = get_scd_box(*rit);
+      if (box) break;
+    }
+  }
+  
+  if (!box) return MB_FAILURE;
+
+  return tag_shared_vertices(pcomm, box);
+}
+
 inline ScdInterface *ScdBox::sc_impl() const 
 {
   return scImpl;
@@ -1382,12 +1416,17 @@ inline bool ScdBox::locally_periodic_k() const
   return locallyPeriodic[2];
 }
 
-inline void ScdBox::locally_periodic(bool lperiodic[3]) const 
+inline void ScdBox::locally_periodic(bool lperiodic[3])
 {
-  for (int i = 0; i < 3; i++) 
-    lperiodic[i] = locallyPeriodic[i];
+   for (int i = 0; i < 3; i++) 
+    locallyPeriodic[i] = lperiodic[i];
 }
 
+inline const int *ScdBox::locally_periodic() const
+{
+  return locallyPeriodic;
+}
+ 
 inline std::ostream &operator<<(std::ostream &str, const ScdParData &pd) 
 {
   static const char *PartitionMethodNames[] = {"NOPART", "ALLJORKORI", "ALLJKBAL", "SQIJ", "SQJK", "SQIJK"};

Repository URL: https://bitbucket.org/fathomteam/moab/

--

This is a commit notification from bitbucket.org. You are receiving
this email because you have the commit-notification service enabled
for this repository.


More information about the moab-dev mailing list