[MOAB-dev] commit/MOAB: 24 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Fri Jun 27 17:01:16 CDT 2014


24 new commits in MOAB:

https://bitbucket.org/fathomteam/moab/commits/c352cb1ed60d/
Changeset:   c352cb1ed60d
Branch:      None
User:        pshriwise
Date:        2014-05-21 15:56:51
Summary:     Implemented a new function add_vertices in ReadCGM.

Affected #:  2 files

diff --git a/src/io/ReadCGM.cpp b/src/io/ReadCGM.cpp
index c4db384..56fe10a 100644
--- a/src/io/ReadCGM.cpp
+++ b/src/io/ReadCGM.cpp
@@ -481,6 +481,7 @@ ErrorCode ReadCGM::store_group_content( Interface* /* moab */, std::map<RefEntit
   return MB_SUCCESS;
 }
 
+
 void ReadCGM::set_cgm_attributes(bool const act_attributes, bool const verbose)
 {
 
@@ -497,6 +498,31 @@ void ReadCGM::set_cgm_attributes(bool const act_attributes, bool const verbose)
 
 }
 
+
+  ErrorCode ReadCGM::add_vertices( Interface* moab, std::map<RefEntity*,EntityHandle> entitymap[5] )
+{
+
+ ErrorCode rval;
+ std::map<RefEntity*,EntityHandle>::iterator ci;
+ for (ci = entitymap[0].begin(); ci != entitymap[0].end(); ++ci) {
+    CubitVector pos = dynamic_cast<RefVertex*>(ci->first)->coordinates();
+    double coords[3] = {pos.x(), pos.y(), pos.z()};
+    EntityHandle vh;
+    rval = moab->create_vertex( coords, vh );
+    if (MB_SUCCESS != rval)
+      return MB_FAILURE;
+    
+    rval = moab->add_entities( ci->second, &vh, 1 );
+    if (MB_SUCCESS != rval)
+      return MB_FAILURE;
+    
+    ci->second = vh;
+  }
+  return MB_SUCCESS;
+}
+
+
+
 // copy geometry into mesh database
 ErrorCode ReadCGM::load_file(const char *cgm_file_name,
                       const EntityHandle* file_set,
@@ -584,23 +610,12 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   entmap[3].clear();
   entmap[4].clear();
   
+
   // create geometry for all vertices and replace 
   // vertex set handles with vertex handles in map
-  for (ci = entmap[0].begin(); ci != entmap[0].end(); ++ci) {
-    CubitVector pos = dynamic_cast<RefVertex*>(ci->first)->coordinates();
-    double coords[3] = {pos.x(), pos.y(), pos.z()};
-    EntityHandle vh;
-    rval = mdbImpl->create_vertex( coords, vh );
-    if (MB_SUCCESS != rval)
-      return MB_FAILURE;
-    
-    rval = mdbImpl->add_entities( ci->second, &vh, 1 );
-    if (MB_SUCCESS != rval)
-      return MB_FAILURE;
-    
-    ci->second = vh;
-  }
-
+  rval = add_vertices( mdbImpl, entmap );
+  if(rval!=MB_SUCCESS) return rval; 
+ 
   // maximum allowable curve-endpoint proximity warnings
   // if this integer becomes negative, then abs(curve_warnings) is the 
   // number of warnings that were suppressed.

diff --git a/src/io/ReadCGM.hpp b/src/io/ReadCGM.hpp
index 2e38bbd..8150db4 100644
--- a/src/io/ReadCGM.hpp
+++ b/src/io/ReadCGM.hpp
@@ -95,8 +95,12 @@ public:
 
   ErrorCode store_group_content( Interface* moab, std::map<RefEntity*,EntityHandle>* entitymap );
 
+
   void set_cgm_attributes(bool const act_attributes, bool const verbost);
 
+  ErrorCode add_vertices( Interface* moab, std::map<RefEntity*,EntityHandle> entmap[5] );
+
+
    //! Constructor
    ReadCGM(Interface* impl = NULL);
 


https://bitbucket.org/fathomteam/moab/commits/3821bfdb10f1/
Changeset:   3821bfdb10f1
Branch:      None
User:        pshriwise
Date:        2014-05-21 15:58:54
Summary:     Created a new function for faceting curves.

Affected #:  2 files

diff --git a/src/io/ReadCGM.cpp b/src/io/ReadCGM.cpp
index 56fe10a..d5a5acf 100644
--- a/src/io/ReadCGM.cpp
+++ b/src/io/ReadCGM.cpp
@@ -473,7 +473,7 @@ ErrorCode ReadCGM::store_group_content( Interface* /* moab */, std::map<RefEntit
     }
     
     if (!entities.empty()) {
-      rval = mdbImpl->add_entities( ci->second, entities );
+      rval = moab->add_entities( ci->second, entities );
       if (MB_SUCCESS != rval)
         return MB_FAILURE;
     }
@@ -521,6 +521,128 @@ void ReadCGM::set_cgm_attributes(bool const act_attributes, bool const verbose)
   return MB_SUCCESS;
 }
 
+ErrorCode ReadCGM::create_curve_facets( Interface* moab, 
+                                        std::map<RefEntity*,EntityHandle> entitymap[5], 
+                                        int norm_tol, 
+                                        double faceting_tol, 
+                                        bool verbose_warn )
+{
+ 
+  ErrorCode rval;
+  CubitStatus s;
+  // maximum allowable curve-endpoint proximity warnings
+  // if this integer becomes negative, then abs(curve_warnings) is the 
+  // number of warnings that were suppressed.
+  int curve_warnings = 10;
+ 
+  //map iterator
+  std::map<RefEntity*,EntityHandle>::iterator ci; 
+  
+  // create geometry for all curves
+  GMem data;
+  for (ci = entitymap[1].begin(); ci != entitymap[1].end(); ++ci) {
+    RefEdge* edge = dynamic_cast<RefEdge*>(ci->first);
+    Curve* curve = edge->get_curve_ptr();
+    data.clean_out();
+#if  CGM_MAJOR_VERSION>12
+    s = edge->get_graphics( data, norm_tol, faceting_tol);
+#else
+    s = edge->get_graphics( data, faceting_tol);
+#endif
+     if (CUBIT_SUCCESS != s)
+        return MB_FAILURE;
+      
+    std::vector<CubitVector> points;
+    for (int i = 0; i < data.pointListCount; ++i)
+      points.push_back( CubitVector( data.point_list()[i].x,
+                                     data.point_list()[i].y,
+                                     data.point_list()[i].z ) );
+
+      // need to reverse data?
+    if (curve->bridge_sense() == CUBIT_REVERSED) 
+      std::reverse( points.begin(), points.end() );
+    
+       // check for closed curve
+    RefVertex *start_vtx, *end_vtx;
+    start_vtx = edge->start_vertex();
+    end_vtx = edge->end_vertex();
+    
+      // Special case for point curve
+    if (points.size() < 2) {
+      if (start_vtx != end_vtx || curve->measure() > GEOMETRY_RESABS ) {
+        std::cerr << "Warning: No facetting for curve " << edge->id() << std::endl;
+        continue;
+      }
+      EntityHandle h = entitymap[0][start_vtx];
+      rval = moab->add_entities( ci->second, &h, 1 );
+      if (MB_SUCCESS != rval)
+        return MB_FAILURE;
+      continue;
+    }
+    
+    const bool closed = (points.front() - points.back()).length() < GEOMETRY_RESABS;
+    if (closed != (start_vtx == end_vtx)) {
+      std::cerr << "Warning: topology and geometry inconsistant for possibly closed curve "
+                << edge->id() << std::endl;
+    }
+    
+      // check proximity of vertices to end coordinates
+    if ((start_vtx->coordinates() - points.front()).length() > GEOMETRY_RESABS
+     || (  end_vtx->coordinates() - points.back() ).length() > GEOMETRY_RESABS ) {
+
+      curve_warnings--;
+      if( curve_warnings >= 0 || verbose_warn){ 
+	std::cerr << "Warning: vertices not at ends of curve " << edge->id() << std::endl;
+	if( curve_warnings == 0 && !verbose_warn){
+	  std::cerr << "         further instances of this warning will be suppressed..." << std::endl;
+	}
+      }
+
+    }    
+      // create interior points
+    std::vector<EntityHandle> verts, edges;
+    verts.push_back( entitymap[0][start_vtx] );
+    for (size_t i = 1; i < points.size() - 1; ++i) {
+      double coords[] = { points[i].x(), points[i].y(), points[i].z() };
+      EntityHandle h;
+      rval = moab->create_vertex( coords, h );
+      if (MB_SUCCESS != rval)
+        return MB_FAILURE;
+      verts.push_back( h );
+    }
+    verts.push_back( entitymap[0][end_vtx] );
+    
+      // create edges
+    for (size_t i = 0; i < verts.size()-1; ++i) {
+      EntityHandle h;
+      rval = moab->create_element( MBEDGE, &verts[i], 2, h );
+      if (MB_SUCCESS != rval)
+        return MB_FAILURE;
+      edges.push_back( h );
+    }
+    
+      // if closed, remove duplicate
+    if (verts.front() == verts.back())
+      verts.pop_back();
+    
+    rval = moab->add_entities( ci->second, &verts[0], verts.size() );
+    if (MB_SUCCESS != rval)
+      return MB_FAILURE;
+    rval = moab->add_entities( ci->second, &edges[0], edges.size() );
+    if (MB_SUCCESS != rval)
+      return MB_FAILURE;
+  }
+
+  if( !verbose_warn && curve_warnings < 0 ){
+    std::cerr << "Suppressed " << -curve_warnings 
+	      << " 'vertices not at ends of curve' warnings." << std::endl;
+    std::cerr << "To see all warnings, use reader param VERBOSE_CGM_WARNINGS." << std::endl;
+  }
+
+  return MB_SUCCESS;
+}
+
+
 
 
 // copy geometry into mesh database
@@ -615,8 +737,8 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   // vertex set handles with vertex handles in map
   rval = add_vertices( mdbImpl, entmap );
   if(rval!=MB_SUCCESS) return rval; 
- 
-  // maximum allowable curve-endpoint proximity warnings
+
+  // maximum allowable curve-endpoint proximity warningsg
   // if this integer becomes negative, then abs(curve_warnings) is the 
   // number of warnings that were suppressed.
   int curve_warnings = 10;
@@ -722,7 +844,7 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
 	      << " 'vertices not at ends of curve' warnings." << std::endl;
     std::cerr << "To see all warnings, use reader param VERBOSE_CGM_WARNINGS." << std::endl;
   }
-  
+
     // create geometry for all surfaces
   for (ci = entmap[2].begin(); ci != entmap[2].end(); ++ci) {
     RefFace* face = dynamic_cast<RefFace*>(ci->first);

diff --git a/src/io/ReadCGM.hpp b/src/io/ReadCGM.hpp
index 8150db4..8304e0c 100644
--- a/src/io/ReadCGM.hpp
+++ b/src/io/ReadCGM.hpp
@@ -100,6 +100,11 @@ public:
 
   ErrorCode add_vertices( Interface* moab, std::map<RefEntity*,EntityHandle> entmap[5] );
 
+  ErrorCode create_curve_facets( Interface* moab, 
+                                 std::map<RefEntity*,EntityHandle> entitymap[5],
+                                 int norm_tol,
+                                 double faceting_tol,
+                                 bool verbose_warn = false );
 
    //! Constructor
    ReadCGM(Interface* impl = NULL);


https://bitbucket.org/fathomteam/moab/commits/1f451ae55ff7/
Changeset:   1f451ae55ff7
Branch:      None
User:        pshriwise
Date:        2014-05-21 15:58:54
Summary:     Implemented function for faceting curves in ReadCGM.

Affected #:  1 file

diff --git a/src/io/ReadCGM.cpp b/src/io/ReadCGM.cpp
index d5a5acf..f221fcf 100644
--- a/src/io/ReadCGM.cpp
+++ b/src/io/ReadCGM.cpp
@@ -738,113 +738,12 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   rval = add_vertices( mdbImpl, entmap );
   if(rval!=MB_SUCCESS) return rval; 
 
-  // maximum allowable curve-endpoint proximity warningsg
-  // if this integer becomes negative, then abs(curve_warnings) is the 
-  // number of warnings that were suppressed.
-  int curve_warnings = 10;
+  // create facets for all curves
+  rval = create_curve_facets( mdbImpl, entmap, norm_tol, faceting_tol, verbose_warnings );
+  if(rval!=MB_SUCCESS) return rval;
+
   
-    // create geometry for all curves
   GMem data;
-  for (ci = entmap[1].begin(); ci != entmap[1].end(); ++ci) {
-    RefEdge* edge = dynamic_cast<RefEdge*>(ci->first);
-    Curve* curve = edge->get_curve_ptr();
-    data.clean_out();
-#if  CGM_MAJOR_VERSION>12
-    edge->get_graphics( data, norm_tol, faceting_tol);
-#else
-    edge->get_graphics( data, faceting_tol);
-#endif
-    if (CUBIT_SUCCESS != s)
-      return MB_FAILURE;
-      
-    std::vector<CubitVector> points;
-    for (int i = 0; i < data.pointListCount; ++i)
-      points.push_back( CubitVector( data.point_list()[i].x,
-                                     data.point_list()[i].y,
-                                     data.point_list()[i].z ) );
-
-      // need to reverse data?
-    if (curve->bridge_sense() == CUBIT_REVERSED) 
-      std::reverse( points.begin(), points.end() );
-    
-       // check for closed curve
-    RefVertex *start_vtx, *end_vtx;
-    start_vtx = edge->start_vertex();
-    end_vtx = edge->end_vertex();
-    
-      // Special case for point curve
-    if (points.size() < 2) {
-      if (start_vtx != end_vtx || curve->measure() > GEOMETRY_RESABS ) {
-        std::cerr << "Warning: No facetting for curve " << edge->id() << std::endl;
-        continue;
-      }
-      EntityHandle h = entmap[0][start_vtx];
-      rval = mdbImpl->add_entities( ci->second, &h, 1 );
-      if (MB_SUCCESS != rval)
-        return MB_FAILURE;
-      continue;
-    }
-    
-    const bool closed = (points.front() - points.back()).length() < GEOMETRY_RESABS;
-    if (closed != (start_vtx == end_vtx)) {
-      std::cerr << "Warning: topology and geometry inconsistant for possibly closed curve "
-                << edge->id() << std::endl;
-    }
-    
-      // check proximity of vertices to end coordinates
-    if ((start_vtx->coordinates() - points.front()).length() > GEOMETRY_RESABS
-     || (  end_vtx->coordinates() - points.back() ).length() > GEOMETRY_RESABS ) {
-
-      curve_warnings--;
-      if( curve_warnings >= 0 || verbose_warnings ){ 
-	std::cerr << "Warning: vertices not at ends of curve " << edge->id() << std::endl;
-	if( curve_warnings == 0 && !verbose_warnings ){
-	  std::cerr << "         further instances of this warning will be suppressed..." << std::endl;
-	}
-      }
-
-    }
-    
-      // create interior points
-    std::vector<EntityHandle> verts, edges;
-    verts.push_back( entmap[0][start_vtx] );
-    for (size_t i = 1; i < points.size() - 1; ++i) {
-      double coords[] = { points[i].x(), points[i].y(), points[i].z() };
-      EntityHandle h;
-      rval = mdbImpl->create_vertex( coords, h );
-      if (MB_SUCCESS != rval)
-        return MB_FAILURE;
-      verts.push_back( h );
-    }
-    verts.push_back( entmap[0][end_vtx] );
-    
-      // create edges
-    for (size_t i = 0; i < verts.size()-1; ++i) {
-      EntityHandle h;
-      rval = mdbImpl->create_element( MBEDGE, &verts[i], 2, h );
-      if (MB_SUCCESS != rval)
-        return MB_FAILURE;
-      edges.push_back( h );
-    }
-    
-      // if closed, remove duplicate
-    if (verts.front() == verts.back())
-      verts.pop_back();
-    
-    rval = mdbImpl->add_entities( ci->second, &verts[0], verts.size() );
-    if (MB_SUCCESS != rval)
-      return MB_FAILURE;
-    rval = mdbImpl->add_entities( ci->second, &edges[0], edges.size() );
-    if (MB_SUCCESS != rval)
-      return MB_FAILURE;
-  }
-
-  if( !verbose_warnings && curve_warnings < 0 ){
-    std::cerr << "Suppressed " << -curve_warnings 
-	      << " 'vertices not at ends of curve' warnings." << std::endl;
-    std::cerr << "To see all warnings, use reader param VERBOSE_CGM_WARNINGS." << std::endl;
-  }
-
     // create geometry for all surfaces
   for (ci = entmap[2].begin(); ci != entmap[2].end(); ++ci) {
     RefFace* face = dynamic_cast<RefFace*>(ci->first);


https://bitbucket.org/fathomteam/moab/commits/1b257b3a190c/
Changeset:   1b257b3a190c
Branch:      None
User:        pshriwise
Date:        2014-05-21 15:58:54
Summary:     Added comments to the create_curve_facets function in ReadCGM.

Affected #:  1 file

diff --git a/src/io/ReadCGM.cpp b/src/io/ReadCGM.cpp
index f221fcf..b0d5b2b 100644
--- a/src/io/ReadCGM.cpp
+++ b/src/io/ReadCGM.cpp
@@ -541,9 +541,13 @@ ErrorCode ReadCGM::create_curve_facets( Interface* moab,
   // create geometry for all curves
   GMem data;
   for (ci = entitymap[1].begin(); ci != entitymap[1].end(); ++ci) {
+    //get the start and end points of the curve in the form of a refernce edge
     RefEdge* edge = dynamic_cast<RefEdge*>(ci->first);
+    //get the edge's curve information
     Curve* curve = edge->get_curve_ptr();
+    //clean out previous curve information
     data.clean_out();
+    //facet curve accoring to parameters and CGM version
 #if  CGM_MAJOR_VERSION>12
     s = edge->get_graphics( data, norm_tol, faceting_tol);
 #else
@@ -554,6 +558,7 @@ ErrorCode ReadCGM::create_curve_facets( Interface* moab,
       
     std::vector<CubitVector> points;
     for (int i = 0; i < data.pointListCount; ++i)
+      //add Cubit vertext points to a list
       points.push_back( CubitVector( data.point_list()[i].x,
                                      data.point_list()[i].y,
                                      data.point_list()[i].z ) );
@@ -579,7 +584,8 @@ ErrorCode ReadCGM::create_curve_facets( Interface* moab,
         return MB_FAILURE;
       continue;
     }
-    
+    // check to see if the first and last interior vertices are considered to be 
+    // coincident by CUBIT
     const bool closed = (points.front() - points.back()).length() < GEOMETRY_RESABS;
     if (closed != (start_vtx == end_vtx)) {
       std::cerr << "Warning: topology and geometry inconsistant for possibly closed curve "
@@ -605,6 +611,7 @@ ErrorCode ReadCGM::create_curve_facets( Interface* moab,
     for (size_t i = 1; i < points.size() - 1; ++i) {
       double coords[] = { points[i].x(), points[i].y(), points[i].z() };
       EntityHandle h;
+      //create vertex entity
       rval = moab->create_vertex( coords, h );
       if (MB_SUCCESS != rval)
         return MB_FAILURE;
@@ -624,7 +631,7 @@ ErrorCode ReadCGM::create_curve_facets( Interface* moab,
       // if closed, remove duplicate
     if (verts.front() == verts.back())
       verts.pop_back();
-    
+    //Add entities to the curve meshset from entitymap
     rval = moab->add_entities( ci->second, &verts[0], verts.size() );
     if (MB_SUCCESS != rval)
       return MB_FAILURE;
@@ -741,7 +748,6 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   // create facets for all curves
   rval = create_curve_facets( mdbImpl, entmap, norm_tol, faceting_tol, verbose_warnings );
   if(rval!=MB_SUCCESS) return rval;
-
   
   GMem data;
     // create geometry for all surfaces


https://bitbucket.org/fathomteam/moab/commits/bbad39e64a08/
Changeset:   bbad39e64a08
Branch:      None
User:        pshriwise
Date:        2014-05-21 15:59:42
Summary:     Added comments to ReadCGM. Moved a variable initialization closer to its use point.

Affected #:  1 file

diff --git a/src/io/ReadCGM.cpp b/src/io/ReadCGM.cpp
index b0d5b2b..90dbac5 100644
--- a/src/io/ReadCGM.cpp
+++ b/src/io/ReadCGM.cpp
@@ -650,8 +650,6 @@ ErrorCode ReadCGM::create_curve_facets( Interface* moab,
 }
 
 
-
-
 // copy geometry into mesh database
 ErrorCode ReadCGM::load_file(const char *cgm_file_name,
                       const EntityHandle* file_set,
@@ -690,7 +688,7 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   //const char geom_categories[][CATEGORY_TAG_SIZE] =
       //{"Vertex\0", "Curve\0", "Surface\0", "Volume\0", "Group\0"};
  
-  DLIList<ModelEntity*> me_list;
+
 
   // Initialize CGM
   InitCGMA::initialize_cgma();
@@ -749,6 +747,7 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   rval = create_curve_facets( mdbImpl, entmap, norm_tol, faceting_tol, verbose_warnings );
   if(rval!=MB_SUCCESS) return rval;
   
+  DLIList<ModelEntity*> me_list;
   GMem data;
     // create geometry for all surfaces
   for (ci = entmap[2].begin(); ci != entmap[2].end(); ++ci) {
@@ -763,22 +762,28 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
       // declare array of all vertex handles
     std::vector<EntityHandle> verts( data.pointListCount, 0 );
     
-      // get list of vertices in surface
+      // get list of geometric vertices in surface
     me_list.clean_out();
     ModelQueryEngine::instance()->query_model( *face, DagType::ref_vertex_type(), me_list );
 
-      // for each vertex, find coincident point in facets
+      // for each geometric vertex, find a single coincident point in facets
+      // otherwise, print a warning
     for (int i = me_list.size(); i--; ) {
+      //assign geometric vertex
       RefVertex* vtx = dynamic_cast<RefVertex*>(me_list.get_and_step());
       CubitVector pos = vtx->coordinates();
 
       for (int j = 0; j < data.pointListCount; ++j) {
+        //assign facet vertex
         CubitVector vpos( data.point_list()[j].x,
                           data.point_list()[j].y,
                           data.point_list()[j].z );
+        //check to see if they are considered coincident
         if ((pos - vpos).length_squared() < GEOMETRY_RESABS*GEOMETRY_RESABS ) {
+          // if this facet vertex has already been found coincident, print warning
           if (verts[j])
             std::cerr << "Warning: Coincident vertices in surface " << face->id() << std::endl;
+          //if a coincidence is found, keep track of it in the verts vector
           verts[j] = entmap[0][vtx];
           break;
         }
@@ -792,6 +797,8 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
       double coords[] = { data.point_list()[i].x,
                           data.point_list()[i].y,
                           data.point_list()[i].z };
+      // return vertex handle to verts to fill in all remaining facet
+      // vertices
       rval = mdbImpl->create_vertex( coords, verts[i] );
       if (MB_SUCCESS != rval)
         return rval;
@@ -801,6 +808,7 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
     Range facets;
     std::vector<EntityHandle> corners;
     for (int i = 0; i < data.fListCount; i += data.facet_list()[i]+1) {
+      // get number of facet verts
       int* facet = data.facet_list() + i;
       corners.resize( *facet );
       for (int j = 1; j <= *facet; ++j) {


https://bitbucket.org/fathomteam/moab/commits/61694640b101/
Changeset:   61694640b101
Branch:      None
User:        pshriwise
Date:        2014-05-21 15:59:42
Summary:     Added a new function for creating surface facets in ReadCGM.

Affected #:  2 files

diff --git a/src/io/ReadCGM.cpp b/src/io/ReadCGM.cpp
index 90dbac5..465b874 100644
--- a/src/io/ReadCGM.cpp
+++ b/src/io/ReadCGM.cpp
@@ -649,6 +649,122 @@ ErrorCode ReadCGM::create_curve_facets( Interface* moab,
   return MB_SUCCESS;
 }
 
+ErrorCode ReadCGM::create_surface_facets( Interface* moab, 
+                                          std::map<RefEntity*,EntityHandle> entitymap[5],
+                                          int norm_tol, 
+                                          double facet_tol, 
+                                          double length_tol )
+{
+
+  ErrorCode rval;
+  std::map<RefEntity*,EntityHandle>::iterator ci;
+  CubitStatus s;
+  DLIList<ModelEntity*> me_list;
+  GMem data;
+    // create geometry for all surfaces
+  for (ci = entitymap[2].begin(); ci != entitymap[2].end(); ++ci) {
+    RefFace* face = dynamic_cast<RefFace*>(ci->first);
+
+    data.clean_out();
+    s = face->get_graphics( data, norm_tol, facet_tol, length_tol );
+
+    if (CUBIT_SUCCESS != s)
+      return MB_FAILURE;
+
+      // declare array of all vertex handles
+    std::vector<EntityHandle> verts( data.pointListCount, 0 );
+    
+      // get list of geometric vertices in surface
+    me_list.clean_out();
+    ModelQueryEngine::instance()->query_model( *face, DagType::ref_vertex_type(), me_list );
+
+      // for each geometric vertex, find a single coincident point in facets
+      // otherwise, print a warning
+    for (int i = me_list.size(); i--; ) {
+      //assign geometric vertex
+      RefVertex* vtx = dynamic_cast<RefVertex*>(me_list.get_and_step());
+      CubitVector pos = vtx->coordinates();
+
+      for (int j = 0; j < data.pointListCount; ++j) {
+        //assign facet vertex
+        CubitVector vpos( data.point_list()[j].x,
+                          data.point_list()[j].y,
+                          data.point_list()[j].z );
+        //check to see if they are considered coincident
+        if ((pos - vpos).length_squared() < GEOMETRY_RESABS*GEOMETRY_RESABS ) {
+          // if this facet vertex has already been found coincident, print warning
+          if (verts[j])
+            std::cerr << "Warning: Coincident vertices in surface " << face->id() << std::endl;
+          //if a coincidence is found, keep track of it in the verts vector
+          verts[j] = entitymap[0][vtx];
+          break;
+        }
+      }
+    }
+    
+      // now create vertices for the remaining points in the facetting
+    for (int i = 0; i < data.pointListCount; ++i) {
+      if (verts[i]) // if a geometric vertex
+        continue;
+      double coords[] = { data.point_list()[i].x,
+                          data.point_list()[i].y,
+                          data.point_list()[i].z };
+      // return vertex handle to verts to fill in all remaining facet
+      // vertices
+      rval = mdbImpl->create_vertex( coords, verts[i] );
+      if (MB_SUCCESS != rval)
+        return rval;
+    }
+    
+      // now create facets
+    Range facets;
+    std::vector<EntityHandle> corners;
+    for (int i = 0; i < data.fListCount; i += data.facet_list()[i]+1) {
+      // get number of facet verts
+      int* facet = data.facet_list() + i;
+      corners.resize( *facet );
+      for (int j = 1; j <= *facet; ++j) {
+        if (facet[j] >= (int)verts.size()) {
+          std::cerr << "ERROR: Invalid facet data for surface " << face->id() << std::endl;
+          return MB_FAILURE;
+        }
+        corners[j-1] = verts[facet[j]];
+      }
+      EntityType type;
+      if (*facet == 3)
+        type = MBTRI;
+      else {
+        std::cerr << "Warning: non-triangle facet in surface " << face->id() << std::endl;
+	std::cerr << "  entity has " << *facet << " edges" << std::endl;
+        if (*facet == 4)
+          type = MBQUAD;
+        else
+          type = MBPOLYGON;
+      }
+      
+      // if (surf->bridge_sense() == CUBIT_REVERSED)
+      //   std::reverse( corners.begin(), corners.end() );
+      
+      EntityHandle h;
+      rval = mdbImpl->create_element( type, &corners[0], corners.size(), h );
+      if (MB_SUCCESS != rval)
+        return MB_FAILURE;
+        
+      facets.insert( h );
+    }
+    
+      // add vertices and facets to surface set
+    rval = mdbImpl->add_entities( ci->second, &verts[0], verts.size() );
+    if (MB_SUCCESS != rval)
+      return MB_FAILURE;
+    rval = mdbImpl->add_entities( ci->second, facets );
+    if (MB_SUCCESS != rval)
+      return MB_FAILURE;
+  }
+
+  return MB_SUCCESS;
+}
+
 
 // copy geometry into mesh database
 ErrorCode ReadCGM::load_file(const char *cgm_file_name,
@@ -747,6 +863,7 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   rval = create_curve_facets( mdbImpl, entmap, norm_tol, faceting_tol, verbose_warnings );
   if(rval!=MB_SUCCESS) return rval;
   
+
   DLIList<ModelEntity*> me_list;
   GMem data;
     // create geometry for all surfaces

diff --git a/src/io/ReadCGM.hpp b/src/io/ReadCGM.hpp
index 8304e0c..395614a 100644
--- a/src/io/ReadCGM.hpp
+++ b/src/io/ReadCGM.hpp
@@ -106,6 +106,12 @@ public:
                                  double faceting_tol,
                                  bool verbose_warn = false );
 
+  ErrorCode create_surface_facets( Interface* moab, 
+                                   std::map<RefEntity*,EntityHandle> entitymap[5],
+                                   int norm_tol, 
+                                   double facet_tol, 
+                                   double length_tol );
+
    //! Constructor
    ReadCGM(Interface* impl = NULL);
 


https://bitbucket.org/fathomteam/moab/commits/d3b900354663/
Changeset:   d3b900354663
Branch:      None
User:        pshriwise
Date:        2014-05-21 15:59:42
Summary:     Implemented the function create_surface_facets in ReadCGM.

Affected #:  1 file

diff --git a/src/io/ReadCGM.cpp b/src/io/ReadCGM.cpp
index 465b874..6fe737e 100644
--- a/src/io/ReadCGM.cpp
+++ b/src/io/ReadCGM.cpp
@@ -852,120 +852,18 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   // done with volumes and groups
   entmap[3].clear();
   entmap[4].clear();
-  
 
   // create geometry for all vertices and replace 
-  // vertex set handles with vertex handles in map
   rval = add_vertices( mdbImpl, entmap );
   if(rval!=MB_SUCCESS) return rval; 
 
   // create facets for all curves
   rval = create_curve_facets( mdbImpl, entmap, norm_tol, faceting_tol, verbose_warnings );
   if(rval!=MB_SUCCESS) return rval;
-  
-
-  DLIList<ModelEntity*> me_list;
-  GMem data;
-    // create geometry for all surfaces
-  for (ci = entmap[2].begin(); ci != entmap[2].end(); ++ci) {
-    RefFace* face = dynamic_cast<RefFace*>(ci->first);
-
-    data.clean_out();
-    s = face->get_graphics( data, norm_tol, faceting_tol, len_tol );
-
-    if (CUBIT_SUCCESS != s)
-      return MB_FAILURE;
-
-      // declare array of all vertex handles
-    std::vector<EntityHandle> verts( data.pointListCount, 0 );
-    
-      // get list of geometric vertices in surface
-    me_list.clean_out();
-    ModelQueryEngine::instance()->query_model( *face, DagType::ref_vertex_type(), me_list );
-
-      // for each geometric vertex, find a single coincident point in facets
-      // otherwise, print a warning
-    for (int i = me_list.size(); i--; ) {
-      //assign geometric vertex
-      RefVertex* vtx = dynamic_cast<RefVertex*>(me_list.get_and_step());
-      CubitVector pos = vtx->coordinates();
 
-      for (int j = 0; j < data.pointListCount; ++j) {
-        //assign facet vertex
-        CubitVector vpos( data.point_list()[j].x,
-                          data.point_list()[j].y,
-                          data.point_list()[j].z );
-        //check to see if they are considered coincident
-        if ((pos - vpos).length_squared() < GEOMETRY_RESABS*GEOMETRY_RESABS ) {
-          // if this facet vertex has already been found coincident, print warning
-          if (verts[j])
-            std::cerr << "Warning: Coincident vertices in surface " << face->id() << std::endl;
-          //if a coincidence is found, keep track of it in the verts vector
-          verts[j] = entmap[0][vtx];
-          break;
-        }
-      }
-    }
-    
-      // now create vertices for the remaining points in the facetting
-    for (int i = 0; i < data.pointListCount; ++i) {
-      if (verts[i]) // if a geometric vertex
-        continue;
-      double coords[] = { data.point_list()[i].x,
-                          data.point_list()[i].y,
-                          data.point_list()[i].z };
-      // return vertex handle to verts to fill in all remaining facet
-      // vertices
-      rval = mdbImpl->create_vertex( coords, verts[i] );
-      if (MB_SUCCESS != rval)
-        return rval;
-    }
-    
-      // now create facets
-    Range facets;
-    std::vector<EntityHandle> corners;
-    for (int i = 0; i < data.fListCount; i += data.facet_list()[i]+1) {
-      // get number of facet verts
-      int* facet = data.facet_list() + i;
-      corners.resize( *facet );
-      for (int j = 1; j <= *facet; ++j) {
-        if (facet[j] >= (int)verts.size()) {
-          std::cerr << "ERROR: Invalid facet data for surface " << face->id() << std::endl;
-          return MB_FAILURE;
-        }
-        corners[j-1] = verts[facet[j]];
-      }
-      EntityType type;
-      if (*facet == 3)
-        type = MBTRI;
-      else {
-        std::cerr << "Warning: non-triangle facet in surface " << face->id() << std::endl;
-	std::cerr << "  entity has " << *facet << " edges" << std::endl;
-        if (*facet == 4)
-          type = MBQUAD;
-        else
-          type = MBPOLYGON;
-      }
-      
-      // if (surf->bridge_sense() == CUBIT_REVERSED)
-      //   std::reverse( corners.begin(), corners.end() );
-      
-      EntityHandle h;
-      rval = mdbImpl->create_element( type, &corners[0], corners.size(), h );
-      if (MB_SUCCESS != rval)
-        return MB_FAILURE;
-        
-      facets.insert( h );
-    }
-    
-      // add vertices and facets to surface set
-    rval = mdbImpl->add_entities( ci->second, &verts[0], verts.size() );
-    if (MB_SUCCESS != rval)
-      return MB_FAILURE;
-    rval = mdbImpl->add_entities( ci->second, facets );
-    if (MB_SUCCESS != rval)
-      return MB_FAILURE;
-  }
+  // create facets for surfaces
+  rval = create_surface_facets( mdbImpl, entmap, norm_tol, faceting_tol, len_tol);
+  if(rval!=MB_SUCCESS) return rval;
 
   return MB_SUCCESS;
 }


https://bitbucket.org/fathomteam/moab/commits/e2b837535b6e/
Changeset:   e2b837535b6e
Branch:      None
User:        pshriwise
Date:        2014-05-21 16:15:58
Summary:     Renamed add_vertices in ReadCGM to create_vertices.

Affected #:  2 files

diff --git a/src/io/ReadCGM.cpp b/src/io/ReadCGM.cpp
index 6fe737e..a04bf96 100644
--- a/src/io/ReadCGM.cpp
+++ b/src/io/ReadCGM.cpp
@@ -499,7 +499,7 @@ void ReadCGM::set_cgm_attributes(bool const act_attributes, bool const verbose)
 }
 
 
-  ErrorCode ReadCGM::add_vertices( Interface* moab, std::map<RefEntity*,EntityHandle> entitymap[5] )
+  ErrorCode ReadCGM::create_vertices( Interface* moab, std::map<RefEntity*,EntityHandle> entitymap[5] )
 {
 
  ErrorCode rval;
@@ -854,7 +854,7 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   entmap[4].clear();
 
   // create geometry for all vertices and replace 
-  rval = add_vertices( mdbImpl, entmap );
+  rval = create_vertices( mdbImpl, entmap );
   if(rval!=MB_SUCCESS) return rval; 
 
   // create facets for all curves

diff --git a/src/io/ReadCGM.hpp b/src/io/ReadCGM.hpp
index 395614a..9eeadf6 100644
--- a/src/io/ReadCGM.hpp
+++ b/src/io/ReadCGM.hpp
@@ -98,7 +98,9 @@ public:
 
   void set_cgm_attributes(bool const act_attributes, bool const verbost);
 
-  ErrorCode add_vertices( Interface* moab, std::map<RefEntity*,EntityHandle> entmap[5] );
+
+  ErrorCode create_vertices( Interface* moab, std::map<RefEntity*,EntityHandle> entmap[5] );
+
 
   ErrorCode create_curve_facets( Interface* moab, 
                                  std::map<RefEntity*,EntityHandle> entitymap[5],


https://bitbucket.org/fathomteam/moab/commits/7cc7c5f2ae25/
Changeset:   7cc7c5f2ae25
Branch:      None
User:        pshriwise
Date:        2014-05-21 16:41:19
Summary:     Removed the moab interface parameter from the store_group_content function in ReadCGM.

Affected #:  2 files

diff --git a/src/io/ReadCGM.cpp b/src/io/ReadCGM.cpp
index a04bf96..5ac234b 100644
--- a/src/io/ReadCGM.cpp
+++ b/src/io/ReadCGM.cpp
@@ -318,7 +318,7 @@ ErrorCode ReadCGM::store_curve_senses( std::map<RefEntity*,EntityHandle> entitym
   if(rval!=MB_SUCCESS) return rval;
   
   // store group names and entities in the mesh
-  rval = store_group_content( moab, entitymap );
+  rval = store_group_content(entitymap );
   if(rval!=MB_SUCCESS) return rval;
  
 
@@ -421,7 +421,7 @@ ErrorCode ReadCGM::create_group_entsets( Interface* moab, std::map<RefEntity*,En
   return MB_SUCCESS;
 }
 
-ErrorCode ReadCGM::store_group_content( Interface* /* moab */, std::map<RefEntity*,EntityHandle>* entitymap )
+ErrorCode ReadCGM::store_group_content(std::map<RefEntity*,EntityHandle>* entitymap )
 {
 
   ErrorCode rval;
@@ -473,7 +473,7 @@ ErrorCode ReadCGM::store_group_content( Interface* /* moab */, std::map<RefEntit
     }
     
     if (!entities.empty()) {
-      rval = moab->add_entities( ci->second, entities );
+      rval = mdbImpl->add_entities( ci->second, entities );
       if (MB_SUCCESS != rval)
         return MB_FAILURE;
     }

diff --git a/src/io/ReadCGM.hpp b/src/io/ReadCGM.hpp
index 9eeadf6..4a5a0b6 100644
--- a/src/io/ReadCGM.hpp
+++ b/src/io/ReadCGM.hpp
@@ -93,7 +93,7 @@ public:
   ErrorCode create_group_entsets( Interface* moab, 
                                    std::map<RefEntity*,EntityHandle>& entitymap );
 
-  ErrorCode store_group_content( Interface* moab, std::map<RefEntity*,EntityHandle>* entitymap );
+  ErrorCode store_group_content( std::map<RefEntity*,EntityHandle>* entitymap );
 
 
   void set_cgm_attributes(bool const act_attributes, bool const verbost);


https://bitbucket.org/fathomteam/moab/commits/2dc96cc07e8c/
Changeset:   2dc96cc07e8c
Branch:      None
User:        pshriwise
Date:        2014-05-21 17:18:08
Summary:     Removed unneeded moab interface parameters from multiple functions.

Affected #:  2 files

diff --git a/src/io/ReadCGM.cpp b/src/io/ReadCGM.cpp
index 5ac234b..f5cb697 100644
--- a/src/io/ReadCGM.cpp
+++ b/src/io/ReadCGM.cpp
@@ -156,7 +156,7 @@ ErrorCode ReadCGM::set_options( const FileOptions& opts,
   return MB_SUCCESS;
 }
 
-  ErrorCode ReadCGM::create_entity_sets( Interface* moab, std::map<RefEntity*, EntityHandle> (&entmap)[5] )
+  ErrorCode ReadCGM::create_entity_sets( std::map<RefEntity*, EntityHandle> (&entmap)[5] )
 {
   ErrorCode rval; 
   const char geom_categories[][CATEGORY_TAG_SIZE] = 
@@ -175,21 +175,21 @@ ErrorCode ReadCGM::set_options( const FileOptions& opts,
          RefEntity* ent = entlist.get_and_step(); 
          EntityHandle handle;  
          // create the new meshset
-         rval = moab->create_meshset( dim == 1 ? MESHSET_ORDERED : MESHSET_SET, handle);
+         rval = mdbImpl->create_meshset( dim == 1 ? MESHSET_ORDERED : MESHSET_SET, handle);
          if (MB_SUCCESS != rval) return rval; 
 
          // map the geom reference entity to the corresponding moab meshset
          entmap[dim][ent] = handle; 
 
          // create tags for the new meshset
-         rval = moab->tag_set_data( geom_tag, &handle, 1, &dim ); 
+         rval = mdbImpl->tag_set_data( geom_tag, &handle, 1, &dim ); 
          if (MB_SUCCESS != rval) return rval; 
 
          int id = ent->id();
-         rval = moab->tag_set_data( id_tag, &handle, 1, &id );
+         rval = mdbImpl->tag_set_data( id_tag, &handle, 1, &id );
          if (MB_SUCCESS != rval) return rval;
 
-         rval = moab->tag_set_data( category_tag, &handle, 1, &geom_categories[dim] );
+         rval = mdbImpl->tag_set_data( category_tag, &handle, 1, &geom_categories[dim] );
          if (MB_SUCCESS != rval) return rval;
  
        }
@@ -200,7 +200,7 @@ ErrorCode ReadCGM::set_options( const FileOptions& opts,
 
 
 
-ErrorCode ReadCGM::create_topology( Interface* moab, std::map<RefEntity*,EntityHandle> entitymap[5] )
+ErrorCode ReadCGM::create_topology( std::map<RefEntity*,EntityHandle> entitymap[5] )
 {
   ErrorCode rval;
   DLIList<RefEntity*> entitylist;
@@ -215,7 +215,7 @@ ErrorCode ReadCGM::create_topology( Interface* moab, std::map<RefEntity*,EntityH
       for (int i = entitylist.size(); i--; ) {
         RefEntity* ent = entitylist.get_and_step();
         EntityHandle h = entitymap[dim-1][ent];
-        rval = moab->add_parent_child( ci->second, h );
+        rval = mdbImpl->add_parent_child( ci->second, h );
         if (MB_SUCCESS != rval)
           return rval;
       }
@@ -309,12 +309,12 @@ ErrorCode ReadCGM::store_curve_senses( std::map<RefEntity*,EntityHandle> entitym
   return MB_SUCCESS;
 }
 
-  ErrorCode ReadCGM::store_groups( Interface* moab, std::map<RefEntity*,EntityHandle>* entitymap )
+  ErrorCode ReadCGM::store_groups( std::map<RefEntity*,EntityHandle>* entitymap )
 {
   ErrorCode rval;
 
   // create eneity sets for all ref groups
-  rval = create_group_entsets( moab, entitymap[4] );
+  rval = create_group_entsets( entitymap[4] );
   if(rval!=MB_SUCCESS) return rval;
   
   // store group names and entities in the mesh
@@ -325,7 +325,7 @@ ErrorCode ReadCGM::store_curve_senses( std::map<RefEntity*,EntityHandle> entitym
   return MB_SUCCESS;
 }
 
-ErrorCode ReadCGM::create_group_entsets( Interface* moab, std::map<RefEntity*,EntityHandle>& entitymap )
+ErrorCode ReadCGM::create_group_entsets( std::map<RefEntity*,EntityHandle>& entitymap )
 {
 
   ErrorCode rval;
@@ -367,7 +367,7 @@ ErrorCode ReadCGM::create_group_entsets( Interface* moab, std::map<RefEntity*,En
 #endif
     // create entity handle for the group
     EntityHandle h;
-    rval = moab->create_meshset( MESHSET_SET, h );
+    rval = mdbImpl->create_meshset( MESHSET_SET, h );
     if (MB_SUCCESS != rval)
       return rval;
     //set tag data for the group
@@ -377,16 +377,16 @@ ErrorCode ReadCGM::create_group_entsets( Interface* moab, std::map<RefEntity*,En
     if (name1.length() >= (unsigned)NAME_TAG_SIZE)
       std::cout << "WARNING: group name '" << name1.c_str()
                 << "' truncated to '" << namebuf << "'" << std::endl;
-    rval = moab->tag_set_data( name_tag, &h, 1, namebuf );
+    rval = mdbImpl->tag_set_data( name_tag, &h, 1, namebuf );
     if (MB_SUCCESS != rval)
       return MB_FAILURE;
       
     int id = grp->id();
-    rval = moab->tag_set_data( id_tag, &h, 1, &id );
+    rval = mdbImpl->tag_set_data( id_tag, &h, 1, &id );
     if (MB_SUCCESS != rval)
       return MB_FAILURE;
       
-    rval = moab->tag_set_data( category_tag, &h, 1, &geom_categories[4] );
+    rval = mdbImpl->tag_set_data( category_tag, &h, 1, &geom_categories[4] );
     if (MB_SUCCESS != rval)
       return MB_FAILURE;
     //check for extra group names  
@@ -394,7 +394,7 @@ ErrorCode ReadCGM::create_group_entsets( Interface* moab, std::map<RefEntity*,En
       for (int j = extra_name_tags.size(); j < name_list.size(); ++j) {
         sprintf( namebuf, "EXTRA_%s%d", NAME_TAG_NAME, j );
         Tag t;
-        rval = moab->tag_get_handle( namebuf, NAME_TAG_SIZE, MB_TYPE_OPAQUE, t, MB_TAG_SPARSE|MB_TAG_CREAT );
+        rval = mdbImpl->tag_get_handle( namebuf, NAME_TAG_SIZE, MB_TYPE_OPAQUE, t, MB_TAG_SPARSE|MB_TAG_CREAT );
         assert(!rval);
         extra_name_tags.push_back(t);
       }
@@ -410,7 +410,7 @@ ErrorCode ReadCGM::create_group_entsets( Interface* moab, std::map<RefEntity*,En
         if (name1.length() >= (unsigned)NAME_TAG_SIZE)
           std::cout << "WARNING: group name '" << name1.c_str()
                     << "' truncated to '" << namebuf << "'" << std::endl;
-        rval = moab->tag_set_data( extra_name_tags[j], &h, 1, namebuf );
+        rval = mdbImpl->tag_set_data( extra_name_tags[j], &h, 1, namebuf );
         if (MB_SUCCESS != rval)
           return MB_FAILURE;
       }
@@ -499,7 +499,7 @@ void ReadCGM::set_cgm_attributes(bool const act_attributes, bool const verbose)
 }
 
 
-  ErrorCode ReadCGM::create_vertices( Interface* moab, std::map<RefEntity*,EntityHandle> entitymap[5] )
+  ErrorCode ReadCGM::create_vertices( std::map<RefEntity*,EntityHandle> entitymap[5] )
 {
 
  ErrorCode rval;
@@ -508,11 +508,11 @@ void ReadCGM::set_cgm_attributes(bool const act_attributes, bool const verbose)
     CubitVector pos = dynamic_cast<RefVertex*>(ci->first)->coordinates();
     double coords[3] = {pos.x(), pos.y(), pos.z()};
     EntityHandle vh;
-    rval = moab->create_vertex( coords, vh );
+    rval = mdbImpl->create_vertex( coords, vh );
     if (MB_SUCCESS != rval)
       return MB_FAILURE;
     
-    rval = moab->add_entities( ci->second, &vh, 1 );
+    rval = mdbImpl->add_entities( ci->second, &vh, 1 );
     if (MB_SUCCESS != rval)
       return MB_FAILURE;
     
@@ -521,8 +521,7 @@ void ReadCGM::set_cgm_attributes(bool const act_attributes, bool const verbose)
   return MB_SUCCESS;
 }
 
-ErrorCode ReadCGM::create_curve_facets( Interface* moab, 
-                                        std::map<RefEntity*,EntityHandle> entitymap[5], 
+ErrorCode ReadCGM::create_curve_facets( std::map<RefEntity*,EntityHandle> entitymap[5], 
                                         int norm_tol, 
                                         double faceting_tol, 
                                         bool verbose_warn )
@@ -579,7 +578,7 @@ ErrorCode ReadCGM::create_curve_facets( Interface* moab,
         continue;
       }
       EntityHandle h = entitymap[0][start_vtx];
-      rval = moab->add_entities( ci->second, &h, 1 );
+      rval = mdbImpl->add_entities( ci->second, &h, 1 );
       if (MB_SUCCESS != rval)
         return MB_FAILURE;
       continue;
@@ -612,7 +611,7 @@ ErrorCode ReadCGM::create_curve_facets( Interface* moab,
       double coords[] = { points[i].x(), points[i].y(), points[i].z() };
       EntityHandle h;
       //create vertex entity
-      rval = moab->create_vertex( coords, h );
+      rval = mdbImpl->create_vertex( coords, h );
       if (MB_SUCCESS != rval)
         return MB_FAILURE;
       verts.push_back( h );
@@ -622,7 +621,7 @@ ErrorCode ReadCGM::create_curve_facets( Interface* moab,
       // create edges
     for (size_t i = 0; i < verts.size()-1; ++i) {
       EntityHandle h;
-      rval = moab->create_element( MBEDGE, &verts[i], 2, h );
+      rval = mdbImpl->create_element( MBEDGE, &verts[i], 2, h );
       if (MB_SUCCESS != rval)
         return MB_FAILURE;
       edges.push_back( h );
@@ -632,10 +631,10 @@ ErrorCode ReadCGM::create_curve_facets( Interface* moab,
     if (verts.front() == verts.back())
       verts.pop_back();
     //Add entities to the curve meshset from entitymap
-    rval = moab->add_entities( ci->second, &verts[0], verts.size() );
+    rval = mdbImpl->add_entities( ci->second, &verts[0], verts.size() );
     if (MB_SUCCESS != rval)
       return MB_FAILURE;
-    rval = moab->add_entities( ci->second, &edges[0], edges.size() );
+    rval = mdbImpl->add_entities( ci->second, &edges[0], edges.size() );
     if (MB_SUCCESS != rval)
       return MB_FAILURE;
   }
@@ -649,8 +648,7 @@ ErrorCode ReadCGM::create_curve_facets( Interface* moab,
   return MB_SUCCESS;
 }
 
-ErrorCode ReadCGM::create_surface_facets( Interface* moab, 
-                                          std::map<RefEntity*,EntityHandle> entitymap[5],
+ErrorCode ReadCGM::create_surface_facets( std::map<RefEntity*,EntityHandle> entitymap[5],
                                           int norm_tol, 
                                           double facet_tol, 
                                           double length_tol )
@@ -830,11 +828,11 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   DLIList<RefEntity*> entlist;
   std::map<RefEntity*,EntityHandle> entmap[5]; // one for each dim, and one for groups
   //std::map<RefEntity*,EntityHandle>* entmap_ptr = entmap;
-  rval = create_entity_sets( mdbImpl, entmap );
+  rval = create_entity_sets( entmap );
   if (rval!=MB_SUCCESS) return rval;
 
   // create topology for all geometric entities
-  rval = create_topology( mdbImpl, entmap );
+  rval = create_topology( entmap );
   if(rval!=MB_SUCCESS) return rval;
 
   // store CoFace senses
@@ -846,7 +844,7 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   if (rval!=MB_SUCCESS) return rval;
 
   // get group information and store it in the mesh 
-  rval = store_groups( mdbImpl, entmap );
+  rval = store_groups( entmap );
   if(rval!=MB_SUCCESS) return rval;
  
   // done with volumes and groups
@@ -854,15 +852,15 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   entmap[4].clear();
 
   // create geometry for all vertices and replace 
-  rval = create_vertices( mdbImpl, entmap );
+  rval = create_vertices( entmap );
   if(rval!=MB_SUCCESS) return rval; 
 
   // create facets for all curves
-  rval = create_curve_facets( mdbImpl, entmap, norm_tol, faceting_tol, verbose_warnings );
+  rval = create_curve_facets( entmap, norm_tol, faceting_tol, verbose_warnings );
   if(rval!=MB_SUCCESS) return rval;
 
   // create facets for surfaces
-  rval = create_surface_facets( mdbImpl, entmap, norm_tol, faceting_tol, len_tol);
+  rval = create_surface_facets( entmap, norm_tol, faceting_tol, len_tol);
   if(rval!=MB_SUCCESS) return rval;
 
   return MB_SUCCESS;

diff --git a/src/io/ReadCGM.hpp b/src/io/ReadCGM.hpp
index 4a5a0b6..35e8dc0 100644
--- a/src/io/ReadCGM.hpp
+++ b/src/io/ReadCGM.hpp
@@ -78,20 +78,17 @@ public:
 			        bool& act_att,
                                 bool& verbose_warnings);
 
-  ErrorCode create_entity_sets( Interface* moab,
-                                std::map<RefEntity*,EntityHandle> (&entmap)[5] );
+  ErrorCode create_entity_sets( std::map<RefEntity*,EntityHandle> (&entmap)[5] );
 
-  ErrorCode create_topology( Interface* moab, 
-                             std::map<RefEntity*,EntityHandle> entitymap[5] );
+  ErrorCode create_topology( std::map<RefEntity*,EntityHandle> entitymap[5] );
 
   ErrorCode store_surface_senses( std::map<RefEntity*,EntityHandle> entitymap[5] );
 
   ErrorCode store_curve_senses( std::map<RefEntity*,EntityHandle> entitymap[5] );
 
-  ErrorCode store_groups( Interface* moab, std::map<RefEntity*,EntityHandle>* entitymap );
+  ErrorCode store_groups( std::map<RefEntity*,EntityHandle>* entitymap );
 
-  ErrorCode create_group_entsets( Interface* moab, 
-                                   std::map<RefEntity*,EntityHandle>& entitymap );
+  ErrorCode create_group_entsets( std::map<RefEntity*,EntityHandle>& entitymap );
 
   ErrorCode store_group_content( std::map<RefEntity*,EntityHandle>* entitymap );
 
@@ -99,17 +96,15 @@ public:
   void set_cgm_attributes(bool const act_attributes, bool const verbost);
 
 
-  ErrorCode create_vertices( Interface* moab, std::map<RefEntity*,EntityHandle> entmap[5] );
+  ErrorCode create_vertices( std::map<RefEntity*,EntityHandle> entmap[5] );
 
 
-  ErrorCode create_curve_facets( Interface* moab, 
-                                 std::map<RefEntity*,EntityHandle> entitymap[5],
+  ErrorCode create_curve_facets( std::map<RefEntity*,EntityHandle> entitymap[5],
                                  int norm_tol,
                                  double faceting_tol,
                                  bool verbose_warn = false );
 
-  ErrorCode create_surface_facets( Interface* moab, 
-                                   std::map<RefEntity*,EntityHandle> entitymap[5],
+  ErrorCode create_surface_facets( std::map<RefEntity*,EntityHandle> entitymap[5],
                                    int norm_tol, 
                                    double facet_tol, 
                                    double length_tol );


https://bitbucket.org/fathomteam/moab/commits/826f0bdf0801/
Changeset:   826f0bdf0801
Branch:      None
User:        pshriwise
Date:        2014-05-28 23:15:31
Summary:     Merge branch 'master' of https://bitbucket.org/fathomteam/moab

Affected #:  4 files

diff --git a/.gitignore b/.gitignore
index 073629d..52e35a8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -97,9 +97,9 @@ Makefile
 Makefile.in
 moab.config
 MOABConfig.cmake
-moab.creator*
-moab.files
-moab.includes
+*.creator*
+*.files
+*.includes
 moab.make
 *.o
 .project

diff --git a/examples/makefile b/examples/makefile
index 00d889b..787c8d3 100644
--- a/examples/makefile
+++ b/examples/makefile
@@ -71,9 +71,6 @@ VisTags: VisTags.o ${MOAB_LIBDIR}/libMOAB.la
 ReadWriteTest: ReadWriteTest.o ${MOAB_LIBDIR}/libMOAB.la
 	${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK} 
 	
-ptest: ptest.o ${MOAB_LIBDIR}/libMOAB.la
-	${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
-
 clean:
 	rm -rf *.o *.mod *.h5m ${EXAMPLES} ${PAREXAMPLES} ${EXOIIEXAMPLES} ${F90EXAMPLES}
 

diff --git a/src/io/Tqdcfr.cpp b/src/io/Tqdcfr.cpp
index 522bc55..ddcdd8a 100644
--- a/src/io/Tqdcfr.cpp
+++ b/src/io/Tqdcfr.cpp
@@ -2329,7 +2329,7 @@ ErrorCode Tqdcfr::read_acis_records( const char* sat_filename )
       
         // get next occurrence of '#' (record terminator)
       ret = strchr(&(char_buf[buf_pos]), '#');
-      while (ret && (unsigned int)(ret+1-&char_buf[0]) < bytes_left && *(ret+1) != '\n')
+      while (ret && (unsigned int)(ret+1-&char_buf[0]) < bytes_left && *(ret+1) != '\n' && *(ret+1) != 0)
         ret = strchr(ret+1, '#');
       if (NULL != ret) {
           // grab the string (inclusive of the record terminator and the line feed) and complete the record

diff --git a/test/io/read_gcrm_nc.cpp b/test/io/read_gcrm_nc.cpp
index d38ca2c..1304495 100644
--- a/test/io/read_gcrm_nc.cpp
+++ b/test/io/read_gcrm_nc.cpp
@@ -21,13 +21,13 @@ void test_read_onevar();
 void test_read_onetimestep();
 void test_read_nomesh();
 void test_read_novars();
-void test_read_no_mixed_elements(); // Test read option NO_MIXED_ELEMENTS
 void test_read_no_edges(); // Test read option NO_EDGES
 void test_gather_onevar(); // Test gather set with one variable
 
 void get_options(std::string& opts);
 
-const double eps = 1e-20;
+const double eps = 1e-6;
+const int layers = 256;
 
 int main(int argc, char* argv[])
 {
@@ -42,12 +42,11 @@ int main(int argc, char* argv[])
 #endif
 
   result += RUN_TEST(test_read_all);
-  //result += RUN_TEST(test_read_onevar);
-  //result += RUN_TEST(test_read_onetimestep);
-  //result += RUN_TEST(test_read_nomesh);
-  //result += RUN_TEST(test_read_novars);
-  //result += RUN_TEST(test_read_no_mixed_elements);
-  //result += RUN_TEST(test_read_no_edges);
+  result += RUN_TEST(test_read_onevar);
+  result += RUN_TEST(test_read_onetimestep);
+  result += RUN_TEST(test_read_nomesh);
+  result += RUN_TEST(test_read_novars);
+  result += RUN_TEST(test_read_no_edges);
   //result += RUN_TEST(test_gather_onevar);
 
 #ifdef USE_MPI
@@ -66,14 +65,13 @@ void test_read_all()
 
   std::string opts;
   get_options(opts);
-  opts+=";DEBUG_IO=2";
 
   // Read mesh and read all variables at all timesteps
   ErrorCode rval = mb.load_file(example, 0, opts.c_str());
   CHECK_ERR(rval);
 
   mb.write_file("gcrm.h5m");
-#if 0
+
   int procs = 1;
 #ifdef USE_MPI
   ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
@@ -82,14 +80,15 @@ void test_read_all()
 
   // Make check runs this test on one processor
   if (1 == procs) {
-    // For each tag, check two values
-    double val[2];
+    // For each tag, check values on two entities
+    // There are 256 layers, only check on first two
+    double val[2 * layers];
 
-    // Check tags for vertex variable vorticity
-    Tag vorticity_tag0, vorticity_tag1;
-    rval = mb.tag_get_handle("vorticity0", 1, MB_TYPE_DOUBLE, vorticity_tag0);
+    // Check tags for vertex variable u
+    Tag u_tag0, u_tag1;
+    rval = mb.tag_get_handle("u0", layers, MB_TYPE_DOUBLE, u_tag0);
     CHECK_ERR(rval);
-    rval = mb.tag_get_handle("vorticity1", 1, MB_TYPE_DOUBLE, vorticity_tag1);
+    rval = mb.tag_get_handle("u1", layers, MB_TYPE_DOUBLE, u_tag1);
     CHECK_ERR(rval);
 
     // Get vertices (1280 edges)
@@ -99,22 +98,34 @@ void test_read_all()
     CHECK_EQUAL((size_t)1280, verts.size());
     CHECK_EQUAL((size_t)1, verts.psize());
 
-    // Check vorticity tag values on first two vertices
-    EntityHandle vert_ents[] = {verts[0], verts[1]};
-    rval = mb.tag_get_data(vorticity_tag0, vert_ents, 2, val);
+    // Check u tag values on first and last vertices
+    EntityHandle vert_ents[] = {verts[0], verts[1279]};
+
+    // Timestep 0
+    rval = mb.tag_get_data(u_tag0, vert_ents, 2, val);
     CHECK_ERR(rval);
-    CHECK_REAL_EQUAL(1.1, val[0], eps);
-    CHECK_REAL_EQUAL(1.2, val[1], eps);
-    rval = mb.tag_get_data(vorticity_tag1, vert_ents, 2, val);
+    // Layer 0
+    CHECK_REAL_EQUAL(-4.839992, val[0 * layers], eps);
+    CHECK_REAL_EQUAL(-3.699257, val[1 * layers], eps);
+    // Layer 1
+    CHECK_REAL_EQUAL(-4.839925, val[0 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-3.699206, val[1 * layers + 1], eps);
+
+    // Timestep 1
+    rval = mb.tag_get_data(u_tag1, vert_ents, 2, val);
     CHECK_ERR(rval);
-    CHECK_REAL_EQUAL(2.1, val[0], eps);
-    CHECK_REAL_EQUAL(2.2, val[1], eps);
-
-    // Check tags for edge variable u
-    Tag u_tag0, u_tag1;
-    rval = mb.tag_get_handle("u0", 1, MB_TYPE_DOUBLE, u_tag0);
+    // Layer 0
+    CHECK_REAL_EQUAL(-4.712473, val[0 * layers], eps);
+    CHECK_REAL_EQUAL(-3.601793, val[1 * layers], eps);
+    // Layer 1
+    CHECK_REAL_EQUAL(-4.712409, val[0 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-3.601743, val[1 * layers + 1], eps);
+
+    // Check tags for edge variable wind
+    Tag wind_tag0, wind_tag1;
+    rval = mb.tag_get_handle("wind0", layers, MB_TYPE_DOUBLE, wind_tag0);
     CHECK_ERR(rval);
-    rval = mb.tag_get_handle("u1", 1, MB_TYPE_DOUBLE, u_tag1);
+    rval = mb.tag_get_handle("wind1", layers, MB_TYPE_DOUBLE, wind_tag1);
     CHECK_ERR(rval);
 
     // Get edges (1920 edges)
@@ -124,22 +135,34 @@ void test_read_all()
     CHECK_EQUAL((size_t)1920, edges.size());
     CHECK_EQUAL((size_t)1, edges.psize());
 
-    // Check u tag values on two specified edges
-    EntityHandle edge_ents[] = {edges[5], edges[6]};
-    rval = mb.tag_get_data(u_tag0, edge_ents, 2, val);
+    // Check wind tag values on first and last edges
+    EntityHandle edge_ents[] = {edges[0], edges[1919]};
+
+    // Timestep 0
+    rval = mb.tag_get_data(wind_tag0, edge_ents, 2, val);
     CHECK_ERR(rval);
-    CHECK_REAL_EQUAL(1.113138721544778, val[0], eps);
-    CHECK_REAL_EQUAL(-1.113138721930009, val[1], eps);
-    rval = mb.tag_get_data(u_tag1, edge_ents, 2, val);
+    // Layer 0
+    CHECK_REAL_EQUAL(-5.081991, val[0 * layers], eps);
+    CHECK_REAL_EQUAL(-6.420274, val[1 * layers], eps);
+    // Layer 1
+    CHECK_REAL_EQUAL(-5.081781, val[0 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-6.419831, val[1 * layers + 1], eps);
+
+    // Timestep 1
+    rval = mb.tag_get_data(wind_tag1, edge_ents, 2, val);
     CHECK_ERR(rval);
-    CHECK_REAL_EQUAL(2.113138721544778, val[0], eps);
-    CHECK_REAL_EQUAL(-2.113138721930009, val[1], eps);
-
-    // Check tags for cell variable ke
-    Tag ke_tag0, ke_tag1;
-    rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0);
+    // Layer 0
+    CHECK_REAL_EQUAL(-4.948097, val[0 * layers], eps);
+    CHECK_REAL_EQUAL(-6.251121, val[1 * layers], eps);
+    // Layer 1
+    CHECK_REAL_EQUAL(-4.947892, val[0 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-6.250690, val[1 * layers + 1], eps);
+
+    // Check tags for cell variable vorticity
+    Tag vorticity_tag0, vorticity_tag1;
+    rval = mb.tag_get_handle("vorticity0", layers, MB_TYPE_DOUBLE, vorticity_tag0);
     CHECK_ERR(rval);
-    rval = mb.tag_get_handle("ke1", 1, MB_TYPE_DOUBLE, ke_tag1);
+    rval = mb.tag_get_handle("vorticity1", layers, MB_TYPE_DOUBLE, vorticity_tag1);
     CHECK_ERR(rval);
 
     // Get cells (12 pentagons and 630 hexagons)
@@ -147,27 +170,33 @@ void test_read_all()
     rval = mb.get_entities_by_type(0, MBPOLYGON, cells);
     CHECK_ERR(rval);
     CHECK_EQUAL((size_t)642, cells.size());
-#ifdef USE_MPI
-    // If MOAB is compiled parallel, sequence size requested are increased
-    // by a factor of 1.5, to allow for ghosts. This will introduce a gap
-    // between the two face sequences.
-    CHECK_EQUAL((size_t)2, cells.psize());
-#else
+
+    // GCRM pentagons are always padded to hexagons
     CHECK_EQUAL((size_t)1, cells.psize());
-#endif
 
-    // Check ke tag values on first pentagon and first hexagon
-    EntityHandle cell_ents[] = {cells[0], cells[12]};
-    rval = mb.tag_get_data(ke_tag0, cell_ents, 2, val);
+    // Check vorticity tag values on first and last cells
+    EntityHandle cell_ents[] = {cells[0], cells[641]};
+
+    // Timestep 0
+    rval = mb.tag_get_data(vorticity_tag0, cell_ents, 2, val);
     CHECK_ERR(rval);
-    CHECK_REAL_EQUAL(15.001, val[0], eps);
-    CHECK_REAL_EQUAL(16.013, val[1], eps);
-    rval = mb.tag_get_data(ke_tag1, cell_ents, 2, val);
+    // Layer 0
+    CHECK_REAL_EQUAL(3.629994, val[0 * layers], eps);
+    CHECK_REAL_EQUAL(-0.554888, val[1 * layers], eps);
+    // Layer 1
+    CHECK_REAL_EQUAL(3.629944, val[0 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-0.554881, val[1 * layers + 1], eps);
+
+    // Timestep 1
+    rval = mb.tag_get_data(vorticity_tag1, cell_ents, 2, val);
     CHECK_ERR(rval);
-    CHECK_REAL_EQUAL(25.001, val[0], eps);
-    CHECK_REAL_EQUAL(26.013, val[1], eps);
+    // Layer 0
+    CHECK_REAL_EQUAL(3.534355, val[0 * layers], eps);
+    CHECK_REAL_EQUAL(-0.540269, val[1 * layers], eps);
+    // Layer 1
+    CHECK_REAL_EQUAL(3.534306, val[0 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-0.540262, val[1 * layers + 1], eps);
   }
-#endif
 }
 
 void test_read_onevar()
@@ -178,8 +207,8 @@ void test_read_onevar()
   std::string opts;
   get_options(opts);
 
-  // Read mesh and read cell variable ke at all timesteps
-  opts += ";VARIABLE=ke";
+  // Read mesh and read cell variable vorticity at all timesteps
+  opts += ";VARIABLE=vorticity";
   ErrorCode rval = mb.load_file(example, NULL, opts.c_str());
   CHECK_ERR(rval);
 
@@ -191,11 +220,11 @@ void test_read_onevar()
 
   // Make check runs this test on one processor
   if (1 == procs) {
-    // Check ke tags
-    Tag ke_tag0, ke_tag1;
-    rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0);
+    // Check vorticity tags
+    Tag vorticity_tag0, vorticity_tag1;
+    rval = mb.tag_get_handle("vorticity0", layers, MB_TYPE_DOUBLE, vorticity_tag0);
     CHECK_ERR(rval);
-    rval = mb.tag_get_handle("ke1", 1, MB_TYPE_DOUBLE, ke_tag1);
+    rval = mb.tag_get_handle("vorticity1", layers, MB_TYPE_DOUBLE, vorticity_tag1);
     CHECK_ERR(rval);
 
     // Get cells (12 pentagons and 630 hexagons)
@@ -204,32 +233,41 @@ void test_read_onevar()
     CHECK_ERR(rval);
     CHECK_EQUAL((size_t)642, cells.size());
 
-#ifdef USE_MPI
-    // If MOAB is compiled parallel, sequence size requested are increased
-    // by a factor of 1.5, to allow for ghosts. This will introduce a gap
-    // between the two face sequences.
-    CHECK_EQUAL((size_t)2, cells.psize());
-#else
+    // GCRM pentagons are always padded to hexagons
     CHECK_EQUAL((size_t)1, cells.psize());
-#endif
 
-    // Check ke tag values on 4 cells: first pentagon, last pentagon,
-    // first hexagon, and last hexagon
-    EntityHandle cell_ents[] = {cells[0], cells[11], cells[12], cells[641]};
-    double ke0_val[4];
-    rval = mb.tag_get_data(ke_tag0, cell_ents, 4, ke0_val);
+    // Check vorticity tag values on 4 cells: first cell, two median cells, and last cell
+    EntityHandle cell_ents[] = {cells[0], cells[320], cells[321], cells[641]};
+    // There are 256 layers, only check on first two
+    double vorticity_val[4 * layers];
+
+    // Timestep 0
+    rval = mb.tag_get_data(vorticity_tag0, cell_ents, 4, vorticity_val);
     CHECK_ERR(rval);
-    CHECK_REAL_EQUAL(15.001, ke0_val[0], eps);
-    CHECK_REAL_EQUAL(15.012, ke0_val[1], eps);
-    CHECK_REAL_EQUAL(16.013, ke0_val[2], eps);
-    CHECK_REAL_EQUAL(16.642, ke0_val[3], eps);
-    double ke1_val[4];
-    rval = mb.tag_get_data(ke_tag1, cell_ents, 4, ke1_val);
+    // Layer 0
+    CHECK_REAL_EQUAL(3.629994, vorticity_val[0 * layers], eps);
+    CHECK_REAL_EQUAL(0.131688, vorticity_val[1 * layers], eps);
+    CHECK_REAL_EQUAL(-0.554888, vorticity_val[2 * layers], eps);
+    CHECK_REAL_EQUAL(-0.554888, vorticity_val[3 * layers], eps);
+    // Layer 1
+    CHECK_REAL_EQUAL(3.629944, vorticity_val[0 * layers + 1], eps);
+    CHECK_REAL_EQUAL(0.131686, vorticity_val[1 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-0.554881, vorticity_val[2 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-0.554881, vorticity_val[3 * layers + 1], eps);
+
+    // Timestep 1
+    rval = mb.tag_get_data(vorticity_tag1, cell_ents, 4, vorticity_val);
     CHECK_ERR(rval);
-    CHECK_REAL_EQUAL(25.001, ke1_val[0], eps);
-    CHECK_REAL_EQUAL(25.012, ke1_val[1], eps);
-    CHECK_REAL_EQUAL(26.013, ke1_val[2], eps);
-    CHECK_REAL_EQUAL(26.642, ke1_val[3], eps);
+    // Layer 0
+    CHECK_REAL_EQUAL(3.534355, vorticity_val[0 * layers], eps);
+    CHECK_REAL_EQUAL(0.128218, vorticity_val[1 * layers], eps);
+    CHECK_REAL_EQUAL(-0.540269, vorticity_val[2 * layers], eps);
+    CHECK_REAL_EQUAL(-0.540269, vorticity_val[3 * layers], eps);
+    // Layer 1
+    CHECK_REAL_EQUAL(3.534306, vorticity_val[0 * layers + 1], eps);
+    CHECK_REAL_EQUAL(0.128216, vorticity_val[1 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-0.540262, vorticity_val[2 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-0.540262, vorticity_val[3 * layers + 1], eps);
   }
 }
 
@@ -248,10 +286,10 @@ void test_read_onetimestep()
 
   // Check vorticity tags
   Tag vorticity_tag0, vorticity_tag1;
-  rval = mb.tag_get_handle("vorticity0", 1, MB_TYPE_DOUBLE, vorticity_tag0);
+  rval = mb.tag_get_handle("vorticity0", layers, MB_TYPE_DOUBLE, vorticity_tag0);
   // Tag vorticity0 should not exist
   CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
-  rval = mb.tag_get_handle("vorticity1", 1, MB_TYPE_DOUBLE, vorticity_tag1);
+  rval = mb.tag_get_handle("vorticity1", layers, MB_TYPE_DOUBLE, vorticity_tag1);
   CHECK_ERR(rval);
 }
 
@@ -275,10 +313,10 @@ void test_read_nomesh()
 
   // Check u tags
   Tag u_tag0, u_tag1;
-  rval = mb.tag_get_handle("u0", 1, MB_TYPE_DOUBLE, u_tag0);
+  rval = mb.tag_get_handle("u0", layers, MB_TYPE_DOUBLE, u_tag0);
   CHECK_ERR(rval);
   // Tag u1 should not exist
-  rval = mb.tag_get_handle("u1", 1, MB_TYPE_DOUBLE, u_tag1);
+  rval = mb.tag_get_handle("u1", layers, MB_TYPE_DOUBLE, u_tag1);
   CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
 
   // Read all variables at 2nd timestep 0, no need to read mesh
@@ -287,7 +325,7 @@ void test_read_nomesh()
   CHECK_ERR(rval);
 
   // Check tag u1 again
-  rval = mb.tag_get_handle("u1", 1, MB_TYPE_DOUBLE, u_tag1);
+  rval = mb.tag_get_handle("u1", layers, MB_TYPE_DOUBLE, u_tag1);
   // Tag u1 should exist at this time
   CHECK_ERR(rval);
 }
@@ -312,42 +350,40 @@ void test_read_novars()
   CHECK_ERR(rval);
 
   // Read mesh, but still no variables
-  opts = orig + ";VARIABLE=;TIMESTEP=0;DEBUG_IO=2";
+  opts = orig + ";VARIABLE=";
   rval = mb.load_file(example, &file_set, opts.c_str());
   CHECK_ERR(rval);
 
-  rval = mb.write_file("gcrm.vtk");
-#if 0
-  // Check ke tags
-  Tag ke_tag0, ke_tag1;
-  rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0);
-  // Tag ke0 should not exist
+  // Check vorticity tags
+  Tag vorticity_tag0, vorticity_tag1;
+  rval = mb.tag_get_handle("vorticity0", layers, MB_TYPE_DOUBLE, vorticity_tag0);
+  // Tag vorticity0 should not exist
   CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
-  rval = mb.tag_get_handle("ke1", 1, MB_TYPE_DOUBLE, ke_tag1);
-  // Tag ke1 should not exist
+  rval = mb.tag_get_handle("vorticity1", layers, MB_TYPE_DOUBLE, vorticity_tag1);
+  // Tag vorticity1 should not exist
   CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
 
-  // Read ke at 1st timestep, no need to read mesh
-  opts = orig + ";VARIABLE=ke;TIMESTEP=0;NOMESH";
+  // Read vorticity at 1st timestep, no need to read mesh
+  opts = orig + ";VARIABLE=vorticity;TIMESTEP=0;NOMESH";
   rval = mb.load_file(example, &file_set, opts.c_str());
   CHECK_ERR(rval);
 
-  // Check ke tags again
-  rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0);
-  // Tag ke0 should exist at this time
+  // Check vorticity tags again
+  rval = mb.tag_get_handle("vorticity0", layers, MB_TYPE_DOUBLE, vorticity_tag0);
+  // Tag vorticity0 should exist at this time
   CHECK_ERR(rval);
-  // Tag ke1 should still not exist
-  rval = mb.tag_get_handle("ke1", 1, MB_TYPE_DOUBLE, ke_tag1);
+  // Tag vorticity1 should still not exist
+  rval = mb.tag_get_handle("vorticity1", layers, MB_TYPE_DOUBLE, vorticity_tag1);
   CHECK_EQUAL(rval, MB_TAG_NOT_FOUND);
 
-  // Read ke at 2nd timestep, no need to read mesh
-  opts = orig + ";VARIABLE=ke;TIMESTEP=1;NOMESH";
+  // Read vorticity at 2nd timestep, no need to read mesh
+  opts = orig + ";VARIABLE=vorticity;TIMESTEP=1;NOMESH";
   rval = mb.load_file(example, &file_set, opts.c_str());
   CHECK_ERR(rval);
 
-  // Check tag ke1 again
-  rval = mb.tag_get_handle("ke1", 1, MB_TYPE_DOUBLE, ke_tag1);
-  // Tag ke1 should exist at this time
+  // Check tag vorticity1 again
+  rval = mb.tag_get_handle("vorticity1", layers, MB_TYPE_DOUBLE, vorticity_tag1);
+  // Tag vorticity1 should exist at this time
   CHECK_ERR(rval);
 
   int procs = 1;
@@ -364,90 +400,41 @@ void test_read_novars()
     CHECK_ERR(rval);
     CHECK_EQUAL((size_t)642, cells.size());
 
-#ifdef USE_MPI
-    // If MOAB is compiled parallel, sequence size requested are increased
-    // by a factor of 1.5, to allow for ghosts. This will introduce a gap
-    // between the two face sequences.
-    CHECK_EQUAL((size_t)2, cells.psize());
-#else
+    // GCRM pentagons are always padded to hexagons
     CHECK_EQUAL((size_t)1, cells.psize());
-#endif
-
-    // Check ke tag values on 4 cells: first pentagon, last pentagon,
-    // first hexagon, and last hexagon
-    EntityHandle cell_ents[] = {cells[0], cells[11], cells[12], cells[641]};
-    double ke0_val[4];
-    rval = mb.tag_get_data(ke_tag0, cell_ents, 4, ke0_val);
-    CHECK_ERR(rval);
-    CHECK_REAL_EQUAL(15.001, ke0_val[0], eps);
-    CHECK_REAL_EQUAL(15.012, ke0_val[1], eps);
-    CHECK_REAL_EQUAL(16.013, ke0_val[2], eps);
-    CHECK_REAL_EQUAL(16.642, ke0_val[3], eps);
-    double ke1_val[4];
-    rval = mb.tag_get_data(ke_tag1, cell_ents, 4, ke1_val);
-    CHECK_ERR(rval);
-    CHECK_REAL_EQUAL(25.001, ke1_val[0], eps);
-    CHECK_REAL_EQUAL(25.012, ke1_val[1], eps);
-    CHECK_REAL_EQUAL(26.013, ke1_val[2], eps);
-    CHECK_REAL_EQUAL(26.642, ke1_val[3], eps);
-  }
-#endif
-}
 
-void test_read_no_mixed_elements()
-{
-  Core moab;
-  Interface& mb = moab;
+    // Check vorticity tag values on 4 cells: first cell, two median cells, and last cell
+    EntityHandle cell_ents[] = {cells[0], cells[320], cells[321], cells[641]};
+    // There are 256 layers, only check on first two
+    double vorticity_val[4 * layers];
 
-  std::string opts;
-  get_options(opts);
-
-  // Read mesh with no mixed elements and read all variables at all timesteps
-  opts += ";NO_MIXED_ELEMENTS";
-  ErrorCode rval = mb.load_file(example, NULL, opts.c_str());
-  CHECK_ERR(rval);
-
-  int procs = 1;
-#ifdef USE_MPI
-  ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
-  procs = pcomm->proc_config().proc_size();
-#endif
-
-  // Make check runs this test on one processor
-  if (1 == procs) {
-    // Check ke tags
-    Tag ke_tag0, ke_tag1;
-    rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0);
-    CHECK_ERR(rval);
-    rval = mb.tag_get_handle("ke1", 1, MB_TYPE_DOUBLE, ke_tag1);
-    CHECK_ERR(rval);
-
-    // Get cells (12 pentagons and 630 hexagons)
-    Range cells;
-    rval = mb.get_entities_by_type(0, MBPOLYGON, cells);
+    // Timestep 0
+    rval = mb.tag_get_data(vorticity_tag0, cell_ents, 4, vorticity_val);
     CHECK_ERR(rval);
-    CHECK_EQUAL((size_t)642, cells.size());
-    // Only one group of cells (pentagons are padded to hexagons,
-    // e.g. connectivity [1 2 3 4 5] => [1 2 3 4 5 5])
-    CHECK_EQUAL((size_t)1, cells.psize());
-
-    // Check ke tag values on 4 cells: first pentagon, last pentagon,
-    // first hexagon, and last hexagon
-    EntityHandle cell_ents[] = {cells[0], cells[11], cells[12], cells[641]};
-    double ke0_val[4];
-    rval = mb.tag_get_data(ke_tag0, cell_ents, 4, ke0_val);
+    // Layer 0
+    CHECK_REAL_EQUAL(3.629994, vorticity_val[0 * layers], eps);
+    CHECK_REAL_EQUAL(0.131688, vorticity_val[1 * layers], eps);
+    CHECK_REAL_EQUAL(-0.554888, vorticity_val[2 * layers], eps);
+    CHECK_REAL_EQUAL(-0.554888, vorticity_val[3 * layers], eps);
+    // Layer 1
+    CHECK_REAL_EQUAL(3.629944, vorticity_val[0 * layers + 1], eps);
+    CHECK_REAL_EQUAL(0.131686, vorticity_val[1 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-0.554881, vorticity_val[2 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-0.554881, vorticity_val[3 * layers + 1], eps);
+
+    // Timestep 1
+    rval = mb.tag_get_data(vorticity_tag1, cell_ents, 4, vorticity_val);
     CHECK_ERR(rval);
-    CHECK_REAL_EQUAL(15.001, ke0_val[0], eps);
-    CHECK_REAL_EQUAL(15.012, ke0_val[1], eps);
-    CHECK_REAL_EQUAL(16.013, ke0_val[2], eps);
-    CHECK_REAL_EQUAL(16.642, ke0_val[3], eps);
-    double ke1_val[4];
-    rval = mb.tag_get_data(ke_tag1, cell_ents, 4, ke1_val);
-    CHECK_ERR(rval);
-    CHECK_REAL_EQUAL(25.001, ke1_val[0], eps);
-    CHECK_REAL_EQUAL(25.012, ke1_val[1], eps);
-    CHECK_REAL_EQUAL(26.013, ke1_val[2], eps);
-    CHECK_REAL_EQUAL(26.642, ke1_val[3], eps);
+    // Layer 0
+    CHECK_REAL_EQUAL(3.534355, vorticity_val[0 * layers], eps);
+    CHECK_REAL_EQUAL(0.128218, vorticity_val[1 * layers], eps);
+    CHECK_REAL_EQUAL(-0.540269, vorticity_val[2 * layers], eps);
+    CHECK_REAL_EQUAL(-0.540269, vorticity_val[3 * layers], eps);
+    // Layer 1
+    CHECK_REAL_EQUAL(3.534306, vorticity_val[0 * layers + 1], eps);
+    CHECK_REAL_EQUAL(0.128216, vorticity_val[1 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-0.540262, vorticity_val[2 * layers + 1], eps);
+    CHECK_REAL_EQUAL(-0.540262, vorticity_val[3 * layers + 1], eps);
   }
 }
 
@@ -472,73 +459,7 @@ void test_read_no_edges()
 
 void test_gather_onevar()
 {
-  Core moab;
-  Interface& mb = moab;
-
-  EntityHandle file_set;
-  ErrorCode rval = mb.create_meshset(MESHSET_SET, file_set);
-  CHECK_ERR(rval);
-
-  std::string opts;
-  get_options(opts);
-
-  // Read cell variable ke and create gather set on processor 0
-  opts += ";VARIABLE=ke;GATHER_SET=0";
-  rval = mb.load_file(example, &file_set, opts.c_str());
-  CHECK_ERR(rval);
-
-#ifdef USE_MPI
-  ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
-  int rank = pcomm->proc_config().proc_rank();
-
-  Range cells, cells_owned;
-  rval = mb.get_entities_by_type(file_set, MBPOLYGON, cells);
-  CHECK_ERR(rval);
-
-  // Get local owned cells
-  rval = pcomm->filter_pstatus(cells, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &cells_owned);
-  CHECK_ERR(rval);
-
-  EntityHandle gather_set = 0;
-  if (0 == rank) {
-    // Get gather set
-    ReadUtilIface* readUtilIface;
-    mb.query_interface(readUtilIface);
-    rval = readUtilIface->get_gather_set(gather_set);
-    CHECK_ERR(rval);
-    assert(gather_set != 0);
-  }
-
-  Tag ke_tag0, gid_tag;
-  rval = mb.tag_get_handle("ke0", 1, MB_TYPE_DOUBLE, ke_tag0, MB_TAG_DENSE);
-  CHECK_ERR(rval);
-
-  rval = mb.tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid_tag, MB_TAG_DENSE);
-  CHECK_ERR(rval);
-
-  pcomm->gather_data(cells_owned, ke_tag0, gid_tag, gather_set, 0);
-
-  if (0 == rank) {
-    // Get gather set cells
-    Range gather_set_cells;
-    rval = mb.get_entities_by_type(gather_set, MBPOLYGON, gather_set_cells);
-    CHECK_ERR(rval);
-    CHECK_EQUAL((size_t)642, gather_set_cells.size());
-    CHECK_EQUAL((size_t)2, gather_set_cells.psize());
-
-    // Check ke0 tag values on 4 gather set cells: first pentagon, last pentagon,
-    // first hexagon, and last hexagon
-    double ke0_val[4];
-    EntityHandle cell_ents[] = {gather_set_cells[0], gather_set_cells[11],
-                                gather_set_cells[12], gather_set_cells[641]};
-    rval = mb.tag_get_data(ke_tag0, cell_ents, 4, ke0_val);
-    CHECK_ERR(rval);
-    CHECK_REAL_EQUAL(15.001, ke0_val[0], eps);
-    CHECK_REAL_EQUAL(15.012, ke0_val[1], eps);
-    CHECK_REAL_EQUAL(16.013, ke0_val[2], eps);
-    CHECK_REAL_EQUAL(16.642, ke0_val[3], eps);
-  }
-#endif
+  // TBD
 }
 
 void get_options(std::string& opts)


https://bitbucket.org/fathomteam/moab/commits/6ade227de745/
Changeset:   6ade227de745
Branch:      None
User:        pshriwise
Date:        2014-06-02 18:39:04
Summary:     Merge branch 'master' of https://bitbucket.org/fathomteam/moab

Affected #:  4 files

diff --git a/MeshFiles/unittest/io/gcrm_r3.nc b/MeshFiles/unittest/io/gcrm_r3.nc
index 6d1b8d2..1c7328b 100644
Binary files a/MeshFiles/unittest/io/gcrm_r3.nc and b/MeshFiles/unittest/io/gcrm_r3.nc differ

diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index bcc555a..10ce709 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -7038,8 +7038,8 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
 
     // post ghost irecv's for all interface procs
     // index greqs the same as buffer/sharing procs indices
-    std::vector<MPI_Request> recv_tag_reqs(2*buffProcs.size(), MPI_REQUEST_NULL),
-      sent_ack_reqs(buffProcs.size(), MPI_REQUEST_NULL);
+    std::vector<MPI_Request> recv_tag_reqs(3*buffProcs.size(), MPI_REQUEST_NULL);
+    //  sent_ack_reqs(buffProcs.size(), MPI_REQUEST_NULL);
     std::vector<unsigned int>::iterator sit;
     int ind;
 
@@ -7054,7 +7054,7 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
       success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
                           MPI_UNSIGNED_CHAR, *sit,
                           MB_MESG_TAGS_SIZE, procConfig.proc_comm(), 
-                          &recv_tag_reqs[2*ind]);
+                          &recv_tag_reqs[3*ind]);
       if (success != MPI_SUCCESS) {
         result = MB_FAILURE;
         RRA("Failed to post irecv in ghost exchange.");
@@ -7064,7 +7064,7 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
   
     // pack and send tags from this proc to others
     // make sendReqs vector to simplify initialization
-    sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
+    sendReqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
   
     // take all shared entities if incoming list is empty
     Range entities;
@@ -7114,8 +7114,8 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
       RRA("Failed to count buffer in pack_send_tag.");
 
       // now send it
-      result = send_buffer(*sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[2*ind],
-                           recv_tag_reqs[2*ind+1], &dum_ack_buff, incoming);
+      result = send_buffer(*sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[3*ind],
+                           recv_tag_reqs[3*ind+2], &dum_ack_buff, incoming);
       RRA("Failed to send buffer.");
                          
     }
@@ -7123,13 +7123,16 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
     // receive/unpack tags
     while (incoming) {
       MPI_Status status;
+      int index_in_recv_requests;
       PRINT_DEBUG_WAITANY(recv_tag_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank());
-      success = MPI_Waitany(2*buffProcs.size(), &recv_tag_reqs[0], &ind, &status);
+      success = MPI_Waitany(3*buffProcs.size(), &recv_tag_reqs[0], &index_in_recv_requests, &status);
       if (MPI_SUCCESS != success) {
         result = MB_FAILURE;
-        RRA("Failed in waitany in ghost exchange.");
+        RRA("Failed in waitany in tag exchange.");
       }
-    
+      // processor index in the list is divided by 3
+      ind = index_in_recv_requests/3;
+
       PRINT_DEBUG_RECD(status);
 
       // ok, received something; decrement incoming counter
@@ -7139,16 +7142,19 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
       std::vector<EntityHandle> dum_vec;
       result = recv_buffer(MB_MESG_TAGS_SIZE,
                            status,
-                           remoteOwnedBuffs[ind/2],
-                           recv_tag_reqs[ind/2 * 2], recv_tag_reqs[ind/2 * 2 + 1],
+                           remoteOwnedBuffs[ind],
+                           recv_tag_reqs[3*ind + 1], // this is for receiving the second message
+                           recv_tag_reqs[3*ind + 2], // this would be for ack, but it is not used; consider removing it
                            incoming,
-                           localOwnedBuffs[ind/2], sendReqs[ind/2*2], sendReqs[ind/2*2+1],
+                           localOwnedBuffs[ind],
+                           sendReqs[3*ind+1], // send request for sending the second message
+                           sendReqs[3*ind+2], // this is for sending the ack
                            done);
       RRA("Failed to resize recv buffer.");
       if (done) {
-        remoteOwnedBuffs[ind/2]->reset_ptr(sizeof(int));
-        result = unpack_tags(remoteOwnedBuffs[ind/2]->buff_ptr,
-                             dum_vec, true, buffProcs[ind/2]);
+        remoteOwnedBuffs[ind]->reset_ptr(sizeof(int));
+        result = unpack_tags(remoteOwnedBuffs[ind]->buff_ptr,
+                             dum_vec, true, buffProcs[ind]);
         RRA("Failed to recv-unpack-tag message.");
       }
     }
@@ -7158,8 +7164,8 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
       success = MPI_Barrier(procConfig.proc_comm());
     }
     else {
-      MPI_Status status[2*MAX_SHARING_PROCS];
-      success = MPI_Waitall(2*buffProcs.size(), &sendReqs[0], status);
+      MPI_Status status[3*MAX_SHARING_PROCS];
+      success = MPI_Waitall(3*buffProcs.size(), &sendReqs[0], status);
     }
     if (MPI_SUCCESS != success) {
       result = MB_FAILURE;
@@ -7279,8 +7285,8 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
 
     // post ghost irecv's for all interface procs
     // index greqs the same as buffer/sharing procs indices
-    std::vector<MPI_Request> recv_tag_reqs(2*buffProcs.size(), MPI_REQUEST_NULL),
-      sent_ack_reqs(buffProcs.size(), MPI_REQUEST_NULL);
+    std::vector<MPI_Request> recv_tag_reqs(3*buffProcs.size(), MPI_REQUEST_NULL);
+
     std::vector<unsigned int>::iterator sit;
     int ind;
 
@@ -7295,7 +7301,7 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
       success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
                           MPI_UNSIGNED_CHAR, *sit,
                           MB_MESG_TAGS_SIZE, procConfig.proc_comm(), 
-                          &recv_tag_reqs[2*ind]);
+                          &recv_tag_reqs[3*ind]);
       if (success != MPI_SUCCESS) {
         result = MB_FAILURE;
         RRA("Failed to post irecv in ghost exchange.");
@@ -7305,7 +7311,7 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
   
     // pack and send tags from this proc to others
     // make sendReqs vector to simplify initialization
-    sendReqs.resize(2*buffProcs.size(), MPI_REQUEST_NULL);
+    sendReqs.resize(3*buffProcs.size(), MPI_REQUEST_NULL);
   
     // take all shared entities if incoming list is empty
     Range entities;
@@ -7362,8 +7368,8 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
       RRA("Failed to count buffer in pack_send_tag.");
 
       // now send it
-      result = send_buffer(*sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[2*ind],
-                           recv_tag_reqs[2*ind+1], &dum_ack_buff, incoming);
+      result = send_buffer(*sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[3*ind],
+                           recv_tag_reqs[3*ind+2], &dum_ack_buff, incoming);
       RRA("Failed to send buffer.");
                          
     }
@@ -7371,12 +7377,14 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
     // receive/unpack tags
     while (incoming) {
       MPI_Status status;
+      int index_in_recv_requests;
       PRINT_DEBUG_WAITANY(recv_tag_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank());
-      success = MPI_Waitany(2*buffProcs.size(), &recv_tag_reqs[0], &ind, &status);
+      success = MPI_Waitany(3*buffProcs.size(), &recv_tag_reqs[0], &index_in_recv_requests, &status);
       if (MPI_SUCCESS != success) {
         result = MB_FAILURE;
         RRA("Failed in waitany in ghost exchange.");
       }
+      ind = index_in_recv_requests/3;
     
       PRINT_DEBUG_RECD(status);
 
@@ -7385,15 +7393,19 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
     
       bool done = false;
       std::vector<EntityHandle> dum_vec;
-      result = recv_buffer(MB_MESG_TAGS_SIZE, status, remoteOwnedBuffs[ind/2],
-                           recv_tag_reqs[ind/2 * 2], recv_tag_reqs[ind/2 * 2 + 1],
-                           incoming, localOwnedBuffs[ind/2], sendReqs[ind/2*2], sendReqs[ind/2*2+1], 
-                           done);
+      result = recv_buffer(MB_MESG_TAGS_SIZE, status,
+                        remoteOwnedBuffs[ind],
+                        recv_tag_reqs[3*ind+1], // this is for receiving the second message
+                        recv_tag_reqs[3*ind+2], // this would be for ack, but it is not used; consider removing it
+                        incoming, localOwnedBuffs[ind],
+                        sendReqs[3*ind+1],// send request for sending the second message
+                        sendReqs[3*ind+2], // this is for sending the ack
+                        done);
       RRA("Failed to resize recv buffer.");
       if (done) {
-        remoteOwnedBuffs[ind/2]->reset_ptr(sizeof(int));
-        result = unpack_tags(remoteOwnedBuffs[ind/2]->buff_ptr,
-                               dum_vec, true, buffProcs[ind/2], &mpi_op);
+        remoteOwnedBuffs[ind]->reset_ptr(sizeof(int));
+        result = unpack_tags(remoteOwnedBuffs[ind]->buff_ptr,
+                               dum_vec, true, buffProcs[ind], &mpi_op);
         RRA("Failed to recv-unpack-tag message.");
       }
     }
@@ -7403,8 +7415,8 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
       success = MPI_Barrier(procConfig.proc_comm());
     }
     else {
-      MPI_Status status[2*MAX_SHARING_PROCS];
-      success = MPI_Waitall(2*buffProcs.size(), &sendReqs[0], status);
+      MPI_Status status[3*MAX_SHARING_PROCS];
+      success = MPI_Waitall(3*buffProcs.size(), &sendReqs[0], status);
     }
     if (MPI_SUCCESS != success) {
       result = MB_FAILURE;
@@ -7416,158 +7428,6 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
     return MB_SUCCESS;
   }
 
-  /*
-    ErrorCode ParallelComm::exchange_tags( Tag src_tag, 
-    Tag dst_tag, 
-    const Range& entities )
-    {
-    ErrorCode result;
-    int success;
-
-    // get all procs interfacing to this proc
-    std::set<unsigned int> exch_procs;
-    result = get_comm_procs(exch_procs);  
-
-    // post ghost irecv's for all interface procs
-    // index greqs the same as buffer/sharing procs indices
-    std::vector<MPI_Request> recv_reqs(MAX_SHARING_PROCS, MPI_REQUEST_NULL);
-    std::vector<MPI_Status> gstatus(MAX_SHARING_PROCS);
-    std::vector<unsigned int>::iterator sit;
-    int ind;
-    for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); sit++, ind++) {
-    success = MPI_Irecv(&ghostRBuffs[ind][0], ghostRBuffs[ind].size(), 
-    MPI_UNSIGNED_CHAR, *sit,
-    MB_MESG_ANY, procConfig.proc_comm(), 
-    &recv_reqs[ind]);
-    if (success != MPI_SUCCESS) {
-    result = MB_FAILURE;
-    RRA("Failed to post irecv in ghost exchange.");
-    }
-    }
-  
-    // figure out which entities are shared with which processors
-    std::map<int,Range> proc_ents;
-    int other_procs[MAX_SHARING_PROCS], num_sharing;
-    for (Range::const_iterator i = entities.begin(); i != entities.end(); ++i) {
-    int owner;
-    result = get_owner( *i, owner );
-    RRA("Failed to get entity owner.");
-
-    // only send entities that this proc owns
-    if ((unsigned)owner != proc_config().proc_rank()) 
-    continue;
-    
-    result = get_sharing_parts( *i, other_procs, num_sharing );
-    RRA("Failed to get procs sharing entity.");
-    if (num_sharing == 0) // keep track of non-shared entities for later
-    proc_ents[proc_config().proc_rank()].insert( *i );
-    for (int j = 0; j < num_sharing; ++j)
-    proc_ents[other_procs[j]].insert( *i );
-    }
-  
-    // pack and send tags from this proc to others
-    // make sendReqs vector to simplify initialization
-    std::fill(sendReqs, sendReqs+MAX_SHARING_PROCS, MPI_REQUEST_NULL);
-    std::map<unsigned int,Range>::const_iterator mit;
-  
-    for (ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); sit++, ind++) {
-    
-    // count first
-    // buffer needs to begin with the number of tags (one)
-    int buff_size = sizeof(int);
-    result = packed_tag_size( src_tag, proc_ents[*sit], buff_size );
-    RRA("Failed to count buffer in pack_send_tag.");
-
-    unsigned char *buff_ptr = &ownerSBuffs[ind][0];
-    buff->check_space(ownerSBuffs[ind], buff_ptr, buff_size);
-    PACK_INT( buff_ptr, 1 ); // number of tags
-    result = pack_tag( src_tag, dst_tag, proc_ents[*sit], proc_ents[*sit],
-    ownerSBuffs[ind], buff_ptr, true, *sit );
-    RRA("Failed to pack buffer in pack_send_tag.");
-
-    // if the message is large, send a first message to tell how large
-    if (INITIAL_BUFF_SIZE < buff_size) {
-    int tmp_buff_size = -buff_size;
-    int success = MPI_Send(&tmp_buff_size, sizeof(int), MPI_UNSIGNED_CHAR, 
-    *sit, MB_MESG_SIZE, procConfig.proc_comm());
-    if (success != MPI_SUCCESS) return MB_FAILURE;
-    }
-    
-    // send the buffer
-    success = MPI_Isend(&ownerSBuffs[ind][0], buff_size, MPI_UNSIGNED_CHAR, *sit, 
-    MB_MESG_TAGS, procConfig.proc_comm(), &sendReqs[ind]);
-    if (success != MPI_SUCCESS) return MB_FAILURE;
-    }
-  
-    // receive/unpack tags
-    int num_incoming = exch_procs.size();
-  
-    while (num_incoming) {
-    int ind;
-    MPI_Status status;
-    success = MPI_Waitany(MAX_SHARING_PROCS, &recv_reqs[0], &ind, &status);
-    if (MPI_SUCCESS != success) {
-    result = MB_FAILURE;
-    RRA("Failed in waitany in ghost exchange.");
-    }
-    
-    // ok, received something; decrement incoming counter
-    num_incoming--;
-    
-    int new_size;
-    unsigned char *buff_ptr;
-    Range dum_range;
-    
-    // branch on message type
-    switch (status.MPI_TAG) {
-    case MB_MESG_SIZE:
-    // incoming message just has size; resize buffer and re-call recv,
-    // then re-increment incoming count
-    assert(ind < MAX_SHARING_PROCS);
-    new_size = *((int*)&ghostRBuffs[ind][0]);
-    assert(0 > new_size);
-    result = recv_size_buff(buffProcs[ind], ghostRBuffs[ind], recv_reqs[ind],
-    MB_MESG_TAGS);
-    RRA("Failed to resize recv buffer.");
-    num_incoming++;
-    break;
-    case MB_MESG_TAGS:
-    // incoming ghost entities; process
-    buff_ptr = &ghostRBuffs[ind][0];
-    result = unpack_tags(buff_ptr, dum_range, true,
-    buffProcs[ind]);
-    RRA("Failed to recv-unpack-tag message.");
-    break;
-    default:
-    result = MB_FAILURE;
-    RRA("Failed to get message of correct type in exch_tags.");
-    break;
-    }
-    }
-  
-    // ok, now wait
-    MPI_Status status[MAX_SHARING_PROCS];
-    success = MPI_Waitall(MAX_SHARING_PROCS, &sendReqs[0], status);
-    if (MPI_SUCCESS != success) {
-    result = MB_FAILURE;
-    RRA("Failure in waitall in tag exchange.");
-    }
-  
-    // if src and destination tags aren't the same, need to copy 
-    // values for local entities
-    if (src_tag != dst_tag) {
-    const Range& myents = proc_ents[proc_config().proc_rank()];
-    std::vector<const void*> data_ptrs(myents.size());
-    std::vector<int> data_sizes(myents.size());
-    result = get_moab()->tag_get_data( src_tag, myents, &data_ptrs[0], &data_sizes[0] );
-    RRA("Failure to get pointers to local data.");
-    result = get_moab()->tag_set_data( dst_tag, myents, &data_ptrs[0], &data_sizes[0] );
-    RRA("Failure to get pointers to local data.");
-    }  
-  
-    return MB_SUCCESS;
-    }
-  */
 
   //! return sharedp tag
   Tag ParallelComm::sharedp_tag()
@@ -8714,8 +8574,7 @@ ErrorCode ParallelComm::settle_intersection_points(Range & edges, Range & shared
 
   // post ghost irecv's for all interface procs
   // index requests the same as buffer/sharing procs indices
-  std::vector<MPI_Request>  recv_intx_reqs(2 * buffProcs.size(), MPI_REQUEST_NULL),
-      sent_ack_reqs(buffProcs.size(), MPI_REQUEST_NULL);
+  std::vector<MPI_Request>  recv_intx_reqs(3 * buffProcs.size(), MPI_REQUEST_NULL);
   std::vector<unsigned int>::iterator sit;
   int ind;
 
@@ -8729,7 +8588,7 @@ ErrorCode ParallelComm::settle_intersection_points(Range & edges, Range & shared
 
     success = MPI_Irecv(remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
         MPI_UNSIGNED_CHAR, *sit, MB_MESG_TAGS_SIZE, procConfig.proc_comm(),
-        &recv_intx_reqs[2 * ind]);
+        &recv_intx_reqs[3 * ind]);
     if (success != MPI_SUCCESS) {
       result = MB_FAILURE;
       RRA("Failed to post irecv in settle intersection point.");
@@ -8739,7 +8598,7 @@ ErrorCode ParallelComm::settle_intersection_points(Range & edges, Range & shared
 
   // pack and send intersection points from this proc to others
   // make sendReqs vector to simplify initialization
-  sendReqs.resize(2 * buffProcs.size(), MPI_REQUEST_NULL);
+  sendReqs.resize(3 * buffProcs.size(), MPI_REQUEST_NULL);
 
   // take all shared entities if incoming list is empty
   Range & entities = shared_edges_owned;
@@ -8815,7 +8674,7 @@ ErrorCode ParallelComm::settle_intersection_points(Range & edges, Range & shared
 
     // now send it
     result = send_buffer(*sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE,
-        sendReqs[2 * ind], recv_intx_reqs[2 * ind + 1], &dum_ack_buff, incoming);
+        sendReqs[3 * ind], recv_intx_reqs[3 * ind + 2], &dum_ack_buff, incoming);
     RRA("Failed to send buffer.");
 
   }
@@ -8823,13 +8682,16 @@ ErrorCode ParallelComm::settle_intersection_points(Range & edges, Range & shared
   // receive/unpack intx points
   while (incoming) {
     MPI_Status status;
+    int index_in_recv_requests;
     PRINT_DEBUG_WAITANY(recv_intx_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank());
-    success = MPI_Waitany(2 * buffProcs.size(), &recv_intx_reqs[0], &ind,
-        &status);
+    success = MPI_Waitany(3 * buffProcs.size(), &recv_intx_reqs[0],
+        &index_in_recv_requests, &status);
     if (MPI_SUCCESS != success) {
       result = MB_FAILURE;
       RRA("Failed in waitany in ghost exchange.");
     }
+    // processor index in the list is divided by 3
+    ind = index_in_recv_requests/3;
 
     PRINT_DEBUG_RECD(status);
 
@@ -8838,13 +8700,18 @@ ErrorCode ParallelComm::settle_intersection_points(Range & edges, Range & shared
 
     bool done = false;
     std::vector<EntityHandle> dum_vec;
-    result = recv_buffer(MB_MESG_TAGS_SIZE, status, remoteOwnedBuffs[ind / 2],
-        recv_intx_reqs[ind / 2 * 2], recv_intx_reqs[ind / 2 * 2 + 1], incoming,
-        localOwnedBuffs[ind / 2], sendReqs[ind / 2 * 2],
-        sendReqs[ind / 2 * 2 + 1], done);
+    result = recv_buffer(MB_MESG_TAGS_SIZE, status,
+        remoteOwnedBuffs[ind],
+        recv_intx_reqs[3*ind+1], // this is for receiving the second message
+        recv_intx_reqs[3*ind+2], // this would be for ack, but it is not used; consider removing it
+        incoming,
+        localOwnedBuffs[ind],
+        sendReqs[3*ind+1], // send request for sending the second message
+        sendReqs[3*ind+2], // this is for sending the ack
+        done);
     RRA("Failed to resize recv buffer.");
     if (done) {
-      Buffer * buff = remoteOwnedBuffs[ind / 2];
+      Buffer * buff = remoteOwnedBuffs[ind];
       buff->reset_ptr(sizeof(int));
       /*result = unpack_tags(remoteOwnedBuffs[ind / 2]->buff_ptr, dum_vec, true,
           buffProcs[ind / 2]);*/
@@ -8910,8 +8777,8 @@ ErrorCode ParallelComm::settle_intersection_points(Range & edges, Range & shared
   if (myDebug->get_verbosity() == 5) {
     success = MPI_Barrier(procConfig.proc_comm());
   } else {
-    MPI_Status status[2 * MAX_SHARING_PROCS];
-    success = MPI_Waitall(2 * buffProcs.size(), &sendReqs[0], status);
+    MPI_Status status[3 * MAX_SHARING_PROCS];
+    success = MPI_Waitall(3 * buffProcs.size(), &sendReqs[0], status);
   }
   if (MPI_SUCCESS != success) {
     result = MB_FAILURE;

diff --git a/test/io/read_gcrm_nc.cpp b/test/io/read_gcrm_nc.cpp
index 1304495..91608f2 100644
--- a/test/io/read_gcrm_nc.cpp
+++ b/test/io/read_gcrm_nc.cpp
@@ -27,7 +27,7 @@ void test_gather_onevar(); // Test gather set with one variable
 void get_options(std::string& opts);
 
 const double eps = 1e-6;
-const int layers = 256;
+const int layers = 3;
 
 int main(int argc, char* argv[])
 {

diff --git a/test/io/write_nc.cpp b/test/io/write_nc.cpp
index d59829d..15979c0 100644
--- a/test/io/write_nc.cpp
+++ b/test/io/write_nc.cpp
@@ -747,7 +747,8 @@ void test_mpas_check_vars()
   }
 }
 
-// Write vertex variable vorticity, edge variable u and cell veriable ke
+// Check vertex variable u, edge variable wind, cell variable vorticity (on layers),
+// and cell variable pressure (on interfaces)
 void test_gcrm_read_write_vars()
 {
   int procs = 1;
@@ -772,15 +773,15 @@ void test_gcrm_read_write_vars()
   ErrorCode rval = mb.create_meshset(MESHSET_SET, set);
   CHECK_ERR(rval);
 
-  // Read non-set variables vorticity (cells) and u (corners)
-  read_opts += ";VARIABLE=vorticity,u;DEBUG_IO=0";
+  // Read non-set variables u, wind, vorticity and pressure
+  read_opts += ";VARIABLE=u,wind,vorticity,pressure;DEBUG_IO=0";
   if (procs > 1)
     read_opts += ";PARALLEL_RESOLVE_SHARED_ENTS";
   rval = mb.load_file(example_gcrm, &set, read_opts.c_str());
   CHECK_ERR(rval);
 
-  // Write variables vorticity, u
-  std::string write_opts = ";;VARIABLE=vorticity,u;DEBUG_IO=0";
+  // Write variables u, wind, vorticity and pressure
+  std::string write_opts = ";;VARIABLE=u,wind,vorticity,pressure;DEBUG_IO=0";
 #ifdef USE_MPI
   // Use parallel options
   write_opts += ";PARALLEL=WRITE_PART";


https://bitbucket.org/fathomteam/moab/commits/0aeaec09ab16/
Changeset:   0aeaec09ab16
Branch:      None
User:        pshriwise
Date:        2014-06-09 13:47:31
Summary:     Merge branch 'master' of https://bitbucket.org/fathomteam/moab

Affected #:  14 files

diff --git a/src/MergeMesh.cpp b/src/MergeMesh.cpp
index 4978f79..cb43b85 100644
--- a/src/MergeMesh.cpp
+++ b/src/MergeMesh.cpp
@@ -5,6 +5,7 @@
 #include "moab/Range.hpp"
 #include "moab/CartVect.hpp"
 
+#include "Internals.hpp"
 #include <vector>
 #include <algorithm>
 #include <string>
@@ -15,108 +16,123 @@
 
 namespace moab {
 
-  moab::ErrorCode MergeMesh::merge_entities(moab::EntityHandle *elems,
-					    int elems_size,
-					    const double merge_tol,
-					    const int do_merge,
-					    const int update_sets,
-					    moab::Tag merge_tag, 
-					    bool do_higher_dim) 
+ErrorCode MergeMesh::merge_entities(EntityHandle *elems,
+    int elems_size, const double merge_tol, const int do_merge,
+    const int update_sets, Tag merge_tag, bool do_higher_dim)
 {
   mergeTol = merge_tol;
-  mergeTolSq = merge_tol*merge_tol;
-  moab::Range tmp_elems;
-  tmp_elems.insert( elems, elems + elems_size);
-  moab::ErrorCode result = merge_entities(tmp_elems, merge_tol, do_merge, update_sets,
-					  (moab::Tag)merge_tag, do_higher_dim);
+  mergeTolSq = merge_tol * merge_tol;
+  Range tmp_elems;
+  tmp_elems.insert(elems, elems + elems_size);
+  ErrorCode result = merge_entities(tmp_elems, merge_tol, do_merge,
+      update_sets, (Tag) merge_tag, do_higher_dim);
 
   return result;
 }
 
 /*  This function appears to be not necessary after MOAB conversion
 
-void MergeMesh::perform_merge(iBase_TagHandle merge_tag) 
-{
-  // put into a range
-  moab::ErrorCode result = perform_merge((moab::Tag) merge_tag);
-  if (result != moab::MB_SUCCESS)
-    throw MKException(iBase_FAILURE, "");
-}*/
-
-moab::ErrorCode MergeMesh::merge_entities(moab::Range &elems,
-                                          const double merge_tol,
-                                          const int do_merge,
-                                          const int ,
-                                          moab::Tag merge_tag,
-					  bool merge_higher_dim) 
+ void MergeMesh::perform_merge(iBase_TagHandle merge_tag)
+ {
+ // put into a range
+ ErrorCode result = perform_merge((Tag) merge_tag);
+ if (result != MB_SUCCESS)
+ throw MKException(iBase_FAILURE, "");
+ }*/
+
+ErrorCode MergeMesh::merge_entities(Range &elems,
+    const double merge_tol, const int do_merge, const int, Tag merge_tag,
+    bool merge_higher_dim)
 {
   //If merge_higher_dim is true, do_merge must also be true
-  if(merge_higher_dim && !do_merge){
-    return moab::MB_FAILURE;
+  if (merge_higher_dim && !do_merge)
+  {
+    return MB_FAILURE;
   }
 
   mergeTol = merge_tol;
-  mergeTolSq = merge_tol*merge_tol;
+  mergeTolSq = merge_tol * merge_tol;
 
   // get the skin of the entities
-  moab::Skinner skinner(mbImpl);
-  moab::Range skin_range;
-  moab::ErrorCode result = skinner.find_skin(0, elems, 0, skin_range, false, false);
-  if (moab::MB_SUCCESS != result) return result;
+  Skinner skinner(mbImpl);
+  Range skin_range;
+  ErrorCode result = skinner.find_skin(0, elems, 0, skin_range, false,
+      false);
+  if (MB_SUCCESS != result)
+    return result;
 
   // create a tag to mark merged-to entity; reuse tree_root
-  moab::EntityHandle tree_root = 0;
-  if (0 == merge_tag) {
-    result = mbImpl->tag_get_handle("__merge_tag", 1, moab::MB_TYPE_HANDLE, 
-                                    mbMergeTag, 
-                                    moab::MB_TAG_DENSE|moab::MB_TAG_EXCL,
-                                    &tree_root);
-    if (moab::MB_SUCCESS != result) return result;
+  EntityHandle tree_root = 0;
+  if (0 == merge_tag)
+  {
+    result = mbImpl->tag_get_handle("__merge_tag", 1, MB_TYPE_HANDLE,
+        mbMergeTag, MB_TAG_DENSE | MB_TAG_EXCL, &tree_root);
+    if (MB_SUCCESS != result)
+      return result;
   }
-  else mbMergeTag = merge_tag;
-  
+  else
+    mbMergeTag = merge_tag;
+
   // build a kd tree with the vertices
-  moab::AdaptiveKDTree kd(mbImpl);
+  AdaptiveKDTree kd(mbImpl);
   result = kd.build_tree(skin_range, &tree_root);
-  if (moab::MB_SUCCESS != result) return result;
+  if (MB_SUCCESS != result)
+    return result;
 
   // find matching vertices, mark them
   result = find_merged_to(tree_root, kd, mbMergeTag);
-  if (moab::MB_SUCCESS != result) return result;
+  if (MB_SUCCESS != result)
+    return result;
 
   // merge them if requested
-  if (do_merge) {
+  if (do_merge)
+  {
     result = perform_merge(mbMergeTag);
-    if (moab::MB_SUCCESS != result) return result;
+    if (MB_SUCCESS != result)
+      return result;
   }
 
-  if(merge_higher_dim && deadEnts.size() != 0){
+  if (merge_higher_dim && deadEnts.size() != 0)
+  {
     result = merge_higher_dimensions(elems);
-    if(moab::MB_SUCCESS != result) return result;
+    if (MB_SUCCESS != result)
+      return result;
   }
-  
-  return moab::MB_SUCCESS;
+
+  return MB_SUCCESS;
 }
 
-moab::ErrorCode MergeMesh::perform_merge(moab::Tag merge_tag) 
+ErrorCode MergeMesh::perform_merge(Tag merge_tag)
 {
-  moab::ErrorCode result;
-  if (deadEnts.size()==0){
-    if(printError)std::cout << "\nWarning: Geometries don't have a common face; Nothing to merge" << std::endl;
-    return moab::MB_SUCCESS; //nothing to merge carry on with the program
+  // we start with an empty range of vertices that are "merged to"
+  // they are used (eventually) for higher dim entities
+  mergedToVertices.clear();
+  ErrorCode result;
+  if (deadEnts.size() == 0)
+  {
+    if (printError)
+      std::cout
+          << "\nWarning: Geometries don't have a common face; Nothing to merge"
+          << std::endl;
+    return MB_SUCCESS; //nothing to merge carry on with the program
   }
-  if  (mbImpl->type_from_handle(*deadEnts.rbegin()) != moab::MBVERTEX) 
-    return moab::MB_FAILURE;
-  std::vector<moab::EntityHandle> merge_tag_val(deadEnts.size());
+  if (mbImpl->type_from_handle(*deadEnts.rbegin()) != MBVERTEX)
+    return MB_FAILURE;
+  std::vector<EntityHandle> merge_tag_val(deadEnts.size());
   result = mbImpl->tag_get_data(merge_tag, deadEnts, &merge_tag_val[0]);
-  if (moab::MB_SUCCESS != result) return result;
-  
-  moab::Range::iterator rit;
+  if (MB_SUCCESS != result)
+    return result;
+
+  Range::iterator rit;
   unsigned int i;
-  for (rit = deadEnts.begin(), i = 0; rit != deadEnts.end(); rit++, i++) {
+  for (rit = deadEnts.begin(), i = 0; rit != deadEnts.end(); rit++, i++)
+  {
     assert(merge_tag_val[i]);
+    if (MBVERTEX==TYPE_FROM_HANDLE(merge_tag_val[i]) )
+      mergedToVertices.insert(merge_tag_val[i]);
     result = mbImpl->merge_entities(merge_tag_val[i], *rit, false, false);
-    if (moab::MB_SUCCESS != result) {
+    if (MB_SUCCESS != result)
+    {
       return result;
     }
   }
@@ -124,142 +140,249 @@ moab::ErrorCode MergeMesh::perform_merge(moab::Tag merge_tag)
   return result;
 }
 
-moab::ErrorCode MergeMesh::find_merged_to(moab::EntityHandle &tree_root, 
-                                          moab::AdaptiveKDTree &tree,
-					  moab::Tag merge_tag) 
+ErrorCode MergeMesh::find_merged_to(EntityHandle &tree_root,
+    AdaptiveKDTree &tree, Tag merge_tag)
 {
-  moab::AdaptiveKDTreeIter iter;
-  
+  AdaptiveKDTreeIter iter;
+
   // evaluate vertices in this leaf
-  moab::Range leaf_range, leaf_range2;
-  std::vector<moab::EntityHandle> sorted_leaves;
+  Range leaf_range, leaf_range2;
+  std::vector<EntityHandle> sorted_leaves;
   std::vector<double> coords;
-  std::vector<moab::EntityHandle> merge_tag_val, leaves_out;
-  
-  moab::ErrorCode result = tree.get_tree_iterator(tree_root, iter);
-  if (moab::MB_SUCCESS != result) return result;
-  while (result == moab::MB_SUCCESS) {
-    sorted_leaves.push_back( iter.handle() );
+  std::vector<EntityHandle> merge_tag_val, leaves_out;
+
+  ErrorCode result = tree.get_tree_iterator(tree_root, iter);
+  if (MB_SUCCESS != result)
+    return result;
+  while (result == MB_SUCCESS)
+  {
+    sorted_leaves.push_back(iter.handle());
     result = iter.step();
   }
-  if (result != moab::MB_ENTITY_NOT_FOUND)
+  if (result != MB_ENTITY_NOT_FOUND)
     return result;
-  std::sort( sorted_leaves.begin(), sorted_leaves.end() );
-  
-  std::vector<moab::EntityHandle>::iterator it;
-  for (it = sorted_leaves.begin(); it != sorted_leaves.end(); ++it) {
+  std::sort(sorted_leaves.begin(), sorted_leaves.end());
+
+  std::vector<EntityHandle>::iterator it;
+  for (it = sorted_leaves.begin(); it != sorted_leaves.end(); ++it)
+  {
 
     leaf_range.clear();
     result = mbImpl->get_entities_by_handle(*it, leaf_range);
-    if (moab::MB_SUCCESS != result) return result;
-    coords.resize(3*leaf_range.size());
+    if (MB_SUCCESS != result)
+      return result;
+    coords.resize(3 * leaf_range.size());
     merge_tag_val.resize(leaf_range.size());
     result = mbImpl->get_coords(leaf_range, &coords[0]);
-    if (moab::MB_SUCCESS != result) return result;
+    if (MB_SUCCESS != result)
+      return result;
     result = mbImpl->tag_get_data(merge_tag, leaf_range, &merge_tag_val[0]);
-    if (moab::MB_SUCCESS != result) return result;
-    moab::Range::iterator rit;
+    if (MB_SUCCESS != result)
+      return result;
+    Range::iterator rit;
     unsigned int i;
     bool inleaf_merged, outleaf_merged = false;
     unsigned int lr_size = leaf_range.size();
-    
-    for (i = 0, rit = leaf_range.begin(); i != lr_size; rit++, i++) {
-      if (0 != merge_tag_val[i]) continue;
-      moab::CartVect from(&coords[3*i]);
+
+    for (i = 0, rit = leaf_range.begin(); i != lr_size; rit++, i++)
+    {
+      if (0 != merge_tag_val[i])
+        continue;
+      CartVect from(&coords[3 * i]);
       inleaf_merged = false;
 
       // check close-by leaves too
       leaves_out.clear();
-      result = tree.distance_search(from.array(), mergeTol,
-                                    leaves_out, mergeTol, 1.0e-6, NULL, NULL, &tree_root);
+      result = tree.distance_search(from.array(), mergeTol, leaves_out,
+          mergeTol, 1.0e-6, NULL, NULL, &tree_root);
       leaf_range2.clear();
-      for (std::vector<moab::EntityHandle>::iterator vit = leaves_out.begin();
-           vit != leaves_out.end(); vit++) {
-        if (*vit > *it) { // if we haven't visited this leaf yet in the outer loop
-          result = mbImpl->get_entities_by_handle(*vit, leaf_range2, moab::Interface::UNION);
-          if (moab::MB_SUCCESS != result) return result;
+      for (std::vector<EntityHandle>::iterator vit = leaves_out.begin();
+          vit != leaves_out.end(); vit++)
+      {
+        if (*vit > *it)
+        { // if we haven't visited this leaf yet in the outer loop
+          result = mbImpl->get_entities_by_handle(*vit, leaf_range2,
+              Interface::UNION);
+          if (MB_SUCCESS != result)
+            return result;
         }
       }
-      if (!leaf_range2.empty()) {
-        coords.resize(3*(lr_size+leaf_range2.size()));
-        merge_tag_val.resize(lr_size+leaf_range2.size());
-        result = mbImpl->get_coords(leaf_range2, &coords[3*lr_size]);
-        if (moab::MB_SUCCESS != result) return result;
-        result = mbImpl->tag_get_data(merge_tag, leaf_range2, &merge_tag_val[lr_size]);
-        if (moab::MB_SUCCESS != result) return result;
+      if (!leaf_range2.empty())
+      {
+        coords.resize(3 * (lr_size + leaf_range2.size()));
+        merge_tag_val.resize(lr_size + leaf_range2.size());
+        result = mbImpl->get_coords(leaf_range2, &coords[3 * lr_size]);
+        if (MB_SUCCESS != result)
+          return result;
+        result = mbImpl->tag_get_data(merge_tag, leaf_range2,
+            &merge_tag_val[lr_size]);
+        if (MB_SUCCESS != result)
+          return result;
         outleaf_merged = false;
       }
 
       // check other verts in this leaf
-      for (unsigned int j = i+1; j < merge_tag_val.size(); j++) {
-        moab::EntityHandle to_ent = j >= lr_size ? leaf_range2[j-lr_size] : 
-	  leaf_range[j];
-        
-        if (*rit == to_ent) continue;
-        
-        if ((from - moab::CartVect(&coords[3*j])).length_squared() < mergeTolSq) {
+      for (unsigned int j = i + 1; j < merge_tag_val.size(); j++)
+      {
+        EntityHandle to_ent =
+            j >= lr_size ? leaf_range2[j - lr_size] : leaf_range[j];
+
+        if (*rit == to_ent)
+          continue;
+
+        if ((from - CartVect(&coords[3 * j])).length_squared()
+            < mergeTolSq)
+        {
           merge_tag_val[j] = *rit;
-          if (j < lr_size){
-	    inleaf_merged = true;}
-          else{
-	    outleaf_merged = true;}
+          if (j < lr_size)
+          {
+            inleaf_merged = true;
+          }
+          else
+          {
+            outleaf_merged = true;
+          }
           deadEnts.insert(to_ent);
         }
 
       }
-      if (outleaf_merged) {
-	result = mbImpl->tag_set_data(merge_tag, leaf_range2, &merge_tag_val[leaf_range.size()]);
-        if (moab::MB_SUCCESS != result) return result;
-	outleaf_merged = false;
+      if (outleaf_merged)
+      {
+        result = mbImpl->tag_set_data(merge_tag, leaf_range2,
+            &merge_tag_val[leaf_range.size()]);
+        if (MB_SUCCESS != result)
+          return result;
+        outleaf_merged = false;
       }
-      if (inleaf_merged) {
-	result = mbImpl->tag_set_data(merge_tag, leaf_range, &merge_tag_val[0]);
-	if (moab::MB_SUCCESS != result) return result;
+      if (inleaf_merged)
+      {
+        result = mbImpl->tag_set_data(merge_tag, leaf_range, &merge_tag_val[0]);
+        if (MB_SUCCESS != result)
+          return result;
       }
 
     }
   }
-  return moab::MB_SUCCESS;
+  return MB_SUCCESS;
 }
 
-
 //Determine which higher dimensional entities should be merged
-moab::ErrorCode MergeMesh::merge_higher_dimensions(moab::Range &elems)
-{ 
-  Range skinEnts, adj, matches, moreDeadEnts;  moab::ErrorCode result;
-  moab::Skinner skinner(mbImpl);
+ErrorCode MergeMesh::merge_higher_dimensions(Range &elems)
+{
+  // apply a different strategy
+  // look at the vertices that were merged to, earlier, and find all entities adjacent to them
+  // elems (input) are used just for initial connectivity
+  ErrorCode result;
+  Range verts;
+  result = mbImpl->get_connectivity(elems, verts);
+  if (MB_SUCCESS!=result)
+    return result;
+
+  // all higher dim entities that will be merged will be connected to the vertices that were
+  // merged earlier; we will look at these vertices only
+  Range vertsOfInterest=intersect(this->mergedToVertices, verts);
   //Go through each dimension
-  for(int dim = 1; dim <3; dim++){
+  Range possibleEntsToMerge, conn, matches, moreDeadEnts;
+
+  for (int dim = 1; dim < 3; dim++)
+  {
+    moreDeadEnts.clear();
+    possibleEntsToMerge.clear();
+    result = mbImpl->get_adjacencies(vertsOfInterest,
+                                             dim, false, possibleEntsToMerge,
+                                             Interface::UNION);
+    if (MB_SUCCESS!=result)
+      return result;
+    //Go through each possible entity and see if it shares vertices with another entity of same dimension
+    for (Range::iterator pit = possibleEntsToMerge.begin();
+        pit != possibleEntsToMerge.end(); pit++)
+    {
+      EntityHandle eh=*pit;//possible entity to be matched
+      conn.clear();
+      //Get the vertices connected to it in a range
+
+      result = mbImpl->get_connectivity(&eh, 1, conn);
+      if (MB_SUCCESS!=result)
+        return result;
+      matches.clear();
+      // now retrieve all entities connected to all conn vertices
+      result = mbImpl->get_adjacencies(conn, dim, false, matches,
+                                                   Interface::INTERSECT);
+      if (MB_SUCCESS!=result)
+        return result;
+      if (matches.size() > 1)
+      {
+        for (Range::iterator matchIt = matches.begin();
+            matchIt != matches.end(); matchIt++)
+        {
+          EntityHandle to_remove=*matchIt;
+          if (to_remove != eh)
+          {
+            moreDeadEnts.insert(to_remove);
+            result = mbImpl->merge_entities(eh, to_remove, false, false);
+            if (result != MB_SUCCESS)
+              return result;
+            possibleEntsToMerge.erase(to_remove);
+          }
+        }
+      }
+
+    }
+    //Delete the entities of dimension dim
+    result = mbImpl->delete_entities(moreDeadEnts);
+    if (result != MB_SUCCESS)
+      return result;
+  }
+  return MB_SUCCESS;
+#if 0
+  Range skinEnts, adj, matches, moreDeadEnts;
+  ErrorCode result;
+  Skinner skinner(mbImpl);
+  //Go through each dimension
+  for (int dim = 1; dim < 3; dim++)
+  {
     skinEnts.clear();
     moreDeadEnts.clear();
     result = skinner.find_skin(0, elems, dim, skinEnts, false, false);
     //Go through each skin entity and see if it shares adjacancies with another entity
-    for(moab::Range::iterator skinIt = skinEnts.begin(); skinIt != skinEnts.end(); skinIt++){
+    for (Range::iterator skinIt = skinEnts.begin();
+        skinIt != skinEnts.end(); skinIt++)
+    {
       adj.clear();
       //Get the adjacencies 1 dimension lower
-      result = mbImpl->get_adjacencies(&(*skinIt), 1, dim-1, false, adj);
-      if(result != moab::MB_SUCCESS) return result;
+      result = mbImpl->get_adjacencies(&(*skinIt), 1, dim - 1, false, adj);
+      if (result != MB_SUCCESS)
+        return result;
       //See what other entities share these adjacencies
       matches.clear();
-      result = mbImpl->get_adjacencies(adj, dim, false, matches, moab::Interface::INTERSECT);
-      if(result != moab::MB_SUCCESS) return result;
+      result = mbImpl->get_adjacencies(adj, dim, false, matches,
+          Interface::INTERSECT);
+      if (result != MB_SUCCESS)
+        return result;
       //If there is more than one entity, then we have some to merge and erase
-      if(matches.size() > 1){
-	for(moab::Range::iterator matchIt = matches.begin(); matchIt != matches.end(); matchIt++){
-	  if(*matchIt != *skinIt){
-	    moreDeadEnts.insert(*matchIt);
-	    result = mbImpl->merge_entities(*skinIt, *matchIt, false, false);
-	    if(result != moab::MB_SUCCESS) return result;
-	    skinEnts.erase(*matchIt);
-	  }
-	}
-      }      
+      if (matches.size() > 1)
+      {
+        for (Range::iterator matchIt = matches.begin();
+            matchIt != matches.end(); matchIt++)
+        {
+          if (*matchIt != *skinIt)
+          {
+            moreDeadEnts.insert(*matchIt);
+            result = mbImpl->merge_entities(*skinIt, *matchIt, false, false);
+            if (result != MB_SUCCESS)
+              return result;
+            skinEnts.erase(*matchIt);
+          }
+        }
+      }
     }
     //Delete the entities
     result = mbImpl->delete_entities(moreDeadEnts);
-    if(result != moab::MB_SUCCESS)return result;
+    if (result != MB_SUCCESS)
+      return result;
   }
-  return moab::MB_SUCCESS;
+  return MB_SUCCESS;
+#endif
 }
 
-}//End namespace moab
+} //End namespace moab

diff --git a/src/io/NCHelperGCRM.cpp b/src/io/NCHelperGCRM.cpp
index f61cca5..14c4b5f 100644
--- a/src/io/NCHelperGCRM.cpp
+++ b/src/io/NCHelperGCRM.cpp
@@ -18,12 +18,11 @@
 
 namespace moab {
 
-const int DEFAULT_MAX_EDGES_PER_CELL = 6;
+// GCRM cells are either pentagons or hexagons, and pentagons are always padded to hexagons
+const int EDGES_PER_CELL = 6;
 
 NCHelperGCRM::NCHelperGCRM(ReadNC* readNC, int fileId, const FileOptions& opts, EntityHandle fileSet)
 : UcdNCHelper(readNC, fileId, opts, fileSet)
-, maxEdgesPerCell(DEFAULT_MAX_EDGES_PER_CELL)
-, numCellGroups(0)
 , createGatherSet(false)
 {
   // Ignore variables containing topological information
@@ -38,7 +37,7 @@ bool NCHelperGCRM::can_read_file(ReadNC* readNC)
 {
   std::vector<std::string>& dimNames = readNC->dimNames;
 
-  // If dimension name "vertexDegree" exists then it should be the GCRM grid
+  // If dimension name "cells" exists then it should be the GCRM grid
   if (std::find(dimNames.begin(), dimNames.end(), std::string("cells")) != dimNames.end())
     return true;
 
@@ -94,7 +93,7 @@ ErrorCode NCHelperGCRM::init_mesh_vals()
   vDim = idx;
   nVertices = dimLens[idx];
 
-  // Get number of vertex levels
+  // Get number of layers
   if ((vit = std::find(dimNames.begin(), dimNames.end(), "layers")) != dimNames.end())
     idx = vit - dimNames.begin();
   else {
@@ -103,10 +102,10 @@ ErrorCode NCHelperGCRM::init_mesh_vals()
   levDim = idx;
   nLevels = dimLens[idx];
 
-  // Dimension numbers for other optional levels
+  // Dimension indices for other optional levels
   std::vector<unsigned int> opt_lev_dims;
 
-  // Get number of interface levels
+  // Get index of interface levels
   if ((vit = std::find(dimNames.begin(), dimNames.end(), "interfaces")) != dimNames.end()) {
     idx = vit - dimNames.begin();
     opt_lev_dims.push_back(idx);
@@ -179,14 +178,6 @@ ErrorCode NCHelperGCRM::check_existing_mesh()
   if (noMesh) {
     ErrorCode rval;
 
-    // Restore numCellGroups
-    if (0 == numCellGroups) {
-      Tag numCellGroupsTag;
-      rval = mbImpl->tag_get_handle("__NUM_CELL_GROUPS", 1, MB_TYPE_INTEGER, numCellGroupsTag);
-      if (MB_SUCCESS == rval)
-        rval = mbImpl->tag_get_data(numCellGroupsTag, &_fileSet, 1, &numCellGroups);
-    }
-
     if (localGidVerts.empty()) {
       // Get all vertices from tmp_set (it is the input set in no_mesh scenario)
       Range local_verts;
@@ -247,14 +238,6 @@ ErrorCode NCHelperGCRM::check_existing_mesh()
         // Restore localGidCells
         std::copy(gids.rbegin(), gids.rend(), range_inserter(localGidCells));
         nLocalCells = localGidCells.size();
-
-        if (numCellGroups > 1) {
-          // Restore cellHandleToGlobalID map
-          Range::const_iterator rit;
-          int i;
-          for (rit = local_cells.begin(), i = 0; rit != local_cells.end(); ++rit, i++)
-            cellHandleToGlobalID[*rit] = gids[i];
-        }
       }
     }
   }
@@ -264,9 +247,7 @@ ErrorCode NCHelperGCRM::check_existing_mesh()
 
 ErrorCode NCHelperGCRM::create_mesh(Range& faces)
 {
-  Interface*& mbImpl = _readNC->mbImpl;
   int& gatherSetRank = _readNC->gatherSetRank;
-  bool& noMixedElements = _readNC->noMixedElements;
   bool& noEdges = _readNC->noEdges;
   DebugOutput& dbgOut = _readNC->dbgOut;
 
@@ -317,9 +298,8 @@ ErrorCode NCHelperGCRM::create_mesh(Range& faces)
   int verticesOnCellVarId;
   int success = NCFUNC(inq_varid)(_fileId, "cell_corners", &verticesOnCellVarId);
   ERRORS(success, "Failed to get variable id of cell_corners.");
-  std::vector<int> vertices_on_local_cells(nLocalCells * maxEdgesPerCell);
+  std::vector<int> vertices_on_local_cells(nLocalCells * EDGES_PER_CELL);
   dbgOut.tprintf(1, " nLocalCells = %d\n", (int)nLocalCells);
-  dbgOut.tprintf(1, " maxEdgesPerCell = %d\n", (int)maxEdgesPerCell);
   dbgOut.tprintf(1, " vertices_on_local_cells.size() = %d\n", (int)vertices_on_local_cells.size());
 #ifdef PNETCDF_FILE
   size_t nb_reads = localGidCells.psize();
@@ -337,7 +317,7 @@ ErrorCode NCHelperGCRM::create_mesh(Range& faces)
     dbgOut.tprintf(1, " cell_corners   endh = %d\n", (int)endh);
     NCDF_SIZE read_starts[2] = {static_cast<NCDF_SIZE>(starth - 1), 0};
     NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(endh - starth + 1), 
-                                static_cast<NCDF_SIZE>(maxEdgesPerCell)};
+                                static_cast<NCDF_SIZE>(EDGES_PER_CELL)};
 
     // Do a partial read in each subrange
 #ifdef PNETCDF_FILE
@@ -350,7 +330,7 @@ ErrorCode NCHelperGCRM::create_mesh(Range& faces)
     ERRORS(success, "Failed to read cell_corners data in a loop");
 
     // Increment the index for next subrange
-    indexInArray += (endh - starth + 1) * maxEdgesPerCell;
+    indexInArray += (endh - starth + 1) * EDGES_PER_CELL;
   }
 
 #ifdef PNETCDF_FILE
@@ -359,11 +339,25 @@ ErrorCode NCHelperGCRM::create_mesh(Range& faces)
   ERRORS(success, "Failed on wait_all.");
 #endif
 
-  // GCRM is 0 based, convert vertex indices from 0 to 1 based
-  for (std::size_t idx = 0; idx < vertices_on_local_cells.size(); idx++) {
-      vertices_on_local_cells[idx] += 1;
+  // Correct vertices_on_local_cells array. Pentagons as hexagons should have
+  // a connectivity like 123455 and not 122345
+  for (int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++) {
+    int* pvertex = &vertices_on_local_cells[local_cell_idx * EDGES_PER_CELL];
+    for (int k = 0; k < EDGES_PER_CELL - 2; k++) {
+      if (*(pvertex + k) == *(pvertex + k + 1)) {
+        // Shift the connectivity
+        for (int kk = k + 1; kk < EDGES_PER_CELL - 1; kk++)
+          *(pvertex + kk) = *(pvertex + kk + 1);
+        // No need to try next k
+        break;
+      }
+    }
   }
 
+  // GCRM is 0 based, convert vertex indices from 0 to 1 based
+  for (std::size_t idx = 0; idx < vertices_on_local_cells.size(); idx++)
+    vertices_on_local_cells[idx] += 1;
+
   // Create local vertices
   EntityHandle start_vertex;
   ErrorCode rval = create_local_vertices(vertices_on_local_cells, start_vertex);
@@ -375,22 +369,9 @@ ErrorCode NCHelperGCRM::create_mesh(Range& faces)
     ERRORR(rval, "Failed to create local edges for GCRM mesh.");
   }
 
-  // Create local cells, either unpadded or padded
-  if (noMixedElements) {
-    rval = create_padded_local_cells(vertices_on_local_cells, start_vertex, faces);
-    ERRORR(rval, "Failed to create padded local cells for GCRM mesh.");
-  }
-  else {
-    rval = create_local_cells(vertices_on_local_cells, start_vertex, faces);
-    ERRORR(rval, "Failed to create local cells for GCRM mesh.");
-  }
-
-  // Set tag for numCellGroups
-  Tag numCellGroupsTag = 0;
-  rval = mbImpl->tag_get_handle("__NUM_CELL_GROUPS", 1, MB_TYPE_INTEGER, numCellGroupsTag, MB_TAG_SPARSE | MB_TAG_CREAT);
-  ERRORR(rval, "Failed to get __NUM_CELL_GROUPS tag.");
-  rval = mbImpl->tag_set_data(numCellGroupsTag, &_fileSet, 1, &numCellGroups);
-  ERRORR(rval, "Failed to set data for __NUM_CELL_GROUPS tag.");
+  // Create local cells with padding
+  rval = create_padded_local_cells(vertices_on_local_cells, start_vertex, faces);
+  ERRORR(rval, "Failed to create local cells for GCRM mesh.");
 
   if (createGatherSet) {
     EntityHandle gather_set;
@@ -408,15 +389,9 @@ ErrorCode NCHelperGCRM::create_mesh(Range& faces)
       ERRORR(rval, "Failed to create gather set edges for GCRM mesh.");
     }
 
-    // Create gather set cells, either unpadded or padded
-    if (noMixedElements) {
-      rval = create_padded_gather_set_cells(gather_set, start_gather_set_vertex);
-      ERRORR(rval, "Failed to create padded gather set cells for GCRM mesh.");
-    }
-    else {
-      rval = create_gather_set_cells(gather_set, start_gather_set_vertex);
-      ERRORR(rval, "Failed to create gather set cells for GCRM mesh.");
-    }
+    // Create gather set cells with padding
+    rval = create_padded_gather_set_cells(gather_set, start_gather_set_vertex);
+    ERRORR(rval, "Failed to create gather set cells for GCRM mesh.");
   }
 
   return MB_SUCCESS;
@@ -541,19 +516,13 @@ ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset_allocate(std::vector<ReadNC:
       }
 
       // Get ptr to tag space
-      if (vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1) {
-        // For a cell variable that is NOT on one contiguous chunk of faces, defer its tag space allocation
-        vdatas[i].varDatas[t] = NULL;
-      }
-      else {
-        assert(1 == range->psize());
-        void* data;
-        int count;
-        rval = mbImpl->tag_iterate(vdatas[i].varTags[t], range->begin(), range->end(), count, data);
-        ERRORR(rval, "Failed to iterate tag.");
-        assert((unsigned)count == range->size());
-        vdatas[i].varDatas[t] = data;
-      }
+      assert(1 == range->psize());
+      void* data;
+      int count;
+      rval = mbImpl->tag_iterate(vdatas[i].varTags[t], range->begin(), range->end(), count, data);
+      ERRORR(rval, "Failed to iterate tag.");
+      assert((unsigned)count == range->size());
+      vdatas[i].varDatas[t] = data;
     }
   }
 
@@ -563,7 +532,6 @@ ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset_allocate(std::vector<ReadNC:
 #ifdef PNETCDF_FILE
 ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset_async(std::vector<ReadNC::VarData>& vdatas, std::vector<int>& tstep_nums)
 {
-  Interface*& mbImpl = _readNC->mbImpl;
   bool& noEdges = _readNC->noEdges;
   DebugOutput& dbgOut = _readNC->dbgOut;
 
@@ -647,32 +615,9 @@ ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset_async(std::vector<ReadNC::Va
           success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
           ERRORS(success, "Failed on wait_all.");
 
-          if (vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1) {
-            // For a cell variable that is NOT on one contiguous chunk of faces, allocate tag space for
-            // each cell group, and utilize cellHandleToGlobalID map to read tag data
-            Range::iterator iter = facesOwned.begin();
-            while (iter != facesOwned.end()) {
-              int count;
-              void* ptr;
-              rval = mbImpl->tag_iterate(vdatas[i].varTags[t], iter, facesOwned.end(), count, ptr);
-              ERRORR(rval, "Failed to iterate tag on owned faces.");
-
-              for (int j = 0; j < count; j++) {
-                int global_cell_idx = cellHandleToGlobalID[*(iter + j)]; // Global cell index, 1 based
-                int local_cell_idx = localGidCells.index(global_cell_idx); // Local cell index, 0 based
-                assert(local_cell_idx != -1);
-                for (int level = 0; level < vdatas[i].numLev; level++)
-                  ((double*) ptr)[j * vdatas[i].numLev + level] = tmpdoubledata[local_cell_idx * vdatas[i].numLev + level];
-              }
-
-              iter += count;
-            }
-          }
-          else {
-            void* data = vdatas[i].varDatas[t];
-            for (std::size_t idx = 0; idx != tmpdoubledata.size(); idx++)
-              ((double*) data)[idx] = tmpdoubledata[idx];
-          }
+          void* data = vdatas[i].varDatas[t];
+          for (std::size_t idx = 0; idx != tmpdoubledata.size(); idx++)
+            ((double*) data)[idx] = tmpdoubledata[idx];
 
           break;
         }
@@ -694,17 +639,6 @@ ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset_async(std::vector<ReadNC::Va
     }
   }
 
-  for (unsigned int i = 0; i < vdatas.size(); i++) {
-    if (noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE)
-      continue;
-
-    /*for (unsigned int t = 0; t < tstep_nums.size(); t++) {
-      dbgOut.tprintf(2, "Converting variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t]);
-      ErrorCode tmp_rval = convert_variable(vdatas[i], t);
-      if (MB_SUCCESS != tmp_rval)
-        rval = tmp_rval;
-    }*/
-  }
   // Debug output, if requested
   if (1 == dbgOut.get_verbosity()) {
     dbgOut.printf(1, "Read variables: %s", vdatas.begin()->varName.c_str());
@@ -790,32 +724,9 @@ ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset(std::vector<ReadNC::VarData>
           }
           assert(ic == pLocalGid->psize());
 
-          if (vdatas[i].entLoc == ReadNC::ENTLOCFACE && numCellGroups > 1) {
-            // For a cell variable that is NOT on one contiguous chunk of faces, allocate tag space for
-            // each cell group, and utilize cellHandleToGlobalID map to read tag data
-            Range::iterator iter = facesOwned.begin();
-            while (iter != facesOwned.end()) {
-              int count;
-              void* ptr;
-              rval = mbImpl->tag_iterate(vdatas[i].varTags[t], iter, facesOwned.end(), count, ptr);
-              ERRORR(rval, "Failed to iterate tag on owned faces.");
-
-              for (int j = 0; j < count; j++) {
-                int global_cell_idx = cellHandleToGlobalID[*(iter + j)]; // Global cell index, 1 based
-                int local_cell_idx = localGidCells.index(global_cell_idx); // Local cell index, 0 based
-                assert(local_cell_idx != -1);
-                for (int level = 0; level < vdatas[i].numLev; level++)
-                  ((double*) ptr)[j * vdatas[i].numLev + level] = tmpdoubledata[local_cell_idx * vdatas[i].numLev + level];
-              }
-
-              iter += count;
-            }
-          }
-          else {
-            void* data = vdatas[i].varDatas[t];
-            for (std::size_t idx = 0; idx != tmpdoubledata.size(); idx++)
-              ((double*) data)[idx] = tmpdoubledata[idx];
-          }
+          void* data = vdatas[i].varDatas[t];
+          for (std::size_t idx = 0; idx != tmpdoubledata.size(); idx++)
+            ((double*) data)[idx] = tmpdoubledata[idx];
 
           break;
         }
@@ -836,18 +747,6 @@ ErrorCode NCHelperGCRM::read_ucd_variable_to_nonset(std::vector<ReadNC::VarData>
     }
   }
 
-  for (unsigned int i = 0; i < vdatas.size(); i++) {
-    if (noEdges && vdatas[i].entLoc == ReadNC::ENTLOCEDGE)
-      continue;
-
-   /* for (unsigned int t = 0; t < tstep_nums.size(); t++) {
-      dbgOut.tprintf(2, "Converting variable %s, time step %d\n", vdatas[i].varName.c_str(), tstep_nums[t]);
-      ErrorCode tmp_rval = convert_variable(vdatas[i], t);
-      if (MB_SUCCESS != tmp_rval)
-        rval = tmp_rval;
-    }*/
-  }
-
   // Debug output, if requested
   if (1 == dbgOut.get_verbosity()) {
     dbgOut.printf(1, "Read variables: %s", vdatas.begin()->varName.c_str());
@@ -865,8 +764,8 @@ ErrorCode NCHelperGCRM::redistribute_local_cells(int start_cell_idx)
   // If possible, apply Zoltan partition
   if (_readNC->partMethod == ScdParData::RCBZOLTAN) {
 #if defined(USE_MPI) && defined(HAVE_ZOLTAN)
-    // Read lat/lon coordinates of cell centers
-    // then convert to spherical , and use them as input to zoltan partition
+    // Read lat/lon coordinates of cell centers, then convert spherical to
+    // Cartesian, and use them as input to Zoltan partition
     int xCellVarId;
     int success = NCFUNC(inq_varid)(_fileId, "grid_center_lat", &xCellVarId);
     ERRORS(success, "Failed to get variable id of grid_center_lat.");
@@ -874,7 +773,7 @@ ErrorCode NCHelperGCRM::redistribute_local_cells(int start_cell_idx)
     NCDF_SIZE read_start = static_cast<NCDF_SIZE>(start_cell_idx - 1);
     NCDF_SIZE read_count = static_cast<NCDF_SIZE>(nLocalCells);
     success = NCFUNCAG(_vara_double)(_fileId, xCellVarId, &read_start, &read_count, &xCell[0]);
-    ERRORS(success, "Failed to read xCell data.");
+    ERRORS(success, "Failed to read grid_center_lat data.");
 
     // Read y coordinates of cell centers
     int yCellVarId;
@@ -882,14 +781,12 @@ ErrorCode NCHelperGCRM::redistribute_local_cells(int start_cell_idx)
     ERRORS(success, "Failed to get variable id of grid_center_lon.");
     std::vector<double> yCell(nLocalCells);
     success = NCFUNCAG(_vara_double)(_fileId, yCellVarId, &read_start, &read_count, &yCell[0]);
-    ERRORS(success, "Failed to read yCell data.");
+    ERRORS(success, "Failed to read grid_center_lon data.");
 
+    // Convert lon/lat/rad to x/y/z
     std::vector<double> zCell(nLocalCells);
-    // convert to xyz cartesian coordinates
-
-    double rad=8000; // this is just approx x is lat, y is lon
-    for (int i=0; i<nLocalCells; i++)
-    {
+    double rad = 8000.0; // This is just an approximate radius
+    for (int i = 0; i < nLocalCells; i++) {
       double cosphi = cos(xCell[i]);
       double zmult = sin(xCell[i]);
       double xmult = cosphi * cos(yCell[i]);
@@ -898,6 +795,7 @@ ErrorCode NCHelperGCRM::redistribute_local_cells(int start_cell_idx)
       yCell[i] = rad * ymult;
       zCell[i] = rad * zmult;
     }
+
     // Zoltan partition using RCB; maybe more studies would be good, as to which partition
     // is better
     Interface*& mbImpl = _readNC->mbImpl;
@@ -991,20 +889,18 @@ ErrorCode NCHelperGCRM::create_local_vertices(const std::vector<int>& vertices_o
     ERRORR(MB_FAILURE, "Couldn't find 'layers' or 'interfaces' variable.");
   }
 
-  {
-    // Decide whether down is positive
-    char posval[10] = {0};
-    int success = NCFUNC(get_att_text)(_fileId, (*vmit).second.varId, "positive", posval);
-    if (0 == success && !strncmp(posval, "down", 4)) {
-      for (std::vector<double>::iterator dvit = levVals.begin(); dvit != levVals.end(); ++dvit)
-        (*dvit) *= -1.0;
-    }
+  // Decide whether down is positive
+  char posval[10] = {0};
+  int success = NCFUNC(get_att_text)(_fileId, (*vmit).second.varId, "positive", posval);
+  if (0 == success && !strncmp(posval, "down", 4)) {
+    for (std::vector<double>::iterator dvit = levVals.begin(); dvit != levVals.end(); ++dvit)
+      (*dvit) *= -1.0;
   }
 
   // Read x coordinates for local vertices
   double* xptr = arrays[0];
   int xVertexVarId;
-  int success = NCFUNC(inq_varid)(_fileId, "grid_corner_lon", &xVertexVarId);
+  success = NCFUNC(inq_varid)(_fileId, "grid_corner_lon", &xVertexVarId);
   ERRORS(success, "Failed to get variable id of grid_corner_lon.");
   size_t indexInArray = 0;
   for (Range::pair_iterator pair_iter = localGidVerts.pair_begin();
@@ -1074,11 +970,10 @@ ErrorCode NCHelperGCRM::create_local_vertices(const std::vector<int>& vertices_o
 
   // Convert lon/lat/rad to x/y/z
   double* zptr = arrays[2];
-  //const double pideg = acos(-1.0) / 180.0;
   double rad = 8000.0 + levVals[0];
   for (int i = 0; i < nLocalVertices; i++) {
     double cosphi = cos(yptr[i]);
-    double zmult =  sin(yptr[i]);
+    double zmult = sin(yptr[i]);
     double xmult = cosphi * cos(xptr[i]);
     double ymult = cosphi * sin(xptr[i]);
     xptr[i] = rad * xmult;
@@ -1100,7 +995,7 @@ ErrorCode NCHelperGCRM::create_local_edges(EntityHandle start_vertex)
   int success = NCFUNC(inq_varid)(_fileId, "cell_edges", &edgesOnCellVarId);
   ERRORS(success, "Failed to get variable id of cell_edges.");
 
-  std::vector<int> edges_on_local_cells(nLocalCells * maxEdgesPerCell);
+  std::vector<int> edges_on_local_cells(nLocalCells * EDGES_PER_CELL);
   dbgOut.tprintf(1, "   edges_on_local_cells.size() = %d\n", (int)edges_on_local_cells.size());
 
 #ifdef PNETCDF_FILE
@@ -1118,7 +1013,7 @@ ErrorCode NCHelperGCRM::create_local_edges(EntityHandle start_vertex)
     dbgOut.tprintf(1, "   starth = %d\n", (int)starth);
     dbgOut.tprintf(1, "   endh = %d\n", (int)endh);
     NCDF_SIZE read_starts[2] = {static_cast<NCDF_SIZE>(starth - 1), 0};
-    NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(endh - starth + 1), static_cast<NCDF_SIZE>(maxEdgesPerCell)};
+    NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(endh - starth + 1), static_cast<NCDF_SIZE>(EDGES_PER_CELL)};
 
     // Do a partial read in each subrange
 #ifdef PNETCDF_FILE
@@ -1131,7 +1026,7 @@ ErrorCode NCHelperGCRM::create_local_edges(EntityHandle start_vertex)
     ERRORS(success, "Failed to read cell_edges data in a loop");
 
     // Increment the index for next subrange
-    indexInArray += (endh - starth + 1) * maxEdgesPerCell;
+    indexInArray += (endh - starth + 1) * EDGES_PER_CELL;
   }
 
 #ifdef PNETCDF_FILE
@@ -1141,9 +1036,8 @@ ErrorCode NCHelperGCRM::create_local_edges(EntityHandle start_vertex)
 #endif
 
   // GCRM is 0 based, convert edge indices from 0 to 1 based
-  for (std::size_t idx = 0; idx < edges_on_local_cells.size(); idx++) {
-      edges_on_local_cells[idx] += 1;
-  }
+  for (std::size_t idx = 0; idx < edges_on_local_cells.size(); idx++)
+    edges_on_local_cells[idx] += 1;
 
   // Collect local edges
   std::sort(edges_on_local_cells.begin(), edges_on_local_cells.end());
@@ -1217,16 +1111,12 @@ ErrorCode NCHelperGCRM::create_local_edges(EntityHandle start_vertex)
   ERRORS(success, "Failed on wait_all.");
 #endif
 
-  // GCRM is 0 based, convert edge indices from 0 to 1 based
-  for (int idx = 0; idx < nLocalEdges*2; idx++) {
-      vertices_on_local_edges[idx] += 1;
-  }
-
   // Populate connectivity data for local edges
   // Convert in-place from int (stored in the first half) to EntityHandle
   // Reading backward is the trick
   for (int edge_vert = nLocalEdges * 2 - 1; edge_vert >= 0; edge_vert--) {
-    int global_vert_idx = vertices_on_local_edges[edge_vert]; // Global vertex index, 1 based
+    // Note, indices stored in vertices_on_local_edges are 0 based
+    int global_vert_idx = vertices_on_local_edges[edge_vert] + 1; // Global vertex index, 1 based
     int local_vert_idx = localGidVerts.index(global_vert_idx); // Local vertex index, 0 based
     assert(local_vert_idx != -1);
     conn_arr_edges[edge_vert] = start_vertex + local_vert_idx;
@@ -1235,104 +1125,16 @@ ErrorCode NCHelperGCRM::create_local_edges(EntityHandle start_vertex)
   return MB_SUCCESS;
 }
 
-ErrorCode NCHelperGCRM::create_local_cells(const std::vector<int>& vertices_on_local_cells,
-                                                    EntityHandle start_vertex, Range& faces)
-{
-  Interface*& mbImpl = _readNC->mbImpl;
-  Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
-
-  // Divide local cells into groups based on the number of edges
-  Range local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
-  // Insert larger values before smaller ones to increase efficiency
-  for (int i = nLocalCells - 1; i >= 0; i--) {
-    int num_edges = DEFAULT_MAX_EDGES_PER_CELL;
-    local_cells_with_n_edges[num_edges].insert(localGidCells[i]); // Global cell index
-  }
-
-  std::vector<int> num_edges_on_cell_groups;
-  for (int i = 3; i <= maxEdgesPerCell; i++) {
-    if (local_cells_with_n_edges[i].size() > 0)
-      num_edges_on_cell_groups.push_back(i);
-  }
-  numCellGroups = num_edges_on_cell_groups.size();
-
-  EntityHandle* conn_arr_local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
-  for (int i = 0; i < numCellGroups; i++) {
-    int num_edges_per_cell = num_edges_on_cell_groups[i];
-    int num_group_cells = (int)local_cells_with_n_edges[num_edges_per_cell].size();
-
-    // Create local cells for each non-empty cell group
-    EntityHandle start_element;
-    ErrorCode rval = _readNC->readMeshIface->get_element_connect(num_group_cells, num_edges_per_cell, MBPOLYGON, 0, start_element,
-                                                       conn_arr_local_cells_with_n_edges[num_edges_per_cell], num_group_cells);
-    ERRORR(rval, "Failed to create cells");
-    faces.insert(start_element, start_element + num_group_cells - 1);
-
-    // Add local cells to the file set
-    Range local_cells_range(start_element, start_element + num_group_cells - 1);
-    rval = _readNC->mbImpl->add_entities(_fileSet, local_cells_range);
-    ERRORR(rval, "Failed to add local cells to the file set.");
-
-    // Get ptr to gid memory for local cells
-    int count = 0;
-    void* data = NULL;
-    rval = mbImpl->tag_iterate(mGlobalIdTag, local_cells_range.begin(), local_cells_range.end(), count, data);
-    ERRORR(rval, "Failed to iterate global id tag on local cells.");
-    assert(count == num_group_cells);
-    int* gid_data = (int*) data;
-    std::copy(local_cells_with_n_edges[num_edges_per_cell].begin(), local_cells_with_n_edges[num_edges_per_cell].end(), gid_data);
-
-    // Set connectivity array with proper local vertices handles
-    for (int j = 0; j < num_group_cells; j++) {
-      EntityHandle global_cell_idx = local_cells_with_n_edges[num_edges_per_cell][j]; // Global cell index, 1 based
-      int local_cell_idx = localGidCells.index(global_cell_idx); // Local cell index, 0 based
-      assert(local_cell_idx != -1);
-
-      if (numCellGroups > 1) {
-        // Populate cellHandleToGlobalID map to read cell variables
-        cellHandleToGlobalID[start_element + j] = global_cell_idx;
-      }
-
-      for (int k = 0; k < num_edges_per_cell; k++) {
-        EntityHandle global_vert_idx = vertices_on_local_cells[local_cell_idx * maxEdgesPerCell + k]; // Global vertex index, 1 based
-        int local_vert_idx = localGidVerts.index(global_vert_idx); // Local vertex index, 0 based
-        assert(local_vert_idx != -1);
-        conn_arr_local_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
-            start_vertex + local_vert_idx;
-      }
-      // make sure that if some nodes are repeated, they are at the end of the connectivity array
-      // so, pentagons as hexagons should have a connectivity like 123455 and not 122345
-      EntityHandle *pvertex= &(conn_arr_local_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell ]);
-      for (int  k = 0; k < num_edges_per_cell-2; k++)
-      {
-        if( *(pvertex+k) == *(pvertex+k+1) )
-        {
-          // shift the connectivity
-          for (int kk=k+1; kk<num_edges_per_cell-2; kk++)
-          {
-            *(pvertex+kk)=*(pvertex+kk+1);
-          }
-        }
-      }
-    }
-  }
-
-  return MB_SUCCESS;
-}
-
 ErrorCode NCHelperGCRM::create_padded_local_cells(const std::vector<int>& vertices_on_local_cells,
                                                   EntityHandle start_vertex, Range& faces)
 {
   Interface*& mbImpl = _readNC->mbImpl;
   Tag& mGlobalIdTag = _readNC->mGlobalIdTag;
 
-  // Only one group of cells (each cell is represented by a polygon with maxEdgesPerCell edges)
-  numCellGroups = 1;
-
-  // Create cells for this cell group
+  // Create cells
   EntityHandle start_element;
   EntityHandle* conn_arr_local_cells = NULL;
-  ErrorCode rval = _readNC->readMeshIface->get_element_connect(nLocalCells, maxEdgesPerCell, MBPOLYGON, 0, start_element, conn_arr_local_cells,
+  ErrorCode rval = _readNC->readMeshIface->get_element_connect(nLocalCells, EDGES_PER_CELL, MBPOLYGON, 0, start_element, conn_arr_local_cells,
                                                     // Might have to create gather mesh later
                                                     (createGatherSet ? nLocalCells + nCells : nLocalCells));
   ERRORR(rval, "Failed to create cells.");
@@ -1353,14 +1155,15 @@ ErrorCode NCHelperGCRM::create_padded_local_cells(const std::vector<int>& vertic
   std::copy(localGidCells.begin(), localGidCells.end(), gid_data);
 
   // Set connectivity array with proper local vertices handles
-  // vertices_on_local_cells array was already corrected to have the last vertices padded
-  // no need for extra checks considering
+  // vertices_on_local_cells array was already corrected to have
+  // the last vertices repeated for pentagons, e.g. 122345 => 123455
   for (int local_cell_idx = 0; local_cell_idx < nLocalCells; local_cell_idx++) {
-    for (int i = 0; i < maxEdgesPerCell; i++) {
-      EntityHandle global_vert_idx = vertices_on_local_cells[local_cell_idx * maxEdgesPerCell + i]; // Global vertex index, 1 based
+    for (int i = 0; i < EDGES_PER_CELL; i++) {
+      // Note, indices stored in vertices_on_local_cells are 1 based
+      EntityHandle global_vert_idx = vertices_on_local_cells[local_cell_idx * EDGES_PER_CELL + i]; // Global vertex index, 1 based
       int local_vert_idx = localGidVerts.index(global_vert_idx); // Local vertex index, 0 based
       assert(local_vert_idx != -1);
-      conn_arr_local_cells[local_cell_idx * maxEdgesPerCell + i] = start_vertex + local_vert_idx;
+      conn_arr_local_cells[local_cell_idx * EDGES_PER_CELL + i] = start_vertex + local_vert_idx;
     }
   }
 
@@ -1387,8 +1190,8 @@ ErrorCode NCHelperGCRM::create_gather_set_vertices(EntityHandle gather_set, Enti
   // Read x coordinates for gather set vertices
   double* xptr = arrays[0];
   int xVertexVarId;
-  int success = NCFUNC(inq_varid)(_fileId, "xVertex", &xVertexVarId);
-  ERRORS(success, "Failed to get variable id of xVertex.");
+  int success = NCFUNC(inq_varid)(_fileId, "grid_corner_lon", &xVertexVarId);
+  ERRORS(success, "Failed to get variable id of grid_corner_lon.");
   NCDF_SIZE read_start = 0;
   NCDF_SIZE read_count = static_cast<NCDF_SIZE>(nVertices);
 #ifdef PNETCDF_FILE
@@ -1396,49 +1199,44 @@ ErrorCode NCHelperGCRM::create_gather_set_vertices(EntityHandle gather_set, Enti
   success = NCFUNC(begin_indep_data)(_fileId);
   ERRORS(success, "Failed to begin independent I/O mode.");
   success = NCFUNCG(_vara_double)(_fileId, xVertexVarId, &read_start, &read_count, xptr);
-  ERRORS(success, "Failed to read xVertex data.");
+  ERRORS(success, "Failed to read grid_corner_lon data.");
   success = NCFUNC(end_indep_data)(_fileId);
   ERRORS(success, "Failed to end independent I/O mode.");
 #else
   success = NCFUNCG(_vara_double)(_fileId, xVertexVarId, &read_start, &read_count, xptr);
-  ERRORS(success, "Failed to read xVertex data.");
+  ERRORS(success, "Failed to read grid_corner_lon data.");
 #endif
 
   // Read y coordinates for gather set vertices
   double* yptr = arrays[1];
   int yVertexVarId;
-  success = NCFUNC(inq_varid)(_fileId, "yVertex", &yVertexVarId);
-  ERRORS(success, "Failed to get variable id of yVertex.");
+  success = NCFUNC(inq_varid)(_fileId, "grid_corner_lat", &yVertexVarId);
+  ERRORS(success, "Failed to get variable id of grid_corner_lat.");
 #ifdef PNETCDF_FILE
   // Enter independent I/O mode, since this read is only for the gather processor
   success = NCFUNC(begin_indep_data)(_fileId);
   ERRORS(success, "Failed to begin independent I/O mode.");
   success = NCFUNCG(_vara_double)(_fileId, yVertexVarId, &read_start, &read_count, yptr);
-  ERRORS(success, "Failed to read yVertex data.");
+  ERRORS(success, "Failed to read grid_corner_lat data.");
   success = NCFUNC(end_indep_data)(_fileId);
   ERRORS(success, "Failed to end independent I/O mode.");
 #else
   success = NCFUNCG(_vara_double)(_fileId, yVertexVarId, &read_start, &read_count, yptr);
-  ERRORS(success, "Failed to read yVertex data.");
+  ERRORS(success, "Failed to read grid_corner_lat data.");
 #endif
 
-  // Read z coordinates for gather set vertices
+  // Convert lon/lat/rad to x/y/z
   double* zptr = arrays[2];
-  int zVertexVarId;
-  success = NCFUNC(inq_varid)(_fileId, "zVertex", &zVertexVarId);
-  ERRORS(success, "Failed to get variable id of zVertex.");
-#ifdef PNETCDF_FILE
-  // Enter independent I/O mode, since this read is only for the gather processor
-  success = NCFUNC(begin_indep_data)(_fileId);
-  ERRORS(success, "Failed to begin independent I/O mode.");
-  success = NCFUNCG(_vara_double)(_fileId, zVertexVarId, &read_start, &read_count, zptr);
-  ERRORS(success, "Failed to read zVertex data.");
-  success = NCFUNC(end_indep_data)(_fileId);
-  ERRORS(success, "Failed to end independent I/O mode.");
-#else
-  success = NCFUNCG(_vara_double)(_fileId, zVertexVarId, &read_start, &read_count, zptr);
-  ERRORS(success, "Failed to read zVertex data.");
-#endif
+  double rad = 8000.0 + levVals[0];
+  for (int i = 0; i < nVertices; i++) {
+    double cosphi = cos(yptr[i]);
+    double zmult = sin(yptr[i]);
+    double xmult = cosphi * cos(xptr[i]);
+    double ymult = cosphi * sin(xptr[i]);
+    xptr[i] = rad * xmult;
+    yptr[i] = rad * ymult;
+    zptr[i] = rad * zmult;
+  }
 
   // Get ptr to GID memory for gather set vertices
   int count = 0;
@@ -1504,8 +1302,8 @@ ErrorCode NCHelperGCRM::create_gather_set_edges(EntityHandle gather_set, EntityH
    // Convert in-place from int (stored in the first half) to EntityHandle
    // Reading backward is the trick
    for (int edge_vert = nEdges * 2 - 1; edge_vert >= 0; edge_vert--) {
-     int gather_set_vert_idx = vertices_on_gather_set_edges[edge_vert]; // Global vertex index, 1 based
-     gather_set_vert_idx--; // 1 based -> 0 based
+     // Note, indices stored in vertices_on_gather_set_edges are 0 based
+     int gather_set_vert_idx = vertices_on_gather_set_edges[edge_vert]; // Global vertex index, 0 based
      // Connectivity array is shifted by where the gather set vertices start
      conn_arr_gather_set_edges[edge_vert] = gather_set_start_vertex + gather_set_vert_idx;
    }
@@ -1513,121 +1311,15 @@ ErrorCode NCHelperGCRM::create_gather_set_edges(EntityHandle gather_set, EntityH
    return MB_SUCCESS;
 }
 
-ErrorCode NCHelperGCRM::create_gather_set_cells(EntityHandle gather_set, EntityHandle gather_set_start_vertex)
-{
-  Interface*& mbImpl = _readNC->mbImpl;
-
-  // Read number of edges on each gather set cell
-  int nEdgesOnCellVarId;
-  int success = NCFUNC(inq_varid)(_fileId, "nEdgesOnCell", &nEdgesOnCellVarId);
-  ERRORS(success, "Failed to get variable id of nEdgesOnCell.");
-  std::vector<int> num_edges_on_gather_set_cells(nCells);
-  NCDF_SIZE read_start = 0;
-  NCDF_SIZE read_count = static_cast<NCDF_SIZE>(nCells);
-#ifdef PNETCDF_FILE
-  // Enter independent I/O mode, since this read is only for the gather processor
-  success = NCFUNC(begin_indep_data)(_fileId);
-  ERRORS(success, "Failed to begin independent I/O mode.");
-  success = NCFUNCG(_vara_int)(_fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0]);
-  ERRORS(success, "Failed to read nEdgesOnCell data.");
-  success = NCFUNC(end_indep_data)(_fileId);
-  ERRORS(success, "Failed to end independent I/O mode.");
-#else
-  success = NCFUNCG(_vara_int)(_fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0]);
-  ERRORS(success, "Failed to read nEdgesOnCell data.");
-#endif
-
-  // Read vertices on each gather set cell (connectivity)
-  int verticesOnCellVarId;
-  success = NCFUNC(inq_varid)(_fileId, "verticesOnCell", &verticesOnCellVarId);
-  ERRORS(success, "Failed to get variable id of verticesOnCell.");
-  std::vector<int> vertices_on_gather_set_cells(nCells * maxEdgesPerCell);
-  NCDF_SIZE read_starts[2] = {0, 0};
-  NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(nCells), static_cast<NCDF_SIZE>(maxEdgesPerCell)};
-#ifdef PNETCDF_FILE
-  // Enter independent I/O mode, since this read is only for the gather processor
-  success = NCFUNC(begin_indep_data)(_fileId);
-  ERRORS(success, "Failed to begin independent I/O mode.");
-  success = NCFUNCG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts, &vertices_on_gather_set_cells[0]);
-  ERRORS(success, "Failed to read verticesOnCell data.");
-  success = NCFUNC(end_indep_data)(_fileId);
-  ERRORS(success, "Failed to end independent I/O mode.");
-#else
-  success = NCFUNCG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts, &vertices_on_gather_set_cells[0]);
-  ERRORS(success, "Failed to read verticesOnCell data.");
-#endif
-
-  // Divide gather set cells into groups based on the number of edges
-  Range gather_set_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
-  // Insert larger values before smaller values to increase efficiency
-  for (int i = nCells - 1; i >= 0; i--) {
-    int num_edges = num_edges_on_gather_set_cells[i];
-    gather_set_cells_with_n_edges[num_edges].insert(i + 1); // 0 based -> 1 based
-  }
-
-  // Create gather set cells
-  EntityHandle* conn_arr_gather_set_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
-  for (int num_edges_per_cell = 3; num_edges_per_cell <= maxEdgesPerCell; num_edges_per_cell++) {
-    int num_group_cells = gather_set_cells_with_n_edges[num_edges_per_cell].size();
-    if (num_group_cells > 0) {
-      EntityHandle start_element;
-      ErrorCode rval = _readNC->readMeshIface->get_element_connect(num_group_cells, num_edges_per_cell, MBPOLYGON, 0, start_element,
-                                                         conn_arr_gather_set_cells_with_n_edges[num_edges_per_cell], num_group_cells);
-      ERRORR(rval, "Failed to create cells.");
-
-      // Add cells to the gather set
-      Range gather_set_cells_range(start_element, start_element + num_group_cells - 1);
-      rval = mbImpl->add_entities(gather_set, gather_set_cells_range);
-      ERRORR(rval, "Failed to add cells to the gather set.");
-
-      for (int j = 0; j < num_group_cells; j++) {
-        int gather_set_cell_idx = gather_set_cells_with_n_edges[num_edges_per_cell][j]; // Global cell index, 1 based
-        gather_set_cell_idx--; // 1 based -> 0 based
-
-        for (int k = 0; k < num_edges_per_cell; k++) {
-          EntityHandle gather_set_vert_idx = vertices_on_gather_set_cells[gather_set_cell_idx * maxEdgesPerCell + k]; // Global vertex index, 1 based
-          gather_set_vert_idx--; // 1 based -> 0 based
-
-          // Connectivity array is shifted by where the gather set vertices start
-          conn_arr_gather_set_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
-            gather_set_start_vertex + gather_set_vert_idx;
-        }
-      }
-    }
-  }
-
-  return MB_SUCCESS;
-}
-
 ErrorCode NCHelperGCRM::create_padded_gather_set_cells(EntityHandle gather_set, EntityHandle gather_set_start_vertex)
 {
   Interface*& mbImpl = _readNC->mbImpl;
 
-  // Read number of edges on each gather set cell
-  int nEdgesOnCellVarId;
-  int success = NCFUNC(inq_varid)(_fileId, "nEdgesOnCell", &nEdgesOnCellVarId);
-  ERRORS(success, "Failed to get variable id of nEdgesOnCell.");
-  std::vector<int> num_edges_on_gather_set_cells(nCells);
-  NCDF_SIZE read_start = 0;
-  NCDF_SIZE read_count = static_cast<NCDF_SIZE>(nCells);
-#ifdef PNETCDF_FILE
-  // Enter independent I/O mode, since this read is only for the gather processor
-  success = NCFUNC(begin_indep_data)(_fileId);
-  ERRORS(success, "Failed to begin independent I/O mode.");
-  success = NCFUNCG(_vara_int)(_fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0]);
-  ERRORS(success, "Failed to read nEdgesOnCell data.");
-  success = NCFUNC(end_indep_data)(_fileId);
-  ERRORS(success, "Failed to end independent I/O mode.");
-#else
-  success = NCFUNCG(_vara_int)(_fileId, nEdgesOnCellVarId, &read_start, &read_count, &num_edges_on_gather_set_cells[0]);
-  ERRORS(success, "Failed to read nEdgesOnCell data.");
-#endif
-
   // Create gather set cells
   EntityHandle start_element;
   EntityHandle* conn_arr_gather_set_cells = NULL;
   // Don't need to specify allocation number here, because we know enough cells were created before
-  ErrorCode rval = _readNC->readMeshIface->get_element_connect(nCells, maxEdgesPerCell, MBPOLYGON, 0, start_element, conn_arr_gather_set_cells);
+  ErrorCode rval = _readNC->readMeshIface->get_element_connect(nCells, EDGES_PER_CELL, MBPOLYGON, 0, start_element, conn_arr_gather_set_cells);
   ERRORR(rval, "Failed to create cells.");
 
   // Add cells to the gather set
@@ -1637,41 +1329,46 @@ ErrorCode NCHelperGCRM::create_padded_gather_set_cells(EntityHandle gather_set,
 
   // Read vertices on each gather set cell (connectivity)
   int verticesOnCellVarId;
-  success = NCFUNC(inq_varid)(_fileId, "verticesOnCell", &verticesOnCellVarId);
-  ERRORS(success, "Failed to get variable id of verticesOnCell.");
+  int success = NCFUNC(inq_varid)(_fileId, "cell_corners", &verticesOnCellVarId);
+  ERRORS(success, "Failed to get variable id of cell_corners.");
   // Utilize the memory storage pointed by conn_arr_gather_set_cells
   int* vertices_on_gather_set_cells = (int*) conn_arr_gather_set_cells;
   NCDF_SIZE read_starts[2] = {0, 0};
-  NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(nCells), static_cast<NCDF_SIZE>(maxEdgesPerCell)};
+  NCDF_SIZE read_counts[2] = {static_cast<NCDF_SIZE>(nCells), static_cast<NCDF_SIZE>(EDGES_PER_CELL)};
 #ifdef PNETCDF_FILE
   // Enter independent I/O mode, since this read is only for the gather processor
   success = NCFUNC(begin_indep_data)(_fileId);
   ERRORS(success, "Failed to begin independent I/O mode.");
   success = NCFUNCG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells);
-  ERRORS(success, "Failed to read verticesOnCell data.");
+  ERRORS(success, "Failed to read cell_corners data.");
   success = NCFUNC(end_indep_data)(_fileId);
   ERRORS(success, "Failed to end independent I/O mode.");
 #else
   success = NCFUNCG(_vara_int)(_fileId, verticesOnCellVarId, read_starts, read_counts, vertices_on_gather_set_cells);
-  ERRORS(success, "Failed to read verticesOnCell data.");
+  ERRORS(success, "Failed to read cell_corners data.");
 #endif
 
-  // Correct gather set cell vertices array in the same way as local cell vertices array,
-  // replace the padded vertices with the last vertices in the corresponding cells
+  // Correct gather set cell vertices array in the same way as local cell vertices array
+  // Pentagons as hexagons should have a connectivity like 123455 and not 122345
   for (int gather_set_cell_idx = 0; gather_set_cell_idx < nCells; gather_set_cell_idx++) {
-    int num_edges = num_edges_on_gather_set_cells[gather_set_cell_idx];
-    int idx_in_gather_set_vert_arr = gather_set_cell_idx * maxEdgesPerCell;
-    int last_vert_idx = vertices_on_gather_set_cells[idx_in_gather_set_vert_arr + num_edges - 1];
-    for (int i = num_edges; i < maxEdgesPerCell; i++)
-      vertices_on_gather_set_cells[idx_in_gather_set_vert_arr + i] = last_vert_idx;
+    int* pvertex = vertices_on_gather_set_cells + gather_set_cell_idx * EDGES_PER_CELL;
+    for (int k = 0; k < EDGES_PER_CELL - 2; k++) {
+      if (*(pvertex + k) == *(pvertex + k + 1)) {
+        // Shift the connectivity
+        for (int kk = k + 1; kk < EDGES_PER_CELL - 1; kk++)
+          *(pvertex + kk) = *(pvertex + kk + 1);
+        // No need to try next k
+        break;
+      }
+    }
   }
 
   // Populate connectivity data for gather set cells
   // Convert in-place from int (stored in the first half) to EntityHandle
   // Reading backward is the trick
-  for (int cell_vert = nCells * maxEdgesPerCell - 1; cell_vert >= 0; cell_vert--) {
-    int gather_set_vert_idx = vertices_on_gather_set_cells[cell_vert]; // Global vertex index, 1 based
-    gather_set_vert_idx--; // 1 based -> 0 based
+  for (int cell_vert = nCells * EDGES_PER_CELL - 1; cell_vert >= 0; cell_vert--) {
+    // Note, indices stored in vertices_on_gather_set_cells are 0 based
+    int gather_set_vert_idx = vertices_on_gather_set_cells[cell_vert]; // Global vertex index, 0 based
     // Connectivity array is shifted by where the gather set vertices start
     conn_arr_gather_set_cells[cell_vert] = gather_set_start_vertex + gather_set_vert_idx;
   }

diff --git a/src/io/NCHelperGCRM.hpp b/src/io/NCHelperGCRM.hpp
index 3c445f3..160e538 100644
--- a/src/io/NCHelperGCRM.hpp
+++ b/src/io/NCHelperGCRM.hpp
@@ -52,11 +52,7 @@ private:
   //! Create local edges (optional)
   ErrorCode create_local_edges(EntityHandle start_vertex);
 
-  //! Create local cells without padding (cells are divided into groups based on the number of edges)
-  ErrorCode create_local_cells(const std::vector<int>& vertices_on_local_cells,
-                                        EntityHandle start_vertex, Range& faces);
-
-  //! Create local cells with padding (padded cells will have the same number of edges)
+  //! Create local cells with padding (pentagons are padded to hexagons)
   ErrorCode create_padded_local_cells(const std::vector<int>& vertices_on_local_cells,
                                       EntityHandle start_vertex, Range& faces);
 
@@ -66,17 +62,11 @@ private:
   //! Create gather set edges (optional)
   ErrorCode create_gather_set_edges(EntityHandle gather_set, EntityHandle gather_set_start_vertex);
 
-  //! Create gather set cells without padding (cells are divided into groups based on the number of edges)
-  ErrorCode create_gather_set_cells(EntityHandle gather_set, EntityHandle gather_set_start_vertex);
-
-  //! Create gather set cells with padding (padded cells will have the same number of edges)
+  //! Create gather set cells with padding (pentagons are padded to hexagons)
   ErrorCode create_padded_gather_set_cells(EntityHandle gather_set, EntityHandle gather_set_start_vertex);
 
 private:
-  int maxEdgesPerCell;
-  int numCellGroups;
   bool createGatherSet;
-  std::map<EntityHandle, int> cellHandleToGlobalID;
   Range facesOwned;
 };
 

diff --git a/src/io/NCHelperMPAS.cpp b/src/io/NCHelperMPAS.cpp
index 5c964b9..a02e4ce 100644
--- a/src/io/NCHelperMPAS.cpp
+++ b/src/io/NCHelperMPAS.cpp
@@ -122,22 +122,22 @@ ErrorCode NCHelperMPAS::init_mesh_vals()
   levDim = idx;
   nLevels = dimLens[idx];
 
-  // Dimension numbers for other optional levels
+  // Dimension indices for other optional levels
   std::vector<unsigned int> opt_lev_dims;
 
-  // Get number of vertex levels P1
+  // Get index of vertex levels P1
   if ((vit = std::find(dimNames.begin(), dimNames.end(), "nVertLevelsP1")) != dimNames.end()) {
     idx = vit - dimNames.begin();
     opt_lev_dims.push_back(idx);
   }
 
-  // Get number of vertex levels P2
+  // Get index of vertex levels P2
   if ((vit = std::find(dimNames.begin(), dimNames.end(), "nVertLevelsP2")) != dimNames.end()) {
     idx = vit - dimNames.begin();
     opt_lev_dims.push_back(idx);
   }
 
-  // Get number of soil levels
+  // Get index of soil levels
   if ((vit = std::find(dimNames.begin(), dimNames.end(), "nSoilLevels")) != dimNames.end()) {
     idx = vit - dimNames.begin();
     opt_lev_dims.push_back(idx);

diff --git a/src/io/NCWriteGCRM.cpp b/src/io/NCWriteGCRM.cpp
index 1f9bb97..b80d90a 100644
--- a/src/io/NCWriteGCRM.cpp
+++ b/src/io/NCWriteGCRM.cpp
@@ -123,6 +123,21 @@ ErrorCode NCWriteGCRM::collect_variable_data(std::vector<std::string>& var_names
 {
   NCWriteHelper::collect_variable_data(var_names, tstep_nums);
 
+  std::vector<std::string>& dimNames = _writeNC->dimNames;
+  std::vector<int>& dimLens = _writeNC->dimLens;
+
+  // Dimension indices for other optional levels
+  std::vector<unsigned int> opt_lev_dims;
+
+  unsigned int lev_idx;
+  std::vector<std::string>::iterator vecIt;
+
+  // Get index of interface levels
+  if ((vecIt = std::find(dimNames.begin(), dimNames.end(), "interfaces")) != dimNames.end()) {
+    lev_idx = vecIt - dimNames.begin();
+    opt_lev_dims.push_back(lev_idx);
+  }
+
   std::map<std::string, WriteNC::VarData>& varInfo = _writeNC->varInfo;
 
   for (size_t i = 0; i < var_names.size(); i++) {
@@ -132,14 +147,22 @@ ErrorCode NCWriteGCRM::collect_variable_data(std::vector<std::string>& var_names
       ERRORR(MB_FAILURE, "Can't find one variable.");
 
     WriteNC::VarData& currentVarData = vit->second;
-#ifndef NDEBUG
     std::vector<int>& varDims = currentVarData.varDims;
-#endif
 
     // Skip edge variables, if there are no edges
     if (localEdgesOwned.empty() && currentVarData.entLoc == WriteNC::ENTLOCEDGE)
       continue;
 
+    // If layers dimension is not found, try other optional levels such as interfaces
+    if (std::find(varDims.begin(), varDims.end(), levDim) == varDims.end()) {
+      for (unsigned int j = 0; j < opt_lev_dims.size(); j++) {
+        if (std::find(varDims.begin(), varDims.end(), opt_lev_dims[j]) != varDims.end()) {
+          currentVarData.numLev = dimLens[opt_lev_dims[j]];
+          break;
+        }
+      }
+    }
+
     // Skip set variables, which were already processed in NCWriteHelper::collect_variable_data()
     if (WriteNC::ENTLOCSET == currentVarData.entLoc)
       continue;

diff --git a/src/io/NCWriteMPAS.cpp b/src/io/NCWriteMPAS.cpp
index 0ece324..512c1d5 100644
--- a/src/io/NCWriteMPAS.cpp
+++ b/src/io/NCWriteMPAS.cpp
@@ -126,25 +126,25 @@ ErrorCode NCWriteMPAS::collect_variable_data(std::vector<std::string>& var_names
   std::vector<std::string>& dimNames = _writeNC->dimNames;
   std::vector<int>& dimLens = _writeNC->dimLens;
 
-  // Dimension numbers for other optional levels
+  // Dimension indices for other optional levels
   std::vector<unsigned int> opt_lev_dims;
 
   unsigned int lev_idx;
   std::vector<std::string>::iterator vecIt;
 
-  // Get number of vertex levels P1
+  // Get index of vertex levels P1
   if ((vecIt = std::find(dimNames.begin(), dimNames.end(), "nVertLevelsP1")) != dimNames.end()) {
     lev_idx = vecIt - dimNames.begin();
     opt_lev_dims.push_back(lev_idx);
   }
 
-  // Get number of vertex levels P2
+  // Get index of vertex levels P2
   if ((vecIt = std::find(dimNames.begin(), dimNames.end(), "nVertLevelsP2")) != dimNames.end()) {
     lev_idx = vecIt - dimNames.begin();
     opt_lev_dims.push_back(lev_idx);
   }
 
-  // Get number of soil levels
+  // Get index of soil levels
   if ((vecIt = std::find(dimNames.begin(), dimNames.end(), "nSoilLevels")) != dimNames.end()) {
     lev_idx = vecIt - dimNames.begin();
     opt_lev_dims.push_back(lev_idx);

diff --git a/src/io/WriteHDF5.cpp b/src/io/WriteHDF5.cpp
index 337cb96..b53508e 100644
--- a/src/io/WriteHDF5.cpp
+++ b/src/io/WriteHDF5.cpp
@@ -2567,7 +2567,43 @@ ErrorCode WriteHDF5::serial_create_file( const char* filename,
     rval = assign_ids( ex_itor->range, ex_itor->first_id );
     CHK_MB_ERR_0(rval);
   }
+  // create set tables
+  writeSets = !setSet.range.empty();
+  if (writeSets)
+  {
+    long contents_len, children_len, parents_len;
+
+    setSet.total_num_ents = setSet.range.size();
+    setSet.max_num_ents = setSet.total_num_ents;
+    rval = create_set_meta(setSet.total_num_ents, first_id);
+    CHK_MB_ERR_0(rval);
+
+    setSet.first_id = (id_t) first_id;
+    rval = assign_ids(setSet.range, setSet.first_id);
+    CHK_MB_ERR_0(rval);
+
+    rval = count_set_size(setSet.range, contents_len, children_len,
+        parents_len);
+    CHK_MB_ERR_0(rval);
+
+    rval = create_set_tables(contents_len, children_len, parents_len);
+    CHK_MB_ERR_0(rval);
+
+    setSet.offset = 0;
+    setContentsOffset = 0;
+    setChildrenOffset = 0;
+    setParentsOffset = 0;
+    writeSetContents = !!contents_len;
+    writeSetChildren = !!children_len;
+    writeSetParents = !!parents_len;
+
+    maxNumSetContents = contents_len;
+    maxNumSetChildren = children_len;
+    maxNumSetParents = parents_len;
+  } // if(!setSet.range.empty())
 
+  // create adjacency table after set table, because sets do not yet have an id
+  // some entities are adjacent to sets (exodus?)
     // create node adjacency table
   id_t num_adjacencies;
 #ifdef MB_H5M_WRITE_NODE_ADJACENCIES  
@@ -2605,39 +2641,6 @@ ErrorCode WriteHDF5::serial_create_file( const char* filename,
     }
   }
   
-    // create set tables
-  writeSets = !setSet.range.empty();
-  if (writeSets)
-  {
-    long contents_len, children_len, parents_len;
-    
-    setSet.total_num_ents = setSet.range.size();
-    setSet.max_num_ents = setSet.total_num_ents;
-    rval = create_set_meta( setSet.total_num_ents, first_id );
-    CHK_MB_ERR_0(rval);
-
-    setSet.first_id = (id_t)first_id;
-    rval = assign_ids( setSet.range, setSet.first_id );
-    CHK_MB_ERR_0(rval);
-    
-    rval = count_set_size( setSet.range, contents_len, children_len, parents_len );
-    CHK_MB_ERR_0(rval);
-    
-    rval = create_set_tables( contents_len, children_len, parents_len );
-    CHK_MB_ERR_0(rval);
-   
-    setSet.offset = 0;
-    setContentsOffset = 0;
-    setChildrenOffset = 0;
-    setParentsOffset = 0;
-    writeSetContents = !!contents_len;
-    writeSetChildren = !!children_len;
-    writeSetParents = !!parents_len;
-    
-    maxNumSetContents = contents_len;
-    maxNumSetChildren = children_len;
-    maxNumSetParents = parents_len;
-  } // if(!setSet.range.empty())
   
   
   dbgOut.tprint( 1, "Gathering Tags\n" );

diff --git a/src/io/WriteHDF5.hpp b/src/io/WriteHDF5.hpp
index 256a217..555d4ca 100644
--- a/src/io/WriteHDF5.hpp
+++ b/src/io/WriteHDF5.hpp
@@ -104,7 +104,7 @@ public:
     Range range;
     //! The first Id allocated by the mhdf library.  Entities in range have sequential IDs.
     id_t first_id;
-    //! The offset at which to begin writting this processor's data.
+    //! The offset at which to begin writing this processor's data.
     //! Always zero except for parallel IO.
     long offset;
     //! Offset for adjacency data.  Always zero except for parallel IO

diff --git a/src/moab/MergeMesh.hpp b/src/moab/MergeMesh.hpp
index 489bb3e..8eef9a8 100644
--- a/src/moab/MergeMesh.hpp
+++ b/src/moab/MergeMesh.hpp
@@ -7,73 +7,69 @@
 namespace moab {
 
 class AdaptiveKDTree;
-    
-class MergeMesh 
+
+class MergeMesh
 {
 public:
-    /* \brief Constructor
-     */
-  MergeMesh(moab::Interface *mbImpl, bool printErrorIn = true);
-  
-    /* \brief Destructor
-     */
+  /* \brief Constructor
+   */
+  MergeMesh(Interface *mbImpl, bool printErrorIn = true);
+
+  /* \brief Destructor
+   */
   virtual ~MergeMesh();
 
-    /* \brief Merge vertices in elements passed in
-     */
-   moab::ErrorCode merge_entities(moab::EntityHandle *elems,
-                      int elems_size,
-                      const double merge_tol,
-                      const int do_merge = true,
-                      const int update_sets = false,
-		      moab::Tag merge_tag = 0,
-		      bool do_higher_dim = true);
-
-  moab::ErrorCode merge_entities(moab::Range &elems,
-                                 const double merge_tol,
-                                 const int do_merge = true,
-                                 const int update_sets = false,
-                                 moab::Tag merge_tag = 0,
-				 bool do_higher_dim = true);
-  
+  /* \brief Merge vertices in elements passed in
+   */
+  ErrorCode merge_entities(EntityHandle *elems, int elems_size,
+      const double merge_tol, const int do_merge = true, const int update_sets =
+          false, Tag merge_tag = 0, bool do_higher_dim = true);
+
+  ErrorCode merge_entities(Range &elems, const double merge_tol,
+      const int do_merge = true, const int update_sets = false,
+      Tag merge_tag = 0, bool do_higher_dim = true);
+
   //Identify higher dimension to be merged
-  moab::ErrorCode merge_higher_dimensions(moab::Range &elems);
+  ErrorCode merge_higher_dimensions(Range &elems);
 
-      //- perform the actual merge
-  moab::ErrorCode perform_merge(moab::Tag merged_to);
+  //- perform the actual merge
+  ErrorCode perform_merge(Tag merged_to);
 private:
   //iMesh_Instance imeshImpl;
 
   double mergeTol, mergeTolSq;
 
-  moab::Tag mergeTag;
+  Tag mergeTag;
+
+  //- given a kdtree, set tag on vertices in leaf nodes with vertices
+  //- to which they should be merged
+  ErrorCode find_merged_to(EntityHandle &tree_root,
+      AdaptiveKDTree &tree, Tag merged_to);
+
+  Interface *mbImpl;
 
-    //- given a kdtree, set tag on vertices in leaf nodes with vertices
-    //- to which they should be merged
-  moab::ErrorCode find_merged_to(moab::EntityHandle &tree_root,
-                                 moab::AdaptiveKDTree &tree,
-				 moab::Tag merged_to);
-  
-  moab::Interface *mbImpl;
+  //- the tag pointing to the entity to which an entity will be merged
+  Tag mbMergeTag;
 
-    //- the tag pointing to the entity to which an entity will be merged
-  moab::Tag mbMergeTag;
+  //- entities which will go away after the merge
+  Range deadEnts;
 
-    //- entities which will go away after the merge
-  moab::Range deadEnts;
+  // vertices that were merged with other vertices, and were left in the database
+  Range mergedToVertices;
 
   //Allow a warning to be suppressed when no merging is done
   bool printError;
 };
 
-  inline MergeMesh::MergeMesh(Interface *impl, bool printErrorIn) 
-    : mbImpl(impl), printError(printErrorIn)
+inline MergeMesh::MergeMesh(Interface *impl, bool printErrorIn) :
+    mbImpl(impl), printError(printErrorIn)
 {
 }
 
-inline MergeMesh::~MergeMesh() 
+inline MergeMesh::~MergeMesh()
 {
-  if (mbMergeTag) mbImpl->tag_delete(mbMergeTag);
+  if (mbMergeTag)
+    mbImpl->tag_delete(mbMergeTag);
 }
 
 }

diff --git a/src/parallel/WriteHDF5Parallel.cpp b/src/parallel/WriteHDF5Parallel.cpp
index be692ba..7bd14b8 100644
--- a/src/parallel/WriteHDF5Parallel.cpp
+++ b/src/parallel/WriteHDF5Parallel.cpp
@@ -420,8 +420,17 @@ ErrorCode WriteHDF5Parallel::parallel_create_file( const char* filename,
   if (MB_SUCCESS != rval) return error(rval);
   if (times) times[FILEID_EXCHANGE_TIME] = timer.elapsed();
  
+  /**************** Create meshset tables *********************/
 
-    /**************** Create adjacency tables *********************/
+  debug_barrier();
+  dbgOut.tprint(1,"creating meshset table\n");
+  topState.start("creating meshset tables");
+  rval = create_meshset_tables(times);
+  topState.end(rval);
+  if (MB_SUCCESS != rval) return error(rval);
+  if (times) times[CREATE_SET_TIME] = timer.elapsed();
+
+  /**************** Create adjacency tables *********************/
   
   debug_barrier();
   dbgOut.tprint(1,"creating adjacency table\n");
@@ -431,15 +440,7 @@ ErrorCode WriteHDF5Parallel::parallel_create_file( const char* filename,
   if (MB_SUCCESS != rval) return error(rval);
   if (times) times[CREATE_ADJ_TIME] = timer.elapsed();
   
-    /**************** Create meshset tables *********************/
   
-  debug_barrier();
-  dbgOut.tprint(1,"creating meshset table\n");
-  topState.start("creating meshset tables");
-  rval = create_meshset_tables(times);
-  topState.end(rval);
-  if (MB_SUCCESS != rval) return error(rval);
-  if (times) times[CREATE_SET_TIME] = timer.elapsed();
   
     /**************** Create tag data *********************/
 

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/fathomteam/moab/commits/d0870202d8f9/
Changeset:   d0870202d8f9
Branch:      None
User:        pshriwise
Date:        2014-06-17 01:26:53
Summary:     Merge branch 'master' of https://bitbucket.org/fathomteam/moab

Affected #:  53 files

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6b18435..21fe792 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,135 +1,214 @@
-# check if we are using MPI - reset compilers accordingly
-if ( MOAB_USE_MPI )
-  SET(CMAKE_CXX_COMPILER ${MPI_CXX_COMPILER})
-  SET(CMAKE_C_COMPILER ${MPI_C_COMPILER})
-endif ( MOAB_USE_MPI )
-
+cmake_minimum_required( VERSION 2.8.12 )
+cmake_policy( SET CMP0003 NEW )
+cmake_policy(SET CMP0020 NEW)
 project( MOAB )
 
-  cmake_minimum_required( VERSION 2.4 )
-  if ( COMMAND cmake_policy )
-    cmake_policy( SET CMP0003 NEW )
-  endif ( COMMAND cmake_policy )
-  # Always build tests
-  enable_testing()
-
-  list (APPEND CMAKE_MODULE_PATH "${MOAB_SOURCE_DIR}/config")
-  include (config/GetAcInitVersion.cmake)
-
-  # enable_language( Fortran )
-
-  get_ac_init_version()
-  set ( MOAB_VERSION_MAJOR  "${MAJOR_VERSION}"  )
-  set ( MOAB_VERSION_MINOR  "${MINOR_VERSION}"  )
-  set ( MOAB_VERSION        "${VERSION_STRING}" )
-  set ( MOAB_VERSION_STRING "${VERSION_STRING}" )
-  if ( DEFINED PATCH_VERSION )
-    set ( MOAB_VERSION_PATCH "${PATCH_VERSION}" )
-  else ( DEFINED PATCH_VERSION )
-    if ( MOAB_VERSION_MINOR EQUAL 99 )
-      set ( MOAB_VERSION_STRING "${MOAB_VERSION_STRING} (alpha)" )
-    else ( MOAB_VERSION_MINOR EQUAL 99 )
-      set ( MOAB_VERSION_STRING "${MOAB_VERSION_STRING} (beta)" )
-    endif ( MOAB_VERSION_MINOR EQUAL 99 )
-  endif ( DEFINED PATCH_VERSION )
-
-  set ( EXECUTABLE_OUTPUT_PATH ${MOAB_BINARY_DIR}/bin CACHE PATH "Path to executables" FORCE )
-  set ( LIBRARY_OUTPUT_PATH    ${MOAB_BINARY_DIR}/bin CACHE PATH "Path to libraries"   FORCE )
-  mark_as_advanced(
-    EXECUTABLE_OUTPUT_PATH
-    LIBRARY_OUTPUT_PATH
-  )
-
-  # Compiler defines... this should really be in a config file.
-  set( MOAB_DEFINES "" )
-
-  include ( CheckIncludeFile )
-  include ( CheckFunctionExists )
-  include ( CheckTypeSize )
-
-  # Shared libraries
-  option ( BUILD_SHARED_LIBS "Should shared or static libraries be created?" ON )
-
-  # HANDLE SIZE
-  option ( MOAB_FORCE_64_BIT_HANDLES "Force MBEntityHandle to be 64 bits (uint64_t)" OFF )
-  option ( MOAB_FORCE_32_BIT_HANDLES "Force MBEntityHandle to be 32 bits (uint32_t)" OFF )
-  mark_as_advanced(
+#Add our Cmake directory to the module search path
+set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/config ${CMAKE_MODULE_PATH})
+
+################################################################################
+# Set up version info
+################################################################################
+include (config/GetAcInitVersion.cmake)
+get_ac_init_version()
+set ( MOAB_VERSION_MAJOR  "${MAJOR_VERSION}"  )
+set ( MOAB_VERSION_MINOR  "${MINOR_VERSION}"  )
+set ( MOAB_VERSION        "${VERSION_STRING}" )
+set ( MOAB_VERSION_STRING "${VERSION_STRING}" )
+if ( DEFINED PATCH_VERSION )
+  set ( MOAB_VERSION_PATCH "${PATCH_VERSION}" )
+else ( DEFINED PATCH_VERSION )
+  if ( MOAB_VERSION_MINOR EQUAL 99 )
+    set ( MOAB_VERSION_STRING "${MOAB_VERSION_STRING} (alpha)" )
+  else ( MOAB_VERSION_MINOR EQUAL 99 )
+    set ( MOAB_VERSION_STRING "${MOAB_VERSION_STRING} (beta)" )
+  endif ( MOAB_VERSION_MINOR EQUAL 99 )
+endif ( DEFINED PATCH_VERSION )
+
+################################################################################
+# Install Related Settings
+################################################################################
+
+## Set the directory where the binaries will be stored
+set( EXECUTABLE_OUTPUT_PATH
+  ${PROJECT_BINARY_DIR}/bin
+  CACHE PATH
+  "Directory where all executable will be stored"
+)
+
+## Set the directory where the libraries will be stored
+set( LIBRARY_OUTPUT_PATH
+  ${PROJECT_BINARY_DIR}/lib
+  CACHE PATH
+  "Directory where all the libraries will be stored"
+)
+mark_as_advanced(
+  EXECUTABLE_OUTPUT_PATH
+  LIBRARY_OUTPUT_PATH)
+
+include ( CheckIncludeFile )
+include ( CheckFunctionExists )
+include ( CheckTypeSize )
+
+# Compiler defines... this should really be in a config file.
+set( MOAB_DEFINES "" )
+set( MOAB_LIBS )
+set( MOAB_INSTALL_TARGETS )
+
+################################################################################
+# Options that the user controls
+################################################################################
+option ( BUILD_SHARED_LIBS   "Should shared or static libraries be created?"   ON  )
+option ( MOAB_USE_SZIP       "Should build with szip support?"                 OFF )
+option ( MOAB_USE_CGM        "Should build with CGM support?"                  OFF )
+option ( MOAB_USE_CGNS       "Should build with CGNS support?"                 OFF )
+option ( MOAB_USE_MPI        "Should MOAB be compiled with MPI support?"       OFF )
+option ( MOAB_USE_HDF        "Include HDF I/O in the build?"                   OFF )
+option ( MOAB_USE_NETCDF     "Include NetCDF support (ExodusII) in the build?" OFF )
+option ( MOAB_USE_PNETCDF    "Include parallel NetCDF support (ExodusII) in the build?" OFF )
+option ( MOAB_USE_ZOLTAN     "Include Zoltan support for partitioning algorithms?" OFF )
+option ( MOAB_ENABLE_TESTING "Enable Testing"                                  ON  )
+option ( MOAB_FORCE_64_BIT_HANDLES "Force MBEntityHandle to be 64 bits (uint64_t)" OFF )
+option ( MOAB_FORCE_32_BIT_HANDLES "Force MBEntityHandle to be 32 bits (uint32_t)" OFF )
+
+option ( ENABLE_IMESH        "Should build IMESH?"       OFF )
+option ( ENABLE_IGEOM        "Should build IGEOM?"       OFF )
+
+mark_as_advanced(
     MOAB_FORCE_64_BIT_HANDLES
     MOAB_FORCE_32_BIT_HANDLES
   )
 
-  if ( MOAB_FORCE_64_BIT_HANDLES AND MOAB_FORCE_32_BIT_HANDLES )
-    message( FATAL_ERROR
+
+################################################################################
+# Check for system include files
+################################################################################
+check_include_file( inttypes.h   MOAB_HAVE_INTTYPES_H )
+check_include_file( stdint.h     MOAB_HAVE_STDINT_H )
+check_include_file( stddef.h     MOAB_HAVE_STDDEF_H )
+check_include_file( stdlib.h     MOAB_HAVE_STDLIB_H )
+check_include_file( sys/types.h  MOAB_HAVE_SYS_TYPES_H )
+check_type_size( size_t SIZE_T)
+check_type_size( ptrdiff_t PTRDIFF_T )
+set( MOAB_HAVE_SIZE_T ${HAVE_SIZE_T} )
+set( MOAB_HAVE_PTRDIFF_T ${HAVE_PTRDIFF_T} )
+set( HAVE_SYS_TYPES_H ${MOAB_HAVE_SYS_TYPES_H} )
+set( HAVE_STDDEF_H    ${MOAB_HAVE_STDDEF_H} )
+set( HAVE_STDINT_H    ${MOAB_HAVE_STDINT_H} )
+set( HAVE_INTTYPES_H    ${MOAB_HAVE_INTTYPES_H} )
+set( HAVE_STDLIB_H    ${MOAB_HAVE_STDLIB_H} )
+check_include_file( memory.h     HAVE_MEMORY_H )
+
+################################################################################
+# Integer size Related Settings
+################################################################################
+if ( MOAB_FORCE_64_BIT_HANDLES AND MOAB_FORCE_32_BIT_HANDLES )
+  message( FATAL_ERROR
       "You may not turn both MOAB_FORCE_64_BIT_HANDLES and MOAB_FORCE_32_BIT_HANDLES on. Turn one off to continue."
     )
-  endif ( MOAB_FORCE_64_BIT_HANDLES AND MOAB_FORCE_32_BIT_HANDLES )
-
-  check_include_file( inttypes.h   MOAB_HAVE_INTTYPES_H )
-  check_include_file( stdint.h     MOAB_HAVE_STDINT_H )
-  check_include_file( stddef.h     MOAB_HAVE_STDDEF_H )
-  check_include_file( stdlib.h     MOAB_HAVE_STDLIB_H )
-  check_include_file( sys/types.h  MOAB_HAVE_SYS_TYPES_H )
-  set( HAVE_SYS_TYPES_H ${MOAB_HAVE_SYS_TYPES_H} )
-  set( HAVE_STDDEF_H    ${MOAB_HAVE_STDDEF_H} )
-  set( HAVE_STDINT_H    ${MOAB_HAVE_STDINT_H} )
-  set( HAVE_INTTYPES_H    ${MOAB_HAVE_INTTYPES_H} )
-  set( HAVE_STDLIB_H    ${MOAB_HAVE_STDLIB_H} )
-  check_include_file( memory.h     HAVE_MEMORY_H )
-
-  if ( NOT MOAB_FORCE_64_BIT_HANDLES AND NOT MOAB_FORCE_32_BIT_HANDLES )
-    if ( MOAB_HAVE_INTTYPES_H )
-      set ( CMAKE_EXTRA_INCLUDE_FILES "${CMAKE_EXTRA_INCLUDE_FILES};inttypes.h" )
-    endif ( MOAB_HAVE_INTTYPES_H )
-    if ( MOAB_HAVE_STDLIB_H )
-      set ( CMAKE_EXTRA_INCLUDE_FILES "${CMAKE_EXTRA_INCLUDE_FILES};stdlib.h" )
-      #set ( CHECK_TYPE_SIZE_PREMAIN "${CHECK_TYPE_SIZE_PREMAIN}\n#include <stdlib.h>\n" )
-    endif ( MOAB_HAVE_STDLIB_H )
-    check_type_size(  size_t       HAVE_SIZE_T )
-    check_type_size(  ptrdiff_t    HAVE_PTRDIFF_T )
-    set ( MOAB_HAVE_SIZE_T ${HAVE_SIZE_T} )
-    set ( MOAB_HAVE_PTRDIFF_T ${HAVE_PTRDIFF_T} )
-  endif ( NOT MOAB_FORCE_64_BIT_HANDLES AND NOT MOAB_FORCE_32_BIT_HANDLES )
-
-  try_compile( TEMPLATE_DEFS_INCLUDED ${MOAB_BINARY_DIR} ${MOAB_SOURCE_DIR}/config/TemplateSpecialization.cxx OUTPUT_VARIABLE BLORT )
-  if ( TEMPLATE_DEFS_INCLUDED )
-    set ( MOAB_DEFINES "${MOAB_DEFINES} -DTEMPLATE_DEFS_INCLUDED" )
-  endif ( TEMPLATE_DEFS_INCLUDED )
-
-  # find Verdict
-  find_package( verdict REQUIRED )
-
-  # Build options
-  option ( MOAB_USE_MPI    "Should MOAB be compiled with MPI support?"       OFF )
-  option ( MOAB_USE_HDF    "Include HDF I/O in the build?"                   OFF )
-  option ( MOAB_USE_NETCDF "Include NetCDF support (ExodusII) in the build?" OFF )
-
-  # iMesh
-  option ( MOAB_BUILD_IMESH        "Build the iMesh interface?"           ON )
-
-  # check for MPI package
-  if ( MOAB_USE_MPI )
-    find_package( MPI )
-    # CMake FindMPI script is sorely lacking:
-    if ( MPI_LIBRARY AND MPI_INCLUDE_PATH )
-      set( MPI_FOUND 1 )
-    endif ( MPI_LIBRARY AND MPI_INCLUDE_PATH )
-
-    if ( MPI_FOUND )
-      set ( MOAB_DEFINES "${MOAB_DEFINES} -DUSE_MPI" )
-    endif ( MPI_FOUND )
-  endif ( MOAB_USE_MPI )
-
-  if ( MOAB_USE_NETCDF )
-    find_package( NetCDF )
-  endif ( MOAB_USE_NETCDF )
-
-  if ( MOAB_USE_HDF )
-    # HDF5
-    find_package( HDF5 )
-  endif ( MOAB_USE_HDF )
-
-  add_subdirectory( src )
-  add_subdirectory( itaps/imesh )
-  add_subdirectory( tools )
-  add_subdirectory( test )
+endif ( MOAB_FORCE_64_BIT_HANDLES AND MOAB_FORCE_32_BIT_HANDLES )
+
+if ( NOT MOAB_FORCE_64_BIT_HANDLES AND NOT MOAB_FORCE_32_BIT_HANDLES )
+  if ( MOAB_HAVE_INTTYPES_H )
+    set ( CMAKE_EXTRA_INCLUDE_FILES "${CMAKE_EXTRA_INCLUDE_FILES};inttypes.h" )
+  endif ( MOAB_HAVE_INTTYPES_H )
+  if ( MOAB_HAVE_STDLIB_H )
+    set ( CMAKE_EXTRA_INCLUDE_FILES "${CMAKE_EXTRA_INCLUDE_FILES};stdlib.h" )
+    #set ( CHECK_TYPE_SIZE_PREMAIN "${CHECK_TYPE_SIZE_PREMAIN}\n#include <stdlib.h>\n" )
+  endif ( MOAB_HAVE_STDLIB_H )
+  check_type_size(  size_t       HAVE_SIZE_T )
+  check_type_size(  ptrdiff_t    HAVE_PTRDIFF_T )
+  set ( MOAB_HAVE_SIZE_T ${HAVE_SIZE_T} )
+  set ( MOAB_HAVE_PTRDIFF_T ${HAVE_PTRDIFF_T} )
+endif ( NOT MOAB_FORCE_64_BIT_HANDLES AND NOT MOAB_FORCE_32_BIT_HANDLES )
+
+
+################################################################################
+# Find packages
+################################################################################
+find_package( verdict REQUIRED )
+
+# check for MPI package
+if ( MOAB_USE_MPI )
+  find_package( MPI REQUIRED )
+  # CMake FindMPI script is sorely lacking:
+  if ( MPI_LIBRARY AND MPI_INCLUDE_PATH )
+    set( MPI_FOUND 1 )
+  endif ( MPI_LIBRARY AND MPI_INCLUDE_PATH )
+
+  if ( MPI_FOUND )
+    set ( MOAB_DEFINES "${MOAB_DEFINES} -DUSE_MPI" )
+  endif ( MPI_FOUND )
+endif ( MOAB_USE_MPI )
 
+if ( MOAB_USE_NETCDF )
+  find_package( NetCDF REQUIRED )
+  set( MOAB_DEFINES "-DNETCDF_FILE ${MOAB_DEFINES}" )
+  include_directories( ${NetCDF_INCLUDES} ${PNetCDF_INCLUDES} )
+  set( MOAB_LIBS ${MOAB_LIBS} ${NetCDF_LIBRARIES} ${PNetCDF_LIBRARIES} )
+endif ( MOAB_USE_NETCDF )
+
+if ( MOAB_USE_HDF )
+  find_package( HDF5 REQUIRED )
+  set( MOAB_DEFINES "-DHDF5_FILE ${MOAB_DEFINES}" )
+  set( MOAB_LIBS ${MOAB_LIBS} ${HDF5_LIBRARIES} )
+  include_directories( ${HDF5_INCLUDE_DIR} src/io/mhdf/include )
+endif ( MOAB_USE_HDF )
+
+if ( MOAB_USE_ZLIB )
+  find_package( ZLIB REQUIRED )
+endif ( )
+
+if ( MOAB_USE_ZOLTAN )
+  find_package( Zoltan REQUIRED )
+endif ( )
+
+if ( MOAB_USE_CGM )
+   find_package( CGM REQUIRED )
+   set( MOAB_DEFINES "${CGM_DEFINES} -DCGM ${MOAB_DEFINES}" )
+endif ()
+
+if (MOAB_USE_CGNS)
+  set( MOABIO_LIBS ${MOABIO_LIBS} ${CGNS_LIBRARIES} )
+endif()
+
+################################################################################
+# Add Directories
+################################################################################
+add_subdirectory( src )
+add_subdirectory( itaps )
+add_subdirectory( tools )
+
+################################################################################
+# Testing Related Settings
+################################################################################
+#turn on ctest if we want testing
+if ( MOAB_ENABLE_TESTING )
+  enable_testing()
+  add_subdirectory( test )
+endif()
+
+###############################################################################
+#
+###############################################################################
+export(TARGETS ${MOAB_INSTALL_TARGETS}
+              FILE "${PROJECT_BINARY_DIR}/MOABTargets.cmake")
+install(EXPORT MOABTargets DESTINATION lib)
+
+################################################################################
+# Generate the MOABConfig.cmake file
+################################################################################
+set(CXX ${CMAKE_CXX_COMPILER})
+set(CC ${CMAKE_C_COMPILER})
+set(F77 "" )
+set(FC "")
+set(CXXFLAGS ${MOAB_DEFINES})
+set(AM_CXXFLAGS)
+set(CFLAGS ${MOAB_DEFINES})
+set(AM_CFLAGS)
+set(MOAB_FFLAGS)
+set(AM_FFLAGS)
+
+configure_file(MOABConfig.new.cmake.in
+  "${PROJECT_BINARY_DIR}/MOABConfig.cmake" @ONLY)
+install( FILES "${PROJECT_BINARY_DIR}/MOABConfig.cmake" DESTINATION lib )

diff --git a/MOABConfig.new.cmake.in b/MOABConfig.new.cmake.in
new file mode 100644
index 0000000..5b96610
--- /dev/null
+++ b/MOABConfig.new.cmake.in
@@ -0,0 +1,35 @@
+# Config file for MOAB; use the CMake find_package() function to pull this into
+# your own CMakeLists.txt file.
+#
+# This file defines the following variables:
+# MOAB_FOUND        - boolean indicating that MOAB is found
+# MOAB_INCLUDE_DIRS - include directories from which to pick up MOAB includes
+# MOAB_LIBRARIES    - libraries needed to link to MOAB; use this in target_link_libraries for MOAB-dependent targets
+# MOAB_CXX, MOAB_CC, MOAB_F77, MOAB_FC - compilers used to compile MOAB
+# MOAB_CXXFLAGS, MOAB_CCFLAGS, MOAB_FFLAGS, MOAB_FCFLAGS - compiler flags used to compile MOAB; possibly need to use these in add_definitions or CMAKE_<LANG>_FLAGS_<MODE> 
+
+set(MOAB_FOUND 1)
+
+set(MOAB_USE_HDF @MOAB_USE_HDF@)
+
+# Compiler flags used by MOAB
+
+set(MOAB_CXXFLAGS "@CXXFLAGS@ @AM_CXXFLAGS@")
+set(MOAB_CFLAGS "@CFLAGS@ @AM_CFLAGS@")
+set(MOAB_FORTRAN_FLAGS "@MOAB_FFLAGS@ @AM_FFLAGS@")
+
+# Library and include defs
+get_filename_component(MOAB_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
+
+if(NOT TARGET MOAB AND NOT MOAB_BINARY_DIR)
+  include("${MOAB_CMAKE_DIR}/MOABTargets.cmake")
+endif()
+
+if(MOAB_USE_HDF)
+  if(EXISTS "@HDF5_DIR@/share/cmake/hdf5/hdf5-config.cmake")
+    include(@HDF5_DIR@/share/cmake/hdf5/hdf5-config.cmake)
+  endif()
+endif()
+
+set(MOAB_INCLUDE_DIRS "@CMAKE_INSTALL_PREFIX@/include")
+set(MOAB_LIBRARIES MOAB)

diff --git a/MeshFiles/unittest/io/Makefile.am b/MeshFiles/unittest/io/Makefile.am
index 94704c1..d40e02b 100644
--- a/MeshFiles/unittest/io/Makefile.am
+++ b/MeshFiles/unittest/io/Makefile.am
@@ -5,7 +5,7 @@ EXTRA_DIST = HommeMapping.nc \
              eul26x48x96.t0.nc \
              eul26x48x96.t1.nc \
              eul26x48x96.t2.nc \
-             eul26x48x96.t.3.nc \ 
+             eul26x48x96.t.3.nc \
              fv26x46x72.t.3.nc \
 	     cubtest12.cub \
 	     cubtest14.cub \

diff --git a/MeshFiles/unittest/io/sample.stl b/MeshFiles/unittest/io/sample.stl
index 5aa4dc6..6ae22ff 100644
--- a/MeshFiles/unittest/io/sample.stl
+++ b/MeshFiles/unittest/io/sample.stl
@@ -13,7 +13,7 @@ solid cube_corner
       vertex 1.0 0.0 0.0
     endloop
   endfacet
-  facet normal 0.0 0.0 -1.0
+  facet normal -1.0 0.0 0.0
     outer loop
       vertex 0.0 0.0 0.0
       vertex 0.0 0.0 1.0

diff --git a/config/AutoconfHeader.cmake b/config/AutoconfHeader.cmake
index 360f2b8..1d83ee8 100644
--- a/config/AutoconfHeader.cmake
+++ b/config/AutoconfHeader.cmake
@@ -27,3 +27,10 @@ else ( EXISTS "${OUTFILE}" )
 endif ( EXISTS "${OUTFILE}" )
 
 endmacro( autoconf_header )
+
+macro( moab_install_headers )
+  foreach (header ${ARGV})
+    string(REGEX MATCH "(.*)/" DIR ${header})
+    install(FILES ${header} DESTINATION include/${DIR})
+  endforeach()
+endmacro(moab_install_headers)

diff --git a/config/FindCGM.cmake b/config/FindCGM.cmake
index 5bc833e..236ea7f 100644
--- a/config/FindCGM.cmake
+++ b/config/FindCGM.cmake
@@ -1,146 +1,137 @@
-# Find the common geometry module libraries
+# FindCGM.cmake
+#
+# If you set the CGM_CFG CMake variable to point to a file named "cgm.make"
+# (produced by CGM as part of any build or install), then the script will
+# locate CGM assets for your package to use.
+#
+# This script defines the following CMake variables:
+#   CGM_FOUND         defined when CGM is located, false otherwise
+#   CGM_INCLUDE_DIRS  directories containing CGM headers
+#   CGM_DEFINES       preprocessor definitions you should add to source files
+#   CGM_LIBRARIES     paths to CGM library and its dependencies
+#
+# Note that this script does not produce CGM_VERSION as that information
+# is not available in the "cgm.make" configuration file that CGM creates.
 
-if ( NOT CGM_SOURCE_DIR )
-  find_path( CGM_SOURCE_DIR
-    NAMES geom/CubitGeomConfigure.h.in
-    PATHS
-      ${CGM_INCLUDE_DIRECTORIES}
-  )
-endif ( NOT CGM_SOURCE_DIR )
+find_file(CGM_CFG cgm.make DOC "Path to cgm.make configuration file")
+if(CGM_CFG)
+  set(CGM_FOUND 1)
+  file(READ "${CGM_CFG}" CGM_CFG_DATA)
+  ##
+  ## Replace line continuations ('\' at EOL) so we don't have to parse them later
+  ##
+  string(REGEX REPLACE "\\\\\\\n" "" CGM_CFG_DATA "${CGM_CFG_DATA}")
 
-if ( NOT CGM_BINARY_DIR )
-  if ( CGM_LIBRARIES )
-    set( CGM_BUILD_SEARCH "" )
-    foreach ( LIB ${CGM_LIBRARIES} )
-      get_filename_component( PATH DIR ${LIB} )
-      set( CGM_BUILD_SEARCH "${CGM_BUILD_SEARCH};${DIR}/.." )
-    endforeach ( DIR )
-    set( DIR )
-  endif ( CGM_LIBRARIES )
-  find_path( CGM_BINARY_DIR
-    NAMES geom/CubitGeomConfigure.h
-    PATHS
-      ${CGM_BUILD_SEARCH}
-  )
-endif ( NOT CGM_BINARY_DIR )
+  ##
+  ## Find include directories
+  ##
+  string(REGEX MATCHALL "CGM_INT_INCLUDE =[^\\\n]*" _CGM_INCS "${CGM_CFG_DATA}")
+  foreach(_CGM_INC ${_CGM_INCS})
+    # Only use include directories specified by the *last*
+    # occurrence of CGM_INT_INCLUDE in the config file:
+    unset(CGM_INCLUDE_DIRS)
 
-if ( NOT CGM_LIBRARIES )
-  set( CGM_LIBSEARCH
-    ${CGM_BINARY_DIR}/init
-    ${CGM_BINARY_DIR}/geom/virtual
-    ${CGM_BINARY_DIR}/geom/facetbool
-    ${CGM_BINARY_DIR}/geom/facet
-    ${CGM_BINARY_DIR}/geom/Cholla
-    ${CGM_BINARY_DIR}/geom
-    ${CGM_BINARY_DIR}/util
-    /usr/local/lib64
-    /usr/local/bin64
-    /usr/lib64
-    /usr/bin64
-    /usr/local/lib
-    /usr/local/bin
-    /usr/lib
-    /usr/bin
-  )
-  find_library( CGM_UTIL_LIBRARY
-    NAMES cubit_util
-    PATHS ${CGM_LIBSEARCH}
-  )
+    string(REGEX REPLACE "-I" ";-I" _CGM_INC "${_CGM_INC}")
+    foreach(_CGM_IDIR ${_CGM_INC})
+      if ("${_CGM_IDIR}" MATCHES "^-I.*")
+        string(REGEX REPLACE "-I" "" _CGM_IDIR "${_CGM_IDIR}")
+        string(STRIP "${_CGM_IDIR}" _CGM_IDIR)
+        list(APPEND CGM_INCLUDE_DIRS "${_CGM_IDIR}")
+      endif()
+    endforeach()
+    # Alternately, one might:
+    #list(APPEND CGM_INCLUDE_DIRS "${_CGM_INC}")
+  endforeach()
+  #message("CGM_INCLUDE_DIRS=\"${CGM_INCLUDE_DIRS}\"")
 
-  find_library( CGM_GEOM_LIBRARY
-    NAMES cubit_geom
-    PATHS ${CGM_LIBSEARCH}
-  )
+  ##
+  ## Find preprocessor definitions
+  ##
+  string(REGEX MATCH "CGM_DEFINES =[^\\\n]*" CGM_DEFINES "${CGM_CFG_DATA}")
+  string(REGEX REPLACE "CGM_DEFINES = ([^\\\n]*)" "\\1" CGM_DEFINES "${CGM_DEFINES}")
 
-  find_library( CGM_CHOLLA_LIBRARY
-    NAMES cholla
-    PATHS ${CGM_LIBSEARCH}
-  )
+  ##
+  ## Find CGM library directory(-ies)
+  ##
+  string(REGEX MATCHALL "CGM_INT_LDFLAGS =[^\\\n]*" _CGM_LDIRS "${CGM_CFG_DATA}")
+  foreach(_CGM_LDIR ${_CGM_LDIRS})
+    set(CGM_LIB_DIRS)
+    string(REGEX REPLACE " -L" ";-L" _CGM_LDIR "${_CGM_LDIR}")
+    string(REGEX REPLACE "CGM_INT_LDFLAGS = ([^\\\n]*)" "\\1" _CGM_LDIR "${_CGM_LDIR}")
+    foreach(_CGM_LL ${_CGM_LDIR})
+      if("${_CGM_LL}" MATCHES "^-L.*")
+        string(REGEX REPLACE "-L" "" _CGM_LL "${_CGM_LL}")
+        string(STRIP "${_CGM_LL}" _CGM_LL)
+        list(APPEND CGM_LIB_DIRS "${_CGM_LL}")
+      endif()
+    endforeach()
+  endforeach()
 
-  find_library( CGM_FACET_LIBRARY
-    NAMES cubit_facet
-    PATHS ${CGM_LIBSEARCH}
-  )
+  ##
+  ## Now add dependent library directories to CGM_LIB_DIRS
+  ##
+  string(REGEX MATCH "CGM_LDFLAGS =[^\\\n]*" _CGM_LDIR "${CGM_CFG_DATA}")
+  string(REGEX REPLACE "CGM_LDFLAGS = ([^\\\n]*)" "\\1" _CGM_LDIR "${_CGM_LDIR}")
+  string(REGEX REPLACE " -L" ";-L" _CGM_LDIR "${_CGM_LDIR}")
+  set(_CGM_LDIRS)
+  foreach(_CGM_LL ${_CGM_LDIR})
+    if("${_CGM_LL}" MATCHES "^-L.*")
+      string(REGEX REPLACE "-L" "" _CGM_LL "${_CGM_LL}")
+      string(STRIP "${_CGM_LL}" _CGM_LL)
+      list(APPEND _CGM_LDIRS "${_CGM_LL}")
+    endif()
+  endforeach()
+  set(CGM_LIB_DIRS "${CGM_LIB_DIRS};${_CGM_LDIRS}")
+  #message("${CGM_LIB_DIRS}")
 
-  find_library( CGM_FACETBOOL_LIBRARY
-    NAMES cubit_facetboolstub
-    PATHS ${CGM_LIBSEARCH}
-  )
+  ##
+  ## Find the CGM library and its dependencies
+  ##
+  string(REGEX MATCHALL "CGM_LIBS =[^\\\n]*" _CGM_LIBS "${CGM_CFG_DATA}")
+  string(REGEX MATCHALL "-l[^ \t\n]+" _CGM_LIBS "${_CGM_LIBS}")
+  foreach(_CGM_LIB ${_CGM_LIBS})
+    string(REGEX REPLACE "-l" "" _CGM_LIB "${_CGM_LIB}")
+    find_library(_CGM_LIB_LOC
+      NAME "${_CGM_LIB}"
+      # Cannot quote since it contains semicolons:
+      PATHS ${CGM_LIB_DIRS}
+      NO_DEFAULT_PATH
+      NO_CMAKE_ENVIRONMENT_PATH
+      NO_CMAKE_PATH
+      NO_SYSTEM_ENVIRONMENT_PATH
+      NO_CMAKE_SYSTEM_PATH
+      )
+    #message("Lib \"${_CGM_LIB}\" @ \"${_CGM_LIB_LOC}\" paths \"${CGM_LIB_DIRS}\"")
+    if (_CGM_LIB_LOC)
+      list(APPEND CGM_LIBRARIES "${_CGM_LIB_LOC}")
+      unset(_CGM_LIB_LOC CACHE)
+      unset(_CGM_LIB_LOC)
+    else()
+      message("Could not find ${_CGM_LIB} library (part of CGM)")
+      unset(CGM_FOUND)
+    endif()
+  endforeach()
+  #message("Libs ${CGM_LIBRARIES}")
 
-  find_library( CGM_VIRTUAL_LIBRARY
-    NAMES cubit_virtual
-    PATHS ${CGM_LIBSEARCH}
-  )
 
-  find_library( CGM_INIT_LIBRARY
-    NAMES cgma_init
-    PATHS ${CGM_LIBSEARCH}
-  )
-endif ( NOT CGM_LIBRARIES )
+  ##
+  ## Kill temporary variables
+  ##
+  unset(_CGM_INCS)
+  unset(_CGM_INC)
+  unset(_CGM_IDIR)
+  unset(_CGM_LDIRS)
+  unset(_CGM_LDIR)
+  unset(_CGM_LL)
+  unset(_CGM_LIBS)
+  unset(_CGM_LIB)
+  unset(_CGM_LIB_LOC)
+else()
+  unset(CGM_FOUND)
+endif()
 
-if ( CGM_SOURCE_DIR AND CGM_BINARY_DIR AND NOT CGM_INCLUDE_DIRECTORIES )
-  set( cubit_geom_SOURCE_DIR "${CGM_SOURCE_DIR}/geom" )
-  set( cubit_geom_BINARY_DIR "${CGM_BINARY_DIR}/geom" )
-  set( cubit_util_SOURCE_DIR "${CGM_SOURCE_DIR}/util" )
-  set( cubit_util_BINARY_DIR "${CGM_BINARY_DIR}/util" )
-  set( cgma_init_SOURCE_DIR "${CGM_SOURCE_DIR}/init" )
-  set( cgma_init_BINARY_DIR "${CGM_BINARY_DIR}/init" )
-  set( CGM_INCLUDE_DIRECTORIES
-    "${cubit_util_SOURCE_DIR}"
-    "${cubit_util_BINARY_DIR}"
-    "${cubit_geom_SOURCE_DIR}"
-    "${cubit_geom_SOURCE_DIR}/ACIS"
-    "${cubit_geom_SOURCE_DIR}/Cholla"
-    "${cubit_geom_SOURCE_DIR}/facet"
-    "${cubit_geom_SOURCE_DIR}/facetbool"
-    "${cubit_geom_SOURCE_DIR}/OCC"
-    "${cubit_geom_SOURCE_DIR}/parallel"
-    "${cubit_geom_SOURCE_DIR}/SolidWorks"
-    "${cubit_geom_SOURCE_DIR}/virtual"
-    "${cubit_geom_BINARY_DIR}"
-    "${cgma_init_SOURCE_DIR}"
-    "${cgma_init_BINARY_DIR}"
-    CACHE PATH "Paths to util/CubitUtilConfigure.h.in, util/CubitUtilConfigure.h, geom/CubitGeomConfigure.h.in, AND geom/CubitGeomConfigure.h files." FORCE
-  )
-endif ( CGM_SOURCE_DIR AND CGM_BINARY_DIR AND NOT CGM_INCLUDE_DIRECTORIES )
-
-if ( CGM_UTIL_LIBRARY AND CGM_GEOM_LIBRARY AND CGM_INIT_LIBRARY AND NOT CGM_LIBRARIES )
-  # Link to libdl in case shared libraries are used.
-  set( CGM_LIBRARIES
-    "${CGM_INIT_LIBRARY}"
-    "${CGM_CHOLLA_LIBRARY}"
-    "${CGM_FACETBOOL_LIBRARY}"
-    "${CGM_FACET_LIBRARY}"
-    "${CGM_VIRTUAL_LIBRARY}"
-    "${CGM_GEOM_LIBRARY}"
-    "${CGM_UTIL_LIBRARY}"
-    dl
-    CACHE FILEPATH "Full paths to cubit_util AND cubit_geom libraries." FORCE
-  )
-endif ( CGM_UTIL_LIBRARY AND CGM_GEOM_LIBRARY AND CGM_INIT_LIBRARY AND NOT CGM_LIBRARIES )
-
-mark_as_advanced(
-  CGM_SOURCE_DIR
-  CGM_BINARY_DIR
-  CGM_INCLUDE_DIRECTORIES
-  CGM_LIBRARIES
-  CGM_UTIL_LIBRARY
-  CGM_GEOM_LIBRARY
-  CGM_INIT_LIBRARY
-  CGM_CHOLLA_LIBRARY
-  CGM_FACETBOOL_LIBRARY
-  CGM_FACET_LIBRARY
-  CGM_VIRTUAL_LIBRARY
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(CGM
+  REQUIRED_VARS CGM_INCLUDE_DIRS CGM_LIBRARIES
+  VERSION_VAR CGM_VERSION
 )
-
-if ( NOT CGM_INCLUDE_DIRECTORIES OR NOT CGM_LIBRARIES )
-  set( CGM_INCLUDE_DIRECTORIES "" CACHE PATH "Paths to util/CubitUtilConfigure.h.in, util/CubitUtilConfigure.h, geom/CubitGeomConfigure.h.in, AND geom/CubitGeomConfigure.h files." )
-  set( CGM_LIBRARIES           "" CACHE PATH "Paths to cubit_util AND cubit_geom libraries." )
-  set( CGM_FOUND 0 )
-  if ( CGM_FIND_REQUIRED )
-    message( FATAL_ERROR "CGM is required. Please set CGM_INCLUDE_DIRECTORIES and CGM_LIBRARIES or set CGM_SOURCE_DIR and CGM_BINARY_DIR" )
-  endif ( CGM_FIND_REQUIRED )
-else ( NOT CGM_INCLUDE_DIRECTORIES OR NOT CGM_LIBRARIES )
-  set( CGM_FOUND 1 )
-endif ( NOT CGM_INCLUDE_DIRECTORIES OR NOT CGM_LIBRARIES )
-

diff --git a/config/FindHDF5.cmake b/config/FindHDF5.cmake
index f14395f..e26ce95 100644
--- a/config/FindHDF5.cmake
+++ b/config/FindHDF5.cmake
@@ -1,23 +1,100 @@
 #
 # Find the native HDF5 includes and library
 #
-# HDF5_INCLUDE_DIR - where to find H5public.h, etc.
+# HDF5_INCLUDES    - where to find hdf5.h, H5public.h, etc.
 # HDF5_LIBRARIES   - List of fully qualified libraries to link against when using hdf5.
 # HDF5_FOUND       - Do not attempt to use hdf5 if "no" or undefined.
 
-FIND_PATH(HDF5_INCLUDE_DIR H5public.h
+set( HDF5_DIR "" CACHE PATH "Path to search for HDF5 header and library files" )
+set (HDF5_FOUND NO CACHE INTERNAL "Found HDF5 components successfully." )
+
+if(EXISTS "${HDF5_DIR}/share/cmake/hdf5/hdf5-config.cmake")
+  include(${HDF5_DIR}/share/cmake/hdf5/hdf5-config.cmake)
+else()
+
+FIND_PATH(HDF5_INCLUDE_DIR
+  NAMES hdf5.h H5public.h
+  PATHS ${HDF5_DIR}/include
   /usr/local/include
   /usr/include
+  /opt/local/include
 )
 
-FIND_LIBRARY(HDF5_LIBRARY hdf5
-  /usr/local/lib
-  /usr/lib
+foreach (VARIANT dl m z )
+  FIND_LIBRARY(hdf5_deplibs_${VARIANT} ${VARIANT}
+    PATHS /lib /usr/local/lib /usr/lib /opt/local/lib
+  )
+  list(APPEND HDF5_DEP_LIBRARIES ${hdf5_deplibs_${VARIANT}})
+endforeach()
+
+FIND_LIBRARY(HDF5_BASE_LIBRARY hdf5 hdf5d)
+
+FIND_LIBRARY(HDF5_BASE_LIBRARY NAMES hdf5 hdf5d
+  PATHS ${HDF5_DIR}/lib /usr/local/lib /usr/lib /opt/local/lib
 )
+FIND_LIBRARY(HDF5_HLBASE_LIBRARY hdf5_hl hdf5_hld
+  PATHS ${HDF5_DIR}/lib /usr/local/lib /usr/lib /opt/local/lib
+)
+
+IF (NOT HDF5_FOUND)
+  IF (HDF5_INCLUDE_DIR AND HDF5_BASE_LIBRARY)
+    FIND_LIBRARY(HDF5_CXX_LIBRARY hdf5_cxx
+      PATHS ${HDF5_DIR}/lib /usr/local/lib /usr/lib /opt/local/lib
+    )
+    FIND_LIBRARY(HDF5_HLCXX_LIBRARY hdf5_hl_cxx
+      PATHS ${HDF5_DIR}/lib /usr/local/lib /usr/lib /opt/local/lib
+    )
+    FIND_LIBRARY(HDF5_FORT_LIBRARY hdf5_fortran
+      PATHS ${HDF5_DIR}/lib /usr/local/lib /usr/lib /opt/local/lib
+    )
+    FIND_LIBRARY(HDF5_HLFORT_LIBRARY
+      NAMES hdf5hl_fortran hdf5_hl_fortran
+      PATHS ${HDF5_DIR}/lib /usr/local/lib /usr/lib /opt/local/lib
+    )
+    SET( HDF5_INCLUDES "${HDF5_INCLUDE_DIR}" )
+    if (HDF5_FORT_LIBRARY)
+      FIND_PATH(HDF5_FORT_INCLUDE_DIR
+        NAMES hdf5.mod
+        PATHS ${HDF5_DIR}/include
+        ${HDF5_DIR}/include/fortran
+        /usr/local/include
+        /usr/include
+        /opt/local/include
+      )
+      if (HDF5_FORT_INCLUDE_DIR AND NOT ${HDF5_FORT_INCLUDE_DIR} STREQUAL ${HDF5_INCLUDE_DIR})
+        SET( HDF5_INCLUDES "${HDF5_INCLUDES} ${HDF5_FORT_INCLUDE_DIR}" )
+      endif (HDF5_FORT_INCLUDE_DIR AND NOT ${HDF5_FORT_INCLUDE_DIR} STREQUAL ${HDF5_INCLUDE_DIR})
+      unset(HDF5_FORT_INCLUDE_DIR CACHE)
+    endif (HDF5_FORT_LIBRARY)
+    # Add the libraries based on availability
+    foreach (VARIANT CXX FORT BASE )
+      if (HDF5_HL${VARIANT}_LIBRARY)
+        list(APPEND HDF5_LIBRARIES ${HDF5_HL${VARIANT}_LIBRARY})
+      endif (HDF5_HL${VARIANT}_LIBRARY)
+      if (HDF5_${VARIANT}_LIBRARY)
+        list(APPEND HDF5_LIBRARIES ${HDF5_${VARIANT}_LIBRARY})
+      endif (HDF5_${VARIANT}_LIBRARY)
+      unset(HDF5_HL${VARIANT}_LIBRARY CACHE)
+      unset(HDF5_${VARIANT}_LIBRARY CACHE)
+    endforeach()
+    list(APPEND HDF5_LIBRARIES ${HDF5_DEP_LIBRARIES})
+    SET( HDF5_FOUND YES )
+    message (STATUS "---   HDF5 Configuration ::")
+    message (STATUS "        INCLUDES  : ${HDF5_INCLUDES}")
+    message (STATUS "        LIBRARIES : ${HDF5_LIBRARIES}")
+  ELSE (HDF5_INCLUDE_DIR AND HDF5_BASE_LIBRARY)
+    set( HDF5_FOUND NO )
+    message("finding HDF5 failed, please try to set the var HDF5_DIR")
+  ENDIF(HDF5_INCLUDE_DIR AND HDF5_BASE_LIBRARY)
+ENDIF (NOT HDF5_FOUND)
+
+include (FindPackageHandleStandardArgs)
+find_package_handle_standard_args (HDF5 "HDF5 not found, check environment variables HDF5_DIR"
+  HDF5_DIR HDF5_INCLUDES HDF5_LIBRARIES)
+
+# Now we create fake (imported) targets to be used
+if(EXISTS ${HDF5_DIR}/share/cmake/hdf5/hdf5-targets.cmake)
+  include(${HDF5_DIR}/share/cmake/hdf5/hdf5-targets.cmake)
+endif()
 
-IF(HDF5_INCLUDE_DIR)
-  IF(HDF5_LIBRARY)
-    SET( HDF5_LIBRARIES ${HDF5_LIBRARY} )
-    SET( HDF5_FOUND "YES" )
-  ENDIF(HDF5_LIBRARY)
-ENDIF(HDF5_INCLUDE_DIR)
+endif(EXISTS "${HDF5_DIR}/share/cmake/hdf5/hdf5-config.cmake")

diff --git a/config/FindNetCDF.cmake b/config/FindNetCDF.cmake
index f9e655c..bcb8be3 100644
--- a/config/FindNetCDF.cmake
+++ b/config/FindNetCDF.cmake
@@ -1,75 +1,132 @@
 #
 # Find NetCDF include directories and libraries
 #
-# NetCDF_INCLUDE_DIRECTORIES - where to find netcdf.h
+# NetCDF_INCLUDES            - list of include paths to find netcdf.h
 # NetCDF_LIBRARIES           - list of libraries to link against when using NetCDF
 # NetCDF_FOUND               - Do not attempt to use NetCDF if "no", "0", or undefined.
 
-set( NetCDF_PREFIX "" CACHE PATH "Path to search for NetCDF header and library files" )
+set (NetCDF_DIR "" CACHE PATH "Path to search for NetCDF header and library files" )
+set (NetCDF_FOUND NO CACHE INTERNAL "Found NetCDF components successfully." )
 
-find_path( NetCDF_INCLUDE_DIRECTORIES netcdf.h
+find_path( NetCDF_INCLUDE_DIR netcdf.h
+  ${NetCDF_DIR}
+  ${NetCDF_DIR}/include
   /usr/local/include
   /usr/include
 )
 
 find_library( NetCDF_C_LIBRARY
   NAMES netcdf
-  ${NetCDF_PREFIX}
-  ${NetCDF_PREFIX}/lib64
-  ${NetCDF_PREFIX}/lib
+  HINTS ${NetCDF_DIR}
+  ${NetCDF_DIR}/lib64
+  ${NetCDF_DIR}/lib
   /usr/local/lib64
   /usr/lib64
-  /usr/lib64/netcdf-3
+  /usr/lib64/netcdf
   /usr/local/lib
   /usr/lib
-  /usr/lib/netcdf-3
+  /usr/lib/netcdf
 )
 
 find_library( NetCDF_CXX_LIBRARY
   NAMES netcdf_c++
-  ${NetCDF_PREFIX}
-  ${NetCDF_PREFIX}/lib64
-  ${NetCDF_PREFIX}/lib
+  HINTS ${NetCDF_DIR}
+  ${NetCDF_DIR}/lib64
+  ${NetCDF_DIR}/lib
   /usr/local/lib64
   /usr/lib64
-  /usr/lib64/netcdf-3
+  /usr/lib64/netcdf
   /usr/local/lib
   /usr/lib
-  /usr/lib/netcdf-3
+  /usr/lib/netcdf
 )
 
 find_library( NetCDF_FORTRAN_LIBRARY
   NAMES netcdf_g77 netcdf_ifc netcdf_x86_64
-  ${NetCDF_PREFIX}
-  ${NetCDF_PREFIX}/lib64
-  ${NetCDF_PREFIX}/lib
+  HINTS ${NetCDF_DIR}
+  ${NetCDF_DIR}/lib64
+  ${NetCDF_DIR}/lib
   /usr/local/lib64
   /usr/lib64
-  /usr/lib64/netcdf-3
+  /usr/lib64/netcdf
   /usr/local/lib
   /usr/lib
-  /usr/lib/netcdf-3
+  /usr/lib/netcdf
 )
 
-set( NetCDF_LIBRARIES
-  ${NetCDF_C_LIBRARY}
-  ${NetCDF_CXX_LIBRARY}
-)
+IF (NOT NetCDF_FOUND)
+  if ( NetCDF_INCLUDE_DIR AND NetCDF_C_LIBRARY )
+    set( NetCDF_FOUND YES )
+    set(NetCDF_INCLUDES "${NetCDF_INCLUDE_DIR}")
+    set(NetCDF_LIBRARIES ${NetCDF_C_LIBRARY})
+    if ( NetCDF_CXX_LIBRARY )
+      set(NetCDF_LIBRARIES ${NetCDF_LIBRARIES} ${NetCDF_CXX_LIBRARY})
+    endif ( NetCDF_CXX_LIBRARY )
+    if ( NetCDF_FORTRAN_LIBRARY )
+      set(NetCDF_LIBRARIES ${NetCDF_LIBRARIES} ${NetCDF_FORTRAN_LIBRARY})
+    endif ( NetCDF_FORTRAN_LIBRARY )
+    message (STATUS "---   NetCDF Configuration ::")
+    message (STATUS "        INCLUDES  : ${NetCDF_INCLUDES}")
+    message (STATUS "        LIBRARIES : ${NetCDF_LIBRARIES}")
+  else ( NetCDF_INCLUDE_DIR AND NetCDF_C_LIBRARY )
+    set( NetCDF_FOUND NO )
+    message("finding NetCDF failed, please try to set the var NetCDF_DIR")
+  endif ( NetCDF_INCLUDE_DIR AND NetCDF_C_LIBRARY )
+ENDIF (NOT NetCDF_FOUND)
 
-set( NetCDF_FORTRAN_LIBRARIES
-  ${NetCDF_FORTRAN_LIBRARY}
+mark_as_advanced(
+  NetCDF_DIR
+  NetCDF_INCLUDES
+  NetCDF_LIBRARIES
 )
 
-if ( NetCDF_INCLUDE_DIRECTORIES AND NetCDF_LIBRARIES )
-  set( NetCDF_FOUND 1 )
-else ( NetCDF_INCLUDE_DIRECTORIES AND NetCDF_LIBRARIES )
-  set( NetCDF_FOUND 0 )
-endif ( NetCDF_INCLUDE_DIRECTORIES AND NetCDF_LIBRARIES )
+IF (MOAB_USE_MPI)
+  set (PNetCDF_DIR "" CACHE PATH "Path to search for parallel NetCDF header and library files" )
+  set (PNetCDF_FOUND NO CACHE INTERNAL "Found parallel NetCDF components successfully." )
 
-mark_as_advanced(
-  NetCDF_PREFIX
-  NetCDF_INCLUDE_DIRECTORIES
-  NetCDF_C_LIBRARY
-  NetCDF_CXX_LIBRARY
-  NetCDF_FORTRAN_LIBRARY
-)
+  find_path( PNetCDF_INCLUDES pnetcdf.h
+    ${PNetCDF_DIR}
+    ${PNetCDF_DIR}/include
+    /usr/local/include
+    /usr/include
+  )
+
+  find_library( PNetCDF_LIBRARIES
+    NAMES pnetcdf
+    HINTS ${PNetCDF_DIR}
+    ${PNetCDF_DIR}/lib64
+    ${PNetCDF_DIR}/lib
+    /usr/local/lib64
+    /usr/lib64
+    /usr/lib64/pnetcdf
+    /usr/local/lib
+    /usr/lib
+    /usr/lib/pnetcdf
+  )
+
+  IF (NOT PNetCDF_FOUND)
+    if ( PNetCDF_INCLUDES AND PNetCDF_LIBRARIES )
+      set( PNetCDF_FOUND YES )
+      message (STATUS "---   PNetCDF Configuration ::")
+      message (STATUS "        INCLUDES  : ${PNetCDF_INCLUDES}")
+      message (STATUS "        LIBRARIES : ${PNetCDF_LIBRARIES}")
+    else ( PNetCDF_INCLUDES AND PNetCDF_LIBRARIES )
+      set( NetCDF_FOUND NO )
+      message("finding PNetCDF failed, please try to set the var PNetCDF_DIR")
+    endif ( PNetCDF_INCLUDES AND PNetCDF_LIBRARIES )
+  ENDIF (NOT PNetCDF_FOUND)
+
+  mark_as_advanced(
+    PNetCDF_DIR
+    PNetCDF_INCLUDES
+    PNetCDF_LIBRARIES
+  )
+ELSE (MOAB_USE_MPI)
+  message (STATUS "Not configuring with PNetCDF since MPI installation not specified")
+ENDIF()
+
+include (FindPackageHandleStandardArgs)
+find_package_handle_standard_args (NetCDF "NetCDF not found, check environment variables NetCDF_DIR"
+  NetCDF_DIR NetCDF_INCLUDES NetCDF_LIBRARIES)
+find_package_handle_standard_args (PNetCDF "PNetCDF not found, check environment variables PNetCDF_DIR"
+  PNetCDF_DIR PNetCDF_INCLUDES PNetCDF_LIBRARIES)

diff --git a/config/FindZoltan.cmake b/config/FindZoltan.cmake
new file mode 100644
index 0000000..8c6b259
--- /dev/null
+++ b/config/FindZoltan.cmake
@@ -0,0 +1,91 @@
+#
+# Find Zoltan include directories and libraries
+#
+# Zoltan_INCLUDES            - list of include paths to find zoltan.h
+# Zoltan_LIBRARIES           - list of libraries to link against when using Zoltan
+# Zoltan_FOUND               - Do not attempt to use Zoltan if "no", "0", or undefined.
+
+set (Zoltan_DIR "" CACHE PATH "Path to search for Zoltan header and library files" )
+set (Zoltan_FOUND NO CACHE INTERNAL "Found Zoltan components successfully." )
+
+find_path( Zoltan_INCLUDE_DIR zoltan.h
+  ${Zoltan_DIR}
+  ${Zoltan_DIR}/include
+  /usr/local/include
+  /usr/include
+)
+
+find_library( Zoltan_LIBRARY
+  NAMES zoltan
+  HINTS ${Zoltan_DIR}
+  ${Zoltan_DIR}/lib64
+  ${Zoltan_DIR}/lib
+  /usr/local/lib64
+  /usr/lib64
+  /usr/lib64/zoltan
+  /usr/local/lib
+  /usr/lib
+  /usr/lib/zoltan
+)
+
+
+macro (Zoltan_GET_VARIABLE makefile name var)
+  set (${var} "NOTFOUND" CACHE INTERNAL "Cleared" FORCE)
+  execute_process (COMMAND ${CMAKE_BUILD_TOOL} -f ${${makefile}} show VARIABLE=${name}
+    OUTPUT_VARIABLE ${var}
+    RESULT_VARIABLE zoltan_return)
+endmacro (Zoltan_GET_VARIABLE)
+
+macro (Zoltan_GET_ALL_VARIABLES)
+  if (NOT zoltan_config_current)
+    # A temporary makefile to probe this Zoltan component's configuration
+    # The current inspection is based on Zoltan-3.6 installation
+    set (zoltan_config_makefile "${CMAKE_CURRENT_BINARY_DIR}/Makefile.zoltan")
+    file (WRITE ${zoltan_config_makefile}
+      "## This file was autogenerated by FindZoltan.cmake
+include ${Zoltan_INCLUDE_DIR}/Makefile.export.zoltan
+include ${Zoltan_INCLUDE_DIR}/Makefile.export.zoltan.macros
+show :
+	- at echo -n \${\${VARIABLE}}")
+    Zoltan_GET_VARIABLE (zoltan_config_makefile ZOLTAN_CPPFLAGS    zoltan_extra_cppflags)
+    Zoltan_GET_VARIABLE (zoltan_config_makefile ZOLTAN_EXTRA_LIBS  zoltan_extra_libs)
+    Zoltan_GET_VARIABLE (zoltan_config_makefile ZOLTAN_LDFLAGS     zoltan_ldflags)
+    
+    file (REMOVE ${zoltan_config_makefile})
+    SET(tmp_incs "-I${Zoltan_INCLUDE_DIR} ${zoltan_extra_cppflags}")
+    resolve_includes(Zoltan_INCLUDES ${tmp_incs})
+    SET(tmp_libs "${Zoltan_LIBRARY} ${zoltan_ldflags} ${zoltan_extra_libs}")
+    resolve_libraries (Zoltan_LIBRARIES "${tmp_libs}")
+  endif ()
+endmacro (Zoltan_GET_ALL_VARIABLES)
+
+
+IF (NOT Zoltan_FOUND)
+  if ( Zoltan_INCLUDE_DIR AND Zoltan_LIBRARY )
+    set( Zoltan_FOUND YES )
+    if(EXISTS ${Zoltan_INCLUDE_DIR}/Makefile.export.zoltan)
+      include (ResolveCompilerPaths)
+      Zoltan_GET_ALL_VARIABLES()
+    else(EXISTS ${Zoltan_INCLUDE_DIR}/Makefile.export.zoltan)
+      SET(Zoltan_INCLUDES ${Zoltan_INCLUDE_DIR})
+      SET(Zoltan_LIBRARIES ${Zoltan_LIBRARY})
+    endif(EXISTS ${Zoltan_INCLUDE_DIR}/Makefile.export.zoltan)
+    message (STATUS "---   Zoltan Configuration ::")
+    message (STATUS "        INCLUDES  : ${Zoltan_INCLUDES}")
+    message (STATUS "        LIBRARIES : ${Zoltan_LIBRARIES}")
+  else ( Zoltan_INCLUDE_DIR AND Zoltan_LIBRARY )
+    set( Zoltan_FOUND NO )
+    message("finding Zoltan failed, please try to set the var Zoltan_DIR")
+  endif ( Zoltan_INCLUDE_DIR AND Zoltan_LIBRARY )
+ENDIF (NOT Zoltan_FOUND)
+
+mark_as_advanced(
+  Zoltan_DIR
+  Zoltan_INCLUDES
+  Zoltan_LIBRARIES
+)
+
+include (FindPackageHandleStandardArgs)
+find_package_handle_standard_args (Zoltan "Zoltan not found, check environment variables Zoltan_DIR"
+  Zoltan_DIR Zoltan_INCLUDES Zoltan_LIBRARIES)
+

diff --git a/config/ITAPSFortranMangling.cmake b/config/ITAPSFortranMangling.cmake
index 0dcd687..5b2c381 100644
--- a/config/ITAPSFortranMangling.cmake
+++ b/config/ITAPSFortranMangling.cmake
@@ -2,14 +2,14 @@
 
 # MACRO ( itaps_fortran_mangle input_file output_file prefix )
 
-SET( match_expr "^[ \\t]*void[ \\t]+${prefix}_([a-z][_a-zA-Z0-9]*)[ \\t]*\\(.*$" )
+SET( match_expr "^[ \\t]*void[ \\t]+${prefix}_([a-zA-Z][_a-zA-Z0-9]*)[ \\t]*\\(.*$" )
 FILE( STRINGS ${input_file} data REGEX ${match_expr} )
-FILE( WRITE ${output_file} "#include \"MOAB_FCDefs.h\"\n#ifdef FC_FUNC_\n\n" )
+FILE( WRITE ${output_file} "#include \"MOAB_FCDefs.h\"\n#ifdef MOAB_FC_FUNC_\n\n" )
 FOREACH( line ${data} )
   STRING(REGEX REPLACE ${match_expr} "${prefix}_\\1" func ${line})
   STRING(TOUPPER ${func} upper)
   STRING(TOLOWER ${func} lower)
-  FILE( APPEND ${output_file}  "#define ${func} FC_FUNC_( ${upper}, ${lower} )\n" )
+  FILE( APPEND ${output_file}  "#define ${func} ${DEF}_FC_FUNC_( ${lower}, ${upper} )\n" )
 ENDFOREACH( line )
 FILE( APPEND ${output_file} "\n#endif\n" )
 

diff --git a/config/ResolveCompilerPaths.cmake b/config/ResolveCompilerPaths.cmake
new file mode 100644
index 0000000..259dd99
--- /dev/null
+++ b/config/ResolveCompilerPaths.cmake
@@ -0,0 +1,99 @@
+# ResolveCompilerPaths - this module defines two macros
+#
+# RESOLVE_LIBRARIES (XXX_LIBRARIES LINK_LINE)
+#  This macro is intended to be used by FindXXX.cmake modules.
+#  It parses a compiler link line and resolves all libraries
+#  (-lfoo) using the library path contexts (-L/path) in scope.
+#  The result in XXX_LIBRARIES is the list of fully resolved libs.
+#  Example:
+#
+#    RESOLVE_LIBRARIES (FOO_LIBRARIES "-L/A -la -L/B -lb -lc -ld")
+#
+#  will be resolved to
+#
+#    FOO_LIBRARIES:STRING="/A/liba.so;/B/libb.so;/A/libc.so;/usr/lib/libd.so"
+#
+#  if the filesystem looks like
+#
+#    /A:       liba.so         libc.so
+#    /B:       liba.so libb.so
+#    /usr/lib: liba.so libb.so libc.so libd.so
+#
+#  and /usr/lib is a system directory.
+#
+#  Note: If RESOLVE_LIBRARIES() resolves a link line differently from
+#  the native linker, there is a bug in this macro (please report it).
+#
+# RESOLVE_INCLUDES (XXX_INCLUDES INCLUDE_LINE)
+#  This macro is intended to be used by FindXXX.cmake modules.
+#  It parses a compile line and resolves all includes
+#  (-I/path/to/include) to a list of directories.  Other flags are ignored.
+#  Example:
+#
+#    RESOLVE_INCLUDES (FOO_INCLUDES "-I/A -DBAR='\"irrelevant -I/string here\"' -I/B")
+#
+#  will be resolved to
+#
+#    FOO_INCLUDES:STRING="/A;/B"
+#
+#  assuming both directories exist.
+#  Note: as currently implemented, the -I/string will be picked up mistakenly (cry, cry)
+
+macro (RESOLVE_LIBRARIES LIBS LINK_LINE)
+  string (REGEX MATCHALL "((-L|-l|-Wl)([^\" ]+|\"[^\"]+\")|/[^\" ]+(a|so|dylib|dll))" _all_tokens "${LINK_LINE}")
+  set (_libs_found)
+  set (_directory_list)
+  foreach (token ${_all_tokens})
+    # message ("Resolving token = ${token}")
+    if (token MATCHES "-L([^\" ]+|\"[^\"]+\")")
+      # If it's a library path, add it to the list
+      string (REGEX REPLACE "^-L" "" token ${token})
+      string (REGEX REPLACE "//" "/" token ${token})
+      list (APPEND _directory_list ${token})
+      #message ("Resolved token with directory = ${token}")
+    elseif (token MATCHES "^(-l([^\" ]+|\"[^\"]+\")|/[^\" ]+(a|so|dylib|dll))")
+      # It's a library, resolve the path by looking in the list and then (by default) in system directories
+      string (REGEX REPLACE "^-l" "" token ${token})
+      set (_root)
+      if (token MATCHES "^/")	# We have an absolute path, add root to the search path
+	set (_root "/")
+      endif (token MATCHES "^/")
+      set (_lib "NOTFOUND" CACHE FILEPATH "Cleared" FORCE)
+      find_library (_lib ${token} HINTS ${_directory_list} ${_root})
+      #message ("Resolving shared library with directory = ${_lib}")
+      if (_lib)
+	string (REPLACE "//" "/" _lib ${_lib})
+        list (APPEND _libs_found ${_lib})
+      else (_lib)
+        message (STATUS "Unable to find library ${token}")
+        #message ("Unable to find library ${token}")
+      endif (_lib)
+    endif (token MATCHES "-L([^\" ]+|\"[^\"]+\")")
+  endforeach (token)
+  set (_lib "NOTFOUND" CACHE INTERNAL "Scratch variable" FORCE)
+  # only the LAST occurrence of each library is required since there should be no circular dependencies
+  if (_libs_found)
+    list (REVERSE _libs_found)
+    list (REMOVE_DUPLICATES _libs_found)
+    list (REVERSE _libs_found)
+  endif (_libs_found)
+  set (${LIBS} "${_libs_found}")
+endmacro (RESOLVE_LIBRARIES)
+
+macro (RESOLVE_INCLUDES INCS COMPILE_LINE)
+  string (REGEX MATCHALL "-I([^\" ]+|\"[^\"]+\")" _all_tokens "${COMPILE_LINE}")
+  set (_incs_found)
+  foreach (token ${_all_tokens})
+    string (REGEX REPLACE "^-I" "" token ${token})
+    string (REGEX REPLACE "//" "/" token ${token})
+    if (EXISTS ${token})
+      list (APPEND _incs_found ${token})
+    else (EXISTS ${token})
+      message (STATUS "Include directory ${token} does not exist")
+    endif (EXISTS ${token})
+  endforeach (token)
+  if (_incs_found)
+    list (REMOVE_DUPLICATES _incs_found)
+  endif(_incs_found)
+  set (${INCS} "${_incs_found}")
+endmacro (RESOLVE_INCLUDES)

diff --git a/examples/HelloParMOAB.cpp b/examples/HelloParMOAB.cpp
index 5bcc4ab..3611a8d 100644
--- a/examples/HelloParMOAB.cpp
+++ b/examples/HelloParMOAB.cpp
@@ -2,6 +2,12 @@
  * \brief Read mesh into MOAB and resolve/exchange/report shared and ghosted entities \n
  * <b>To run</b>: mpiexec -np 4 HelloMoabPar [filename]\n
  *
+ *  It shows how to load the mesh independently, on multiple
+ *  communicators (with second argument, the number of comms)
+ *
+ *
+ *
+ *  mpiexec -np 8 HelloMoabPar [filename] [nbComms]
  */
 
 #include "moab/ParallelComm.hpp"
@@ -26,6 +32,10 @@ int main(int argc, char **argv)
     test_file_name = argv[1];
   }  
 
+  int nbComms = 1;
+  if (argc > 2)
+    nbComms = atoi(argv[2]);
+
   options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
 
   // Get MOAB instance and read the file with the specified options
@@ -33,15 +43,38 @@ int main(int argc, char **argv)
   if (NULL == mb)
     return 1;
 
+  MPI_Comm comm ;
+  int global_rank, global_size;
+  MPI_Comm_rank( MPI_COMM_WORLD, &global_rank );
+  MPI_Comm_rank( MPI_COMM_WORLD, &global_size );
+
+  int color = global_rank%nbComms; // for each angle group a different color
+  if (nbComms>1)
+  {
+    // split the communicator, into ngroups = nbComms
+    MPI_Comm_split( MPI_COMM_WORLD, color, global_rank, &comm );
+  }
+  else
+  {
+    comm = MPI_COMM_WORLD;
+  }
   // Get the ParallelComm instance
-  ParallelComm* pcomm = new ParallelComm(mb, MPI_COMM_WORLD);
+  ParallelComm* pcomm = new ParallelComm(mb, comm);
   int nprocs = pcomm->proc_config().proc_size();
   int rank = pcomm->proc_config().proc_rank();
-  MPI_Comm comm = pcomm->proc_config().proc_comm();
+  MPI_Comm rcomm = pcomm->proc_config().proc_comm();
+  assert(rcomm==comm);
+  if (global_rank == 0)
+    cout<< " global rank:" <<global_rank << " color:" << color << " rank:" << rank << " of " << nprocs << " processors\n";
+
+  if (global_rank == 1)
+    cout<< " global rank:" <<global_rank << " color:" << color << " rank:" << rank << " of " << nprocs << " processors\n";
 
-  if (rank == 0)
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  if (global_rank == 0)
     cout << "Reading file " << test_file_name << "\n  with options: " << options << endl
-         << " on " << nprocs << " processors\n";
+         << " on " << nprocs << " processors on " << nbComms << " communicator(s) \n";
 
   ErrorCode rval = mb->load_file(test_file_name.c_str(), 0, options.c_str());
   if (rval != MB_SUCCESS) {
@@ -69,9 +102,9 @@ int main(int argc, char **argv)
   for (int i = 0; i < 4; i++)
     nums[i] = (int)owned_entities.num_of_dimension(i);
   vector<int> rbuf(nprocs*4, 0);
-  MPI_Gather(nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, MPI_COMM_WORLD);
+  MPI_Gather(nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm);
   // Print the stats gathered:
-  if (rank == 0) {
+  if (global_rank == 0) {
     for (int i = 0; i < nprocs; i++)
       cout << " Shared, owned entities on proc " << i << ": " << rbuf[4*i] << " verts, " <<
           rbuf[4*i + 1] << " edges, " << rbuf[4*i + 2] << " faces, " << rbuf[4*i + 3] << " elements" << endl;
@@ -109,7 +142,7 @@ int main(int argc, char **argv)
 
   // gather the statistics on processor 0
   MPI_Gather(nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm);
-  if (rank == 0) {
+  if (global_rank == 0) {
     cout << " \n\n After exchanging one ghost layer: \n";
     for (int i = 0; i < nprocs; i++) {
       cout << " Shared, owned entities on proc " << i << ": " << rbuf[4*i] << " verts, " <<

diff --git a/itaps/CMakeLists.txt b/itaps/CMakeLists.txt
new file mode 100644
index 0000000..70978bb
--- /dev/null
+++ b/itaps/CMakeLists.txt
@@ -0,0 +1,17 @@
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+
+set( SIZEOF_VOID_P ${CMAKE_SIZEOF_VOID_P} )
+
+configure_file(iBase_f.h.in "${CMAKE_CURRENT_BINARY_DIR}/iBase_f.h" @ONLY)
+
+set( BASE_INCLUDE_HEADERS iBase.h ${CMAKE_CURRENT_BINARY_DIR}/iBase_f.h)
+
+if(ENABLE_IMESH )
+  add_subdirectory(imesh)
+endif()
+
+if(ENABLE_IGEOM )
+  add_subdirectory(igeom)
+endif()
+
+install(FILES ${BASE_INCLUDE_HEADERS} DESTINATION include )

diff --git a/itaps/igeom/CMakeLists.txt b/itaps/igeom/CMakeLists.txt
new file mode 100644
index 0000000..828d0b7
--- /dev/null
+++ b/itaps/igeom/CMakeLists.txt
@@ -0,0 +1,21 @@
+ADD_CUSTOM_COMMAND(
+  OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/FBiGeom_protos.h
+  COMMAND ${CMAKE_COMMAND}
+          -DDEF:STRING=MOAB
+          -Dinput_file:STRING=${CMAKE_CURRENT_SOURCE_DIR}/FBiGeom.h
+          -Doutput_file:STRING=${CMAKE_CURRENT_BINARY_DIR}/FBiGeom_protos.h
+          -Dprefix:STRING=FBiGeom
+          -P ${CMAKE_SOURCE_DIR}/config/ITAPSFortranMangling.cmake
+  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/iMesh.h )
+
+set( FBiGEOM_SOURCE FBiGeom_MOAB.hpp FBiGeom_MOAB.cpp )
+set( FBiGEOM_INCLUDE_HEADERS FBiGeom.h
+                             ${CMAKE_CURRENT_BINARY_DIR}/FBiGeom_protos.h
+	                           FBiGeom_f.h )
+
+add_library( FBiGeomMOAB
+  ${FBiGEOM_SOURCE}
+  ${FBiGEOM_INCLUDE_HEADERS} )
+
+target_link_libraries( FBiGeomMOAB MOAB )
+

diff --git a/itaps/imesh/CMakeLists.txt b/itaps/imesh/CMakeLists.txt
index e98c0b1..204ce93 100644
--- a/itaps/imesh/CMakeLists.txt
+++ b/itaps/imesh/CMakeLists.txt
@@ -3,69 +3,84 @@
 ADD_CUSTOM_COMMAND(
   OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/iMesh_protos.h
   COMMAND ${CMAKE_COMMAND}
+          -DDEF:STRING=MOAB
           -Dinput_file:STRING=${CMAKE_CURRENT_SOURCE_DIR}/iMesh.h
           -Doutput_file:STRING=${CMAKE_CURRENT_BINARY_DIR}/iMesh_protos.h
           -Dprefix:STRING=iMesh
           -P ${CMAKE_SOURCE_DIR}/config/ITAPSFortranMangling.cmake
-  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/iMesh.h
-  )
+  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/iMesh.h )
 ADD_CUSTOM_COMMAND(
   OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/iMesh_extensions_protos.h
   COMMAND ${CMAKE_COMMAND}
+          -DDEF:STRING=MOAB
           -Dinput_file:STRING=${CMAKE_CURRENT_SOURCE_DIR}/iMesh_extensions.h
           -Doutput_file:STRING=${CMAKE_CURRENT_BINARY_DIR}/iMesh_extensions_protos.h
           -Dprefix:STRING=iMesh
           -P ${CMAKE_SOURCE_DIR}/config/ITAPSFortranMangling.cmake
-  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/iMesh_extensions.h
-  )
-ADD_CUSTOM_COMMAND(
-  OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/iMeshP_protos.h
-  COMMAND ${CMAKE_COMMAND}
-          -Dinput_file:STRING=${CMAKE_CURRENT_SOURCE_DIR}/iMeshP.h
-          -Doutput_file:STRING=${CMAKE_CURRENT_BINARY_DIR}/iMeshP_protos.h
-          -Dprefix:STRING=iMeshP
-          -P ${CMAKE_SOURCE_DIR}/config/ITAPSFortranMangling.cmake
-  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/iMeshP.h
-  )
+  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/iMesh_extensions.h )
 
 set ( MOAB_IMESH_SRCS
-      iMesh_MOAB.cpp
-      ${CMAKE_CURRENT_BINARY_DIR}/iMesh_protos.h
-      ${CMAKE_CURRENT_BINARY_DIR}/iMesh_extensions_protos.h
-      ${CMAKE_CURRENT_BINARY_DIR}/iMeshP_protos.h )
+          iMesh_MOAB.hpp iMesh_MOAB.cpp
+          MBIter.hpp )
+set ( MOAB_IMESH_LIB_INCLUDES
+          iMesh.h
+          iMesh_extensions.h
+          ${CMAKE_CURRENT_BINARY_DIR}/iMesh_extensions_protos.h
+          iMesh_f.h
+          ${CMAKE_CURRENT_BINARY_DIR}/iMesh_protos.h
+          MBiMesh.hpp )
 
-include_directories(
-    ${MOAB_BINARY_DIR}
-    ${MOAB_BINARY_DIR}/src
-    ${MOAB_BINARY_DIR}/src/parallel
-    ${MOAB_SOURCE_DIR}/src
-    ${MOAB_SOURCE_DIR}/itaps
-    ${MOAB_SOURCE_DIR}/itaps/imesh
-  )
+if ( MOAB_USE_MPI )
+  include_directories( ${CMAKE_SOURCE_DIR}/src/parallel
+                       ${CMAKE_BINARY_DIR}/src/parallel)
+  ADD_CUSTOM_COMMAND(
+    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/iMeshP_extensions_protos.h
+    COMMAND ${CMAKE_COMMAND}
+            -Dinput_file:STRING=${CMAKE_CURRENT_SOURCE_DIR}/iMeshP_extensions.h
+            -Doutput_file:STRING=${CMAKE_CURRENT_BINARY_DIR}/iMeshP_extensions_protos.h
+            -Dprefix:STRING=iMesh
+            -P ${CMAKE_SOURCE_DIR}/config/ITAPSFortranMangling.cmake
+    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/iMesh_extensions.h)
+  ADD_CUSTOM_COMMAND(
+    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/iMeshP_protos.h
+    COMMAND ${CMAKE_COMMAND}
+            -DDEF:STRING=MOAB
+            -Dinput_file:STRING=${CMAKE_CURRENT_SOURCE_DIR}/iMeshP.h
+            -Doutput_file:STRING=${CMAKE_CURRENT_BINARY_DIR}/iMeshP_protos.h
+            -Dprefix:STRING=iMeshP
+            -P ${CMAKE_SOURCE_DIR}/config/ITAPSFortranMangling.cmake
+    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/iMeshP.h )
+  set(MOAB_IMESH_SRCS ${MOAB_IMESH_SRCS}
+                      iMeshP_MOAB.cpp)
+  set(MOAB_IMESH_LIB_INCLUDES
+          ${MOAB_IMESH_LIB_INCLUDES}
+          ${CMAKE_CURRENT_BINARY_DIR}/iMeshP_protos.h
+          ${CMAKE_CURRENT_BINARY_DIR}/iMeshP_extensions_protos.h )
+endif()
+
+include_directories( ${CMAKE_CURRENT_SOURCE_DIR}
+                     ${CMAKE_CURRENT_BINARY_DIR}
+                     ${CMAKE_BINARY_DIR}/src
+                     ${CMAKE_SOURCE_DIR}/src
+                     ${CMAKE_SOURCE_DIR}/itaps
+                     ${CMAKE_BINARY_DIR}/itaps )
 
 if ( MOAB_USE_HDF5 AND HDF5_FOUND )
-  include_directories(
-    ${HDF5_INCLUDE_DIRECTORIES}
-    ${MOAB_SOURCE_DIR}/src/io/mhdf/include
-  )
-endif ( MOAB_USE_HDF5 AND HDF5_FOUND )
+  include_directories( ${HDF5_INCLUDE_DIRECTORIES}
+                       ${CMAKE_SOURCE_DIR}/src/io/mhdf/include )
+endif( )
 
-if ( MOAB_USE_MPI AND MPI_FOUND )
-  LIST ( APPEND MOAB_IMESH_SRCS
-         iMeshP_MOAB.cpp
-         ${CMAKE_CURRENT_BINARY_DIR}/iMeshP_protos.h )
+set_source_files_properties( ${MOAB_IMESH_SRCS} COMPILE_FLAGS "${MOAB_DEFINES}" )
 
-  include_directories(
-    ${MOAB_SOURCE_DIR}/src/parallel
-  )
-endif ( MOAB_USE_MPI AND MPI_FOUND )
+add_library( iMesh ${MOAB_IMESH_SRCS}
+                   ${MOAB_IMESH_LIB_INCLUDES} )
 
-set_source_files_properties( ${MOAB_IMESH_SRCS}
-  COMPILE_FLAGS "${MOAB_DEFINES}"
-)
+target_link_libraries( iMesh MOAB )
 
-add_library( iMesh
-  ${MOAB_IMESH_SRCS}
-  )
+install(TARGETS iMesh EXPORT MOABTargets
+                      RUNTIME DESTINATION bin
+                      LIBRARY DESTINATION lib
+                      ARCHIVE DESTINATION lib )
+set( MOAB_INSTALL_TARGETS ${MOAB_INSTALL_TARGETS} iMesh)
 
-target_link_libraries( iMesh MOAB )
+install(FILES ${MOAB_IMESH_LIB_INCLUDES} DESTINATION include)

diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 5b5cd8f..d7b1506 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -1,198 +1,230 @@
-
-  # MOAB Library
-  set ( MOABCORE_LIB_SRCS
-    AdaptiveKDTree.cpp
-    AEntityFactory.cpp
-    AffineXform.cpp
-    AxisBox.cpp
-    BitPage.cpp
-    BitTag.cpp
-    BoundBox.cpp
-    BSPTree.cpp
-    BSPTreePoly.cpp
-    CN.cpp
-    CartVect.cpp
-    Core.cpp
-    DebugOutput.cpp
-    DenseTag.cpp
-    DualTool.cpp
-    EntitySequence.cpp
-    Factory.cpp
-    FileOptions.cpp
-    GeomUtil.cpp
-    GeomTopoTool.cpp
-    HigherOrderFactory.cpp
-    HomXform.cpp
-    MeshSet.cpp
-    MeshSetSequence.cpp
-    MeshTag.cpp
-    MeshTopoUtil.cpp
-    OrientedBox.cpp
-    OrientedBoxTreeTool.cpp
-    PolyElementSeq.cpp
-    Range.cpp
-    RangeSeqIntersectIter.cpp
-    ReadUtil.cpp
-    ScdElementData.cpp
-    ScdInterface.cpp
-    ScdVertexData.cpp
-    SequenceData.cpp
-    SequenceManager.cpp
-    SetIterator.cpp
-    Skinner.cpp
-    SparseTag.cpp
-    StructuredElementSeq.cpp
-    SweptElementData.cpp
-    SweptElementSeq.cpp
-    SweptVertexData.cpp
-    SysUtil.cpp
-    TagInfo.cpp
-    Tree.cpp
-    TupleList.cpp
-    Types.cpp
-    TypeSequenceManager.cpp
-    UnstructuredElemSeq.cpp
-    Util.cpp
-    VarLenDenseTag.cpp
-    VarLenSparseTag.cpp
-    VertexSequence.cpp
-    WriteUtil.cpp
-  )
-
-  set ( MOABPTLOC_LIB_SRCS
-    lotte/findpt.c
-    lotte/errmem.c
-    lotte/poly.c
-    lotte/tensor.c
-  )
-
-  set ( MOABIO_LIB_SRCS
-    io/IODebugTrack.cpp
-    io/ExoIIUtil.cpp
-    io/FileTokenizer.cpp
-    io/GmshUtil.cpp
-    io/ReadABAQUS.cpp
-    io/ReadGmsh.cpp
-    io/ReadIDEAS.cpp
-    io/ReadMCNP5.cpp
-    io/ReadNASTRAN.cpp
-    io/ReadSmf.cpp
-    io/ReadSms.cpp
-    io/ReadSTL.cpp
-    io/ReadTemplate.cpp
-    io/ReadTetGen.cpp
-    io/ReadTxt.cpp
-    io/ReadVtk.cpp
-    io/SMF_State.cpp
-    io/Tqdcfr.cpp
-    io/VtkUtil.cpp
-    io/WriteAns.cpp
-    io/WriteGMV.cpp
-    io/WriteGmsh.cpp
-    io/WriteSTL.cpp
-    io/WriteSmf.cpp
-    io/WriteTemplate.cpp
-    io/WriteVtk.cpp
-    ReaderWriterSet.cpp
-  )
-
-  set ( MOABLD_LIB_SRCS
-    LocalDiscretization/ElemEvaluator.cpp
-    LocalDiscretization/LinearHex.cpp
-    LocalDiscretization/LinearQuad.cpp
-    LocalDiscretization/LinearTet.cpp
-    LocalDiscretization/LinearTri.cpp
-    LocalDiscretization/QuadraticHex.cpp
-#    LocalDiscretization/SpectralHex.cpp
-#    LocalDiscretization/SpectralQuad.cpp
-  ) 
-
-  include_directories(
-    ${MOAB_SOURCE_DIR}/src
-    ${MOAB_SOURCE_DIR}/src/io
-    ${MOAB_SOURCE_DIR}/src/parallel
-    ${MOAB_SOURCE_DIR}/src/LocalDiscretization
-    ${MOAB_SOURCE_DIR}/src/moab/point_locater/lotte
-    ${MOAB_BINARY_DIR}
-    ${MOAB_BINARY_DIR}/src
-    ${MOAB_BINARY_DIR}/src/parallel
-  )
-
-  if ( NetCDF_FOUND )
-    set ( MOAB_DEFINES "${MOAB_DEFINES} -DNETCDF_FILE" )
-    set ( MOABIO_LIB_SRCS
-      ${MOABIO_LIB_SRCS}
-      io/ReadNC.cpp
-      io/ReadNCDF.cpp
-      io/WriteNCDF.cpp
-      io/WriteSLAC.cpp
-      io/NCHelper.cpp
-      io/NCHelperEuler.cpp
-      io/NCHelperFV.cpp
-      io/NCHelperHOMME.cpp
-      io/NCHelperMPAS.cpp
-      io/NCHelperGCRM.cpp
-      SpectralMeshTool.cpp
-    )
-    include_directories(
-      ${NetCDF_INCLUDE_DIRECTORIES}
-    )
-  endif ( NetCDF_FOUND )
-
-  if ( HDF5_FOUND )
-    set ( MOAB_DEFINES "${MOAB_DEFINES} -DHDF5_FILE" )
-    check_function_exists( H5Pset_fapl_mpio MOAB_HDF_HAVE_PARALLEL )
-    set ( MOABIO_LIB_SRCS
-      ${MOABIO_LIB_SRCS}
-      io/HDF5Common.cpp
-      io/ReadHDF5.cpp
-      io/ReadHDF5Dataset.cpp
-      io/ReadHDF5VarLen.cpp
-      io/WriteHDF5.cpp
-    )
-
-    include_directories(
-      ${HDF5_INCLUDE_DIR}
-      io/mhdf/include
-    )
-    add_subdirectory( io/mhdf )
-  endif ( HDF5_FOUND )
-
-
-  SET(MOAB_LIB_SRCS ${MOABCORE_LIB_SRCS} ${MOABPTLOC_LIB_SRCS} ${MOABIO_LIB_SRCS} ${MOABLD_LIB_SRCS})
-
-  set_source_files_properties( ${MOAB_LIB_SRCS}
-    COMPILE_FLAGS "-DIS_BUILDING_MB ${MOAB_DEFINES}"
-  )
-  add_library( MOAB
-    ${MOAB_LIB_SRCS}
-  )
-
-  if ( MOAB_USE_NETCDF AND NetCDF_FOUND )
-    target_link_libraries( MOAB ${NetCDF_LIBRARIES} )
-  endif ( MOAB_USE_NETCDF AND NetCDF_FOUND )
-
-  if ( MOAB_USE_HDF AND HDF5_FOUND )
-    target_link_libraries( MOAB mhdf ${HDF5_LIBRARIES} )
-  endif ( MOAB_USE_HDF AND HDF5_FOUND )
-
-  if ( MOAB_USE_MPI AND MPI_FOUND )
-    add_subdirectory( parallel )
-    target_link_libraries( MOAB MOABpar )
-  endif ( MOAB_USE_MPI AND MPI_FOUND )
-
-  include( ${MOAB_SOURCE_DIR}/config/AutoconfHeader.cmake )
-  # Define some more variables so they will be substituted properly in the autoconf files.
-  set( MB_VERSION        "\"${MOAB_VERSION}\"" )
-  set( MB_VERSION_MAJOR  ${MOAB_VERSION_MAJOR} )
-  set( MB_VERSION_MINOR  ${MOAB_VERSION_MINOR} )
-  if ( DEFINED MOAB_VERSION_PATCH )
-    set( MB_VERSION_PATCH  ${MOAB_VERSION_PATCH} )
-  endif ( DEFINED MOAB_VERSION_PATCH )
-  set( MB_VERSION_STRING "\"${MOAB_VERSION_STRING}\"" )
-
-  autoconf_header( ${MOAB_SOURCE_DIR}/src/moab/Version.h.in ${MOAB_BINARY_DIR}/src/moab/Version.h )
-  autoconf_header( ${MOAB_SOURCE_DIR}/src/moab/EntityHandle.hpp.in ${MOAB_BINARY_DIR}/src/moab/EntityHandle.hpp )
-  autoconf_header( ${MOAB_SOURCE_DIR}/src/parallel/moab_mpi_config.h.in ${MOAB_BINARY_DIR}/src/parallel/moab_mpi_config.h )
-  autoconf_header( ${MOAB_SOURCE_DIR}/src/FCDefs.h.in ${MOAB_BINARY_DIR}/MOAB_FCDefs.h )
-  file( WRITE ${MOAB_BINARY_DIR}/src/MBCN_protos.h "" )
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}
+                    ${CMAKE_CURRENT_BINARY_DIR}
+                    parallel
+                    LocalDiscretization
+                    io io/mhdf
+                    ${MOAB_BINARY_DIR}/src/parallel )
+
+if(MOAB_USE_MPI AND MPI_FOUND)
+  add_subdirectory(parallel)
+  set( MOAB_MPI MOABpar )
+endif()
+
+#create the FCDefs.h and src/MOAB_FCDefs.h files
+configure_file(FCDefs.h.in "${CMAKE_CURRENT_BINARY_DIR}/FCDefs.h" @ONLY)
+#sed -e "s/FC_FUNC/MOAB_FC_FUNC/" src/FCDefs.h >src/MOAB_FCDefs.h
+file(READ ${CMAKE_CURRENT_BINARY_DIR}/FCDefs.h infile)
+string( REGEX REPLACE "FC_FUNC" "MOAB_FC_FUNC" outstring "${infile}" )
+file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/MOAB_FCDefs.h" "${outstring}")
+
+#Create the moab/EntityHandle.hpp file
+file(READ ${CMAKE_CURRENT_SOURCE_DIR}/moab/EntityHandle.hpp.in infile2)
+string( REGEX REPLACE "#undef" "#cmakedefine" outstring2 "${infile2}" )
+file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/moab/EntityHandle.hpp.in" "${outstring2}")
+configure_file(${CMAKE_CURRENT_BINARY_DIR}/moab/EntityHandle.hpp.in ${CMAKE_CURRENT_BINARY_DIR}/moab/EntityHandle.hpp)
+
+#generate the Version.h file
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/moab/Version.h.cmake.in "${CMAKE_CURRENT_BINARY_DIR}/moab/Version.h" @ONLY)
+
+set(moab_link_lib)
+add_subdirectory(io)
+add_subdirectory(LocalDiscretization)
+add_subdirectory(oldinc)
+
+ADD_CUSTOM_COMMAND(
+  OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/MBCN_protos.h
+  COMMAND ${CMAKE_COMMAND}
+          -DDEF:STRING=MOAB
+          -Dinput_file:STRING=${CMAKE_CURRENT_SOURCE_DIR}/MBCN.h
+          -Doutput_file:STRING=${CMAKE_CURRENT_BINARY_DIR}/MBCN_protos.h
+          -Dprefix:STRING=MBCN
+          -P ${CMAKE_SOURCE_DIR}/config/ITAPSFortranMangling.cmake
+  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/MBCN.h )
+
+if( MOAB_USE_HDF )
+  include_directories(${HDF5_INCLUDES})
+  set( MOAB_HDF_LIBS ${HDF5_LIBRARIES} )
+endif()
+if( MOAB_USE_NETCDF )
+  include_directories(${NetCDF_INCLUDES})
+  if ( MOAB_USE_MPI )
+    set( MOAB_NETCDF_LIBS ${PNetCDF_LIBRARIES} ${NetCDF_LIBRARIES} )
+  else ( MOAB_USE_MPI )
+    set( MOAB_NETCDF_LIBS ${NetCDF_LIBRARIES} )
+  endif ( MOAB_USE_MPI )
+endif()
+
+set(MOAB_LIB_SOURCES
+        AdaptiveKDTree.cpp
+        AEntityFactory.hpp AEntityFactory.cpp
+        AffineXform.hpp    AffineXform.cpp
+        AxisBox.hpp        AxisBox.cpp
+        BitPage.hpp        BitPage.cpp
+        BitTag.hpp         BitTag.cpp
+        BoundBox.cpp
+        BSPTree.cpp
+        BSPTreePoly.cpp
+        BVHTree.cpp
+        CN.cpp
+        CartVect.cpp
+        Core.cpp
+        DebugOutput.hpp DebugOutput.cpp
+        DenseTag.hpp    DenseTag.cpp
+        DualTool.cpp
+        ElementSequence.hpp
+        EntitySequence.hpp  EntitySequence.cpp
+        Factory.cpp
+        FBEngine.cpp
+        FileOptions.cpp
+        GeomUtil.cpp
+        GeomTopoTool.cpp
+        HigherOrderFactory.cpp
+        HomXform.cpp
+        Internals.hpp
+        MBCNArrays.hpp
+        MergeMesh.cpp
+        MeshSet.hpp         MeshSet.cpp
+        MeshSetSequence.hpp MeshSetSequence.cpp
+        MeshTag.hpp         MeshTag.cpp
+        MeshTopoUtil.cpp
+        OrientedBox.hpp     OrientedBox.cpp
+        OrientedBoxTreeTool.cpp
+        lotte/poly.c
+        lotte/findpt.c
+        lotte/errmem.c
+        lotte/tensor.c
+        PolyElementSeq.hpp  PolyElementSeq.cpp
+        ProgOptions.cpp
+        Range.cpp
+        RangeSeqIntersectIter.hpp RangeSeqIntersectIter.cpp
+        ReadUtil.hpp              ReadUtil.cpp
+        ReaderWriterSet.cpp
+        ReorderTool.cpp
+        ScdElementData.hpp        ScdElementData.cpp
+        ScdInterface.cpp
+        ScdVertexData.hpp         ScdVertexData.cpp
+        SequenceData.hpp          SequenceData.cpp
+        SequenceManager.hpp       SequenceManager.cpp
+        SetIterator.cpp
+        Skinner.cpp
+        SmoothCurve.hpp           SmoothCurve.cpp
+        SmoothFace.hpp            SmoothFace.cpp
+        SparseTag.hpp             SparseTag.cpp
+        SpatialLocator.cpp
+        SpectralMeshTool.cpp
+        StructuredElementSeq.hpp  StructuredElementSeq.cpp
+        SweptElementData.hpp      SweptElementData.cpp
+        SweptElementSeq.hpp       SweptElementSeq.cpp
+        SweptVertexData.hpp       SweptVertexData.cpp
+        SysUtil.hpp               SysUtil.cpp
+        TagCompare.hpp
+        TagInfo.hpp               TagInfo.cpp
+        TupleList.cpp
+        Tree.cpp
+        Types.cpp
+        TypeSequenceManager.hpp   TypeSequenceManager.cpp
+        UnstructuredElemSeq.hpp   UnstructuredElemSeq.cpp
+        Util.cpp
+        VarLenDenseTag.hpp        VarLenDenseTag.cpp
+        VarLenSparseTag.hpp       VarLenSparseTag.cpp
+        VarLenTag.hpp      
+        VertexSequence.hpp        VertexSequence.cpp
+        WriteUtil.hpp             WriteUtil.cpp
+        moab_mpe.h )
+
+set( MOAB_INSTALL_HEADERS
+        moab/AdaptiveKDTree.hpp
+        moab/BoundBox.hpp
+        moab/BSPTree.hpp
+        moab/BSPTreePoly.hpp
+        moab/BVHTree.hpp
+        moab/CN.hpp
+        moab/CartVect.hpp
+        moab/Compiler.hpp
+        moab/Core.hpp
+        moab/CpuTimer.hpp
+        moab/DualTool.hpp
+        moab/Error.hpp
+        moab/GeomTopoTool.hpp
+        moab/HigherOrderFactory.hpp
+        moab/HomXform.hpp
+        moab/EntityType.hpp
+        moab/FBEngine.hpp
+        moab/FileOptions.hpp
+        moab/FindPtFuncs.h
+        moab/Forward.hpp
+        moab/GeomUtil.hpp
+        moab/Interface.hpp
+        moab/point_locater/tree/common_tree.hpp
+        moab/point_locater/tree/element_tree.hpp
+        moab/point_locater/tree/bvh_tree.hpp
+        moab/point_locater/io.hpp 
+        moab/point_locater/element_maps/linear_hex_map.hpp 
+        moab/point_locater/element_maps/linear_tet_map.hpp 
+        moab/point_locater/element_maps/spectral_hex_map.hpp 
+        moab/point_locater/element_maps/quadratic_hex_map.hpp 
+        moab/point_locater/point_locater.hpp
+        moab/point_locater/parametrizer.hpp
+        moab/Matrix3.hpp
+        moab/MergeMesh.hpp
+        moab/MeshTopoUtil.hpp
+        moab/OrientedBoxTreeTool.hpp
+        moab/ProgOptions.hpp
+        moab/Range.hpp
+        moab/RangeMap.hpp
+        moab/ReadUtilIface.hpp
+        moab/ReaderIface.hpp
+        moab/ReaderWriterSet.hpp
+        moab/ReorderTool.hpp
+        moab/ScdInterface.hpp
+        moab/SetIterator.hpp
+        moab/Skinner.hpp
+        moab/SpatialLocator.hpp
+        moab/SpectralMeshTool.hpp
+        moab/Tree.hpp
+        moab/TreeStats.hpp
+        moab/Types.hpp
+        moab/UnknownInterface.hpp
+        moab/Util.hpp
+        moab/WriteUtilIface.hpp
+        moab/WriterIface.hpp
+        MBEntityType.h
+        MBCN.h
+        MBTagConventions.hpp )
+
+set( MOAB_GENERATED_INSTALL_HEADERS
+     ${CMAKE_CURRENT_BINARY_DIR}/MOAB_FCDefs.h
+     ${CMAKE_CURRENT_BINARY_DIR}/MBCN_protos.h
+     ${CMAKE_CURRENT_BINARY_DIR}/moab/EntityHandle.hpp )
+
+if( CGM_FOUND )
+  set( MOAB_CGM_LIBS ${CGM_LIBRARIES} )
+endif()
+
+if (MOAB_USE_ZOLTAN)
+  set( MOAB_LIB_SOURCES ${MOAB_LIB_SOURCES}
+          ${CMAKE_SOURCE_DIR}/tools/mbzoltan/MBZoltan.cpp )
+  set( MOAB_INSTALL_HEADERS ${MOAB_INSTALL_HEADERS}
+          ${CMAKE_SOURCE_DIR}/tools/mbzoltan/MBZoltan.hpp )
+  include_directories( ${Zoltan_INCLUDES} )
+  list(APPEND MOAB_DEP_LIBRARIES ${Zoltan_LIBRARIES})
+endif()
+add_library( MOAB ${MOAB_LIB_SOURCES} ${MOAB_INSTALL_HEADERS} ${MOAB_GENERATED_INSTALL_HEADERS}
+                  $<TARGET_OBJECTS:moabio> 
+                  $<TARGET_OBJECTS:moab_loc_discr> 
+                  ${MDHF_OBJ} )
+target_link_libraries( MOAB ${MOAB_MPI} ${MOAB_DEP_LIBRARIES} ${MOAB_LIBS} ${MOAB_CGM_LIBS} )
+set_target_properties( MOAB PROPERTIES COMPILE_FLAGS "-DIS_BUILDING_MB ${MOAB_DEFINES}")
+
+install(TARGETS MOAB EXPORT MOABTargets
+                     RUNTIME DESTINATION bin
+                     LIBRARY DESTINATION lib
+                     ARCHIVE DESTINATION lib )
+set( MOAB_INSTALL_TARGETS ${MOAB_INSTALL_TARGETS} MOAB)
+include(AutoconfHeader)
+moab_install_headers(${MOAB_INSTALL_HEADERS})
+foreach (header ${MOAB_GENERATED_INSTALL_HEADERS})
+    STRING(REGEX REPLACE "^${CMAKE_CURRENT_BINARY_DIR}/" "" relative ${header})
+    string(REGEX MATCH "(.*)/" DIR ${relative})
+    install(FILES ${header} DESTINATION include/${DIR})
+endforeach()

diff --git a/src/LocalDiscretization/CMakeLists.txt b/src/LocalDiscretization/CMakeLists.txt
new file mode 100644
index 0000000..34bfdee
--- /dev/null
+++ b/src/LocalDiscretization/CMakeLists.txt
@@ -0,0 +1,21 @@
+set( MOAB_LOC_DSCR_SRCS ElemEvaluator.cpp
+                        LinearHex.cpp
+                        LinearQuad.cpp
+                        LinearTet.cpp
+                        LinearTri.cpp
+                        QuadraticHex.cpp )
+
+set( MOAB_LOCALDSRC_Install_Headers moab/ElemEvaluator.hpp
+                                    moab/LinearHex.hpp
+                                    moab/LinearQuad.hpp
+	                                  moab/LinearTet.hpp
+                                    moab/LinearTri.hpp
+                                    moab/QuadraticHex.hpp )
+
+add_library( moab_loc_discr OBJECT ${MOAB_LOC_DSCR_SRCS}
+                                   ${MOAB_LOCALDSRC_Install_Headers} )
+set_target_properties( moab_loc_discr 
+                       PROPERTIES COMPILE_FLAGS "-DIS_BUILDING_MB ${MOAB_DEFINES}")
+
+include(AutoconfHeader)
+moab_install_headers(${MOAB_LOCALDSRC_Install_Headers})

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/fathomteam/moab/commits/cc6f5d89a295/
Changeset:   cc6f5d89a295
Branch:      None
User:        pshriwise
Date:        2014-06-23 16:08:15
Summary:     Merge branch 'master' of https://bitbucket.org/fathomteam/moab

Affected #:  6 files

diff --git a/src/io/NCHelper.cpp b/src/io/NCHelper.cpp
index b2f99b9..93cda3b 100644
--- a/src/io/NCHelper.cpp
+++ b/src/io/NCHelper.cpp
@@ -551,89 +551,6 @@ ErrorCode NCHelper::read_variables_to_set(std::vector<ReadNC::VarData>& vdatas,
   return rval;
 }
 
-ErrorCode NCHelper::convert_variable(ReadNC::VarData& var_data, int tstep_num)
-{
-  DebugOutput& dbgOut = _readNC->dbgOut;
-
-  // Get ptr to tag space
-  void* data = var_data.varDatas[tstep_num];
-
-  // Get variable size
-  std::size_t sz = var_data.sz;
-  assert(sz > 0);
-
-  // Finally, read into that space
-  int success = 0;
-  int* idata;
-  double* ddata;
-  float* fdata;
-  short* sdata;
-
-  switch (var_data.varDataType) {
-    case NC_FLOAT:
-      ddata = (double*) var_data.varDatas[tstep_num];
-      fdata = (float*) var_data.varDatas[tstep_num];
-      // Convert in-place
-      for (int i = sz - 1; i >= 0; i--)
-        ddata[i] = fdata[i];
-      break;
-    case NC_SHORT:
-      idata = (int*) var_data.varDatas[tstep_num];
-      sdata = (short*) var_data.varDatas[tstep_num];
-      // Convert in-place
-      for (int i = sz - 1; i >= 0; i--)
-        idata[i] = sdata[i];
-      break;
-    default:
-      success = 1;
-  }
-
-  if (2 <= dbgOut.get_verbosity() && !success) {
-    double dmin, dmax;
-    int imin, imax;
-    switch (var_data.varDataType) {
-      case NC_DOUBLE:
-      case NC_FLOAT:
-        ddata = (double*) data;
-        if (sz == 0)
-          break;
-
-        dmin = dmax = ddata[0];
-        for (unsigned int i = 1; i < sz; i++) {
-          if (ddata[i] < dmin)
-            dmin = ddata[i];
-          if (ddata[i] > dmax)
-            dmax = ddata[i];
-        }
-        dbgOut.tprintf(2, "Variable %s (double): min = %f, max = %f\n", var_data.varName.c_str(), dmin, dmax);
-        break;
-      case NC_INT:
-      case NC_SHORT:
-        idata = (int*) data;
-        if (sz == 0)
-          break;
-
-        imin = imax = idata[0];
-        for (unsigned int i = 1; i < sz; i++) {
-          if (idata[i] < imin)
-            imin = idata[i];
-          if (idata[i] > imax)
-            imax = idata[i];
-        }
-        dbgOut.tprintf(2, "Variable %s (int): min = %d, max = %d\n", var_data.varName.c_str(), imin, imax);
-        break;
-      case NC_NAT:
-      case NC_BYTE:
-      case NC_CHAR:
-        break;
-      default: // Default case added to remove compiler warnings
-        success = 1;
-    }
-  }
-
-  return MB_SUCCESS;
-}
-
 ErrorCode NCHelper::read_coordinate(const char* var_name, int lmin, int lmax, std::vector<double>& cvals)
 {
   std::map<std::string, ReadNC::VarData>& varInfo = _readNC->varInfo;

diff --git a/src/io/NCHelper.hpp b/src/io/NCHelper.hpp
index 5b3b93c..afc49dc 100644
--- a/src/io/NCHelper.hpp
+++ b/src/io/NCHelper.hpp
@@ -48,9 +48,6 @@ protected:
   //! Read set variables (common to scd mesh and ucd mesh)
   ErrorCode read_variables_to_set(std::vector<ReadNC::VarData>& vdatas, std::vector<int>& tstep_nums);
 
-  //! Convert variables in place
-  ErrorCode convert_variable(ReadNC::VarData& var_data, int tstep_num);
-
   ErrorCode read_coordinate(const char* var_name, int lmin, int lmax,
                             std::vector<double>& cvals);
 

diff --git a/tools/dagmc/DagMC.cpp b/tools/dagmc/DagMC.cpp
index 6e2260a..151b3f5 100755
--- a/tools/dagmc/DagMC.cpp
+++ b/tools/dagmc/DagMC.cpp
@@ -1601,7 +1601,7 @@ ErrorCode DagMC::get_group_name( EntityHandle group_set, std::string& name )
   return MB_SUCCESS;
 }
 
-ErrorCode DagMC::parse_group_name( EntityHandle group_set, prop_map& result )
+ErrorCode DagMC::parse_group_name( EntityHandle group_set, prop_map& result, const char *delimiters )
 {
   ErrorCode rval;
   std::string group_name;
@@ -1609,7 +1609,7 @@ ErrorCode DagMC::parse_group_name( EntityHandle group_set, prop_map& result )
   if( rval != MB_SUCCESS ) return rval;
 
   std::vector< std::string > group_tokens;
-  tokenize( group_name, group_tokens, "_" );
+  tokenize( group_name, group_tokens, delimiters );
 
   // iterate over all the keyword positions 
   // keywords are even indices, their values (optional) are odd indices
@@ -1623,7 +1623,7 @@ ErrorCode DagMC::parse_group_name( EntityHandle group_set, prop_map& result )
   return MB_SUCCESS;
 }
 
-ErrorCode DagMC::detect_available_props( std::vector<std::string>& keywords_list )
+ErrorCode DagMC::detect_available_props( std::vector<std::string>& keywords_list, const char *delimiters )
 {
   ErrorCode rval;
   std::set< std::string > keywords;
@@ -1631,7 +1631,7 @@ ErrorCode DagMC::detect_available_props( std::vector<std::string>& keywords_list
        grp != group_handles().end(); ++grp )
   {
     std::map< std::string, std::string > properties;
-    rval = parse_group_name( *grp, properties );
+    rval = parse_group_name( *grp, properties, delimiters );
     if( rval == MB_TAG_NOT_FOUND ) continue;
     else if( rval != MB_SUCCESS ) return rval;
 
@@ -1698,7 +1698,8 @@ ErrorCode DagMC::unpack_packed_string( Tag tag, EntityHandle eh,
 }
 
 ErrorCode DagMC::parse_properties( const std::vector<std::string>& keywords,
-                                   const std::map<std::string, std::string>& keyword_synonyms )
+                                   const std::map<std::string, std::string>& keyword_synonyms,
+				   const char *delimiters)
 {
   ErrorCode rval;
 
@@ -1739,7 +1740,7 @@ ErrorCode DagMC::parse_properties( const std::vector<std::string>& keywords,
   {
 
     prop_map properties;
-    rval = parse_group_name( *grp, properties );
+    rval = parse_group_name( *grp, properties, delimiters );
     if( rval == MB_TAG_NOT_FOUND ) continue;
     else if( rval != MB_SUCCESS ) return rval;
 

diff --git a/tools/dagmc/DagMC.hpp b/tools/dagmc/DagMC.hpp
index fe39082..6b79f67 100755
--- a/tools/dagmc/DagMC.hpp
+++ b/tools/dagmc/DagMC.hpp
@@ -377,13 +377,15 @@ public:
    * @param keywords_out The result list of keywords.  This list could be
    *        validly passed to parse_properties().
    */
-  ErrorCode detect_available_props( std::vector<std::string>& keywords_out );
+  ErrorCode detect_available_props( std::vector<std::string>& keywords_out, const char *delimiters = "_" );
 
   /** Parse properties from group names per metadata syntax standard
    * 
    * @param keywords A list of keywords to parse.  These are considered the canonical
    *                 names of the properties, and constitute the valid inputs to 
    *                 has_prop() and prop_value().
+   * @param delimiters An array of characters the routine will use to split the groupname
+   *                   into properties.
    * @param synonyms An optional mapping of synonym keywords to canonical keywords. 
    *                 This allows more than one group name keyword to take on the same
    *                 meaning
@@ -392,7 +394,8 @@ public:
    *                 group named "graveyard".
    */
   ErrorCode parse_properties( const std::vector<std::string>& keywords, 
-                              const std::map<std::string,std::string>& synonyms = no_synonyms );
+                              const std::map<std::string,std::string>& synonyms = no_synonyms, 
+			      const char* delimiters = "_" );
 
   /** Get the value of a property on a volume or surface
    *
@@ -463,7 +466,7 @@ private:
   /** tokenize the metadata stored in group names - basically borroed from ReadCGM.cpp */
   void tokenize( const std::string& str,
                  std::vector<std::string>& tokens,
-                 const char* delimiters ) const;
+                 const char* delimiters = "_" ) const;
 
   // a common type within the property and group name functions
   typedef std::map<std::string, std::string> prop_map;
@@ -471,7 +474,7 @@ private:
   /** Store the name of a group in a string */
   ErrorCode get_group_name( EntityHandle group_set, std::string& name );
   /** Parse a group name into a set of key:value pairs */
-  ErrorCode parse_group_name( EntityHandle group_set, prop_map& result );
+  ErrorCode parse_group_name( EntityHandle group_set, prop_map& result, const char* delimiters = "_");
   /** Add a string value to a property tag for a given entity */
   ErrorCode append_packed_string( Tag, EntityHandle, std::string& );
   /** Convert a property tag's value on a handle to a list of strings */

diff --git a/tools/dagmc/dagmc_preproc.cpp b/tools/dagmc/dagmc_preproc.cpp
index 98b4c63..ce21baa 100644
--- a/tools/dagmc/dagmc_preproc.cpp
+++ b/tools/dagmc/dagmc_preproc.cpp
@@ -454,7 +454,7 @@ int main( int argc, char* argv[] ){
    CHECKERR( *dag, ret );
 
    std::vector< std::string > keywords;
-   ret = dag->detect_available_props( keywords );
+   ret = dag->detect_available_props( keywords);
    CHECKERR( *dag, ret );
    ret = dag->parse_properties( keywords );
    CHECKERR( *dag, ret );

diff --git a/tools/mbzoltan/MBZoltan.hpp b/tools/mbzoltan/MBZoltan.hpp
index 2acce87..c303158 100644
--- a/tools/mbzoltan/MBZoltan.hpp
+++ b/tools/mbzoltan/MBZoltan.hpp
@@ -26,6 +26,7 @@
 #include "moab_mpi.h"
 #include "zoltan_cpp.h"
 #include "moab/Range.hpp"
+#include <time.h>
 
 #ifdef CGM
 #include <map>


https://bitbucket.org/fathomteam/moab/commits/a5d3b32b123c/
Changeset:   a5d3b32b123c
Branch:      None
User:        pshriwise
Date:        2014-06-23 16:46:45
Summary:     Removed unused variables/structures in ReadCGM.

Affected #:  1 file

diff --git a/src/io/ReadCGM.cpp b/src/io/ReadCGM.cpp
index 87ce275..6b4d9b5 100644
--- a/src/io/ReadCGM.cpp
+++ b/src/io/ReadCGM.cpp
@@ -515,10 +515,7 @@ void ReadCGM::set_cgm_attributes(bool const act_attributes, bool const verbose)
     if (MB_SUCCESS != rval)
       return MB_FAILURE;
     
-    rval = mdbImpl->add_entities( ci->second, &vh, 1 );
-    if (MB_SUCCESS != rval)
-      return MB_FAILURE;
-    
+    //replace meshset with the vertex handle
     ci->second = vh;
   }
   return MB_SUCCESS;
@@ -800,13 +797,6 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   rval = mdbImpl->tag_set_data( geometry_resabs_tag, &set, 1, &GEOMETRY_RESABS );
   if(MB_SUCCESS != rval) return rval;
 
-  // CGM data
-  std::map<RefEntity*,EntityHandle>::iterator ci;
-  //const char geom_categories[][CATEGORY_TAG_SIZE] =
-      //{"Vertex\0", "Curve\0", "Surface\0", "Volume\0", "Group\0"};
- 
-
-
   // Initialize CGM
   InitCGMA::initialize_cgma();
 
@@ -828,9 +818,8 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   }
 
   // create entity sets for all geometric entities
-  DLIList<RefEntity*> entlist;
   std::map<RefEntity*,EntityHandle> entmap[5]; // one for each dim, and one for groups
-  //std::map<RefEntity*,EntityHandle>* entmap_ptr = entmap;
+
   rval = create_entity_sets( entmap );
   if (rval!=MB_SUCCESS) return rval;
 
@@ -854,7 +843,7 @@ ErrorCode ReadCGM::load_file(const char *cgm_file_name,
   entmap[3].clear();
   entmap[4].clear();
 
-  // create geometry for all vertices and replace 
+  // create geometry for all vertices
   rval = create_vertices( entmap );
   if(rval!=MB_SUCCESS) return rval; 
 


https://bitbucket.org/fathomteam/moab/commits/ca4e2834137e/
Changeset:   ca4e2834137e
Branch:      None
User:        pshriwise
Date:        2014-06-23 16:57:50
Summary:     Revert "Removed unused variables/structures in ReadCGM."

This reverts commit a5d3b32b123c88dd96286c01ae64d36c1f9773be.

Affected #:  1 file
Diff not available.

https://bitbucket.org/fathomteam/moab/commits/b66d9b7c1e2c/
Changeset:   b66d9b7c1e2c
Branch:      None
User:        pshriwise
Date:        2014-06-23 17:02:56
Summary:     Removed unused variables/structures. Added comments to create_vertices function.

Affected #:  1 file
Diff not available.

https://bitbucket.org/fathomteam/moab/commits/a98a1aa3e2fc/
Changeset:   a98a1aa3e2fc
Branch:      None
User:        pshriwise
Date:        2014-06-23 17:06:33
Summary:     Spelling correction to set_cgm_attributes variable

Affected #:  1 file
Diff not available.

https://bitbucket.org/fathomteam/moab/commits/e11c7096a508/
Changeset:   e11c7096a508
Branch:      None
User:        pshriwise
Date:        2014-06-23 17:33:51
Summary:     Altered ReadCGM methods to pass maps by reference.

Affected #:  2 files
Diff not available.

https://bitbucket.org/fathomteam/moab/commits/84544094e5c7/
Changeset:   84544094e5c7
Branch:      None
User:        pshriwise
Date:        2014-06-24 23:10:06
Summary:     Moved newly created re-factor functions from public to private.

Affected #:  1 file
Diff not available.

https://bitbucket.org/fathomteam/moab/commits/762183dd3d33/
Changeset:   762183dd3d33
Branch:      None
User:        pshriwise
Date:        2014-06-24 23:22:02
Summary:     Updated arguments to store_*_sense functions to make them more readable.

Affected #:  2 files
Diff not available.

https://bitbucket.org/fathomteam/moab/commits/7bdedf05b135/
Changeset:   7bdedf05b135
Branch:      None
User:        pshriwise
Date:        2014-06-24 23:37:26
Summary:     Updated faceting functions to use two distinct entitymaps for readability.

Affected #:  2 files
Diff not available.

https://bitbucket.org/fathomteam/moab/commits/46dc7529d43d/
Changeset:   46dc7529d43d
Branch:      master
User:        gonuke
Date:        2014-06-28 00:00:49
Summary:     Merged in pshriwise/moab (pull request #26). ReadCGM refactor part 2.
The merge was performed manually, so this merge commit should introduce no diffs.
Affected #:  0 files
Diff not available.

Repository URL: https://bitbucket.org/fathomteam/moab/

--

This is a commit notification from bitbucket.org. You are receiving
this because the commit-notification service is enabled for the
recipient of this email.


More information about the moab-dev mailing list