[MOAB-dev] commit/MOAB: 2 new changesets

commits-noreply at bitbucket.org
Tue May 7 13:52:42 CDT 2013


2 new commits in MOAB:

https://bitbucket.org/fathomteam/moab/commits/012ae018d4a0/
Changeset:   012ae018d4a0
Branch:      None
User:        danwu
Date:        2013-05-07 20:12:17
Summary:     Fix compilation errors of HelloMOAB.cpp

Affected #:  1 file

diff --git a/examples/HelloMOAB.cpp b/examples/HelloMOAB.cpp
index ee81de9..ebab971 100644
--- a/examples/HelloMOAB.cpp
+++ b/examples/HelloMOAB.cpp
@@ -8,13 +8,15 @@
 
 
 #include "moab/Core.hpp"
+#include <iostream>
+#include <assert.h>
 
 using namespace moab;
 using namespace std;
 
 string test_file_name = string(MESH_DIR) + string("/3k-tri-sphere.vtk");
 
-int main( int argc, char** argv[] )
+int main( int argc, char** argv )
 {
   Interface *iface = new Core;
 
@@ -24,7 +26,7 @@ int main( int argc, char** argv[] )
     test_file_name = argv[1];
   }  
     //load the mesh from vtk file
-  ErrorCode rval = iface->load_mesh( test_file_name );
+  ErrorCode rval = iface->load_mesh( test_file_name.c_str() );
   assert( rval == MB_SUCCESS);
 
     //get verts entities
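
For reference, the two fixes in this changeset come down to the standard main
signature (char** argv rather than char** argv[]) and handing load_mesh a C
string. A minimal sketch of the corrected pattern, restricted to the calls that
already appear above (the assert-style error handling mirrors the example
itself):

    #include "moab/Core.hpp"
    #include <string>
    #include <assert.h>

    using namespace moab;

    int main( int argc, char** argv )      // char** argv, not char** argv[]
    {
      std::string test_file_name = "3k-tri-sphere.vtk";
      if (argc > 1)
        test_file_name = argv[1];          // optional command-line override

      Interface *iface = new Core;
      // load_mesh expects a const char*, hence the .c_str() conversion
      ErrorCode rval = iface->load_mesh( test_file_name.c_str() );
      assert( rval == MB_SUCCESS );

      delete iface;
      return 0;
    }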


https://bitbucket.org/fathomteam/moab/commits/19231ecb1911/
Changeset:   19231ecb1911
Branch:      master
User:        danwu
Date:        2013-05-07 20:52:14
Summary:     Merge branch 'master' of https://bitbucket.org/fathomteam/moab

Affected #:  10 files

diff --git a/src/parallel/WriteHDF5Parallel.cpp b/src/parallel/WriteHDF5Parallel.cpp
index da16c7b..4fbca9e 100644
--- a/src/parallel/WriteHDF5Parallel.cpp
+++ b/src/parallel/WriteHDF5Parallel.cpp
@@ -1680,23 +1680,17 @@ static void convert_to_ranged_ids( const TYPE* buffer,
   }
 
   result.resize( len*2 );
-  std::copy( buffer, buffer+len, result.begin()+len );
-  std::sort( result.begin()+len, result.end() );
-  std::vector<WriteHDF5::id_t>::iterator w = result.begin();
-  std::vector<WriteHDF5::id_t>::const_iterator r = result.begin()+len;
-  *w = *r; ++w;
-  *w = *r; ++r;
-  for (; r != result.end(); ++r) {
-    if (*r == *w + 1) {
-      ++*w;
-    }
-    else {
-      ++w; *w = *r; 
-      ++w; *w = *r;
-    }
+  Range tmp;
+  for (size_t i=0; i<len; i++)
+    tmp.insert( (EntityHandle)buffer[i]);
+  result.resize(tmp.psize()*2);
+  int j=0;
+  for (Range::const_pair_iterator pit=tmp.const_pair_begin();
+      pit!=tmp.const_pair_end(); pit++, j++)
+  {
+    result[2*j]=pit->first;
+    result[2*j+1]=pit->second-pit->first+1;
   }
-  ++w;
-  result.erase( w, result.end() );
 }
 
 static void merge_ranged_ids( const unsigned long* range_list,
@@ -1710,22 +1704,21 @@ static void merge_ranged_ids( const unsigned long* range_list,
   
   result.insert( result.end(), range_list, range_list+len );
   size_t plen = result.size()/2;
-  if (plen > 1) {
-    std::pair<id_t,id_t> *plist = reinterpret_cast<std::pair<id_t,id_t>*>(&result[0]);
-    std::sort( plist, plist+plen );
-    std::pair<id_t,id_t> *wr = plist, *pend = plist+plen;
-    std::sort( plist, pend );
-    for (std::pair<id_t,id_t> *iter = plist+1; iter != pend; ++iter) {
-      if (wr->second+1 < iter->first) {
-        ++wr;
-        *wr = *iter;
-      }
-      else {
-        wr->second = std::max( wr->second, iter->second );
-      }
-    }
-    ++wr;
-    result.resize( 2*(wr - plist) ); 
+  Range tmp;
+  for (size_t i=0; i<plen; i++)
+  {
+    EntityHandle starth=(EntityHandle)result[2*i];
+    EntityHandle endh=(EntityHandle)result[2*i]+(id_t)result[2*i+1]-1; // id+count-1
+    tmp.insert( starth, endh);
+  }
+  // now convert back to std::vector<WriteHDF5::id_t>, compressed range format
+  result.resize(tmp.psize()*2);
+  int j=0;
+  for (Range::const_pair_iterator pit=tmp.const_pair_begin();
+      pit!=tmp.const_pair_end(); pit++, j++)
+  {
+    result[2*j]=pit->first;
+    result[2*j+1]=pit->second-pit->first+1;
   }
 }
 
@@ -1762,12 +1755,13 @@ ErrorCode WriteHDF5Parallel::unpack_set( EntityHandle set,
   
     // If either the current data or the new data is in ranged format,
     // then change the other to ranged format if it isn't already
+  // in both cases when they differ, the data will end up "compressed range"
   std::vector<id_t> tmp;
   if ((flags & mhdf_SET_RANGE_BIT) != (data->setFlags & mhdf_SET_RANGE_BIT)) {
     if (flags & mhdf_SET_RANGE_BIT) {
       tmp = data->contentIds;
       convert_to_ranged_ids( &tmp[0], tmp.size(), data->contentIds );
-      data->setFlags &= mhdf_SET_RANGE_BIT;
+      data->setFlags |= mhdf_SET_RANGE_BIT;
     }
     else {
       tmp.clear();
@@ -1789,7 +1783,7 @@ ErrorCode WriteHDF5Parallel::unpack_set( EntityHandle set,
     }
   }
   
-  if (flags & mhdf_SET_RANGE_BIT)
+  if (data->setFlags & mhdf_SET_RANGE_BIT)
     merge_ranged_ids( contents, num_content, data->contentIds );
   else
     merge_vector_ids( contents, num_content, data->contentIds );
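
The two rewritten helpers above swap the hand-rolled sort-and-scan loops for
moab::Range, which stores handles as sorted (first, last) pairs; iterating
const_pair_begin()/const_pair_end() then emits the "compressed range" layout of
(start, count) pairs directly. A minimal sketch of that encoding, using only
the Range calls visible in these hunks (EntityHandle stands in for
WriteHDF5::id_t):

    #include "moab/Range.hpp"
    #include <vector>

    // e.g. {3,4,5,9,10} becomes {3,3, 9,2}
    std::vector<moab::EntityHandle> to_ranged( const std::vector<moab::EntityHandle>& ids )
    {
      moab::Range tmp;
      for (size_t i = 0; i < ids.size(); i++)
        tmp.insert( ids[i] );                          // Range sorts and coalesces ids

      std::vector<moab::EntityHandle> result( tmp.psize()*2 );
      int j = 0;
      for (moab::Range::const_pair_iterator pit = tmp.const_pair_begin();
           pit != tmp.const_pair_end(); ++pit, ++j)
      {
        result[2*j]   = pit->first;                    // start of a contiguous run
        result[2*j+1] = pit->second - pit->first + 1;  // length of the run
      }
      return result;
    }

The one-character change in unpack_set is also significant: "data->setFlags &=
mhdf_SET_RANGE_BIT" cleared every other flag without actually setting the range
bit, whereas "|=" sets the range bit and preserves the rest; the merge call now
tests data->setFlags instead of the incoming flags.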

diff --git a/test/parallel/Makefile.am b/test/parallel/Makefile.am
index f1ef1d6..d86a8bf 100644
--- a/test/parallel/Makefile.am
+++ b/test/parallel/Makefile.am
@@ -67,7 +67,7 @@ else
 endif
 
 if ENABLE_mbcslam
-  MBCSLAM_TESTS = par_intx_sph
+  MBCSLAM_TESTS = par_intx_sph 
 else
   MBCSLAM_TESTS = 
 endif
@@ -99,7 +99,7 @@ endif
 
 if ENABLE_mbcslam
   par_intx_sph_SOURCES = par_intx_sph.cpp
-  par_intx_sph_LDADD = $(LDADD) ../../tools/mbcoupler/libmbcoupler.la ../../tools/mbcslam/libmbcslam.la
+  par_intx_sph_LDADD = $(LDADD) ../../tools/mbcoupler/libmbcoupler.la ../../tools/mbcslam/libmbcslam.la  
 endif
 # Other files to clean up (e.g. output from tests)
 MOSTLYCLEANFILES = mhdf_ll.h5m tmp0.h5m tmp1.h5m tmp2.h5m tmp3.h5m

diff --git a/tools/mbcoupler/ElemUtil.cpp b/tools/mbcoupler/ElemUtil.cpp
index c75c0bb..c0010d0 100644
--- a/tools/mbcoupler/ElemUtil.cpp
+++ b/tools/mbcoupler/ElemUtil.cpp
@@ -1234,10 +1234,10 @@ namespace Element {
     int n2= _n*_n;
     for (int i=0; i<_n; i++)
     {
-      double csi=_z[0][i];
+      double eta=_z[0][i];
       for (int j=0; j<_n; j++)
       {
-        double eta = _z[1][j]; // we could really use the same _z[0] array of lobatto nodes
+        double csi = _z[1][j]; // we could really use the same _z[0] array of lobatto nodes
         CartVect pos(0.0);
         for (int k = 0; k < 4; k++) {
           const double N_k = (1 + csi*corner_xi[k][0])

diff --git a/tools/mbcslam/CslamUtils.cpp b/tools/mbcslam/CslamUtils.cpp
index c5d6adb..61d1bf0 100644
--- a/tools/mbcslam/CslamUtils.cpp
+++ b/tools/mbcslam/CslamUtils.cpp
@@ -477,7 +477,7 @@ CartVect spherical_to_cart (SphereCoords & sc)
   return res;
 }
 
-ErrorCode SpectralVisuMesh(Interface * mb, Range & input, int NP, EntityHandle & outputSet)
+ErrorCode SpectralVisuMesh(Interface * mb, Range & input, int NP, EntityHandle & outputSet, double tolerance)
 {
   ErrorCode rval = MB_SUCCESS;
 
@@ -485,7 +485,9 @@ ErrorCode SpectralVisuMesh(Interface * mb, Range & input, int NP, EntityHandle &
   moab::Element::SpectralQuad specQuad(NP);
   Range fineElem;
 
+  std::vector<int> gids(input.size()*(NP-1)*(NP-1));// total number of "linear" elements
   // get all edges? or not? Form all gl points + quads, then merge, then output
+  int startGid=0;
   for (Range::iterator it=input.begin(); it!=input.end(); it++)
   {
     const moab::EntityHandle * conn4 = NULL;
@@ -493,7 +495,7 @@ ErrorCode SpectralVisuMesh(Interface * mb, Range & input, int NP, EntityHandle &
     rval = mb->get_connectivity(*it, conn4, num_nodes);
     if (moab::MB_SUCCESS != rval)
     {
-      std::cout << "can't get conectivity for quad \n";
+      std::cout << "can't get connectivity for quad \n";
       return rval;
     }
     assert(num_nodes==4);
@@ -533,6 +535,8 @@ ErrorCode SpectralVisuMesh(Interface * mb, Range & input, int NP, EntityHandle &
         if (rval!=moab::MB_SUCCESS)
           return rval;
         fineElem.insert(element_handle);
+        gids[startGid]=startGid+1;
+        startGid++;
       }
     }
 
@@ -541,7 +545,15 @@ ErrorCode SpectralVisuMesh(Interface * mb, Range & input, int NP, EntityHandle &
   mb->add_entities(outputSet, fineElem);
   // merge all nodes
   MergeMesh mgtool(mb);
-  rval = mgtool.merge_entities(fineElem, 0.0001);
+  rval = mgtool.merge_entities(fineElem, tolerance);
+  if (MB_SUCCESS!=rval)
+    return rval;
+  Tag gidTag;
+  rval = mb->tag_get_handle("GLOBAL_ID", 1, MB_TYPE_INTEGER,
+      gidTag, MB_TAG_DENSE|MB_TAG_CREAT);
+  if (MB_SUCCESS!=rval)
+      return rval;
+  rval = mb->tag_set_data(gidTag, fineElem, &gids[0]);
 
   return rval;
 }
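
With this change the node-merge tolerance becomes a caller decision (it was
hard-coded to 0.0001), and each fine "linear" element produced by the spectral
refinement receives a GLOBAL_ID dense tag. A hedged usage sketch of the new
signature; the input file name, NP value and tolerance below are illustrative
placeholders, not part of the changeset:

    #include "moab/Core.hpp"
    #include "CslamUtils.hpp"

    using namespace moab;

    int main()
    {
      Core core;
      Interface *mb = &core;

      EntityHandle inputSet, outputSet;
      ErrorCode rval = mb->create_meshset( MESHSET_SET, inputSet );
      if (MB_SUCCESS != rval) return 1;
      rval = mb->load_file( "eulerHomme.vtk", &inputSet );   // placeholder mesh
      if (MB_SUCCESS != rval) return 1;

      Range quads;
      rval = mb->get_entities_by_dimension( inputSet, 2, quads );
      if (MB_SUCCESS != rval) return 1;

      rval = mb->create_meshset( MESHSET_SET, outputSet );
      if (MB_SUCCESS != rval) return 1;

      // new fifth argument: tolerance forwarded to MergeMesh::merge_entities
      rval = SpectralVisuMesh( mb, quads, 4 /*NP*/, outputSet, 1.e-4 );
      return (MB_SUCCESS == rval) ? 0 : 1;
    }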

diff --git a/tools/mbcslam/CslamUtils.hpp b/tools/mbcslam/CslamUtils.hpp
index 136d166..e244138 100644
--- a/tools/mbcslam/CslamUtils.hpp
+++ b/tools/mbcslam/CslamUtils.hpp
@@ -83,7 +83,7 @@ CartVect spherical_to_cart (SphereCoords &) ;
  *   output: a set with refined elements; with proper input, it should be pretty
  *   similar to a Homme mesh read with ReadNC
  */
-ErrorCode SpectralVisuMesh(Interface * mb, Range & input, int NP, EntityHandle & outputSet);
+ErrorCode SpectralVisuMesh(Interface * mb, Range & input, int NP, EntityHandle & outputSet, double tolerance);
 
 /*
  * given an entity set, get all nodes and project them on a sphere with given radius

diff --git a/tools/mbcslam/Makefile.am b/tools/mbcslam/Makefile.am
index 0e5542d..2d37b4a 100644
--- a/tools/mbcslam/Makefile.am
+++ b/tools/mbcslam/Makefile.am
@@ -14,7 +14,8 @@ AM_CPPFLAGS += -DSRCDIR=$(srcdir) \
                -I$(top_srcdir)/itaps \
                -I$(top_srcdir)/itaps/imesh \
                -I$(top_builddir)/itaps \
-               -I$(top_builddir)/itaps/imesh
+               -I$(top_builddir)/itaps/imesh \
+               -I$(top_srcdir)/test
 
 lib_LTLIBRARIES = libmbcslam.la
 libmbcslam_la_LIBADD = $(top_builddir)/src/libMOAB.la $(top_builddir)/itaps/imesh/libiMesh.la \
@@ -39,10 +40,16 @@ cfgdir = $(libdir)
 
 TESTS = intx_on_sphere_test  intx_in_plane_test  spec_visu_test spherical_area_test \
          case1_test
+noinst_PROGRAMS = process_arm cslam_par_test
+
 check_PROGRAMS = $(TESTS) 
 intx_on_sphere_test_SOURCES = intx_on_sphere_test.cpp
 intx_in_plane_test_SOURCES = intx_in_plane_test.cpp
 spec_visu_test_SOURCES = spec_visu_test.cpp
 spherical_area_test_SOURCES = spherical_area_test.cpp
 case1_test_SOURCES = case1_test.cpp
+process_arm_SOURCES = process_arm.cpp
+
+cslam_par_test_SOURCES = cslam_par_test.cpp
+cslam_par_test_LDADD = $(LDADD) $(top_builddir)/tools/mbcoupler/libmbcoupler.la 
 

diff --git a/tools/mbcslam/cslam_par_test.cpp b/tools/mbcslam/cslam_par_test.cpp
new file mode 100644
index 0000000..9887047
--- /dev/null
+++ b/tools/mbcslam/cslam_par_test.cpp
@@ -0,0 +1,227 @@
+/*
+ * cslam_par_test.cpp
+ *  test to trigger intersection on a sphere in parallel
+ *  it will start from an Eulerian mesh (mesh + velocity from Mark Taylor)
+ *   file: VELO00.h5m; mesh + velo at 850 millibar, in an unstructured mesh refined from
+ *   a cube sphere grid
+ *  the mesh is read in parallel (euler mesh);
+ *
+ *   lagrangian mesh is obtained using
+ *   pos (t-dt) = pos(t) -Velo(t)*dt
+ *   then, project on the sphere with a given radius
+ *
+ *  Created on: Apr 22, 2013
+ */
+
+#include <iostream>
+#include <sstream>
+#include <time.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "moab/Core.hpp"
+#include "moab/Interface.hpp"
+#include "Intx2MeshOnSphere.hpp"
+#include <math.h>
+#include "TestUtil.hpp"
+#include "moab/ParallelComm.hpp"
+#include "moab/ProgOptions.hpp"
+#include "MBParallelConventions.h"
+#include "moab/ReadUtilIface.hpp"
+#include "MBTagConventions.hpp"
+
+#include "CslamUtils.hpp"
+
+// for M_PI
+#include <math.h>
+
+#ifdef MESHDIR
+std::string TestDir( STRINGIFY(MESHDIR) );
+#else
+std::string TestDir(".");
+#endif
+
+using namespace moab;
+// some input data
+double EPS1=0.2; // this is for box error
+std::string input_mesh_file("VELO00_16p.h5m"); // input file, partitioned correctly
+double Radius = 1.0; // change to radius
+double deltaT = 1.e-6;
+void test_intx_in_parallel_elem_based();
+
+int main(int argc, char **argv)
+{
+  MPI_Init(&argc, &argv);
+  EPS1 = 0.000002;
+  int result = 0;
+
+  if (argc>1)
+  {
+    int index=1;
+    while (index<argc)
+    {
+      if (!strcmp( argv[index], "-eps")) // this is for box error
+      {
+        EPS1=atof(argv[++index]);
+      }
+      if (!strcmp( argv[index], "-input"))
+      {
+        input_mesh_file=argv[++index];
+      }
+      if (!strcmp( argv[index], "-radius"))
+      {
+        Radius=atof(argv[++index]);
+      }
+      if (!strcmp( argv[index], "-deltaT"))
+      {
+        deltaT=atof(argv[++index]);
+      }
+      index++;
+    }
+  }
+  std::cout << " run: -input " << input_mesh_file << "  -eps " << EPS1 <<
+      " -radius " << Radius << " -deltaT " << deltaT << "\n";
+
+  result += RUN_TEST(test_intx_in_parallel_elem_based);
+
+  MPI_Finalize();
+  return result;
+}
+// will save the LOC tag on the euler nodes
+ErrorCode  compute_lagrange_mesh_on_sphere(Interface * mb, EntityHandle euler_set)
+{
+  ErrorCode rval = MB_SUCCESS;
+
+  /*
+   * get all quads first, then vertices, then move them on the surface of the sphere
+   *  radius is 1, usually
+   *  pos (t-dt) = pos(t) -Velo(t)*dt; this will be lagrange mesh, on each processor
+   */
+  Range quads;
+  rval = mb->get_entities_by_type(euler_set, MBQUAD, quads);
+  CHECK_ERR(rval);
+
+  Range connecVerts;
+  rval = mb->get_connectivity(quads, connecVerts);
+
+  // the LOC tag, should be provided by the user?
+  Tag tagh = 0;
+  std::string tag_name("DP");
+  rval = mb->tag_get_handle(tag_name.c_str(), 3, MB_TYPE_DOUBLE, tagh, MB_TAG_DENSE | MB_TAG_CREAT);
+  CHECK_ERR(rval);
+  void *data; // pointer to the DP in memory, for each vertex
+  int count;
+
+  rval = mb->tag_iterate(tagh, connecVerts.begin(), connecVerts.end(), count, data);
+  CHECK_ERR(rval);
+  // here we are checking contiguity
+  assert(count == (int) connecVerts.size());
+  double * ptr_DP=(double*)data;
+  // get the coordinates of the old mesh, and move it around using velocity tag
+
+  Tag tagv = 0;
+  std::string velo_tag_name("VELO");
+  rval = mb->tag_get_handle(velo_tag_name.c_str(), 3, MB_TYPE_DOUBLE, tagv, MB_TAG_DENSE);
+  CHECK_ERR(rval);
+
+  /*void *datavelo; // pointer to the VELO in memory, for each vertex
+
+  rval = mb->tag_iterate(tagv, connecVerts.begin(), connecVerts.end(), count, datavelo);
+  CHECK_ERR(rval);*/
+  // here we are checking contiguity
+  assert(count == (int) connecVerts.size());
+// now put the vertices in the right place....
+  //int vix=0; // vertex index in new array
+
+  for (Range::iterator vit=connecVerts.begin();vit!=connecVerts.end(); vit++ )
+  {
+    EntityHandle oldV=*vit;
+    CartVect posi;
+    rval = mb->get_coords(&oldV, 1, &(posi[0]) );
+    CHECK_ERR(rval);
+    CartVect velo;
+    rval = mb->tag_get_data(tagv, &oldV, 1, (void *) &(velo[0]) );
+    CHECK_ERR(rval);
+    // do some mumbo jumbo, as in python script
+    CartVect newPos = posi - deltaT*velo;
+    double len1= newPos.length();
+    newPos = Radius*newPos/len1;
+
+    ptr_DP[0]=newPos[0];
+    ptr_DP[1]=newPos[1];
+    ptr_DP[2]=newPos[2];
+    ptr_DP+=3; // increment to the next node
+  }
+
+  return rval;
+}
+
+void test_intx_in_parallel_elem_based()
+{
+  std::string opts = std::string("PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION")+
+        std::string(";PARALLEL_RESOLVE_SHARED_ENTS");
+  Core moab;
+  Interface & mb = moab;
+  EntityHandle euler_set;
+  ErrorCode rval;
+  rval = mb.create_meshset(MESHSET_SET, euler_set);
+  CHECK_ERR(rval);
+  std::string example(TestDir + "/" +  input_mesh_file);
+
+  rval = mb.load_file(example.c_str(), &euler_set, opts.c_str());
+
+  ParallelComm* pcomm = ParallelComm::get_pcomm(&mb, 0);
+  CHECK_ERR(rval);
+
+  rval = pcomm->check_all_shared_handles();
+  CHECK_ERR(rval);
+
+  // everybody will get a DP tag, including the non owned entities; so exchange tags is not required for LOC (here)
+  rval = compute_lagrange_mesh_on_sphere(&mb, euler_set);
+  CHECK_ERR(rval);
+
+  int rank = pcomm->proc_config().proc_rank();
+
+  std::stringstream ste;
+  ste<<"initial" << rank<<".vtk";
+  mb.write_file(ste.str().c_str(), 0, 0, &euler_set, 1);
+
+  Intx2MeshOnSphere worker(&mb);
+
+  worker.SetRadius(Radius);
+  worker.set_box_error(EPS1);//
+  worker.SetEntityType(MBQUAD);
+
+  worker.SetErrorTolerance(Radius*1.e-8);
+  std::cout << "error tolerance epsilon_1="<< Radius*1.e-8 << "\n";
+  //  worker.locate_departure_points(euler_set);
+
+  // we need to make sure the covering set is bigger than the euler mesh
+  EntityHandle covering_lagr_set;
+  rval = mb.create_meshset(MESHSET_SET, covering_lagr_set);
+  CHECK_ERR(rval);
+
+  rval = worker.create_departure_mesh_2nd_alg(euler_set, covering_lagr_set);
+  CHECK_ERR(rval);
+
+  std::stringstream ss;
+  ss<<"partial" << rank<<".vtk";
+  mb.write_file(ss.str().c_str(), 0, 0, &covering_lagr_set, 1);
+  EntityHandle outputSet;
+  rval = mb.create_meshset(MESHSET_SET, outputSet);
+  CHECK_ERR(rval);
+  rval = worker.intersect_meshes(covering_lagr_set, euler_set, outputSet);
+  CHECK_ERR(rval);
+
+  //std::string opts_write("PARALLEL=WRITE_PART");
+  //rval = mb.write_file("manuf.h5m", 0, opts_write.c_str(), &outputSet, 1);
+  std::string opts_write("");
+  std::stringstream outf;
+  outf<<"intersect" << rank<<".h5m";
+  rval = mb.write_file(outf.str().c_str(), 0, 0, &outputSet, 1);
+  double intx_area = area_on_sphere(&mb, outputSet, Radius);
+  double arrival_area = area_on_sphere(&mb, euler_set, Radius) ;
+  std::cout<< "On rank : " << rank << " arrival area: " << arrival_area<<
+      "  intersection area:" << intx_area << " rel error: " << fabs((intx_area-arrival_area)/arrival_area) << "\n";
+  CHECK_ERR(rval);
+}
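
The heart of compute_lagrange_mesh_on_sphere is the departure-point update
written into the DP tag: step each vertex back along its velocity, then
re-project onto the sphere. A minimal sketch isolating that math, assuming only
the CartVect operators already used in the test:

    #include "moab/CartVect.hpp"

    // pos(t-dt) = pos(t) - Velo(t)*dt, then projected back onto the sphere
    moab::CartVect departure_point( const moab::CartVect& pos,
                                    const moab::CartVect& velo,
                                    double deltaT, double Radius )
    {
      moab::CartVect newPos = pos - deltaT*velo;
      double len = newPos.length();
      return Radius*newPos/len;      // keep the point on the sphere of radius Radius
    }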

diff --git a/tools/mbcslam/intx_in_plane_test.cpp b/tools/mbcslam/intx_in_plane_test.cpp
index 08e97a4..774e583 100644
--- a/tools/mbcslam/intx_in_plane_test.cpp
+++ b/tools/mbcslam/intx_in_plane_test.cpp
@@ -73,6 +73,19 @@ int main(int argc, char* argv[])
   rval = mb->write_mesh(newFile, &outputSet, 1);
   if (MB_SUCCESS != rval)
     return 1;
+
+  // retrieve polygons and get adjacent edges
+  Range polygons;
+  rval = mb->get_entities_by_type(outputSet, MBPOLYGON, polygons);
+  if (MB_SUCCESS != rval)
+    return 1;
+
+  Range edges;
+  rval = mb->get_adjacencies(polygons, 1, true, edges , Interface::UNION);
+  if (MB_SUCCESS != rval)
+     return 1;
+
+  std::cout << "number of edges:" << edges.size() << "\n";
   return 0;
 
 

diff --git a/tools/mbcslam/process_arm.cpp b/tools/mbcslam/process_arm.cpp
new file mode 100644
index 0000000..ba7642c
--- /dev/null
+++ b/tools/mbcslam/process_arm.cpp
@@ -0,0 +1,404 @@
+/*
+ * process_arm.cpp
+ *
+ *  Created on: April 18, 2013
+ */
+
+// process the files from Mark; also, link against netcdf directly, because we will use
+// netcdf calls to read data, the same as in ReadNC and ReadNCDF
+//
+//
+#include <iostream>
+#include <sstream>
+#include <time.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "moab/Core.hpp"
+#include "moab/Interface.hpp"
+#include "moab/ReadUtilIface.hpp"
+#include "moab/AdaptiveKDTree.hpp"
+
+#include "CslamUtils.hpp"
+
+#include "netcdf.h"
+
+#include <algorithm>
+#include <string>
+#include <assert.h>
+
+#include <cmath>
+
+
+using namespace moab;
+
+#define INS_ID(stringvar, prefix, id) \
+          sprintf(stringvar, prefix, id)
+
+#define GET_DIM(ncdim, name, val)\
+    {                            \
+    int gdfail = nc_inq_dimid(ncFile, name, &ncdim);          \
+    if (NC_NOERR == gdfail) {                                             \
+      size_t tmp_val;                                                   \
+      gdfail = nc_inq_dimlen(ncFile, ncdim, &tmp_val);                        \
+      if (NC_NOERR != gdfail) {                                           \
+        std::cout<<"couldn't get dimension length for" << name<< " \n"; \
+        return 1;                                              \
+      }                                                                 \
+      else val = tmp_val;                                               \
+    } else val = 0;}
+
+#define GET_DIMB(ncdim, name, varname, id, val) \
+          INS_ID(name, varname, id); \
+          GET_DIM(ncdim, name, val);
+
+#define GET_VAR(name, id, dims) \
+    {                           \
+    id = -1;\
+    int gvfail = nc_inq_varid(ncFile, name, &id);   \
+    if (NC_NOERR == gvfail) {       \
+    int ndims;\
+    gvfail = nc_inq_varndims(ncFile, id, &ndims);\
+    if (NC_NOERR == gvfail) {\
+    dims.resize(ndims);    \
+    gvfail = nc_inq_vardimid(ncFile, id, &dims[0]);}}}
+
+#define GET_1D_INT_VAR(name, id, vals) \
+    {GET_VAR(name, id, vals);  \
+  if (-1 != id) {\
+    size_t ntmp;\
+    int ivfail = nc_inq_dimlen(ncFile, vals[0], &ntmp);\
+    vals.resize(ntmp);\
+    size_t ntmp1 = 0;                                                           \
+    ivfail = nc_get_vara_int(ncFile, id, &ntmp1, &ntmp, &vals[0]);\
+    if (NC_NOERR != ivfail) {\
+      std::cout<<"ReadNCDF:: Problem getting variable " <<name <<"\n";\
+      return 1;}}}
+
+
+#define GET_1D_DBL_VAR(name, id, vals) \
+    {std::vector<int> dum_dims;        \
+  GET_VAR(name, id, dum_dims);\
+  if (-1 != id) {\
+    size_t ntmp;\
+    int dvfail = nc_inq_dimlen(ncFile, dum_dims[0], &ntmp);\
+    vals.resize(ntmp);\
+    size_t ntmp1 = 0;                                                           \
+    dvfail = nc_get_vara_double(ncFile, id, &ntmp1, &ntmp, &vals[0]);\
+    if (NC_NOERR != dvfail) {\
+      std::cout<<"ReadNCDF:: Problem getting variable "<< name<<"\n";\
+      return 1;}}}
+
+/*
+int get_2d_flt_var(int ncFile, const char * name, int index, std::vector<float> & vals)
+{
+  int id;
+  std::vector<int> dum_dims;
+  GET_VAR(name, id, dum_dims);
+  if (-1 != id) {
+    size_t ntmp;
+  int dvfail = nc_inq_dimlen(ncFile, dum_dims[1], &ntmp);
+  vals.resize(ntmp);
+  size_t ntmp1[2] = {index, 0};
+
+  int ntimes;
+  dvfail = nc_inq_dimlen(ncFile, dum_dims[0], &ntimes);
+  if (index>=ntimes) {
+    std::cout<<"ReadNCDF:: Problem getting variable "<< name<<"\n";
+    return 1;
+  }
+  size_t count[2] ={1, ntmp};
+  dvfail = nc_get_vara_float(ncFile, id, ntmp1, count, &vals[0]);
+  if (NC_NOERR != dvfail) {
+    std::cout<<"ReadNCDF:: Problem getting variable "<< name<<"\n";
+    return 1;
+  }
+  return 0;
+}
+*/
+/* get the variable along an index */
+#define GET_2D_FLT_VAR(name, id, index, vals) \
+    {std::vector<int> dum_dims;        \
+  GET_VAR(name, id, dum_dims);\
+  if (-1 != id) {\
+    size_t ntmp, ntmp2;\
+    int dvfail = nc_inq_dimlen(ncFile, dum_dims[1], &ntmp);\
+    dvfail = nc_inq_dimlen(ncFile, dum_dims[0], &ntmp2);\
+    vals.resize(ntmp);\
+    size_t ntmp1[2] = {index, 0}; \
+    if (index>=ntmp2) { \
+      std::cout<<"ReadNCDF:: Problem getting variable "<< name<<"\n"; \
+      return 1; \
+    } \
+    size_t count[2] ={1, ntmp}; \
+    dvfail = nc_get_vara_float(ncFile, id, ntmp1, count, &vals[0]);\
+    if (NC_NOERR != dvfail) {\
+      std::cout<<"ReadNCDF:: Problem getting variable "<< name<<"\n";\
+      return 1;}}}
+
+int main(int argc, char ** argv)
+{
+
+  int num_el = 50000;
+  if (argc>1)
+  {
+    num_el = atoi(argv[1]);
+  }
+  std::cout << "num_el=" << num_el << "\n";
+
+  Core moab;
+  Interface & mb = moab;
+  ErrorCode rval;
+
+  int ncFile, temp_dim;        // netcdf/exodus file
+
+  // now, open the data file and read the lat, lon and U850 and V850
+  const char *data_file = "fc5_arm12.cam2.h2.0004-12-12-00000.nc";
+
+  int fail = nc_open(data_file, 0, &ncFile);
+  if (NC_NOWRITE != fail) {
+    std::cout<<"ReadNCDF:: problem opening Netcdf/Exodus II "<<data_file <<"\n";
+    return 1;
+  }
+  int ncol;
+  GET_DIM(temp_dim, "ncol", ncol);
+  std::cout << "ncol:" << ncol << "\n";
+
+  std::vector<double> lat;
+  std::vector<double> lon;
+  GET_1D_DBL_VAR("lat", temp_dim, lat);
+
+  GET_1D_DBL_VAR("lon", temp_dim, lon);
+
+  std::cout<< " lat, lon 0" << lat[0] << " " << lon[0] << "\n";
+  std::cout<< " lat, lon 1" << lat[1] << " " << lon[1] << "\n";
+  std::cout << " size: " << lat.size() << "\n";
+
+  // see if the mesh from metadata makes sense
+  // create quads with the connectivity from conn array
+
+  ReadUtilIface* readMeshIface;
+  mb.query_interface( readMeshIface );
+  if (NULL==readMeshIface)
+    return 1;
+  EntityHandle node_handle = 0;
+  std::vector<double*> arrays;
+  rval = readMeshIface->get_node_coords(3, ncol,
+      1, node_handle, arrays);
+  if (MB_SUCCESS!= rval)
+    return 1;
+
+  SphereCoords  sp;
+  sp.R = 1.;
+  Tag gid;
+  rval = mb.tag_get_handle("GLOBAL_ID", 1, MB_TYPE_INTEGER,
+      gid, MB_TAG_SPARSE|MB_TAG_CREAT);
+  if (MB_SUCCESS!= rval)
+      return 1;
+  std::vector<int> gids(ncol);
+
+  double conversion_factor = M_PI/180.;
+  for (int k=0; k<ncol; k++)
+  {
+    sp.lat=lat[k]*conversion_factor;
+    sp.lon=lon[k]*conversion_factor;
+    CartVect pos=spherical_to_cart (sp);
+    arrays[0][k]=pos[0];
+    arrays[1][k]=pos[1];
+    arrays[2][k]=pos[2];
+    gids[k]=k+1;
+  }
+
+  Range nodes(node_handle, node_handle+ncol-1);
+  rval = mb.tag_set_data(gid, nodes, &gids[0]);
+  if (MB_SUCCESS!= rval)
+       return 1;
+
+  EntityHandle newSet;
+  rval = mb.create_meshset(MESHSET_SET, newSet);
+  if (MB_SUCCESS != rval)
+    return 1;
+
+  // so the nodes will be part of the new set
+  mb.add_entities(newSet, nodes);
+
+  // build a kd tree with the vertices
+  EntityHandle tree_root;
+  AdaptiveKDTree kd(&mb, true);
+  rval = kd.build_tree(nodes, tree_root);
+  if (MB_SUCCESS != rval)
+    return 1;
+
+  unsigned int  min_depth, max_depth;
+  rval = kd.depth(tree_root, min_depth, max_depth);
+  if (MB_SUCCESS != rval)
+     return 1;
+  std::cout << "min_depth, max_depth " << min_depth << " " << max_depth << "\n";
+  // now, read the conn file created using spectral visu, and see how they fit
+  // this can be directly imported to moab
+  const char *myconn_file = "spec.R2.vtk";
+  EntityHandle euler_set;
+  rval = mb.create_meshset(MESHSET_SET, euler_set);
+  if (MB_SUCCESS != rval)
+    return 1;
+
+  rval = mb.load_file(myconn_file, &euler_set);
+
+  if (MB_SUCCESS != rval)
+    return 1;
+
+  mb.list_entities(&euler_set, 1);
+
+  Range specQuads;
+  rval = mb.get_entities_by_dimension(euler_set, 2, specQuads );
+  if (MB_SUCCESS != rval)
+     return 1;
+
+  Range vertices;
+  rval = mb.get_connectivity(specQuads, vertices);
+  if (MB_SUCCESS != rval)
+     return 1;
+
+  // do a mapping, from position of vertices to the vertices in the kd tree.
+  // find the closest vertex to each of this
+  std::vector<EntityHandle> mappedTo(vertices.size());
+  std::vector<double> mycoords(vertices.size()*3);
+  rval = mb.get_coords(vertices, &mycoords[0]);
+  double * ptr = &mycoords[0];
+  size_t num_verts=vertices.size();
+  for (size_t i=0; i<num_verts; i++, ptr+=3)
+  {
+    CartVect pos(ptr); // take 3 coordinates
+    std::vector<EntityHandle> leaves;
+    rval = kd.leaves_within_distance( tree_root,
+                                          ptr,
+                                          0.001,
+                                        leaves);
+    if (MB_SUCCESS != rval)
+      return 1;
+    Range closeVerts;
+    for (std::vector<EntityHandle>::iterator vit = leaves.begin(); vit != leaves.end(); vit++)
+    {
+      rval= mb.get_entities_by_handle(*vit, closeVerts, Interface::UNION);
+      if (moab::MB_SUCCESS != rval)
+        return 1;
+    }
+    if (closeVerts.size()<1)
+    {
+      std::cout << "increase tolerance, no points close to " << pos << "\n";
+      return 1;
+    }
+    std::vector<CartVect> coordsTarget(closeVerts.size());
+    rval = mb.get_coords(closeVerts, &(coordsTarget[0][0]));
+    if (MB_SUCCESS != rval)
+      return 1;
+    double minDist2=(pos-coordsTarget[0]).length_squared();
+    EntityHandle closestVertex=closeVerts[0];
+    for (unsigned int j=1; j<closeVerts.size(); j++)
+    {
+      double dist2=(pos-coordsTarget[j]).length_squared();
+      if (minDist2>dist2)
+      {
+        closestVertex = closeVerts[j];
+        minDist2=dist2;
+      }
+    }
+    if (minDist2 > 0.00001)
+    {
+      std::cout << " problem with node " << vertices[i] << "  min dist2:" << minDist2 << "\n";
+      return 1;
+    }
+    mappedTo [i] = closestVertex;
+  }
+
+  num_el = (int)specQuads.size();
+  // tmp_ptr is of type int* and points at the same place as conn
+  EntityHandle * conn = 0;
+
+  EntityHandle elh;
+
+  readMeshIface->get_element_connect(
+          num_el,
+          4,
+          MBQUAD,
+          1,
+          elh,
+          conn);
+
+  EntityHandle firstVertMyMesh=vertices[0];
+  for (int k=0; k<num_el; k++)
+  {
+    const EntityHandle * myconn=0;
+    EntityHandle specElem = specQuads[k];
+    int num_nodes=0;
+    rval = mb.get_connectivity(specElem, myconn, num_nodes);
+    if (MB_SUCCESS != rval || num_nodes !=4)
+      return 1;
+
+    int start_el = k*4;
+    for (int j=0; j<4; j++)
+       conn[start_el+j] = mappedTo[myconn[j]-firstVertMyMesh];
+  }
+  std::cout << " conn:" << conn[0] << " " << conn[1] << " " << conn[3]<< "\n";
+  Range erange(elh, elh+num_el-1);
+
+  mb.add_entities(newSet, erange);
+  std::vector<int> gidels(num_el);
+  Tag gid2;
+  rval = mb.tag_get_handle("GLOBAL_ID_EL", 1, MB_TYPE_INTEGER,
+        gid2, MB_TAG_SPARSE|MB_TAG_CREAT);
+
+  if (MB_SUCCESS != rval)
+      return 1;
+  for (int k=0; k<num_el; k++)
+    gidels[k]=k+1;
+  mb.tag_set_data(gid2, erange, &gidels[0]);
+
+  int times;
+  GET_DIM(temp_dim, "time", times);
+  Tag velotag;
+  rval = mb.tag_get_handle("VELO", 3, MB_TYPE_DOUBLE,
+            velotag, MB_TAG_DENSE|MB_TAG_CREAT);
+  if (MB_SUCCESS!= rval)
+    return 1;
+
+  for (size_t tt=0; tt<56 /*(size_t)times*/; tt++)
+  {
+    // now, read velocities from file:
+    // read the U850 and V850 variables
+    std::vector<float> u850;
+    GET_2D_FLT_VAR("U850", temp_dim, tt, u850);
+    std::vector<float> v850;
+    GET_2D_FLT_VAR("V850", temp_dim, tt, v850);
+
+    std::cout << " U850:" << u850[0] << " " << u850[1] << " " << u850[5] << " "<< u850.size()<<"\n";
+    std::cout << " V850:" << v850[0] << " " << v850[1] << " " << v850[5] << " "<< u850.size()<<"\n";
+    // ok, use radius as 6371km; not needed
+
+    std::vector<CartVect> velo850(ncol);
+
+    std::stringstream fileName;
+    fileName << "VELO0" <<  tt << ".h5m";
+    std::cout << " read velocities at 850 for time:" << tt << "\n";
+
+
+    for (int k=0; k<ncol; k++)
+    {
+      double latRad=lat[k]*conversion_factor;
+      double lonRad=lon[k]*conversion_factor;
+      CartVect U(-sin(lonRad), cos(lonRad), 0.);
+      CartVect V(-sin(latRad)*cos(lonRad), -sin(latRad)*sin(lonRad), cos(latRad));
+      velo850[k]=U*u850[k] +V*v850[k];
+    }
+    rval = mb.tag_set_data(velotag, nodes, &(velo850[0][0]));
+    if (MB_SUCCESS!= rval)
+      return 1;
+    rval = mb.write_mesh(fileName.str().c_str(), &newSet, 1);
+    if (MB_SUCCESS!= rval)
+      return 1;
+  }
+
+
+
+  return 0;
+}
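
The per-node velocity assembly near the end of this file maps the U850/V850
components into Cartesian space using the local east and north unit vectors on
the sphere. A small sketch of that conversion (standard spherical unit-vector
formulas, stated here rather than copied verbatim from the file):

    #include "moab/CartVect.hpp"
    #include <cmath>

    // zonal (u, eastward) and meridional (v, northward) wind at (latRad, lonRad)
    moab::CartVect wind_to_cartesian( double latRad, double lonRad, double u, double v )
    {
      moab::CartVect east( -std::sin(lonRad), std::cos(lonRad), 0.0 );
      moab::CartVect north( -std::sin(latRad)*std::cos(lonRad),
                            -std::sin(latRad)*std::sin(lonRad),
                             std::cos(latRad) );
      return east*u + north*v;
    }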

diff --git a/tools/mbcslam/spec_visu_test.cpp b/tools/mbcslam/spec_visu_test.cpp
index 686fde7..54c7e8f 100644
--- a/tools/mbcslam/spec_visu_test.cpp
+++ b/tools/mbcslam/spec_visu_test.cpp
@@ -22,21 +22,25 @@ int main(int argc, char* argv[])
   const char *filename_mesh = STRINGIFY(SRCDIR) "/eulerHomme.vtk";
   const char *newFile = "spectral.vtk";
   int NP = 4; // number of nodes in each direction
-  if (argc == 4)
+  double tolerance = 0.0001;
+  double R = 6.; // should be input
+  if (argc == 6)
   {
     filename_mesh = argv[1];
     NP = atoi(argv[2]);
     newFile = argv[3];
+    R = atof(argv[4]);
+    tolerance = atof(argv[5]);
   }
   else
   {
-    printf("Usage: %s <mesh_filename><NP><newFile>\n", argv[0]);
+    printf("Usage: %s <mesh_filename><NP><newFile><R><tolerance>\n", argv[0]);
     if (argc != 1)
       return 1;
-    printf("No files specified.  Defaulting to: %s  %d  %s\n",
-        filename_mesh, NP, newFile);
   }
 
+  printf("run:  %s  %d  %s %f %f\n",
+          filename_mesh, NP, newFile, R, tolerance);
   // read input mesh in a set
   ErrorCode rval = MB_SUCCESS;
   Core moab;
@@ -61,11 +65,11 @@ int main(int argc, char* argv[])
   if (MB_SUCCESS != rval)
     return 1;
 
-  rval = SpectralVisuMesh( mb,  inputRange, NP, outputSet);
+  rval = SpectralVisuMesh( mb,  inputRange, NP, outputSet, tolerance);
 
   if (MB_SUCCESS != rval)
     return 1;
-  double R = 6.; // should be input
+
   rval = ProjectOnSphere(mb, outputSet, R);
   if (MB_SUCCESS != rval)
     return 1;

Repository URL: https://bitbucket.org/fathomteam/moab/

--

This is a commit notification from bitbucket.org. You are receiving
this message because you have the commit notification service enabled
for this repository.

