[MOAB-dev] commit/MOAB: 4 new changesets

commits-noreply at bitbucket.org
Sun Jun 29 23:03:46 CDT 2014


4 new commits in MOAB:

https://bitbucket.org/fathomteam/moab/commits/7d12eea5999e/
Changeset:   7d12eea5999e
Branch:      None
User:        iulian07
Date:        2014-06-29 04:26:05
Summary:     add large mesh example

Affected #:  2 files

diff --git a/examples/GenLargeMesh.cpp b/examples/GenLargeMesh.cpp
new file mode 100644
index 0000000..6ef1dc9
--- /dev/null
+++ b/examples/GenLargeMesh.cpp
@@ -0,0 +1,105 @@
+/** @example GenLargeMesh.cpp \n
+ * \brief Create a large structured mesh, partitioned \n
+ * <b>To run</b>: mpiexec -np 2 GenLargeMesh \n
+ *
+ *  It shows how to create a mesh on the fly, on multiple
+ *  processors, block-wise.
+ *  Each processor will create its portion of a block mesh, partitioned
+ *  as AxBxC blocks. Each block will have blockSize^3 hexahedra, and will get a
+ *  different PARALLEL_PARTITION tag.
+ *
+ *  The number of tasks will be MxNxK, and it must match the MPI size
+ *  Each task will generate its mesh at location (m,n,k)
+ *
+ *  By default M=2, N=1, K=1, so by default it should be launched on 2 procs
+ *  By default, blockSize is 4, and A=2, B=2, C=2, so each task will generate locally
+ *    blockSize^3 x A x B x C hexahedra (64 x 8 = 512 hexas, in 8 partitions)
+ *
+ *  The total number of partitions will be A*B*C*M*N*K (16 by default)
+ *
+ *  Each part in the partition will get a proper tag
+ *
+ *  The vertices will get a proper global id, which will be used to resolve the
+ *  shared entities
+ *  The output will be written in parallel, and we will try sizes as big as we can
+ *  (up to a billion vertices, because we use int for global ids)
+ *
+ *  Within each partition, the hexas will be numbered contiguously, and so will the
+ *  vertices; the global id is easy to determine for a vertex, but the entity
+ *  handle space will be more interesting to manage within a partition (we want
+ *  contiguous handles within a partition)
+ *
+ *  We may or may not use ScdInterface, because we want control over global id and
+ *  entity handle within a partition
+ *
+ *
+ *  mpiexec -np 2 GenLargeMesh
+ */
+
+#include "moab/Core.hpp"
+#include "moab/ProgOptions.hpp"
+#include "moab/ParallelComm.hpp"
+#include "moab/CN.hpp"
+
+#include <iostream>
+#include <vector>
+
+using namespace moab;
+using namespace std;
+int main(int argc, char **argv)
+{
+  int A=2, B=2, C=2, M=2, N=1, K=1;
+  int blockSize = 4;
+
+  MPI_Init(&argc, &argv);
+
+  ProgOptions opts;
+
+  opts.addOpt<int>(std::string("blockSize,b"),
+      std::string("Block size of mesh (default=4)"), &blockSize);
+  opts.addOpt<int>(std::string("xproc,M"),
+      std::string("Number of processors in x dir (default=2)"), &M);
+  opts.addOpt<int>(std::string("yproc,N"),
+      std::string("Number of processors in y dir (default=1)"), &N);
+  opts.addOpt<int>(std::string("zproc,K"),
+      std::string("Number of processors in z dir (default=1)"), &K);
+
+  opts.addOpt<int>(std::string("xblocks,A"),
+      std::string("Number of blocks on a task in x dir (default=2)"), &A);
+  opts.addOpt<int>(std::string("yblocks,B"),
+      std::string("Number of blocks on a task in y dir (default=2)"), &B);
+  opts.addOpt<int>(std::string("zblocks,C"),
+      std::string("Number of blocks on a task in x dir (default=2)"), &C);
+
+
+  opts.parseCommandLine(argc, argv);
+
+  Interface* mb = new Core;
+  if (NULL == mb)
+  {
+    MPI_Finalize();
+    return 1;
+  }
+
+  MPI_Comm comm ;
+  int rank, size;
+  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+  MPI_Comm_rank( MPI_COMM_WORLD, &size );
+
+  if (M*N*K != size)
+  {
+    if (rank==0)
+    {
+      cout <<"M*N*K=" << M*N*K << "  != size = "<< " size\n";
+    }
+    MPI_Finalize();
+    return 1;
+  }
+
+  // the global id of each vertex will be, on task
+
+  MPI_Finalize();
+  return 0;
+}
+
+

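The counts promised in the header comment are easy to sanity-check. Below is a minimal standalone C++ sketch (not part of the commit; the variable names simply mirror the example's options) that reproduces the figures for the default parameters:

  #include <iostream>

  int main()
  {
    // Defaults quoted in the GenLargeMesh.cpp header comment
    int blockSize = 4, A = 2, B = 2, C = 2, M = 2, N = 1, K = 1;

    // Hexas generated locally by one task: blockSize^3 per block, A*B*C blocks per task
    long long hexasPerBlock = (long long)blockSize * blockSize * blockSize;  // 64
    long long hexasPerTask  = hexasPerBlock * A * B * C;                     // 512

    // One PARALLEL_PARTITION set per block, on every task
    long long totalParts = (long long)A * B * C * M * N * K;                 // 16

    // Global mesh: (M*A*blockSize) x (N*B*blockSize) x (K*C*blockSize) hexas,
    // with one more vertex than elements in each direction
    long long nx = (long long)M * A * blockSize;
    long long ny = (long long)N * B * blockSize;
    long long nz = (long long)K * C * blockSize;
    long long totalHexas = nx * ny * nz;
    long long totalVerts = (nx + 1) * (ny + 1) * (nz + 1);

    std::cout << hexasPerTask << " hexas/task, " << totalParts << " partitions, "
              << totalHexas << " hexas, " << totalVerts << " vertices\n";
    return 0;
  }

With the defaults this prints "512 hexas/task, 16 partitions, 1024 hexas, 1377 vertices", matching the 64 x 8 = 512 figure in the comment; the 64-bit arithmetic is only there so the same sketch stays usable for the near-billion-vertex runs mentioned above (int global ids cap out around 2^31).
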
diff --git a/examples/makefile b/examples/makefile
index 787c8d3..f41def7 100644
--- a/examples/makefile
+++ b/examples/makefile
@@ -71,6 +71,9 @@ VisTags: VisTags.o ${MOAB_LIBDIR}/libMOAB.la
 ReadWriteTest: ReadWriteTest.o ${MOAB_LIBDIR}/libMOAB.la
 	${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK} 
 	
+GenLargeMesh: GenLargeMesh.o ${MOAB_LIBDIR}/libMOAB.la
+	${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK} 
+	
 clean:
 	rm -rf *.o *.mod *.h5m ${EXAMPLES} ${PAREXAMPLES} ${EXOIIEXAMPLES} ${F90EXAMPLES}
 


https://bitbucket.org/fathomteam/moab/commits/89f888c9c193/
Changeset:   89f888c9c193
Branch:      None
User:        iulian07
Date:        2014-06-30 05:49:04
Summary:     is_valid should be on the interface too

not only in Core.hpp.
How do we know whether an entity handle is valid or not?
Is there another way?

Affected #:  1 file

diff --git a/src/moab/Interface.hpp b/src/moab/Interface.hpp
index 09f4219..c8102b8 100644
--- a/src/moab/Interface.hpp
+++ b/src/moab/Interface.hpp
@@ -1981,6 +1981,8 @@ public:
                                         SetIterator *&set_iter) = 0;
     /**@}*/
 
+
+  virtual bool is_valid(EntityHandle entity) const = 0;
 };
 
 //! predicate for STL algorithms.  Returns true if the entity handle is

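With is_valid now declared on Interface, code that only holds an Interface pointer can screen handles before using them (which is exactly what the next changeset does for sharedEnts). A minimal sketch, where check_handle and the small driver are illustrative and not part of the commit:

  #include "moab/Core.hpp"
  #include <iostream>

  using namespace moab;

  // Illustrative helper: can this handle still be dereferenced?
  // Written against Interface, so it needs no knowledge of Core.
  bool check_handle(Interface* mb, EntityHandle h)
  {
    if (!mb->is_valid(h))
    {
      std::cout << "stale handle: " << h << "\n";
      return false;
    }
    return true;
  }

  int main()
  {
    Core core;
    Interface* mb = &core;

    double coords[3] = {0., 0., 0.};
    EntityHandle vert;
    mb->create_vertex(coords, vert);  // handle is valid after creation
    check_handle(mb, vert);           // true

    mb->delete_entities(&vert, 1);    // entity is gone ...
    check_handle(mb, vert);           // ... so is_valid should now report false

    return 0;
  }
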

https://bitbucket.org/fathomteam/moab/commits/e861c1df4f15/
Changeset:   e861c1df4f15
Branch:      None
User:        iulian07
Date:        2014-06-30 05:51:54
Summary:     validate sharedEnts

It becomes important if some entities are deleted after they are resolved:
they are part of the sharedEnts vector, but they are never removed
from there.
This is the downside of keeping entities in lists in member data;
we have to hunt them down :(

Affected #:  2 files

diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index 10ce709..a8571af 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -8789,7 +8789,22 @@ ErrorCode ParallelComm::settle_intersection_points(Range & edges, Range & shared
 
   return MB_SUCCESS;
   // end copy
+}
+ErrorCode ParallelComm::correct_shared_entities()
+{
+
+  std::vector<EntityHandle> good_ents;
+  for (size_t i=0; i<sharedEnts.size(); i++)
+  {
+    if (mbImpl->is_valid(sharedEnts[i]))
+    {
+      good_ents.push_back(sharedEnts[i]);
+    }
   }
+  sharedEnts = good_ents;
+
+  return MB_SUCCESS;
+}
 
 void ParallelComm::print_pstatus(unsigned char pstat, std::string &ostr) 
 {

diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index 3424c51..2a16ac2 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -932,6 +932,12 @@ namespace moab {
     ErrorCode settle_intersection_points(Range & edges, Range & shared_edges_owned,
         std::vector<std::vector<EntityHandle> *> & extraNodesVec, double tolerance);
 
+    /** \brief Check whether the shared entities still exist
+     * Checks the sharedEnts array and removes entries that are no longer valid
+     *
+     */
+    ErrorCode correct_shared_entities();
+
   private:
 
     ErrorCode reduce_void(int tag_data_type, const MPI_Op mpi_op, int num_ents, void *old_vals, void *new_vals);

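The copy-into-a-new-vector loop above is clear enough; an equivalent in-place formulation would be the erase/remove_if idiom (in the spirit of the "predicate for STL algorithms" comment already in Interface.hpp). The sketch below only illustrates that alternative, not what the commit does; IsStale and the small driver are hypothetical:

  #include "moab/Core.hpp"
  #include <algorithm>
  #include <iostream>
  #include <vector>

  using namespace moab;

  // Predicate: true when a handle no longer refers to a live entity.
  struct IsStale
  {
    Interface* mb;
    IsStale(Interface* impl) : mb(impl) {}
    bool operator()(EntityHandle h) const { return !mb->is_valid(h); }
  };

  int main()
  {
    Core core;
    Interface* mb = &core;

    // Two vertices; delete one so its handle goes stale.
    double c0[3] = {0., 0., 0.}, c1[3] = {1., 0., 0.};
    EntityHandle v0, v1;
    mb->create_vertex(c0, v0);
    mb->create_vertex(c1, v1);

    std::vector<EntityHandle> ents;
    ents.push_back(v0);
    ents.push_back(v1);
    mb->delete_entities(&v1, 1);

    // In-place pruning, equivalent to the copy loop in correct_shared_entities().
    ents.erase(std::remove_if(ents.begin(), ents.end(), IsStale(mb)), ents.end());

    std::cout << ents.size() << " live handle(s) remain\n";  // expect 1
    return 0;
  }
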

https://bitbucket.org/fathomteam/moab/commits/11db00407a80/
Changeset:   11db00407a80
Branch:      iulian07/largemesh
User:        iulian07
Date:        2014-06-30 05:54:01
Summary:     improve the large mesh example

This example pushes the limits of MOAB.
It will be used later to create meshes with more than 1 billion vertices.

Each processor creates a structure of AxBxC blocks, each block with
blockSize^3 hexas.
The example should be run on an MxNxK array of processors.
After resolving and sharing the vertices, the mesh is written in
parallel.

Each block will represent a partition.
It is different from the parallel_write_test test, because there the number
of partitions created is equal to the number of processors.
Here, each processor creates AxBxC partitions,
for a total of AxBxC x MxNxK partitions.

The block size and A, B, ..., K are options, with default values.

The total number of hexas will be

 AxBxC x MxNxK x block^3

Edges and faces are removed after resolving sharing.

The file will contain just vertices and hexas, and it will already be
partitioned.

We will add some tags of interest, to make it more useful.

The writer failed if I just deleted the edges and faces,
so I had to introduce a method to correct sharedEnts in ParallelComm.

Affected #:  1 file

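The partition bookkeeping described in the message (each of the MxNxK tasks owning AxBxC parts) can be checked with plain arithmetic. The sketch below is not part of the commit; it only mirrors the rank decomposition and part_num formula used in the diff that follows, and the std::set confirms that the resulting part numbers come out distinct:

  #include <iostream>
  #include <set>

  int main()
  {
    const int A = 2, B = 2, C = 2, M = 2, N = 1, K = 1;  // defaults from the example

    std::set<int> partIds;
    for (int rank = 0; rank < M * N * K; rank++)
    {
      // Task position in the MxNxK processor grid (same formulas as GenLargeMesh)
      int k = rank / (M * N);
      int leftover = rank % (M * N);
      int n = leftover / M;
      int m = leftover % M;

      // Each task owns AxBxC blocks; every block becomes one PARALLEL_PARTITION set
      for (int c = 0; c < C; c++)
        for (int b = 0; b < B; b++)
          for (int a = 0; a < A; a++)
          {
            int part_num = a + m*A + (b + n*B)*(M*A) + (c + k*C)*(M*A * N*B);
            partIds.insert(part_num);
          }
    }

    // Expect A*B*C*M*N*K distinct ids (16 with the defaults)
    std::cout << partIds.size() << " distinct partitions\n";
    return 0;
  }
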
diff --git a/examples/GenLargeMesh.cpp b/examples/GenLargeMesh.cpp
index 6ef1dc9..1e0fa73 100644
--- a/examples/GenLargeMesh.cpp
+++ b/examples/GenLargeMesh.cpp
@@ -40,12 +40,16 @@
 #include "moab/ProgOptions.hpp"
 #include "moab/ParallelComm.hpp"
 #include "moab/CN.hpp"
+#include "moab/ReadUtilIface.hpp"
+#include "moab/MergeMesh.hpp"
 
 #include <iostream>
 #include <vector>
 
 using namespace moab;
 using namespace std;
+
+#define CHECKE(message) if(rval != MB_SUCCESS) { cout << (message) << "\n"; MPI_Finalize(); return 1; }
 int main(int argc, char **argv)
 {
   int A=2, B=2, C=2, M=2, N=1, K=1;
@@ -58,7 +62,7 @@ int main(int argc, char **argv)
   opts.addOpt<int>(std::string("blockSize,b"),
       std::string("Block size of mesh (default=4)"), &blockSize);
   opts.addOpt<int>(std::string("xproc,M"),
-      std::string("Number of processors in x dir (default=2)"), &M);
+      std::string("Number of processors in x dir (default=1)"), &M);
   opts.addOpt<int>(std::string("yproc,N"),
       std::string("Number of processors in y dir (default=1)"), &N);
   opts.addOpt<int>(std::string("zproc,K"),
@@ -80,23 +84,175 @@ int main(int argc, char **argv)
     MPI_Finalize();
     return 1;
   }
+  ReadUtilIface *iface;
+  ErrorCode rval = mb->query_interface(iface);
+  CHECKE("Can't get reader interface");
 
-  MPI_Comm comm ;
   int rank, size;
   MPI_Comm_rank( MPI_COMM_WORLD, &rank );
-  MPI_Comm_rank( MPI_COMM_WORLD, &size );
+  MPI_Comm_size( MPI_COMM_WORLD, &size );
 
   if (M*N*K != size)
   {
     if (rank==0)
     {
-      cout <<"M*N*K=" << M*N*K << "  != size = "<< " size\n";
+      cout <<"M*N*K=" << M*N*K << "  != size = "<< size << "\n";
     }
     MPI_Finalize();
     return 1;
   }
 
-  // the global id of each vertex will be, on task
+  // determine m, n, k for processor rank
+  int m,n,k;
+  k = rank/(M*N);
+  int leftover = rank%(M*N);
+  n = leftover/M;
+  m = leftover%M;
+  if (rank==size-1)
+  {
+    cout << "m, n, k for last rank:" << m << " " << n << " " << k << "\n";
+  }
+
+  // so there are a total of M * A * blockSize elements in x direction (so M * A * blockSize + 1 verts in x direction)
+  // so there are a total of N * B * blockSize elements in y direction (so N * B * blockSize + 1 verts in y direction)
+  // so there are a total of K * C * blockSize elements in z direction (so K * C * blockSize + 1 verts in z direction)
+
+  // there are ( M * A blockSize )      *  ( N * B * blockSize)      * (K * C * blockSize )    hexas
+  // there are ( M * A * blockSize + 1) *  ( N * B * blockSize + 1 ) * (K * C * blockSize + 1) vertices
+  // x is the first dimension that varies
+
+  int NX = ( M * A * blockSize + 1);
+  int NY = ( N * B * blockSize + 1);
+  int NZ = ( K * C * blockSize + 1);
+  int blockSize1 = blockSize + 1;// used for vertices
+  int bsq = blockSize * blockSize;
+  int b1sq = blockSize1 * blockSize1;
+  // generate the block at (a, b, c); it will represent a partition , it will get a partition tag
+
+  Tag global_id_tag;
+  mb->tag_get_handle("GLOBAL_ID", 1, MB_TYPE_INTEGER,
+               global_id_tag);
+
+  Tag part_tag;
+  int dum_id=-1;
+  mb->tag_get_handle("PARALLEL_PARTITION", 1, MB_TYPE_INTEGER,
+                 part_tag, MB_TAG_CREAT|MB_TAG_SPARSE, &dum_id);
+
+  for (int a=0; a<A; a++)
+  {
+    for (int b=0; b<B; b++)
+    {
+      for (int c=0; c<C; c++)
+      {
+        // we will generate (block+1)^3 vertices, and block^3 hexas
+        // the global id of the vertices will come from m, n, k, a, b, c
+        // x will vary from  m*A*block + a*block to m*A*block+(a+1)*block etc;
+        int num_nodes = (blockSize+1)*(blockSize+1)*(blockSize+1);
+
+        vector<double*> arrays;
+        EntityHandle startv;
+        rval = iface->get_node_coords(3, num_nodes, 0, startv, arrays);
+        CHECKE("Can't get node coords.");
+
+        int x = m*A*blockSize + a*blockSize;
+        int y = n*B*blockSize + b*blockSize;
+        int z = k*C*blockSize + c*blockSize;
+        int ix=0;
+        vector<int>  gids(num_nodes);
+        Range verts(startv, startv+num_nodes-1);
+        for (int kk=0; kk<blockSize+1; kk++)
+        {
+          for (int jj=0; jj<blockSize+1; jj++)
+          {
+            for (int ii=0; ii<blockSize+1; ii++)
+            {
+              arrays[0][ix] = (double)x+ii;
+              arrays[1][ix] = (double)y+jj;
+              arrays[2][ix] = (double)z+kk;
+              gids[ix] = 1 + (z+kk) * (NX*NY) + (y+jj) * NX + (x+ii);
+              ix++;
+            }
+          }
+        }
+        mb->tag_set_data(global_id_tag, verts, &gids[0]);
+        int num_hexas = (blockSize)*(blockSize)*(blockSize);
+        EntityHandle starte; // connectivity
+        EntityHandle * conn;
+        rval = iface->get_element_connect(num_hexas, 8, MBHEX, 0, starte, conn);
+        CHECKE("Can't get hexa  connectivity.");
+
+        Range hexas(starte, starte+num_hexas-1);
+        // fill  hexas
+        ix=0;
+        for (int kk=0; kk<blockSize; kk++)
+        {
+          for (int jj=0; jj<blockSize; jj++)
+          {
+            for (int ii=0; ii<blockSize; ii++)
+            {
+              EntityHandle corner=startv + ii + (jj*blockSize1) + kk * b1sq;
+              conn[ix]=corner;
+              conn[ix+1]=corner+1;
+              conn[ix+2]=corner+ 1 + blockSize1;
+              conn[ix+3]=corner+ blockSize1;
+              conn[ix+4]=corner + b1sq;
+              conn[ix+5]=corner+1 + b1sq;
+              conn[ix+6]=corner+ 1 + blockSize1 + b1sq;
+              conn[ix+7]=corner+ blockSize1 + b1sq;
+              ix+=8;
+            }
+          }
+        }
+        EntityHandle part_set;
+        rval = mb->create_meshset(MESHSET_SET, part_set);
+        CHECKE("Can't create mesh set.");
+        rval = mb->add_entities(part_set, hexas);
+        CHECKE("Can't add entities to set.");
+        int part_num= a +  m*A + (b + n*B)*(M*A) + (c+k*C)*(M*A * N*B);
+        rval = mb->tag_set_data(part_tag, &part_set, 1, &part_num);
+        CHECKE("Can't set part tag on set");
+
+
+
+      }
+    }
+  }
+  // after the mesh is generated on each proc, merge the vertices
+  MergeMesh mm(mb);
+  Range allhexas;
+  rval = mb->get_entities_by_type(0, MBHEX, allhexas);
+
+  CHECKE("Can't get all hexa  elements.");
+
+  rval = mm.merge_entities( allhexas, 0.0001);
+  CHECKE("Can't merge");
+  ParallelComm* pcomm = ParallelComm::get_pcomm(mb, 0);
+  if (pcomm==NULL)
+  {
+    pcomm = new ParallelComm( mb, MPI_COMM_WORLD );
+  }
+  rval = pcomm->resolve_shared_ents( 0, allhexas, 3, 0 );
+  CHECKE("Can't resolve shared ents");
+
+  // delete all quads and edges
+  Range toDelete;
+  rval =  mb->get_entities_by_dimension(0, 1, toDelete);
+  CHECKE("Can't get edges");
+  rval = mb->delete_entities(toDelete);
+  CHECKE("Can't delete edges");
+
+  toDelete.clear();
+
+  rval = mb->get_entities_by_dimension(0, 2, toDelete);
+  CHECKE("Can't get faces");
+  rval = mb->delete_entities(toDelete);
+  CHECKE("Can't delete faces");
+
+  rval = pcomm->correct_shared_entities() ;
+  CHECKE("Can't correct local shared")
+
+  rval = mb->write_file("test1.h5m", 0, ";;PARALLEL=WRITE_PART");
+  CHECKE("Can't write in parallel");
 
   MPI_Finalize();
   return 0;

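After running, e.g., "mpiexec -np 2 GenLargeMesh", the example leaves test1.h5m in the working directory (writing it in parallel assumes a MOAB build with parallel HDF5). A small serial read-back, sketched below and not part of the commit, can confirm that only vertices, hexas and the partition sets survived the edge/face cleanup:

  #include "moab/Core.hpp"
  #include <iostream>

  using namespace moab;

  int main()
  {
    Core core;
    Interface* mb = &core;

    // Serial read of the file written by GenLargeMesh
    ErrorCode rval = mb->load_file("test1.h5m");
    if (MB_SUCCESS != rval) { std::cout << "Can't read test1.h5m\n"; return 1; }

    Range verts, edges, faces, hexas;
    mb->get_entities_by_dimension(0, 0, verts);
    mb->get_entities_by_dimension(0, 1, edges);
    mb->get_entities_by_dimension(0, 2, faces);
    mb->get_entities_by_dimension(0, 3, hexas);

    // Partition sets carry the sparse PARALLEL_PARTITION tag
    Tag part_tag;
    Range part_sets;
    mb->tag_get_handle("PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag);
    mb->get_entities_by_type_and_tag(0, MBENTITYSET, &part_tag, 0, 1, part_sets);

    // With the defaults one would expect 1377 vertices, 0 edges, 0 faces,
    // 1024 hexas and 16 partition sets (shared vertices are written only once).
    std::cout << verts.size() << " vertices, " << edges.size() << " edges, "
              << faces.size() << " faces, " << hexas.size() << " hexas, "
              << part_sets.size() << " partition sets\n";
    return 0;
  }
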
Repository URL: https://bitbucket.org/fathomteam/moab/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

