[MOAB-dev] commit/MOAB: 5 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Wed Jul 2 00:31:54 CDT 2014
5 new commits in MOAB:
https://bitbucket.org/fathomteam/moab/commits/097d6ce2cf4b/
Changeset: 097d6ce2cf4b
Branch: None
User: iulian07
Date: 2014-07-01 20:48:29
Summary: add large mesh example
Affected #: 2 files
diff --git a/examples/GenLargeMesh.cpp b/examples/GenLargeMesh.cpp
new file mode 100644
index 0000000..6ef1dc9
--- /dev/null
+++ b/examples/GenLargeMesh.cpp
@@ -0,0 +1,105 @@
+/** @example GenLargeMesh.cpp \n
+ * \brief Create a large structured mesh, partitioned \n
+ * <b>To run</b>: mpiexec -np 2 GenLargeMesh \n
+ *
+ * It shows how to load create a mesh on the fly, on multiple
+ * processors, block wise
+ * Each processor will create its version of a block mesh, partitioned
+ * as AxBxC blocks. Each block will be with blockSize^3 hexahedrons, and will get a
+ * different PARALLEL_PARTITION tag
+ *
+ * The number of tasks will be MxNxK, and it must match the mpi size
+ * Each task will generate its mesh at location (m,n,k)
+ *
+ * By default M=2, N=1, K=1, so by default it should be launched on 2 procs
+ * By default, blockSize is 4, and A=2, B=2, C=2, so each task will generate locally
+ * blockSize^3 x A x B x C hexahedrons (value = 64x8 = 512 hexas, in 8 partitions)
+ *
+ * The total number f partitions will be A*B*C*M*N*K (default 16)
+ *
+ * Each part in partition will get a proper tag
+ *
+ * The vertices will get a proper global id, which will be used to resolve the
+ * shared entities
+ * The output will be written in parallel, and we will try sizes as big as we can
+ * (up to a billion vertices, because we use int for global ids)
+ *
+ * Within each partition, the hexas will be numbered contiguously, and also the
+ * vertices; The global id will be determined easily, for a vertex, but the entity
+ * handle space will be more interesting to handle, within a partition (we want
+ * contiguous handles within a partition)
+ *
+ * We may or may not use ScdInterface, because we want control over global id and
+ * entity handle within a partition
+ *
+ *
+ * mpiexec -np 2 GenLargeMesh
+ */
+
+#include "moab/Core.hpp"
+#include "moab/ProgOptions.hpp"
+#include "moab/ParallelComm.hpp"
+#include "moab/CN.hpp"
+
+#include <iostream>
+#include <vector>
+
+using namespace moab;
+using namespace std;
+int main(int argc, char **argv)
+{
+ int A=2, B=2, C=2, M=2, N=1, K=1;
+ int blockSize = 4;
+
+ MPI_Init(&argc, &argv);
+
+ ProgOptions opts;
+
+ opts.addOpt<int>(std::string("blockSize,b"),
+ std::string("Block size of mesh (default=4)"), &blockSize);
+ opts.addOpt<int>(std::string("xproc,M"),
+ std::string("Number of processors in x dir (default=2)"), &M);
+ opts.addOpt<int>(std::string("yproc,N"),
+ std::string("Number of processors in y dir (default=1)"), &N);
+ opts.addOpt<int>(std::string("zproc,K"),
+ std::string("Number of processors in z dir (default=1)"), &K);
+
+ opts.addOpt<int>(std::string("xblocks,A"),
+ std::string("Number of blocks on a task in x dir (default=2)"), &A);
+ opts.addOpt<int>(std::string("yblocks,B"),
+ std::string("Number of blocks on a task in y dir (default=2)"), &B);
+ opts.addOpt<int>(std::string("zblocks,C"),
+ std::string("Number of blocks on a task in x dir (default=2)"), &C);
+
+
+ opts.parseCommandLine(argc, argv);
+
+ Interface* mb = new Core;
+ if (NULL == mb)
+ {
+ MPI_Finalize();
+ return 1;
+ }
+
+ MPI_Comm comm ;
+ int rank, size;
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_rank( MPI_COMM_WORLD, &size );
+
+ if (M*N*K != size)
+ {
+ if (rank==0)
+ {
+ cout <<"M*N*K=" << M*N*K << " != size = "<< " size\n";
+ }
+ MPI_Finalize();
+ return 1;
+ }
+
+ // the global id of each vertex will be, on task
+
+ MPI_Finalize();
+ return 0;
+}
+
+
diff --git a/examples/makefile b/examples/makefile
index 787c8d3..f41def7 100644
--- a/examples/makefile
+++ b/examples/makefile
@@ -71,6 +71,9 @@ VisTags: VisTags.o ${MOAB_LIBDIR}/libMOAB.la
ReadWriteTest: ReadWriteTest.o ${MOAB_LIBDIR}/libMOAB.la
${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
+GenLargeMesh: GenLargeMesh.o ${MOAB_LIBDIR}/libMOAB.la
+ ${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
+
clean:
rm -rf *.o *.mod *.h5m ${EXAMPLES} ${PAREXAMPLES} ${EXOIIEXAMPLES} ${F90EXAMPLES}
https://bitbucket.org/fathomteam/moab/commits/7608cca635a1/
Changeset: 7608cca635a1
Branch: None
User: iulian07
Date: 2014-07-01 20:48:29
Summary: is_valid should be on the interface too
not only on the Core.hpp
How do we know if an entity handle is valid or not?
is there another way?
Affected #: 1 file
diff --git a/src/moab/Interface.hpp b/src/moab/Interface.hpp
index 09f4219..c8102b8 100644
--- a/src/moab/Interface.hpp
+++ b/src/moab/Interface.hpp
@@ -1981,6 +1981,8 @@ public:
SetIterator *&set_iter) = 0;
/**@}*/
+
+ virtual bool is_valid(EntityHandle entity) const = 0;
};
//! predicate for STL algorithms. Returns true if the entity handle is
https://bitbucket.org/fathomteam/moab/commits/c55dff84fd2b/
Changeset: c55dff84fd2b
Branch: None
User: iulian07
Date: 2014-07-01 20:48:29
Summary: validate sharedEnts
it is becoming important if some entities are deleted after they are resolved
they are part of the sharedEnts vector, but they are not deleted
from there.
This is the advantage of keeping entities in lists in member data
we should hunt them down :(
Affected #: 2 files
diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index 10ce709..a8571af 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -8789,7 +8789,22 @@ ErrorCode ParallelComm::settle_intersection_points(Range & edges, Range & shared
return MB_SUCCESS;
// end copy
+}
+ErrorCode ParallelComm::correct_shared_entities()
+{
+
+ std::vector<EntityHandle> good_ents;
+ for (size_t i=0; i<sharedEnts.size(); i++)
+ {
+ if (mbImpl->is_valid(sharedEnts[i]))
+ {
+ good_ents.push_back(sharedEnts[i]);
+ }
}
+ sharedEnts = good_ents;
+
+ return MB_SUCCESS;
+}
void ParallelComm::print_pstatus(unsigned char pstat, std::string &ostr)
{
diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index 3424c51..2a16ac2 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -932,6 +932,12 @@ namespace moab {
ErrorCode settle_intersection_points(Range & edges, Range & shared_edges_owned,
std::vector<std::vector<EntityHandle> *> & extraNodesVec, double tolerance);
+ /* \brief check the shared entities if they exist anymore
+ * will check the shared ents array, and clean it if necessary
+ *
+ */
+ ErrorCode correct_shared_entities();
+
private:
ErrorCode reduce_void(int tag_data_type, const MPI_Op mpi_op, int num_ents, void *old_vals, void *new_vals);
https://bitbucket.org/fathomteam/moab/commits/f2e4be76d1b5/
Changeset: f2e4be76d1b5
Branch: None
User: iulian07
Date: 2014-07-01 20:48:29
Summary: improve the large mesh example
this example pushes the limits of moab
It will be used later to create meshes larger than 1 billion vertices
Each processor creates a structure of AxBxC blocks, each block with
blockSize ^ 3 hexas
The example should be run on MxNxK array of processors.
After resolving and sharing the vertices, mesh is written in
parallel.
each block will represent a partition
It is different from the test parallel_write_test because there the number
of partitions created is equal to the number of processors.
Here, each processor creates AxBxC partitions
For a total of AxBxC x MxNxK partitions
block size, A, B, .., K are options, with default values
The total number of hexas will be
AxBxC x MxNxK x block^3
edges and faces are removed after resolve sharing
the file will have just vertices and hexas, and it will be already
partitioned
We will add some tags of interest, to make it better.
The writer failed if I just deleted the edges and faces.
I had to introduce a method to correct sharedEnts in PC
Affected #: 1 file
diff --git a/examples/GenLargeMesh.cpp b/examples/GenLargeMesh.cpp
index 6ef1dc9..1e0fa73 100644
--- a/examples/GenLargeMesh.cpp
+++ b/examples/GenLargeMesh.cpp
@@ -40,12 +40,16 @@
#include "moab/ProgOptions.hpp"
#include "moab/ParallelComm.hpp"
#include "moab/CN.hpp"
+#include "moab/ReadUtilIface.hpp"
+#include "moab/MergeMesh.hpp"
#include <iostream>
#include <vector>
using namespace moab;
using namespace std;
+
+#define CHECKE(message) if(rval != MB_SUCCESS) { cout << (message) << "\n"; MPI_Finalize(); return 1; }
int main(int argc, char **argv)
{
int A=2, B=2, C=2, M=2, N=1, K=1;
@@ -58,7 +62,7 @@ int main(int argc, char **argv)
opts.addOpt<int>(std::string("blockSize,b"),
std::string("Block size of mesh (default=4)"), &blockSize);
opts.addOpt<int>(std::string("xproc,M"),
- std::string("Number of processors in x dir (default=2)"), &M);
+ std::string("Number of processors in x dir (default=1)"), &M);
opts.addOpt<int>(std::string("yproc,N"),
std::string("Number of processors in y dir (default=1)"), &N);
opts.addOpt<int>(std::string("zproc,K"),
@@ -80,23 +84,175 @@ int main(int argc, char **argv)
MPI_Finalize();
return 1;
}
+ ReadUtilIface *iface;
+ ErrorCode rval = mb->query_interface(iface);
+ CHECKE("Can't get reader interface");
- MPI_Comm comm ;
int rank, size;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
- MPI_Comm_rank( MPI_COMM_WORLD, &size );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
if (M*N*K != size)
{
if (rank==0)
{
- cout <<"M*N*K=" << M*N*K << " != size = "<< " size\n";
+ cout <<"M*N*K=" << M*N*K << " != size = "<< size << "\n";
}
MPI_Finalize();
return 1;
}
- // the global id of each vertex will be, on task
+ // determine m, n, k for processor rank
+ int m,n,k;
+ k = rank/(M*N);
+ int leftover = rank%(M*N);
+ n = leftover/M;
+ m = leftover%M;
+ if (rank==size-1)
+ {
+ cout << "m, n, k for last rank:" << m << " " << n << " " << k << "\n";
+ }
+
+ // so there are a total of M * A * blockSize elements in x direction (so M * A * blockSize + 1 verts in x direction)
+ // so there are a total of N * B * blockSize elements in y direction (so N * B * blockSize + 1 verts in y direction)
+ // so there are a total of K * C * blockSize elements in z direction (so K * C * blockSize + 1 verts in z direction)
+
+ // there are ( M * A blockSize ) * ( N * B * blockSize) * (K * C * blockSize ) hexas
+ // there are ( M * A * blockSize + 1) * ( N * B * blockSize + 1 ) * (K * C * blockSize + 1) vertices
+ // x is the first dimension that varies
+
+ int NX = ( M * A * blockSize + 1);
+ int NY = ( N * B * blockSize + 1);
+ int NZ = ( K * C * blockSize + 1);
+ int blockSize1 = blockSize + 1;// used for vertices
+ int bsq = blockSize * blockSize;
+ int b1sq = blockSize1 * blockSize1;
+ // generate the block at (a, b, c); it will represent a partition , it will get a partition tag
+
+ Tag global_id_tag;
+ mb->tag_get_handle("GLOBAL_ID", 1, MB_TYPE_INTEGER,
+ global_id_tag);
+
+ Tag part_tag;
+ int dum_id=-1;
+ mb->tag_get_handle("PARALLEL_PARTITION", 1, MB_TYPE_INTEGER,
+ part_tag, MB_TAG_CREAT|MB_TAG_SPARSE, &dum_id);
+
+ for (int a=0; a<A; a++)
+ {
+ for (int b=0; b<B; b++)
+ {
+ for (int c=0; c<C; c++)
+ {
+ // we will generate (block+1)^3 vertices, and block^3 hexas
+ // the global id of the vertices will come from m, n, k, a, b, c
+ // x will vary from m*A*block + a*block to m*A*block+(a+1)*block etc;
+ int num_nodes = (blockSize+1)*(blockSize+1)*(blockSize+1);
+
+ vector<double*> arrays;
+ EntityHandle startv;
+ rval = iface->get_node_coords(3, num_nodes, 0, startv, arrays);
+ CHECKE("Can't get node coords.");
+
+ int x = m*A*blockSize + a*blockSize;
+ int y = n*B*blockSize + b*blockSize;
+ int z = k*C*blockSize + c*blockSize;
+ int ix=0;
+ vector<int> gids(num_nodes);
+ Range verts(startv, startv+num_nodes-1);
+ for (int kk=0; kk<blockSize+1; kk++)
+ {
+ for (int jj=0; jj<blockSize+1; jj++)
+ {
+ for (int ii=0; ii<blockSize+1; ii++)
+ {
+ arrays[0][ix] = (double)x+ii;
+ arrays[1][ix] = (double)y+jj;
+ arrays[2][ix] = (double)z+kk;
+ gids[ix] = 1 + (z+kk) * (NX*NY) + (y+jj) * NX + (x+ii);
+ ix++;
+ }
+ }
+ }
+ mb->tag_set_data(global_id_tag, verts, &gids[0]);
+ int num_hexas = (blockSize)*(blockSize)*(blockSize);
+ EntityHandle starte; // connectivity
+ EntityHandle * conn;
+ rval = iface->get_element_connect(num_hexas, 8, MBHEX, 0, starte, conn);
+ CHECKE("Can't get hexa connectivity.");
+
+ Range hexas(starte, starte+num_hexas-1);
+ // fill hexas
+ ix=0;
+ for (int kk=0; kk<blockSize; kk++)
+ {
+ for (int jj=0; jj<blockSize; jj++)
+ {
+ for (int ii=0; ii<blockSize; ii++)
+ {
+ EntityHandle corner=startv + ii + (jj*blockSize1) + kk * b1sq;
+ conn[ix]=corner;
+ conn[ix+1]=corner+1;
+ conn[ix+2]=corner+ 1 + blockSize1;
+ conn[ix+3]=corner+ blockSize1;
+ conn[ix+4]=corner + b1sq;
+ conn[ix+5]=corner+1 + b1sq;
+ conn[ix+6]=corner+ 1 + blockSize1 + b1sq;
+ conn[ix+7]=corner+ blockSize1 + b1sq;
+ ix+=8;
+ }
+ }
+ }
+ EntityHandle part_set;
+ rval = mb->create_meshset(MESHSET_SET, part_set);
+ CHECKE("Can't create mesh set.");
+ rval = mb->add_entities(part_set, hexas);
+ CHECKE("Can't add entities to set.");
+ int part_num= a + m*A + (b + n*B)*(M*A) + (c+k*C)*(M*A * N*B);
+ rval = mb->tag_set_data(part_tag, &part_set, 1, &part_num);
+ CHECKE("Can't set part tag on set");
+
+
+
+ }
+ }
+ }
+ // after the mesh is generated on each proc, merge the vertices
+ MergeMesh mm(mb);
+ Range allhexas;
+ rval = mb->get_entities_by_type(0, MBHEX, allhexas);
+
+ CHECKE("Can't get all hexa elements.");
+
+ rval = mm.merge_entities( allhexas, 0.0001);
+ CHECKE("Can't merge");
+ ParallelComm* pcomm = ParallelComm::get_pcomm(mb, 0);
+ if (pcomm==NULL)
+ {
+ pcomm = new ParallelComm( mb, MPI_COMM_WORLD );
+ }
+ rval = pcomm->resolve_shared_ents( 0, allhexas, 3, 0 );
+ CHECKE("Can't resolve shared ents");
+
+ // delete all quads and edges
+ Range toDelete;
+ rval = mb->get_entities_by_dimension(0, 1, toDelete);
+ CHECKE("Can't get edges");
+ rval = mb->delete_entities(toDelete);
+ CHECKE("Can't delete edges");
+
+ toDelete.clear();
+
+ rval = mb->get_entities_by_dimension(0, 2, toDelete);
+ CHECKE("Can't get faces");
+ rval = mb->delete_entities(toDelete);
+ CHECKE("Can't delete faces");
+
+ rval = pcomm->correct_shared_entities() ;
+ CHECKE("Can't correct local shared")
+
+ rval = mb->write_file("test1.h5m", 0, ";;PARALLEL=WRITE_PART");
+ CHECKE("Can't write in parallel");
MPI_Finalize();
return 0;
https://bitbucket.org/fathomteam/moab/commits/56cbe5e23a8e/
Changeset: 56cbe5e23a8e
Branch: iulian07/largemesh
User: iulian07
Date: 2014-07-02 07:14:18
Summary: changes to the large structured mesh example
fun project
see how big of a model we can write in moab
use a different merging strategy, in which the global id tag is
used for determining what should be merged. (instead of a
adaptive KD tree)
It took 21 minutes on a task to merge a mesh with 500k vertices
This is ridiculous; the new method takes 3 minutes
I think that there is still room for improvement.
In the new method, we get the tag on each vertex and then order
The vertices that get the same tag will be merged (duh..)
It is a way to generate a big model on a small machine
This new merging method might need a new branch/pull request
Maybe this will be accepted as is :) I will not hold my breath.
Still, right now it took 21 minutes to merge locally an 80x80x80
mesh
this is way too much. With the new method (-w option) it takes less,
but we still need to quantify by how much.
Affected #: 3 files
diff --git a/examples/GenLargeMesh.cpp b/examples/GenLargeMesh.cpp
index 1e0fa73..52cca34 100644
--- a/examples/GenLargeMesh.cpp
+++ b/examples/GenLargeMesh.cpp
@@ -42,6 +42,7 @@
#include "moab/CN.hpp"
#include "moab/ReadUtilIface.hpp"
#include "moab/MergeMesh.hpp"
+#include <time.h>
#include <iostream>
#include <vector>
@@ -52,9 +53,11 @@ using namespace std;
#define CHECKE(message) if(rval != MB_SUCCESS) { cout << (message) << "\n"; MPI_Finalize(); return 1; }
int main(int argc, char **argv)
{
- int A=2, B=2, C=2, M=2, N=1, K=1;
+ int A=2, B=2, C=2, M=1, N=1, K=1;
int blockSize = 4;
+ bool newMergeMethod=false;
+
MPI_Init(&argc, &argv);
ProgOptions opts;
@@ -75,6 +78,8 @@ int main(int argc, char **argv)
opts.addOpt<int>(std::string("zblocks,C"),
std::string("Number of blocks on a task in x dir (default=2)"), &C);
+ opts.addOpt<void>("newMerge,w", "use new merging method",
+ &newMergeMethod);
opts.parseCommandLine(argc, argv);
@@ -121,11 +126,13 @@ int main(int argc, char **argv)
// there are ( M * A * blockSize + 1) * ( N * B * blockSize + 1 ) * (K * C * blockSize + 1) vertices
// x is the first dimension that varies
+ clock_t tt = clock();
+
int NX = ( M * A * blockSize + 1);
int NY = ( N * B * blockSize + 1);
- int NZ = ( K * C * blockSize + 1);
+ // int NZ = ( K * C * blockSize + 1); not used
int blockSize1 = blockSize + 1;// used for vertices
- int bsq = blockSize * blockSize;
+ // int bsq = blockSize * blockSize;
int b1sq = blockSize1 * blockSize1;
// generate the block at (a, b, c); it will represent a partition , it will get a partition tag
@@ -217,6 +224,14 @@ int main(int argc, char **argv)
}
}
}
+
+ if (0==rank)
+ {
+ std::cout << "generate local mesh: "
+ << (clock() - tt) / (double) CLOCKS_PER_SEC << " seconds" << std::endl;
+ tt = clock();
+ }
+
// after the mesh is generated on each proc, merge the vertices
MergeMesh mm(mb);
Range allhexas;
@@ -224,8 +239,23 @@ int main(int argc, char **argv)
CHECKE("Can't get all hexa elements.");
- rval = mm.merge_entities( allhexas, 0.0001);
+ Range verts;
+ rval = mb->get_entities_by_dimension(0, 0, verts);
+
+ CHECKE("Can't get all vertices.");
+
+ if (newMergeMethod)
+ rval = mm.merge_using_integer_tag( verts, global_id_tag);
+ else
+ rval = mm.merge_entities(allhexas, 0.0001);
CHECKE("Can't merge");
+ if (0==rank)
+ {
+ std::cout << "merge locally: "
+ << (clock() - tt) / (double) CLOCKS_PER_SEC << " seconds" << std::endl;
+ tt = clock();
+ }
+
ParallelComm* pcomm = ParallelComm::get_pcomm(mb, 0);
if (pcomm==NULL)
{
@@ -234,6 +264,13 @@ int main(int argc, char **argv)
rval = pcomm->resolve_shared_ents( 0, allhexas, 3, 0 );
CHECKE("Can't resolve shared ents");
+ if (0==rank)
+ {
+ std::cout << "resolve shared entities: "
+ << (clock() - tt) / (double) CLOCKS_PER_SEC << " seconds" << std::endl;
+ tt = clock();
+ }
+
// delete all quads and edges
Range toDelete;
rval = mb->get_entities_by_dimension(0, 1, toDelete);
@@ -251,9 +288,23 @@ int main(int argc, char **argv)
rval = pcomm->correct_shared_entities() ;
CHECKE("Can't correct local shared")
+ if (0==rank)
+ {
+ std::cout << "delete edges and faces, and correct sharedEnts: "
+ << (clock() - tt) / (double) CLOCKS_PER_SEC << " seconds" << std::endl;
+ tt = clock();
+ }
+
rval = mb->write_file("test1.h5m", 0, ";;PARALLEL=WRITE_PART");
CHECKE("Can't write in parallel");
+ if (0==rank)
+ {
+ std::cout << "write file test1.h5m in parallel "
+ << (clock() - tt) / (double) CLOCKS_PER_SEC << " seconds" << std::endl;
+ tt = clock();
+ }
+
MPI_Finalize();
return 0;
}
diff --git a/src/MergeMesh.cpp b/src/MergeMesh.cpp
index cb43b85..62b0022 100644
--- a/src/MergeMesh.cpp
+++ b/src/MergeMesh.cpp
@@ -139,7 +139,79 @@ ErrorCode MergeMesh::perform_merge(Tag merge_tag)
result = mbImpl->delete_entities(deadEnts);
return result;
}
+// merge vertices according to an input tag
+// merge them if the tags are equal
+struct handle_id
+{
+ EntityHandle eh;
+ int val;
+};
+
+// handle structure comparison function for qsort
+// if the id is the same , compare the handle.
+bool compare_handle_id(handle_id ia, handle_id ib) {
+
+ if(ia.val == ib.val) {
+ return ia.eh<ia.eh;
+ } else {
+ return ia.val<ib.val;
+ }
+}
+
+ErrorCode MergeMesh::merge_using_integer_tag(Range & verts, Tag user_tag, Tag merge_tag)
+{
+ ErrorCode rval;
+ DataType tag_type;
+ rval = mbImpl->tag_get_data_type(user_tag, tag_type);
+ if (rval!=MB_SUCCESS || tag_type!=MB_TYPE_INTEGER)
+ return MB_FAILURE;
+ std::vector<int> vals(verts.size());
+ rval = mbImpl->tag_get_data(user_tag, verts, &vals[0]);
+ if (rval!=MB_SUCCESS)
+ return rval;
+
+ if (0 == merge_tag)
+ {
+ EntityHandle def_val = 0;
+ rval = mbImpl->tag_get_handle("__merge_tag", 1, MB_TYPE_HANDLE, mbMergeTag,
+ MB_TAG_DENSE | MB_TAG_EXCL, &def_val);
+ if (MB_SUCCESS != rval)
+ return rval;
+ }
+ else
+ mbMergeTag = merge_tag;
+
+ std::vector<handle_id> handles(verts.size());
+ int i=0;
+ for (Range::iterator vit = verts.begin(); vit!= verts.end(); vit++ )
+ {
+ handles[i].eh=*vit;
+ handles[i].val = vals[i];
+ i++;
+ }
+ std::sort(handles.begin(), handles.end(), compare_handle_id);
+
+ i=0;
+ while (i<(int)verts.size()-1)
+ {
+ handle_id first = handles[i];
+ int j=i+1;
+ while (handles[j].val == first.val && j<(int)verts.size())
+ {
+ rval= mbImpl->tag_set_data(mbMergeTag, &(handles[j].eh), 1, &(first.eh));
+ if (rval!=MB_SUCCESS)
+ return rval;
+ deadEnts.insert(handles[j].eh);
+ j++;
+ }
+ i=j;
+ }
+
+ rval = perform_merge(mbMergeTag);
+
+ return rval;
+}
ErrorCode MergeMesh::find_merged_to(EntityHandle &tree_root,
AdaptiveKDTree &tree, Tag merge_tag)
{
diff --git a/src/moab/MergeMesh.hpp b/src/moab/MergeMesh.hpp
index 8eef9a8..8f4d5bc 100644
--- a/src/moab/MergeMesh.hpp
+++ b/src/moab/MergeMesh.hpp
@@ -32,6 +32,9 @@ public:
//Identify higher dimension to be merged
ErrorCode merge_higher_dimensions(Range &elems);
+ // merge vertices according to an input tag
+ ErrorCode merge_using_integer_tag(Range & verts, Tag user_tag, Tag merge_tag=0);
+
//- perform the actual merge
ErrorCode perform_merge(Tag merged_to);
private:
@@ -39,8 +42,6 @@ private:
double mergeTol, mergeTolSq;
- Tag mergeTag;
-
//- given a kdtree, set tag on vertices in leaf nodes with vertices
//- to which they should be merged
ErrorCode find_merged_to(EntityHandle &tree_root,
Repository URL: https://bitbucket.org/fathomteam/moab/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the moab-dev
mailing list