[MOAB-dev] commit/MOAB: danwu: Merged master into error_handling_enhancement
commits-noreply at bitbucket.org
Wed Jun 11 15:38:20 CDT 2014
1 new commit in MOAB:
https://bitbucket.org/fathomteam/moab/commits/29f64ae95cfd/
Changeset: 29f64ae95cfd
Branch: error_handling_enhancement
User: danwu
Date: 2014-06-11 22:38:15
Summary: Merged master into error_handling_enhancement
Affected #: 16 files
diff --git a/MeshFiles/unittest/io/Makefile.am b/MeshFiles/unittest/io/Makefile.am
index 94704c1..d40e02b 100644
--- a/MeshFiles/unittest/io/Makefile.am
+++ b/MeshFiles/unittest/io/Makefile.am
@@ -5,7 +5,7 @@ EXTRA_DIST = HommeMapping.nc \
eul26x48x96.t0.nc \
eul26x48x96.t1.nc \
eul26x48x96.t2.nc \
- eul26x48x96.t.3.nc \
+ eul26x48x96.t.3.nc \
fv26x46x72.t.3.nc \
cubtest12.cub \
cubtest14.cub \
diff --git a/examples/HelloParMOAB.cpp b/examples/HelloParMOAB.cpp
index 5bcc4ab..3611a8d 100644
--- a/examples/HelloParMOAB.cpp
+++ b/examples/HelloParMOAB.cpp
@@ -2,6 +2,12 @@
* \brief Read mesh into MOAB and resolve/exchange/report shared and ghosted entities \n
* <b>To run</b>: mpiexec -np 4 HelloMoabPar [filename]\n
*
+ * It also shows how to load the mesh independently on multiple
+ * communicators (the second argument gives the number of communicators).
+ *
+ * Example:
+ *
+ * mpiexec -np 8 HelloMoabPar [filename] [nbComms]
*/
#include "moab/ParallelComm.hpp"
@@ -26,6 +32,10 @@ int main(int argc, char **argv)
test_file_name = argv[1];
}
+ int nbComms = 1;
+ if (argc > 2)
+ nbComms = atoi(argv[2]);
+
options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
// Get MOAB instance and read the file with the specified options
@@ -33,15 +43,38 @@ int main(int argc, char **argv)
if (NULL == mb)
return 1;
+ MPI_Comm comm ;
+ int global_rank, global_size;
+ MPI_Comm_rank( MPI_COMM_WORLD, &global_rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &global_size );
+
+ int color = global_rank%nbComms; // each group of ranks gets a different color
+ if (nbComms>1)
+ {
+ // split the communicator, into ngroups = nbComms
+ MPI_Comm_split( MPI_COMM_WORLD, color, global_rank, &comm );
+ }
+ else
+ {
+ comm = MPI_COMM_WORLD;
+ }
// Get the ParallelComm instance
- ParallelComm* pcomm = new ParallelComm(mb, MPI_COMM_WORLD);
+ ParallelComm* pcomm = new ParallelComm(mb, comm);
int nprocs = pcomm->proc_config().proc_size();
int rank = pcomm->proc_config().proc_rank();
- MPI_Comm comm = pcomm->proc_config().proc_comm();
+ MPI_Comm rcomm = pcomm->proc_config().proc_comm();
+ assert(rcomm==comm);
+ if (global_rank == 0)
+ cout<< " global rank:" <<global_rank << " color:" << color << " rank:" << rank << " of " << nprocs << " processors\n";
+
+ if (global_rank == 1)
+ cout<< " global rank:" <<global_rank << " color:" << color << " rank:" << rank << " of " << nprocs << " processors\n";
- if (rank == 0)
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (global_rank == 0)
cout << "Reading file " << test_file_name << "\n with options: " << options << endl
- << " on " << nprocs << " processors\n";
+ << " on " << nprocs << " processors on " << nbComms << " communicator(s) \n";
ErrorCode rval = mb->load_file(test_file_name.c_str(), 0, options.c_str());
if (rval != MB_SUCCESS) {
@@ -69,9 +102,9 @@ int main(int argc, char **argv)
for (int i = 0; i < 4; i++)
nums[i] = (int)owned_entities.num_of_dimension(i);
vector<int> rbuf(nprocs*4, 0);
- MPI_Gather(nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, MPI_COMM_WORLD);
+ MPI_Gather(nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm);
// Print the stats gathered:
- if (rank == 0) {
+ if (global_rank == 0) {
for (int i = 0; i < nprocs; i++)
cout << " Shared, owned entities on proc " << i << ": " << rbuf[4*i] << " verts, " <<
rbuf[4*i + 1] << " edges, " << rbuf[4*i + 2] << " faces, " << rbuf[4*i + 3] << " elements" << endl;
@@ -109,7 +142,7 @@ int main(int argc, char **argv)
// gather the statistics on processor 0
MPI_Gather(nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm);
- if (rank == 0) {
+ if (global_rank == 0) {
cout << " \n\n After exchanging one ghost layer: \n";
for (int i = 0; i < nprocs; i++) {
cout << " Shared, owned entities on proc " << i << ": " << rbuf[4*i] << " verts, " <<
diff --git a/src/MergeMesh.cpp b/src/MergeMesh.cpp
index 4978f79..cb43b85 100644
--- a/src/MergeMesh.cpp
+++ b/src/MergeMesh.cpp
@@ -5,6 +5,7 @@
#include "moab/Range.hpp"
#include "moab/CartVect.hpp"
+#include "Internals.hpp"
#include <vector>
#include <algorithm>
#include <string>
@@ -15,108 +16,123 @@
namespace moab {
- moab::ErrorCode MergeMesh::merge_entities(moab::EntityHandle *elems,
- int elems_size,
- const double merge_tol,
- const int do_merge,
- const int update_sets,
- moab::Tag merge_tag,
- bool do_higher_dim)
+ErrorCode MergeMesh::merge_entities(EntityHandle *elems,
+ int elems_size, const double merge_tol, const int do_merge,
+ const int update_sets, Tag merge_tag, bool do_higher_dim)
{
mergeTol = merge_tol;
- mergeTolSq = merge_tol*merge_tol;
- moab::Range tmp_elems;
- tmp_elems.insert( elems, elems + elems_size);
- moab::ErrorCode result = merge_entities(tmp_elems, merge_tol, do_merge, update_sets,
- (moab::Tag)merge_tag, do_higher_dim);
+ mergeTolSq = merge_tol * merge_tol;
+ Range tmp_elems;
+ tmp_elems.insert(elems, elems + elems_size);
+ ErrorCode result = merge_entities(tmp_elems, merge_tol, do_merge,
+ update_sets, (Tag) merge_tag, do_higher_dim);
return result;
}
/* This function appears to be not necessary after MOAB conversion
-void MergeMesh::perform_merge(iBase_TagHandle merge_tag)
-{
- // put into a range
- moab::ErrorCode result = perform_merge((moab::Tag) merge_tag);
- if (result != moab::MB_SUCCESS)
- throw MKException(iBase_FAILURE, "");
-}*/
-
-moab::ErrorCode MergeMesh::merge_entities(moab::Range &elems,
- const double merge_tol,
- const int do_merge,
- const int ,
- moab::Tag merge_tag,
- bool merge_higher_dim)
+ void MergeMesh::perform_merge(iBase_TagHandle merge_tag)
+ {
+ // put into a range
+ ErrorCode result = perform_merge((Tag) merge_tag);
+ if (result != MB_SUCCESS)
+ throw MKException(iBase_FAILURE, "");
+ }*/
+
+ErrorCode MergeMesh::merge_entities(Range &elems,
+ const double merge_tol, const int do_merge, const int, Tag merge_tag,
+ bool merge_higher_dim)
{
//If merge_higher_dim is true, do_merge must also be true
- if(merge_higher_dim && !do_merge){
- return moab::MB_FAILURE;
+ if (merge_higher_dim && !do_merge)
+ {
+ return MB_FAILURE;
}
mergeTol = merge_tol;
- mergeTolSq = merge_tol*merge_tol;
+ mergeTolSq = merge_tol * merge_tol;
// get the skin of the entities
- moab::Skinner skinner(mbImpl);
- moab::Range skin_range;
- moab::ErrorCode result = skinner.find_skin(0, elems, 0, skin_range, false, false);
- if (moab::MB_SUCCESS != result) return result;
+ Skinner skinner(mbImpl);
+ Range skin_range;
+ ErrorCode result = skinner.find_skin(0, elems, 0, skin_range, false,
+ false);
+ if (MB_SUCCESS != result)
+ return result;
// create a tag to mark merged-to entity; reuse tree_root
- moab::EntityHandle tree_root = 0;
- if (0 == merge_tag) {
- result = mbImpl->tag_get_handle("__merge_tag", 1, moab::MB_TYPE_HANDLE,
- mbMergeTag,
- moab::MB_TAG_DENSE|moab::MB_TAG_EXCL,
- &tree_root);
- if (moab::MB_SUCCESS != result) return result;
+ EntityHandle tree_root = 0;
+ if (0 == merge_tag)
+ {
+ result = mbImpl->tag_get_handle("__merge_tag", 1, MB_TYPE_HANDLE,
+ mbMergeTag, MB_TAG_DENSE | MB_TAG_EXCL, &tree_root);
+ if (MB_SUCCESS != result)
+ return result;
}
- else mbMergeTag = merge_tag;
-
+ else
+ mbMergeTag = merge_tag;
+
// build a kd tree with the vertices
- moab::AdaptiveKDTree kd(mbImpl);
+ AdaptiveKDTree kd(mbImpl);
result = kd.build_tree(skin_range, &tree_root);
- if (moab::MB_SUCCESS != result) return result;
+ if (MB_SUCCESS != result)
+ return result;
// find matching vertices, mark them
result = find_merged_to(tree_root, kd, mbMergeTag);
- if (moab::MB_SUCCESS != result) return result;
+ if (MB_SUCCESS != result)
+ return result;
// merge them if requested
- if (do_merge) {
+ if (do_merge)
+ {
result = perform_merge(mbMergeTag);
- if (moab::MB_SUCCESS != result) return result;
+ if (MB_SUCCESS != result)
+ return result;
}
- if(merge_higher_dim && deadEnts.size() != 0){
+ if (merge_higher_dim && deadEnts.size() != 0)
+ {
result = merge_higher_dimensions(elems);
- if(moab::MB_SUCCESS != result) return result;
+ if (MB_SUCCESS != result)
+ return result;
}
-
- return moab::MB_SUCCESS;
+
+ return MB_SUCCESS;
}
-moab::ErrorCode MergeMesh::perform_merge(moab::Tag merge_tag)
+ErrorCode MergeMesh::perform_merge(Tag merge_tag)
{
- moab::ErrorCode result;
- if (deadEnts.size()==0){
- if(printError)std::cout << "\nWarning: Geometries don't have a common face; Nothing to merge" << std::endl;
- return moab::MB_SUCCESS; //nothing to merge carry on with the program
+ // we start with an empty range of vertices that are "merged to"
+ // they are used (eventually) for higher dim entities
+ mergedToVertices.clear();
+ ErrorCode result;
+ if (deadEnts.size() == 0)
+ {
+ if (printError)
+ std::cout
+ << "\nWarning: Geometries don't have a common face; Nothing to merge"
+ << std::endl;
+ return MB_SUCCESS; //nothing to merge carry on with the program
}
- if (mbImpl->type_from_handle(*deadEnts.rbegin()) != moab::MBVERTEX)
- return moab::MB_FAILURE;
- std::vector<moab::EntityHandle> merge_tag_val(deadEnts.size());
+ if (mbImpl->type_from_handle(*deadEnts.rbegin()) != MBVERTEX)
+ return MB_FAILURE;
+ std::vector<EntityHandle> merge_tag_val(deadEnts.size());
result = mbImpl->tag_get_data(merge_tag, deadEnts, &merge_tag_val[0]);
- if (moab::MB_SUCCESS != result) return result;
-
- moab::Range::iterator rit;
+ if (MB_SUCCESS != result)
+ return result;
+
+ Range::iterator rit;
unsigned int i;
- for (rit = deadEnts.begin(), i = 0; rit != deadEnts.end(); rit++, i++) {
+ for (rit = deadEnts.begin(), i = 0; rit != deadEnts.end(); rit++, i++)
+ {
assert(merge_tag_val[i]);
+ if (MBVERTEX==TYPE_FROM_HANDLE(merge_tag_val[i]) )
+ mergedToVertices.insert(merge_tag_val[i]);
result = mbImpl->merge_entities(merge_tag_val[i], *rit, false, false);
- if (moab::MB_SUCCESS != result) {
+ if (MB_SUCCESS != result)
+ {
return result;
}
}
@@ -124,142 +140,249 @@ moab::ErrorCode MergeMesh::perform_merge(moab::Tag merge_tag)
return result;
}
-moab::ErrorCode MergeMesh::find_merged_to(moab::EntityHandle &tree_root,
- moab::AdaptiveKDTree &tree,
- moab::Tag merge_tag)
+ErrorCode MergeMesh::find_merged_to(EntityHandle &tree_root,
+ AdaptiveKDTree &tree, Tag merge_tag)
{
- moab::AdaptiveKDTreeIter iter;
-
+ AdaptiveKDTreeIter iter;
+
// evaluate vertices in this leaf
- moab::Range leaf_range, leaf_range2;
- std::vector<moab::EntityHandle> sorted_leaves;
+ Range leaf_range, leaf_range2;
+ std::vector<EntityHandle> sorted_leaves;
std::vector<double> coords;
- std::vector<moab::EntityHandle> merge_tag_val, leaves_out;
-
- moab::ErrorCode result = tree.get_tree_iterator(tree_root, iter);
- if (moab::MB_SUCCESS != result) return result;
- while (result == moab::MB_SUCCESS) {
- sorted_leaves.push_back( iter.handle() );
+ std::vector<EntityHandle> merge_tag_val, leaves_out;
+
+ ErrorCode result = tree.get_tree_iterator(tree_root, iter);
+ if (MB_SUCCESS != result)
+ return result;
+ while (result == MB_SUCCESS)
+ {
+ sorted_leaves.push_back(iter.handle());
result = iter.step();
}
- if (result != moab::MB_ENTITY_NOT_FOUND)
+ if (result != MB_ENTITY_NOT_FOUND)
return result;
- std::sort( sorted_leaves.begin(), sorted_leaves.end() );
-
- std::vector<moab::EntityHandle>::iterator it;
- for (it = sorted_leaves.begin(); it != sorted_leaves.end(); ++it) {
+ std::sort(sorted_leaves.begin(), sorted_leaves.end());
+
+ std::vector<EntityHandle>::iterator it;
+ for (it = sorted_leaves.begin(); it != sorted_leaves.end(); ++it)
+ {
leaf_range.clear();
result = mbImpl->get_entities_by_handle(*it, leaf_range);
- if (moab::MB_SUCCESS != result) return result;
- coords.resize(3*leaf_range.size());
+ if (MB_SUCCESS != result)
+ return result;
+ coords.resize(3 * leaf_range.size());
merge_tag_val.resize(leaf_range.size());
result = mbImpl->get_coords(leaf_range, &coords[0]);
- if (moab::MB_SUCCESS != result) return result;
+ if (MB_SUCCESS != result)
+ return result;
result = mbImpl->tag_get_data(merge_tag, leaf_range, &merge_tag_val[0]);
- if (moab::MB_SUCCESS != result) return result;
- moab::Range::iterator rit;
+ if (MB_SUCCESS != result)
+ return result;
+ Range::iterator rit;
unsigned int i;
bool inleaf_merged, outleaf_merged = false;
unsigned int lr_size = leaf_range.size();
-
- for (i = 0, rit = leaf_range.begin(); i != lr_size; rit++, i++) {
- if (0 != merge_tag_val[i]) continue;
- moab::CartVect from(&coords[3*i]);
+
+ for (i = 0, rit = leaf_range.begin(); i != lr_size; rit++, i++)
+ {
+ if (0 != merge_tag_val[i])
+ continue;
+ CartVect from(&coords[3 * i]);
inleaf_merged = false;
// check close-by leaves too
leaves_out.clear();
- result = tree.distance_search(from.array(), mergeTol,
- leaves_out, mergeTol, 1.0e-6, NULL, NULL, &tree_root);
+ result = tree.distance_search(from.array(), mergeTol, leaves_out,
+ mergeTol, 1.0e-6, NULL, NULL, &tree_root);
leaf_range2.clear();
- for (std::vector<moab::EntityHandle>::iterator vit = leaves_out.begin();
- vit != leaves_out.end(); vit++) {
- if (*vit > *it) { // if we haven't visited this leaf yet in the outer loop
- result = mbImpl->get_entities_by_handle(*vit, leaf_range2, moab::Interface::UNION);
- if (moab::MB_SUCCESS != result) return result;
+ for (std::vector<EntityHandle>::iterator vit = leaves_out.begin();
+ vit != leaves_out.end(); vit++)
+ {
+ if (*vit > *it)
+ { // if we haven't visited this leaf yet in the outer loop
+ result = mbImpl->get_entities_by_handle(*vit, leaf_range2,
+ Interface::UNION);
+ if (MB_SUCCESS != result)
+ return result;
}
}
- if (!leaf_range2.empty()) {
- coords.resize(3*(lr_size+leaf_range2.size()));
- merge_tag_val.resize(lr_size+leaf_range2.size());
- result = mbImpl->get_coords(leaf_range2, &coords[3*lr_size]);
- if (moab::MB_SUCCESS != result) return result;
- result = mbImpl->tag_get_data(merge_tag, leaf_range2, &merge_tag_val[lr_size]);
- if (moab::MB_SUCCESS != result) return result;
+ if (!leaf_range2.empty())
+ {
+ coords.resize(3 * (lr_size + leaf_range2.size()));
+ merge_tag_val.resize(lr_size + leaf_range2.size());
+ result = mbImpl->get_coords(leaf_range2, &coords[3 * lr_size]);
+ if (MB_SUCCESS != result)
+ return result;
+ result = mbImpl->tag_get_data(merge_tag, leaf_range2,
+ &merge_tag_val[lr_size]);
+ if (MB_SUCCESS != result)
+ return result;
outleaf_merged = false;
}
// check other verts in this leaf
- for (unsigned int j = i+1; j < merge_tag_val.size(); j++) {
- moab::EntityHandle to_ent = j >= lr_size ? leaf_range2[j-lr_size] :
- leaf_range[j];
-
- if (*rit == to_ent) continue;
-
- if ((from - moab::CartVect(&coords[3*j])).length_squared() < mergeTolSq) {
+ for (unsigned int j = i + 1; j < merge_tag_val.size(); j++)
+ {
+ EntityHandle to_ent =
+ j >= lr_size ? leaf_range2[j - lr_size] : leaf_range[j];
+
+ if (*rit == to_ent)
+ continue;
+
+ if ((from - CartVect(&coords[3 * j])).length_squared()
+ < mergeTolSq)
+ {
merge_tag_val[j] = *rit;
- if (j < lr_size){
- inleaf_merged = true;}
- else{
- outleaf_merged = true;}
+ if (j < lr_size)
+ {
+ inleaf_merged = true;
+ }
+ else
+ {
+ outleaf_merged = true;
+ }
deadEnts.insert(to_ent);
}
}
- if (outleaf_merged) {
- result = mbImpl->tag_set_data(merge_tag, leaf_range2, &merge_tag_val[leaf_range.size()]);
- if (moab::MB_SUCCESS != result) return result;
- outleaf_merged = false;
+ if (outleaf_merged)
+ {
+ result = mbImpl->tag_set_data(merge_tag, leaf_range2,
+ &merge_tag_val[leaf_range.size()]);
+ if (MB_SUCCESS != result)
+ return result;
+ outleaf_merged = false;
}
- if (inleaf_merged) {
- result = mbImpl->tag_set_data(merge_tag, leaf_range, &merge_tag_val[0]);
- if (moab::MB_SUCCESS != result) return result;
+ if (inleaf_merged)
+ {
+ result = mbImpl->tag_set_data(merge_tag, leaf_range, &merge_tag_val[0]);
+ if (MB_SUCCESS != result)
+ return result;
}
}
}
- return moab::MB_SUCCESS;
+ return MB_SUCCESS;
}
-
//Determine which higher dimensional entities should be merged
-moab::ErrorCode MergeMesh::merge_higher_dimensions(moab::Range &elems)
-{
- Range skinEnts, adj, matches, moreDeadEnts; moab::ErrorCode result;
- moab::Skinner skinner(mbImpl);
+ErrorCode MergeMesh::merge_higher_dimensions(Range &elems)
+{
+ // apply a different strategy
+ // look at the vertices that were merged to earlier, and find all entities adjacent to them
+ // elems (input) are used just for initial connectivity
+ ErrorCode result;
+ Range verts;
+ result = mbImpl->get_connectivity(elems, verts);
+ if (MB_SUCCESS!=result)
+ return result;
+
+ // all higher dim entities that will be merged will be connected to the vertices that were
+ // merged earlier; we will look at these vertices only
+ Range vertsOfInterest=intersect(this->mergedToVertices, verts);
//Go through each dimension
- for(int dim = 1; dim <3; dim++){
+ Range possibleEntsToMerge, conn, matches, moreDeadEnts;
+
+ for (int dim = 1; dim < 3; dim++)
+ {
+ moreDeadEnts.clear();
+ possibleEntsToMerge.clear();
+ result = mbImpl->get_adjacencies(vertsOfInterest,
+ dim, false, possibleEntsToMerge,
+ Interface::UNION);
+ if (MB_SUCCESS!=result)
+ return result;
+ //Go through each possible entity and see if it shares vertices with another entity of same dimension
+ for (Range::iterator pit = possibleEntsToMerge.begin();
+ pit != possibleEntsToMerge.end(); pit++)
+ {
+ EntityHandle eh=*pit;//possible entity to be matched
+ conn.clear();
+ //Get the vertices connected to it in a range
+
+ result = mbImpl->get_connectivity(&eh, 1, conn);
+ if (MB_SUCCESS!=result)
+ return result;
+ matches.clear();
+ // now retrieve all entities connected to all conn vertices
+ result = mbImpl->get_adjacencies(conn, dim, false, matches,
+ Interface::INTERSECT);
+ if (MB_SUCCESS!=result)
+ return result;
+ if (matches.size() > 1)
+ {
+ for (Range::iterator matchIt = matches.begin();
+ matchIt != matches.end(); matchIt++)
+ {
+ EntityHandle to_remove=*matchIt;
+ if (to_remove != eh)
+ {
+ moreDeadEnts.insert(to_remove);
+ result = mbImpl->merge_entities(eh, to_remove, false, false);
+ if (result != MB_SUCCESS)
+ return result;
+ possibleEntsToMerge.erase(to_remove);
+ }
+ }
+ }
+
+ }
+ //Delete the entities of dimension dim
+ result = mbImpl->delete_entities(moreDeadEnts);
+ if (result != MB_SUCCESS)
+ return result;
+ }
+ return MB_SUCCESS;
+#if 0
+ Range skinEnts, adj, matches, moreDeadEnts;
+ ErrorCode result;
+ Skinner skinner(mbImpl);
+ //Go through each dimension
+ for (int dim = 1; dim < 3; dim++)
+ {
skinEnts.clear();
moreDeadEnts.clear();
result = skinner.find_skin(0, elems, dim, skinEnts, false, false);
//Go through each skin entity and see if it shares adjacencies with another entity
- for(moab::Range::iterator skinIt = skinEnts.begin(); skinIt != skinEnts.end(); skinIt++){
+ for (Range::iterator skinIt = skinEnts.begin();
+ skinIt != skinEnts.end(); skinIt++)
+ {
adj.clear();
//Get the adjacencies 1 dimension lower
- result = mbImpl->get_adjacencies(&(*skinIt), 1, dim-1, false, adj);
- if(result != moab::MB_SUCCESS) return result;
+ result = mbImpl->get_adjacencies(&(*skinIt), 1, dim - 1, false, adj);
+ if (result != MB_SUCCESS)
+ return result;
//See what other entities share these adjacencies
matches.clear();
- result = mbImpl->get_adjacencies(adj, dim, false, matches, moab::Interface::INTERSECT);
- if(result != moab::MB_SUCCESS) return result;
+ result = mbImpl->get_adjacencies(adj, dim, false, matches,
+ Interface::INTERSECT);
+ if (result != MB_SUCCESS)
+ return result;
//If there is more than one entity, then we have some to merge and erase
- if(matches.size() > 1){
- for(moab::Range::iterator matchIt = matches.begin(); matchIt != matches.end(); matchIt++){
- if(*matchIt != *skinIt){
- moreDeadEnts.insert(*matchIt);
- result = mbImpl->merge_entities(*skinIt, *matchIt, false, false);
- if(result != moab::MB_SUCCESS) return result;
- skinEnts.erase(*matchIt);
- }
- }
- }
+ if (matches.size() > 1)
+ {
+ for (Range::iterator matchIt = matches.begin();
+ matchIt != matches.end(); matchIt++)
+ {
+ if (*matchIt != *skinIt)
+ {
+ moreDeadEnts.insert(*matchIt);
+ result = mbImpl->merge_entities(*skinIt, *matchIt, false, false);
+ if (result != MB_SUCCESS)
+ return result;
+ skinEnts.erase(*matchIt);
+ }
+ }
+ }
}
//Delete the entities
result = mbImpl->delete_entities(moreDeadEnts);
- if(result != moab::MB_SUCCESS)return result;
+ if (result != MB_SUCCESS)
+ return result;
}
- return moab::MB_SUCCESS;
+ return MB_SUCCESS;
+#endif
}
-}//End namespace moab
+} //End namespace moab
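A minimal usage sketch for the reworked MergeMesh API (the file names and the tolerance are placeholders, not part of the MOAB sources):

#include "moab/Core.hpp"
#include "moab/MergeMesh.hpp"
#include "moab/Range.hpp"

using namespace moab;

int main()
{
  Core mb;
  // Load two touching meshes into the same instance (file names assumed).
  if (MB_SUCCESS != mb.load_file("part1.h5m")) return 1;
  if (MB_SUCCESS != mb.load_file("part2.h5m")) return 1;

  Range elems;
  if (MB_SUCCESS != mb.get_entities_by_dimension(0, 3, elems)) return 1;

  // Merge coincident skin vertices and, with the new code path above,
  // the duplicated edges/faces adjacent to the merged-to vertices.
  MergeMesh merger(&mb);
  ErrorCode rval = merger.merge_entities(elems, 1.0e-8 /*merge_tol*/);
  return (MB_SUCCESS == rval) ? 0 : 1;
}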
diff --git a/src/io/NCHelperGCRM.cpp b/src/io/NCHelperGCRM.cpp
index f673aff..25ed53c 100644
--- a/src/io/NCHelperGCRM.cpp
+++ b/src/io/NCHelperGCRM.cpp
@@ -248,6 +248,7 @@ ErrorCode NCHelperGCRM::check_existing_mesh()
ErrorCode NCHelperGCRM::create_mesh(Range& faces)
{
int& gatherSetRank = _readNC->gatherSetRank;
+ int& trivialPartitionShift = _readNC->trivialPartitionShift;
bool& noEdges = _readNC->noEdges;
DebugOutput& dbgOut = _readNC->dbgOut;
@@ -267,19 +268,24 @@ ErrorCode NCHelperGCRM::create_mesh(Range& faces)
createGatherSet = true;
if (procs >= 2) {
+ // Shift rank to obtain a rotated trivial partition
+ int shifted_rank = rank;
+ if (trivialPartitionShift > 0)
+ shifted_rank = (rank + trivialPartitionShift) % procs;
+
// Compute the number of local cells on this proc
nLocalCells = int(std::floor(1.0 * nCells / procs));
// The starting global cell index in the GCRM file for this proc
- int start_cell_idx = rank * nLocalCells;
+ int start_cell_idx = shifted_rank * nLocalCells;
// Number of extra cells after equal split over procs
int iextra = nCells % procs;
// Allocate extra cells over procs
- if (rank < iextra)
+ if (shifted_rank < iextra)
nLocalCells++;
- start_cell_idx += std::min(rank, iextra);
+ start_cell_idx += std::min(shifted_rank, iextra);
start_cell_idx++; // 0 based -> 1 based
diff --git a/src/io/NCHelperHOMME.cpp b/src/io/NCHelperHOMME.cpp
index 3665d01..716f624 100644
--- a/src/io/NCHelperHOMME.cpp
+++ b/src/io/NCHelperHOMME.cpp
@@ -248,6 +248,7 @@ ErrorCode NCHelperHOMME::create_mesh(Range& faces)
DebugOutput& dbgOut = _readNC->dbgOut;
bool& spectralMesh = _readNC->spectralMesh;
int& gatherSetRank = _readNC->gatherSetRank;
+ int& trivialPartitionShift = _readNC->trivialPartitionShift;
int rank = 0;
int procs = 1;
@@ -349,24 +350,29 @@ ErrorCode NCHelperHOMME::create_mesh(Range& faces)
tmp_conn[4 * i + 3] = tmp_conn2[i + 3 * num_quads];
}
- // Need to know whether we'll be creating gather mesh later, to make sure we allocate enough space
- // in one shot
+ // Need to know whether we'll be creating gather mesh later, to make sure
+ // we allocate enough space in one shot
bool create_gathers = false;
if (rank == gatherSetRank)
create_gathers = true;
+ // Shift rank to obtain a rotated trivial partition
+ int shifted_rank = rank;
+ if (procs >= 2 && trivialPartitionShift > 0)
+ shifted_rank = (rank + trivialPartitionShift) % procs;
+
// Compute the number of local quads, accounting for coarse or fine representation
// spectral_unit is the # fine quads per coarse quad, or spectralOrder^2
int spectral_unit = (spectralMesh ? _spectralOrder * _spectralOrder : 1);
// num_coarse_quads is the number of quads instantiated in MOAB; if !spectralMesh, num_coarse_quads = num_fine_quads
num_coarse_quads = int(std::floor(1.0 * num_quads / (spectral_unit * procs)));
// start_idx is the starting index in the HommeMapping connectivity list for this proc, before converting to coarse quad representation
- start_idx = 4 * rank * num_coarse_quads * spectral_unit;
+ start_idx = 4 * shifted_rank * num_coarse_quads * spectral_unit;
// iextra = # coarse quads extra after equal split over procs
int iextra = num_quads % (procs * spectral_unit);
- if (rank < iextra)
+ if (shifted_rank < iextra)
num_coarse_quads++;
- start_idx += 4 * spectral_unit * std::min(rank, iextra);
+ start_idx += 4 * spectral_unit * std::min(shifted_rank, iextra);
// num_fine_quads is the number of quads in the connectivity list in HommeMapping file assigned to this proc
num_fine_quads = spectral_unit * num_coarse_quads;
diff --git a/src/io/NCHelperMPAS.cpp b/src/io/NCHelperMPAS.cpp
index 65a6c37..6851c75 100644
--- a/src/io/NCHelperMPAS.cpp
+++ b/src/io/NCHelperMPAS.cpp
@@ -287,6 +287,7 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
{
Interface*& mbImpl = _readNC->mbImpl;
int& gatherSetRank = _readNC->gatherSetRank;
+ int& trivialPartitionShift = _readNC->trivialPartitionShift;
bool& noMixedElements = _readNC->noMixedElements;
bool& noEdges = _readNC->noEdges;
@@ -306,19 +307,24 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
createGatherSet = true;
if (procs >= 2) {
+ // Shift rank to obtain a rotated trivial partition
+ int shifted_rank = rank;
+ if (trivialPartitionShift > 0)
+ shifted_rank = (rank + trivialPartitionShift) % procs;
+
// Compute the number of local cells on this proc
nLocalCells = int(std::floor(1.0 * nCells / procs));
// The starting global cell index in the MPAS file for this proc
- int start_cell_idx = rank * nLocalCells;
+ int start_cell_idx = shifted_rank * nLocalCells;
// Number of extra cells after equal split over procs
int iextra = nCells % procs;
// Allocate extra cells over procs
- if (rank < iextra)
+ if (shifted_rank < iextra)
nLocalCells++;
- start_cell_idx += std::min(rank, iextra);
+ start_cell_idx += std::min(shifted_rank, iextra);
start_cell_idx++; // 0 based -> 1 based
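The rotated trivial partition used by the three readers above can be exercised in isolation; in this sketch the values of nCells, procs and the shift are made up for illustration:

#include <algorithm>
#include <cstdio>

int main()
{
  const int nCells = 10, procs = 4, trivialPartitionShift = 1;
  for (int rank = 0; rank < procs; rank++) {
    int shifted_rank = (rank + trivialPartitionShift) % procs;
    int nLocalCells = nCells / procs;           // equal split
    int start_cell_idx = shifted_rank * nLocalCells;
    int iextra = nCells % procs;                // leftover cells
    if (shifted_rank < iextra)
      nLocalCells++;                            // spread leftovers over the first ranks
    start_cell_idx += std::min(shifted_rank, iextra);
    start_cell_idx++;                           // 0 based -> 1 based
    printf("rank %d reads cells [%d, %d]\n",
           rank, start_cell_idx, start_cell_idx + nLocalCells - 1);
  }
  return 0;
}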
diff --git a/src/io/NCWriteHOMME.cpp b/src/io/NCWriteHOMME.cpp
index abfe4ff..3dd61c1 100644
--- a/src/io/NCWriteHOMME.cpp
+++ b/src/io/NCWriteHOMME.cpp
@@ -246,9 +246,11 @@ ErrorCode NCWriteHOMME::write_nonset_variables(std::vector<WriteNC::VarData>& vd
switch (variableData.varDataType) {
case NC_DOUBLE: {
std::vector<double> tmpdoubledata(num_local_verts_owned * num_lev);
- if (num_lev > 1)
+ if (num_lev > 1) {
// Transpose (ncol, lev) back to (lev, ncol)
- jik_to_kji(num_local_verts_owned, 1, num_lev, &tmpdoubledata[0], &tag_data[0]);
+ // Note, num_local_verts_owned is not used by jik_to_kji_stride()
+ jik_to_kji_stride(num_local_verts_owned, 1, num_lev, &tmpdoubledata[0], &tag_data[0], localGidVertsOwned);
+ }
size_t indexInDoubleArray = 0;
size_t ic = 0;
diff --git a/src/io/NCWriteHelper.hpp b/src/io/NCWriteHelper.hpp
index 42425d2..8e817e3 100644
--- a/src/io/NCWriteHelper.hpp
+++ b/src/io/NCWriteHelper.hpp
@@ -44,16 +44,6 @@ protected:
// Write non-set variables (implemented in child classes)
virtual ErrorCode write_nonset_variables(std::vector<WriteNC::VarData>& vdatas, std::vector<int>& tstep_nums) = 0;
- template <typename T> void jik_to_kji(size_t ni, size_t nj, size_t nk, T* dest, T* source)
- {
- size_t nik = ni * nk, nij = ni * nj;
- for (std::size_t k = 0; k != nk; k++)
- for (std::size_t j = 0; j != nj; j++)
- for (std::size_t i = 0; i != ni; i++)
- dest[k*nij + j*ni + i] = source[j*nik + i*nk + k];
- }
-
-protected:
//! Allow NCWriteHelper to directly access members of WriteNC
WriteNC* _writeNC;
@@ -99,6 +89,15 @@ private:
//! Implementation of NCWriteHelper::write_nonset_variables()
virtual ErrorCode write_nonset_variables(std::vector<WriteNC::VarData>& vdatas, std::vector<int>& tstep_nums);
+ template <typename T> void jik_to_kji(size_t ni, size_t nj, size_t nk, T* dest, T* source)
+ {
+ size_t nik = ni * nk, nij = ni * nj;
+ for (std::size_t k = 0; k != nk; k++)
+ for (std::size_t j = 0; j != nj; j++)
+ for (std::size_t i = 0; i != ni; i++)
+ dest[k*nij + j*ni + i] = source[j*nik + i*nk + k];
+ }
+
protected:
//! Dimensions of my local part of grid
int lDims[6];
@@ -117,6 +116,25 @@ public:
virtual ~UcdNCWriteHelper() {}
protected:
+ //! This version takes as input the moab range, from which we actually need just the
+ //! size of each sequence, for a proper transpose of the data
+ template <typename T> void jik_to_kji_stride(size_t , size_t nj, size_t nk, T* dest, T* source, Range& localGid)
+ {
+ std::size_t idxInSource = 0; // Position of the start of the stride
+ // For each subrange, we will transpose a matrix of size
+ // subrange*nj*nk (subrange takes the role of ni)
+ for (Range::pair_iterator pair_iter = localGid.pair_begin();
+ pair_iter != localGid.pair_end(); ++pair_iter) {
+ std::size_t size_range = pair_iter->second - pair_iter->first + 1;
+ std::size_t nik = size_range * nk, nij = size_range * nj;
+ for (std::size_t k = 0; k != nk; k++)
+ for (std::size_t j = 0; j != nj; j++)
+ for (std::size_t i = 0; i != size_range; i++)
+ dest[idxInSource + k*nij + j*size_range + i] = source[idxInSource + j*nik + i*nk + k];
+ idxInSource += (size_range*nj*nk);
+ }
+ }
+
//! Dimension numbers for nCells, nEdges and nVertices
int cDim, eDim, vDim;
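The per-stride transpose performed by jik_to_kji_stride() can be illustrated for a single contiguous block as follows (the array contents are arbitrary; this is not MOAB code):

#include <cassert>
#include <cstddef>
#include <vector>

template <typename T>
void jik_to_kji(std::size_t ni, std::size_t nj, std::size_t nk, T* dest, const T* source)
{
  std::size_t nik = ni * nk, nij = ni * nj;
  for (std::size_t k = 0; k != nk; k++)
    for (std::size_t j = 0; j != nj; j++)
      for (std::size_t i = 0; i != ni; i++)
        dest[k*nij + j*ni + i] = source[j*nik + i*nk + k];
}

int main()
{
  const std::size_t ni = 3, nj = 1, nk = 2;            // e.g. 3 columns, 2 levels
  std::vector<double> src = {10, 11, 20, 21, 30, 31};  // (ncol, lev) order
  std::vector<double> dst(src.size());
  jik_to_kji(ni, nj, nk, dst.data(), src.data());
  // (lev, ncol) order: level 0 of all columns, then level 1
  assert(dst[0] == 10 && dst[1] == 20 && dst[2] == 30);
  assert(dst[3] == 11 && dst[4] == 21 && dst[5] == 31);
  return 0;
}

jik_to_kji_stride() applies this same index mapping to each contiguous subrange of localGid in turn, which is why only the size of each subrange is needed from the input range.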
diff --git a/src/io/ReadNC.cpp b/src/io/ReadNC.cpp
index 7607927..bdf6d3b 100644
--- a/src/io/ReadNC.cpp
+++ b/src/io/ReadNC.cpp
@@ -19,7 +19,7 @@ ReadNC::ReadNC(Interface* impl) :
myPcomm(NULL),
#endif
noMesh(false), noVars(false), spectralMesh(false), noMixedElements(false), noEdges(false),
- gatherSetRank(-1), tStepBase(-1), myHelper(NULL)
+ gatherSetRank(-1), tStepBase(-1), trivialPartitionShift(0), myHelper(NULL)
{
assert(impl != NULL);
impl->query_interface(readMeshIface);
@@ -280,6 +280,12 @@ ErrorCode ReadNC::parse_options(const FileOptions& opts, std::vector<std::string
SET_ERR(rval, "Invalid value for TIMESTEPBASE option");
}
+ rval = opts.get_int_option("TRIVIAL_PARTITION_SHIFT", 1, trivialPartitionShift);
+ if (MB_TYPE_OUT_OF_RANGE == rval) {
+ readMeshIface->report_error("Invalid value for TRIVIAL_PARTITION_SHIFT option");
+ return rval;
+ }
+
#ifdef USE_MPI
isParallel = (opts.match_option("PARALLEL", "READ_PART") != MB_ENTITY_NOT_FOUND);
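A hedged sketch of how the new option would be passed through the file options string (the file name is a placeholder, an MPI build of MOAB is assumed, and additional parallel options may be required depending on the reader):

#include "moab/Core.hpp"
#include <mpi.h>
using namespace moab;

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  Core mb;
  // Shift the trivial partition by one rank when reading in parallel.
  const char* opts = "PARALLEL=READ_PART;TRIVIAL_PARTITION_SHIFT=1";
  ErrorCode rval = mb.load_file("mpas_grid.nc", 0, opts);
  MPI_Finalize();
  return (MB_SUCCESS == rval) ? 0 : 1;
}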
diff --git a/src/io/ReadNC.hpp b/src/io/ReadNC.hpp
index 1b81d15..b4ec60b 100644
--- a/src/io/ReadNC.hpp
+++ b/src/io/ReadNC.hpp
@@ -209,6 +209,7 @@ private:
bool noEdges;
int gatherSetRank;
int tStepBase;
+ int trivialPartitionShift;
//! Helper class instance
NCHelper* myHelper;
diff --git a/src/io/WriteHDF5.cpp b/src/io/WriteHDF5.cpp
index 337cb96..b53508e 100644
--- a/src/io/WriteHDF5.cpp
+++ b/src/io/WriteHDF5.cpp
@@ -2567,7 +2567,43 @@ ErrorCode WriteHDF5::serial_create_file( const char* filename,
rval = assign_ids( ex_itor->range, ex_itor->first_id );
CHK_MB_ERR_0(rval);
}
+ // create set tables
+ writeSets = !setSet.range.empty();
+ if (writeSets)
+ {
+ long contents_len, children_len, parents_len;
+
+ setSet.total_num_ents = setSet.range.size();
+ setSet.max_num_ents = setSet.total_num_ents;
+ rval = create_set_meta(setSet.total_num_ents, first_id);
+ CHK_MB_ERR_0(rval);
+
+ setSet.first_id = (id_t) first_id;
+ rval = assign_ids(setSet.range, setSet.first_id);
+ CHK_MB_ERR_0(rval);
+
+ rval = count_set_size(setSet.range, contents_len, children_len,
+ parents_len);
+ CHK_MB_ERR_0(rval);
+
+ rval = create_set_tables(contents_len, children_len, parents_len);
+ CHK_MB_ERR_0(rval);
+
+ setSet.offset = 0;
+ setContentsOffset = 0;
+ setChildrenOffset = 0;
+ setParentsOffset = 0;
+ writeSetContents = !!contents_len;
+ writeSetChildren = !!children_len;
+ writeSetParents = !!parents_len;
+
+ maxNumSetContents = contents_len;
+ maxNumSetChildren = children_len;
+ maxNumSetParents = parents_len;
+ } // if(!setSet.range.empty())
+ // Create the adjacency table after the set table, because sets do not yet have an id;
+ // some entities are adjacent to sets (exodus?)
// create node adjacency table
id_t num_adjacencies;
#ifdef MB_H5M_WRITE_NODE_ADJACENCIES
@@ -2605,39 +2641,6 @@ ErrorCode WriteHDF5::serial_create_file( const char* filename,
}
}
- // create set tables
- writeSets = !setSet.range.empty();
- if (writeSets)
- {
- long contents_len, children_len, parents_len;
-
- setSet.total_num_ents = setSet.range.size();
- setSet.max_num_ents = setSet.total_num_ents;
- rval = create_set_meta( setSet.total_num_ents, first_id );
- CHK_MB_ERR_0(rval);
-
- setSet.first_id = (id_t)first_id;
- rval = assign_ids( setSet.range, setSet.first_id );
- CHK_MB_ERR_0(rval);
-
- rval = count_set_size( setSet.range, contents_len, children_len, parents_len );
- CHK_MB_ERR_0(rval);
-
- rval = create_set_tables( contents_len, children_len, parents_len );
- CHK_MB_ERR_0(rval);
-
- setSet.offset = 0;
- setContentsOffset = 0;
- setChildrenOffset = 0;
- setParentsOffset = 0;
- writeSetContents = !!contents_len;
- writeSetChildren = !!children_len;
- writeSetParents = !!parents_len;
-
- maxNumSetContents = contents_len;
- maxNumSetChildren = children_len;
- maxNumSetParents = parents_len;
- } // if(!setSet.range.empty())
dbgOut.tprint( 1, "Gathering Tags\n" );
diff --git a/src/io/WriteHDF5.hpp b/src/io/WriteHDF5.hpp
index 256a217..555d4ca 100644
--- a/src/io/WriteHDF5.hpp
+++ b/src/io/WriteHDF5.hpp
@@ -104,7 +104,7 @@ public:
Range range;
//! The first Id allocated by the mhdf library. Entities in range have sequential IDs.
id_t first_id;
- //! The offset at which to begin writting this processor's data.
+ //! The offset at which to begin writing this processor's data.
//! Always zero except for parallel IO.
long offset;
//! Offset for adjacency data. Always zero except for parallel IO
diff --git a/src/moab/MergeMesh.hpp b/src/moab/MergeMesh.hpp
index 489bb3e..8eef9a8 100644
--- a/src/moab/MergeMesh.hpp
+++ b/src/moab/MergeMesh.hpp
@@ -7,73 +7,69 @@
namespace moab {
class AdaptiveKDTree;
-
-class MergeMesh
+
+class MergeMesh
{
public:
- /* \brief Constructor
- */
- MergeMesh(moab::Interface *mbImpl, bool printErrorIn = true);
-
- /* \brief Destructor
- */
+ /* \brief Constructor
+ */
+ MergeMesh(Interface *mbImpl, bool printErrorIn = true);
+
+ /* \brief Destructor
+ */
virtual ~MergeMesh();
- /* \brief Merge vertices in elements passed in
- */
- moab::ErrorCode merge_entities(moab::EntityHandle *elems,
- int elems_size,
- const double merge_tol,
- const int do_merge = true,
- const int update_sets = false,
- moab::Tag merge_tag = 0,
- bool do_higher_dim = true);
-
- moab::ErrorCode merge_entities(moab::Range &elems,
- const double merge_tol,
- const int do_merge = true,
- const int update_sets = false,
- moab::Tag merge_tag = 0,
- bool do_higher_dim = true);
-
+ /* \brief Merge vertices in elements passed in
+ */
+ ErrorCode merge_entities(EntityHandle *elems, int elems_size,
+ const double merge_tol, const int do_merge = true, const int update_sets =
+ false, Tag merge_tag = 0, bool do_higher_dim = true);
+
+ ErrorCode merge_entities(Range &elems, const double merge_tol,
+ const int do_merge = true, const int update_sets = false,
+ Tag merge_tag = 0, bool do_higher_dim = true);
+
//Identify higher dimension to be merged
- moab::ErrorCode merge_higher_dimensions(moab::Range &elems);
+ ErrorCode merge_higher_dimensions(Range &elems);
- //- perform the actual merge
- moab::ErrorCode perform_merge(moab::Tag merged_to);
+ //- perform the actual merge
+ ErrorCode perform_merge(Tag merged_to);
private:
//iMesh_Instance imeshImpl;
double mergeTol, mergeTolSq;
- moab::Tag mergeTag;
+ Tag mergeTag;
+
+ //- given a kdtree, set tag on vertices in leaf nodes with vertices
+ //- to which they should be merged
+ ErrorCode find_merged_to(EntityHandle &tree_root,
+ AdaptiveKDTree &tree, Tag merged_to);
+
+ Interface *mbImpl;
- //- given a kdtree, set tag on vertices in leaf nodes with vertices
- //- to which they should be merged
- moab::ErrorCode find_merged_to(moab::EntityHandle &tree_root,
- moab::AdaptiveKDTree &tree,
- moab::Tag merged_to);
-
- moab::Interface *mbImpl;
+ //- the tag pointing to the entity to which an entity will be merged
+ Tag mbMergeTag;
- //- the tag pointing to the entity to which an entity will be merged
- moab::Tag mbMergeTag;
+ //- entities which will go away after the merge
+ Range deadEnts;
- //- entities which will go away after the merge
- moab::Range deadEnts;
+ // vertices that were merged with other vertices, and were left in the database
+ Range mergedToVertices;
//Allow a warning to be suppressed when no merging is done
bool printError;
};
- inline MergeMesh::MergeMesh(Interface *impl, bool printErrorIn)
- : mbImpl(impl), printError(printErrorIn)
+inline MergeMesh::MergeMesh(Interface *impl, bool printErrorIn) :
+ mbImpl(impl), printError(printErrorIn)
{
}
-inline MergeMesh::~MergeMesh()
+inline MergeMesh::~MergeMesh()
{
- if (mbMergeTag) mbImpl->tag_delete(mbMergeTag);
+ if (mbMergeTag)
+ mbImpl->tag_delete(mbMergeTag);
}
}
diff --git a/src/parallel/WriteHDF5Parallel.cpp b/src/parallel/WriteHDF5Parallel.cpp
index be692ba..7bd14b8 100644
--- a/src/parallel/WriteHDF5Parallel.cpp
+++ b/src/parallel/WriteHDF5Parallel.cpp
@@ -420,8 +420,17 @@ ErrorCode WriteHDF5Parallel::parallel_create_file( const char* filename,
if (MB_SUCCESS != rval) return error(rval);
if (times) times[FILEID_EXCHANGE_TIME] = timer.elapsed();
+ /**************** Create meshset tables *********************/
- /**************** Create adjacency tables *********************/
+ debug_barrier();
+ dbgOut.tprint(1,"creating meshset table\n");
+ topState.start("creating meshset tables");
+ rval = create_meshset_tables(times);
+ topState.end(rval);
+ if (MB_SUCCESS != rval) return error(rval);
+ if (times) times[CREATE_SET_TIME] = timer.elapsed();
+
+ /**************** Create adjacency tables *********************/
debug_barrier();
dbgOut.tprint(1,"creating adjacency table\n");
@@ -431,15 +440,7 @@ ErrorCode WriteHDF5Parallel::parallel_create_file( const char* filename,
if (MB_SUCCESS != rval) return error(rval);
if (times) times[CREATE_ADJ_TIME] = timer.elapsed();
- /**************** Create meshset tables *********************/
- debug_barrier();
- dbgOut.tprint(1,"creating meshset table\n");
- topState.start("creating meshset tables");
- rval = create_meshset_tables(times);
- topState.end(rval);
- if (MB_SUCCESS != rval) return error(rval);
- if (times) times[CREATE_SET_TIME] = timer.elapsed();
/**************** Create tag data *********************/
This diff is so big that we needed to truncate the remainder.
Repository URL: https://bitbucket.org/fathomteam/moab/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.