[MOAB-dev] commit/MOAB: iulian07: use mbzoltan for partitioning in MPAS reader
commits-noreply at bitbucket.org
Sat Oct 19 10:00:45 CDT 2013
1 new commit in MOAB:
https://bitbucket.org/fathomteam/moab/commits/b816be22a376/
Changeset: b816be22a376
Branch: master
User: iulian07
Date: 2013-10-19 16:44:00
Summary: use mbzoltan for partitioning in MPAS reader
Some changes were needed:
1) include the MBZoltan class in the MOAB library
2) eliminate the libmbzoltan library
3) of course, MBZoltan is included only if MOAB is configured with Zoltan
4) the new method added in MBZoltan has to be run in parallel; so far it
is exercised only in the MPAS reader, when launched on at least
2 processors. For mbcslam's needs, the RCB method works best, but
different options could be used
5) the mbpart tool is still compiled, but it now depends directly on libMOAB
6) the parallel MPAS reader now has 2 more tests, for the RCBZOLTAN
partition method with and without the NO_MIXED_ELEMENTS option
7) the new option is selected with -O PARTITION_METHOD=RCBZOLTAN; it is
defined in ScdParData, even though it is an unstructured-mesh option
(sic); see the usage sketch after the TODO list below
TODO:
eliminate the mbzoltan folder and move the MBZoltan class into src, src/moab;
we have to use git mv, otherwise we will lose the wonderful history.
move mbpart from tools/mbzoltan to the tools folder.
update the metadata manual for the new option(s).
the MPAS reader still uses a std::map that is probably unnecessary.
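As promised above, a minimal usage sketch for the new option. The option
string mirrors the one added to mpastrvpart.cpp below; the file name is a
placeholder, and it assumes MOAB built with MPI and Zoltan, run on 2+ ranks
so the Zoltan path is actually taken:

#include "moab/Core.hpp"
#include <mpi.h>
#include <iostream>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  moab::Core mb;
  // Same options as the new RCBZOLTAN tests; on fewer than 2 processors
  // the reader falls back to the trivial partition.
  const char* opts = "PARALLEL=READ_PART;PARTITION_METHOD=RCBZOLTAN;"
                     "PARALLEL_RESOLVE_SHARED_ENTS";
  moab::ErrorCode rval = mb.load_file("mpas_grid.nc", 0, opts);
  if (moab::MB_SUCCESS != rval)
    std::cerr << "MPAS read failed\n";
  MPI_Finalize();
  return 0;
}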
Affected #: 9 files
diff --git a/src/Makefile.am b/src/Makefile.am
index dc0c3f2..3a86984 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -205,6 +205,18 @@ nobase_libMOAB_la_include_HEADERS = \
MOAB_FCDefs.h \
MBCN_protos.h \
MBTagConventions.hpp
+if ENABLE_mbzoltan
+ libMOAB_la_SOURCES += $(srcdir)/../tools/mbzoltan/MBZoltan.cpp
+ nobase_libMOAB_la_include_HEADERS += $(srcdir)/../tools/mbzoltan/MBZoltan.hpp
+ AM_CPPFLAGS += $(ZOLTAN_INC_FLAGS) -DHAVE_ZOLTAN
+ # not sure yet if this is needed; will see when linking io reader (MPAS so far)
+ # the tool mbpart needed it, and it just includes MBZoltan.hpp; so it needs ways to find
+ # the libraries zoltan depends on
+ libMOAB_la_LIBADD += $(ZOLTAN_LIBS) $(ZOLTAN_LIB_FLAGS)
+endif
+if HAVE_CGM
+ AM_CPPFLAGS += @CGM_CPPFLAGS@ -DCGM @MOAB_CGM_DEFINES@
+endif
MBCN_protos.h: MBCN.h $(top_srcdir)/itaps/mkprotos.sh
$(AM_V_GEN)$(top_srcdir)/itaps/mkprotos.sh MBCN MOAB $< $@ MOAB_FCDefs.h
diff --git a/src/ScdInterface.cpp b/src/ScdInterface.cpp
index 1d42dde..12f5b89 100644
--- a/src/ScdInterface.cpp
+++ b/src/ScdInterface.cpp
@@ -22,7 +22,7 @@
namespace moab
{
-const char *ScdParData::PartitionMethodNames[] = {"alljorkori", "alljkbal", "sqij", "sqjk", "sqijk", "trivial", "nopart"};
+const char *ScdParData::PartitionMethodNames[] = {"alljorkori", "alljkbal", "sqij", "sqjk", "sqijk", "trivial","rcbzoltan", "nopart"};
ScdInterface::ScdInterface(Interface *imp, bool boxes)
: mbImpl(imp),
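A note on the change above: the option value is matched by position against
PartitionMethodNames, so the enum in ScdInterface.hpp and this array must
stay in the same order ("rcbzoltan" was inserted before "nopart" in both).
A simplified, hypothetical lookup just to show the positional coupling (the
real parsing in the NC reader also handles case):

#include "moab/ScdInterface.hpp"
#include <cstring>

// Hypothetical helper, case-sensitive for brevity: the enum value is
// recovered by the name's index in PartitionMethodNames.
int method_from_name(const char* name)
{
  for (int i = 0; i <= moab::ScdParData::NOPART; i++)
    if (0 == strcmp(name, moab::ScdParData::PartitionMethodNames[i]))
      return i; // e.g. "rcbzoltan" -> moab::ScdParData::RCBZOLTAN
  return -1;    // unknown method
}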
diff --git a/src/io/Makefile.am b/src/io/Makefile.am
index bb5b7cb..968f47f 100644
--- a/src/io/Makefile.am
+++ b/src/io/Makefile.am
@@ -39,6 +39,9 @@ if !NETCDF_FILE
endif
endif
+if ENABLE_mbzoltan
+ AM_CPPFLAGS += -DHAVE_ZOLTAN -I$(srcdir)/../../tools/mbzoltan $(ZOLTAN_INC_FLAGS)
+endif
if HDF5_FILE
libmoabio_la_LIBADD += mhdf/libmhdf.la
MOAB_HDF5_SRCS = HDF5Common.cpp \
diff --git a/src/io/NCHelperMPAS.cpp b/src/io/NCHelperMPAS.cpp
index 09fe541..35fdb2b 100644
--- a/src/io/NCHelperMPAS.cpp
+++ b/src/io/NCHelperMPAS.cpp
@@ -4,6 +4,10 @@
#include "moab/SpectralMeshTool.hpp"
#include "MBTagConventions.hpp"
+#if HAVE_ZOLTAN
+#include "MBZoltan.hpp"
+#endif
+
#include <cmath>
#define ERRORR(rval, str) \
@@ -16,6 +20,36 @@ namespace moab {
const int DEFAULT_MAX_EDGES_PER_CELL = 10;
+// this is used for zoltan only related stuff
+#define GET_VAR(name, id, dims) \
+ { \
+ id = -1;\
+ int gvfail = NCFUNC(inq_varid)( _fileId, name, &id); \
+ if (NC_NOERR == gvfail) { \
+ int ndims;\
+ gvfail = NCFUNC(inq_varndims)( _fileId, id, &ndims);\
+ if (NC_NOERR == gvfail) {\
+ dims.resize(ndims); \
+ gvfail = NCFUNC(inq_vardimid)( _fileId, id, &dims[0]);}}}
+
+#define GET_1D_DBL_VAR_RANGE(name, id, startIndex, endIndex, vals) \
+ {std::vector<int> dum_dims; \
+ GET_VAR(name, id, dum_dims);\
+ if (-1 != id) {\
+ if (rank==0) std::cout << " var:" << name << " id: " << id << "\n"; \
+ NCDF_SIZE ntmp;\
+ int dvfail = NCFUNC(inq_dimlen)(_fileId, dum_dims[0], &ntmp);\
+ if (rank==0) std::cout << " prepare read var " << name << " start:" << startIndex << " end:"<< endIndex <<" Actual size:" << \
+ ntmp << "\n" ; \
+ if (startIndex > (int)ntmp || endIndex > (int) ntmp || startIndex>=endIndex) \
+ { std::cout << "bad input \n"; return MB_FAILURE;} \
+ vals.resize(endIndex-startIndex);\
+ NCDF_SIZE ntmp1 = startIndex; NCDF_SIZE count = endIndex-startIndex; \
+ dvfail = NCFUNC(get_vara_double_all)( _fileId, id, &ntmp1, &count, &vals[0]);\
+ if (NC_NOERR != dvfail) {\
+ std::cout<<"ReadNCDF:: Problem getting variable "<< name<<"\n";\
+ return MB_FAILURE;}}}
+
NCHelperMPAS::NCHelperMPAS(ReadNC* readNC, int fileId, const FileOptions& opts, EntityHandle fileSet)
: UcdNCHelper(readNC, fileId, opts, fileSet)
, maxEdgesPerCell(DEFAULT_MAX_EDGES_PER_CELL)
@@ -237,6 +271,8 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
int& gatherSetRank = _readNC->gatherSetRank;
bool& noMixedElements = _readNC->noMixedElements;
+ DebugOutput& dbgOut = _readNC->dbgOut;
+
int rank = 0;
int procs = 1;
#ifdef USE_MPI
@@ -253,25 +289,54 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
if (rank == gatherSetRank)
create_gathers = true;
- bool apply_zoltan = false;
- if (apply_zoltan) {
- // Zoltan partition, TBD
+ // Trivial partition
+ // Compute the number of local cells on this proc
+ nLocalCells = int(std::floor(1.0 * nCells / procs));
+ // start_cell_idx is the starting global cell index in the MPAS file for this proc
+ int start_cell_idx = rank * nLocalCells;
+ // iextra = # cells extra after equal split over procs
+ int iextra = nCells % procs;
+ if (rank < iextra)
+ nLocalCells++;
+ start_cell_idx += std::min(rank, iextra);
+
+
+#ifdef USE_MPI
+ int& partMethod = _readNC->partMethod;
+ if (partMethod==ScdParData::RCBZOLTAN && procs >=2) // it does not make sense to partition
+ // if the number of processors is less than 2; trivial partition is good enough
+ {
+ // Zoltan partition using RCB; maybe more studies would be good, as to which partition
+ // is better
+ int temp_dim;
+ MBZoltan * mbZTool = new MBZoltan(mbImpl, false, 0, NULL);
+
+ std::vector<double> x, y, z;
+ int end_cell_index=start_cell_idx+nLocalCells;
+ GET_1D_DBL_VAR_RANGE("xCell", temp_dim, start_cell_idx, end_cell_index, x) ;
+ GET_1D_DBL_VAR_RANGE("yCell", temp_dim, start_cell_idx, end_cell_index, y) ;
+ GET_1D_DBL_VAR_RANGE("zCell", temp_dim, start_cell_idx, end_cell_index, z) ;
+
+ ErrorCode rval = mbZTool->repartition(x, y, z, start_cell_idx+1, "RCB", localGidCells );
+ //delete mbZTool;
+ if (rval !=MB_SUCCESS)
+ {
+ std::cout <<" error in partitioning\n";
+ return MB_FAILURE;
+ }
+
+ dbgOut.tprintf(1, "After partitioning, localGidCells.psize()=%d\n", (int)localGidCells.psize());
+ dbgOut.tprintf(1, " localGidCells.size()=%d\n", (int)localGidCells.size());
+ // this is important: local cells are now redistributed, so nLocalCells is different!
+ nLocalCells = localGidCells.size();
}
else {
- // Trivial partition
- // Compute the number of local cells on this proc
- nLocalCells = int(std::floor(1.0 * nCells / procs));
- // start_cell_idx is the starting global cell index in the MPAS file for this proc
- int start_cell_idx = rank * nLocalCells;
- // iextra = # cells extra after equal split over procs
- int iextra = nCells % procs;
- if (rank < iextra)
- nLocalCells++;
- start_cell_idx += std::min(rank, iextra);
+#endif /* use mpi */
start_cell_idx++; // 0 based -> 1 based
localGidCells.insert(start_cell_idx, start_cell_idx + nLocalCells - 1);
+#ifdef USE_MPI
}
-
+#endif
// Read number of edges on each local cell, to calculate actual maxEdgesPerCell
int nEdgesOnCellVarId;
int success = NCFUNC(inq_varid)(_fileId, "nEdgesOnCell", &nEdgesOnCellVarId);
@@ -324,14 +389,18 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
MPI_Allreduce(&local_max_edges_per_cell, &global_max_edges_per_cell, 1, MPI_INTEGER, MPI_MAX, myPcomm->proc_config().proc_comm());
assert(local_max_edges_per_cell <= global_max_edges_per_cell);
maxEdgesPerCell = global_max_edges_per_cell;
+ if (0==rank)
+ dbgOut.tprintf(1, " global_max_edges_per_cell=%d\n", global_max_edges_per_cell);
}
#endif
+
// Read edges on each local cell, to get localGidEdges later
int edgesOnCellVarId;
success = NCFUNC(inq_varid)(_fileId, "edgesOnCell", &edgesOnCellVarId);
ERRORS(success, "Failed to get variable id of edgesOnCell.");
std::vector<int> edges_on_local_cells(nLocalCells * maxEdgesPerCell);
+ dbgOut.tprintf(1, " edges_on_local_cells.size()=%d\n", (int)edges_on_local_cells.size());
#ifdef PNETCDF_FILE
idxReq = 0;
#endif
@@ -342,7 +411,7 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
EntityHandle starth = pair_iter->first;
EntityHandle endh = pair_iter->second;
NCDF_SIZE tmp_starts[2] = {static_cast<NCDF_SIZE>(starth - 1), 0};
- NCDF_SIZE tmp_counts[2] = {static_cast<NCDF_SIZE>(endh - starth + 1), maxEdgesPerCell};
+ NCDF_SIZE tmp_counts[2] = {static_cast<NCDF_SIZE>(endh - starth + 1), static_cast<NCDF_SIZE>(maxEdgesPerCell)};
// Do a partial read in each subrange
#ifdef PNETCDF_FILE
@@ -357,7 +426,6 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
// Increment the index for next subrange
indexInArray += (endh - starth + 1) * maxEdgesPerCell;
}
-
#ifdef PNETCDF_FILE
// Wait outside the loop
success = NCFUNC(wait_all)(_fileId, requests.size(), &requests[0], &statuss[0]);
@@ -369,6 +437,8 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
std::copy(edges_on_local_cells.rbegin(), edges_on_local_cells.rend(), range_inserter(localGidEdges));
nLocalEdges = localGidEdges.size();
+ dbgOut.tprintf(1, " localGidEdges.psize()=%d\n", (int)localGidEdges.psize());
+ dbgOut.tprintf(1, " localGidEdges.size()=%d\n", (int)localGidEdges.size());
// Read vertices on each local cell, to get localGidVerts and cell connectivity later
int verticesOnCellVarId;
success = NCFUNC(inq_varid)(_fileId, "verticesOnCell", &verticesOnCellVarId);
@@ -415,6 +485,9 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
std::copy(vertices_on_local_cells_sorted.rbegin(), vertices_on_local_cells_sorted.rend(), range_inserter(localGidVerts));
nLocalVertices = localGidVerts.size();
+ dbgOut.tprintf(1, " localGidVerts.psize()=%d\n", (int)localGidVerts.psize());
+ dbgOut.tprintf(1, " localGidVerts.size()=%d\n", (int)localGidVerts.size());
+
// Create local vertices
// We can temporarily use the memory storage allocated before it will be populated
EntityHandle start_vertex;
@@ -467,7 +540,9 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
// Used when NO_MIXED_ELEMENTS option is NOT set
EntityHandle* conn_arr_local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
- std::vector<int> local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
+ // put in subranges; each subrange (for numEdges) will contain, in order, the
+ // gids for cells with that many edges (numEdges)
+ Range local_cells_with_n_edges[DEFAULT_MAX_EDGES_PER_CELL + 1];
std::vector<int> num_edges_on_cell_groups;
std::vector<EntityHandle> start_element_on_cell_groups;
@@ -500,7 +575,8 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
// Divide local cells into groups based on the number of edges
for (int i = 0; i < nLocalCells; i++) {
int num_edges = num_edges_on_local_cells[i];
- local_cells_with_n_edges[num_edges].push_back(localGidCells[i]); // Global cell index
+ local_cells_with_n_edges[num_edges].insert(localGidCells[i]); // Global cell index
+ // by construction, this subrange will contain cells in order
}
for (int i = 3; i <= maxEdgesPerCell; i++) {
@@ -533,11 +609,16 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
// Create a temporary map from vertex GIDs to local handles
// Utilize the memory storage pointed by arrays[0]
- EntityHandle* vert_gid_to_local_handle_tmp_map = (EntityHandle*) arrays[0];
- Range::const_iterator rit;
+ // EntityHandle* vert_gid_to_local_handle_tmp_map = (EntityHandle*) arrays[0];
+ // you don't need it:
+ // so vertex handle start_vertex+idx has the global ID localGidVerts[idx]
+ // if I need the vertex handle for gid, use Range::index();
+ // so, from gid to eh: int index = localGidVerts.index(gid)
+ // eh = start_vertex+idx;
+ /*Range::const_iterator rit;
int vert_idx;
for (rit = localGidVerts.begin(), vert_idx = 0; rit != localGidVerts.end(); ++rit, vert_idx++)
- vert_gid_to_local_handle_tmp_map[*rit - 1] = start_vertex + vert_idx;
+ vert_gid_to_local_handle_tmp_map[*rit - 1] = start_vertex + vert_idx;*/
// Read vertices on each local edge, to get edge connectivity
// Utilize the memory storage pointed by conn_arr_edges
@@ -584,7 +665,9 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
// Convert in-place from int to EntityHandle type (backward)
for (int edge_vert = nLocalEdges * 2 - 1; edge_vert >= 0; edge_vert--) {
EntityHandle global_vert_id = vertices_on_local_edges[edge_vert];
- conn_arr_edges[edge_vert] = vert_gid_to_local_handle_tmp_map[global_vert_id - 1];
+ int idx_vertex=localGidVerts.index(global_vert_id);
+ assert(idx_vertex!=-1);
+ conn_arr_edges[edge_vert] = start_vertex+idx_vertex;
}
// Populate connectivity for local cells
@@ -595,7 +678,9 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
int num_edges = num_edges_on_local_cells[cell_idx];
for (int i = 0; i < num_edges; i++) {
EntityHandle global_vert_id = vertices_on_local_cells[cell_idx * maxEdgesPerCell + i];
- conn_arr_local_cells[cell_idx * maxEdgesPerCell + i] = vert_gid_to_local_handle_tmp_map[global_vert_id - 1];
+ int idx_vertex=localGidVerts.index(global_vert_id);
+ assert(idx_vertex!=-1);
+ conn_arr_local_cells[cell_idx * maxEdgesPerCell + i] = start_vertex+idx_vertex;
}
// Padding: fill connectivity array with last vertex handle
@@ -609,14 +694,16 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
else {
// Create a temporary map from cell GID to local index
// Utilize the memory storage pointed by arrays[1]
- EntityHandle* cell_gid_to_local_index_tmp_map = (EntityHandle*) arrays[1];
+ // again, not correct because of overflow
+ // here we have to use a different map, maybe RangeMap
+ /*EntityHandle* cell_gid_to_local_index_tmp_map = (EntityHandle*) arrays[1];
for (rit = localGidCells.begin(), cell_idx = 0; rit != localGidCells.end(); ++rit, cell_idx++)
- cell_gid_to_local_index_tmp_map[*rit - 1] = cell_idx;
+ cell_gid_to_local_index_tmp_map[*rit - 1] = cell_idx;*/
// For each non-empty cell group, set connectivity array with proper local vertices handles
for (int i = 0; i < numCellGroups; i++) {
int num_edges_per_cell = num_edges_on_cell_groups[i];
- int num_cells = local_cells_with_n_edges[num_edges_per_cell].size();
+ int num_cells = (int)local_cells_with_n_edges[num_edges_per_cell].size();
start_element = start_element_on_cell_groups[i];
for (int j = 0; j < num_cells; j++) {
@@ -624,12 +711,18 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
if (numCellGroups > 1)
cellHandleToGlobalID[start_element + j] = cell_idx;
+ // find the index in the range: local_cells_with_n_edges[num_edges_per_cell];
- cell_idx = cell_gid_to_local_index_tmp_map[cell_idx - 1]; // Local cell index
+ // overflow averted :(
+ // cell_idx = cell_gid_to_local_index_tmp_map[cell_idx - 1]; // Local cell index
+ cell_idx = localGidCells.index(cell_idx);
+ assert(cell_idx!=-1);
for (int k = 0; k < num_edges_per_cell; k++) {
EntityHandle global_vert_id = vertices_on_local_cells[cell_idx * maxEdgesPerCell + k];
+ int idx_vertex=localGidVerts.index(global_vert_id);
+ assert(idx_vertex!=-1);
conn_arr_local_cells_with_n_edges[num_edges_per_cell][j * num_edges_per_cell + k] =
- vert_gid_to_local_handle_tmp_map[global_vert_id - 1];
+ start_vertex+idx_vertex;
}
}
}
@@ -765,6 +858,7 @@ ErrorCode NCHelperMPAS::create_mesh(Range& faces)
rval = _readNC->mbImpl->add_entities(_fileSet, tmp_range);
ERRORR(rval, "Couldn't add new vertices/faces/edges to file set.");
+ dbgOut.tprintf(1, " local entities in set: tmp_range.size()=%d\n", (int)tmp_range.size());
if (create_gathers) {
EntityHandle gather_set;
rval = _readNC->readMeshIface->create_gather_set(gather_set);
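Two notes on the NCHelperMPAS.cpp hunks above. First, the trivial-partition
arithmetic splits nCells as evenly as possible: for example, with
nCells = 642 and 4 processors, floor gives 160 cells per rank, iextra = 2,
so ranks 0 and 1 take 161 cells and the start indices are 0, 161, 322, 482.
Second, the temporary gid-to-handle array (which could overflow for large
global IDs) is replaced by positional lookup in localGidVerts; a minimal
sketch of the pattern, with a hypothetical helper name:

#include "moab/Range.hpp"
#include <cassert>

// Vertices were created contiguously from start_vertex, in the order of
// localGidVerts, so a vertex handle is recovered from its global ID by
// the ID's position in the range.
moab::EntityHandle handle_for_gid(const moab::Range& localGidVerts,
                                  moab::EntityHandle start_vertex,
                                  moab::EntityHandle gid)
{
  int idx = localGidVerts.index(gid); // -1 if gid is not owned locally
  assert(-1 != idx);
  return start_vertex + idx;
}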
diff --git a/src/moab/ScdInterface.hpp b/src/moab/ScdInterface.hpp
index 0726b13..1f2a482 100644
--- a/src/moab/ScdInterface.hpp
+++ b/src/moab/ScdInterface.hpp
@@ -112,7 +112,7 @@ public:
//! Partition method enumeration; these strategies are described in comments for
//! compute_partition_alljorkori, compute_partition_alljkbal, compute_partition_sqij,
//! compute_partition_sqjk, and compute_partition_sqijk
- enum PartitionMethod {ALLJORKORI = 0, ALLJKBAL, SQIJ, SQJK, SQIJK, TRIVIAL, NOPART};
+ enum PartitionMethod {ALLJORKORI = 0, ALLJKBAL, SQIJ, SQJK, SQIJK, TRIVIAL, RCBZOLTAN, NOPART};
//! Partition method names
static const char *PartitionMethodNames[NOPART + 1];
diff --git a/test/parallel/mpastrvpart.cpp b/test/parallel/mpastrvpart.cpp
index 2b86f61..f9bfa36 100644
--- a/test/parallel/mpastrvpart.cpp
+++ b/test/parallel/mpastrvpart.cpp
@@ -13,6 +13,8 @@ static const char example[] = STRINGIFY(MESHDIR) "/io/mpasx1.642.t.2.nc";
void test_read_parallel_mpas_trivial();
void test_read_parallel_mpas_trivial_no_mixed_elements();
+void test_read_parallel_mpas_rcbzoltan();
+void test_read_parallel_mpas_rcbzoltan_no_mixed_elements();
void test_read_parallel(int num_verts, bool test_nb_nodes, int num_edges, bool test_nb_edges,
int num_cells, bool test_nb_cells, bool mixed_elements);
void test_multiple_loads_of_same_file();
@@ -27,6 +29,8 @@ int main(int argc, char* argv[])
result += RUN_TEST(test_read_parallel_mpas_trivial);
result += RUN_TEST(test_read_parallel_mpas_trivial_no_mixed_elements);
+ result += RUN_TEST(test_read_parallel_mpas_rcbzoltan);
+ result += RUN_TEST(test_read_parallel_mpas_rcbzoltan_no_mixed_elements);
result += RUN_TEST(test_multiple_loads_of_same_file);
result += RUN_TEST(test_multiple_loads_of_same_file_no_mixed_elements);
@@ -46,6 +50,18 @@ void test_read_parallel_mpas_trivial_no_mixed_elements()
test_read_parallel(1280, true, 1920, true, 642, true, false);
}
+void test_read_parallel_mpas_rcbzoltan()
+{
+ partition_method = std::string(";PARTITION_METHOD=RCBZOLTAN;PARALLEL_RESOLVE_SHARED_ENTS");
+ test_read_parallel(1280, false, 1920, false, 642, false, true);
+}
+
+void test_read_parallel_mpas_rcbzoltan_no_mixed_elements()
+{
+ partition_method = std::string(";PARTITION_METHOD=RCBZOLTAN;PARALLEL_RESOLVE_SHARED_ENTS;NO_MIXED_ELEMENTS");
+ test_read_parallel(1280, false, 1920, false, 642, false, true);
+}
+
void test_read_parallel(int num_verts, bool test_nb_nodes, int num_edges, bool test_nb_edges,
int num_cells, bool test_nb_cells, bool mixed_elements)
{
diff --git a/tools/mbzoltan/MBZoltan.cpp b/tools/mbzoltan/MBZoltan.cpp
index 45b8a35..e598f0e 100644
--- a/tools/mbzoltan/MBZoltan.cpp
+++ b/tools/mbzoltan/MBZoltan.cpp
@@ -243,6 +243,131 @@ ErrorCode MBZoltan::balance_mesh(const char *zmethod,
return MB_SUCCESS;
}
+ErrorCode MBZoltan::repartition(std::vector<double> & x,std::vector<double>&y, std::vector<double> &z, int StartID,
+ const char * zmethod, Range & localGIDs)
+{
+ //
+ std::vector<int> sendToProcs;
+ int nprocs=mbpc->proc_config().proc_size();
+ int rank = mbpc->proc_config().proc_rank();
+ clock_t t = clock();
+ // form pts and ids, as in assemble_graph
+ std::vector<double> pts; // x[0], y[0], z[0], ... from MOAB
+ pts.resize(x.size()*3);
+ std::vector<int> ids; // point ids from MOAB
+ ids.resize(x.size());
+ for (size_t i=0; i<x.size(); i++)
+ {
+ pts[3*i]= x[i];
+ pts[3*i+1]=y[i];
+ pts[3*i+2]=z[i];
+ ids[i]=StartID+(int)i;
+ }
+ // do not get out until done!
+
+ Points= &pts[0];
+ GlobalIds=&ids[0];
+ NumPoints=(int)x.size();
+ NumEdges=NULL;
+ NborGlobalId=NULL;
+ NborProcs=NULL;
+ ObjWeights=NULL;
+ EdgeWeights=NULL;
+ Parts=NULL;
+
+ float version;
+ if (rank==0)
+ std::cout << "Initializing zoltan..." << std::endl;
+
+ Zoltan_Initialize(argcArg, argvArg, &version);
+
+ // Create Zoltan object. This calls Zoltan_Create.
+ if (NULL == myZZ) myZZ = new Zoltan(MPI_COMM_WORLD);
+
+ if (NULL == zmethod || !strcmp(zmethod, "RCB"))
+ SetRCB_Parameters();
+ else if (!strcmp(zmethod, "RIB"))
+ SetRIB_Parameters();
+ else if (!strcmp(zmethod, "HSFC"))
+ SetHSFC_Parameters();
+
+ // set # requested partitions
+ char buff[10];
+ sprintf(buff, "%d", nprocs);
+ int retval = myZZ->Set_Param("NUM_GLOBAL_PARTITIONS", buff);
+ if (ZOLTAN_OK != retval) return MB_FAILURE;
+
+ // request only partition assignments
+ retval = myZZ->Set_Param("RETURN_LISTS", "PARTITION ASSIGNMENTS");
+ if (ZOLTAN_OK != retval) return MB_FAILURE;
+
+
+ myZZ->Set_Num_Obj_Fn(mbGetNumberOfAssignedObjects, NULL);
+ myZZ->Set_Obj_List_Fn(mbGetObjectList, NULL);
+ myZZ->Set_Num_Geom_Fn(mbGetObjectSize, NULL);
+ myZZ->Set_Geom_Multi_Fn(mbGetObject, NULL);
+ myZZ->Set_Num_Edges_Multi_Fn(mbGetNumberOfEdges, NULL);
+ myZZ->Set_Edge_List_Multi_Fn(mbGetEdgeList, NULL);
+
+ // Perform the load balancing partitioning
+
+ int changes;
+ int numGidEntries;
+ int numLidEntries;
+ int dumnum1;
+ ZOLTAN_ID_PTR dum_local, dum_global;
+ int *dum1, *dum2;
+ int num_assign;
+ ZOLTAN_ID_PTR assign_gid, assign_lid;
+ int *assign_procs, *assign_parts;
+
+ if (rank ==0)
+ std::cout << "Computing partition using " << (zmethod ? zmethod : "RCB") <<
+ " method for " << nprocs << " processors..." << std::endl;
+
+ retval = myZZ->LB_Partition(changes, numGidEntries, numLidEntries,
+ dumnum1, dum_global, dum_local, dum1, dum2,
+ num_assign, assign_gid, assign_lid,
+ assign_procs, assign_parts);
+ if (ZOLTAN_OK != retval) return MB_FAILURE;
+
+ if (rank==0)
+ {
+ std::cout << " time to LB_partition " << (clock() - t) / (double) CLOCKS_PER_SEC << "s. \n";
+ t = clock();
+ }
+
+
+ for (size_t i=0; i<x.size(); i++)
+ sendToProcs.push_back(assign_procs[i]);
+ // free some memory after we are done
+ delete myZZ;
+ // free memory used by Zoltan
+ // form tuples for Gids!
+ TupleList gidProcs;
+
+ // allocate a TupleList of that size
+ gidProcs.initialize(1, 1, 0, 0, x.size()*2);// to account for some imbalances
+ gidProcs.enableWriteAccess();
+ for (size_t i = 0;i <sendToProcs.size(); i++)
+ {
+ gidProcs.vi_wr[i]=sendToProcs[i];
+ gidProcs.vl_wr[i]=StartID+i;
+ gidProcs.inc_n();
+ }
+
+ // now do a transfer
+ (mbpc->proc_config().crystal_router())->gs_transfer(1, gidProcs, 0);
+
+ // after this, every gidProc should contain the gids sent from other processes
+
+ int receivedGIDs=gidProcs.get_n();
+ for (int j=0;j<receivedGIDs; j++)
+ localGIDs.insert(gidProcs.vl_wr[j]);
+
+ return MB_SUCCESS;
+}
+
ErrorCode MBZoltan::partition_mesh_geom(const double part_geom_mesh_size,
const int nparts,
const char *zmethod,
@@ -255,7 +380,8 @@ ErrorCode MBZoltan::partition_mesh_geom(const double part_geom_mesh_size,
const int edge_weight,
const bool part_surf,
const bool ghost,
- const bool print_time)
+ const bool print_time,
+ const bool spherical_coords)
{
// should only be called in serial
if (mbpc->proc_config().proc_size() != 1) {
@@ -326,7 +452,8 @@ ErrorCode MBZoltan::partition_mesh_geom(const double part_geom_mesh_size,
if (part_geom_mesh_size < 0.) {
//if (!part_geom) {
- result = assemble_graph(part_dim, pts, ids, adjs, length, elems, part_geom); RR;
+ result = assemble_graph(part_dim, pts, ids, adjs, length, elems, part_geom,
+ spherical_coords); RR;
}
else {
#ifdef CGM
@@ -575,7 +702,7 @@ ErrorCode MBZoltan::assemble_graph(const int dimension,
std::vector<int> &moab_ids,
std::vector<int> &adjacencies,
std::vector<int> &length,
- Range &elems, bool part_geom)
+ Range &elems, bool part_geom, bool spherical_coords)
{
// assemble a graph with vertices equal to elements of specified dimension, edges
// signified by list of other elements to which an element is connected
@@ -632,6 +759,18 @@ ErrorCode MBZoltan::assemble_graph(const int dimension,
// copy those into coords vector
moab_ids.push_back(moab_id);
+ // transform coordinates to spherical coordinates, if requested
+ if (spherical_coords)
+ {
+ double R = avg_position[0]*avg_position[0]+avg_position[1]*avg_position[1]+avg_position[2]*avg_position[2];
+ R = sqrt(R);
+ double lat = asin(avg_position[2]/R);
+ double lon=atan2(avg_position[1], avg_position[0]);
+ avg_position[0]=lon;
+ avg_position[1]=lat;
+ avg_position[2]=R;
+ }
+
std::copy(avg_position, avg_position+3, std::back_inserter(coords));
}
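One note on the assemble_graph change above: the transform presumably puts
element centroids of a quasi-spherical mesh into (lon, lat, R) so that RCB
cuts in angular coordinates rather than slicing straight through the
sphere. A standalone sketch of the same math:

#include <cmath>

// Cartesian centroid -> (lon, lat, R); mirrors the transform added to
// MBZoltan::assemble_graph when spherical_coords is set.
void to_spherical(double pos[3])
{
  double R = std::sqrt(pos[0]*pos[0] + pos[1]*pos[1] + pos[2]*pos[2]);
  double lat = std::asin(pos[2] / R);      // latitude in [-pi/2, pi/2]
  double lon = std::atan2(pos[1], pos[0]); // longitude in (-pi, pi]
  pos[0] = lon;
  pos[1] = lat;
  pos[2] = R;
}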
diff --git a/tools/mbzoltan/MBZoltan.hpp b/tools/mbzoltan/MBZoltan.hpp
index 4a0e999..2acce87 100644
--- a/tools/mbzoltan/MBZoltan.hpp
+++ b/tools/mbzoltan/MBZoltan.hpp
@@ -113,7 +113,8 @@ using namespace moab;
const int edge_weight = 0,
const bool part_surf = false,
const bool ghost = false,
- const bool print_time = false);
+ const bool print_time = false,
+ const bool spherical_coords = false);
int get_mesh(std::vector<double> &pts, std::vector<int> &ids,
std::vector<int> &adjs, std::vector<int> &length,
@@ -126,6 +127,10 @@ using namespace moab;
const bool write_as_sets,
const bool write_as_tags);
+ // given x, y, z and a starting id, return where to send to each (x[i],y[i],z[i]) point
+ ErrorCode repartition(std::vector<double> & x,std::vector<double>&y, std::vector<double> &z, int StartID,
+ const char * zmethod, Range & localGIDs);
+
#ifdef CGM
ErrorCode write_partition(const int nparts,
DLIList<RefEntity*> entities,
@@ -199,7 +204,7 @@ using namespace moab;
std::vector<int> &moab_ids,
std::vector<int> &adjacencies,
std::vector<int> &length,
- Range &elems, bool part_geom = false);
+ Range &elems, bool part_geom = false, const bool spherical_coords=false);
#ifdef CGM
std::map<int, int> body_vertex_map, surf_vertex_map;
diff --git a/tools/mbzoltan/Makefile.am b/tools/mbzoltan/Makefile.am
index 3b68941..355ead8 100644
--- a/tools/mbzoltan/Makefile.am
+++ b/tools/mbzoltan/Makefile.am
@@ -11,14 +11,15 @@ AM_CPPFLAGS += $(CGM_CPPFLAGS)
endif
AM_LDFLAGS += $(ZOLTAN_LIB_FLAGS)
-lib_LTLIBRARIES = libmbzoltan.la
-libmbzoltan_la_LIBADD = $(top_builddir)/src/libMOAB.la $(ZOLTAN_LIBS)
-libmbzoltan_la_includedir = $(includedir)
-libmbzoltan_la_SOURCES = MBZoltan.cpp
-libmbzoltan_la_include_HEADERS = MBZoltan.hpp
+# lib_LTLIBRARIES = libmbzoltan.la
+# libmbzoltan_la_LIBADD = $(top_builddir)/src/libMOAB.la $(ZOLTAN_LIBS)
+# libmbzoltan_la_includedir = $(includedir)
+# libmbzoltan_la_SOURCES = MBZoltan.cpp
+# libmbzoltan_la_include_HEADERS = MBZoltan.hpp
bin_PROGRAMS = mbpart
-LDADD = libmbzoltan.la
+# LDADD = libmbzoltan.la
mbpart_SOURCES = mbpart.cpp
-mbpart_LDADD = libmbzoltan.la
+# mbpart_LDADD = libmbzoltan.la
+mbpart_LDADD = $(top_builddir)/src/libMOAB.la $(ZOLTAN_LIBS)
Repository URL: https://bitbucket.org/fathomteam/moab/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.