[MOAB-dev] commit/MOAB: 3 new changesets
commits-noreply at bitbucket.org
Fri Sep 6 13:50:00 CDT 2013
3 new commits in MOAB:
https://bitbucket.org/fathomteam/moab/commits/08bcff1aab09/
Changeset: 08bcff1aab09
Branch: None
User: tautges
Date: 2013-09-06 20:49:46
Summary: ScdInterface: adding a ParallelComm object to ScdParData; changing default global dimensions from -1 to 0;
adding a resolve_shared_ents argument to construct_box, in preparation for full parallel scd mesh creation;
adding resolve_shared logic, and logic for computing a partition if none is specified, for parallel construct_box
ParallelComm:
- in recv_entities, moving additions to L2 lists to after update_remote_data
- adding get_debug_verbosity function
Passes all parallel tests.
Affected #: 4 files
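For orientation, here is a minimal sketch of how the new parallel construct_box path is driven, modeled on the structured3 test updated in the third changeset below. It assumes MOAB built with MPI; the global dimensions and SQIJ partition method are illustrative, and error checks are elided:

#include "moab/Core.hpp"
#include "moab/ParallelComm.hpp"
#include "moab/ScdInterface.hpp"
#include "moab/HomXform.hpp"
using namespace moab;

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  Core mb;
  ParallelComm pcomm(&mb, MPI_COMM_WORLD);
  ScdInterface *scdi;
  ErrorCode rval = mb.query_interface(scdi);

  // describe the global box and hand the communicator to construct_box
  ScdParData par_data;
  par_data.pComm = &pcomm;   // new ScdParData member added in this changeset
  par_data.gDims[0] = par_data.gDims[1] = par_data.gDims[2] = 0;
  par_data.gDims[3] = par_data.gDims[4] = par_data.gDims[5] = 10;
  par_data.partMethod = ScdParData::SQIJ;

  // low == high asks construct_box to compute this proc's block from the
  // partition; a last argument != -1 requests shared-entity resolution
  ScdBox *box = NULL;
  rval = scdi->construct_box(HomCoord(), HomCoord(), NULL, 0, box,
                             NULL, &par_data, true, 0);

  MPI_Finalize();
  return (MB_SUCCESS == rval ? 0 : 1);
}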
diff --git a/src/ScdInterface.cpp b/src/ScdInterface.cpp
index 6382871..b51245b 100644
--- a/src/ScdInterface.cpp
+++ b/src/ScdInterface.cpp
@@ -104,11 +104,39 @@ ScdBox *ScdInterface::get_scd_box(EntityHandle eh)
ErrorCode ScdInterface::construct_box(HomCoord low, HomCoord high, const double * const coords, unsigned int num_coords,
ScdBox *& new_box, int * const lperiodic, ScdParData *par_data,
- bool assign_gids)
+ bool assign_gids, int tag_shared_ents)
{
// create a rectangular structured mesh block
ErrorCode rval;
+ int tmp_lper[3] = {0, 0, 0};
+ if (lperiodic) std::copy(lperiodic, lperiodic+3, tmp_lper);
+
+#ifndef USE_MPI
+ if (-1 != tag_shared_ents) ERRORR(MB_FAILURE, "Parallel capability requested but MOAB not compiled parallel.");
+ if (-1 == tag_shared_ents && !assign_gids) assign_gids = true; // need to assign gids in order to tag shared verts
+#else
+ if (par_data && low == high && ScdParData::NOPART != par_data->partMethod) {
+ // requesting creation of parallel mesh, so need to compute partition
+ if (!par_data->pComm) {
+ // this is a really boneheaded way to have to create a PC
+ par_data->pComm = ParallelComm::get_pcomm(mbImpl, 0);
+ if (NULL == par_data->pComm) par_data->pComm = new ParallelComm(mbImpl, MPI_COMM_WORLD);
+ }
+ int ldims[6];
+ rval = compute_partition(par_data->pComm->size(), par_data->pComm->rank(), *par_data,
+ ldims, tmp_lper, par_data->pDims);
+ ERRORR(rval, "Error returned from compute_partition.");
+ low.set(ldims);
+ high.set(ldims+3);
+ if (par_data->pComm->get_debug_verbosity() > 0) {
+ std::cout << "Proc " << par_data->pComm->rank() << ": " << *par_data;
+ std::cout << "Proc " << par_data->pComm->rank() << " local dims: "
+ << low << "-" << high << std::endl;
+ }
+ }
+#endif
+
HomCoord tmp_size = high - low + HomCoord(1, 1, 1, 0);
if ((tmp_size[1] && num_coords && (int)num_coords < tmp_size[0]) ||
(tmp_size[2] && num_coords && (int)num_coords < tmp_size[0]*tmp_size[1]))
@@ -150,7 +178,7 @@ ErrorCode ScdInterface::construct_box(HomCoord low, HomCoord high, const double
if (1 >= tmp_size[2]) this_tp = MBQUAD;
if (1 >= tmp_size[2] && 1 >= tmp_size[1]) this_tp = MBEDGE;
rval = seq_mgr->create_scd_sequence(low, high, this_tp, 0, start_ent, tmp_seq,
- lperiodic);
+ tmp_lper);
ERRORR(rval, "Trouble creating scd element sequence.");
new_box->elem_seq(tmp_seq);
@@ -181,6 +209,12 @@ ErrorCode ScdInterface::construct_box(HomCoord low, HomCoord high, const double
ERRORR(rval, "Trouble assigning global ids");
}
+#ifdef USE_MPI
+ if (par_data && -1 != tag_shared_ents) {
+ rval = tag_shared_vertices(par_data->pComm, new_box);
+ }
+#endif
+
return MB_SUCCESS;
}
diff --git a/src/moab/ScdInterface.hpp b/src/moab/ScdInterface.hpp
index bb24835..33e75e2 100644
--- a/src/moab/ScdInterface.hpp
+++ b/src/moab/ScdInterface.hpp
@@ -103,10 +103,10 @@ class ParallelComm;
//! struct for keeping parallel data in one place
class ScdParData {
public:
- ScdParData() : partMethod(NOPART) {
- gDims[0] = gDims[1] = gDims[2] = gDims[3] = gDims[4] = gDims[5] = -1;
- gPeriodic[0] = gPeriodic[1] = gPeriodic[2] = -1;
- pDims[0] = pDims[1] = pDims[2] = -1;
+ ScdParData() : partMethod(NOPART), pComm(NULL) {
+ gDims[0] = gDims[1] = gDims[2] = gDims[3] = gDims[4] = gDims[5] = 0;
+ gPeriodic[0] = gPeriodic[1] = gPeriodic[2] = 0;
+ pDims[0] = pDims[1] = pDims[2] = 0;
}
//! Partition method enumeration; these strategies are described in comments for
@@ -129,6 +129,8 @@ public:
//! number of procs in each direction
int pDims[3];
+ //! parallel communicator object for this par scd mesh
+ ParallelComm *pComm;
};
class ScdInterface
@@ -168,11 +170,12 @@ public:
* \param par_data If non-NULL, this will get stored on the ScdBox once created, contains info
* about global parallel nature of ScdBox across procs
* \param assign_global_ids If true, assigns 1-based global ids to vertices using GLOBAL_ID_TAG_NAME
+ * \param resolve_shared_ents If != -1, resolves shared entities up to and including dimension equal to value
*/
ErrorCode construct_box(HomCoord low, HomCoord high, const double * const coords, unsigned int num_coords,
ScdBox *& new_box, int * const lperiodic = NULL,
ScdParData * const par_data = NULL,
- bool assign_global_ids = false);
+ bool assign_global_ids = false, int resolve_shared_ents = -1);
//! Create a structured sequence of vertices, quads, or hexes
/** Starting handle for the sequence is available from the returned ScdBox.
@@ -1429,11 +1432,10 @@ inline const int *ScdBox::locally_periodic() const
inline std::ostream &operator<<(std::ostream &str, const ScdParData &pd)
{
- static const char *PartitionMethodNames[] = {"NOPART", "ALLJORKORI", "ALLJKBAL", "SQIJ", "SQJK", "SQIJK"};
- str << "Partition method = " << PartitionMethodNames[pd.partMethod] << ", gDims = ("
+ str << "Partition method = " << ScdParData::PartitionMethodNames[pd.partMethod] << ", gDims = ("
<< pd.gDims[0] << "," << pd.gDims[1] << "," << pd.gDims[2] << ")-("
<< pd.gDims[3] << "," << pd.gDims[4] << "," << pd.gDims[5] << "), gPeriodic = ("
- << pd.gPeriodic[0] << ", " << pd.gPeriodic[1] << "," << pd.gPeriodic[2] << "), pDims = ("
+ << pd.gPeriodic[0] << "," << pd.gPeriodic[1] << "," << pd.gPeriodic[2] << "), pDims = ("
<< pd.pDims[0] << "," << pd.pDims[1] << "," << pd.pDims[2] << ")" << std::endl;
return str;
}
diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index ab680aa..cdd9433 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -2060,13 +2060,6 @@ ErrorCode ParallelComm::recv_entities(std::set<unsigned int>& recv_procs,
// should have a new handle now
assert(new_h);
- // if a new multi-shared entity, save owner for subsequent lookup in L2 lists
- if (store_remote_handles && !is_iface && num_ps > 2) {
- L2hrem.push_back(hs[0]);
- L2hloc.push_back(new_h);
- L2p.push_back(ps[0]);
- }
-
created_here = true;
}
@@ -2084,14 +2077,24 @@ ErrorCode ParallelComm::recv_entities(std::set<unsigned int>& recv_procs,
if (created_here) new_ents.push_back(new_h);
if (new_h && store_remote_handles) {
+ unsigned char new_pstat = 0x0;
+ if (is_iface) new_pstat = PSTATUS_INTERFACE;
+ else if (created_here) {
+ if (created_iface) new_pstat = PSTATUS_NOT_OWNED;
+ else new_pstat = PSTATUS_GHOST | PSTATUS_NOT_OWNED;
+ }
// update sharing data and pstatus, adjusting order if iface
- result = update_remote_data(new_h, &ps[0], &hs[0], num_ps,
- (is_iface ? PSTATUS_INTERFACE :
- (created_here ? (created_iface ? PSTATUS_NOT_OWNED:
- PSTATUS_GHOST | PSTATUS_NOT_OWNED) : 0)));
- RRA("");
+ result = update_remote_data(new_h, &ps[0], &hs[0], num_ps, new_pstat);
+ RRA("unpack_entities");
+ // if a new multi-shared entity, save owner for subsequent lookup in L2 lists
+ if (store_remote_handles && !is_iface && num_ps > 2) {
+ L2hrem.push_back(hs[0]);
+ L2hloc.push_back(new_h);
+ L2p.push_back(ps[0]);
+ }
+
// need to send this new handle to all sharing procs
if (!is_iface) {
for (j = 0; j < num_ps; j++) {
@@ -8644,6 +8647,11 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
myDebug->set_verbosity(verb);
}
+ int ParallelComm::get_debug_verbosity()
+ {
+ return myDebug->get_verbosity();
+ }
+
ErrorCode ParallelComm::get_entityset_procs( EntityHandle set,
std::vector<unsigned>& ranks ) const
{
diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index f0f1a2b..580168d 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -892,6 +892,9 @@ namespace moab {
//! set the verbosity level of output from this pcomm
void set_debug_verbosity(int verb);
+ //! get the verbosity level of output from this pcomm
+ int get_debug_verbosity();
+
/* \brief Gather tag value from entities down to root proc
* This function gathers data from a domain-decomposed mesh onto a global mesh
* represented on the root processor. On the root, this gather mesh is distinct from
https://bitbucket.org/fathomteam/moab/commits/da66f5c593f5/
Changeset: da66f5c593f5
Branch: None
User: tautges
Date: 2013-09-06 20:49:46
Summary: Cosmetic changes, mostly new comments for more clarity.
Affected #: 3 files
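As a usage note, a small self-contained sketch of the verbosity-gated diagnostics pattern this changeset applies in ScdInterface::tag_shared_vertices; the helper name here is hypothetical:

#include "moab/ParallelComm.hpp"

// dump shared-entity lists only when the pcomm's debug verbosity asks for
// it, mirroring the new check added to tag_shared_vertices
void dump_if_verbose(moab::ParallelComm *pcomm)
{
  if (pcomm->get_debug_verbosity() > 1)
    pcomm->list_entities(NULL, 1);
}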
diff --git a/src/ScdInterface.cpp b/src/ScdInterface.cpp
index b51245b..d344037 100644
--- a/src/ScdInterface.cpp
+++ b/src/ScdInterface.cpp
@@ -729,6 +729,7 @@ ErrorCode ScdInterface::tag_shared_vertices(ParallelComm *pcomm, ScdBox *box)
for (std::vector<int>::iterator pit = procs.begin(); pit != procs.end(); pit++)
pcomm->get_buffers(*pit);
+ if (pcomm->get_debug_verbosity() > 1) pcomm->list_entities(NULL, 1);
#ifndef NDEBUG
rval = pcomm->check_all_shared_handles();
diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index cdd9433..c04f11a 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -6838,6 +6838,8 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
unsigned int /*to_proc*/,
Buffer *buff)
{
+ assert(std::find(L1hloc.begin(), L1hloc.end(), (EntityHandle)0) == L1hloc.end());
+
// 2 vectors of handles plus ints
buff->check_space(((L1p.size()+1)*sizeof(int) +
(L1hloc.size()+1)*sizeof(EntityHandle) +
@@ -6846,6 +6848,8 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
// should be in pairs of handles
PACK_INT(buff->buff_ptr, L1hloc.size());
PACK_INTS(buff->buff_ptr, &L1p[0], L1p.size());
+ // pack handles in reverse order, (remote, local), so on destination they
+ // are ordered (local, remote)
PACK_EH(buff->buff_ptr, &L1hrem[0], L1hrem.size());
PACK_EH(buff->buff_ptr, &L1hloc[0], L1hloc.size());
@@ -6868,19 +6872,22 @@ ErrorCode ParallelComm::post_irecv(std::vector<unsigned int>& shared_procs,
buff_ptr += num_eh * sizeof(int);
unsigned char *buff_rem = buff_ptr + num_eh * sizeof(EntityHandle);
ErrorCode result;
- EntityHandle hpair[2], dum_h;
+ EntityHandle hpair[2], new_h;
int proc;
for (int i = 0; i < num_eh; i++) {
UNPACK_INT(buff_proc, proc);
+ // handles packed (local, remote), though here local is either on this
+ // proc or owner proc, depending on value of proc (-1 = here, otherwise owner);
+ // this is decoded in find_existing_entity
UNPACK_EH(buff_ptr, hpair, 1);
UNPACK_EH(buff_rem, hpair+1, 1);
if (-1 != proc) {
result = find_existing_entity(false, proc, hpair[0], 3, NULL, 0,
mbImpl->type_from_handle(hpair[1]),
- L2hloc, L2hrem, L2p, dum_h);
+ L2hloc, L2hrem, L2p, new_h);
RRA("Didn't get existing entity.");
- if (dum_h) hpair[0] = dum_h;
+ if (new_h) hpair[0] = new_h;
else hpair[0] = 0;
}
if (!(hpair[0] && hpair[1])) return MB_FAILURE;
diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index 580168d..a726245 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -500,7 +500,8 @@ namespace moab {
/** \brief Get the shared processors/handles for an entity
* Get the shared processors/handles for an entity. Arrays must
- * be large enough to receive data for all sharing procs.
+ * be large enough to receive data for all sharing procs. Does *not* include
+ * this proc if only shared with one other proc.
* \param entity Entity being queried
* \param ps Pointer to sharing proc data
* \param hs Pointer to shared proc handle data
https://bitbucket.org/fathomteam/moab/commits/4c1554a1e4d8/
Changeset: 4c1554a1e4d8
Branch: master
User: tautges
Date: 2013-09-06 20:49:46
Summary: Cleaning up implementation of update_remote_data and set_sharing_data.
Substantially changed structured3 to use ScdInterface-based creation of parallel mesh,
and added ghosting to this example too.
Passes all tests.
Affected #: 3 files
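For reference, a short sketch of the sharing invariants the reworked set_sharing_data now asserts, exercised through the new print_pstatus overload; the wrapper function and the sample pstatus value are illustrative:

#include "MBParallelConventions.h"
#include "moab/ParallelComm.hpp"

// Invariants checked in set_sharing_data after this cleanup:
// - exactly 2 sharing procs   => SHARED set, MULTISHARED clear
// - more than 2 sharing procs => SHARED and MULTISHARED both set
// - GHOST implies SHARED
// - if owned locally, this proc is first in the sharing-proc list;
//   otherwise NOT_OWNED must be set
void show_pstatus(moab::ParallelComm *pcomm)
{
  unsigned char pstat = PSTATUS_SHARED | PSTATUS_MULTISHARED | PSTATUS_NOT_OWNED;
  pcomm->print_pstatus(pstat);  // new overload, prints to std::cout
}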
diff --git a/src/parallel/ParallelComm.cpp b/src/parallel/ParallelComm.cpp
index c04f11a..2d9e61c 100644
--- a/src/parallel/ParallelComm.cpp
+++ b/src/parallel/ParallelComm.cpp
@@ -2527,6 +2527,7 @@ ErrorCode ParallelComm::recv_entities(std::set<unsigned int>& recv_procs,
}
// add myself, if it isn't there already
+ idx = 0;
if (new_ps[0] != (int)rank()) {
idx = std::find(&new_ps[0], &new_ps[0] + new_numps, rank()) - &new_ps[0];
if (idx == new_numps) {
@@ -2536,30 +2537,23 @@ ErrorCode ParallelComm::recv_entities(std::set<unsigned int>& recv_procs,
}
else if (!new_hs[idx] && new_numps > 2)
new_hs[idx] = new_h;
-
- assert(new_hs[idx] == new_h || new_numps <= 2);
- }
-
- // adjust for interface layer if necessary
- if (add_pstat & PSTATUS_INTERFACE) {
- idx = std::min_element(&new_ps[0], &new_ps[0]+new_numps) - &new_ps[0];
- if (idx) {
- int tag_proc = new_ps[idx];
- new_ps[idx] = new_ps[0];
- new_ps[0] = tag_proc;
- EntityHandle tag_h = new_hs[idx];
- new_hs[idx] = new_hs[0];
- new_hs[0] = tag_h;
- if (new_ps[0] != (int)rank()) new_pstat |= PSTATUS_NOT_OWNED;
- }
}
- // update for shared, multishared
+ // proc list is complete; update for shared, multishared
if (new_numps > 1) {
if (new_numps > 2) new_pstat |= PSTATUS_MULTISHARED;
new_pstat |= PSTATUS_SHARED;
}
-
+
+ // if multishared, not ghost or interface, and not not_owned, I'm the owner and should be the first proc
+ assert(new_ps[idx] == (int)rank());
+ if ((new_numps > 2 && !(new_pstat&(PSTATUS_INTERFACE|PSTATUS_GHOST|PSTATUS_NOT_OWNED))) ||
+ (new_pstat&PSTATUS_INTERFACE && !(new_pstat&PSTATUS_NOT_OWNED))
+ ) {
+ std::swap(new_ps[0], new_ps[idx]);
+ std::swap(new_hs[0], new_hs[idx]);
+ }
+
/*
plist("new_ps", new_ps, new_numps);
plist("new_hs", new_hs, new_numps);
@@ -2567,7 +2561,7 @@ ErrorCode ParallelComm::recv_entities(std::set<unsigned int>& recv_procs,
std::cout << ", new_pstat=" << ostr.c_str() << std::endl;
std::cout << std::endl;
*/
-
+
result = set_sharing_data(new_h, new_pstat, num_exist, new_numps, &new_ps[0], &new_hs[0]);
RRA("update_remote_data: setting sharing data");
@@ -4076,7 +4070,7 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
for (v = 0; v < procs.size(); v++) {
rval = pc[procs[v]]->update_remote_data(handles[v],
&procs[0], &handles[0], procs.size(),
- PSTATUS_INTERFACE);
+ (procs[0] == (int)pc[procs[v]]->rank() ? PSTATUS_INTERFACE : (PSTATUS_NOT_OWNED|PSTATUS_INTERFACE)));
if (MB_SUCCESS != rval) return rval;
}
}
@@ -5919,31 +5913,16 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
{
// set sharing data to what's passed in; may have to clean up existing sharing tags
// if things changed too much
-
- ErrorCode result;
- if (pstatus & PSTATUS_MULTISHARED && new_nump < 3) {
- // need to remove multishared tags
- result = mbImpl->tag_delete_data(sharedps_tag(), &ent, 1);
- RRA("");
- result = mbImpl->tag_delete_data(sharedhs_tag(), &ent, 1);
- RRA("");
- pstatus ^= PSTATUS_MULTISHARED;
- if (new_nump < 2)
- pstatus = 0x0;
- else if (ps[0] != (int)proc_config().proc_rank())
- pstatus |= PSTATUS_NOT_OWNED;
- }
- else if (pstatus & PSTATUS_SHARED && new_nump < 2) {
- hs[0] = 0;
- ps[0] = -1;
- pstatus = 0x0;
- }
-
- if (new_nump > 2) {
- result = mbImpl->tag_set_data(sharedps_tag(), &ent, 1, ps);
- RRA("");
- result = mbImpl->tag_set_data(sharedhs_tag(), &ent, 1, hs);
- RRA("");
+
+ // check for consistency in input data
+ assert(new_nump > 1 &&
+ ((new_nump == 2 && pstatus&PSTATUS_SHARED && !(pstatus&PSTATUS_MULTISHARED)) || // if <= 2 must not be multishared
+ (new_nump > 2 && pstatus&PSTATUS_SHARED && pstatus&PSTATUS_MULTISHARED)) && // if > 2 procs, must be multishared
+ (!(pstatus&PSTATUS_GHOST) || pstatus&PSTATUS_SHARED) && // if ghost, it must also be shared
+ (new_nump < 3 || (pstatus&PSTATUS_NOT_OWNED && ps[0] != (int)rank()) || // I'm not owner and first proc not me
+ (!(pstatus&PSTATUS_NOT_OWNED) && ps[0] == (int)rank())) // I'm owner and first proc is me
+ );
+
#ifndef NDEBUG
{
// check for duplicates in proc list
@@ -5954,27 +5933,56 @@ ErrorCode ParallelComm::resolve_shared_ents(EntityHandle this_set,
assert(dp == (int)dumprocs.size());
}
#endif
- if (old_nump < 3) {
- // reset sharedp and sharedh tags
- int tmp_p = -1;
- EntityHandle tmp_h = 0;
- result = mbImpl->tag_set_data(sharedp_tag(), &ent, 1, &tmp_p);
- RRA("");
- result = mbImpl->tag_set_data(sharedh_tag(), &ent, 1, &tmp_h);
- RRA("");
- }
+
+ ErrorCode result;
+ // reset any old data that needs to be
+ if (old_nump > 2 && new_nump < 3) {
+ // need to remove multishared tags
+ result = mbImpl->tag_delete_data(sharedps_tag(), &ent, 1);
+ RRA("set_sharing_data:1");
+ result = mbImpl->tag_delete_data(sharedhs_tag(), &ent, 1);
+ RRA("set_sharing_data:2");
+// if (new_nump < 2)
+// pstatus = 0x0;
+// else if (ps[0] != (int)proc_config().proc_rank())
+// pstatus |= PSTATUS_NOT_OWNED;
+ }
+ else if ((old_nump < 3 && new_nump > 2) || (old_nump > 1 && new_nump == 1)) {
+ // reset sharedp and sharedh tags
+ int tmp_p = -1;
+ EntityHandle tmp_h = 0;
+ result = mbImpl->tag_set_data(sharedp_tag(), &ent, 1, &tmp_p);
+ RRA("set_sharing_data:3");
+ result = mbImpl->tag_set_data(sharedh_tag(), &ent, 1, &tmp_h);
+ RRA("set_sharing_data:4");
+ }
+
+ assert("check for multishared/owner I'm first proc" &&
+ (!(pstatus & PSTATUS_MULTISHARED) || (pstatus & (PSTATUS_NOT_OWNED|PSTATUS_GHOST)) || (ps[0] == (int)rank())) &&
+ "interface entities should have > 1 proc" &&
+ (!(pstatus & PSTATUS_INTERFACE) || new_nump > 1) &&
+ "ghost entities should have > 1 proc" &&
+ (!(pstatus & PSTATUS_GHOST) || new_nump > 1)
+ );
+
+ // now set new data
+ if (new_nump > 2) {
+ result = mbImpl->tag_set_data(sharedps_tag(), &ent, 1, ps);
+ RRA("set_sharing_data:5");
+ result = mbImpl->tag_set_data(sharedhs_tag(), &ent, 1, hs);
+ RRA("set_sharing_data:6");
}
else {
unsigned int j = (ps[0] == (int)procConfig.proc_rank() ? 1 : 0);
assert(-1 != ps[j]);
result = mbImpl->tag_set_data(sharedp_tag(), &ent, 1, ps+j);
- RRA("");
+ RRA("set_sharing_data:7");
result = mbImpl->tag_set_data(sharedh_tag(), &ent, 1, hs+j);
- RRA("");
+ RRA("set_sharing_data:8");
}
result = mbImpl->tag_set_data(pstatus_tag(), &ent, 1, &pstatus);
- RRA("");
+ RRA("set_sharing_data:9");
if (old_nump > 1 && new_nump < 2)
sharedEnts.erase(std::find(sharedEnts.begin(), sharedEnts.end(), ent));
@@ -8998,4 +9006,11 @@ void ParallelComm::print_pstatus(unsigned char pstat, std::string &ostr)
ostr = str.str();
}
+void ParallelComm::print_pstatus(unsigned char pstat)
+{
+ std::string str;
+ print_pstatus(pstat, str);
+ std::cout << str.c_str() << std::endl;
+}
+
} // namespace moab
diff --git a/src/parallel/moab/ParallelComm.hpp b/src/parallel/moab/ParallelComm.hpp
index a726245..e02d4b3 100644
--- a/src/parallel/moab/ParallelComm.hpp
+++ b/src/parallel/moab/ParallelComm.hpp
@@ -664,6 +664,9 @@ namespace moab {
//! print contents of pstatus value in human-readable form
void print_pstatus(unsigned char pstat, std::string &ostr);
//! print contents of pstatus value in human-readable form to std::cout
+ void print_pstatus(unsigned char pstat);
+
// ==================================
// \section IMESHP-RELATED FUNCTIONS
// ==================================
diff --git a/test/parallel/structured3.cpp b/test/parallel/structured3.cpp
index 3ac5e9c..151aa34 100644
--- a/test/parallel/structured3.cpp
+++ b/test/parallel/structured3.cpp
@@ -1,242 +1,78 @@
+#include "moab/Core.hpp"
+#include "moab/ParallelComm.hpp"
+#include "moab/ScdInterface.hpp"
+#include "moab/HomXform.hpp"
+#include "TestUtil.hpp"
#include <string>
#include <iomanip>
#include <iostream>
#include <cassert>
-
-#include <moab/Core.hpp>
-#include <moab/Interface.hpp>
-#include <moab/ParallelComm.hpp>
-#include <moab/HomXform.hpp>
-#include <MBParallelConventions.h>
-#include <MBTagConventions.hpp>
-
-using namespace std;
+
using namespace moab;
-
-// Number of cells in each direction:
-const int NC = 20;
+
+ // Number of cells in each direction:
+const int NC = 2;
const int ITERS = 50;
-
-// Number of processes:
-const int NPROCS = 4;
-
-// Domain size:
-const double DSIZE = 10.0;
-
-// MOAB objects:
-Interface *mbint = NULL;
-ParallelComm *mbpc = NULL;
-
-// Local domain starting and ending hex indexes:
-int is, js, ks;
-int ie, je, ke;
-
-// Obvious:
-int rank;
-int size;
-
-Range all_verts;
-
-void set_local_domain_bounds();
-void create_hexes_and_verts();
-void resolve_and_exchange();
-void error(ErrorCode err);
-void tag_get_set(Tag tag);
-
+
+void create_parallel_mesh();
+
int main(int argc, char *argv[])
{
-
-
MPI_Init(&argc, &argv);
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- MPI_Comm_size(MPI_COMM_WORLD, &size);
- if(size != 4 && size != 2) {
- cerr << "Run this with 2 or 4 processes\n";
- exit(1);
+ int err = RUN_TEST(create_parallel_mesh);
+
+ MPI_Finalize();
+ return err;
+}
+
+void create_parallel_mesh()
+{
+ Core mbint;
+ ParallelComm pc(&mbint, MPI_COMM_WORLD);
+ ScdInterface *scdi;
+ ErrorCode rval = mbint.query_interface(scdi);
+ CHECK_ERR(rval);
+ pc.set_debug_verbosity(2);
+
+ // create a structured mesh in parallel
+ ScdBox *new_box;
+ ScdParData par_data;
+ par_data.pComm = &pc;
+ par_data.gDims[0] = par_data.gDims[1] = par_data.gDims[2] = 0;
+ par_data.gDims[3] = par_data.gDims[4] = par_data.gDims[5] = NC;
+ par_data.gDims[5] = 1;
+ par_data.partMethod = ScdParData::SQIJK;
+ rval = scdi->construct_box(HomCoord(), HomCoord(), NULL, 0, // no vertex positions
+ new_box, NULL, // not locally periodic
+ &par_data, true, true); // assign global ids & resolve shared verts
+ CHECK_ERR(rval);
+
+ rval = pc.exchange_ghost_cells(-1, -1, 0, 0, true, true);
+ CHECK_ERR(rval);
+
+ pc.list_entities(0,-1);
+
+ rval = pc.exchange_ghost_cells(-1, 0, 1, 0, true);
+ if (MB_SUCCESS != rval) {
+ std::string err;
+ mbint.get_last_error(err);
+ std::cerr << "Error: proc " << pc.rank() << ": " << err << std::endl;
}
+ CHECK_ERR(rval);
- mbint = new Core();
- mbpc = new ParallelComm(mbint, MPI_COMM_WORLD);
-
- set_local_domain_bounds();
- create_hexes_and_verts();
- resolve_and_exchange();
-
- error(mbint->get_entities_by_type(0, MBVERTEX, all_verts));
-
+ pc.list_entities(0,-1);
+
+ return;
-// Create a tag
+ // Create a tag, used in exchange_tags
Tag tag;
- error(mbint->tag_get_handle("test_tag", 1, MB_TYPE_DOUBLE, tag, MB_TAG_DENSE|MB_TAG_EXCL));
+ double def_val = 1.0;
+ rval = mbint.tag_get_handle("test_tag", 1, MB_TYPE_DOUBLE, tag, MB_TAG_DENSE|MB_TAG_EXCL, &def_val);
+ CHECK_ERR(rval);
Range empty_range;
- tag_get_set(tag);
-
- int i;
- for(i = 0; i < ITERS; i++) {
- std::cout << i << endl;
- mbpc->exchange_tags(tag, empty_range);
- }
-
- delete mbpc;
- delete mbint;
-
- MPI_Finalize();
- return 0;
-}
-
-void set_local_domain_bounds()
-{
- switch(rank) {
- case 0:
-
- switch (size) {
- case 2:
- is = 0; ie = NC/2;
- js = 0; je = NC;
- ks = 0; ke = NC;
- break;
-
- case 4:
- is = 0; ie = NC/2;
- js = 0; je = NC/2;
- ks = 0; ke = NC;
- break;
- }
- break;
-
- case 1:
-
- switch(size) {
- case 2:
- is = NC/2; ie = NC;
- js = 0; je = NC;
- ks = 0; ke = NC;
- break;
-
- case 4:
- is = NC/2; ie = NC;
- js = 0; je = NC/2;
- ks = 0; ke = NC;
- break;
- }
- break;
-
- case 2:
- is = 0; ie = NC/2;
- js = NC/2; je = NC;
- ks = 0; ke = NC;
- break;
-
- case 3:
- is = NC/2; ie = NC;
- js = NC/2; je = NC;
- ks = 0; ke = NC;
- break;
-
- default:
- cerr << "Run this with 4 processes\n";
- exit(1);
- }
-}
-
-
-void create_hexes_and_verts()
-{
- Core *mbcore = dynamic_cast<Core*>(mbint);
- HomCoord coord_min(0,0,0);
- HomCoord coord_max(NC/2, NC, NC);
- EntitySequence* vertex_seq = NULL;
- EntitySequence* cell_seq = NULL;
- EntityHandle vs, cs;
-
- error(mbcore->create_scd_sequence(coord_min, coord_max, MBVERTEX, 1, vs, vertex_seq));
- error(mbcore->create_scd_sequence(coord_min, coord_max, MBHEX, 1, cs, cell_seq));
-
- HomCoord p1(0,0,0);
- HomCoord p2(NC/2,0,0);
- HomCoord p3(0,NC/2,0);
-
- error(mbcore->add_vsequence(vertex_seq, cell_seq, p1, p1, p2, p2, p3, p3));
-
- // Set global id's:
- int gid;
- Tag global_id_tag;
- error(mbint->tag_get_handle(GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, global_id_tag));
- EntityHandle handle = vs;
- int i,j,k;
-
- ErrorCode err;
-
- for(i = is; i < ie + 1; i++)
- for(j = js; j < je + 1; j++)
- for(k = ks; k < ke + 1; k++) {
- gid = k + j*(NC+1) + i*(NC+1)*(NC+1) + 1;
- err = mbint->tag_set_data(global_id_tag, &handle, 1, &gid);
- if(err != MB_SUCCESS) {
- exit(1);
- }
- handle++;
- }
-
- handle = cs;
- for(i = is; i < ie; i++)
- for(j = js; j < je; j++)
- for(k = ks; k < ke; k++) {
- gid = k + j*NC + i*NC*NC + 1;
- error(mbint->tag_set_data(global_id_tag, &handle, 1, &gid));
- handle++;
- }
-}
-
-
-void resolve_and_exchange()
-{
- EntityHandle entity_set;
-
- // Create the entity set:
- error(mbint->create_meshset(MESHSET_SET, entity_set));
-
- // Get a list of hexes:
- Range range;
- error(mbint->get_entities_by_type(0, MBHEX, range));
-
- // Add entities to the entity set:
- error(mbint->add_entities(entity_set, range));
-
- // Add the MATERIAL_SET tag to the entity set:
- Tag tag;
- error(mbint->tag_get_handle(MATERIAL_SET_TAG_NAME, 1, MB_TYPE_INTEGER, tag));
- error(mbint->tag_set_data(tag, &entity_set, 1, &rank));
-
- // Set up partition sets. This is where MOAB is actually told what
- // entities each process owns:
- error(mbint->get_entities_by_type_and_tag(0, MBENTITYSET,
- &tag, NULL, 1,
- mbpc->partition_sets()));
-
- // Finally, determine which entites are shared and exchange the
- // ghosted entities:
- error(mbpc->resolve_shared_ents(0, -1, -1));
- error(mbpc->exchange_ghost_cells(-1, 0, 1, 0, true));
-}
-
-void error(ErrorCode err)
-{
- if(err != MB_SUCCESS) {
- cerr << "Error: MOAB function failed\n";
- assert(0);
- }
-}
-
-void tag_get_set(Tag tag)
-{
- Range::iterator iter;
- double data;
-
- for(iter = all_verts.begin(); iter != all_verts.end(); iter++) {
- data = 1.0;
- mbint->tag_set_data(tag, &(*iter), 1, &data);
- mbint->tag_get_data(tag, &(*iter), 1, &data);
+ for(int i = 0; i < ITERS; i++) {
+ rval = pc.exchange_tags(tag, empty_range);
+ CHECK_ERR(rval);
}
}
Repository URL: https://bitbucket.org/fathomteam/moab/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.