[MOAB-dev] r2972 - MOAB/branches/parallel_ghosting/parallel
tautges at mcs.anl.gov
Fri Jun 26 17:22:55 CDT 2009
Author: tautges
Date: 2009-06-26 17:22:55 -0500 (Fri, 26 Jun 2009)
New Revision: 2972
Modified:
MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp
MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp
MOAB/branches/parallel_ghosting/parallel/ReadParallel.hpp
MOAB/branches/parallel_ghosting/parallel/parallel_unit_tests.cpp
MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp
Log:
Fixing pcomm_unit for more arbitrary cases, where you don't know
beforehand how many procs you're communicating with, or which ones.
pcomm_unit: added a test_pack_shared_arbitrary test that exercises a
parallel read of an arbitrary file (may be commented out eventually).
parallel_unit_tests: replaced ptest.cub with 64bricks_512hex.h5m.
ReadParallel: made some functions public so that they can be called from
tests.
MBParallelComm:
- don't initialize the size of the message buffers; leave them zero-sized
- move the calls to get_buffers into get_interface_procs, controlled by a
boolean flag to that function (false by default, so existing callers are
unaffected); see the sketch after this list
- in the serial version of exchange_ghost_cells, don't call the unpack
function when a buffer is empty. This happens when a proc receives
entities owned by a third proc and has to return handles to that proc,
but that third proc never sent this proc any entities, even though it
appears in this proc's list of communicating procs. The parallel version
never sees this case because it never receives a message with entities
from that third proc.
- added a function to set the proc size in addition to the proc rank
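A minimal sketch of the new calling pattern from the second bullet;
establish_comm_buffers and the pcomm pointer are hypothetical stand-ins,
while get_interface_procs and get_buffers are the MBParallelComm members
touched in the diff below:

#include <set>
#include "MBParallelComm.hpp"

// Hypothetical helper: with the new flag set to true, one call both
// collects the interface procs and allocates send/recv buffers for them.
MBErrorCode establish_comm_buffers(MBParallelComm *pcomm)
{
  std::set<unsigned int> iface_procs;
  MBErrorCode rval = pcomm->get_interface_procs(iface_procs, true);
  if (MB_SUCCESS != rval) return rval;

  // Before this change the caller looped over iface_procs and called
  // pcomm->get_buffers(*sit) for each proc itself.
  return MB_SUCCESS;
}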
Modified: MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp 2009-06-26 22:19:54 UTC (rev 2971)
+++ MOAB/branches/parallel_ghosting/parallel/MBParallelComm.cpp 2009-06-26 22:22:55 UTC (rev 2972)
@@ -104,7 +104,7 @@
unsigned int _old_size = buff_ptr - &buff_vec[0], \
_new_size = _old_size + (addl_space); \
if (_new_size > buff_vec.size()) { \
- buff_vec.resize(1.5*_new_size); \
+ buff_vec.resize(1.5*_new_size); \
buff_ptr = &buff_vec[_new_size-(addl_space)];} }
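The macro modified in this hunk grows a pack buffer in place; the following
standalone sketch shows the same growth rule (check_buff_space is a
hypothetical name, since the macro's own name isn't visible in this excerpt):

#include <vector>
#include <cstddef>

// Ensure at least addl_space more bytes fit past buff_ptr: grow the vector
// to 1.5x the required size (amortizing repeated resizes) and re-point
// buff_ptr at the same logical offset, since resize may reallocate storage.
// Assumes buff_ptr already points into buff_vec, as in the macro above.
void check_buff_space(std::vector<unsigned char> &buff_vec,
                      unsigned char *&buff_ptr, std::size_t addl_space)
{
  std::size_t old_size = buff_ptr - &buff_vec[0];
  std::size_t new_size = old_size + addl_space;
  if (new_size > buff_vec.size()) {
    buff_vec.resize((std::size_t)(1.5 * new_size));
    buff_ptr = &buff_vec[new_size - addl_space];
  }
}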
@@ -330,11 +330,11 @@
if (vit == buffProcs.end()) {
ind = buffProcs.size();
buffProcs.push_back((unsigned int)to_proc);
- ownerSBuffs.push_back(std::vector<unsigned char>(INITIAL_BUFF_SIZE));
- ghostRBuffs.push_back(std::vector<unsigned char>(INITIAL_BUFF_SIZE));
+ ownerSBuffs.push_back(std::vector<unsigned char>());
+ ghostRBuffs.push_back(std::vector<unsigned char>());
// allocate these other buffs in case we're storing remote handles
- ownerRBuffs.push_back(std::vector<unsigned char>(INITIAL_BUFF_SIZE));
- ghostSBuffs.push_back(std::vector<unsigned char>(INITIAL_BUFF_SIZE));
+ ownerRBuffs.push_back(std::vector<unsigned char>());
+ ghostSBuffs.push_back(std::vector<unsigned char>());
if (is_new) *is_new = true;
}
else {
@@ -1427,7 +1427,8 @@
MBErrorCode MBParallelComm::list_entities(const MBEntityHandle *ents, int num_ents)
{
if (NULL == ents && 0 == num_ents) {
- return list_entities(NULL, 0);
+ sharedEnts.print("Shared entities:\n");
+ return MB_SUCCESS;
}
else if (NULL == ents || 0 == num_ents) {
@@ -2611,10 +2612,8 @@
// establish comm procs and buffers for them
std::set<unsigned int> procs;
- result = get_interface_procs(procs);
+ result = get_interface_procs(procs, true);
RRA("Trouble getting iface procs.");
- for (std::set<unsigned int>::iterator sit = procs.begin(); sit != procs.end(); sit++)
- get_buffers(*sit);
// resolve shared entity remote handles; implemented in ghost cell exchange
// code because it's so similar
@@ -2718,11 +2717,8 @@
if (MB_SUCCESS != rval) return rval;
// establish comm procs and buffers for them
psets.clear();
- rval = pc[p]->get_interface_procs(psets);
+ rval = pc[p]->get_interface_procs(psets, true);
if (MB_SUCCESS != rval) return rval;
- for (std::set<unsigned int>::iterator sit = psets.begin(); sit != psets.end(); sit++)
- pc[p]->get_buffers(*sit);
-
}
return MB_SUCCESS;
@@ -3167,7 +3163,8 @@
}
//! get processors with which this processor communicates; sets are sorted by processor
-MBErrorCode MBParallelComm::get_interface_procs(std::set<unsigned int> &procs_set)
+MBErrorCode MBParallelComm::get_interface_procs(std::set<unsigned int> &procs_set,
+ bool get_buffs)
{
// make sure the sharing procs vector is empty
procs_set.clear();
@@ -3200,6 +3197,11 @@
}
}
}
+
+ if (get_buffs) {
+ for (std::set<unsigned int>::iterator sit = procs_set.begin(); sit != procs_set.end(); sit++)
+ get_buffers(*sit);
+ }
return MB_SUCCESS;
}
@@ -3739,6 +3741,12 @@
for (ind = 0; ind < pc->buffProcs.size(); ind++) {
// incoming ghost entities; unpack; returns entities received
// both from sending proc and from owning proc (which may be different)
+
+ // buffer could be empty, which means there isn't any message to
+ // unpack (due to this comm proc getting added as a result of indirect
+ // communication); just skip this unpack
+ if (pc->ownerSBuffs[ind].empty()) continue;
+
unsigned int to_p = pc->buffProcs[ind];
unsigned char *buff_ptr = &pc->ownerSBuffs[ind][0];
result = pcs[to_p]->unpack_entities(buff_ptr,
Modified: MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp 2009-06-26 22:19:54 UTC (rev 2971)
+++ MOAB/branches/parallel_ghosting/parallel/MBParallelComm.hpp 2009-06-26 22:22:55 UTC (rev 2972)
@@ -326,7 +326,8 @@
const char *tag_name = NULL);
*/
//! get processors with which this processor shares an interface
- MBErrorCode get_interface_procs(std::set<unsigned int> &iface_procs);
+ MBErrorCode get_interface_procs(std::set<unsigned int> &iface_procs,
+ const bool get_buffs = false);
//! get processors with which this processor communicates
MBErrorCode get_comm_procs(std::set<unsigned int> &procs);
@@ -495,6 +496,9 @@
//! set rank for this pcomm; USED FOR TESTING ONLY!
void set_rank(unsigned int r);
+ //! set size for this pcomm; USED FOR TESTING ONLY!
+ void set_size(unsigned int s);
+
//! get (and possibly allocate) buffers for messages to/from to_proc; returns
//! index of to_proc in buffProcs vector; if is_new is non-NULL, sets to
//! whether new buffer was allocated
@@ -1033,6 +1037,11 @@
if (procConfig.proc_size() < r) procConfig.proc_size(r+1);
}
+inline void MBParallelComm::set_size(unsigned int s)
+{
+ procConfig.proc_size(s);
+}
+
inline MBErrorCode MBParallelComm::get_sharing_data(MBEntityHandle entity,
int *ps,
MBEntityHandle *hs,
Modified: MOAB/branches/parallel_ghosting/parallel/ReadParallel.hpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/ReadParallel.hpp 2009-06-26 22:19:54 UTC (rev 2971)
+++ MOAB/branches/parallel_ghosting/parallel/ReadParallel.hpp 2009-06-26 22:22:55 UTC (rev 2972)
@@ -31,21 +31,6 @@
const int* material_set_list,
const int num_material_sets );
- //! Constructor
- ReadParallel(MBInterface* impl = NULL, MBParallelComm *pc = NULL);
-
- //! Destructor
- virtual ~ReadParallel() {}
-
- static const char *parallelOptsNames[];
-
- enum ParallelOpts {POPT_NONE=0, POPT_BCAST, POPT_BCAST_DELETE,
- POPT_READ_DELETE, POPT_READ_PARALLEL,
- POPT_FORMAT, POPT_DEFAULT};
-
-protected:
-
-private:
MBErrorCode load_file(const char **file_names,
const int num_files,
MBEntityHandle& file_set,
@@ -64,7 +49,19 @@
const int ghost_dim,
const int bridge_dim,
const int num_layers);
+ //! Constructor
+ ReadParallel(MBInterface* impl = NULL, MBParallelComm *pc = NULL);
+ //! Destructor
+ virtual ~ReadParallel() {}
+
+ static const char *parallelOptsNames[];
+
+ enum ParallelOpts {POPT_NONE=0, POPT_BCAST, POPT_BCAST_DELETE,
+ POPT_READ_DELETE, POPT_READ_PARALLEL,
+ POPT_FORMAT, POPT_DEFAULT};
+
+ //! PUBLIC TO ALLOW TESTING
MBErrorCode delete_nonlocal_entities(std::string &ptag_name,
std::vector<int> &ptag_vals,
bool distribute,
@@ -72,6 +69,10 @@
MBErrorCode delete_nonlocal_entities(MBEntityHandle file_set);
+protected:
+
+private:
+
MBInterface *mbImpl;
// each reader can keep track of its own pcomm
Modified: MOAB/branches/parallel_ghosting/parallel/parallel_unit_tests.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/parallel_unit_tests.cpp 2009-06-26 22:19:54 UTC (rev 2971)
+++ MOAB/branches/parallel_ghosting/parallel/parallel_unit_tests.cpp 2009-06-26 22:22:55 UTC (rev 2972)
@@ -187,9 +187,9 @@
if (!filename) {
#ifdef SRCDIR
- filename = STRINGIFY(SRCDIR) "/ptest.cub";
+ filename = STRINGIFY(SRCDIR) "/../test/64bricks_512hex.h5m";
#else
- filename = "ptest.cub";
+ filename = "../test/64bricks_512hex.h5m";
#endif
}
Modified: MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp
===================================================================
--- MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp 2009-06-26 22:19:54 UTC (rev 2971)
+++ MOAB/branches/parallel_ghosting/parallel/pcomm_unit.cpp 2009-06-26 22:22:55 UTC (rev 2972)
@@ -3,6 +3,8 @@
#include "MBTagConventions.hpp"
#include "MBCore.hpp"
#include "MeshTopoUtil.hpp"
+#include "ReadParallel.hpp"
+#include "FileOptions.hpp"
#include "TestUtil.hpp"
#include <algorithm>
#include <vector>
@@ -43,6 +45,8 @@
void test_pack_shared_entities_2d();
/** Test pack/unpack of shared entities in 3d*/
void test_pack_shared_entities_3d();
+/** Test pack/unpack of arbitrary mesh file */
+void test_pack_shared_arbitrary();
/** Test filter_pstatus function*/
void test_filter_pstatus();
@@ -69,6 +73,7 @@
num_err += RUN_TEST( test_pack_tag_handle_data );
num_err += RUN_TEST( test_pack_shared_entities_2d );
num_err += RUN_TEST( test_pack_shared_entities_3d );
+ num_err += RUN_TEST( test_pack_shared_arbitrary );
num_err += RUN_TEST( test_filter_pstatus );
#ifdef USE_MPI
@@ -1851,6 +1856,52 @@
CHECK_ERR(rval);
}
+void test_pack_shared_arbitrary()
+{
+#define NP 4
+ MBCore moab[NP];
+ MBParallelComm *pc[NP];
+ for (unsigned int i = 0; i < NP; i++) {
+ pc[i] = new MBParallelComm(&moab[i]);
+ pc[i]->set_rank(i);
+ pc[i]->set_size(NP);
+ }
+
+ std::string ptag_name("MATERIAL_SET");
+ std::vector<int> pa_vec;
+ pa_vec.push_back(0);
+ pa_vec.push_back(4);
+ pa_vec.push_back(2);
+ MBErrorCode rval;
+
+ const char *fnames[] = {"/home/tautges/MOABpar2/test/64bricks_512hex.h5m"};
+
+ std::string partition_name("MATERIAL_SET");
+ FileOptions fopts(NULL);
+
+ for (unsigned int i = 0; i < NP; i++) {
+ ReadParallel rp(moab+i, pc[i]);
+ MBEntityHandle tmp_set = 0;
+ std::vector<int> partition_tag_vals;
+ rval = rp.load_file(fnames, 1, tmp_set, ReadParallel::POPT_READ_DELETE,
+ partition_name,
+ partition_tag_vals, true, pa_vec, NULL, 0,
+ fopts, i, false, -1, -1, -1, -1, 0);
+ CHECK_ERR(rval);
+ }
+
+ rval = MBParallelComm::resolve_shared_ents(pc, NP, 3);
+ CHECK_ERR(rval);
+
+ // exchange interface cells
+ rval = MBParallelComm::exchange_ghost_cells(pc, NP, -1, -1, 0, true);
+ CHECK_ERR(rval);
+
+ // now 1 layer of hex ghosts
+ rval = MBParallelComm::exchange_ghost_cells(pc, NP, 3, 0, 1, true);
+ CHECK_ERR(rval);
+}
+
void test_filter_pstatus()
{
MBRange::iterator i;