[MOAB-dev] r1318 - in MOAB/trunk: . parallel
tautges at mcs.anl.gov
Mon Oct 22 10:13:22 CDT 2007
Author: tautges
Date: 2007-10-22 10:13:22 -0500 (Mon, 22 Oct 2007)
New Revision: 1318
Modified:
MOAB/trunk/FileOptions.cpp
MOAB/trunk/mbparallelcomm_test.cpp
MOAB/trunk/parallel/MBParallelComm.cpp
MOAB/trunk/parallel/ReadParallel.cpp
MOAB/trunk/parallel/ReadParallel.hpp
Log:
Various fixes for parallel read:
- FileOptions: fix a bug in get_ints_option where the parse pointer was not
  advanced past a value range.
- mbparallelcomm_test: replace the positional arguments with an option-driven
  command line and pass a distribute flag through to read_file.
- MBParallelComm: call resolve_shared_ents even when a processor has no local
  entities, so collective communication still completes; also fill the
  caller's shared_ents range rather than a temporary in the shared-entity
  query.
- ReadParallel: restructure load_file around an explicit queue of parallel
  actions (read, broadcast, delete nonlocal, check gids, get fileset ents)
  and add a PARTITION_DISTRIBUTE option that distributes partition sets
  round-robin across processors.
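For reference, the updated test driver assembles its read options roughly as
follows; this is a sketch rather than the exact test code, with tag_name,
tag_val, and distrib standing in for the parsed command-line values:

#include <sstream>
#include <string>

// Sketch: build the parallel read option string the way the updated
// mbparallelcomm_test.cpp read_file() does (illustration only).
std::string make_read_options(const char *tag_name, int tag_val, int distrib)
{
  std::ostringstream options;
  options << "PARALLEL=BCAST_DELETE;PARTITION=" << tag_name;
  if (-1 != tag_val)
    options << ";PARTITION_VAL=" << tag_val;   // keep only sets with this value
  if (1 == distrib)
    options << ";PARTITION_DISTRIBUTE";        // deal sets out across processors
  return options.str();
}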
Modified: MOAB/trunk/FileOptions.cpp
===================================================================
--- MOAB/trunk/FileOptions.cpp 2007-10-20 05:40:29 UTC (rev 1317)
+++ MOAB/trunk/FileOptions.cpp 2007-10-22 15:13:22 UTC (rev 1318)
@@ -167,6 +167,8 @@
for (int i = sval; i <= eval; i++)
values.push_back(i);
+
+ s = endptr;
}
return MB_SUCCESS;
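The bug here was that the parse pointer was never moved past a value range, so
the loop kept re-reading the same token. A minimal standalone sketch of this
style of range parsing (not the MOAB code itself) shows where the advance
belongs:

#include <cstdlib>
#include <vector>

// Parse a comma-separated list of ints with "a-b" ranges, e.g. "1,4-6,9".
// The key point of the fix: advance s past each parsed token (s = endptr),
// including the end of a range, or the loop never makes progress.
static bool parse_int_list(const char *s, std::vector<int> &values)
{
  while (*s) {
    char *endptr;
    long sval = std::strtol(s, &endptr, 0);
    if (endptr == s) return false;        // not a number
    s = endptr;
    long eval = sval;
    if (*s == '-') {                      // "a-b" range
      eval = std::strtol(s + 1, &endptr, 0);
      if (endptr == s + 1) return false;
      s = endptr;                         // advance past the range end
    }
    for (long i = sval; i <= eval; i++)
      values.push_back((int)i);
    if (*s == ',') ++s;                   // skip separator
  }
  return true;
}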
Modified: MOAB/trunk/mbparallelcomm_test.cpp
===================================================================
--- MOAB/trunk/mbparallelcomm_test.cpp 2007-10-20 05:40:29 UTC (rev 1317)
+++ MOAB/trunk/mbparallelcomm_test.cpp 2007-10-22 15:13:22 UTC (rev 1318)
@@ -34,7 +34,7 @@
int IJK, int &nshared);
MBErrorCode read_file(MBInterface *mbImpl, const char *filename,
- const char *tag_name, int tag_val);
+ const char *tag_name, int tag_val, int distrib);
int main(int argc, char **argv)
{
@@ -58,56 +58,75 @@
// get N, M from command line
int N, M;
if (argc < 3) {
- std::cerr << "No arguments passed; assuming N=10, M=2." << std::endl;
- N = 10;
- M = 2;
+ if (0 == rank)
+ std::cerr
+ << "Usage: " << argv[0]
+ << " <opt> <input> [...] where:" << std::endl
+ << "opt input" << std::endl
+ << "=== =====" << std::endl
+ << " 1 <linear_ints> <shared_verts> " << std::endl
+ << " 2 <n_ints> " << std::endl
+ << " 3* <file_name> [<tag_name>=\"MATERIAL_SET\" [tag_val] [distribute=true] ]" << std::endl
+ << "*Note: if opt 3 is used, it must be the last one." << std::endl;
+
+ err = MPI_Finalize();
+ return 1;
}
- else {
- N = atoi(argv[1]);
- M = atoi(argv[2]);
- }
- int max_iter = 2;
- if (argc > 3) max_iter = atoi(argv[3]);
+ int npos = 1, tag_val, distrib;
+ const char *tag_name, *filename;
- for (int i = 0; i < max_iter; i++) {
- int nshared;
- MBErrorCode tmp_result = MB_SUCCESS;
- if (0 == i) {
- tmp_result = create_linear_mesh(mbImpl, N, M, nshared);
- if (MB_SUCCESS != tmp_result) {
- result = tmp_result;
- std::cerr << "Couldn't create linear mesh; error message:."
- << std::endl;
- PRINT_LAST_ERROR
- continue;
- }
+ while (npos < argc) {
+ MBErrorCode tmp_result;
+ int nshared = -1;
+ int this_opt = strtol(argv[npos++], NULL, 0);
+ switch (this_opt) {
+ case 1:
+ N = atoi(argv[npos++]);
+ M = atoi(argv[npos++]);
+ tmp_result = create_linear_mesh(mbImpl, N, M, nshared);
+ if (MB_SUCCESS != tmp_result) {
+ result = tmp_result;
+ std::cerr << "Couldn't create linear mesh; error message:."
+ << std::endl;
+ PRINT_LAST_ERROR
+ }
+ break;
+ case 2:
+ N = atoi(argv[npos++]);
+ tmp_result = create_scd_mesh(mbImpl, N, nshared);
+ if (MB_SUCCESS != tmp_result) {
+ result = tmp_result;
+ std::cerr << "Couldn't create structured mesh; error message:"
+ << std::endl;
+ PRINT_LAST_ERROR
+ }
+ break;
+
+ case 3:
+ // read a file in parallel from the filename on the command line
+ tag_name = "MATERIAL_SET";
+ tag_val = -1;
+ filename = argv[npos++];
+ if (npos < argc) tag_name = argv[npos++];
+ if (npos < argc) tag_val = strtol(argv[npos++], NULL, 0);
+ if (npos < argc) distrib = strtol(argv[npos++], NULL, 0);
+ else distrib = 1;
+ tmp_result = read_file(mbImpl, filename, tag_name, tag_val,
+ distrib);
+ if (MB_SUCCESS != tmp_result) {
+ result = tmp_result;
+ std::cerr << "Couldn't read mesh; error message:" << std::endl;
+ PRINT_LAST_ERROR
+ }
+ nshared = -1;
+ break;
+ default:
+ std::cerr << "Unrecognized option \"" << this_opt
+ << "\"; skipping." << std::endl;
+ tmp_result = MB_FAILURE;
}
- else if (1 == i) {
- tmp_result = create_scd_mesh(mbImpl, N, nshared);
- if (MB_SUCCESS != tmp_result) {
- result = tmp_result;
- std::cerr << "Couldn't create structured mesh; error message:"
- << std::endl;
- PRINT_LAST_ERROR
- continue;
- }
- }
- else if (2 == i && argc > 4) {
- // read a file in parallel from the filename on the command line
- const char *tag_name = NULL;
- int tag_val = -1;
- if (argc > 5) tag_name = argv[5];
- if (argc > 6) tag_val = strtol(argv[6], NULL, 0);
- tmp_result = read_file(mbImpl, argv[4], tag_name, tag_val);
- if (MB_SUCCESS != tmp_result) {
- result = tmp_result;
- std::cerr << "Couldn't read mesh; error message:" << std::endl;
- PRINT_LAST_ERROR
- continue;
- }
- nshared = -1;
- }
+
if (MB_SUCCESS == tmp_result) {
// now figure out which vertices are shared
@@ -135,21 +154,20 @@
}
// check # shared entities
- else if (0 <= nshared && nshared != (int) shared_ents.size()) {
+ if (0 <= nshared && nshared != (int) shared_ents.size()) {
std::cerr << "Didn't get correct number of shared vertices on "
<< "processor " << rank << std::endl;
result = MB_FAILURE;
}
- else if (i < 2) {
- std::cerr << "Proc " << rank;
- if (0 == i) std::cerr << " linear mesh succeeded." << std::endl;
- else std::cerr << " structured mesh succeeded." << std::endl;
- if (0 == rank) std::cerr << " Time = " << wtime << "." << std::endl;
- }
- else {
+
+ else
+ std::cerr << "Proc " << rank << " option " << this_opt
+ << " succeeded." << std::endl;
+
+ if (0 == rank) std::cerr << " Time = " << wtime << "." << std::endl;
+ if (-1 == nshared)
std::cerr << "Proc " << rank << " " << shared_ents.size()
<< " shared entities." << std::endl;
- }
delete pcomm;
tmp_result = mbImpl->delete_mesh();
@@ -162,7 +180,6 @@
}
}
-
err = MPI_Finalize();
if (MB_SUCCESS == result)
@@ -172,14 +189,17 @@
}
MBErrorCode read_file(MBInterface *mbImpl, const char *filename,
- const char *tag_name, int tag_val)
+ const char *tag_name, int tag_val,
+ int distrib)
{
- std::ostringstream options("PARALLEL=BCAST_DELETE;PARTITION=");
- if (NULL == tag_name) options << "MATERIAL_SET";
- else options << tag_name;
+ std::ostringstream options;
+ options << "PARALLEL=BCAST_DELETE;PARTITION=" << tag_name;
if (-1 != tag_val)
options << ";PARTITION_VAL=" << tag_val;
+
+ if (1 == distrib)
+ options << ";PARTITION_DISTRIBUTE";
MBEntityHandle file_set;
MBErrorCode result = mbImpl->load_file(filename, file_set,
Modified: MOAB/trunk/parallel/MBParallelComm.cpp
===================================================================
--- MOAB/trunk/parallel/MBParallelComm.cpp 2007-10-20 05:40:29 UTC (rev 1317)
+++ MOAB/trunk/parallel/MBParallelComm.cpp 2007-10-22 15:13:22 UTC (rev 1318)
@@ -1060,8 +1060,8 @@
if (MB_SUCCESS != result) return result;
}
- if (proc_ents.empty()) return MB_SUCCESS;
-
+ // must call even if we don't have any entities, to make sure
+ // collective comm'n works
return resolve_shared_ents(proc_ents, shared_dim);
}
@@ -1070,72 +1070,81 @@
{
if (debug) std::cerr << "Resolving shared entities." << std::endl;
- MBRange::iterator rit;
- MBSkinner skinner(mbImpl);
-
// get the skin entities by dimension
MBRange skin_ents[4];
MBErrorCode result;
- int upper_dim = MBCN::Dimension(TYPE_FROM_HANDLE(*proc_ents.begin()));
+ std::vector<int> gid_data;
- int skin_dim;
- if (shared_dim < upper_dim) {
- // if shared entity dimension is less than maximal dimension,
- // start with skin entities
- skin_dim = upper_dim-1;
- result = skinner.find_skin(proc_ents, skin_ents[skin_dim],
- skin_ents[skin_dim], true);
- RR("Failed to find skin.");
- if (debug) std::cerr << "Found skin, now resolving." << std::endl;
- }
- else {
- // otherwise start with original entities
- skin_ents[upper_dim] = proc_ents;
- skin_dim = upper_dim;
- }
+ if (!proc_ents.empty()) {
+ // find the skin entities
+ int upper_dim = MBCN::Dimension(TYPE_FROM_HANDLE(*proc_ents.begin()));
- // get entities adjacent to skin ents from shared_dim down to
- // zero; don't create them if they don't exist already
- for (int this_dim = shared_dim; this_dim >= 0; this_dim--) {
+ MBRange::iterator rit;
+ MBSkinner skinner(mbImpl);
+
+ int skin_dim;
+ if (shared_dim < upper_dim) {
+ // if shared entity dimension is less than maximal dimension,
+ // start with skin entities
+ skin_dim = upper_dim-1;
+ result = skinner.find_skin(proc_ents, skin_ents[skin_dim],
+ skin_ents[skin_dim], true);
+ RR("Failed to find skin.");
+ if (debug) std::cerr << "Found skin, now resolving." << std::endl;
+ }
+ else {
+ // otherwise start with original entities
+ skin_ents[upper_dim] = proc_ents;
+ skin_dim = upper_dim;
+ }
- if (this_dim == skin_dim) continue;
+ // get entities adjacent to skin ents from shared_dim down to
+ // zero; don't create them if they don't exist already
+ for (int this_dim = shared_dim; this_dim >= 0; this_dim--) {
+
+ if (this_dim == skin_dim) continue;
- result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
- false, skin_ents[this_dim],
- MBInterface::UNION);
- RR("Failed getting skin adjacencies.");
- }
+ result = mbImpl->get_adjacencies(skin_ents[skin_dim], this_dim,
+ false, skin_ents[this_dim],
+ MBInterface::UNION);
+ RR("Failed getting skin adjacencies.");
+ }
- // global id tag
- MBTag gid_tag; int def_val = -1;
- result = mbImpl->tag_create(GLOBAL_ID_TAG_NAME, sizeof(int),
- MB_TAG_DENSE, MB_TYPE_INTEGER, gid_tag,
- &def_val, true);
- if (MB_FAILURE == result) return result;
+ // global id tag
+ MBTag gid_tag; int def_val = -1;
+ result = mbImpl->tag_create(GLOBAL_ID_TAG_NAME, sizeof(int),
+ MB_TAG_DENSE, MB_TYPE_INTEGER, gid_tag,
+ &def_val, true);
+ if (MB_FAILURE == result) return result;
- else if (MB_ALREADY_ALLOCATED != result) {
- // just created it, so we need global ids
- result = assign_global_ids(0, upper_dim);
- RR("Failed assigning global ids.");
- }
+ else if (MB_ALREADY_ALLOCATED != result) {
+ // just created it, so we need global ids
+ result = assign_global_ids(0, upper_dim);
+ RR("Failed assigning global ids.");
+ }
- // store index in temp tag; reuse gid_data
- std::vector<int> gid_data(skin_ents[0].size());
- int idx = 0;
- for (MBRange::iterator rit = skin_ents[0].begin();
- rit != skin_ents[0].end(); rit++)
- gid_data[idx] = idx, idx++;
- MBTag idx_tag;
- result = mbImpl->tag_create("__idx_tag", sizeof(int), MB_TAG_DENSE,
- MB_TYPE_INTEGER, idx_tag, &def_val, true);
- if (MB_SUCCESS != result && MB_ALREADY_ALLOCATED != result) return result;
- result = mbImpl->tag_set_data(idx_tag, skin_ents[0], &gid_data[0]);
- RR("Couldn't assign index tag.");
+ // store index in temp tag; reuse gid_data
+ gid_data.resize(skin_ents[0].size());
+ int idx = 0;
+ for (MBRange::iterator rit = skin_ents[0].begin();
+ rit != skin_ents[0].end(); rit++)
+ gid_data[idx] = idx, idx++;
+ MBTag idx_tag;
+ result = mbImpl->tag_create("__idx_tag", sizeof(int), MB_TAG_DENSE,
+ MB_TYPE_INTEGER, idx_tag, &def_val, true);
+ if (MB_SUCCESS != result && MB_ALREADY_ALLOCATED != result) return result;
+ result = mbImpl->tag_set_data(idx_tag, skin_ents[0], &gid_data[0]);
+ RR("Couldn't assign index tag.");
- // get gids for skin verts in a vector, to pass to gs
- result = mbImpl->tag_get_data(gid_tag, skin_ents[0], &gid_data[0]);
- RR("Couldn't get gid tag for skin vertices.");
-
+ // get gids for skin verts in a vector, to pass to gs
+ result = mbImpl->tag_get_data(gid_tag, skin_ents[0], &gid_data[0]);
+ RR("Couldn't get gid tag for skin vertices.");
+ }
+ else {
+ // need to have at least one position so we can get a ptr to it
+ gid_data.resize(1);
+ }
+
// get a crystal router
crystal_data *cd = procConfig.crystal_router();
@@ -1157,6 +1166,9 @@
result = MB_FAILURE;
RR("Couldn't create gs data.");
}
+
+ // if no entities, no more communication after this, so just return
+ if (proc_ents.empty()) return MB_SUCCESS;
// get shared proc tags
int def_vals[2] = {-10*procConfig.proc_size(), -10*procConfig.proc_size()};
@@ -1308,7 +1320,7 @@
// tag set will have one
result = mbImpl->get_entities_by_type_and_tag(0, this_type,
&sharedprocs_tag,
- NULL, 1, tmp_ents,
+ NULL, 1, shared_ents,
MBInterface::UNION);
RR("Trouble getting sharedprocs_tag for shared entities.");
}
@@ -1340,7 +1352,7 @@
RR("Failed assigning global ids.");
}
- return result;
+ return MB_SUCCESS;
}
#ifdef TEST_PARALLELCOMM
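The early return on an empty proc_ents was removed because resolve_shared_ents
performs collective communication (the crystal router / gs setup), which every
processor must enter. A generic MPI sketch of the same rule, using a
hypothetical gather_counts helper (not MOAB code):

#include <mpi.h>
#include <vector>

// Every rank must participate in a collective call, even with nothing to
// contribute; a rank that skips it would deadlock the others.
void gather_counts(MPI_Comm comm, int my_count, std::vector<int> &all_counts)
{
  int size;
  MPI_Comm_size(comm, &size);
  all_counts.resize(size);
  // Called on all ranks, including those where my_count == 0.
  MPI_Allgather(&my_count, 1, MPI_INT, &all_counts[0], 1, MPI_INT, comm);
}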
Modified: MOAB/trunk/parallel/ReadParallel.cpp
===================================================================
--- MOAB/trunk/parallel/ReadParallel.cpp 2007-10-20 05:40:29 UTC (rev 1317)
+++ MOAB/trunk/parallel/ReadParallel.cpp 2007-10-22 15:13:22 UTC (rev 1318)
@@ -10,13 +10,24 @@
#include "MBCN.hpp"
#include <iostream>
+#include <sstream>
-const bool debug = true;
+const bool debug = false;
#define RR(a) if (MB_SUCCESS != result) {\
dynamic_cast<MBCore*>(mbImpl)->get_error_handler()->set_last_error(a);\
return result;}
+enum ParallelActions {PA_READ=0, PA_BROADCAST, PA_DELETE_NONLOCAL,
+ PA_CHECK_GIDS_SERIAL, PA_GET_FILESET_ENTS};
+const char *ParallelActionsNames[] = {
+ "PARALLEL READ",
+ "PARALLEL BROADCAST",
+ "PARALLEL DELETE NONLOCAL",
+ "PARALLEL CHECK_GIDS_SERIAL",
+ "PARALLEL GET_FILESET_ENTS"
+};
+
MBErrorCode ReadParallel::load_file(const char *file_name,
MBEntityHandle& file_set,
const FileOptions &opts,
@@ -25,8 +36,6 @@
{
MBError *merror = ((MBCore*)mbImpl)->get_error_handler();
- MBCore *impl = dynamic_cast<MBCore*>(mbImpl);
-
// Get parallel settings
int parallel_mode;
const char* parallel_opts[] = { "NONE", "BCAST", "BCAST_DELETE",
@@ -51,9 +60,13 @@
if (MB_ENTITY_NOT_FOUND == result || partition_tag_name.empty())
partition_tag_name += "PARTITION";
- // Get partition tag value(s), if any
+ // Get partition tag value(s), if any, and whether they're to be
+ // distributed or assigned
std::vector<int> partition_tag_vals;
result = opts.get_ints_option("PARTITION_VAL", partition_tag_vals);
+ bool distrib = false;
+ result = opts.get_null_option("PARTITION_DISTRIBUTE");
+ if (MB_SUCCESS == result) distrib = true;
// get MPI IO processor rank
int reader_rank;
@@ -64,130 +77,199 @@
merror->set_last_error( "Unexpected value for 'MPI_IO_RANK' option\n" );
return MB_FAILURE;
}
+
+ // now that we've parsed all the parallel options, make an instruction
+ // queue
+ std::vector<int> pa_vec;
+ bool is_reader = (mbImpl->proc_rank() == reader_rank);
- // now that we've parsed all the parallel options, return
- // failure for most of them because we haven't implemented
- // most of them yet.
- if (parallel_mode == POPT_FORMAT) {
- merror->set_last_error( "Access to format-specific parallel read not implemented.\n");
- return MB_NOT_IMPLEMENTED;
- }
+ switch (parallel_mode) {
+ case POPT_BCAST:
+ if (is_reader) {
+ pa_vec.push_back(PA_READ);
+ pa_vec.push_back(PA_CHECK_GIDS_SERIAL);
+ pa_vec.push_back(PA_GET_FILESET_ENTS);
+ }
+ pa_vec.push_back(PA_BROADCAST);
+ if (!is_reader) pa_vec.push_back(PA_GET_FILESET_ENTS);
- if (parallel_mode == POPT_SCATTER) {
- merror->set_last_error( "Partitioning for PARALLEL=SCATTER not supported yet.\n");
- return MB_NOT_IMPLEMENTED;
- }
+ break;
+
+ case POPT_BCAST_DELETE:
+ if (is_reader) {
+ pa_vec.push_back(PA_READ);
+ pa_vec.push_back(PA_CHECK_GIDS_SERIAL);
+ pa_vec.push_back(PA_GET_FILESET_ENTS);
+ }
+ pa_vec.push_back(PA_BROADCAST);
+ if (!is_reader) pa_vec.push_back(PA_GET_FILESET_ENTS);
+ pa_vec.push_back(PA_DELETE_NONLOCAL);
+ break;
- if ((parallel_mode != POPT_SCATTER &&
- parallel_mode != POPT_BCAST_DELETE) ||
- reader_rank == (int)(mbImpl->proc_rank())) {
- // Try using the file extension to select a reader
- const MBReaderWriterSet* set = impl->reader_writer_set();
- MBReaderIface* reader = set->get_file_extension_reader( file_name );
- if (reader)
- {
- result = reader->load_file( file_name, file_set, opts,
- material_set_list, num_material_sets );
- delete reader;
- }
- else
- {
- // Try all the readers
- MBReaderWriterSet::iterator iter;
- for (iter = set->begin(); iter != set->end(); ++iter)
- {
- MBReaderIface* reader = iter->make_reader( mbImpl );
- if (NULL != reader)
- {
- result = reader->load_file( file_name, file_set, opts,
- material_set_list, num_material_sets );
- delete reader;
- if (MB_SUCCESS == result)
- break;
- }
- }
- }
+ case POPT_READ_DELETE:
+ pa_vec.push_back(PA_READ);
+ pa_vec.push_back(PA_DELETE_NONLOCAL);
+ break;
+
+ case POPT_FORMAT:
+ merror->set_last_error( "Access to format-specific parallel read not implemented.\n");
+ return MB_NOT_IMPLEMENTED;
+ case POPT_SCATTER:
+ merror->set_last_error( "Partitioning for PARALLEL=SCATTER not supported yet.\n");
+ return MB_NOT_IMPLEMENTED;
+ default:
+ return MB_FAILURE;
}
- else {
- result = MB_SUCCESS;
- }
- if (MB_SUCCESS != result)
- RR("Failed initial file load.");
+ return load_file(file_name, file_set, parallel_mode, partition_tag_name,
+ partition_tag_vals, distrib, pa_vec, material_set_list,
+ num_material_sets, opts, reader_rank);
+}
- if (parallel_mode == POPT_BCAST ||
- parallel_mode == POPT_BCAST_DELETE) {
- MBRange entities;
- MBParallelComm pcom( mbImpl);
+MBErrorCode ReadParallel::load_file(const char *file_name,
+ MBEntityHandle& file_set,
+ int parallel_mode,
+ std::string &partition_tag_name,
+ std::vector<int> &partition_tag_vals,
+ bool distrib,
+ std::vector<int> &pa_vec,
+ const int* material_set_list,
+ const int num_material_sets,
+ const FileOptions &opts,
+ int reader_rank)
+{
+ MBErrorCode result = MB_SUCCESS;
+ MBParallelComm pcom( mbImpl);
+ MBRange entities;
+ MBTag file_set_tag = 0;
+ int other_sets;
+ MBReaderIface* reader;
+ MBReaderWriterSet::iterator iter;
+ MBRange other_file_sets, file_sets;
+ int tag_val, *tag_val_ptr = &tag_val;
+ MBCore *impl = dynamic_cast<MBCore*>(mbImpl);
+
- // get which entities need to be broadcast, only if I'm the reader
- if (reader_rank == (int)(mbImpl->proc_rank())) {
+ for (std::vector<int>::iterator vit = pa_vec.begin();
+ vit != pa_vec.end(); vit++) {
- // if I'm root, check to make sure we have global ids (at least for
- // vertices, anyway) & generate (in serial) if not
- result = pcom.check_global_ids(file_set, 0, 1, true, false);
- RR("Failed to generate/find global ids for parallel read.");
-
- result = mbImpl->get_entities_by_handle( file_set, entities );
- if (MB_SUCCESS != result)
- entities.clear();
+ MBErrorCode tmp_result;
+ switch (*vit) {
+//==================
+ case PA_READ:
+ reader = impl->reader_writer_set()->
+ get_file_extension_reader( file_name );
+ if (reader)
+ {
+ tmp_result = reader->load_file( file_name, file_set, opts,
+ material_set_list, num_material_sets );
+ delete reader;
+ }
+ else
+ {
+ // Try all the readers
+ for (iter = impl->reader_writer_set()->begin();
+ iter != impl->reader_writer_set()->end(); ++iter)
+ {
+ reader = iter->make_reader( mbImpl );
+ if (NULL != reader)
+ {
+ tmp_result = reader->load_file( file_name, file_set, opts,
+ material_set_list, num_material_sets );
+ delete reader;
+ if (MB_SUCCESS == tmp_result)
+ break;
+ }
+ }
+ }
+ if (MB_SUCCESS != tmp_result) break;
- // add actual file set to entities too
- entities.insert(file_set);
+ // mark the file set
+ other_sets = 0;
+ tmp_result = mbImpl->tag_create("__file_set", sizeof(int),
+ MB_TAG_SPARSE,
+ MB_TYPE_INTEGER, file_set_tag,
+ 0, true);
+ if (MB_ALREADY_ALLOCATED == tmp_result) {
+ tmp_result = mbImpl->get_entities_by_type_and_tag(0, MBENTITYSET,
+ &file_set_tag, NULL, 1,
+ other_file_sets);
+ if (MB_SUCCESS == tmp_result) other_sets = other_file_sets.size();
+ }
+ if (MB_SUCCESS == tmp_result)
+ tmp_result = mbImpl->tag_set_data(file_set_tag, &file_set, 1,
+ &other_sets);
+ break;
- // mark the file set so the receivers know which one it is
- MBTag file_set_tag;
- int other_sets = 0;
- result = mbImpl->tag_create("FILE_SET", sizeof(int), MB_TAG_SPARSE,
- MB_TYPE_INTEGER, file_set_tag, 0, true);
- if (MB_ALREADY_ALLOCATED == result) {
- MBRange other_file_sets;
- result = mbImpl->get_entities_by_type_and_tag(0, MBENTITYSET,
- &file_set_tag, NULL, 1,
- other_file_sets);
- if (MB_SUCCESS == result) other_sets = other_file_sets.size();
- }
- if (MB_SUCCESS == result)
- result = mbImpl->tag_set_data(file_set_tag, &file_set, 1, &other_sets);
- }
+//==================
+ case PA_GET_FILESET_ENTS:
+ if (0 == file_set_tag) {
+ tmp_result = mbImpl->tag_get_handle("FILE_SET", file_set_tag);
+ if (MB_SUCCESS == tmp_result) {
+ other_file_sets.clear();
+ file_sets.clear();
+ tmp_result = mbImpl->get_entities_by_type_and_tag(0, MBENTITYSET,
+ &file_set_tag,
+ NULL, 1,
+ other_file_sets);
+ if (MB_SUCCESS == tmp_result && other_file_sets.size() > 1) {
+ tag_val = other_file_sets.size();
+ result = mbImpl->get_entities_by_type_and_tag(0, MBENTITYSET,
+ &file_set_tag,
+ (void*const*)
+ &tag_val_ptr, 1,
+ file_sets);
+ if (!file_sets.empty()) other_file_sets = file_sets;
+ }
+ if (!other_file_sets.empty()) file_set = *other_file_sets.rbegin();
+ }
+ }
+
+ // get entities in the file set, and add actual file set to it;
+ // mark the file set to make sure any receiving procs know which it
+ // is
+ tmp_result = mbImpl->get_entities_by_handle( file_set, entities );
+ if (MB_SUCCESS != tmp_result)
+ entities.clear();
- // do the actual broadcast; if single-processor, ignore error
- result = pcom.broadcast_entities( reader_rank, entities );
- if (mbImpl->proc_size() == 1 && MB_SUCCESS != result)
- result = MB_SUCCESS;
-
- // go get the file set if I'm not the reader
- if (MB_SUCCESS == result &&
- reader_rank != (int)(mbImpl->proc_rank())) {
- MBTag file_set_tag;
- result = mbImpl->tag_get_handle("FILE_SET", file_set_tag);
- if (MB_SUCCESS == result) {
- MBRange other_file_sets;
- result = mbImpl->get_entities_by_type_and_tag(0, MBENTITYSET,
- &file_set_tag, NULL, 1,
- other_file_sets);
- if (MB_SUCCESS == result && other_file_sets.size() > 1) {
- int tag_val = other_file_sets.size(), *tag_val_ptr = &tag_val;
- MBRange file_sets;
- result = mbImpl->get_entities_by_type_and_tag(0, MBENTITYSET,
- &file_set_tag,
- (void*const*) &tag_val_ptr, 1,
- file_sets);
- if (!file_sets.empty()) other_file_sets = file_sets;
+ // add actual file set to entities too
+ entities.insert(file_set);
+ break;
+
+//==================
+ case PA_BROADCAST:
+ // do the actual broadcast; if single-processor, ignore error
+ tmp_result = pcom.broadcast_entities( reader_rank, entities );
+ if (debug) {
+ std::cerr << "Bcast done; entities:" << std::endl;
+ mbImpl->list_entities(0, 0);
}
- if (!other_file_sets.empty()) file_set = *other_file_sets.rbegin();
- }
+ break;
+
+//==================
+ case PA_DELETE_NONLOCAL:
+ tmp_result = delete_nonlocal_entities(partition_tag_name,
+ partition_tag_vals,
+ distrib,
+ file_set);
+ break;
+
+//==================
+ case PA_CHECK_GIDS_SERIAL:
+ tmp_result = pcom.check_global_ids(file_set, 0, 1, true, false);
+ break;
+
+//==================
+ default:
+ return MB_FAILURE;
}
-
- RR("Failed to broadcast mesh.");
- }
- if (parallel_mode == POPT_BCAST_DELETE ||
- parallel_mode == POPT_READ_DELETE) {
- result = delete_nonlocal_entities(partition_tag_name,
- partition_tag_vals,
- file_set);
- if (MB_SUCCESS != result) return result;
+ if (MB_SUCCESS != tmp_result) {
+ result = tmp_result;
+ std::ostringstream ostr;
+ ostr << "Failed in step " << ParallelActionsNames[*vit] << std::endl;
+ RR(ostr.str().c_str());
+ }
}
return result;
@@ -195,9 +277,10 @@
MBErrorCode ReadParallel::delete_nonlocal_entities(std::string &ptag_name,
std::vector<int> &ptag_vals,
+ bool distribute,
MBEntityHandle file_set)
{
- MBRange partition_sets, my_sets;
+ MBRange partition_sets;
MBErrorCode result;
MBTag ptag;
@@ -212,34 +295,41 @@
int proc_sz = mbImpl->proc_size();
int proc_rk = mbImpl->proc_rank();
- if (ptag_vals.empty()) {
- // no values input, just distribute sets
- int num_sets = partition_sets.size() / proc_sz, orig_numsets = num_sets;
- if (partition_sets.size() % proc_sz != 0) {
- num_sets++;
- if (proc_rk == proc_sz-1)
- num_sets = partition_sets.size() % num_sets;
- }
-
- int istart = orig_numsets * proc_rk;
- for (int i = 0; i < num_sets; i++)
- my_sets.insert(partition_sets[istart+i]);
- }
- else {
+ if (!ptag_vals.empty()) {
// values input, get sets with those values
+ MBRange tmp_sets;
std::vector<int> tag_vals(partition_sets.size());
result = mbImpl->tag_get_data(ptag, partition_sets, &tag_vals[0]);
RR("Failed to get tag data for partition vals tag.");
- for (std::vector<int>::iterator pit = ptag_vals.begin();
- pit != ptag_vals.end(); pit++) {
- std::vector<int>::iterator pit2 = std::find(tag_vals.begin(),
- tag_vals.end(), *pit);
- if (pit2 == tag_vals.end()) RR("Couldn't find partition tag value.");
- my_sets.insert(partition_sets[pit2 - tag_vals.begin()]);
+ for (std::vector<int>::iterator pit = tag_vals.begin();
+ pit != tag_vals.end(); pit++) {
+ std::vector<int>::iterator pit2 = std::find(ptag_vals.begin(),
+ ptag_vals.end(), *pit);
+ if (pit2 != ptag_vals.end())
+ tmp_sets.insert(partition_sets[pit - tag_vals.begin()]);
}
+
+ partition_sets.swap(tmp_sets);
}
+
+ if (distribute) {
+ MBRange tmp_sets;
+ // distribute the partition sets
+ int num_sets = partition_sets.size() / proc_sz, orig_numsets = num_sets;
+ if (proc_rk < partition_sets.size() % proc_sz) num_sets++;
+
+ for (int i = 0; i < num_sets; i++)
+ tmp_sets.insert(partition_sets[i*proc_sz + proc_rk]);
+
+ partition_sets.swap(tmp_sets);
+ }
+
+ if (debug) {
+ std::cerr << "My partition sets: ";
+ partition_sets.print();
+ }
- return delete_nonlocal_entities(my_sets, file_set);
+ return delete_nonlocal_entities(partition_sets, file_set);
}
MBErrorCode ReadParallel::delete_nonlocal_entities(MBRange &partition_sets,
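The new PARTITION_DISTRIBUTE path deals partition sets out round-robin by
index: processor p takes sets p, p+P, p+2P, ... for P processors. A small
standalone sketch of that assignment policy (illustration only;
my_partition_indices is not a MOAB function):

#include <vector>

// Return the indices (into the global list of partition sets) owned by
// processor proc_rk under round-robin distribution; leftover sets go to
// the lowest-ranked processors.
std::vector<int> my_partition_indices(int num_sets_total, int proc_sz,
                                      int proc_rk)
{
  std::vector<int> mine;
  int num_sets = num_sets_total / proc_sz;
  if (proc_rk < num_sets_total % proc_sz) num_sets++;
  for (int i = 0; i < num_sets; i++)
    mine.push_back(i * proc_sz + proc_rk);
  return mine;
}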
Modified: MOAB/trunk/parallel/ReadParallel.hpp
===================================================================
--- MOAB/trunk/parallel/ReadParallel.hpp 2007-10-20 05:40:29 UTC (rev 1317)
+++ MOAB/trunk/parallel/ReadParallel.hpp 2007-10-22 15:13:22 UTC (rev 1318)
@@ -33,8 +33,21 @@
private:
MBInterface *mbImpl;
+ MBErrorCode load_file(const char *file_name,
+ MBEntityHandle& file_set,
+ int parallel_mode,
+ std::string &partition_tag_name,
+ std::vector<int> &partition_tag_vals,
+ bool distrib,
+ std::vector<int> &pa_vec,
+ const int* material_set_list,
+ const int num_material_sets,
+ const FileOptions &opts,
+ int reader_rank);
+
MBErrorCode delete_nonlocal_entities(std::string &ptag_name,
std::vector<int> &ptag_vals,
+ bool distribute,
MBEntityHandle file_set);
MBErrorCode delete_nonlocal_entities(MBRange &partition_sets,