[MOAB-dev] commit/MOAB: 3 new changesets

commits-noreply at bitbucket.org
Mon Apr 8 18:29:05 CDT 2013


3 new commits in MOAB:

https://bitbucket.org/fathomteam/moab/commits/f5723f66f5d2/
Changeset:   f5723f66f5d2
Branch:      None
User:        vijaysm
Date:        2013-04-09 01:23:41
Summary:     Adding more documentation and a streamlined input/output interface. All args have defaults now. Need to add FileOptions support to standardize.

Affected #:  1 file
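
The summary notes that FileOptions support is still to be added. As a rough, hypothetical
sketch of where that could head (the moab::FileOptions accessor names get_str_option and
get_real_option used below are assumptions, not part of this commit), the positional
argument parsing might eventually be replaced by something like:

  #include "moab/FileOptions.hpp"
  #include <string>

  // Hypothetical sketch: parse "TAG_NAME=USERTAG;TAG_VALUE=1.0" style option
  // strings instead of positional command-line arguments.
  moab::ErrorCode parse_tag_options(const std::string& optstr,
                                    std::string& tagName, double& tagValue)
  {
    moab::FileOptions opts(optstr.c_str());
    if (moab::MB_SUCCESS != opts.get_str_option("TAG_NAME", tagName))
      tagName = "USERTAG";   // fall back to the default tag name
    if (moab::MB_SUCCESS != opts.get_real_option("TAG_VALUE", tagValue))
      tagValue = 1.0;        // fall back to the default tag value
    return moab::MB_SUCCESS;
  }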

diff --git a/examples/ReduceExchangeTags.cpp b/examples/ReduceExchangeTags.cpp
index 3c0b43e..4fb657b 100644
--- a/examples/ReduceExchangeTags.cpp
+++ b/examples/ReduceExchangeTags.cpp
@@ -1,186 +1,228 @@
-// A test file for Subset Normalization
-#include "moab/ParallelComm.hpp"
-#include "MBParallelConventions.h"
-#include "moab/Core.hpp"
-#include <iostream>
-#include <sstream>
-#include <cstdlib>
-
-using namespace moab;
-
-bool debug = true;
-
-// Error routines for use with MOAB API
-#define CHKERR(CODE, MSG)                                 \
-  do {                                                    \
-    if (MB_SUCCESS != (CODE)) {                           \
-      std::string errstr;  mbi->get_last_error(errstr);   \
-      std::cerr << errstr << std::endl;                   \
-      std::cerr << MSG << std::endl;                      \
-      MPI_Finalize();                                     \
-    }                                                     \
-  } while(false)
-
-// Error routines for use with MPI API
-#define MPICHKERR(CODE, MSG)                              \
-  do {                                                    \
-    if (0 != CODE) {                                      \
-      std::cerr << MSG << std::endl;                      \
-      MPI_Finalize();                                     \
-    }                                                     \
-  } while(false)
-
-
-// Function to parse input parameters
-ErrorCode get_file_options(int argc, char **argv,
-                           const char** filename,
-                           const char** tagName,
-                           double &tagValues)
-{
-  int npos = 1;
-
-  assert(argc >= 2);
-
-  // get mesh filename
-  *filename = argv[npos++];
-
-  // get tag selection options
-  if (argc > 3) {
-    *tagName = argv[npos++];
-    tagValues = atof(argv[npos++]);
-  }
-  else {
-    *tagName = "USERTAG";
-    tagValues = 1.0;
-  }
-
-  return MB_SUCCESS;
-}
-
-#define dbgprint(MSG)                                \
-  do {                                              \
-      if (!rank) std::cerr << MSG << std::endl;     \
-  } while(false)
-
-//
-// Start of main test program
-//
-int main(int argc, char **argv)
-{
-  ErrorCode err;
-  int ierr, rank;
-  const char *filename, *tagName;
-  double tagValue;
-  MPI_Comm comm = MPI_COMM_WORLD;
-  std::string read_options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS;PARTITION_DISTRIBUTE;PARALLEL_GHOSTS=3.0.1;PARALLEL_COMM=0";
-
-  // Initialize MPI first
-  ierr = MPI_Init(&argc, &argv);
-  MPICHKERR(ierr, "MPI_Init failed");
-
-  // Print usage if not enough arguments
-  if (argc < 2) {
-    std::cerr << "Usage: ";
-    std::cerr << argv[0] << " <file_name><tag_name><tag_value>" << std::endl;
-    std::cerr << "file_name    : mesh file name" << std::endl;
-    std::cerr << "tag_name     : name of tag to add to mesh" << std::endl;
-    std::cerr << "tag_value    : a double valued string to set for highest-dimensional entities in the mesh for the named tag" << std::endl;
-
-    ierr = MPI_Finalize();
-    MPICHKERR(ierr, "MPI_Finalize failed; Aborting");
-
-    return 1;
-  }
-
-  ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-  MPICHKERR(ierr, "MPI_Comm_rank failed");
-
-  // Create the moab instance
-  Interface *mbi = new Core();
-  CHKERR(NULL == mbi, "MOAB constructor failed");
-
-  // Get the input options
-  dbgprint( "Getting options..." );
-  err = get_file_options(argc, argv, &filename, &tagName, tagValue);
-  CHKERR(err, "get_file_options failed");
-
-  // Print out the input parameters
-  dbgprint( "    Input Parameters - " );
-  dbgprint( "      Filenames: " << filename );
-  dbgprint( "      Tag: Name=" << tagName << " Value=" << tagValue );
-
-  // Create root sets for each mesh.  Then pass these
-  // to the load_file functions to be populated.
-  EntityHandle rootset, partnset;
-  err = mbi->create_meshset(MESHSET_SET, rootset);
-  CHKERR(err, "Creating root set failed");
-  err = mbi->create_meshset(MESHSET_SET, partnset);
-  CHKERR(err, "Creating partition set failed");
-
-  // Create the parallel communicator object with the partition handle associated with MOAB
-  ParallelComm *parallel_communicator = ParallelComm::get_pcomm( mbi, partnset, &comm );
-
-  // Load the file from disk with given options
-  err = mbi->load_file( filename, &rootset, read_options.c_str() );
-  CHKERR(err, "MOAB::load_file failed");
-
-  // Get tag handles for passed in tags
-  dbgprint( "Getting tag handle " << tagName << "..." );
-  Tag tagReduce, tagExchange;
-  {
-    std::stringstream sstr;
-    sstr << tagName << "_RED";
-    err = mbi->tag_get_handle(sstr.str().c_str(), 1, MB_TYPE_DOUBLE, tagReduce, MB_TAG_CREAT|MB_TAG_DENSE, &tagValue);
-    CHKERR(err, "Retrieving tag handles failed");
-    sstr.str(""); sstr << tagName << "_EXC";
-    err = mbi->tag_get_handle(sstr.str().c_str(), 1, MB_TYPE_INTEGER, tagExchange, MB_TAG_CREAT|MB_TAG_DENSE, &tagValue);
-    CHKERR(err, "Retrieving tag handles failed");
-  }
-
-  // Set local tag data for reduction
-  {
-    Range partEnts;
-    err = parallel_communicator->get_part_entities(partEnts);
-    CHKERR(err, "ParallelComm::get_part_entities failed");
-    // Output what is in current partition sets
-    std::cout << "[" << rank << "]: Number of Partitioned entities: " <<  partEnts.size() << std::endl;
-    MPI_Barrier(comm);
-
-    std::vector<double> tagValues(partEnts.size(), tagValue*(rank+1));
-    err = mbi->tag_set_data(tagReduce, partEnts, &tagValues[0]);
-    CHKERR(err, "Setting local tag data failed during reduce phase");
-
-    Range dummy;
-    dbgprint( "Reducing tags between processors " );
-    err = parallel_communicator->reduce_tags(tagReduce, MPI_SUM, dummy/*partEnts*/);
-    CHKERR(err, "Reducing tags between processors failed");
-
-    partEnts.clear();
-  }
-
-  // Set local tag data for exchange
-  {
-    Range partEnts, dimEnts;
-    for (int dim = 0; dim <= 2; dim++) {
-      err = mbi->get_entities_by_dimension(rootset, dim, dimEnts, false);
-      std::vector<int> tagValues(dimEnts.size(), static_cast<int>(tagValue)*(rank+1)*(dim+1));
-      err = mbi->tag_set_data(tagExchange, dimEnts, &tagValues[0]);
-      CHKERR(err, "Setting local tag data failed during exchange phase");
-      partEnts.merge(dimEnts);
-    }
-
-    dbgprint( "Exchanging tags between processors " );
-    err = parallel_communicator->exchange_tags(tagExchange, partEnts);
-    CHKERR(err, "Exchanging tags between processors failed");
-  }
-
-  // Write out to output file to visualize reduction/exchange of tag data
-  mbi->write_file("test.h5m", "H5M", "PARALLEL=WRITE_PART");
-
-  // Done, cleanup
-  delete mbi;
-
-  dbgprint( "********** reduce_exchange_tags DONE! **********" );
-  MPI_Finalize();
-  return 0;
-}
+/** \example ReduceExchangeTags ReduceExchangeTags.cpp
+ * \brief Example program that shows the use case for performing tag data exchange
+ * between parallel processors in order to sync data on shared entities. The reduction
+ * operation on tag data is also shown where the user can perform any of the actions supported
+ * by MPI_Op on data residing on shared entities.
+ *
+ * This example:
+ * 0.  Initialize MPI and instantiate MOAB
+ * 1.  Get user options: Input mesh file name, tag name (default: USERTAG), tag value (default: 1.0)
+ * 2.  Create the root and partition sets
+ * 3.  Instantiate ParallelComm and read the mesh file in parallel using appropriate options
+ * 4.  Create two tags: USERTAG_EXC (exchange) and USERTAG_RED (reduction)
+ * 5.  Set tag data and exchange shared entity information between processors
+ * 5a. Get entities in all dimensions and set local (current rank, dimension) dependent data for
+ *     exchange tag (USERTAG_EXC)
+ * 5b. Perform exchange of tag data so that data on shared entities are synced via ParallelCommunicator.
+ * 6.  Set tag data and reduce shared entity information between processors using MPI_SUM
+ * 6a. Get higher dimensional entities in the current partition and set local (current rank)
+ *     dependent data for reduce tag (USERTAG_RED)
+ * 6b. Perform the reduction operation (MPI_SUM) on shared entities via ParallelCommunicator.
+ * 7. Destroy the MOAB instance and finalize MPI
+ *
+ * To run: mpiexec -n 2 ./ReduceExchangeTags <mesh_file> <tag_name> <tag_value>
+ * Example: mpiexec -n 2 ./ReduceExchangeTags ../MeshFiles/unittest/64bricks_1khex.h5m USERTAG 100
+ *
+ */
+
+#include "moab/ParallelComm.hpp"
+#include "MBParallelConventions.h"
+#include "moab/Core.hpp"
+#include <iostream>
+#include <string>
+#include <sstream>
+#include <cstdlib>  // for atof()
+
+using namespace moab;
+
+// Error routines for use with MOAB API
+#define CHKERR(CODE, MSG)                                 \
+  do {                                                    \
+    if (MB_SUCCESS != (CODE)) {                           \
+      std::string errstr;  mbi->get_last_error(errstr);   \
+      std::cerr << errstr << std::endl;                   \
+      std::cerr << MSG << std::endl;                      \
+      MPI_Finalize();                                     \
+      exit(EXIT_FAILURE);                                 \
+    }                                                     \
+  } while(false)
+
+// Error routines for use with MPI API
+#define MPICHKERR(CODE, MSG)                              \
+  do {                                                    \
+    if (0 != (CODE)) {                                    \
+      std::cerr << MSG << std::endl;                      \
+      MPI_Finalize();                                     \
+      exit(EXIT_FAILURE);                                 \
+    }                                                     \
+  } while(false)
+
+#define dbgprint(MSG)                                \
+  do {                                              \
+      if (!rank) std::cerr << MSG << std::endl;     \
+  } while(false)
+
+#define dbgprintall(MSG)                                      \
+  do {                                                        \
+      std::cerr << "[" << rank << "]: " << MSG << std::endl;  \
+  } while(false)
+
+
+// Function to parse input parameters
+ErrorCode get_file_options(int argc, char **argv,
+                           std::string& filename,
+                           std::string& tagName,
+                           double&      tagValues)
+{
+  // get mesh filename
+  if (argc > 1) filename = std::string(argv[1]);
+  else filename = std::string(MESH_DIR) + std::string("/64bricks_1khex.h5m");
+
+  // get tag selection options
+  if (argc > 2) tagName = std::string(argv[2]);
+  else tagName = "USERTAG";
+
+  if (argc > 3)  tagValues = atof(argv[3]);
+  else tagValues = 1.0;
+
+  return MB_SUCCESS;
+}
+
+//
+// Start of main test program
+//
+int main(int argc, char **argv)
+{
+  ErrorCode err;
+  int ierr, rank;
+  std::string filename, tagName;
+  double tagValue;
+  MPI_Comm comm = MPI_COMM_WORLD;
+  /// Parallel Read options:
+  ///   PARALLEL = type {READ_PART}
+  ///   PARTITION = PARALLEL_PARTITION : Partition as you read
+  ///   PARALLEL_RESOLVE_SHARED_ENTS : Communicate to all processors to get the shared adjacencies consistently in parallel
+  ///   PARALLEL_GHOSTS : a.b.c
+  ///                   : a = 3 - highest dimension of entities to ghost
+  ///                   : b = 0 - dimension of bridge entities (vertices)
+  ///                   : c = 1 - number of ghost layers
+  ///   PARALLEL_COMM = index
+  std::string read_options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS;PARTITION_DISTRIBUTE;PARALLEL_GHOSTS=3.0.1;PARALLEL_COMM=0";
+
+  // Initialize MPI first
+  ierr = MPI_Init(&argc, &argv);
+  MPICHKERR(ierr, "MPI_Init failed");
+
+  ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPICHKERR(ierr, "MPI_Comm_rank failed");
+
+  // Print usage when no arguments are given; since all arguments now
+  // have defaults, execution continues with those defaults
+  if (argc < 2 && !rank) {
+    std::cerr << "Usage: ";
+    std::cerr << argv[0] << " <file_name> <tag_name> <tag_value>" << std::endl;
+    std::cerr << "file_name    : mesh file name" << std::endl;
+    std::cerr << "tag_name     : name of tag to add to mesh" << std::endl;
+    std::cerr << "tag_value    : a double valued string to set for highest-dimensional entities in the mesh for the named tag" << std::endl;
+  }
+
+  dbgprint( "********** reduce_exchange_tags **********\n" );
+
+  // Create the moab instance
+  Interface *mbi = new Core();
+  CHKERR(NULL == mbi, "MOAB constructor failed");
+
+  // Get the input options
+  err = get_file_options(argc, argv, filename, tagName, tagValue);
+  CHKERR(err, "get_file_options failed");
+
+  // Print out the input parameters
+  dbgprint( " Input Parameters - " );
+  dbgprint( "   Filenames: " << filename );
+  dbgprint( "   Tag: Name=" << tagName << " Value=" << tagValue << std::endl );
+
+  // Create root sets for each mesh.  Then pass these
+  // to the load_file functions to be populated.
+  EntityHandle rootset, partnset;
+  err = mbi->create_meshset(MESHSET_SET, rootset);
+  CHKERR(err, "Creating root set failed");
+  err = mbi->create_meshset(MESHSET_SET, partnset);
+  CHKERR(err, "Creating partition set failed");
+
+  // Create the parallel communicator object with the partition handle associated with MOAB
+  ParallelComm *parallel_communicator = ParallelComm::get_pcomm( mbi, partnset, &comm );
+
+  // Load the file from disk with given options
+  err = mbi->load_file( filename.c_str(), &rootset, read_options.c_str() );
+  CHKERR(err, "MOAB::load_file failed");
+
+  // Create two tag handles: Exchange and Reduction operations
+  dbgprint( "-Creating tag handle " << tagName << "..." );
+  Tag tagReduce, tagExchange;
+  {
+    std::stringstream sstr;
+    // Create the exchange tag: default name = USERTAG_EXC
+    sstr << tagName << "_EXC";
+    err = mbi->tag_get_handle(sstr.str().c_str(), 1, MB_TYPE_INTEGER, tagExchange, MB_TAG_CREAT|MB_TAG_DENSE, &tagValue);
+    CHKERR(err, "Retrieving tag handles failed");
+
+    // Create the reduction tag: default name = USERTAG_RED
+    sstr.str(""); sstr << tagName << "_RED";
+    err = mbi->tag_get_handle(sstr.str().c_str(), 1, MB_TYPE_DOUBLE, tagReduce, MB_TAG_CREAT|MB_TAG_DENSE, &tagValue);
+    CHKERR(err, "Retrieving tag handles failed");
+  }
+
+  // Perform exchange of tag data
+  dbgprint( "-Exchanging tags between processors " );
+  {
+    Range partEnts, dimEnts;
+    for (int dim = 0; dim <= 3; dim++) {
+      // Get all entities of dimension = dim; clear the range first,
+      // since get_entities_by_dimension appends to its output range
+      dimEnts.clear();
+      err = mbi->get_entities_by_dimension(rootset, dim, dimEnts, false);
+      CHKERR(err, "Getting entities by dimension failed");
+
+      std::vector<int> tagValues(dimEnts.size(), static_cast<int>(tagValue)*(rank+1)*(dim+1));
+      // Set local tag data for exchange
+      err = mbi->tag_set_data(tagExchange, dimEnts, &tagValues[0]);
+      CHKERR(err, "Setting local tag data failed during exchange phase");
+      // Merge entities into parent set
+      partEnts.merge(dimEnts);
+    }
+
+    // Exchange tags between processors
+    err = parallel_communicator->exchange_tags(tagExchange, partEnts);
+    CHKERR(err, "Exchanging tags between processors failed");
+  }
+
+  // Perform reduction of tag data
+  dbgprint( "-Reducing tags between processors " );
+  {
+    Range partEnts;
+    // Get all higher dimensional entities belonging to current partition
+    err = parallel_communicator->get_part_entities(partEnts);
+    CHKERR(err, "ParallelComm::get_part_entities failed");
+
+    // Output what is in current partition sets
+    dbgprintall( "Number of Partitioned entities: " <<  partEnts.size() );
+    MPI_Barrier(comm);
+
+    // Set local tag data for reduction
+    std::vector<double> tagValues(partEnts.size(), tagValue*(rank+1));
+    err = mbi->tag_set_data(tagReduce, partEnts, &tagValues[0]);
+    CHKERR(err, "Setting local tag data failed during reduce phase");
+
+    Range dummy;
+    // Reduce tag data using MPI_SUM on the interface between partitions
+    err = parallel_communicator->reduce_tags(tagReduce, MPI_SUM, dummy/*partEnts*/);
+    CHKERR(err, "Reducing tags between processors failed");
+  }
+  // Write out to output file to visualize reduction/exchange of tag data
+  err = mbi->write_file("test.h5m", "H5M", "PARALLEL=WRITE_PART");
+  CHKERR(err, "Writing output file failed");
+
+  // Done, cleanup
+  delete mbi;
+
+  dbgprint( "\n********** reduce_exchange_tags DONE! **********" );
+  MPI_Finalize();
+  return 0;
+}
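
For readers skimming the diff: reduce_tags applies the given MPI_Op (here MPI_SUM)
entry-wise to the tag values that the sharing processors hold for each shared entity.
A minimal stand-alone analogue in plain MPI (not MOAB; the value tagValue*(rank+1)
mirrors what the example sets on each rank) is:

  #include <mpi.h>
  #include <iostream>

  int main(int argc, char **argv)
  {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Each rank contributes tagValue*(rank+1); for an entity shared by
    // all ranks, MPI_SUM yields tagValue*(1 + 2 + ... + size).
    double tagValue = 1.0;
    double local = tagValue * (rank + 1), summed = 0.0;
    MPI_Allreduce(&local, &summed, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

    if (0 == rank)
      std::cout << "reduced value: " << summed << std::endl;
    MPI_Finalize();
    return 0;
  }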


https://bitbucket.org/fathomteam/moab/commits/504deef3cb46/
Changeset:   504deef3cb46
Branch:      None
User:        vijaysm
Date:        2013-04-09 01:24:29
Summary:     Merge remote-tracking branch 'upstream/master'

Affected #:  4 files

diff --git a/.gitignore b/.gitignore
index fe08584..6b35368 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,6 +22,7 @@ configure
 doc/config.tex
 doc/dev.dox
 doc/user.dox
+doc/user/*
 examples/examples.make
 itaps/iBase_f.h
 itaps/igeom/FBiGeom-Defs.inc
@@ -66,3 +67,112 @@ lib/*
 share/*
 bin/*
 *~
+examples/HelloMoabPar
+itaps/igeom/FBiGeom_protos.h
+itaps/igeom/testSmooth2
+itaps/igeom/testSmoothGeom
+itaps/igeom/testgeom
+itaps/imesh/FindAdjacencyF90
+itaps/imesh/MOAB_iMeshP_unit_tests
+itaps/imesh/ScdMeshF77
+itaps/imesh/ScdMeshF90
+itaps/imesh/partest
+test/adaptive_kd_tree_tests
+test/bsp_tree_poly_test
+test/bsp_tree_test
+test/*.gen
+test/coords_connect_iterate
+test/cropvol_test
+test/dual/dual_test
+test/file_options_test
+test/geom_util_test
+test/gttool_test
+test/h5file/dump_sets
+test/h5file/h5legacy
+test/h5file/h5partial
+test/h5file/h5portable
+test/h5file/h5regression
+test/h5file/h5sets_test
+test/h5file/h5test
+test/h5file/h5varlen
+test/*.g
+test/homxform_test
+test/io/cub_file_test
+test/io/exodus_test
+test/io/gmsh_test
+test/io/ideas_test
+test/io/*.g
+test/io/nastran_test
+test/io/read_cgm_test
+test/io/read_nc
+test/io/read_ucd_nc
+test/io/readutil_test
+test/io/smf_test
+test/io/stl_test
+test/io/tqdcfr
+test/io/vtk_test
+test/kd_tree_test
+test/kd_tree_time
+test/kd_tree_tool
+test/mbcn_test
+test/mbfacet_test
+test/mbground_test
+test/mesh_set_test
+test/moab_test
+test/obb/obb_test
+test/obb/obb_time
+test/obb/obb_tree_tool
+test/obb_test
+test/oldinc/test_oldinc
+test/parallel/*.h5m
+test/parallel/*.vtk
+test/parallel/mbparallelcomm_test
+test/parallel/mhdf_parallel
+test/parallel/par_coupler_test
+test/parallel/par_intx_sph
+test/parallel/parallel_hdf5_test
+test/parallel/parallel_unit_tests
+test/parallel/parallel_write_test
+test/parallel/parmerge
+test/parallel/partcheck
+test/parallel/pcomm_serial
+test/parallel/pcomm_unit
+test/parallel/read_nc_par
+test/parallel/scdpart
+test/parallel/scdtest
+test/parallel/structured3
+test/parallel/uber_parallel_test
+test/parallel/ucdtrvpart
+test/perf/adj_time
+test/perf/perf
+test/perf/perftool
+test/perf/seqperf
+test/perf/tstt_perf_binding
+test/range_test
+test/reorder_test
+test/scdseq_test
+test/seq_man_test
+test/tag_test
+test/test_adj
+test/test_prog_opt
+test/var_len_test
+test/var_len_test_no_template
+test/xform_test
+tools/dagmc/pt_vol_test
+tools/dagmc/ray_fire_test
+tools/dagmc/test_geom
+tools/dagmc/update_coords
+tools/mbcoupler/*.h5m
+tools/mbcslam/case1_test
+tools/mbcslam/intersect1.h5m
+tools/mbcslam/intx.vtk
+tools/mbcslam/intx1.vtk
+tools/mbcslam/intx_in_plane_test
+tools/mbcslam/intx_on_sphere_test
+tools/mbcslam/lagr.h5m
+tools/mbcslam/spec_visu_test
+tools/mbcslam/spectral.vtk
+tools/mbcslam/spherical_area_test
+.project
+.cproject
+

diff --git a/MeshFiles/unittest/disk.h5m b/MeshFiles/unittest/disk.h5m
new file mode 100644
index 0000000..ef19883
Binary files /dev/null and b/MeshFiles/unittest/disk.h5m differ

diff --git a/examples/HelloMoabPar.cpp b/examples/HelloMoabPar.cpp
new file mode 100644
index 0000000..cf308a7
--- /dev/null
+++ b/examples/HelloMoabPar.cpp
@@ -0,0 +1,154 @@
+/** @example HelloMoabPar.cpp \n
+ * \brief Read mesh into MOAB in parallel \n
+ * This example shows the simplest way of telling MOAB to read in parallel. \n
+ *
+ * 0. Initialize MPI and get the rank and number of processors \n
+ * 1. Process arguments (file name and options for parallel read) \n
+ * 2. Initialize MOAB \n
+ * 3. Load a partitioned file in parallel; \n
+ * 4. retrieve shared entities on each processor \n
+ * 5. Filter owned entities among shared ones on each processor \n
+ * 6. Exchange ghost layers, and repeat the reports \n
+ *
+ * To compile: \n
+ *    make HelloMoabPar MOAB_DIR=<installdir>  \n
+ * To run: mpiexec -np 4 HelloMoabPar \n
+ *  (depending on your configuration, LD_LIBRARY_PATH may need to contain <hdf5>/lib folder)
+ *
+ */
+
+#include "moab/ParallelComm.hpp"
+#include "MBParallelConventions.h"
+#include "moab/Core.hpp"
+#include <iostream>
+#include <string>
+#include <cstdlib>  // for malloc/free
+
+using namespace moab;
+
+int main(int argc, char **argv)
+{
+  MPI_Init(&argc, &argv);
+
+  int nprocs, rank;
+  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  std::string filename;
+  std::string options;
+  if (3 != argc)
+  {
+    if (rank == 0)
+    {
+      std::cout << "Usage: " << argv[0] << " <filename><options (separated by;)>\n ";
+    }
+    /* this file has a partition with 4 parts */
+    filename = "../MeshFiles/unittest/disk.h5m";
+    /*  Options for reading
+     *  - read in parallel
+     *  - use PARALLEL_PARTITION tag
+     *  - resolve shared entities after reading
+    */
+    options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
+  }
+  else
+  {
+    filename = argv[1];
+    options = argv[2];
+  }
+  if (rank == 0)
+    std::cout << "reading file " << filename << "\n  with options:" << options <<
+      "\n  on " << nprocs << " processors\n";
+
+  // get MOAB instance and read the file with the specified options
+  Interface *mbImpl = new Core;
+  if (NULL == mbImpl) return 1;
+  ErrorCode rval = mbImpl->load_file(filename.c_str(), 0, options.c_str());
+  if (rval != MB_SUCCESS) return 1;
+
+  // get the ParallelComm instance
+  ParallelComm* pcomm = ParallelComm::get_pcomm(mbImpl, 0);
+  MPI_Comm comm = pcomm->comm();
+  if (0 == pcomm) return 1;
+
+  // get all shared entities with other processors
+  Range shared_ents;
+  rval = pcomm->get_shared_entities(-1, // -1 means shared with all other processors
+      shared_ents);
+  if (rval != MB_SUCCESS) return 1;
+  /* Among shared entities, get those owned by the current processor
+   * For this, use a filter operation;
+   * Each shared entity is owned by exactly one processor;
+   * An entity could be simply-shared (with exactly one other processor) or
+   *  multi-shared.
+   */
+  Range owned_entities;
+  rval = pcomm->filter_pstatus(shared_ents, // pass entities that we want to filter
+      PSTATUS_NOT_OWNED, // status we are looking for
+      PSTATUS_NOT, // operation applied ; so it will return owned entities (!not_owned = owned)
+      -1, // this means all processors
+      &owned_entities);
+  if (rval != MB_SUCCESS) return 1;
+  int nums[4]={0}; // to store the owned entities per dimension (sent as MPI_INT)
+  for (int i=0; i<3; i++)
+  {
+    nums[i]=(int)owned_entities.num_of_dimension(i);
+  }
+  int * rbuf = NULL; // gather buffer; allocated (and used) only on the root rank
+  if (rank==0)
+    rbuf = (int *)malloc(nprocs*4*sizeof(int));
+  MPI_Gather( nums, 4, MPI_INT, rbuf, 4, MPI_INT, 0, comm);
+  // print the stats gathered:
+  if (rank == 0)
+  {
+    for (int i=0; i<nprocs; i++)
+    {
+      std::cout << " shared, owned entities on proc " << i << " :" << rbuf[4*i] << " verts, " <<
+          rbuf[4*i+1] << " edges, " << rbuf[4*i+2] << " faces\n";
+    }
+
+  }
+
+  /*
+   * Now exchange 1 layer of ghost elements, using vertices as bridge
+   *   we could have done this as part of reading process, by passing an extra read option
+   *    ";PARALLEL_GHOSTS=2.0.1.0"
+   */
+  rval = pcomm->exchange_ghost_cells(2, // int ghost_dim,
+                                     0, // int bridge_dim,
+                                     1, //int num_layers,
+                                     0, //int addl_ents,
+                                     true); // bool store_remote_handles);
+  if (rval != MB_SUCCESS) return 1;
+
+  // repeat the reports, after ghost exchange
+  shared_ents.clear();
+  owned_entities.clear();
+  rval = pcomm->get_shared_entities(-1, // -1 means shared with all other processors
+        shared_ents);
+  if (rval != MB_SUCCESS) return 1;
+  rval = pcomm->filter_pstatus(shared_ents,
+        PSTATUS_NOT_OWNED,
+        PSTATUS_NOT,
+        -1,
+        &owned_entities);
+  if (rval != MB_SUCCESS)  return 1;
+
+  // find out how many shared entities of each dimension are owned on this processor
+  for (int i=0; i<3; i++)
+    nums[i]=(int)owned_entities.num_of_dimension(i);
+
+  // gather the statistics on processor 0
+  MPI_Gather( nums, 4, MPI_INT, rbuf, 4, MPI_INT, 0, comm);
+  if (rank == 0)
+  {
+    std::cout << " \n\n After exchanging one ghost layer: \n";
+    for (int i=0; i<nprocs; i++)
+    {
+      std::cout << " shared, owned entities on proc " << i << " :" << rbuf[4*i] << " verts, " <<
+          rbuf[4*i+1] << " edges, " << rbuf[4*i+2] << " faces\n";
+    }
+    free(rbuf);
+  }
+  MPI_Finalize();
+
+  return 0;
+}
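
A note on the filter_pstatus call above: combining PSTATUS_NOT_OWNED with the
PSTATUS_NOT operation keeps exactly those shared entities whose not-owned bit is
clear, i.e. the ones this rank owns. A minimal bitmask analogue of that selection
(plain C++, not the MOAB API; the status array below is made up for illustration):

  #include <cstdio>
  #include <vector>

  // Hypothetical per-entity status bits mirroring the PSTATUS_* idea
  const unsigned char STATUS_SHARED    = 0x1;
  const unsigned char STATUS_NOT_OWNED = 0x2;

  int main()
  {
    // status flags for five made-up shared entities
    std::vector<unsigned char> status;
    status.push_back(STATUS_SHARED);
    status.push_back(STATUS_SHARED | STATUS_NOT_OWNED);
    status.push_back(STATUS_SHARED);
    status.push_back(STATUS_SHARED | STATUS_NOT_OWNED);
    status.push_back(STATUS_SHARED);

    std::vector<size_t> owned;
    for (size_t i = 0; i < status.size(); i++)
      // "NOT of NOT_OWNED": keep entities whose not-owned bit is unset
      if (!(status[i] & STATUS_NOT_OWNED))
        owned.push_back(i);

    std::printf("owned %u of %u shared entities\n",
                (unsigned)owned.size(), (unsigned)status.size());
    return 0;
  }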

diff --git a/examples/makefile b/examples/makefile
index 1d8f6da..e1b8ef0 100644
--- a/examples/makefile
+++ b/examples/makefile
@@ -15,7 +15,9 @@ HelloMOAB : HelloMOAB.o
 ReduceExchangeTags : ReduceExchangeTags.o
 	${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK}
 
+HelloMoabPar: HelloMoabPar.o
+	${MOAB_CXX} -o $@ $< ${MOAB_LIBS_LINK} 
+	
 .cpp.o :
 	${MOAB_CXX} ${MOAB_CXXFLAGS} ${MOAB_INCLUDES} -DMESH_DIR=\"${MESH_DIR}\" -c $<
 
-


https://bitbucket.org/fathomteam/moab/commits/eea951f72df3/
Changeset:   eea951f72df3
Branch:      master
User:        vijaysm
Date:        2013-04-09 01:26:22
Summary:     Adding reduce tag example to ignore.

Affected #:  1 file

diff --git a/.gitignore b/.gitignore
index 6b35368..52cd4d2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -175,4 +175,6 @@ tools/mbcslam/spectral.vtk
 tools/mbcslam/spherical_area_test
 .project
 .cproject
+examples/*.h5m
+examples/ReduceExchangeTags

Repository URL: https://bitbucket.org/fathomteam/moab/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

