[MOAB-dev] r1716 - in MOAB/trunk: . tools/mbzoltan

tautges at mcs.anl.gov
Fri Mar 28 11:42:32 CDT 2008


Author: tautges
Date: 2008-03-28 11:42:32 -0500 (Fri, 28 Mar 2008)
New Revision: 1716

Modified:
   MOAB/trunk/mbparallelcomm_test.cpp
   MOAB/trunk/tools/mbzoltan/MBZoltan.cpp
   MOAB/trunk/tools/mbzoltan/main.cpp
Log:
Adding the capability to track remote handles on sharing processors
for shared entities.

MBParallelComm:
- added substantially more packing/unpacking debug output, to make it
  easier to match up packing and unpacking calls
- can now communicate and store remote handle data (controlled by a
  new option to the communicate_entities function)
- added functions for getting remote handles given local ones, and
  vice versa (a usage sketch follows this log)

MBZoltan:
- updated for the recent parallel changes

mbparallelcomm_test:
- added a 4th option for testing packing/unpacking, plus more debug
  output
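
A minimal usage sketch of the new remote-handle queries follows.  The
function name and signature used here (get_remote_handles) are
assumptions for illustration only; the actual declarations live in
MBParallelComm.hpp and are not part of this diff.

    // Hypothetical sketch: translate locally owned handles into the
    // corresponding handles on a sharing processor.  Assumes the
    // usual MOAB headers (MBParallelComm.hpp, MBRange.hpp) and a
    // non-empty shared_ents range.
    int to_proc = 1;       // rank of the sharing processor
    MBParallelComm *pcomm = new MBParallelComm(mbImpl);
    MBRange shared_ents;   // entities known to be shared with to_proc
    std::vector<MBEntityHandle> remote_handles(shared_ents.size());
    MBErrorCode result =
      pcomm->get_remote_handles(to_proc, shared_ents, &remote_handles[0]);
    if (MB_SUCCESS != result) {
      // remote handles unavailable; report and bail out
    }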

Modified: MOAB/trunk/mbparallelcomm_test.cpp
===================================================================
--- MOAB/trunk/mbparallelcomm_test.cpp	2008-03-28 16:42:01 UTC (rev 1715)
+++ MOAB/trunk/mbparallelcomm_test.cpp	2008-03-28 16:42:32 UTC (rev 1716)
@@ -13,6 +13,7 @@
 #include "ScdVertexData.hpp"
 #include "StructuredElementSeq.hpp"
 #include "SequenceManager.hpp"
+#include "MBError.hpp"
 #include "mpi.h"
 #include <iostream>
 #include <sstream>
@@ -20,7 +21,7 @@
 
 #define REALTFI 1
 
-const bool debug = true;
+const bool debug = false;
 
 #define ERROR(a, b) {std::cerr << a << std::endl; return b;}
 
@@ -30,6 +31,12 @@
         if (last_error.empty()) std::cerr << "(none)" << std::endl;\
         else std::cerr << last_error << std::endl;\
         }
+#define RRA(a) if (MB_SUCCESS != result) {\
+      std::string tmp_str; mbImpl->get_last_error(tmp_str);\
+      tmp_str.append("\n"); tmp_str.append(a);\
+      dynamic_cast<MBCore*>(mbImpl)->get_error_handler()->set_last_error(tmp_str.c_str()); \
+      return result;}
+
 MBErrorCode create_linear_mesh(MBInterface *mbImpl,
                                int N, int M, int &nshared);
 
@@ -40,6 +47,8 @@
                       const char *tag_name, int tag_val, int distrib,
                       int parallel_option);
 
+MBErrorCode test_packing(MBInterface *mbImpl, const char *filename);
+
 MBErrorCode report_nsets(MBInterface *mbImpl);
 
 int main(int argc, char **argv) 
@@ -141,6 +150,17 @@
         }
         nshared = -1;
         break;
+
+      case 4:
+        filename = argv[npos++];
+        tmp_result = test_packing(mbImpl, filename);
+        if (MB_SUCCESS != tmp_result) {
+          result = tmp_result;
+          std::cerr << "Packing test failed; error message:" << std::endl;
+          PRINT_LAST_ERROR
+        }
+        break;
+        
       default:
         std::cerr << "Unrecognized option \"" << this_opt
                   << "\"; skipping." << std::endl;
@@ -149,7 +169,7 @@
     
 
     if (0 == rank) rtime = MPI_Wtime();
-    if (MB_SUCCESS == tmp_result) {
+    if (MB_SUCCESS == tmp_result && 4 != this_opt) {
         // now figure out which vertices are shared
       MBParallelComm *pcomm = new MBParallelComm(mbImpl);
       tmp_result = pcomm->resolve_shared_ents();
@@ -266,20 +286,42 @@
       std::vector<int> ids( b .size());\
       result = mbImpl->tag_get_data(gidtag, b, &ids[0]); \
       if (MB_SUCCESS == result) {\
-        std::cout << "Proc " << rank << ": " << c << ids[0]; \
+        std::cout << "Proc " << rank << ": " << c \
+          << " (total " << b.size() << "): " \
+           << ids[0]; \
         for (unsigned int i = 1; i < b .size(); i++) \
           std::cout << ", " << ids[i]; \
         std::cout << std::endl; \
       } } }
   
-  PRINTSETS(mtag, matsets, "material sets: ", NULL);
+  PRINTSETS(mtag, matsets, "material sets", NULL);
   
   int tval = 3;
   void *pval = &tval;
   
-  PRINTSETS(gtag, geomsets, "geom sets: ", &pval);
+  PRINTSETS(gtag, geomsets, "geom sets (vols)", &pval);
+  tval = 2;
+  geomsets.clear();
+  PRINTSETS(gtag, geomsets, "geom sets (surfs)", &pval);
+  tval = 1;
+  geomsets.clear();
+  PRINTSETS(gtag, geomsets, "geom sets (curves)", &pval);
+  tval = 0;
+  geomsets.clear();
+  PRINTSETS(gtag, geomsets, "geom sets (verts)", &pval);
   
-  PRINTSETS(ptag, parsets, "partition sets: ", NULL);
+  PRINTSETS(ptag, parsets, "partition sets", NULL);
+
+  if (debug) {
+      // list info on all ent sets, reuse parsets
+    parsets.clear();
+    result = mbImpl->get_entities_by_type(0, MBENTITYSET, parsets);
+    if (MB_SUCCESS == result) {
+      std::cout << "Total sets (by range): " << parsets.size() << "; sets: " << std::endl;
+      parsets.print("  ");
+      mbImpl->list_entities(parsets);
+    }
+  }
   
   return MB_SUCCESS;
 }
@@ -662,3 +704,36 @@
   }
 }
 
+MBErrorCode test_packing(MBInterface *mbImpl, const char *filename) 
+{
+    // read the mesh
+  MBEntityHandle file_set;
+  MBErrorCode result = mbImpl->load_file(filename, file_set, NULL);
+  if (MB_SUCCESS != result) {
+    std::cerr << "Reading file failed; message:" << std::endl;
+    PRINT_LAST_ERROR;
+    return result;
+  }
+  
+    // get 3d entities and pack a buffer with them
+  MBRange ents, new_ents, whole_range;
+  result = mbImpl->get_entities_by_dimension(0, 3, ents);
+  RRA("Getting 3d ents failed.");
+  
+  MBParallelComm *pcomm = new MBParallelComm(mbImpl);
+  int buff_size;
+  result = pcomm->pack_buffer(ents, false, true, true, false, -1,
+                              whole_range, buff_size);
+  RRA("Packing buffer count (non-stored handles) failed.");
+
+  pcomm->buffer_size(buff_size);
+  
+  result = pcomm->pack_buffer(ents, false, true, false, false, -1,
+                              whole_range, buff_size);
+  RRA("Packing buffer (non-stored handles) failed.");
+
+  result = pcomm->unpack_buffer(new_ents, false, -1);
+  RRA("Unacking buffer (non-stored handles) failed.");
+
+  return MB_SUCCESS;
+}
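
The test above follows pack_buffer's two-pass idiom: judging from the
error strings, the first call only counts the bytes required, then
buffer_size() reserves that much, and the second call does the actual
packing.  Below is a minimal, self-contained sketch of that general
count-then-pack pattern (not the MOAB implementation):

    #include <cassert>
    #include <cstddef>
    #include <cstring>
    #include <vector>

    // Pass 1 computes the required size; pass 2 fills the buffer.
    std::vector<unsigned char> pack_ints(const std::vector<int> &vals)
    {
      size_t needed = vals.size() * sizeof(int);    // "just count" pass
      std::vector<unsigned char> buffer(needed);    // allocate once
      unsigned char *ptr = buffer.empty() ? 0 : &buffer[0];
      for (size_t i = 0; i < vals.size(); ++i) {    // actual packing pass
        std::memcpy(ptr, &vals[i], sizeof(int));
        ptr += sizeof(int);
      }
      assert(buffer.empty() || ptr == &buffer[0] + needed);
      return buffer;
    }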

Modified: MOAB/trunk/tools/mbzoltan/MBZoltan.cpp
===================================================================
--- MOAB/trunk/tools/mbzoltan/MBZoltan.cpp	2008-03-28 16:42:01 UTC (rev 1715)
+++ MOAB/trunk/tools/mbzoltan/MBZoltan.cpp	2008-03-28 16:42:32 UTC (rev 1716)
@@ -30,7 +30,7 @@
 #include "MBRange.hpp"
 #include "MBWriteUtilIface.hpp"
 #include "MeshTopoUtil.hpp"
-#include "MBParallelComm.hpp"
+#include "parallel/MBParallelComm.hpp"
 #include "MBTagConventions.hpp"
 #include "MBCN.hpp"
 
@@ -58,7 +58,7 @@
       !strcmp(zmethod, "PHG") && !strcmp(zmethod, "PARMETIS") &&
       !strcmp(zmethod, "OCTPART")) 
   {
-    std::cout << "ERROR node " << mbImpl->proc_config().rank() << ": Method must be "
+    std::cout << "ERROR node " << mbImpl->proc_rank() << ": Method must be "
               << "RCB, RIB, HSFC, Hypergraph (PHG), PARMETIS, or OCTPART"
               << std::endl;
     return MB_FAILURE;
@@ -73,7 +73,7 @@
 
   MBErrorCode result;
   
-  if (mbImpl->proc_config().rank() == 0) {
+  if (mbImpl->proc_rank() == 0) {
     result = assemble_graph(3, pts, ids, adjs, length, elems); RR;
   }
   
@@ -161,8 +161,8 @@
   mbFinalizePoints((int)ids.size(), numExport, exportLocalIds,
                    exportProcs, &assignment);
   
-  if (mbImpl->proc_config().rank() == 0) {
-    MBErrorCode result = write_partition(mbImpl->proc_config().size(), elems, assignment);
+  if (mbImpl->proc_rank() == 0) {
+    MBErrorCode result = write_partition(mbImpl->proc_size(), elems, assignment);
 
     if (MB_SUCCESS != result) return result;
 
@@ -193,14 +193,14 @@
                                      const char *other_method) 
 {
     // should only be called in serial
-  if (mbImpl->proc_config().size() != 1) return MB_FAILURE;
+  if (mbImpl->proc_size() != 1) return MB_FAILURE;
   
   if (NULL != zmethod && strcmp(zmethod, "RCB") && strcmp(zmethod, "RIB") &&
       strcmp(zmethod, "HSFC") && strcmp(zmethod, "Hypergraph") &&
       strcmp(zmethod, "PHG") && strcmp(zmethod, "PARMETIS") &&
       strcmp(zmethod, "OCTPART")) 
   {
-    std::cout << "ERROR node " << mbImpl->proc_config().rank() << ": Method must be "
+    std::cout << "ERROR node " << mbImpl->proc_rank() << ": Method must be "
               << "RCB, RIB, HSFC, Hypergraph (PHG), PARMETIS, or OCTPART"
               << std::endl;
     return MB_FAILURE;
@@ -317,8 +317,8 @@
   if (MB_SUCCESS != result || elems.empty()) return result;
   
     // assign global ids
-  MBParallelComm mbpc(mbImpl, NULL, NULL);
-  result = mbpc.assign_global_ids(dimension, 1); RR;
+  MBParallelComm mbpc(mbImpl);
+  result = mbpc.assign_global_ids(0, dimension); RR;
 
     // now assemble the graph, calling MeshTopoUtil to get bridge adjacencies through d-1 dimensional
     // neighbors
@@ -449,7 +449,7 @@
 
 void MBZoltan::SetRCB_Parameters()
 {
-  if (mbImpl->proc_config().rank() == 0) std::cout << "\nRecursive Coordinate Bisection" << std::endl;
+  if (mbImpl->proc_rank() == 0) std::cout << "\nRecursive Coordinate Bisection" << std::endl;
   // General parameters:
 
   myZZ->Set_Param("DEBUG_LEVEL", "0");     // no debug messages
@@ -464,7 +464,7 @@
 
 void MBZoltan::SetRIB_Parameters()
 {
-  if (mbImpl->proc_config().rank() == 0) std::cout << "\nRecursive Inertial Bisection" << std::endl;
+  if (mbImpl->proc_rank() == 0) std::cout << "\nRecursive Inertial Bisection" << std::endl;
   // General parameters:
 
   myZZ->Set_Param("DEBUG_LEVEL", "0");     // no debug messages
@@ -478,7 +478,7 @@
 
 void MBZoltan::SetHSFC_Parameters()
 {
-  if (mbImpl->proc_config().rank() == 0) std::cout << "\nHilbert Space Filling Curve" << std::endl;
+  if (mbImpl->proc_rank() == 0) std::cout << "\nHilbert Space Filling Curve" << std::endl;
   // General parameters:
 
   myZZ->Set_Param("DEBUG_LEVEL", "0");     // no debug messages
@@ -491,7 +491,7 @@
 
 void MBZoltan::SetHypergraph_Parameters(const char *phg_method)
 {
-  if (mbImpl->proc_config().rank() == 0) std::cout << "\nHypergraph (or PHG): " << std::endl;
+  if (mbImpl->proc_rank() == 0) std::cout << "\nHypergraph (or PHG): " << std::endl;
   // General parameters:
 
   myZZ->Set_Param("DEBUG_LEVEL", "0");     // no debug messages
@@ -503,7 +503,7 @@
 
 void MBZoltan::SetPARMETIS_Parameters(const char *parmetis_method)
 {
-  if (mbImpl->proc_config().rank() == 0) std::cout << "\nPARMETIS: " << parmetis_method << std::endl;
+  if (mbImpl->proc_rank() == 0) std::cout << "\nPARMETIS: " << parmetis_method << std::endl;
   // General parameters:
 
   myZZ->Set_Param("DEBUG_LEVEL", "0");     // no debug messages
@@ -516,7 +516,7 @@
 
 void MBZoltan::SetOCTPART_Parameters(const char *oct_method)
 {
-  if (mbImpl->proc_config().rank() == 0) std::cout << "\nOctree Partitioning: " << oct_method
+  if (mbImpl->proc_rank() == 0) std::cout << "\nOctree Partitioning: " << oct_method
 			   << std::endl;
   // General parameters:
 
@@ -543,23 +543,23 @@
   int *sendNborId;
   int *sendProcs;
 
-  if (mbImpl->proc_config().rank() == 0)
+  if (mbImpl->proc_rank() == 0)
   {
       /* divide pts to start */
 
-    numPts = (int *)malloc(sizeof(int) * mbImpl->proc_config().size());
-    ptsPerProc = npts / mbImpl->proc_config().size();
+    numPts = (int *)malloc(sizeof(int) * mbImpl->proc_size());
+    ptsPerProc = npts / mbImpl->proc_size();
     ptsAssigned = 0;
 
-    for (i=0; i<mbImpl->proc_config().size()-1; i++)
+    for (i=0; i<mbImpl->proc_size()-1; i++)
     {
       numPts[i] = ptsPerProc;
       ptsAssigned += ptsPerProc;
     }
 
-    numPts[mbImpl->proc_config().size()-1] = npts - ptsAssigned;
+    numPts[mbImpl->proc_size()-1] = npts - ptsAssigned;
 
-    mySize = numPts[mbImpl->proc_config().rank()];
+    mySize = numPts[mbImpl->proc_rank()];
     sendPts = pts + (3 * numPts[0]);
     sendIds = ids + numPts[0];
     sendEdges = length + numPts[0];
@@ -576,14 +576,14 @@
     nborProcs = (int *)malloc(sizeof(int) * sum);
 
     for (j=0; j<sum; j++)
-      if ((i = adjs[j]/ptsPerProc) < mbImpl->proc_config().size())
+      if ((i = adjs[j]/ptsPerProc) < mbImpl->proc_size())
         nborProcs[j] = i;
       else
-        nborProcs[j] = mbImpl->proc_config().size() - 1;
+        nborProcs[j] = mbImpl->proc_size() - 1;
 
     sendProcs = nborProcs + (sendNborId - adjs);
 
-    for (i=1; i<mbImpl->proc_config().size(); i++)
+    for (i=1; i<mbImpl->proc_size(); i++)
     {
       MPI_Send(&numPts[i], 1, MPI_INT, i, 0x00,MPI_COMM_WORLD);
       MPI_Send(sendPts, 3 * numPts[i], MPI_DOUBLE, i, 0x01,MPI_COMM_WORLD);
@@ -647,24 +647,24 @@
 
   /* assign pts to start */
 
-  if (mbImpl->proc_config().rank() == 0)
+  if (mbImpl->proc_rank() == 0)
     MyAssignment = (int *)malloc(sizeof(int) * npts);
   else
     MyAssignment = (int *)malloc(sizeof(int) * NumPoints);
 
   for (i=0; i<NumPoints; i++)
-    MyAssignment[i] = mbImpl->proc_config().rank();
+    MyAssignment[i] = mbImpl->proc_rank();
 
   for (i=0; i<numExport; i++)
     MyAssignment[exportLocalIDs[i]] = exportProcs[i];
 
-  if (mbImpl->proc_config().rank() == 0)
+  if (mbImpl->proc_rank() == 0)
     {
       /* collect pts */
 
       recvA = MyAssignment + NumPoints;
 
-      for (i=1; i< (int) mbImpl->proc_config().size(); i++)
+      for (i=1; i< (int) mbImpl->proc_size(); i++)
 	{
 	  MPI_Recv(&numPts, 1, MPI_INT, i, 0x04, MPI_COMM_WORLD, &stat);
 	  MPI_Recv(recvA, numPts, MPI_INT, i, 0x05, MPI_COMM_WORLD, &stat);
@@ -685,13 +685,13 @@
 {
   int fail = 0;
   unsigned int i;
-  int *vals = (int *)malloc(mbImpl->proc_config().size() * sizeof(int));
+  int *vals = (int *)malloc(mbImpl->proc_size() * sizeof(int));
 
   MPI_Allgather(&rc, 1, MPI_INT, vals, 1, MPI_INT, MPI_COMM_WORLD);
 
-  for (i=0; i<mbImpl->proc_config().size(); i++){
+  for (i=0; i<mbImpl->proc_size(); i++){
     if (vals[i] != ZOLTAN_OK){
-      if (0 == mbImpl->proc_config().rank()){
+      if (0 == mbImpl->proc_rank()){
         mbShowError(vals[i], "Result on process ");
       }
       fail = 1;
@@ -715,15 +715,15 @@
   v1[2] = exp;
   v1[3] = change;
 
-  if (mbImpl->proc_config().rank() == 0){
-    v2 = (int *)malloc(4 * mbImpl->proc_config().size() * sizeof(int));
+  if (mbImpl->proc_rank() == 0){
+    v2 = (int *)malloc(4 * mbImpl->proc_size() * sizeof(int));
   }
 
   MPI_Gather(v1, 4, MPI_INT, v2, 4, MPI_INT, 0, MPI_COMM_WORLD);
 
-  if (mbImpl->proc_config().rank() == 0){
+  if (mbImpl->proc_rank() == 0){
     fprintf(stdout,"======%s======\n",s);
-    for (i=0, v=v2; i<mbImpl->proc_config().size(); i++, v+=4){
+    for (i=0, v=v2; i<mbImpl->proc_size(); i++, v+=4){
       fprintf(stdout,"%d: originally had %d, import %d, exp %d, %s\n",
         i, v[0], v[1], v[2],
         v[3] ? "a change of partitioning" : "no change");
@@ -746,19 +746,19 @@
   switch (val)
     {
     case ZOLTAN_OK:
-      printf("%d: SUCCESSFUL\n", mbImpl->proc_config().rank());
+      printf("%d: SUCCESSFUL\n", mbImpl->proc_rank());
       break;
     case ZOLTAN_WARN:
-      printf("%d: WARNING\n", mbImpl->proc_config().rank());
+      printf("%d: WARNING\n", mbImpl->proc_rank());
       break;
     case ZOLTAN_FATAL:
-      printf("%d: FATAL ERROR\n", mbImpl->proc_config().rank());
+      printf("%d: FATAL ERROR\n", mbImpl->proc_rank());
       break;
     case ZOLTAN_MEMERR:
-      printf("%d: MEMORY ALLOCATION ERROR\n", mbImpl->proc_config().rank());
+      printf("%d: MEMORY ALLOCATION ERROR\n", mbImpl->proc_rank());
       break;
     default:
-      printf("%d: INVALID RETURN CODE\n", mbImpl->proc_config().rank());
+      printf("%d: INVALID RETURN CODE\n", mbImpl->proc_rank());
       break;
     }
   return;

Modified: MOAB/trunk/tools/mbzoltan/main.cpp
===================================================================
--- MOAB/trunk/tools/mbzoltan/main.cpp	2008-03-28 16:42:01 UTC (rev 1715)
+++ MOAB/trunk/tools/mbzoltan/main.cpp	2008-03-28 16:42:32 UTC (rev 1716)
@@ -37,7 +37,16 @@
     return 1;
   }
 
-  MBInterface *mbImpl = new MBCore();
+    // need to init MPI first, to tell how many procs and rank
+  int err = MPI_Init(&argc, &argv);
+
+  int nprocs, rank;
+  err = MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
+  err = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+    // create MOAB instance based on that
+  MBInterface *mbImpl = new MBCore(rank, nprocs);
+  if (NULL == mbImpl) return 1;
   
   MBErrorCode result = mbImpl->load_mesh(argv[2]); RR;
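
Note that err is assigned above but never tested.  If initialization
failures should abort the run, a checked variant might look like the
sketch below (an assumption about the desired behavior, not part of
this commit):

    #include <cstdio>
    #include <cstdlib>
    #include <mpi.h>

    // Sketch: MPI startup with the return codes actually checked.
    static void checked_mpi_init(int *argc, char ***argv,
                                 int *rank, int *nprocs)
    {
      if (MPI_SUCCESS != MPI_Init(argc, argv) ||
          MPI_SUCCESS != MPI_Comm_size(MPI_COMM_WORLD, nprocs) ||
          MPI_SUCCESS != MPI_Comm_rank(MPI_COMM_WORLD, rank)) {
        std::fprintf(stderr, "MPI initialization failed\n");
        std::exit(1);
      }
    }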
   
