/** @example HelloParMOAB.cpp \n
 * \brief Read a mesh into MOAB in parallel, resolve and report shared entities,
 * then extract the skin of the local cells and write it out in parallel. \n
 *
 * To run: mpiexec -np 4 HelloParMOAB [filename]
 */
#include "moab/Core.hpp"
#ifdef MOAB_HAVE_MPI
#include "moab/ParallelComm.hpp"
#endif
#include "MBParallelConventions.h"
#include <iostream>
#include <string>
#include <vector>
#include <cassert>
#include "moab/Skinner.hpp"
using namespace moab;
using namespace std;
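// Note: MESH_DIR is a macro expected to be defined by the examples' build
// system, pointing at the directory that contains MOAB's sample meshes.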
string test_file_name = string(MESH_DIR) + string("/64bricks_512hex_256part.h5m");
int main(int argc, char **argv)
{
#ifdef MOAB_HAVE_MPI
  MPI_Init(&argc, &argv);
  string options;
  // Need option handling here for input filename
  if (argc > 1) {
    // User has input a mesh file
    test_file_name = argv[1];
  }  
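  // Options: READ_PART reads only this rank's part of the mesh, PARTITION names
  // the tag whose values assign parts to ranks, and PARALLEL_RESOLVE_SHARED_ENTS
  // resolves entities shared across part boundaries after the read.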
  options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
  // Get MOAB instance
  Interface* mb = new (std::nothrow) Core;
  if (NULL == mb)
    return 1;
  MPI_Comm comm;
  int global_rank, global_size;
  MPI_Comm_rank(MPI_COMM_WORLD, &global_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &global_size);
  comm = MPI_COMM_WORLD;
  // Get the ParallelComm instance
  ParallelComm* pcomm = new ParallelComm(mb, comm);
  int nprocs = pcomm->proc_config().proc_size();
  int rank = pcomm->proc_config().proc_rank();
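  // pcomm wraps the communicator we passed in, so these match global_rank/global_size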
#ifndef NDEBUG
  MPI_Comm rcomm = pcomm->proc_config().proc_comm();
  assert(rcomm == comm);
#endif
  MPI_Barrier(MPI_COMM_WORLD);
  if (0 == global_rank)
    cout << "Reading file " << test_file_name << "\n with options: " << options <<
         "\n on " << nprocs << " processors \n";
  // Read the file with the specified options
  ErrorCode rval = mb->load_file(test_file_name.c_str(), 0, options.c_str()); MB_CHK_ERR(rval);
  Range shared_ents;
  // Get entities shared with any other processor (-1 means any rank, any dimension)
  rval = pcomm->get_shared_entities(-1, shared_ents); MB_CHK_ERR(rval);
  // Keep only the entities this rank owns: filter out those whose parallel
  // status has the NOT_OWNED bit set
  Range owned_entities;
  rval = pcomm->filter_pstatus(shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &owned_entities); MB_CHK_ERR(rval);
  int nums[4] = {0}; // owned entity counts per dimension
  for (int i = 0; i < 4; i++)
    nums[i] = (int)owned_entities.num_of_dimension(i);
  vector<int> rbuf(nprocs*4, 0);
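  // Gather every rank's four per-dimension counts onto rank 0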
  MPI_Gather(nums, 4, MPI_INT, &rbuf[0], 4, MPI_INT, 0, comm);
  // Print the stats gathered:
  if (0 == global_rank) {
    for (int i = 0; i < nprocs; i++)
      cout << " Shared, owned entities on proc " << i << ": " << rbuf[4*i] << " verts, " <<
          rbuf[4*i + 1] << " edges, " << rbuf[4*i + 2] << " faces, " << rbuf[4*i + 3] << " elements" << endl;
  }
  // Get all the 3D cells on this task
  Range cells;
  rval = mb->get_entities_by_dimension(0, 3, cells); MB_CHK_ERR(rval);
  // Compute the skin of the local cells; 'false' requests bounding faces rather than vertices
  Skinner tool(mb);
  Range faces;
  rval = tool.find_skin(0, cells, false, faces); MB_CHK_ERR(rval);
  // Collect the faces shared with other tasks: they lie on interprocessor
  // interfaces, not on the true skin of the global mesh
  Range tmp_faces;
  rval = pcomm->filter_pstatus(faces, PSTATUS_SHARED, PSTATUS_AND,
                               -1, &tmp_faces); MB_CHK_ERR(rval);
  if (!tmp_faces.empty())
    faces = subtract(faces, tmp_faces);
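  // What remains in 'faces' is this task's portion of the global mesh skin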
  // Put the skin faces in one set so only they are written out
  EntityHandle mset;
  rval = mb->create_meshset(MESHSET_SET, mset); MB_CHK_ERR(rval);
  rval = mb->add_entities(mset, faces); MB_CHK_ERR(rval);
  // Write the set in parallel; with WRITE_PART each rank writes its own part to a single file
  rval = mb->write_file("skin.h5m", 0, ";;PARALLEL=WRITE_PART", &mset, 1); MB_CHK_ERR(rval);
  // Clean up: delete the ParallelComm instance before the MOAB instance
  delete pcomm;
  delete mb;
  MPI_Finalize();
#else
  std::cout << "Compile MOAB with MPI and HDF5 support for this example to work\n";
#endif
  return 0;
}