[MOAB-dev] r2732 - MOAB/trunk/parallel
kraftche at cae.wisc.edu
Fri Mar 20 09:53:31 CDT 2009
Author: kraftche
Date: 2009-03-20 09:53:30 -0500 (Fri, 20 Mar 2009)
New Revision: 2732
Added:
MOAB/trunk/parallel/parallel_write_test.cc
Modified:
MOAB/trunk/parallel/Makefile.am
Log:
test code for timing parallel writer
Modified: MOAB/trunk/parallel/Makefile.am
===================================================================
--- MOAB/trunk/parallel/Makefile.am 2009-03-19 21:26:02 UTC (rev 2731)
+++ MOAB/trunk/parallel/Makefile.am 2009-03-20 14:53:30 UTC (rev 2732)
@@ -51,7 +51,7 @@
MOAB_PARALLEL_SRCS += WriteHDF5Parallel.cpp
MOAB_PARALLEL_HDRS += WriteHDF5Parallel.hpp
- MOAB_PARALLEL_TEST += parallel_hdf5_test mhdf_parallel
+ MOAB_PARALLEL_TEST += parallel_hdf5_test mhdf_parallel parallel_write_test
endif
MOAB_PARALLEL_CHECK += mbparallelcomm_test
@@ -82,7 +82,10 @@
mhdf_parallel_LDADD = ../mhdf/libmhdf.la
parallel_unit_tests_SOURCES = parallel_unit_tests.cpp
parallel_unit_tests_LDADD = ../libMOAB.la
+parallel_write_test_SOURCES = parallel_write_test.cc
+parallel_write_test_LDADD = ../libMOAB.la
+
# Other files to clean up (e.g. output from tests)
MOSTLYCLEANFILES = mhdf_ll.h5m
Added: MOAB/trunk/parallel/parallel_write_test.cc
===================================================================
--- MOAB/trunk/parallel/parallel_write_test.cc (rev 0)
+++ MOAB/trunk/parallel/parallel_write_test.cc 2009-03-20 14:53:30 UTC (rev 2732)
@@ -0,0 +1,331 @@
+#include "MBCore.hpp"
+#include "MBParallelComm.hpp"
+#include "MBTagConventions.hpp"
+#include <mpi.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <iostream>
+#include <string>
+#include <time.h>
+#include <string.h>
+#include <math.h>
+#include <assert.h>
+
+const int DEFAULT_INTERVALS = 2;
+const char* DEFAULT_FILE_NAME = "parallel_write_test.h5m";
+
+// Create mesh for each processor that is a cube of hexes
+// with the specified interval count along each edge.  Cubes
+// of mesh will be positioned and vertex IDs assigned such that
+// there are shared entities.  If the cube root of the number
+// of processors is a whole number, then each processor's mesh
+// will be one cube in a grid with that many processor blocks
+// along each edge.  Otherwise, processor blocks are arranged
+// within a grid whose edge length is the ceiling of the cube
+// root of the comm size, such that there are no disjoint regions.
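+// For example, with 6 processors the block grid is 2 x 2 x 2
+// (ceil(cbrt(6)) = 2); ranks fill the grid in x-, then y-, then
+// z-order, leaving the last two grid positions unused.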
+MBErrorCode generate_mesh( MBInterface& moab, int intervals );
+
+const char args[] = "[-i <intervals>] [-o <filename>] [-g <filename>]";
+void help() {
+ std::cout << "parallel_write_test " << args << std::endl
+ << "-i <N> Each processor owns an NxNxN cube of hex elements (default: " << DEFAULT_INTERVALS << ")" << std::endl
+ << "-o <name> Retain output file and name it as specified." << std::endl
+ << "-g <name> Write each processor's local mesh to a file name prefixed with its MPI rank" << std::endl
+ << std::endl;
+}
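+
+// Example invocation (the MPI launcher name varies by implementation;
+// the output file name here is just an illustration):
+//   mpiexec -np 8 parallel_write_test -i 4 -o test_output.h5m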
+
+int main( int argc, char* argv[] )
+{
+ int ierr = MPI_Init( &argc, &argv );
+ if (ierr) {
+ std::cerr << "MPI_Init failed with error code: " << ierr << std::endl;
+ return ierr;
+ }
+ int rank;
+ ierr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ if (ierr) {
+ std::cerr << "MPI_Comm_rank failed with error code: " << ierr << std::endl;
+ return ierr;
+ }
+ int size;
+ ierr = MPI_Comm_size( MPI_COMM_WORLD, &size );
+ if (ierr) {
+ std::cerr << "MPI_Comm_size failed with error code: " << ierr << std::endl;
+ return ierr;
+ }
+
+ // settings controlled by CL flags
+ const char* output_file_name = 0;
+ const char* indiv_file_name = 0;
+ int intervals = 0;
+ // state for CL flag processing
+ bool expect_intervals = false;
+ bool expect_file_name = false;
+ bool expect_indiv_file = false;
+ // process CL args
+ for (int i = 1; i < argc; ++i) {
+ if (expect_intervals) {
+ char* endptr = 0;
+ intervals = (int)strtol( argv[i], &endptr, 0 );
+ if (*endptr || intervals < 1) {
+ std::cerr << "Invalid block interval value: " << argv[i] << std::endl;
+ return 1;
+ }
+ expect_intervals = false;
+ }
+ else if (expect_indiv_file) {
+ indiv_file_name = argv[i];
+ expect_indiv_file = false;
+ }
+ else if (expect_file_name) {
+ output_file_name = argv[i];
+ expect_file_name = false;
+ }
+ else if (!strcmp( "-i", argv[i]))
+ expect_intervals = true;
+ else if (!strcmp( "-o", argv[i]))
+ expect_file_name = true;
+ else if (!strcmp( "-g", argv[i]))
+ expect_indiv_file = true;
+ else if (!strcmp( "-h", argv[i])) {
+ help();
+ return 0;
+ }
+ else {
+ std::cerr << "Unexpected argument: " << argv[i] << std::endl
+ << "Usage: " << argv[0] << " " << args << std::endl
+ << " " << argv[0] << " -h" << std::endl
+ << " Try '-h' for help." << std::endl;
+ return 1;
+ }
+ }
+ // Check for missing argument after last CL flag
+ if (expect_file_name || expect_intervals || expect_indiv_file) {
+ std::cerr << "Missing argument for '" << argv[argc-1] << "'" << std::endl;
+ return 1;
+ }
+ // If intervals weren't specified, use default
+ if (intervals == 0) {
+ std::cout << "Using default interval count: " << DEFAULT_INTERVALS << std::endl;
+ intervals = DEFAULT_INTERVALS;
+ }
+ // If no output file was specified, use the default one and note
+ // that we need to delete it when the test completes.
+ bool keep_output_file = true;
+ if (!output_file_name) {
+ output_file_name = DEFAULT_FILE_NAME;
+ keep_output_file = false;
+ }
+
+ // Create mesh
+ MBCore mb;
+ MBInterface& moab = mb;
+ MBErrorCode rval = generate_mesh( moab, intervals );
+ if (MB_SUCCESS != rval) {
+ std::cerr << "Mesh creation failed with error code: " << rval << std::endl;
+ return (int)rval;
+ }
+
+ // Write out local mesh on each processor if requested.
+ if (indiv_file_name) {
+ char buffer[64];
+ int width = (int)ceil( log10( size ) );
+ sprintf(buffer,"%0*d-", width, rank );
+ std::string name(buffer);
+ name += indiv_file_name;
+ rval = moab.write_file( name.c_str() );
+ if (MB_SUCCESS != rval) {
+ std::cerr << "Failed to write file: " << name << std::endl;
+ return (int)rval;
+ }
+ }
+
+ // Negotiate shared entities using vertex global IDs
+ MBRange hexes;
+ moab.get_entities_by_type( 0, MBHEX, hexes );
+ MBParallelComm* pcomm = new MBParallelComm( &moab );
+ rval = pcomm->resolve_shared_ents( 0, hexes, 3, 0 );
+ if (MB_SUCCESS != rval) {
+ std::cerr << "MBParallelComm::resolve_shared_ents failed" << std::endl;
+ return rval;
+ }
+
+ // Do parallel write
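+ // Note: clock() measures this process's CPU time, so the summary
+ // printed below reports the root process's timing only.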
+ clock_t t = clock();
+ rval = moab.write_file( output_file_name, "MOAB", "PARALLEL=FORMAT" );
+ t = clock() - t;
+ if (MB_SUCCESS != rval) {
+ std::cerr << "File creation failed with error code: " << rval << std::endl;
+ return (int)rval;
+ }
+
+ // Clean up and summarize
+ if (0 == rank) {
+ double sec = (double)t / CLOCKS_PER_SEC;
+ std::cout << "Wrote " << hexes.size()*size << " hexes in " << sec << " seconds." << std::endl;
+
+ if (!keep_output_file)
+ remove( output_file_name );
+ }
+
+ return MPI_Finalize();
+}
+
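+// IDX maps local lattice coordinates (i,j,k), each in [0,num_interval],
+// to an index into the 'vertices' vector built in generate_mesh.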
+#define IDX(i,j,k) ((num_interval+1)*((num_interval+1)*(k) + (j)) + (i))
+
+MBErrorCode generate_mesh( MBInterface& moab, int num_interval )
+{
+ int rank, size;
+ MPI_Comm_rank( MPI_COMM_WORLD, &rank );
+ MPI_Comm_size( MPI_COMM_WORLD, &size );
+
+ MBErrorCode rval;
+ MBTag global_id;
+ rval = moab.tag_get_handle( GLOBAL_ID_TAG_NAME, global_id );
+ if (MB_SUCCESS != rval)
+ return rval;
+
+ // Each processor will own one cube of mesh within
+ // a 3D grid of cubes.  Calculate the dimensions of
+ // that grid in numbers of cubes.
+ int root = 1;
+ while (root*root*root < size)
+ ++root;
+ int num_x_blocks = root;
+ int num_y_blocks = root-1;
+ int num_z_blocks = root-1;
+ if (num_x_blocks * num_y_blocks * num_z_blocks < size)
+ ++num_y_blocks;
+ if (num_x_blocks * num_y_blocks * num_z_blocks < size)
+ ++num_z_blocks;
+
+ // calculate position of this processor in grid
+ int my_z_block = rank / (num_x_blocks * num_y_blocks);
+ int rem = rank % (num_x_blocks * num_y_blocks);
+ int my_y_block = rem / num_x_blocks;
+ int my_x_block = rem % num_x_blocks;
+
+ // Each processor's cube of mesh will be num_interval^3 elements
+ // and will be 1.0 units on a side.
+
+ // create vertices
+ const int num_x_vtx = num_interval * num_x_blocks + 1;
+ const int num_y_vtx = num_interval * num_y_blocks + 1;
+ const int x_offset = my_x_block * num_interval;
+ const int y_offset = my_y_block * num_interval;
+ const int z_offset = my_z_block * num_interval;
+ double step = 1.0 / num_interval;
+ std::vector<MBEntityHandle> vertices( (num_interval+1)*(num_interval+1)*(num_interval+1) );
+ std::vector<MBEntityHandle>::iterator v = vertices.begin();
+ for (int k = 0; k <= num_interval; ++k) {
+ for (int j = 0; j <= num_interval; ++j) {
+ for (int i = 0; i <= num_interval; ++i) {
+ double coords[] = { my_x_block + i*step,
+ my_y_block + j*step,
+ my_z_block + k*step };
+ MBEntityHandle h;
+ rval = moab.create_vertex( coords, h );
+ if (MB_SUCCESS != rval)
+ return rval;
+
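+ // Global ID is a 1-based index into the global structured vertex
+ // lattice, so vertices coincident with those on neighboring
+ // processors receive identical IDs for resolve_shared_ents to match.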
+ int id = 1 + x_offset + i
+ + (y_offset + j) * num_x_vtx
+ + (z_offset + k) * num_x_vtx * num_y_vtx;
+ rval = moab.tag_set_data( global_id, &h, 1, &id );
+ if (MB_SUCCESS != rval)
+ return rval;
+
+ *v++ = h;
+ }
+ }
+ }
+
+ // create hexes
+ for (int k = 0; k < num_interval; ++k) {
+ for (int j = 0; j < num_interval; ++j) {
+ for (int i = 0; i < num_interval; ++i) {
+ assert( IDX(i+1,j+1,k+1) < (int)vertices.size() );
+ const MBEntityHandle conn[] = { vertices[IDX(i, j, k )],
+ vertices[IDX(i+1,j, k )],
+ vertices[IDX(i+1,j+1,k )],
+ vertices[IDX(i, j+1,k )],
+ vertices[IDX(i, j, k+1)],
+ vertices[IDX(i+1,j, k+1)],
+ vertices[IDX(i+1,j+1,k+1)],
+ vertices[IDX(i, j+1,k+1)] };
+ MBEntityHandle elem;
+ rval = moab.create_element( MBHEX, conn, 8, elem );
+ if (MB_SUCCESS != rval)
+ return rval;
+ }
+ }
+ }
+ /*
+ // create interface quads
+ for (int j = 0; j < num_interval; ++j) {
+ for (int i = 0; i < num_interval; ++i) {
+ MBEntityHandle h;
+
+ const MBEntityHandle conn1[] = { vertices[IDX(i, j, 0)],
+ vertices[IDX(i+1,j, 0)],
+ vertices[IDX(i+1,j+1,0)],
+ vertices[IDX(i, j+1,0)] };
+ rval = moab.create_element( MBQUAD, conn1, 4, h );
+ if (MB_SUCCESS != rval)
+ return rval;
+
+ const MBEntityHandle conn2[] = { vertices[IDX(i, j, num_interval)],
+ vertices[IDX(i+1,j, num_interval)],
+ vertices[IDX(i+1,j+1,num_interval)],
+ vertices[IDX(i, j+1,num_interval)] };
+ rval = moab.create_element( MBQUAD, conn2, 4, h );
+ if (MB_SUCCESS != rval)
+ return rval;
+ }
+ }
+ for (int k = 0; k < num_interval; ++k) {
+ for (int i = 0; i < num_interval; ++i) {
+ MBEntityHandle h;
+
+ const MBEntityHandle conn1[] = { vertices[IDX(i, 0,k )],
+ vertices[IDX(i+1,0,k )],
+ vertices[IDX(i+1,0,k+1)],
+ vertices[IDX(i, 0,k+1)] };
+ rval = moab.create_element( MBQUAD, conn1, 4, h );
+ if (MB_SUCCESS != rval)
+ return rval;
+
+ const MBEntityHandle conn2[] = { vertices[IDX(i, num_interval,k )],
+ vertices[IDX(i+1,num_interval,k )],
+ vertices[IDX(i+1,num_interval,k+1)],
+ vertices[IDX(i, num_interval,k+1)] };
+ rval = moab.create_element( MBQUAD, conn2, 4, h );
+ if (MB_SUCCESS != rval)
+ return rval;
+ }
+ }
+ for (int k = 0; k < num_interval; ++k) {
+ for (int j = 0; j < num_interval; ++j) {
+ MBEntityHandle h;
+
+ const MBEntityHandle conn1[] = { vertices[IDX(0,j, k )],
+ vertices[IDX(0,j+1,k )],
+ vertices[IDX(0,j+1,k+1)],
+ vertices[IDX(0,j, k+1)] };
+ rval = moab.create_element( MBQUAD, conn1, 4, h );
+ if (MB_SUCCESS != rval)
+ return rval;
+
+ const MBEntityHandle conn2[] = { vertices[IDX(num_interval,j, k )],
+ vertices[IDX(num_interval,j+1,k )],
+ vertices[IDX(num_interval,j+1,k+1)],
+ vertices[IDX(num_interval,j, k+1)] };
+ rval = moab.create_element( MBQUAD, conn2, 4, h );
+ if (MB_SUCCESS != rval)
+ return rval;
+ }
+ }
+ */
+ return MB_SUCCESS;
+}
+
+
+