#include <mpi.h>
#ifdef PETSC_BUG
#include <petsc.h>
#endif
#include <iostream>
#include <algorithm>

int main(int argc, char *argv[]) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    double* rhs = new double[size * 4];
    double* rhsSlave = new double[4];

    if(rank == 0) {
        // There is an issue here with MPI_IN_PLACE: per the MPI standard, the
        // recvcount and recvtype arguments are ignored at the root when
        // MPI_IN_PLACE is used, so passing MPI_DATATYPE_NULL should be legal.
        // Here, however, the scatter hangs unless a real datatype is given.
        // It seems to occur only when petsc.h is included.
        MPI_Scatter(rhs, 4, MPI_DOUBLE, MPI_IN_PLACE, 4, MPI_DATATYPE_NULL, 0, MPI_COMM_WORLD);
        // MPI_Scatter(rhs, 4, MPI_DOUBLE, MPI_IN_PLACE, 4, MPI_DOUBLE, 0, MPI_COMM_WORLD); // <- this line doesn't hang
    }
    else {
        MPI_Scatter(NULL, 0, MPI_DATATYPE_NULL, rhsSlave, 4, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    }
    MPI_Barrier(MPI_COMM_WORLD);
    if(rank == 0)
        std::cout << "Done with the scatter!" << std::endl;

    if(rank == 0) {
        std::fill(rhs, rhs + 4, rank); // the root's own contribution stays in place
        // Same issue with MPI_IN_PLACE: the sendcount and sendtype arguments are
        // supposed to be ignored at the root, yet with MPI_DATATYPE_NULL the
        // gather returns wrong results. Again, it seems to occur only when
        // petsc.h is included.
        MPI_Gather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, rhs, 4, MPI_DOUBLE, 0, MPI_COMM_WORLD);
        // MPI_Gather(MPI_IN_PLACE, 0, MPI_DOUBLE, rhs, 4, MPI_DOUBLE, 0, MPI_COMM_WORLD); // <- this line gives the correct results
        for(int i = 0; i < size * 4; ++i) {
            std::cout << rhs[i] << " ";
            if((i + 1) % 4 == 0)
                std::cout << "(this line should be filled with " << i / 4 << ")" << std::endl;
        }
        std::cout << "Done with the gather!" << std::endl;
    }
    else {
        std::fill(rhsSlave, rhsSlave + 4, rank);
        MPI_Gather(rhsSlave, 4, MPI_DOUBLE, NULL, 0, MPI_DATATYPE_NULL, 0, MPI_COMM_WORLD);
    }

    delete [] rhs;
    delete [] rhsSlave;
    MPI_Finalize();
    return 0;
}
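// A plausible way to build and run the reproducer (a sketch: the mpicxx
// wrapper name, the repro.cpp file name, the -DPETSC_BUG flag matching the
// guard above, and the PETSC_DIR/PETSC_ARCH include paths are assumptions,
// not from the original report):
//
//   mpicxx -DPETSC_BUG -I${PETSC_DIR}/include -I${PETSC_DIR}/${PETSC_ARCH}/include repro.cpp -o repro
//   mpirun -n 4 ./repro
//
// With PETSC_BUG defined, the scatter hangs (or, with the workaround line
// swapped in, the gather prints wrong values); compiled without the flag,
// both collectives behave as the MPI standard prescribes, which is what
// suggests the problem is tied to including petsc.h.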