[mpich2-commits] r7829 - mpich2/trunk/test/mpi/rma

dinan at mcs.anl.gov
Tue Jan 25 13:25:17 CST 2011


Author: dinan
Date: 2011-01-25 13:25:17 -0600 (Tue, 25 Jan 2011)
New Revision: 7829

Added:
   mpich2/trunk/test/mpi/rma/strided_acc_indexed.c
   mpich2/trunk/test/mpi/rma/strided_acc_onelock.c
   mpich2/trunk/test/mpi/rma/strided_acc_subarray.c
   mpich2/trunk/test/mpi/rma/strided_get_indexed.c
   mpich2/trunk/test/mpi/rma/strided_putget_indexed.c
   mpich2/trunk/test/mpi/rma/window_creation.c
Modified:
   mpich2/trunk/test/mpi/rma/Makefile.sm
Log:
Adding several RMA tests developed as part of my one-sided work.  Some of
these use MPI datatypes to achieve one-sided gather/scatter.
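
The basic pattern (a minimal sketch reusing the XDIM/SUB_XDIM macros and the
peer/buffer names from the tests, not a test itself) is to describe each row
of the strided patch with an MPI indexed type and then use that type on both
the origin and target sides of a single one-sided call:

    int row, blk_len[SUB_YDIM], idx[SUB_YDIM];
    MPI_Datatype patch_type;

    for (row = 0; row < SUB_YDIM; row++) {
        idx[row]     = row * XDIM;   /* element offset of each row */
        blk_len[row] = SUB_XDIM;     /* contiguous elements per row */
    }
    MPI_Type_indexed(SUB_YDIM, blk_len, idx, MPI_DOUBLE, &patch_type);
    MPI_Type_commit(&patch_type);

    /* Gather the whole 2-D patch from the target in one call */
    MPI_Win_lock(MPI_LOCK_SHARED, peer, 0, buf_win);
    MPI_Get(loc_buf, 1, patch_type, peer, 0, 1, patch_type, buf_win);
    MPI_Win_unlock(peer, buf_win);

    MPI_Type_free(&patch_type);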



Modified: mpich2/trunk/test/mpi/rma/Makefile.sm
===================================================================
--- mpich2/trunk/test/mpi/rma/Makefile.sm	2011-01-25 18:00:10 UTC (rev 7828)
+++ mpich2/trunk/test/mpi/rma/Makefile.sm	2011-01-25 19:25:17 UTC (rev 7829)
@@ -62,6 +62,13 @@
 mixedsync_SOURCES = mixedsync.c
 selfrma_SOURCES   = selfrma.c
 
+strided_acc_onelock_SOURCES    = strided_acc_onelock.c
+strided_putget_indexed_SOURCES = strided_putget_indexed.c
+strided_acc_indexed_SOURCES    = strided_acc_indexed.c
+strided_acc_subarray_SOURCES   = strided_acc_subarray.c
+strided_get_indexed_SOURCES    = strided_get_indexed.c
+window_creation_SOURCES        = window_creation.c
+
 ../util/mtest.o:
 	(cd ../util && $(MAKE) )
 

Added: mpich2/trunk/test/mpi/rma/strided_acc_indexed.c
===================================================================
--- mpich2/trunk/test/mpi/rma/strided_acc_indexed.c	                        (rev 0)
+++ mpich2/trunk/test/mpi/rma/strided_acc_indexed.c	2011-01-25 19:25:17 UTC (rev 7829)
@@ -0,0 +1,146 @@
+/* One-Sided MPI 2-D Strided Accumulate Test
+ *
+ * Author: James Dinan <dinan at mcs.anl.gov> 
+ * Date  : December, 2010
+ *
+ * This code performs N accumulates into a 2d patch of a shared array.  The
+ * array has dimensions [X, Y] and the subarray has dimensions [SUB_X, SUB_Y]
+ * and begins at index [0, 0].  The input and output buffers are specified
+ * using an MPI indexed type.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <mpi.h>
+
+#define XDIM 16
+#define YDIM 16
+#define SUB_XDIM 8
+#define SUB_YDIM 8
+#define ITERATIONS 1
+
+static int verbose = 0;
+
+int main(int argc, char **argv) {
+    int i, j, rank, nranks, peer, bufsize, errors;
+    double *win_buf, *src_buf;
+    MPI_Win buf_win;
+
+    MPI_Init(&argc, &argv);
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &nranks);
+
+    bufsize = XDIM * YDIM * sizeof(double);
+    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &win_buf);
+    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &src_buf);
+
+    if (rank == 0)
+        if (verbose) printf("MPI RMA Strided Accumulate Test:\n");
+
+    for (i = 0; i < XDIM*YDIM; i++) {
+        *(win_buf  + i) = 1.0 + rank;
+        *(src_buf + i) = 1.0 + rank;
+    }
+
+    MPI_Win_create(win_buf, bufsize, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &buf_win);
+
+    peer = (rank+1) % nranks;
+
+    // Perform ITERATIONS strided accumulate operations
+
+    for (i = 0; i < ITERATIONS; i++) {
+      MPI_Aint idx_loc[SUB_YDIM];
+      int idx_rem[SUB_YDIM];
+      int blk_len[SUB_YDIM];
+      MPI_Datatype src_type, dst_type;
+
+      for (j = 0; j < SUB_YDIM; j++) {
+        MPI_Get_address(&src_buf[j*XDIM], &idx_loc[j]);
+        idx_rem[j] = j*XDIM;
+        blk_len[j] = SUB_XDIM;
+      }
+
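+      // With -DABSOLUTE the source type is built from absolute addresses
+      // (hindexed), so MPI_BOTTOM is passed as the origin buffer below.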
+#ifdef ABSOLUTE
+      MPI_Type_hindexed(SUB_YDIM, blk_len, idx_loc, MPI_DOUBLE, &src_type);
+#else
+      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
+#endif
+      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
+
+      MPI_Type_commit(&src_type);
+      MPI_Type_commit(&dst_type);
+
+      MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
+
+#ifdef ABSOLUTE
+      MPI_Accumulate(MPI_BOTTOM, 1, src_type, peer, 0, 1, dst_type, MPI_SUM, buf_win);
+#else
+      MPI_Accumulate(src_buf, 1, src_type, peer, 0, 1, dst_type, MPI_SUM, buf_win);
+#endif
+
+      MPI_Win_unlock(peer, buf_win);
+
+      MPI_Type_free(&src_type);
+      MPI_Type_free(&dst_type);
+    }
+
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    // Verify that the results are correct
+
+    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
+    errors = 0;
+    for (i = 0; i < SUB_XDIM; i++) {
+      for (j = 0; j < SUB_YDIM; j++) {
+        const double actual   = *(win_buf + i + j*XDIM);
+        const double expected = (1.0 + rank) + (1.0 + ((rank+nranks-1)%nranks)) * (ITERATIONS);
+        if (fabs(actual - expected) > 1e-10) {
+          printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
+              rank, j, i, expected, actual);
+          errors++;
+          fflush(stdout);
+        }
+      }
+    }
+    for (i = SUB_XDIM; i < XDIM; i++) {
+      for (j = 0; j < SUB_YDIM; j++) {
+        const double actual   = *(win_buf + i + j*XDIM);
+        const double expected = 1.0 + rank;
+        if (fabs(actual - expected) > 1e-10) {
+          printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
+              rank, j, i, expected, actual);
+          errors++;
+          fflush(stdout);
+        }
+      }
+    }
+    for (i = 0; i < XDIM; i++) {
+      for (j = SUB_YDIM; j < YDIM; j++) {
+        const double actual   = *(win_buf + i + j*XDIM);
+        const double expected = 1.0 + rank;
+        if (fabs(actual - expected) > 1e-10) {
+          printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
+              rank, j, i, expected, actual);
+          errors++;
+          fflush(stdout);
+        }
+      }
+    }
+    MPI_Win_unlock(rank, buf_win);
+
+    MPI_Win_free(&buf_win);
+    MPI_Free_mem(win_buf);
+    MPI_Free_mem(src_buf);
+
+    MPI_Finalize();
+
+    if (errors == 0) {
+      if (rank == 0)
+        printf(" No Errors\n");
+      return 0;
+    } else {
+      printf("%d: Fail\n", rank);
+      return 1;
+    }
+}

Added: mpich2/trunk/test/mpi/rma/strided_acc_onelock.c
===================================================================
--- mpich2/trunk/test/mpi/rma/strided_acc_onelock.c	                        (rev 0)
+++ mpich2/trunk/test/mpi/rma/strided_acc_onelock.c	2011-01-25 19:25:17 UTC (rev 7829)
@@ -0,0 +1,80 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <mpi.h>
+
+#define XDIM 1024 
+#define YDIM 1024
+#define ITERATIONS 10
+
+static int verbose = 0;
+
+int main(int argc, char **argv) {
+    int i, j, rank, nranks, peer, bufsize, errors;
+    double *buffer, *src_buf;
+    MPI_Win buf_win;
+
+    MPI_Init(&argc, &argv);
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &nranks);
+
+    bufsize = XDIM * YDIM * sizeof(double);
+    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &buffer);
+    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &src_buf);
+
+    if (rank == 0)
+        if (verbose) printf("MPI RMA Strided Accumulate Test:\n");
+
+    for (i = 0; i < XDIM*YDIM; i++) {
+        *(buffer  + i) = 1.0 + rank;
+        *(src_buf + i) = 1.0 + rank;
+    }
+
+    MPI_Win_create(buffer, bufsize, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &buf_win);
+
+    peer = (rank+1) % nranks;
+
+    for (i = 0; i < ITERATIONS; i++) {
+
+      MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
+
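+      // A single lock epoch covers all YDIM per-row accumulate calls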
+      for (j = 0; j < YDIM; j++) {
+        MPI_Accumulate(src_buf + j*XDIM, XDIM, MPI_DOUBLE, peer,
+                       j*XDIM*sizeof(double), XDIM, MPI_DOUBLE, MPI_SUM, buf_win);
+      }
+
+      MPI_Win_unlock(peer, buf_win);
+    }
+
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
+    for (i = errors = 0; i < XDIM; i++) {
+      for (j = 0; j < YDIM; j++) {
+        const double actual   = *(buffer + i + j*XDIM);
+        const double expected = (1.0 + rank) + (1.0 + ((rank+nranks-1)%nranks)) * (ITERATIONS);
+        if (fabs(actual - expected) > 1e-10) {
+          printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
+              rank, j, i, expected, actual);
+          errors++;
+          fflush(stdout);
+        }
+      }
+    }
+    MPI_Win_unlock(rank, buf_win);
+
+    MPI_Win_free(&buf_win);
+    MPI_Free_mem(buffer);
+    MPI_Free_mem(src_buf);
+
+    MPI_Finalize();
+
+    if (errors == 0) {
+      if (rank == 0) 
+        printf(" No Errors\n");
+      return 0;
+    } else {
+      printf("%d: Fail\n", rank);
+      return 1;
+    }
+}

Added: mpich2/trunk/test/mpi/rma/strided_acc_subarray.c
===================================================================
--- mpich2/trunk/test/mpi/rma/strided_acc_subarray.c	                        (rev 0)
+++ mpich2/trunk/test/mpi/rma/strided_acc_subarray.c	2011-01-25 19:25:17 UTC (rev 7829)
@@ -0,0 +1,139 @@
+/* One-Sided MPI 2-D Strided Accumulate Test
+ *
+ * Author: James Dinan <dinan at mcs.anl.gov> 
+ * Date  : December, 2010
+ *
+ * This code performs N accumulates into a 2d patch of a shared array.  The
+ * array has dimensions [X, Y] and the subarray has dimensions [SUB_X, SUB_Y]
+ * and begins at index [0, 0].  The input and output buffers are specified
+ * using an MPI subarray type.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <mpi.h>
+
+#define XDIM 1024 
+#define YDIM 1024
+#define SUB_XDIM 512
+#define SUB_YDIM 512
+#define ITERATIONS 10
+
+static int verbose = 0;
+
+int main(int argc, char **argv) {
+    int i, j, rank, nranks, peer, bufsize, errors;
+    double *win_buf, *src_buf;
+    MPI_Win buf_win;
+
+    MPI_Init(&argc, &argv);
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &nranks);
+
+    bufsize = XDIM * YDIM * sizeof(double);
+    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &win_buf);
+    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &src_buf);
+
+    if (rank == 0)
+        if (verbose) printf("MPI RMA Strided Accumulate Test:\n");
+
+    for (i = 0; i < XDIM*YDIM; i++) {
+        *(win_buf  + i) = 1.0 + rank;
+        *(src_buf + i) = 1.0 + rank;
+    }
+
+    MPI_Win_create(win_buf, bufsize, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &buf_win);
+
+    peer = (rank+1) % nranks;
+
+    // Perform ITERATIONS strided accumulate operations
+
+    for (i = 0; i < ITERATIONS; i++) {
+      int ndims               = 2;
+      int src_arr_sizes[2]    = { XDIM, YDIM };
+      int src_arr_subsizes[2] = { SUB_XDIM, SUB_YDIM };
+      int src_arr_starts[2]   = {    0,    0 };
+      int dst_arr_sizes[2]    = { XDIM, YDIM };
+      int dst_arr_subsizes[2] = { SUB_XDIM, SUB_YDIM };
+      int dst_arr_starts[2]   = {    0,    0 };
+      MPI_Datatype src_type, dst_type;
+
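+      // MPI_ORDER_C gives row-major layout (the last dimension is contiguous);
+      // identical source and destination types select the same [0,0]-anchored patch.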
+      MPI_Type_create_subarray(ndims, src_arr_sizes, src_arr_subsizes, src_arr_starts,
+          MPI_ORDER_C, MPI_DOUBLE, &src_type);
+
+      MPI_Type_create_subarray(ndims, dst_arr_sizes, dst_arr_subsizes, dst_arr_starts,
+          MPI_ORDER_C, MPI_DOUBLE, &dst_type);
+
+      MPI_Type_commit(&src_type);
+      MPI_Type_commit(&dst_type);
+
+      MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
+
+      MPI_Accumulate(src_buf, 1, src_type, peer, 0, 1, dst_type, MPI_SUM, buf_win);
+
+      MPI_Win_unlock(peer, buf_win);
+
+      MPI_Type_free(&src_type);
+      MPI_Type_free(&dst_type);
+    }
+
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    // Verify that the results are correct
+
+    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
+    errors = 0;
+    for (i = 0; i < SUB_XDIM; i++) {
+      for (j = 0; j < SUB_YDIM; j++) {
+        const double actual   = *(win_buf + i + j*XDIM);
+        const double expected = (1.0 + rank) + (1.0 + ((rank+nranks-1)%nranks)) * (ITERATIONS);
+        if (fabs(actual - expected) > 1e-10) {
+          printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
+              rank, j, i, expected, actual);
+          errors++;
+          fflush(stdout);
+        }
+      }
+    }
+    for (i = SUB_XDIM; i < XDIM; i++) {
+      for (j = 0; j < SUB_YDIM; j++) {
+        const double actual   = *(win_buf + i + j*XDIM);
+        const double expected = 1.0 + rank;
+        if (fabs(actual - expected) > 1e-10) {
+          printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
+              rank, j, i, expected, actual);
+          errors++;
+          fflush(stdout);
+        }
+      }
+    }
+    for (i = 0; i < XDIM; i++) {
+      for (j = SUB_YDIM; j < YDIM; j++) {
+        const double actual   = *(win_buf + i + j*XDIM);
+        const double expected = 1.0 + rank;
+        if (fabs(actual - expected) > 1e-10) {
+          printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
+              rank, j, i, expected, actual);
+          errors++;
+          fflush(stdout);
+        }
+      }
+    }
+    MPI_Win_unlock(rank, buf_win);
+
+    MPI_Win_free(&buf_win);
+    MPI_Free_mem(win_buf);
+    MPI_Free_mem(src_buf);
+
+    MPI_Finalize();
+
+    if (errors == 0) {
+      if (rank == 0)
+        printf(" No Errors\n");
+      return 0;
+    } else {
+      printf("%d: Fail\n", rank);
+      return 1;
+    }
+}

Added: mpich2/trunk/test/mpi/rma/strided_get_indexed.c
===================================================================
--- mpich2/trunk/test/mpi/rma/strided_get_indexed.c	                        (rev 0)
+++ mpich2/trunk/test/mpi/rma/strided_get_indexed.c	2011-01-25 19:25:17 UTC (rev 7829)
@@ -0,0 +1,136 @@
+/* One-Sided MPI 2-D Strided Get Test
+ *
+ * Author: James Dinan <dinan at mcs.anl.gov> 
+ * Date  : December, 2010
+ *
+ * This code performs a strided get operation from a 2d patch of a shared
+ * array.  The array has dimensions [X, Y] and the subarray has dimensions
+ * [SUB_X, SUB_Y] and begins at index [0, 0].  The input and output buffers are
+ * specified using an MPI indexed type.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <mpi.h>
+
+#define XDIM 8
+#define YDIM 1024
+#define SUB_XDIM 8
+#define SUB_YDIM 256
+
+static int verbose = 0;
+
+int main(int argc, char **argv) {
+    int i, j, rank, nranks, peer, bufsize, errors;
+    double *win_buf, *loc_buf;
+    MPI_Win buf_win;
+
+    MPI_Aint idx_loc[SUB_YDIM];
+    int idx_rem[SUB_YDIM];
+    int blk_len[SUB_YDIM];
+    MPI_Datatype loc_type, rem_type;
+
+    MPI_Init(&argc, &argv);
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &nranks);
+
+    bufsize = XDIM * YDIM * sizeof(double);
+    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &win_buf);
+    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &loc_buf);
+
+    if (rank == 0)
+        if (verbose) printf("MPI RMA Strided Get Test:\n");
+
+    for (i = 0; i < XDIM*YDIM; i++) {
+        *(win_buf + i) = 1.0 + rank;
+        *(loc_buf + i) = 1.0 + rank;
+    }
+
+    MPI_Win_create(win_buf, bufsize, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &buf_win);
+
+    peer = (rank+1) % nranks;
+
+    // Build the datatype
+
+    for (i = 0; i < SUB_YDIM; i++) {
+      MPI_Get_address(&loc_buf[i*XDIM], &idx_loc[i]);
+      idx_rem[i] = i*XDIM;
+      blk_len[i] = SUB_XDIM;
+    }
+
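+    // Both types use the same element-offset displacements relative to their
+    // base buffers; idx_loc would only be needed for an absolute-address
+    // (MPI_BOTTOM) variant like the one in strided_acc_indexed.c.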
+    MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &loc_type);
+    MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &rem_type);
+
+    MPI_Type_commit(&loc_type);
+    MPI_Type_commit(&rem_type);
+
+    // Perform get operation
+
+    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
+
+    MPI_Get(loc_buf, 1, loc_type, peer, 0, 1, rem_type, buf_win);
+
+    // Use the datatype only on the remote side (must have SUB_XDIM == XDIM)
+    // MPI_Get(loc_buf, SUB_XDIM*SUB_YDIM, MPI_DOUBLE, peer, 0, 1, rem_type, buf_win);
+
+    MPI_Win_unlock(peer, buf_win);
+
+    MPI_Type_free(&loc_type);
+    MPI_Type_free(&rem_type);
+
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    // Verify that the results are correct
+
+    errors = 0;
+    for (i = 0; i < SUB_XDIM; i++) {
+      for (j = 0; j < SUB_YDIM; j++) {
+        const double actual   = *(loc_buf + i + j*XDIM);
+        const double expected = (1.0 + peer);
+        if (fabs(actual - expected) > 1e-10) {
+          printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
+              rank, j, i, expected, actual);
+          errors++;
+          fflush(stdout);
+        }
+      }
+    }
+    for (i = SUB_XDIM; i < XDIM; i++) {
+      for (j = 0; j < SUB_YDIM; j++) {
+        const double actual   = *(loc_buf + i + j*XDIM);
+        const double expected = 1.0 + rank;
+        if (fabs(actual - expected) > 1e-10) {
+          printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
+              rank, j, i, expected, actual);
+          errors++;
+          fflush(stdout);
+        }
+      }
+    }
+    for (i = 0; i < XDIM; i++) {
+      for (j = SUB_YDIM; j < YDIM; j++) {
+        const double actual   = *(loc_buf + i + j*XDIM);
+        const double expected = 1.0 + rank;
+        if (fabs(actual - expected) > 1e-10) {
+          printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
+              rank, j, i, expected, actual);
+          errors++;
+          fflush(stdout);
+        }
+      }
+    }
+
+    MPI_Win_free(&buf_win);
+    MPI_Free_mem(win_buf);
+    MPI_Free_mem(loc_buf);
+
+    MPI_Finalize();
+
+    if (errors == 0) {
+      if (rank == 0) 
+        printf(" No Errors\n");
+      return 0;
+    } else {
+      printf("%d: Fail\n", rank);
+      return 1;
+    }
+}

Added: mpich2/trunk/test/mpi/rma/strided_putget_indexed.c
===================================================================
--- mpich2/trunk/test/mpi/rma/strided_putget_indexed.c	                        (rev 0)
+++ mpich2/trunk/test/mpi/rma/strided_putget_indexed.c	2011-01-25 19:25:17 UTC (rev 7829)
@@ -0,0 +1,142 @@
+/* One-Sided MPI 2-D Strided Put/Get Test
+ *
+ * Author: James Dinan <dinan at mcs.anl.gov> 
+ * Date  : December, 2010
+ *
+ * This code performs N strided put operations followed by get operations into
+ * a 2d patch of a shared array.  The array has dimensions [X, Y] and the
+ * subarray has dimensions [SUB_X, SUB_Y] and begins at index [0, 0].  The
+ * input and output buffers are specified using an MPI indexed type.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <mpi.h>
+
+#define XDIM 8
+#define YDIM 1024
+#define SUB_XDIM 8
+#define SUB_YDIM 255
+#define ITERATIONS 1
+
+static int verbose = 0;
+
+int main(int argc, char **argv) {
+    int i, j, rank, nranks, peer, bufsize, errors;
+    double *win_buf, *src_buf, *dst_buf;
+    MPI_Win buf_win;
+
+    MPI_Init(&argc, &argv);
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &nranks);
+
+    bufsize = XDIM * YDIM * sizeof(double);
+    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &win_buf);
+    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &src_buf);
+    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &dst_buf);
+
+    if (rank == 0)
+        if (verbose) printf("MPI RMA Strided Accumulate Test:\n");
+
+    for (i = 0; i < XDIM*YDIM; i++) {
+        *(win_buf  + i) = 1.0 + rank;
+        *(src_buf + i) = 1.0 + rank;
+    }
+
+    MPI_Win_create(win_buf, bufsize, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &buf_win);
+
+    peer = (rank+1) % nranks;
+
+    // Perform ITERATIONS strided put/get operations
+
+    for (i = 0; i < ITERATIONS; i++) {
+      MPI_Aint idx_loc[SUB_YDIM];
+      int idx_rem[SUB_YDIM];
+      int blk_len[SUB_YDIM];
+      MPI_Datatype src_type, dst_type;
+
+      for (j = 0; j < SUB_YDIM; j++) {
+        MPI_Get_address(&src_buf[j*XDIM], &idx_loc[j]);
+        idx_rem[j] = j*XDIM;
+        blk_len[j] = SUB_XDIM;
+      }
+
+      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &src_type);
+      MPI_Type_indexed(SUB_YDIM, blk_len, idx_rem, MPI_DOUBLE, &dst_type);
+
+      MPI_Type_commit(&src_type);
+      MPI_Type_commit(&dst_type);
+
+      MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
+      MPI_Put(src_buf, 1, src_type, peer, 0, 1, dst_type, buf_win);
+      MPI_Win_unlock(peer, buf_win);
+
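+      // Second lock epoch: read the patch just written back into dst_buf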
+      MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);
+      MPI_Get(dst_buf, 1, src_type, peer, 0, 1, dst_type, buf_win);
+      MPI_Win_unlock(peer, buf_win);
+
+      MPI_Type_free(&src_type);
+      MPI_Type_free(&dst_type);
+    }
+
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    // Verify that the results are correct
+
+    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
+    errors = 0;
+    for (i = 0; i < SUB_XDIM; i++) {
+      for (j = 0; j < SUB_YDIM; j++) {
+        const double actual   = *(win_buf + i + j*XDIM);
+        const double expected = (1.0 + ((rank+nranks-1)%nranks));
+        if (fabs(actual - expected) > 1e-10) {
+          printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
+              rank, j, i, expected, actual);
+          errors++;
+          fflush(stdout);
+        }
+      }
+    }
+    for (i = SUB_XDIM; i < XDIM; i++) {
+      for (j = 0; j < SUB_YDIM; j++) {
+        const double actual   = *(win_buf + i + j*XDIM);
+        const double expected = 1.0 + rank;
+        if (fabs(actual - expected) > 1e-10) {
+          printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
+              rank, j, i, expected, actual);
+          errors++;
+          fflush(stdout);
+        }
+      }
+    }
+    for (i = 0; i < XDIM; i++) {
+      for (j = SUB_YDIM; j < YDIM; j++) {
+        const double actual   = *(win_buf + i + j*XDIM);
+        const double expected = 1.0 + rank;
+        if (fabs(actual - expected) > 1e-10) {
+          printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
+              rank, j, i, expected, actual);
+          errors++;
+          fflush(stdout);
+        }
+      }
+    }
+    MPI_Win_unlock(rank, buf_win);
+
+    MPI_Win_free(&buf_win);
+    MPI_Free_mem(win_buf);
+    MPI_Free_mem(src_buf);
+    MPI_Free_mem(dst_buf);
+
+    MPI_Finalize();
+
+    if (errors == 0) {
+      if (rank == 0)
+        printf(" No Errors\n");
+      return 0;
+    } else {
+      printf("%d: Fail\n", rank);
+      return 1;
+    }
+}

Added: mpich2/trunk/test/mpi/rma/window_creation.c
===================================================================
--- mpich2/trunk/test/mpi/rma/window_creation.c	                        (rev 0)
+++ mpich2/trunk/test/mpi/rma/window_creation.c	2011-01-25 19:25:17 UTC (rev 7829)
@@ -0,0 +1,47 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <mpi.h>
+
+#define DATA_NELTS  1000
+#define NUM_WIN     1000   // Errors were observed starting at 17 windows; up to 16 is ok.
+#define DATA_SZ     (DATA_NELTS*sizeof(int))
+
+static int verbose = 0;
+
+int main(int argc, char ** argv) {
+  int      rank, nproc, i;
+  void    *base_ptrs[NUM_WIN];
+  MPI_Win  windows[NUM_WIN];
+
+  MPI_Init(&argc, &argv);
+
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Comm_size(MPI_COMM_WORLD, &nproc);
+
+  if (rank == 0) if (verbose) printf("Starting MPI window creation test with %d processes\n", nproc);
+
+  // Perform a pile of window creations
+  for (i = 0; i < NUM_WIN; i++) {
+    if (rank == 0) if (verbose) printf(" + Creating window %d\n", i);
+
+    MPI_Alloc_mem(DATA_SZ, MPI_INFO_NULL, &base_ptrs[i]);
+    MPI_Win_create(base_ptrs[i], DATA_SZ, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &windows[i]);
+  }
+
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  // Free all the windows
+  for (i = 0; i < NUM_WIN; i++) {
+    if (rank == 0) if (verbose) printf(" + Freeing window %d\n", i);
+
+    MPI_Win_free(&windows[i]);
+    MPI_Free_mem(base_ptrs[i]);
+  }
+
+  if (rank == 0) printf(" No Errors\n");
+
+  MPI_Finalize();
+
+  return 0;
+}


