! Need to determine offsets/displacements (disp) in the restart file for each processor.
! Each processor will then create a derived type (SUBARRAY) to access its portion of data.
! Displacements are always in bytes; the initial displacement skips the header info above,
! and displacements are the same for all blocks/processors that come from the same grid.
!
disp = 0
DO ngx = 1, ngrd
   !
   ! These are the dimensions of the original (non-decomposed) grid
   !
   is = 1
   ie = ieng(ngx)
   js = 1
   je = jeng(ngx)
   ks = 1
   ke = keng(ngx)
   ! Need the original grid sizes for the SUBARRAY derived type we will use for parallel IO
   arraysz(1) = ieng(ngx)
   arraysz(2) = jeng(ngx)
   arraysz(3) = keng(ngx)
   !
   ! Get decomposed grid sizes and displacements to use to read the restart file
   !
   numblks = nbeptr(ngx) - nbsptr(ngx) + 1
   DO nbl = 1, numblks
      nbg   = nbl + nbsptr(ngx) - 1   ! Ranges from 1 to nblk
      nproc = nbg - 1                 ! Ranges from 0 to nblk-1
      IF ( nbl == 1 ) THEN
         IF ( nproc == 0 ) THEN
            lcount = INT(ie,MPI_OFFSET_KIND)*je*ke*8   ! Bytes per field; 64-bit arithmetic avoids overflow on large grids
            disp(1,nbg) = 4 + 8 + 4 + 3*ngrd*4               ! Skip header
            disp(2,nbg) = disp(1,nbg) + lcount               ! Skip x
            disp(3,nbg) = disp(2,nbg) + lcount               ! Skip y
            disp(4,nbg) = disp(3,nbg) + lcount               ! Skip z
            disp(5,nbg) = disp(4,nbg) + lcount*NVAR          ! Skip var
            IF ( itimeint >= 2 ) disp(6,nbg) = disp(5,nbg) + lcount*NVAR   ! Skip qold
         ELSE
            lcount = INT(ieng(ngx-1),MPI_OFFSET_KIND)*jeng(ngx-1)*keng(ngx-1)*8   ! Count based on previous grid dimensions
            disp(1,nbg) = disp(5,nbg-1)                      ! Skip previous grid var
            IF ( itimeint >= 2 ) disp(1,nbg) = disp(6,nbg-1) ! Skip previous grid qold
            IF ( itimeint == 3 .OR. itimeint == 4 ) disp(1,nbg) = disp(1,nbg) + lcount*NVAR   ! Skip previous grid qreold
            lcount = INT(ie,MPI_OFFSET_KIND)*je*ke*8
            disp(2,nbg) = disp(1,nbg) + lcount               ! Skip x for current grid
            disp(3,nbg) = disp(2,nbg) + lcount               ! Skip y
            disp(4,nbg) = disp(3,nbg) + lcount               ! Skip z
            disp(5,nbg) = disp(4,nbg) + lcount*NVAR          ! Skip var
            IF ( itimeint >= 2 ) disp(6,nbg) = disp(5,nbg) + lcount*NVAR   ! Skip qold
         END IF
      ELSE
         disp(1,nbg) = disp(1,nbg-1)   ! Displacements are the same for all processors from the same grid
         disp(2,nbg) = disp(2,nbg-1)
         disp(3,nbg) = disp(3,nbg-1)
         disp(4,nbg) = disp(4,nbg-1)
         disp(5,nbg) = disp(5,nbg-1)
         disp(6,nbg) = disp(6,nbg-1)
      END IF
      ! Need block sizes and start points in grid blocks for SUBARRAY; start points begin at 0
      startsubarray(1) = isa(ngx,nbl) - 1
      startsubarray(2) = jsa(ngx,nbl) - 1
      startsubarray(3) = ksa(ngx,nbl) - 1
      IF ( nproc == 0 ) THEN
         ! Block sizes needed for the derived type
         subarraysz(1) = iea(ngx,nbl) - isa(ngx,nbl) + 1
         subarraysz(2) = jea(ngx,nbl) - jsa(ngx,nbl) + 1
         subarraysz(3) = kea(ngx,nbl) - ksa(ngx,nbl) + 1
         ! Create the derived type needed for MPI-IO
         CALL MPI_TYPE_CREATE_SUBARRAY ( 3, arraysz, subarraysz, startsubarray, MPI_ORDER_FORTRAN, &
            &                            MPI_REAL8, restarttype, ierror )
         CALL MPI_TYPE_COMMIT ( restarttype, ierror )
      ELSE
         !
         ! Send the proper grid and block sizes to the corresponding processor
         !
         tag1 = nproc * 100 + 1
         tag2 = nproc * 100 + 2
         CALL MPI_SEND ( arraysz, 3, mpi_integer4, nproc, tag1, mpi_comm_world, ierror )
         CALL MPI_SEND ( startsubarray, 3, mpi_integer4, nproc, tag2, mpi_comm_world, ierror )
      END IF
   END DO
END DO

ELSE   ! Not mpi_rank = 0

nproc = mpi_rank   ! Ranges from 0 to nblk-1
tag1 = nproc * 100 + 1
tag2 = nproc * 100 + 2
is = 1
ie = ienb(ng)
js = 1
je = jenb(ng)
ks = 1
ke = kenb(ng)
subarraysz(1) = ie
subarraysz(2) = je
subarraysz(3) = ke
!
! Receive original grid dimensions and starting location of block
!
CALL MPI_RECV ( arraysz, 3, mpi_integer4, 0, tag1, mpi_comm_world, mpi_status, ierror )
CALL MPI_RECV ( startsubarray, 3, mpi_integer4, 0, tag2, mpi_comm_world, mpi_status, ierror )
!
! Create the derived type needed for MPI-IO
!
WRITE(*,*) nproc, subarraysz, arraysz   ! Debug output: local block size and received grid size
CALL MPI_TYPE_CREATE_SUBARRAY ( 3, arraysz, subarraysz, startsubarray, MPI_ORDER_FORTRAN, &
   &                            MPI_REAL8, restarttype, ierror )
CALL MPI_TYPE_COMMIT ( restarttype, ierror )

END IF

! All processes need to know the displacements of their data in the restart file
CALL MPI_SCATTER ( disp, 6, mpi_integer8, displacerestart, 6, mpi_integer8, 0, mpi_comm_world, ierror )
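!
! For reference, the per-grid file layout implied by the displacement chain above
! (an illustrative summary, not part of the original routine; sizes assume the
! 8-byte reals used throughout):
!
!   header:  4 + 8 + 4 + 3*ngrd*4 bytes
!   grid 1:  x, y, z    (ieng(1)*jeng(1)*keng(1)*8 bytes each)
!            var        (NVAR fields of the same size)
!            qold       (NVAR fields, present when itimeint >= 2)
!            qreold     (NVAR fields, present when itimeint == 3 or 4)
!   grid 2:  same sequence with grid-2 dimensions, and so on.
!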
! Ready to read the data now with collective, parallel I/O
! Read x, y, and z
count = ie*je*ke
CALL MPI_FILE_SET_VIEW ( mpiio_restart_in, displacerestart(1), mpi_real8, restarttype, "native", mpi_info_null, ierror )
CALL MPI_FILE_READ_ALL ( mpiio_restart_in, x, count, mpi_real8, mpi_status, ierror )
CALL MPI_FILE_SET_VIEW ( mpiio_restart_in, displacerestart(2), mpi_real8, restarttype, "native", mpi_info_null, ierror )
CALL MPI_FILE_READ_ALL ( mpiio_restart_in, y, count, mpi_real8, mpi_status, ierror )
CALL MPI_FILE_SET_VIEW ( mpiio_restart_in, displacerestart(3), mpi_real8, restarttype, "native", mpi_info_null, ierror )
CALL MPI_FILE_READ_ALL ( mpiio_restart_in, z, count, mpi_real8, mpi_status, ierror )
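!
! The solution arrays would be read with the same view/read pattern. The sketch
! below is illustrative only: the array names q and qold and their assumed shape
! (ie,je,ke,NVAR) are not taken from the original routine. Because the extent of
! the subarray filetype equals the full grid, reading NVAR*count values pulls
! this block's portion of NVAR consecutively stored fields in one collective call.
!
!CALL MPI_FILE_SET_VIEW ( mpiio_restart_in, displacerestart(4), mpi_real8, restarttype, "native", mpi_info_null, ierror )
!CALL MPI_FILE_READ_ALL ( mpiio_restart_in, q, NVAR*count, mpi_real8, mpi_status, ierror )
!IF ( itimeint >= 2 ) THEN
!   CALL MPI_FILE_SET_VIEW ( mpiio_restart_in, displacerestart(5), mpi_real8, restarttype, "native", mpi_info_null, ierror )
!   CALL MPI_FILE_READ_ALL ( mpiio_restart_in, qold, NVAR*count, mpi_real8, mpi_status, ierror )
!END IF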