!-*- F90 -*-
program mpi_setup_netCDFout
  !--------------------------------------------------------------------!
  ! author:  Alex HOFFMANN                                              !
  ! email:   ah519@cam.ac.uk                                            !
  ! date:    February 2009                                              !
  ! version: development                                                !
  !                                                                     !
  ! simple program to test MPI runs & netCDF output (serial & parallel) !
  !--------------------------------------------------------------------!
  ! test code designed to be used with the variables from the mesoscale
  ! atmospheric model ATHAM, re-structured as a stand-alone program;
  ! it sets up a 4x4 index matrix in real4 and writes it out as netCDF

  ! use precision
  ! use atham_module, only : nx,ny,ntx,nty,npx,npy

  implicit none
  !private
  !public

  ! --- mpi interface for Fortran 77 only, use header files -----------
  include 'mpif.h'

  ! --- precision ------------------------------------------------------
  integer, parameter :: real4 = selected_real_kind(6),  &
                        real8 = selected_real_kind(12), &
                        kreal = real8
  integer, parameter :: int4  = selected_int_kind(7),   &
                        int8  = selected_int_kind(13),  &
                        kint  = int4

  ! --- atham_module ---------------------------------------------------
  integer(kint) :: nx,ny,ntx,nty,npx,npy
  integer(kint) :: my_ranks, nprocs, &
                   my_cart, myrank, mycoor(2)
  logical       :: mpi_initialized, mpi

  ! --- other ----------------------------------------------------------
  integer(kint) :: ierror
  real(real4), allocatable, dimension(:,:) :: array_g, array

  mpi_initialized = .false.

  ! --- for initial testing ---------------------------------------------
  nx  = 4_kint      ! user-specified number of grid points along x
  ny  = 4_kint
  npx = 2_kint      ! number of processes along x
  npy = 2_kint
  ntx = nx          ! re-adjusted total number of grid points along x
  nty = ny
  nx  = nx/npx      ! number of grid points along x per process
  ny  = ny/npy

  ! --- MAIN

  ! --- mpi setup calls -------------------------------------------------
  call initialize_mpi
  call setup_mpi

  ! --- netCDF out calls ------------------------------------------------
  ! call nc_out_s
  call nc_out_p

  ! --- mpi finalize call -----------------------------------------------
  call MPI_Finalize(ierror)

  ! --- END MAIN

contains

  !=====================================================================
  subroutine initialize_mpi()
    ! use atham_module, only : mpi, mpi_initialized
    ! use atham_module, only : myrank, nprocs

    integer(kint)      :: ierror, errorclass, reslen, ierr
    character(len=100) :: message

    ! assign variables for serial version
    ierror = 0
    myrank = 0
    nprocs = 1
    mpi    = .false.

    ! initialize MPI
    mpi = .true.
    if (.not. mpi_initialized) call MPI_Init(ierror)
    call MPI_Comm_rank(mpi_comm_world,myrank,ierror)
    call MPI_Comm_size(mpi_comm_world,nprocs,ierror)
    mpi             = .true.
    mpi_initialized = .true.

    if (myrank==0) then
       print *, 'Myrank 0: ',myrank
       print *, 'MPI initialized!'
       print *, 'Number of processes [nprocs]: ',nprocs
    endif

    if (ierror>0) then
       call MPI_Error_string(ierror,message,reslen,ierr)
       call MPI_Error_class(ierror,errorclass,ierr)
       call exec_stop(message,errorclass)
    endif

  end subroutine initialize_mpi

  !=====================================================================
  subroutine setup_mpi()
    ! use atham_module, only : mpi, mpi_initialized
    ! use atham_module, only : myrank,nprocs,my_cart,mycoor

    integer(kint)      :: ierror, errorclass, reslen, ierr
    character(len=100) :: message
    integer(kint)      :: idims(2), shape_array(2)
    logical            :: reorder
    integer(kint)      :: i,j,istart,jstart

    if (nprocs/=npx*npy) then
       print *, 'nprocs /= npx*npy - execution aborted'
       call MPI_Abort(mpi_comm_world,1,ierror)
       stop
    endif

    ! create virtual topology
    idims(1:2) = (/npx,npy/)
    reorder    = .true.
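    ! MPI_Cart_create maps the nprocs ranks onto a 2-D npx x npy Cartesian
    ! grid; both periods are .false. (non-periodic) and reorder=.true.
    ! allows MPI to renumber ranks, which is why the rank is re-queried on
    ! the new communicator my_cart before looking up this process's
    ! Cartesian coordinates.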
    call MPI_Cart_create(mpi_comm_world,2,idims,(/.false.,.false./), &
                         reorder,my_cart,ierror)
    call MPI_Comm_rank(my_cart,myrank,ierror)            ! ranks on new communicator
    call MPI_Cart_coords(my_cart,myrank,2,mycoor,ierror)

    if (myrank==0) then
       print *, 'Coords of process ',myrank,' :',mycoor
    endif

    if (ierror>0) then
       call MPI_Error_string(ierror,message,reslen,ierr)
       call MPI_Error_class(ierror,errorclass,ierr)
       call exec_stop(message,errorclass)
    endif

    ! allocate and assign global grid on each process
    allocate(array_g(ntx,nty))
    array_g(1,:) = (/ 11.0,12.0,13.0,14.0 /)
    array_g(2,:) = (/ 21.0,22.0,23.0,24.0 /)
    array_g(3,:) = (/ 31.0,32.0,33.0,34.0 /)
    array_g(4,:) = (/ 41.0,42.0,43.0,44.0 /)

    if (myrank==0) then
       print *, 'whole 4*4 array on each process:'
       print *, array_g(1,:)
       print *, array_g(2,:)
       print *, array_g(3,:)
       print *, array_g(4,:)
    endif

    ! allocate and assign subgrid portions on the various processes
    allocate(array(nx,ny))
    istart = mycoor(1)*nx
    jstart = mycoor(2)*ny
    do i=1,nx
       do j=1,ny
          array(i,j) = array_g(istart+i,jstart+j)
       enddo
    enddo

    ! output results from one process
    if (myrank==0) then
       print *, ''
       print *, 'subgrid array on process with coords ',mycoor,':'
       shape_array = shape(array)
       print *, 'shape: ',shape_array
       do i=1,shape_array(1)
          print *, array(i,:)
       enddo
       print *, ''
    endif

  end subroutine setup_mpi

  !=====================================================================
  subroutine nc_out_p()
    ! use atham_module, only : mycoor,my_cart,myrank
    ! use atham_module, only : nx,ny

    ! precompiler problem: include file had to be hardwired into code
    ! include 'pnetcdf.inc'
    ! #include "/usr/local/parallel-netcdf-1.0.1/include/pnetcdf.inc"
# include "pnetcdf.inc"

    integer(kint)            :: NCIDP
    character(len=10)        :: name
    integer(kint)            :: x_dimIDP,y_dimIDP,x_varIDP,y_varIDP,array_varIDP
    integer(kint)            :: dimIDPs(2)
    integer(MPI_OFFSET_KIND) :: dimlenx,dimleny
    integer(MPI_OFFSET_KIND) :: istart,jstart
    integer(MPI_OFFSET_KIND) :: ntx_mpi,nty_mpi,nx_mpi,ny_mpi
    integer(MPI_OFFSET_KIND) :: starts(2), counts(2)

    ntx_mpi = ntx
    nty_mpi = nty
    nx_mpi  = nx
    ny_mpi  = ny

    name = 'out_glb.nc'

    istart = mycoor(1)*nx_mpi + 1
    jstart = mycoor(2)*ny_mpi + 1

    ! define the parallel-netCDF file: dimensions, coordinate and data variables
    call handle_err_nc_p(nfmpi_create(my_cart,'./'//trim(name),0,MPI_INFO_NULL,NCIDP),0)
    call handle_err_nc_p(nfmpi_def_dim(NCIDP,'x',ntx_mpi,x_dimIDP),1)
    call handle_err_nc_p(nfmpi_def_dim(NCIDP,'y',nty_mpi,y_dimIDP),2)
    call handle_err_nc_p(nfmpi_def_var(NCIDP,'x',NF_FLOAT,1,x_dimIDP,x_varIDP),3)
    call handle_err_nc_p(nfmpi_def_var(NCIDP,'y',NF_FLOAT,1,y_dimIDP,y_varIDP),4)
    call handle_err_nc_p(nfmpi_def_var(NCIDP,'array',NF_FLOAT,2,(/x_dimIDP,y_dimIDP/),array_varIDP),5)
    call handle_err_nc_p(nfmpi_enddef(NCIDP),6)

    if (myrank==0) then
       print *, ''
       print *, 'array in nc_out_p subroutine'
       print *, array
       print *, ''
       print *, '(/nx,ny/)         ',(/nx,ny/)
       print *, '(/istart,jstart/) ',(/istart,jstart/)
    endif

    if (myrank==0) then
       print *, ''
       call handle_err_nc_p(nfmpi_inq_dimlen(NCIDP,x_dimIDP,dimlenx),7)
       call handle_err_nc_p(nfmpi_inq_dimlen(NCIDP,y_dimIDP,dimleny),8)
       print *, 'pnetCDF var dimensions in xy ',dimlenx,dimleny
       print *, 'ntx_mpi ',ntx_mpi
       print *, 'ntx     ',ntx
       print *, 'nx_mpi  ',nx_mpi
       print *, 'nx      ',nx
    endif

    ! this should have been possible in collective data mode, but did not seem to work !!!
    ! call handle_err_nc_p(nfmpi_begin_indep_data(NCIDP),9)
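    ! Each process writes its nx x ny block into the global ntx x nty
    ! variable; the PnetCDF Fortran interface takes 1-based start indices
    ! and counts of kind MPI_OFFSET_KIND. The collective call
    ! nfmpi_put_vara_real_all below is the active write path; the
    ! independent-mode variant (begin_indep_data / nfmpi_put_vara_real /
    ! end_indep_data) is left commented out around it.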
    ! call handle_err_nc_p(nfmpi_put_vara_real(NCIDP,array_varIDP,(/istart,jstart/), &
    !                      (/nx_mpi,ny_mpi/),reshape(array,(/nx_mpi,ny_mpi/))),10)

    starts = (/istart,jstart/)
    counts = (/nx_mpi,ny_mpi/)
    call handle_err_nc_p(nfmpi_put_vara_real_all(NCIDP,array_varIDP,starts,counts, &
                         reshape(array,(/nx_mpi,ny_mpi/))),10)

    ! call handle_err_nc_p(nfmpi_end_indep_data(NCIDP),11)

    call handle_err_nc_p(nfmpi_close(NCIDP),12)

  end subroutine nc_out_p

  !=====================================================================
  !=====================================================================
  subroutine handle_err_nc_p(status,callID)
    ! use atham_module, only : myrank

    ! #include "/usr/local/parallel-netcdf-1.0.1/include/pnetcdf.inc"
# include "pnetcdf.inc"

    integer, intent(in) :: status
    integer, intent(in) :: callID

    if (status /= nf_noerr) then
       if (myrank==0) then
          print *, ""
          print *, trim(nfmpi_strerror(status))
          print *, "atham_pnetcdf error: program execution stopped ",callID
          print *, ""
          ! stop
       end if
    endif

    return
  end subroutine handle_err_nc_p

  !=====================================================================
  subroutine exec_stop(MESSAGE,ERRORCLASS)

    character(len=*), intent(in) :: MESSAGE
    integer(kint),    intent(in) :: ERRORCLASS
    integer(kint)                :: ierror

    print *, 'ERROR: MPI execution stopped'
    print *, 'error class: ', ERRORCLASS
    print *, 'message:     ', trim(MESSAGE)
    call MPI_Abort(mpi_comm_world,ERRORCLASS,ierror)
    stop

  end subroutine exec_stop

  !=====================================================================
  subroutine makename(prefix,icoor,name)
    !------------------------------------------------------------------!
    ! create character name=prefix_(icoor(1)+1)_(icoor(2)+1)           !
    !------------------------------------------------------------------!
    character(len=*),            intent(in)  :: prefix
    integer(kint), dimension(2), intent(in)  :: icoor
    character(len=*),            intent(out) :: name
    !------------------------------------------------------------------!
    ! local variables                                                   !
    !------------------------------------------------------------------!
    character(len=25) :: numx,numy
    integer(kint)     :: lenx,leny,lenf

    write(numx,'(i5)') icoor(1)+1
    numx = adjustl(numx)
    lenx = len_trim(numx)
    write(numy,'(i5)') icoor(2)+1
    numy = adjustl(numy)
    leny = len_trim(numy)

    name = adjustl(prefix)
    lenf = len_trim(name)
    name = name(1:lenf)//'_'//numx(1:lenx)//'_'//numy(1:leny)

  end subroutine makename
  !=====================================================================

end program mpi_setup_netCDFout
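!----------------------------------------------------------------------
! Build/run sketch (illustrative only; the compiler wrapper and the
! parallel-netcdf install path are assumptions that differ by system):
!   mpif90 -I/usr/local/parallel-netcdf-1.0.1/include mpi_setup_netCDFout.F90 \
!          -L/usr/local/parallel-netcdf-1.0.1/lib -lpnetcdf
!   mpirun -np 4 ./a.out
! npx*npy = 4, so exactly 4 MPI processes are expected; the source must be
! preprocessed (e.g. a .F90 suffix or -cpp) so the '# include' line is handled.
!----------------------------------------------------------------------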