[mpich-discuss] Asking for help on MPI derived data type
Jilong Yin
yinjilong at gmail.com
Thu May 6 23:18:50 CDT 2010
Hello,
I am trying to define an MPI derived data type to simplify my program,
but I cannot get it working properly.
First, let me describe the basics of the following test program.
There are many layers, each consisting of many nodes. Each node has a
position (x, y, z) (real data) and a boundary condition type in each of
the (x, y, z) directions (integer data).
I want to send the i-th layer's node data in a so-called buffer space
to the j-th layer in a so-called computation space.
I first define two MPI derived data types describing the node layer
data, and then use the new types to exchange data between the two
spaces.
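In plain array terms, the transfer I want should behave exactly like
this local node-by-node copy (a sketch that reuses the declarations
from the full program below; ISNS is the source layer and IRNS the
destination layer):
[CODE]
C What the send/receive should amount to, written as a local copy
C (uses the declarations from the full program below)
      NOD_LAY_DATA(IRNS)=NOD_LAY_DATA_BF(ISNS)
      DO I=1,N_NOD_CROSEC
        NOD1=I+(ISNS-1)*N_NOD_CROSEC
        NOD2=I+(IRNS-1)*N_NOD_CROSEC
        DO J=1,3
          POSITION(J,NOD2)=POSITION_BF(J,NOD1)
          IFLAG_BD_COND(J,NOD2)=IFLAG_BD_COND_BF(J,NOD1)
        END DO
      END DO
[/CODE]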
The following is my Fortran code. It compiles and runs, but the result
is wrong.
Can anyone help me out?
Thank you.
JILONG YIN
2010-05-07
[CODE]
C The following program tests MPI derived data types for a parallel
C finite element program.
C Node data in the buffer space will be sent to the computation space.
      PROGRAM TEST
      IMPLICIT NONE
      INCLUDE 'mpif.h'
C The number of nodes on a cross-section
      INTEGER,PARAMETER::N_NOD_CROSEC=10
C The number of nodes along the longitudinal direction
      INTEGER,PARAMETER::N_NOD_STREAM=13
C The total number of nodes
      INTEGER,PARAMETER::N_NOD=N_NOD_CROSEC*N_NOD_STREAM
C Node layer numbers in the buffer space
      INTEGER NOD_LAY_DATA_BF(N_NOD_STREAM)
C Node position data in the buffer space
      DOUBLE PRECISION POSITION_BF(3,N_NOD)
C Node boundary condition type data in the buffer space
      INTEGER IFLAG_BD_COND_BF(3,N_NOD)
C Node layer numbers in the computation space
      INTEGER NOD_LAY_DATA(N_NOD_STREAM)
C Node position data in the computation space
      DOUBLE PRECISION POSITION(3,N_NOD)
C Node boundary condition type data in the computation space
      INTEGER IFLAG_BD_COND(3,N_NOD)
C Some variables for defining the MPI derived data types
      INTEGER IBLOCK(99),IDISP(99),ITYPE(99)
      INTEGER NBLOCK
C Node layer data type handles (buffer space and computation space)
      INTEGER NOD_LAY_DATA_BF_TYPE
      INTEGER NOD_LAY_DATA_TYPE
C MPI status and error return
      INTEGER::ISTATUS(MPI_STATUS_SIZE),IERR
C My rank
      INTEGER MYID
C Source rank and destination rank IDs
      INTEGER ID_SRC,ID_DEST
C Node layer numbers for sending and receiving
      INTEGER ISNS,IRNS
      INTEGER I,J,NOD1,NOD2
C Initialize the MPI environment
      CALL MPI_INIT(IERR)
C Get the rank ID
      CALL MPI_COMM_RANK(MPI_COMM_WORLD,MYID,IERR)
C----------------------------------------------------------
C Define the node layer derived data type for the buffer space
      NBLOCK=0
C Layer number
      NBLOCK=NBLOCK+1
      IBLOCK(NBLOCK)=1
      ITYPE(NBLOCK)=MPI_INTEGER
      CALL MPI_ADDRESS(NOD_LAY_DATA_BF,IDISP(NBLOCK),IERR)
C Node positions
      NBLOCK=NBLOCK+1
      IBLOCK(NBLOCK)=3*N_NOD_CROSEC
      ITYPE(NBLOCK)=MPI_DOUBLE_PRECISION
      CALL MPI_ADDRESS(POSITION_BF,IDISP(NBLOCK),IERR)
C Node boundary condition types
      NBLOCK=NBLOCK+1
      IBLOCK(NBLOCK)=3*N_NOD_CROSEC
      ITYPE(NBLOCK)=MPI_INTEGER
      CALL MPI_ADDRESS(IFLAG_BD_COND_BF,IDISP(NBLOCK),IERR)
C Convert absolute addresses to displacements relative to block 1
      DO I=NBLOCK,1,-1
        IDISP(I)=IDISP(I)-IDISP(1)
      END DO
C Generate and commit the new derived data type
      CALL MPI_TYPE_STRUCT(NBLOCK,IBLOCK,IDISP,ITYPE,
     &NOD_LAY_DATA_BF_TYPE,IERR)
      CALL MPI_TYPE_COMMIT(NOD_LAY_DATA_BF_TYPE,IERR)
C---------------------------------------------------------
C Define the node layer derived data type for the computation space
      NBLOCK=0
C Layer number
      NBLOCK=NBLOCK+1
      IBLOCK(NBLOCK)=1
      ITYPE(NBLOCK)=MPI_INTEGER
      CALL MPI_ADDRESS(NOD_LAY_DATA,IDISP(NBLOCK),IERR)
C Node positions
      NBLOCK=NBLOCK+1
      IBLOCK(NBLOCK)=3*N_NOD_CROSEC
      ITYPE(NBLOCK)=MPI_DOUBLE_PRECISION
      CALL MPI_ADDRESS(POSITION,IDISP(NBLOCK),IERR)
C Node boundary condition types
      NBLOCK=NBLOCK+1
      IBLOCK(NBLOCK)=3*N_NOD_CROSEC
      ITYPE(NBLOCK)=MPI_INTEGER
      CALL MPI_ADDRESS(IFLAG_BD_COND(1,1),IDISP(NBLOCK),IERR)
C Convert absolute addresses to displacements relative to block 1
      DO I=NBLOCK,1,-1
        IDISP(I)=IDISP(I)-IDISP(1)
      END DO
C Generate and commit the new derived data type
      CALL MPI_TYPE_STRUCT(NBLOCK,IBLOCK,IDISP,ITYPE,
     &NOD_LAY_DATA_TYPE,IERR)
      CALL MPI_TYPE_COMMIT(NOD_LAY_DATA_TYPE,IERR)
C---------------------------------------------------------
C Initialize the node data in the computation space
      NOD_LAY_DATA(:)=0
      POSITION(:,:)=0.0D0
      IFLAG_BD_COND(:,:)=-1
C Prepare test data in the buffer space
      DO I=1,N_NOD_STREAM
        NOD_LAY_DATA_BF(I)=I
      END DO
      DO I=1,N_NOD
        DO J=1,3
          POSITION_BF(J,I)=J+I*10.0D0
          IFLAG_BD_COND_BF(J,I)=J+I*10+90000000
        END DO
      END DO
C Send the ISNS-th layer's node data in the buffer space to the
C IRNS-th layer in the computation space
      ISNS=1
      IRNS=2
C Source rank ID and destination rank ID
      ID_SRC=0
      ID_DEST=1
C Send the node layer data using derived data type 1
      IF(MYID.EQ.ID_SRC) THEN
        CALL MPI_SEND(NOD_LAY_DATA_BF(ISNS),1,NOD_LAY_DATA_BF_TYPE,
     &  ID_DEST,123,MPI_COMM_WORLD,IERR)
      END IF
C Receive the node layer data using derived data type 2
      IF(MYID.EQ.ID_DEST) THEN
        CALL MPI_RECV(NOD_LAY_DATA(IRNS),1,NOD_LAY_DATA_TYPE,
     &  ID_SRC,123,MPI_COMM_WORLD,ISTATUS,IERR)
      END IF
      PRINT*,'MYID=',MYID,'IERR=',IERR
C Output the received data to verify it
      IF(MYID.EQ.ID_DEST) THEN
        PRINT*,ID_SRC,NOD_LAY_DATA_BF(ISNS),
     &  ID_DEST,NOD_LAY_DATA(IRNS)
        DO I=1,N_NOD_CROSEC
          NOD1=I+(ISNS-1)*N_NOD_CROSEC
          NOD2=I+(IRNS-1)*N_NOD_CROSEC
          PRINT*,NOD1,POSITION_BF(1,NOD1),NOD2,POSITION(1,NOD2)
          PRINT*,NOD1,POSITION_BF(2,NOD1),NOD2,POSITION(2,NOD2)
          PRINT*,NOD1,POSITION_BF(3,NOD1),NOD2,POSITION(3,NOD2)
          PRINT*,NOD1,IFLAG_BD_COND_BF(1,NOD1),NOD2,
     &    IFLAG_BD_COND(1,NOD2)
          PRINT*,NOD1,IFLAG_BD_COND_BF(2,NOD1),NOD2,
     &    IFLAG_BD_COND(2,NOD2)
          PRINT*,NOD1,IFLAG_BD_COND_BF(3,NOD1),NOD2,
     &    IFLAG_BD_COND(3,NOD2)
        END DO
      END IF
C
      CALL MPI_TYPE_FREE(NOD_LAY_DATA_BF_TYPE,IERR)
      CALL MPI_TYPE_FREE(NOD_LAY_DATA_TYPE,IERR)
      CALL MPI_FINALIZE(IERR)
      END
[/CODE]
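P.S. One thing I suspect: the displacements to POSITION_BF and
IFLAG_BD_COND_BF are baked into the committed type relative to the
address of NOD_LAY_DATA_BF(1), so passing NOD_LAY_DATA_BF(ISNS) or
NOD_LAY_DATA(IRNS) as the buffer shifts every block by only
(ISNS-1) or (IRNS-1) integers, and the position and boundary
condition blocks no longer point at the right layer. If that is the
cause, one possible fix would be to rebuild the type for the specific
layer using absolute addresses and communicate from MPI_BOTTOM. A
minimal, untested sketch in the same style as above (the receive side
would mirror it with NOD_LAY_DATA(IRNS), POSITION, and IFLAG_BD_COND):
[CODE]
C Untested sketch: build the buffer-space type for layer ISNS with
C ABSOLUTE addresses (no subtraction of IDISP(1)), then send from
C MPI_BOTTOM so every block points at the right memory.  NOD1 is
C the first node of layer ISNS.  (On 64-bit systems the modern
C MPI_GET_ADDRESS/MPI_TYPE_CREATE_STRUCT with
C INTEGER(KIND=MPI_ADDRESS_KIND) displacements would be safer.)
      NOD1=1+(ISNS-1)*N_NOD_CROSEC
      NBLOCK=0
C Layer number
      NBLOCK=NBLOCK+1
      IBLOCK(NBLOCK)=1
      ITYPE(NBLOCK)=MPI_INTEGER
      CALL MPI_ADDRESS(NOD_LAY_DATA_BF(ISNS),IDISP(NBLOCK),IERR)
C Node positions of layer ISNS only
      NBLOCK=NBLOCK+1
      IBLOCK(NBLOCK)=3*N_NOD_CROSEC
      ITYPE(NBLOCK)=MPI_DOUBLE_PRECISION
      CALL MPI_ADDRESS(POSITION_BF(1,NOD1),IDISP(NBLOCK),IERR)
C Node boundary condition types of layer ISNS only
      NBLOCK=NBLOCK+1
      IBLOCK(NBLOCK)=3*N_NOD_CROSEC
      ITYPE(NBLOCK)=MPI_INTEGER
      CALL MPI_ADDRESS(IFLAG_BD_COND_BF(1,NOD1),IDISP(NBLOCK),IERR)
      CALL MPI_TYPE_STRUCT(NBLOCK,IBLOCK,IDISP,ITYPE,
     &NOD_LAY_DATA_BF_TYPE,IERR)
      CALL MPI_TYPE_COMMIT(NOD_LAY_DATA_BF_TYPE,IERR)
C The buffer argument is MPI_BOTTOM because the type already
C carries absolute addresses
      CALL MPI_SEND(MPI_BOTTOM,1,NOD_LAY_DATA_BF_TYPE,
     &ID_DEST,123,MPI_COMM_WORLD,IERR)
[/CODE]
Does this look correct, or is there a cleaner way to make one
committed type work for every layer?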