[mpich-discuss] Sending an array of a derived data type
Hossein Beiramy
beiramy_hossein at yahoo.com
Mon Mar 28 09:47:10 CDT 2011
I have installed MPICH2-1.3.2p1 (MPICH2 for Windows EM64T/AMD64) and linked the Intel MPI library with a small project in Visual Studio 2010. I use the Intel Fortran compiler, version 12. In the following example I want to send an array of a derived data type to the client processes. The specification of my PC is:
Windows 7, 64-bit, 4 GB of RAM, Intel® Core(TM)2 Duo CPU 3.34 GHz
The client processes receive the first 522 cells of the array correctly and garbage data for the remaining cells. (A small extent-check sketch follows the program listing below.)
program ping
implicit none
include 'mpif.h'
integer :: numtasks, rank, dest, source, tag, ierr
integer :: i
integer :: NELEM
integer :: stat(MPI_STATUS_SIZE)
type Particle
INTEGER(4) :: IW1
INTEGER(4) :: IP1
INTEGER(4) :: IW2
INTEGER(4) :: IP2
INTEGER(4) :: IPWF
INTEGER(4) :: IP
INTEGER(4) :: IL10
INTEGER(2) :: IMP4
INTEGER(2) :: IMP5
INTEGER(2) :: LIB1
INTEGER(2) :: LIB2
INTEGER(2) :: LIB3
INTEGER(2) :: IVOL
INTEGER(2) :: ITARG
INTEGER(2) :: ISEG
INTEGER(2) :: NFAU
INTEGER(2) :: NMAX
INTEGER(2) :: NTOT
LOGICAL :: nomix
LOGICAL :: TRcorr
INTEGER(4) :: JZ(6)
INTEGER(4) :: IJ(6)
end type Particle
type (Particle),DIMENSION(:),POINTER :: p, particles
integer particletype, oldtypes(0:3), blockcounts(0:3), offsets(0:3), extent
integer rowtype,columntype,SIZE,id
integer NUMSEG,indextype_INTBB
integer,allocatable,dimension(:) :: blocklengths,displacements
tag = 1
call MPI_INIT(ierr)
call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
call MPI_COMM_SIZE(MPI_COMM_WORLD, numtasks, ierr)
NELEM=10000
ALLOCATE(particles(0:NELEM))
ALLOCATE(p(0:1010))
! Set up the description of the 7 INTEGER(4) fields (IW1 ... IL10)
offsets(0) = 0
oldtypes(0) = MPI_INTEGER4
blockcounts(0) = 7
call MPI_TYPE_EXTENT(MPI_INTEGER4, extent, ierr)
offsets(1) = 7 * extent
oldtypes(1) = MPI_INTEGER2
blockcounts(1) = 11
! Set up the description of the 2 LOGICAL fields (nomix, TRcorr)
! The offset is computed from the extent of MPI_INTEGER2
call MPI_TYPE_EXTENT(MPI_INTEGER2, extent, ierr)
offsets(2) = 11 * extent
oldtypes(2) = MPI_LOGICAL
blockcounts(2) = 2
SIZE=6
!call MPI_TYPE_VECTOR(SIZE, 1, SIZE, MPI_REAL, rowtype, ierr)
!call MPI_TYPE_COMMIT(rowtype, ierr)
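! Contiguous type covering one INTEGER(4) array of length SIZE=6 (used for the JZ and IJ fields)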
call MPI_TYPE_CONTIGUOUS(SIZE, MPI_INTEGER4, columntype, ierr)
call MPI_TYPE_COMMIT(columntype, ierr)
call MPI_TYPE_EXTENT(MPI_LOGICAL, extent, ierr)
offsets(3) = 2 * extent
oldtypes(3) = columntype
blockcounts(3) = 2
! Now define structured type and commit it
call MPI_TYPE_STRUCT(4, blockcounts, offsets, oldtypes, particletype, ierr)
call MPI_TYPE_COMMIT(particletype, ierr)
! Initialize the particle array and then send it to each task
tag = 1
if (rank .eq. 0) then
do 10 i=0, NELEM-1
particles(i)%IW1=i+1
particles(i)%IP1=i+2
particles(i)%IW2=i+3
particles(i)%IP2=i+4
particles(i)%IPWF=i+5
particles(i)%IP=i+6
particles(i)%IL10=i+7
particles(i)%IMP4=i+8
particles(i)%IMP5=i+9
particles(i)%LIB1=i+10
particles(i)%LIB2=i+11
particles(i)%LIB3=i+12
particles(i)%IVOL=i+13
particles(i)%ITARG=i+14
particles(i)%ISEG=i+15
particles(i)%NFAU=i+16
particles(i)%NMAX=i+17
particles(i)%NTOT=i+18
particles(i)%nomix=.FALSE.
particles(i)%TRcorr=.FALSE.
particles(i)%JZ=i
particles(i)%IJ=i+1
10 continue
NUMSEG = 4
ALLOCATE(blocklengths(NUMSEG), displacements(NUMSEG))
blocklengths(1)=798; displacements(1)=0
blocklengths(2)=84; displacements(2)=7798
blocklengths(3)=114; displacements(3)=4800
blocklengths(4)=12; displacements(4)=8952
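! Indexed type picking NUMSEG blocks of Particle elements out of the full array;
! the displacements are counted in multiples of the particletype extent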
CALL MPI_TYPE_INDEXED(NUMSEG, blocklengths, displacements, particletype, indextype_INTBB, ierr)
call MPI_TYPE_COMMIT(indextype_INTBB, ierr)
do 20 i=1, numtasks-1
call MPI_SEND(particles, 1, indextype_INTBB, i, tag, MPI_COMM_WORLD, ierr)
! call MPI_SEND(particles, 1008, particletype, i, tag, MPI_COMM_WORLD, ierr)
20 continue
endif
id=521
if (rank>0) then
source = 0
call MPI_RECV(p, 1008, particletype, source, tag, MPI_COMM_WORLD, stat, ierr)
print *, 'rank= ',rank,' p=',p(id)%nomix,p(id)%TRcorr,p(0)%JZ(1),p(id)%JZ(1),p(id)%JZ(6),p(id)%IJ
end if
call MPI_TYPE_FREE(particletype, ierr)
call MPI_FINALIZE(ierr)
end program ping
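For reference, here is a minimal sketch of a check that could be dropped in right after the MPI_TYPE_COMMIT(particletype, ierr) call above. It compares the extent MPI uses for particletype with the actual memory stride of one Particle element. The names lb, type_extent, addr0 and addr1 are new here, and their declaration belongs with the other declarations at the top of the program.

! declaration (goes with the other declarations at the top of the program):
! integer(kind=MPI_ADDRESS_KIND) :: lb, type_extent, addr0, addr1
! extent MPI steps by when walking an array of particletype
call MPI_TYPE_GET_EXTENT(particletype, lb, type_extent, ierr)
! actual memory stride of one Particle element, including any
! alignment padding the Fortran compiler inserts inside the type
call MPI_GET_ADDRESS(particles(0), addr0, ierr)
call MPI_GET_ADDRESS(particles(1), addr1, ierr)
if (rank .eq. 0) then
print *, 'particletype extent =', type_extent
print *, 'Particle stride     =', addr1 - addr0
end if

If the two values differ, the mismatch accumulates with every element of the array, which could explain why only the first part of the received data is intact.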