[mpich-discuss] please help me to fix a mpi error
sc Deng
dengsc74.sj at gmail.com
Wed Jan 25 11:30:34 CST 2012
Hello,
I am running into an MPI error. I converted my previous code from F77 (the F77 version works fine) to F90, and the error is shown below (running with 27 processes, but only 26 or fewer of them print their information):
rank=: 0 1780 6
rank=: 2 656 6
rank=: 4 1825 6
rank=: 8 425 6
rank=: 16 1233 6
rank=: 12 1859 6
rank=: 6 1171 6
rank=: 20 434 6
rank=: 5 668 6
rank=: 14 688 6
rank=: 21 1208 6
rank=: 18 1174 6
rank=: 24 779 6
rank=: 7 1186 6
rank=: 10 1841 6
rank=: 17 433 6
rank=: 11 688 6
rank=: 22 1216 6
rank=: 19 1189 6
rank=: 25 792 6
rank=: 3 1808 6
rank=: 15 1213 6
rank=: 13 1895 6
rank=: 1 1788 6
rank=: 9 1815 6
rank=: 23 441 6
[mpiexec at fn-148-148] ONE OF THE PROCESSES TERMINATED BADLY: CLEANING UP
APPLICATION TERMINATED WITH THE EXIT STRING: Terminated (signal 15)
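For context, GRID_COMM in the code below is a 3-D Cartesian communicator, and upper_nbr/bottom_nbr are the neighbors along the z direction. My setup looks roughly like the following sketch (the dims and periods values here are only illustrative, not copied from my real code):
program cart_setup_sketch
use mpi
implicit none
integer :: ierror , rank , GRID_COMM , upper_nbr , bottom_nbr
integer :: dims(3) , cpu_coords(3)
logical :: periods(3)
call MPI_INIT(ierror)
dims = (/ 3 , 3 , 3 /)                      ! 27 processes as a 3x3x3 grid (illustrative)
periods = (/ .false. , .false. , .false. /) ! non-periodic in every direction (illustrative)
call MPI_CART_CREATE(MPI_COMM_WORLD , 3 , dims , periods , .true. , GRID_COMM , ierror)
call MPI_COMM_RANK(GRID_COMM , rank , ierror)
call MPI_CART_COORDS(GRID_COMM , rank , 3 , cpu_coords , ierror)
! neighbors along the third (z) direction: the shift source is the "bottom" neighbor,
! the destination is the "upper" neighbor
call MPI_CART_SHIFT(GRID_COMM , 2 , 1 , bottom_nbr , upper_nbr , ierror)
call MPI_FINALIZE(ierror)
end program cart_setup_sketch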
It is a particle simulation program, and the corresponding code is:
!send to upper and receive from bottom
num = 0 ; num_tmp = 0
do i = 0 , nx+1
do j = 0 , ny+1
p = last_particle_in_cell(i,j,nz)
do while(p>0)
num = num + 1
if(num > num_send) print*, 'Please enlarge the number of sent ghost - "num_send"' , num , num_send
send_ghost(num) = particle_vector(p)%basic_particle
p = particle_pointer(p)
end do
end do
end do
write(*,*) 'rank=:' , rank , num , 6
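! checkerboard ordering below: ranks whose coordinate sum is even send first and then
! receive, ranks with an odd sum receive first and then send, so every blocking send
! is matched by a receive on the other side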
if(mod(cpu_coords(1)+cpu_coords(2)+cpu_coords(3),2) == 0) then
call MPI_SEND(num , 1 , MPI_INTEGER , upper_nbr , tag_tmp6 , GRID_COMM , ierror)
call MPI_RECV(num_tmp , 1 , MPI_INTEGER , bottom_nbr , tag_tmp6 , GRID_COMM , stat , ierror)
end if
if(mod(cpu_coords(1)+cpu_coords(2)+cpu_coords(3),2) == 1) then
call MPI_RECV(num_tmp , 1 , MPI_INTEGER , bottom_nbr , tag_tmp6 , GRID_COMM , stat , ierror)
call MPI_SEND(num , 1 , MPI_INTEGER , upper_nbr , tag_tmp6 , GRID_COMM , ierror)
end if
call MPI_BARRIER(GRID_COMM , ierror)
if(mod(cpu_coords(1)+cpu_coords(2)+cpu_coords(3),2) == 0) then
! if(num > 0) call MPI_SEND(send_ghost , num , ghost_data_mpi , upper_nbr , upper_tag , GRID_COMM , ierror)
call MPI_SEND(send_ghost , num , ghost_data_mpi , upper_nbr , &
upper_tag , GRID_COMM , ierror)
! if(num_tmp > 0) call MPI_RECV(recv_ghost , num_tmp , ghost_data_mpi , &
! bottom_nbr , upper_tag , GRID_COMM , stat , ierror)
call MPI_RECV(recv_ghost , num_tmp , ghost_data_mpi , &
bottom_nbr , upper_tag , GRID_COMM , stat , ierror)
end if
if(mod(cpu_coords(1)+cpu_coords(2)+cpu_coords(3),2) == 1) then
! if(num_tmp > 0) call MPI_RECV(recv_ghost , num_tmp , ghost_data_mpi , &
! bottom_nbr , upper_tag , GRID_COMM , stat , ierror)
call MPI_RECV(recv_ghost , num_tmp , ghost_data_mpi , &
bottom_nbr , upper_tag , GRID_COMM , stat , ierror)
! if(num > 0) call MPI_SEND(send_ghost , num , ghost_data_mpi , upper_nbr , upper_tag , GRID_COMM , ierror)
call MPI_SEND(send_ghost , num , ghost_data_mpi , upper_nbr , &
upper_tag , GRID_COMM , ierror)
end if
call MPI_BARRIER(GRID_COMM , ierror)
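! append the received ghost particles after the ones already stored locally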
if(num_tmp > 0) then
do i = 1 , num_tmp
particle_vector(ghost_pointer+i-1)%basic_particle = recv_ghost(i)
end do
end if
ghost_pointer = ghost_pointer + num_tmp
write(*,*) 'rank =: ',rank,upper_nbr,' num =: ' , num , ' num_tmp =:' , num_tmp
if(ghost_pointer > max_num_particle_per_cpu+max_num_ghost_per_cpu) then
print*, 'should enlarge "max_num_ghost_per_cpu" and/or "max_num_particle_per_cpu"' , ghost_pointer , num_tmp
end if
if(num_tmp > 0) then
left = 1 ; right = 1
i = ghost_pointer-num_tmp
do while(i <= ghost_pointer-1)
if(particle_vector(i)%basic_particle%global_index /= 0) then
call get_index(particle_vector(i)%basic_particle%coor(1),rmax,coor_box(1,1),coor_box(1,2),ind(1))
call get_index(particle_vector(i)%basic_particle%coor(2),rmax,coor_box(2,1),coor_box(2,2),ind(2))
call get_index(particle_vector(i)%basic_particle%coor(3),rmax,coor_box(3,1),coor_box(3,2),ind(3))
if(ind(3) == 0 .and. ind(1)>=0 .and. ind(1)<=nx+1 .and. ind(2)>=0 .and. ind(2)<=ny+1) then
call add_cell_link(max_num_particle_per_cpu+max_num_ghost_per_cpu,nx,ny,nz,left,right, ind(1), ind(2), ind(3) , &
i,particle_pointer,last_particle_in_cell)
end if
end if
i = i + 1
end do
end if
call MPI_BARRIER(GRID_COMM , ierror)
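For reference, I think the two-step exchange above could also be written with MPI_SENDRECV, which would make the even/odd ordering unnecessary. This is only a sketch using the same variable names, not what I actually run:
! exchange the counts, then the ghost data: send to upper, receive from bottom
call MPI_SENDRECV(num , 1 , MPI_INTEGER , upper_nbr , tag_tmp6 , &
                  num_tmp , 1 , MPI_INTEGER , bottom_nbr , tag_tmp6 , &
                  GRID_COMM , stat , ierror)
call MPI_SENDRECV(send_ghost , num , ghost_data_mpi , upper_nbr , upper_tag , &
                  recv_ghost , num_tmp , ghost_data_mpi , bottom_nbr , upper_tag , &
                  GRID_COMM , stat , ierror)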
Could someone please help me fix it? Thank you very much.
--
Yours sincerely,
Shouchun Deng