Hello,

I am running into an MPI error. I converted my previous code (the F77 version works fine) to F90, and I now get the error shown below (running with 27 processes, but only 26 or fewer of them print their information):

 rank=:           0        1780           6
 rank=:           2         656           6
 rank=:           4        1825           6
 rank=:           8         425           6
 rank=:          16        1233           6
 rank=:          12        1859           6
 rank=:           6        1171           6
 rank=:          20         434           6
 rank=:           5         668           6
 rank=:          14         688           6
 rank=:          21        1208           6
 rank=:          18        1174           6
 rank=:          24         779           6
 rank=:           7        1186           6
 rank=:          10        1841           6
 rank=:          17         433           6
 rank=:          11         688           6
 rank=:          22        1216           6
 rank=:          19        1189           6
 rank=:          25         792           6
 rank=:           3        1808           6
 rank=:          15        1213           6
 rank=:          13        1895           6
 rank=:           1        1788           6
 rank=:           9        1815           6
 rank=:          23         441           6
[mpiexec@fn-148-148] ONE OF THE PROCESSES TERMINATED BADLY: CLEANING UP
APPLICATION TERMINATED WITH THE EXIT STRING: Terminated (signal 15)

It is a particle simulation program, and the corresponding code is:

    !send to upper and receive from bottom
    num = 0 ; num_tmp = 0
    do i = 0 , nx+1
        do j = 0 , ny+1
            p = last_particle_in_cell(i,j,nz)
            do while(p > 0)
                num = num + 1
                if(num > num_send) print*, 'Please enlarge the number of sent ghost - "num_send"' , num , num_send
                send_ghost(num) = particle_vector(p)%basic_particle
                p = particle_pointer(p)
            end do
        end do
    end do
    write(*,*) 'rank=:' , rank , num , 6
    if(mod(cpu_coords(1)+cpu_coords(2)+cpu_coords(3),2) == 0) then
        call MPI_SEND(num     , 1 , MPI_INTEGER , upper_nbr  , tag_tmp6 , GRID_COMM , ierror)
        call MPI_RECV(num_tmp , 1 , MPI_INTEGER , bottom_nbr , tag_tmp6 , GRID_COMM , stat , ierror)
    end if
    if(mod(cpu_coords(1)+cpu_coords(2)+cpu_coords(3),2) == 1) then
        call MPI_RECV(num_tmp , 1 , MPI_INTEGER , bottom_nbr , tag_tmp6 , GRID_COMM , stat , ierror)
        call MPI_SEND(num     , 1 , MPI_INTEGER , upper_nbr  , tag_tmp6 , GRID_COMM , ierror)
    end if
    call MPI_BARRIER(GRID_COMM , ierror)
    if(mod(cpu_coords(1)+cpu_coords(2)+cpu_coords(3),2) == 0) then
!        if(num > 0) call MPI_SEND(send_ghost , num , ghost_data_mpi , upper_nbr , upper_tag , GRID_COMM , ierror)
        call MPI_SEND(send_ghost , num , ghost_data_mpi , upper_nbr , upper_tag , GRID_COMM , ierror)
!        if(num_tmp > 0) call MPI_RECV(recv_ghost , num_tmp , ghost_data_mpi , &
!                        bottom_nbr , upper_tag , GRID_COMM , stat , ierror)
        call MPI_RECV(recv_ghost , num_tmp , ghost_data_mpi , &
                        bottom_nbr , upper_tag , GRID_COMM , stat , ierror)
    end if
    if(mod(cpu_coords(1)+cpu_coords(2)+cpu_coords(3),2) == 1) then
!        if(num_tmp > 0) call MPI_RECV(recv_ghost , num_tmp , ghost_data_mpi , &
!                        bottom_nbr , upper_tag , GRID_COMM , stat , ierror)
        call MPI_RECV(recv_ghost , num_tmp , ghost_data_mpi , &
                        bottom_nbr , upper_tag , GRID_COMM , stat , ierror)
!        if(num > 0) call MPI_SEND(send_ghost , num , ghost_data_mpi , upper_nbr , upper_tag , GRID_COMM , ierror)
        call MPI_SEND(send_ghost , num , ghost_data_mpi , upper_nbr , upper_tag , GRID_COMM , ierror)
    end if
    call MPI_BARRIER(GRID_COMM , ierror)
    if(num_tmp > 0) then
        do i = 1 , num_tmp
            particle_vector(ghost_pointer+i-1)%basic_particle = recv_ghost(i)
        end do
    end if
    ghost_pointer = ghost_pointer + num_tmp
    write(*,*) 'rank =: ',rank,upper_nbr,' num =: ' , num , ' num_tmp =:' , num_tmp
    if(ghost_pointer > max_num_particle_per_cpu+max_num_ghost_per_cpu) then
        print*, 'should enlarge "max_num_ghost_per_cpu" and/or "max_num_particle_per_cpu"',ghost_pointer,num_tmp
    end if
    if(num_tmp > 0) then
        left = 1 ; right = 1
        i = ghost_pointer-num_tmp
        do while(i <= ghost_pointer-1)
            if(particle_vector(i)%basic_particle%global_index /= 0) then
                call get_index(particle_vector(i)%basic_particle%coor(1),rmax,coor_box(1,1),coor_box(1,2),ind(1))
                call get_index(particle_vector(i)%basic_particle%coor(2),rmax,coor_box(2,1),coor_box(2,2),ind(2))
                call get_index(particle_vector(i)%basic_particle%coor(3),rmax,coor_box(3,1),coor_box(3,2),ind(3))
                if(ind(3) == 0 .and. ind(1) >= 0 .and. ind(1) <= nx+1 .and. ind(2) >= 0 .and. ind(2) <= ny+1) then
                    call add_cell_link(max_num_particle_per_cpu+max_num_ghost_per_cpu,nx,ny,nz,left,right, ind(1), ind(2), ind(3) , &
                                            i,particle_pointer,last_particle_in_cell)
                end if
            end if
            i = i + 1
        end do
    end if
    call MPI_BARRIER(GRID_COMM , ierror)
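
In case it helps, here is a minimal, self-contained sketch that isolates only the even/odd ordered count exchange from the code above. It is a toy reproducer: the 1-D non-periodic decomposition, the program name, and the integer payload are placeholders I made up for illustration, not my real data structures or Cartesian communicator setup (which is not shown above):

    ! Minimal sketch: even ranks send first, odd ranks receive first,
    ! on a toy 1-D non-periodic line of processes.
    program exchange_sketch
        use mpi
        implicit none
        integer :: ierror, rank, nprocs, grid_comm
        integer :: upper_nbr, bottom_nbr, num, num_tmp
        integer :: dims(1), coords(1)
        logical :: periods(1)
        integer :: stat(MPI_STATUS_SIZE)
        integer, parameter :: tag_tmp6 = 6

        call MPI_INIT(ierror)
        call MPI_COMM_SIZE(MPI_COMM_WORLD, nprocs, ierror)

        ! 1-D, non-periodic line standing in for one direction of the 3-D grid
        dims(1) = nprocs ; periods(1) = .false.
        call MPI_CART_CREATE(MPI_COMM_WORLD, 1, dims, periods, .false., grid_comm, ierror)
        call MPI_COMM_RANK(grid_comm, rank, ierror)
        call MPI_CART_COORDS(grid_comm, rank, 1, coords, ierror)
        ! shift by +1: bottom_nbr is the source, upper_nbr the destination
        call MPI_CART_SHIFT(grid_comm, 0, 1, bottom_nbr, upper_nbr, ierror)

        num = 100 + rank      ! placeholder "number of ghosts to send"
        num_tmp = -1          ! stays -1 when bottom_nbr is MPI_PROC_NULL

        ! even coordinate: send first, then receive; odd coordinate: the reverse
        if(mod(coords(1),2) == 0) then
            call MPI_SEND(num     , 1 , MPI_INTEGER , upper_nbr  , tag_tmp6 , grid_comm , ierror)
            call MPI_RECV(num_tmp , 1 , MPI_INTEGER , bottom_nbr , tag_tmp6 , grid_comm , stat , ierror)
        else
            call MPI_RECV(num_tmp , 1 , MPI_INTEGER , bottom_nbr , tag_tmp6 , grid_comm , stat , ierror)
            call MPI_SEND(num     , 1 , MPI_INTEGER , upper_nbr  , tag_tmp6 , grid_comm , ierror)
        end if

        write(*,*) 'rank=:' , rank , ' sent:' , num , ' received:' , num_tmp
        call MPI_FINALIZE(ierror)
    end program exchange_sketch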

Could someone please help me to fix this? Thank you very much.

-- 
Yours sincerely,
Shouchun Deng