module flux_area

use global_data

implicit none

#define PETSC_AVOID_DECLARATIONS
#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
#undef PETSC_AVOID_DECLARATIONS

contains

real(8) function phi_f(u1,u2,d1,d2)

!evaluate an arbitrary phi at the centre of a face by distance-weighted
!interpolation between the two adjacent cell values
!u can be substituted with pres

real(8), intent(in) :: u1,u2,d1,d2

phi_f = (d1*u2+d2*u1)/(d1+d2)

end function phi_f

real(8) function grad_p(p1,p2,d1,d2)

!evaluate the pressure gradient across a face

real(8), intent(in) :: p1,p2,d1,d2

grad_p = (p2-p1)/(d1+d2)

end function grad_p

real(8) function F_d(u1,u2,d1,d2,d_ab)

!evaluate the diffusive flux at a face
!note: depending on the face, u1 and u2 may need to be interchanged;
!the n vector points from u1 to u2

real(8), intent(in) :: u1,u2,d1,d2,d_ab

F_d = -(inv_Re) * ((u2-u1)/(d1+d2)) * d_ab

end function F_d

real(8) function F_c(vel_f,u_int,Ed)

!evaluate the convective flux at a face

real(8), intent(in) :: vel_f,u_int,Ed

F_c = vel_f * Ed * u_int

end function F_c

subroutine v_ast_row_copy

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscsys.h"

!copy the jend row of v_ast to the jsta-1 ghost row of the next process

integer :: inext,iprev,istatus(MPI_STATUS_SIZE),irecv1,ierr,isend1

inext = myid + 1
iprev = myid - 1
if (myid == num_procs - 1) inext = MPI_PROC_NULL
if (myid == 0) iprev = MPI_PROC_NULL

CALL MPI_ISEND(v_ast(1,jend)  ,size_x,MPI_REAL8,inext,1,MPI_COMM_WORLD,isend1,ierr)
CALL MPI_IRECV(v_ast(1,jsta-1),size_x,MPI_REAL8,iprev,1,MPI_COMM_WORLD,irecv1,ierr)
CALL MPI_WAIT(isend1, istatus, ierr)
CALL MPI_WAIT(irecv1, istatus, ierr)

end subroutine v_ast_row_copy

subroutine phi_row_copy

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscsys.h"

!exchange the two phi rows on each side of every partition boundary, so that
!each process holds two ghost rows below (jsta-2:jsta-1) and two above
!(jend+1:jend+2)

integer :: inext,iprev,istatus(MPI_STATUS_SIZE),irecv2,ierr,isend2,irecv1,isend1

inext = myid + 1
iprev = myid - 1
if (myid == num_procs - 1) inext = MPI_PROC_NULL
if (myid == 0) iprev = MPI_PROC_NULL

CALL MPI_ISEND(phi(1,jend-1),2*size_x,MPI_REAL8,inext,1,MPI_COMM_WORLD,isend1,ierr)
CALL MPI_ISEND(phi(1,jsta)  ,2*size_x,MPI_REAL8,iprev,1,MPI_COMM_WORLD,isend2,ierr)
CALL MPI_IRECV(phi(1,jsta-2),2*size_x,MPI_REAL8,iprev,1,MPI_COMM_WORLD,irecv1,ierr)
CALL MPI_IRECV(phi(1,jend+1),2*size_x,MPI_REAL8,inext,1,MPI_COMM_WORLD,irecv2,ierr)
CALL MPI_WAIT(isend1, istatus, ierr)
CALL MPI_WAIT(isend2, istatus, ierr)
CALL MPI_WAIT(irecv1, istatus, ierr)
CALL MPI_WAIT(irecv2, istatus, ierr)

end subroutine phi_row_copy

subroutine mpi_add(val)

!sum val across all processes: gather the per-process values on rank 0 and
!add them there; only rank 0 returns the correct sum

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscsys.h"

integer :: ierr
real(8) :: temp(num_procs)
real(8), intent(inout) :: val

call MPI_GATHER(val,1,MPI_REAL8,temp,1,MPI_REAL8,0,MPI_COMM_WORLD,ierr)
if (myid == 0) val = sum(temp) !temp is defined only on the root process

end subroutine mpi_add

subroutine mpi_add_bcast(val)

!sum val across all processes and broadcast the result: gather on rank 0,
!add there, then broadcast so every process returns the same sum

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscsys.h"

integer :: ierr
real(8) :: temp(num_procs)
real(8), intent(inout) :: val

call MPI_GATHER(val,1,MPI_REAL8,temp,1,MPI_REAL8,0,MPI_COMM_WORLD,ierr)
if (myid == 0) val = sum(temp) !temp is defined only on the root process
call MPI_BCAST(val,1,MPI_REAL8,0,MPI_COMM_WORLD,ierr)

end subroutine mpi_add_bcast

subroutine uvp_row_copy

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscsys.h"

!exchange the jsta and jend boundary rows of u, v and p with the neighbouring
!processes; u and v are exchanged in both directions, p only from jsta down
!into the jend+1 ghost row of the previous process

integer :: inext,iprev,istatus(MPI_STATUS_SIZE),irecv2,ierr,isend2,irecv1,isend1

inext = myid + 1
iprev = myid - 1
if (myid == num_procs - 1) inext = MPI_PROC_NULL
if (myid == 0) iprev = MPI_PROC_NULL

CALL MPI_ISEND(u(1,jend)  ,size_x,MPI_REAL8,inext,1,MPI_COMM_WORLD,isend1,ierr)
CALL MPI_ISEND(u(1,jsta)  ,size_x,MPI_REAL8,iprev,1,MPI_COMM_WORLD,isend2,ierr)
CALL MPI_IRECV(u(1,jsta-1),size_x,MPI_REAL8,iprev,1,MPI_COMM_WORLD,irecv1,ierr)
CALL MPI_IRECV(u(1,jend+1),size_x,MPI_REAL8,inext,1,MPI_COMM_WORLD,irecv2,ierr)
CALL MPI_WAIT(isend1, istatus, ierr)
CALL MPI_WAIT(isend2, istatus, ierr)
CALL MPI_WAIT(irecv1, istatus, ierr)
CALL MPI_WAIT(irecv2, istatus, ierr)

CALL MPI_ISEND(v(1,jend)  ,size_x,MPI_REAL8,inext,1,MPI_COMM_WORLD,isend1,ierr)
CALL MPI_ISEND(v(1,jsta)  ,size_x,MPI_REAL8,iprev,1,MPI_COMM_WORLD,isend2,ierr)
CALL MPI_IRECV(v(1,jsta-1),size_x,MPI_REAL8,iprev,1,MPI_COMM_WORLD,irecv1,ierr)
CALL MPI_IRECV(v(1,jend+1),size_x,MPI_REAL8,inext,1,MPI_COMM_WORLD,irecv2,ierr)
CALL MPI_WAIT(isend1, istatus, ierr)
CALL MPI_WAIT(isend2, istatus, ierr)
CALL MPI_WAIT(irecv1, istatus, ierr)
CALL MPI_WAIT(irecv2, istatus, ierr)

CALL MPI_ISEND(p(1,jsta)  ,size_x,MPI_REAL8,iprev,1,MPI_COMM_WORLD,isend2,ierr)
CALL MPI_IRECV(p(1,jend+1),size_x,MPI_REAL8,inext,1,MPI_COMM_WORLD,irecv2,ierr)
CALL MPI_WAIT(isend2, istatus, ierr)
CALL MPI_WAIT(irecv2, istatus, ierr)

end subroutine uvp_row_copy

subroutine para_range(n1, n2, nprocs, irank, jsta, jend)

!block distribution: split the iteration range n1..n2 as evenly as possible
!over nprocs processes; the first mod(n2-n1+1,nprocs) ranks get one extra row

integer n1     !the lowest value of the iteration variable (IN)
integer n2     !the highest value of the iteration variable (IN)
integer nprocs !the number of processes (IN)
integer irank  !the rank for which you want to know the range of iterations (IN)
integer jsta   !the lowest value of the iteration variable that process irank executes (OUT)
integer jend   !the highest value of the iteration variable that process irank executes (OUT)
integer iwork1,iwork2

iwork1 = (n2 - n1 + 1) / nprocs
iwork2 = mod(n2 - n1 + 1, nprocs)
jsta = irank * iwork1 + n1 + min(irank, iwork2)
jend = jsta + iwork1 - 1
if (iwork2 > irank) jend = jend + 1

end subroutine para_range

end module flux_area