#!/usr/bin/python
"""
NOT WORKING ATM

Python translation of /ts/examples/tutorials/ex4.c. Comments are mostly taken
from there.

This program solves the one-dimensional heat equation (also called the
diffusion equation),

    u_t = u_xx,

on the domain 0 <= x <= 1, with the boundary conditions

    u(t,0) = 0,  u(t,1) = 0,

and the initial condition

    u(0,x) = sin(6*pi*x) + 3*sin(2*pi*x).

This is a linear, second-order, parabolic equation.

We discretize the right-hand side using finite differences with uniform grid
spacing h:

    u_xx = (u_{i+1} - 2*u_{i} + u_{i-1}) / h^2

We then demonstrate time evolution using the various TS methods by running
the program via

    mpiexec -n <number of processes> python <this script> -ts_type <timestepping solver>

We compare the approximate solution with the exact solution, given by

    u_exact(x,t) = exp(-36*pi*pi*t) * sin(6*pi*x)
                 + 3*exp(-4*pi*pi*t) * sin(2*pi*x)

Notes:
This code demonstrates the TS solver interface to two variants of linear
problems, u_t = f(u,t), namely

  - time-dependent f:   f(u,t) is a function of t
  - time-independent f: f(u,t) is simply f(u)

The uniprocessor version of this code is ts/examples/tutorials/ex3.c
"""

from __future__ import print_function

import numpy as np
import sys

import petsc4py
petsc4py.init(sys.argv)
from petsc4py import PETSc


class AppCtx:
    """
    Application context class to store user data and make it available to
    the functions registered with the TS solver.

    Parameters
    ----------
    comm: mpi4py communicator
        the MPI communicator object
    da: PETSc DMDA object
        a distributed array data structure
    localwork: PETSc Vec
        local ghosted work vector
    u_local: PETSc Vec
        local ghosted approximate solution vector
    solution: PETSc Vec
        global exact solution vector
    m: int
        total number of grid points
    h: float
        mesh width (1/(m-1))
    debug: bool
        flag for debug output
    viewer1: PETSc Viewer
        viewer for the solution
    viewer2: PETSc Viewer
        viewer for the error
    norm_2: float
        the 2-norm error
    norm_max: float
        the max-norm error
    """

    def __init__(self, comm, da, localwork, u_local, solution, m, h, debug,
                 viewer1, viewer2, norm_2=0, norm_max=0):
        self.comm = comm
        self.da = da
        self.localwork = localwork
        self.u_local = u_local
        self.solution = solution
        self.m = m
        self.h = h
        self.debug = debug
        self.viewer1 = viewer1
        self.viewer2 = viewer2
        self.norm_2 = norm_2
        self.norm_max = norm_max


def exactSolution(t, solution, mctx):
    """
    Compute the exact solution.

    Parameters
    ----------
    t: float
        the current time
    solution: PETSc Vec
        the vector in which the exact solution is stored
    mctx: AppCtx
        the monitor context

    Returns
    -------
    0
    """
    h = mctx.h
    start, end = solution.getOwnershipRange()
    with solution as s:
        ex1 = np.exp(-36.0 * np.pi * np.pi * t)
        ex2 = np.exp(-4.0 * np.pi * np.pi * t)
        sc1 = np.pi * 6.0 * h
        sc2 = np.pi * 2.0 * h
        for i in range(start, end):
            # Use the global grid index i to evaluate the exact solution
            s[i - start] = ex1 * np.sin(sc1 * i) + 3.0 * ex2 * np.sin(sc2 * i)
    return 0


def initialConditions(U, ctx):
    """
    Set the initial conditions in the solution vector U.

    Parameters
    ----------
    U: PETSc Vec
        the solution vector
    ctx: AppCtx object
        the application context object

    Returns
    -------
    0
    """
    h = ctx.h
    start, end = U.getOwnershipRange()
    with U as u:
        for i in range(start, end):
            u[i - start] = np.sin(np.pi * i * 6 * h) + 3.0 * np.sin(np.pi * i * 2 * h)

    # Print debugging information if desired
    if ctx.debug:
        PETSc.Sys.Print("Computed solution vector:")
        U.view(PETSc.Viewer().STDOUT())
    return 0


def monitor(ts, step, time, u, mctx):
    """
    User-defined monitor function.

    Parameters
    ----------
    ts: PETSc TS
        the TS solver object
    step: int
        the iteration number
    time: float
        the current time
    u: PETSc Vec
        the current solution iterate
    mctx: AppCtx
        the monitor context
    """
    # View a graph of the current iterate
    mctx.viewer2.view(obj=u)

    # Compute the exact solution
    exactSolution(time, mctx.solution, mctx)

    # Print debugging information if desired
    if mctx.debug:
        PETSc.Sys.Print("Computed solution vector:")
        u.view(PETSc.Viewer().STDOUT())
        PETSc.Sys.Print("Exact solution vector:")
        mctx.solution.view(PETSc.Viewer().STDOUT())

    # Compute the 2-norm and max-norm of the error
    mctx.solution.axpy(-1.0, u)
    norm_2 = mctx.solution.norm(norm_type=PETSc.NormType.NORM_2)
    norm_2 = np.sqrt(mctx.h) * norm_2
    if norm_2 < 1e-14:
        norm_2 = 0
    norm_max = mctx.solution.norm(norm_type=PETSc.NormType.MAX)
    if norm_max < 1e-14:
        norm_max = 0

    PETSc.Sys.Print("Timestep", step, ": time =", time,
                    " 2-norm error =", norm_2, " max norm error =", norm_max)
    mctx.norm_2 += norm_2
    mctx.norm_max += norm_max

    # View a graph of the error
    mctx.solution.view(mctx.viewer1)

    # Print debugging information if desired
    if mctx.debug:
        PETSc.Sys.Print("Error vector:")
        mctx.solution.view(PETSc.Viewer().STDOUT())


def main(m=60, debug=False, nlflag=False, tdflag=False):
    """
    Main program routine.

    Parameters
    ----------
    m: int, optional
        the number of grid points. Defaults to 60.
    debug: bool, optional
        whether or not to print debug output. Defaults to False.
    nlflag: bool, optional
        whether or not to set the problem up as nonlinear. Defaults to False.
    tdflag: bool, optional
        flag telling if the RHS of the problem is time dependent. This
        changes the way the problem is defined. Defaults to False.
    """
    # globals
    time_steps_max = 100
    time_total_max = 1.0

    comm = PETSc.COMM_WORLD.tompi4py()
    size = comm.size
    PETSc.Sys.Print("Solving a linear TS problem, number of processors =", size)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Create vector data structures
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    # Create distributed array (DMDA) to manage parallel grid and vectors
    # and to set up the ghost point communication pattern. There are M
    # total grid values spread equally among all the processors.
    da = PETSc.DMDA().create(dim=1, sizes=(m,), dof=1, stencil_width=1,
                             boundary_type=PETSc.DMDA.BoundaryType.NONE)

    # Extract global and local vectors from DMDA; we use these to store the
    # approximate solution. Then duplicate these for remaining vectors that
    # have the same types.
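    # Note (illustrative, not part of the original flow): a DMDA global
    # vector holds only the locally owned entries, while the local vector
    # also includes the ghost points required by the stencil. The typical
    # petsc4py update pattern between the two would be something like:
    #     da.globalToLocal(u, u_local)   # scatter owned values plus ghosts
    #     da.localToGlobal(u_local, u)   # gather back, discarding ghosts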
    u = da.createGlobalVec()
    u_local = da.createLocalVec()

    # Create local work vector for use in evaluating the right-hand-side
    # function; create global work vector for storing the exact solution.
    localwork = u_local.duplicate()
    solution = u.duplicate()

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Set up displays to show graphs of the solution and error
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    viewer1 = PETSc.Viewer().createDraw(comm=comm, title="",
                                        position=(80, 380), size=(400, 160))
    viewer2 = PETSc.Viewer().createDraw(comm=comm, title="",
                                        position=(80, 0), size=(400, 160))

    # Create the application context
    appctx = AppCtx(comm, da, localwork, u_local, solution, m,
                    1.0 / (m - 1.0), debug, viewer1, viewer2)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Create timestepping solver context
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ts = PETSc.TS().create()
    if nlflag:
        ts.setProblemType(PETSc.TS.ProblemType.NONLINEAR)
    else:
        ts.setProblemType(PETSc.TS.ProblemType.LINEAR)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Set optional user-defined monitoring routine
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ts.setMonitor(monitor, args=(appctx,))

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Create matrix data structure; set matrix evaluation routine.
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    A = PETSc.Mat().createAIJ([m, m], comm=comm)
    A.setFromOptions()
    A.setUp()

    if tdflag:  # time dependent
        # For linear problems with a time-dependent f(u,t) in the equation
        # u_t = f(u,t), the user provides the discretized right-hand side
        # as a time-dependent matrix.
        ts.setRHSFunction(ts.computeRHSFunctionLinear, args=(appctx,))
        ts.setRHSJacobian(RHSMatrixHeat, J=A, P=A, args=(appctx,))
    else:  # time independent
        # For linear problems with a time-independent f(u) in the equation
        # u_t = f(u), the user provides the discretized right-hand side
        # as a matrix only once, and then sets a null matrix evaluation
        # routine.
        RHSMatrixHeat(ts, 0.0, u, A, A, appctx)
        ts.setRHSFunction(ts.computeRHSFunctionLinear, args=(appctx,))
        ts.setRHSJacobian(ts.computeRHSJacobianConstant, J=A, P=A,
                          args=(appctx,))

    if nlflag:
        ts.setRHSFunction(RHSFunctionHeat, args=(appctx,))
        snes = ts.getSNES()  # TODO: is this default-Jacobian setup needed?

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Set solution vector and initial timestep
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    dt = appctx.h * appctx.h / 2.0
    ts.setInitialTimeStep(0.0, dt)
    ts.setSolution(u)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Customize timestepping solver:
    #   - Set the solution method to be the Backward Euler method.
    #   - Set timestepping duration info.
    # Then set runtime options, which can override these defaults.
    # For example,
    #     -ts_max_steps <maxsteps> -ts_final_time <maxtime>
    # to override the defaults set by TSSetDuration().
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ts.setDuration(time_total_max, time_steps_max)
    ts.setExactFinalTime(ts.ExactFinalTimeOption.STEPOVER)
    ts.setFromOptions()

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Solve the problem
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Evaluate initial conditions
    initialConditions(u, appctx)

    # Run the timestepping solver
    ts.solve(u)

    # Get solution info
    ftime = ts.getSolveTime()
    steps = ts.getStepNumber()

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # View the time stepping information
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    PETSc.Sys.Print("Total timesteps", steps, "Final time", ftime)
    PETSc.Sys.Print("Avg. error (2 norm) =", appctx.norm_2 / steps,
                    "Avg. error (max norm) =", appctx.norm_max / steps)


def RHSFunctionHeat(ts, t, globalin, globalout, ctx):
    """
    The nonlinear-case RHSFunction.

    Parameters
    ----------
    ts: PETSc TS
        the TS solver object
    t: float
        current time
    globalin: PETSc Vec
        the input vector
    globalout: PETSc Vec
        the output vector
    ctx: AppCtx object
        the application context
    """
    A, _ = ts.getRHSJacobian()  # only the Jacobian matrix is needed here
    RHSMatrixHeat(ts, t, globalin, A, None, ctx)
    A.mult(globalin, globalout)
    return 0


# def RHSMatrixHeat(ts, t, X, AA, BB, strf, appctx):
def RHSMatrixHeat(ts, t, X, AA, BB, appctx):
    """
    User-provided routine to compute the right-hand-side matrix for the
    heat equation.

    Parameters
    ----------
    ts: PETSc TS
        the TS solver object
    t: float
        current time
    X: PETSc Vec
        the global input vector
    AA: PETSc Mat
        Jacobian matrix
    BB: PETSc Mat
        optionally different preconditioning matrix
    appctx: AppCtx object
        the application context

    Returns
    -------
    0

    Notes
    -----
    RHSMatrixHeat computes entries for the locally owned part of the system.
      - Currently, all PETSc parallel matrix formats are partitioned by
        contiguous chunks of rows across the processors.
      - Each processor needs to insert only elements that it owns locally
        (but any non-local elements will be sent to the appropriate
        processor during matrix assembly).
      - Always specify global rows and columns of matrix entries when using
        MatSetValues(); we could alternatively use MatSetValuesLocal().
      - Here, we set all entries for a particular row at once.
      - Note that MatSetValues() uses 0-based row and column numbers in
        Fortran as well as in C.
    """
    # variables
    stwo = -2.0 / (appctx.h ** 2)
    sone = -0.5 * stwo

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Compute entries for the locally owned part of the matrix
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    start, end = AA.getOwnershipRange()

    # Set matrix rows corresponding to boundary data
    if start == 0:  # first processor only
        AA.setValue(start, start, 1.0)
        start += 1
    if end == appctx.m:  # last processor only
        end -= 1
        AA.setValue(end, end, 1.0)

    # Set matrix rows corresponding to interior data. We construct the
    # matrix one row at a time.
    v = np.array([sone, stwo, sone])
    idx = np.zeros_like(v, dtype='int32')
    for i in range(start, end):
        idx[0] = i - 1
        idx[1] = i
        idx[2] = i + 1
        AA.setValues(i, idx, v)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Complete the matrix assembly process and set some options
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Assemble matrix, using the 2-step process:
    #   MatAssemblyBegin(), MatAssemblyEnd()
    # Computations can be done while messages are in transition
    # by placing code between these two statements.
    AA.assemblyBegin()
    AA.assemblyEnd()

    # Set an option to indicate that we will never add a new nonzero
    # location to the matrix. If we do, it will generate an error.
    AA.setOption(AA.Option.NEW_NONZERO_LOCATION_ERR, True)
    return 0


if __name__ == "__main__":
    # PETSc options
    OptDB = PETSc.Options()  # get the PETSc options database

    # Set options; one can also set them from the command line
    m = OptDB.getInt('M', 60)  # grid dimension
    nlflag = OptDB.getBool('nonlinear', False)
    tdflag = OptDB.getBool('time_dependent_rhs', False)
    debug = OptDB.hasName('debug')

    main(m=m, debug=debug, nlflag=nlflag, tdflag=tdflag)
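# Example invocations (the script file name "ex4.py" below is an assumption;
# substitute the actual file name, and any -ts_type supported by PETSc):
#
#     python ex4.py -M 120 -debug
#     mpiexec -n 2 python ex4.py -M 100 -ts_type beuler
#     mpiexec -n 4 python ex4.py -time_dependent_rhs -ts_type cn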