Actual source code: ex14.c
petsc-3.12.2 2019-11-22
static char help[] = "Solves a nonlinear system in parallel with a user-defined Newton method.\n\
Uses KSP to solve the linearized Newton systems.  This solver\n\
is a very simplistic inexact Newton method.  The intent of this code is to\n\
demonstrate the repeated solution of linear systems with the same nonzero pattern.\n\
\n\
This is NOT the recommended approach for solving nonlinear problems with PETSc!\n\
We urge users to employ the SNES component for solving nonlinear problems whenever\n\
possible, as it offers many advantages over coding nonlinear solvers independently.\n\
\n\
We solve the Bratu (SFI - solid fuel ignition) problem in a 2D rectangular\n\
domain, using distributed arrays (DMDAs) to partition the parallel grid.\n\
The command line options include:\n\
  -par <parameter>, where <parameter> indicates the problem's nonlinearity\n\
     problem SFI:  <parameter> = Bratu parameter (0 <= par <= 6.81)\n\
  -mx <xg>, where <xg> = number of grid points in the x-direction\n\
  -my <yg>, where <yg> = number of grid points in the y-direction\n\
  -Nx <npx>, where <npx> = number of processors in the x-direction\n\
  -Ny <npy>, where <npy> = number of processors in the y-direction\n\n";
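/*
   For example, an illustrative run on four processes (any MPI launcher works;
   the option names are those listed above, and -ksp_monitor is the standard
   PETSc option for printing linear-solve residuals) might look like:

       mpiexec -n 4 ./ex14 -mx 16 -my 16 -par 5.0 -ksp_monitor
*/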
/*T
   Concepts: KSP^writing a user-defined nonlinear solver (parallel Bratu example);
   Concepts: DMDA^using distributed arrays;
   Processors: n
T*/
/* ------------------------------------------------------------------------

    Solid Fuel Ignition (SFI) problem.  This problem is modeled by
    the partial differential equation

            -Laplacian u - lambda*exp(u) = 0,  0 < x,y < 1,

    with boundary conditions

             u = 0  for  x = 0, x = 1, y = 0, y = 1.

    A finite difference approximation with the usual 5-point stencil
    is used to discretize the boundary value problem to obtain a nonlinear
    system of equations.
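    Concretely, with hx = 1/(mx-1) and hy = 1/(my-1), and with the usual
    1/h^2 stencil factors multiplied through by hx*hy (this is the form
    evaluated in ComputeFunction() below), the discrete residual at an
    interior node (i,j) works out to

        F_ij = (2 u_ij - u_(i-1)j - u_(i+1)j)*(hy/hx)
             + (2 u_ij - u_i(j-1) - u_i(j+1))*(hx/hy)
             - hx*hy*lambda*exp(u_ij).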
    The SNES version of this problem is:  snes/examples/tutorials/ex5.c
    We urge users to employ the SNES component for solving nonlinear
    problems whenever possible, as it offers many advantages over coding
    nonlinear solvers independently.

  ------------------------------------------------------------------------- */
/*
   Include "petscdmda.h" so that we can use distributed arrays (DMDAs).
   Include "petscksp.h" so that we can use KSP solvers.  Note that this
   file automatically includes:
     petscsys.h    - base PETSc routines   petscvec.h - vectors
     petscmat.h    - matrices
     petscis.h     - index sets            petscksp.h - Krylov subspace methods
     petscviewer.h - viewers               petscpc.h  - preconditioners
*/
#include <petscdm.h>
#include <petscdmda.h>
#include <petscksp.h>
/*
   User-defined application context - contains data needed by the
   application-provided call-back routines, ComputeJacobian() and
   ComputeFunction().
*/
typedef struct {
  PetscReal param;          /* test problem parameter */
  PetscInt  mx,my;          /* discretization in x,y directions */
  Vec       localX;         /* ghosted local vector */
  DM        da;             /* distributed array data structure */
} AppCtx;
/*
   User-defined routines
*/
extern PetscErrorCode ComputeFunction(AppCtx*,Vec,Vec),FormInitialGuess(AppCtx*,Vec);
extern PetscErrorCode ComputeJacobian(AppCtx*,Vec,Mat);
int main(int argc,char **argv)
{
  /* -------------- Data to define application problem ---------------- */
  MPI_Comm       comm;                /* communicator */
  KSP            ksp;                 /* linear solver */
  Vec            X,Y,F;               /* solution, update, residual vectors */
  Mat            J;                   /* Jacobian matrix */
  AppCtx         user;                /* user-defined work context */
  PetscInt       Nx,Ny;               /* number of processors in x- and y- directions */
  PetscMPIInt    size;                /* number of processors */
  PetscReal      bratu_lambda_max = 6.81,bratu_lambda_min = 0.;
  PetscInt       m,N;
  PetscErrorCode ierr;                /* error code */
  /* --------------- Data to define nonlinear solver -------------- */
  PetscReal rtol = 1.e-8;             /* relative convergence tolerance */
  PetscReal xtol = 1.e-8;             /* step convergence tolerance */
  PetscReal ttol;                     /* convergence tolerance */
  PetscReal fnorm,ynorm,xnorm;        /* various vector norms */
  PetscInt  max_nonlin_its = 3;       /* maximum number of iterations for nonlinear solver */
  PetscInt  max_functions = 50;       /* maximum number of function evaluations */
  PetscInt  lin_its;                  /* number of linear solver iterations for each step */
  PetscInt  i;                        /* nonlinear solve iteration number */
  PetscBool no_output = PETSC_FALSE;  /* flag indicating whether to suppress output */
  ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr;
  comm = PETSC_COMM_WORLD;
  PetscOptionsGetBool(NULL,NULL,"-no_output",&no_output,NULL);
  /*
     Initialize problem parameters
  */
  user.mx = 4; user.my = 4; user.param = 6.0;

  PetscOptionsGetInt(NULL,NULL,"-mx",&user.mx,NULL);
  PetscOptionsGetInt(NULL,NULL,"-my",&user.my,NULL);
  PetscOptionsGetReal(NULL,NULL,"-par",&user.param,NULL);
  if (user.param >= bratu_lambda_max || user.param <= bratu_lambda_min) SETERRQ(PETSC_COMM_WORLD,1,"Lambda is out of range");
  N = user.mx*user.my;
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create linear solver context
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  KSPCreate(comm,&ksp);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create vector data structures
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create distributed array (DMDA) to manage parallel grid and vectors
  */
  MPI_Comm_size(comm,&size);
  Nx = PETSC_DECIDE; Ny = PETSC_DECIDE;
  PetscOptionsGetInt(NULL,NULL,"-Nx",&Nx,NULL);
  PetscOptionsGetInt(NULL,NULL,"-Ny",&Ny,NULL);
  if (Nx*Ny != size && (Nx != PETSC_DECIDE || Ny != PETSC_DECIDE)) SETERRQ(PETSC_COMM_WORLD,1,"Incompatible number of processors:  Nx * Ny != size");
  DMDACreate2d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,DM_BOUNDARY_NONE,DMDA_STENCIL_STAR,user.mx,user.my,Nx,Ny,1,1,NULL,NULL,&user.da);
  DMSetFromOptions(user.da);
  DMSetUp(user.da);

  /*
     Extract global and local vectors from DMDA; then duplicate for remaining
     vectors that are the same types
  */
  DMCreateGlobalVector(user.da,&X);
  DMCreateLocalVector(user.da,&user.localX);
  VecDuplicate(X,&F);
  VecDuplicate(X,&Y);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create matrix data structure for Jacobian
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Note: For the parallel case, vectors and matrices MUST be partitioned
     accordingly.  When using distributed arrays (DMDAs) to create vectors,
     the DMDAs determine the problem partitioning.  We must explicitly
     specify the local matrix dimensions upon its creation for compatibility
     with the vector distribution.  Thus, the generic MatCreate() routine
     is NOT sufficient when working with distributed arrays.

     Note: Here we only approximately preallocate storage space for the
     Jacobian.  See the users manual for a discussion of better techniques
     for preallocating matrix memory.
  */
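  /*
     An alternative worth noting (not used here, so that the preallocation
     arguments below stay visible) is to let the DMDA build a correctly
     sized and preallocated matrix itself:

         DMCreateMatrix(user.da,&J);

     which derives the nonzero pattern from the DMDA's grid and stencil.
  */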
  if (size == 1) {
    MatCreateSeqAIJ(comm,N,N,5,NULL,&J);
  } else {
    VecGetLocalSize(X,&m);
    MatCreateAIJ(comm,m,m,N,N,5,NULL,3,NULL,&J);
  }
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Customize linear solver; set runtime options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Set runtime options (e.g., -ksp_monitor -ksp_rtol <rtol> -ksp_type <type>)
  */
  KSPSetFromOptions(ksp);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Evaluate initial guess
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  FormInitialGuess(&user,X);
  ComputeFunction(&user,X,F);   /* Compute F(X)    */
  VecNorm(F,NORM_2,&fnorm);     /* fnorm = || F || */
  ttol = fnorm*rtol;
  if (!no_output) PetscPrintf(comm,"Initial function norm = %g\n",(double)fnorm);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Solve nonlinear system with a user-defined method
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     This solver is a very simplistic inexact Newton method, with no
     damping strategies or bells and whistles.  The intent of this code
     is merely to demonstrate the repeated solution with KSP of linear
     systems with the same nonzero structure.

     This is NOT the recommended approach for solving nonlinear problems
     with PETSc!  We urge users to employ the SNES component for solving
     nonlinear problems whenever possible with application codes, as it
     offers many advantages over coding nonlinear solvers independently.
  */
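  /*
     For reference, each pass of the loop below carries out one step of the
     classical Newton iteration:

         solve  J(X_k) Y = F(X_k),   then set  X_(k+1) = X_k - Y,

     stopping when ||F|| drops below ttol, when the update Y becomes small
     relative to X, or when the iteration budget is exhausted.
  */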
  for (i=0; i<max_nonlin_its; i++) {

    /*
       Compute the Jacobian matrix.
    */
    ComputeJacobian(&user,X,J);

    /*
       Solve J Y = F, where J is the Jacobian matrix.
         - First, set the KSP linear operators.  Here the matrix that
           defines the linear system also serves as the preconditioning
           matrix.
         - Then solve the Newton system.
    */
    KSPSetOperators(ksp,J,J);
    KSPSolve(ksp,F,Y);
    KSPGetIterationNumber(ksp,&lin_its);

    /*
       Compute updated iterate
    */
    VecNorm(Y,NORM_2,&ynorm);   /* ynorm = || Y || */
    VecAYPX(Y,-1.0,X);          /* Y <- X - Y      */
    VecCopy(Y,X);               /* X <- Y          */
    VecNorm(X,NORM_2,&xnorm);   /* xnorm = || X || */
    if (!no_output) {
      PetscPrintf(comm,"   linear solve iterations = %D, xnorm=%g, ynorm=%g\n",lin_its,(double)xnorm,(double)ynorm);
    }

    /*
       Evaluate new nonlinear function
    */
    ComputeFunction(&user,X,F);   /* Compute F(X)    */
    VecNorm(F,NORM_2,&fnorm);     /* fnorm = || F || */
    if (!no_output) {
      PetscPrintf(comm,"Iteration %D, function norm = %g\n",i+1,(double)fnorm);
    }

    /*
       Test for convergence
    */
    if (fnorm <= ttol) {
      if (!no_output) {
        PetscPrintf(comm,"Converged due to function norm %g < %g (relative tolerance)\n",(double)fnorm,(double)ttol);
      }
      break;
    }
    if (ynorm < xtol*(xnorm)) {
      if (!no_output) {
        PetscPrintf(comm,"Converged due to small update length: %g < %g * %g\n",(double)ynorm,(double)xtol,(double)xnorm);
      }
      break;
    }
    if (i > max_functions) {
      if (!no_output) {
        PetscPrintf(comm,"Exceeded maximum number of function evaluations: %D > %D\n",i,max_functions);
      }
      break;
    }
  }
  PetscPrintf(comm,"Number of nonlinear iterations = %D\n",i);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  MatDestroy(&J); VecDestroy(&Y);
  VecDestroy(&user.localX); VecDestroy(&X);
  VecDestroy(&F);
  KSPDestroy(&ksp); DMDestroy(&user.da);
  ierr = PetscFinalize();
  return ierr;
}
/* ------------------------------------------------------------------- */
/*
   FormInitialGuess - Forms initial approximation.

   Input Parameters:
   user - user-defined application context
   X - vector

   Output Parameter:
   X - vector
*/
PetscErrorCode FormInitialGuess(AppCtx *user,Vec X)
{
  PetscInt    i,j,row,mx,my,xs,ys,xm,ym,gxm,gym,gxs,gys;
  PetscReal   one = 1.0,lambda,temp1,temp,hx,hy;
  PetscScalar *x;
  mx = user->mx; my = user->my; lambda = user->param;
  hx = one/(PetscReal)(mx-1); hy = one/(PetscReal)(my-1);
  temp1 = lambda/(lambda + one);

  /*
     Get a pointer to vector data.
       - For default PETSc vectors, VecGetArray() returns a pointer to
         the data array.  Otherwise, the routine is implementation dependent.
       - You MUST call VecRestoreArray() when you no longer need access to
         the array.
  */
  VecGetArray(X,&x);
  /*
     Get local grid boundaries (for 2-dimensional DMDA):
       xs, ys   - starting grid indices (no ghost points)
       xm, ym   - widths of local grid (no ghost points)
       gxs, gys - starting grid indices (including ghost points)
       gxm, gym - widths of local grid (including ghost points)
  */
  DMDAGetCorners(user->da,&xs,&ys,NULL,&xm,&ym,NULL);
  DMDAGetGhostCorners(user->da,&gxs,&gys,NULL,&gxm,&gym,NULL);
  /*
     Compute initial guess over the locally owned part of the grid
  */
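  /*
     A note on the two formulas below (our reading of the code, stated here
     for convenience): "row" is the index of point (i,j) in the ghosted local
     ordering, row = (i - gxs) + (j - gys)*gxm; and the interior guess is
     (lambda/(lambda+1)) * sqrt(min(d_x, d_y)), where d_x and d_y are the
     distances from (i,j) to the nearest boundary in x and y.
  */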
  for (j=ys; j<ys+ym; j++) {
    temp = (PetscReal)(PetscMin(j,my-j-1))*hy;
    for (i=xs; i<xs+xm; i++) {
      row = i - gxs + (j - gys)*gxm;
      if (i == 0 || j == 0 || i == mx-1 || j == my-1) {
        x[row] = 0.0;
        continue;
      }
      x[row] = temp1*PetscSqrtReal(PetscMin((PetscReal)(PetscMin(i,mx-i-1))*hx,temp));
    }
  }

  /*
     Restore vector
  */
  VecRestoreArray(X,&x);
  return 0;
}
/* ------------------------------------------------------------------- */
/*
   ComputeFunction - Evaluates nonlinear function, F(x).

   Input Parameters:
.  X - input vector
.  user - user-defined application context

   Output Parameter:
.  F - function vector
*/
PetscErrorCode ComputeFunction(AppCtx *user,Vec X,Vec F)
{
  PetscInt    i,j,row,mx,my,xs,ys,xm,ym,gxs,gys,gxm,gym;
  PetscReal   two = 2.0,one = 1.0,lambda,hx,hy,hxdhy,hydhx,sc;
  PetscScalar u,uxx,uyy,*x,*f;
  Vec         localX = user->localX;

  mx = user->mx; my = user->my; lambda = user->param;
  hx = one/(PetscReal)(mx-1); hy = one/(PetscReal)(my-1);
  sc = hx*hy*lambda; hxdhy = hx/hy; hydhx = hy/hx;
  /*
     Scatter ghost points to local vector, using the 2-step process
        DMGlobalToLocalBegin(), DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  DMGlobalToLocalBegin(user->da,X,INSERT_VALUES,localX);
  DMGlobalToLocalEnd(user->da,X,INSERT_VALUES,localX);

  /*
     Get pointers to vector data
  */
  VecGetArray(localX,&x);
  VecGetArray(F,&f);

  /*
     Get local grid boundaries
  */
  DMDAGetCorners(user->da,&xs,&ys,NULL,&xm,&ym,NULL);
  DMDAGetGhostCorners(user->da,&gxs,&gys,NULL,&gxm,&gym,NULL);

  /*
     Compute function over the locally owned part of the grid
  */
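  /*
     In our reading, the running index below starts one slot before the first
     locally owned point of grid row j in the ghosted local ordering, so that
     the row++ at the top of the inner loop lands exactly on point (i,j).
  */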
  for (j=ys; j<ys+ym; j++) {
    row = (j - gys)*gxm + xs - gxs - 1;
    for (i=xs; i<xs+xm; i++) {
      row++;
      if (i == 0 || j == 0 || i == mx-1 || j == my-1) {
        f[row] = x[row];
        continue;
      }
      u      = x[row];
      uxx    = (two*u - x[row-1] - x[row+1])*hydhx;
      uyy    = (two*u - x[row-gxm] - x[row+gxm])*hxdhy;
      f[row] = uxx + uyy - sc*PetscExpScalar(u);
    }
  }

  /*
     Restore vectors
  */
  VecRestoreArray(localX,&x);
  VecRestoreArray(F,&f);
  PetscLogFlops(11.0*ym*xm);
  return 0;
}
/* ------------------------------------------------------------------- */
/*
   ComputeJacobian - Evaluates Jacobian matrix.

   Input Parameters:
.  X - input vector
.  user - user-defined application context

   Output Parameter:
.  jac - Jacobian matrix

   Notes:
   Due to grid point reordering with DMDAs, we must always work
   with the local grid points, and then transform them to the new
   global numbering with the "ltog" mapping.
   We cannot work directly with the global numbers for the original
   uniprocessor grid!
*/
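/*
   A sketch of an alternative (not used here): a matrix that already carries
   the DMDA's local-to-global mapping, e.g. one obtained from
   DMCreateMatrix(), lets entries be set with MatSetValuesLocal() using
   ghosted local indices directly, so the explicit "ltog" lookups below
   would not be needed.
*/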
PetscErrorCode ComputeJacobian(AppCtx *user,Vec X,Mat jac)
{
  Vec                    localX = user->localX;   /* local vector */
  const PetscInt         *ltog;                   /* local-to-global mapping */
  PetscInt               i,j,row,mx,my,col[5];
  PetscInt               xs,ys,xm,ym,gxs,gys,gxm,gym,grow;
  PetscScalar            two = 2.0,one = 1.0,lambda,v[5],hx,hy,hxdhy,hydhx,sc,*x;
  ISLocalToGlobalMapping ltogm;

  mx = user->mx; my = user->my; lambda = user->param;
  hx = one/(PetscReal)(mx-1); hy = one/(PetscReal)(my-1);
  sc = hx*hy; hxdhy = hx/hy; hydhx = hy/hx;
  /*
     Scatter ghost points to local vector, using the 2-step process
        DMGlobalToLocalBegin(), DMGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  DMGlobalToLocalBegin(user->da,X,INSERT_VALUES,localX);
  DMGlobalToLocalEnd(user->da,X,INSERT_VALUES,localX);

  /*
     Get pointer to vector data
  */
  VecGetArray(localX,&x);

  /*
     Get local grid boundaries
  */
  DMDAGetCorners(user->da,&xs,&ys,NULL,&xm,&ym,NULL);
  DMDAGetGhostCorners(user->da,&gxs,&gys,NULL,&gxm,&gym,NULL);

  /*
     Get the global node numbers for all local nodes, including ghost points
  */
  DMGetLocalToGlobalMapping(user->da,&ltogm);
  ISLocalToGlobalMappingGetIndices(ltogm,&ltog);
  /*
     Compute entries for the locally owned part of the Jacobian.
      - Currently, all PETSc parallel matrix formats are partitioned by
        contiguous chunks of rows across the processors.  The "grow"
        parameter computed below specifies the global row number
        corresponding to each local grid point.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.
      - Here, we set all entries for a particular row at once.
  */
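  /*
     The five entries per interior row below are, in our reading, simply the
     partial derivatives of the residual assembled in ComputeFunction():
     differentiating F_ij with respect to its five stencil neighbors gives
     off-diagonal entries -hy/hx (left/right) and -hx/hy (below/above), and
     the diagonal 2*(hy/hx + hx/hy) - hx*hy*lambda*exp(u_ij).
  */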
  for (j=ys; j<ys+ym; j++) {
    row = (j - gys)*gxm + xs - gxs - 1;
    for (i=xs; i<xs+xm; i++) {
      row++;
      grow = ltog[row];
      /* boundary points */
      if (i == 0 || j == 0 || i == mx-1 || j == my-1) {
        MatSetValues(jac,1,&grow,1,&grow,&one,INSERT_VALUES);
        continue;
      }
      /* interior grid points */
      v[0] = -hxdhy; col[0] = ltog[row - gxm];
      v[1] = -hydhx; col[1] = ltog[row - 1];
      v[2] = two*(hydhx + hxdhy) - sc*lambda*PetscExpScalar(x[row]); col[2] = grow;
      v[3] = -hydhx; col[3] = ltog[row + 1];
      v[4] = -hxdhy; col[4] = ltog[row + gxm];
      MatSetValues(jac,1,&grow,5,col,v,INSERT_VALUES);
    }
  }
  ISLocalToGlobalMappingRestoreIndices(ltogm,&ltog);
  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  MatAssemblyBegin(jac,MAT_FINAL_ASSEMBLY);
  VecRestoreArray(localX,&x);
  MatAssemblyEnd(jac,MAT_FINAL_ASSEMBLY);
  return 0;
}
/*TEST

   test:

TEST*/