Actual source code: ex38.c
petsc-3.12.2 2019-11-22
/*
   mpiexec -n 8 ./ex38 -ksp_type fbcgs -ksp_rtol 1.e-6 -sub_ksp_type bcgs -sub_ksp_rtol 1.e-3 -pc_type bjacobi -ksp_converged_reason -ksp_monitor -n1 64 -n2 64

   Contributed by Jie Chen for testing the flexible BiCGStab algorithm.
*/
static char help[] = "Solves the PDE (in 2D) -laplacian(u) + gamma x dot grad(u) + beta u = 1\n\
with zero Dirichlet boundary conditions. The discretization is the standard centered\n\
difference. Input parameters include:\n\
  -n1    : number of mesh points in 1st dimension (default 64)\n\
  -n2    : number of mesh points in 2nd dimension (default 64)\n\
  -h     : spacing between mesh points (default 1/n1)\n\
  -gamma : gamma (default 4/h)\n\
  -beta  : beta (default 0.01/h^2)\n\n";
/*T
   Concepts: KSP^basic parallel example;
   Concepts: KSP^Laplacian, 2d
   Concepts: Laplacian, 2d
   Processors: n
T*/
/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines
     petscvec.h    - vectors
     petscmat.h    - matrices
     petscis.h     - index sets
     petscksp.h    - Krylov subspace methods
     petscviewer.h - viewers
     petscpc.h     - preconditioners
*/
#include <petscksp.h>
int main(int argc,char **args)
{
  Vec            x,b,u;          /* approx solution, RHS, working vector */
  Mat            A;              /* linear system matrix */
  KSP            ksp;            /* linear solver context */
  PetscInt       n1, n2;         /* parameters */
  PetscReal      h, gamma, beta; /* parameters */
  PetscInt       i,j,Ii,J,Istart,Iend;
  PetscErrorCode ierr;           /* needed for the error checks below */
  PetscScalar    v, co1, co2;
#if defined(PETSC_USE_LOG)
  PetscLogStage  stage;
#endif
  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  n1 = 64;
  n2 = 64;
  PetscOptionsGetInt(NULL,NULL,"-n1",&n1,NULL);
  PetscOptionsGetInt(NULL,NULL,"-n2",&n2,NULL);

  h     = 1.0/n1;
  gamma = 4.0;
  beta  = 0.01;
  PetscOptionsGetReal(NULL,NULL,"-h",&h,NULL);
  PetscOptionsGetReal(NULL,NULL,"-gamma",&gamma,NULL);
  PetscOptionsGetReal(NULL,NULL,"-beta",&beta,NULL);
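  /* Scale the coefficients by the mesh spacing so that the defaults become
     gamma = 4/h and beta = 0.01/h^2, as stated in the help string above. */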
  gamma = gamma/h;
  beta  = beta/(h*h);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and set the right-hand-side vector.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create a parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime.  Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.

     Performance tuning note: For problems of substantial size,
     preallocation of matrix memory is crucial for attaining good
     performance.  See the matrix chapter of the users manual for details.
  */
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n1*n2,n1*n2);
  MatSetFromOptions(A);
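  /* Preallocate room for at most 5 nonzeros per row, matching the
     five-point stencil assembled below. */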
  MatMPIAIJSetPreallocation(A,5,NULL,5,NULL);
  MatSeqAIJSetPreallocation(A,5,NULL);
  MatSetUp(A);
  /*
     Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors.  Determine which
     rows of the matrix are locally owned.
  */
  MatGetOwnershipRange(A,&Istart,&Iend);

  /*
     Set matrix elements for the 2-D, five-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.
  */
  PetscLogStageRegister("Assembly", &stage);
  PetscLogStagePush(stage);
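  /* A descriptive gloss of the coefficients used below, inferred from the PDE
     in the help string: after multiplying the equation through by h^2, co1
     carries the convection coefficient gamma (times h^2/2) applied per grid
     index, and co2 = beta*h^2 is the reaction contribution added to the
     diagonal entry 4 + co2. */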
  co1 = gamma * h * h / 2.0;
  co2 = beta * h * h;
  for (Ii=Istart; Ii<Iend; Ii++) {
    i = Ii/n2; j = Ii - i*n2;
    if (i>0) {
      J = Ii - n2; v = -1.0 + co1*(PetscScalar)i;
      MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
    }
    if (i<n1-1) {
      J = Ii + n2; v = -1.0 + co1*(PetscScalar)i;
      MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
    }
    if (j>0) {
      J = Ii - 1; v = -1.0 + co1*(PetscScalar)j;
      MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
    }
    if (j<n2-1) {
      J = Ii + 1; v = -1.0 + co1*(PetscScalar)j;
      MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
    }
    v = 4.0 + co2;
    MatSetValues(A,1,&Ii,1,&Ii,&v,INSERT_VALUES);
  }
  /*
     Assemble the matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
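  /* As the comment above notes, purely local work could be placed here,
     between MatAssemblyBegin() and MatAssemblyEnd(), to overlap computation
     with the assembly communication; this example performs no such work. */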
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  PetscLogStagePop();
  /*
     Create parallel vectors.
      - We form 1 vector from scratch and then duplicate as needed.
      - When using VecCreate(), VecSetSizes() and VecSetFromOptions()
        in this example, we specify only the vector's global dimension;
        the parallel partitioning is determined at runtime.
      - When solving a linear system, the vectors and matrices MUST
        be partitioned accordingly.  PETSc automatically generates
        appropriately partitioned matrices and vectors when MatCreate()
        and VecCreate() are used with the same communicator.
      - The user can alternatively specify the local vector and matrix
        dimensions when more sophisticated partitioning is needed
        (replacing the PETSC_DECIDE argument in the VecSetSizes() statement
        below); see the sketch after these calls.
  */
  VecCreate(PETSC_COMM_WORLD,&b);
  VecSetSizes(b,PETSC_DECIDE,n1*n2);
  VecSetFromOptions(b);
  VecDuplicate(b,&x);
  VecDuplicate(b,&u);
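  /*
     As noted above, a local size could be supplied instead of PETSC_DECIDE
     when a specific partitioning is required.  A hedged sketch, where nlocal
     is a hypothetical locally owned row count not used in this example:

       VecSetSizes(b,nlocal,n1*n2);
  */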
  /*
     Set the right-hand side.
  */
  VecSet(b,1.0);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create the linear solver context.
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp);

  /*
     Set operators.  Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,A,A);
  /*
     Set linear solver defaults for this problem (optional).
      - By extracting the KSP and PC contexts from the KSP context,
        we can then directly call any KSP and PC routines to set
        various options.
  */
  KSPSetTolerances(ksp,1.e-6,1.e-50,PETSC_DEFAULT,200);
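  /*
     The comment above mentions extracting the KSP and PC contexts.  This
     example leaves the preconditioner to the command line, but a hedged
     sketch of configuring it directly would be:

       PC pc;
       KSPGetPC(ksp,&pc);
       PCSetType(pc,PCBJACOBI);

     Anything set this way is still overridden by -pc_type once
     KSPSetFromOptions() is called below.
  */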
  /*
     Set runtime options, e.g.,
       -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     KSPSetFromOptions() is called _after_ any other customization
     routines.
  */
  KSPSetFromOptions(ksp);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  KSPSolve(ksp,b,x);
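  /*
     Convergence is reported through -ksp_converged_reason in the runs listed
     at the top and bottom of this file.  A hedged sketch of checking it
     programmatically instead would be:

       KSPConvergedReason reason;
       KSPGetConvergedReason(ksp,&reason);
       if (reason < 0) PetscPrintf(PETSC_COMM_WORLD,"Linear solve diverged.\n");
  */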
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                                Clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp);
  VecDestroy(&u); VecDestroy(&x);
  VecDestroy(&b); MatDestroy(&A);
  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  ierr = PetscFinalize();
  return ierr;
}
/*TEST

   test:
      nsize: 8
      args: -ksp_type fbcgs -ksp_rtol 1.e-6 -sub_ksp_type bcgs -sub_ksp_rtol 1.e-3 -pc_type bjacobi -ksp_converged_reason -n1 64 -n2 64

TEST*/