Actual source code: ex2.c

/*$Id: ex2.c,v 1.94 2001/08/07 21:30:54 bsmith Exp $*/

/* Program usage:  mpirun -np <procs> ex2 [-help] [all PETSc options] */

static char help[] = "Solves a linear system in parallel with KSP.\n\
Input parameters include:\n\
  -random_exact_sol : use a random exact solution vector\n\
  -view_exact_sol   : write exact solution vector to stdout\n\
  -m <mesh_x>       : number of mesh points in x-direction\n\
  -n <mesh_y>       : number of mesh points in y-direction\n\n";

/*T
   Concepts: KSP^basic parallel example;
   Concepts: KSP^Laplacian, 2d
   Concepts: Laplacian, 2d
   Processors: n
T*/

/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petsc.h       - base PETSc routines   petscvec.h - vectors
     petscsys.h    - system routines       petscmat.h - matrices
     petscis.h     - index sets            petscksp.h - Krylov subspace methods
     petscviewer.h - viewers               petscpc.h  - preconditioners
*/
#include "petscksp.h"

int main(int argc,char **args)
{
  Vec         x,b,u;    /* approx solution, RHS, exact solution */
  Mat         A;        /* linear system matrix */
  KSP         ksp;      /* linear solver context */
  PetscRandom rctx;     /* random number generator context */
  PetscReal   norm;     /* norm of solution error */
  int         i,j,I,J,Istart,Iend,m = 8,n = 7,its;
  PetscTruth  flg;
  PetscScalar v,one = 1.0,neg_one = -1.0;

  PetscInitialize(&argc,&args,(char *)0,help);
  PetscOptionsGetInt(PETSC_NULL,"-m",&m,PETSC_NULL);
  PetscOptionsGetInt(PETSC_NULL,"-n",&n,PETSC_NULL);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and right-hand-side vector that define
         the linear system, Ax = b.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime.  Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.

     Performance tuning note:  For problems of substantial size,
     preallocation of matrix memory is crucial for attaining good
     performance.  Since preallocation is not possible via the generic
     matrix creation routine MatCreate(), we recommend that practical
     applications instead use the creation routine for a particular
     matrix format, e.g.,
         MatCreateMPIAIJ()  - parallel AIJ (compressed sparse row)
         MatCreateMPIBAIJ() - parallel block AIJ
     See the matrix chapter of the users manual for details; a sketch of
     the preallocated call appears after MatSetFromOptions() below.
  */
  MatCreate(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n,&A);
  MatSetFromOptions(A);
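
  /*
     A hedged sketch of the preallocated alternative mentioned above:
     the five-point stencil creates at most 5 nonzeros per row, so the
     two calls above could instead be, e.g.,

        MatCreateMPIAIJ(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,
                        m*n,m*n,5,PETSC_NULL,5,PETSC_NULL,&A);
  */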

  /*
     Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors.  Determine which
     rows of the matrix are locally owned.
  */
  MatGetOwnershipRange(A,&Istart,&Iend);
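
  /*
     Illustration (a hedged sketch): each process could report the rows
     it owns; PetscSynchronizedPrintf() orders the output by rank.

        PetscSynchronizedPrintf(PETSC_COMM_WORLD,
                                "rows %d through %d are local\n",Istart,Iend-1);
        PetscSynchronizedFlush(PETSC_COMM_WORLD);
  */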

  /*
     Set matrix elements for the 2-D, five-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.

     Note: this uses the less common natural ordering that orders all
     the unknowns for x = h first, then those for x = 2h, etc.; hence
     you see J = I +- n instead of J = I +- m as you might expect.  The
     more standard ordering would first do all variables for y = h,
     then y = 2h, etc.
   */
  for (I=Istart; I<Iend; I++) {
    v = -1.0; i = I/n; j = I - i*n;
    if (i>0)   {J = I - n; MatSetValues(A,1,&I,1,&J,&v,INSERT_VALUES);}
    if (i<m-1) {J = I + n; MatSetValues(A,1,&I,1,&J,&v,INSERT_VALUES);}
    if (j>0)   {J = I - 1; MatSetValues(A,1,&I,1,&J,&v,INSERT_VALUES);}
    if (j<n-1) {J = I + 1; MatSetValues(A,1,&I,1,&J,&v,INSERT_VALUES);}
    v = 4.0; MatSetValues(A,1,&I,1,&I,&v,INSERT_VALUES);
  }
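
  /*
     Worked example of the ordering above: with the defaults m = 8,
     n = 7, row I = 10 corresponds to grid indices i = 10/7 = 1,
     j = 10 - 7 = 3; its x-direction neighbors are rows 10 -+ 7
     (i.e., 3 and 17), and its y-direction neighbors are rows 9 and 11.
  */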

  /*
     Assemble the matrix, using the two-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transit by placing
     code between these two statements; see the sketch below.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
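
  /*
     A hedged sketch of the overlap mentioned above: any purely local
     work that does not touch A may sit between the two calls, e.g.,

        MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
        (independent local computation here)
        MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  */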

  /*
     Create parallel vectors.
      - We form one vector from scratch and then duplicate it as needed.
      - When using VecCreate(), VecSetSizes(), and VecSetFromOptions()
        in this example, we specify only the vector's global dimension;
        the parallel partitioning is determined at runtime.
      - When solving a linear system, the vectors and matrices MUST
        be partitioned accordingly.  PETSc automatically generates
        appropriately partitioned matrices and vectors when MatCreate()
        and VecCreate() are used with the same communicator.
      - The user can alternatively specify the local vector and matrix
        dimensions when more sophisticated partitioning is needed
        (replacing the PETSC_DECIDE argument in the VecSetSizes() call
        below); a sketch follows the vector creation.
  */
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b);
  VecDuplicate(b,&x);
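
  /*
     A hedged sketch of the explicit-partitioning alternative mentioned
     above: since this process owns rows Istart through Iend-1 of A,
     one could match the vector layout to the matrix by hand with

        VecSetSizes(u,Iend-Istart,m*n);
  */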

  /*
     Set the exact solution; then compute the right-hand-side vector.
     By default we use an exact solution with all elements equal to 1.0;
     alternatively, the runtime option -random_exact_sol forms a
     solution vector with random components.
  */
  PetscOptionsHasName(PETSC_NULL,"-random_exact_sol",&flg);
  if (flg) {
    PetscRandomCreate(PETSC_COMM_WORLD,RANDOM_DEFAULT,&rctx);
    VecSetRandom(rctx,u);
    PetscRandomDestroy(rctx);
  } else {
    VecSet(&one,u);
  }
  MatMult(A,u,b);

  /*
     View the exact solution vector if desired.
  */
  PetscOptionsHasName(PETSC_NULL,"-view_exact_sol",&flg);
  if (flg) {VecView(u,PETSC_VIEWER_STDOUT_WORLD);}

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create the linear solver context.
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp);

  /*
     Set operators.  Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);

  /*
     Set linear solver defaults for this problem (optional).
     - By extracting the KSP and PC contexts from the KSP context,
       we can then directly call any KSP and PC routines to set
       various options, as sketched below.
     - The KSPSetTolerances() call that follows is optional; all of
       these parameters could alternatively be specified at runtime via
       KSPSetFromOptions().  All of these defaults can be overridden
       at runtime, as indicated below.
  */
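
  /*
     For instance (a hedged sketch of the context extraction mentioned
     above):

        PC pc;
        KSPGetPC(ksp,&pc);
        PCSetType(pc,PCJACOBI);
        KSPSetType(ksp,KSPGMRES);
  */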

  KSPSetTolerances(ksp,1.e-2/((m+1)*(n+1)),1.e-50,PETSC_DEFAULT,PETSC_DEFAULT);

  /*
    Set runtime options, e.g.,
        -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
    These options will override those specified above as long as
    KSPSetFromOptions() is called _after_ any other customization
    routines.
  */
  KSPSetFromOptions(ksp);
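
  /*
     An illustrative invocation (hedged; any KSP/PC options apply):

        mpirun -np 4 ex2 -m 32 -n 32 -ksp_type bcgs -pc_type jacobi -ksp_monitor
  */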

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  KSPSetRhs(ksp,b);
  KSPSetSolution(ksp,x);
  KSPSolve(ksp);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Check solution and clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Check the error.
  */
  VecAXPY(&neg_one,u,x);
  VecNorm(x,NORM_2,&norm);
  KSPGetIterationNumber(ksp,&its);
  /* Optionally scale the norm by the mesh spacing:
     norm *= sqrt(1.0/((m+1)*(n+1)));  */

  /*
     Print convergence information.  PetscPrintf() produces a single
     print statement from all processes that share a communicator.
     An alternative is PetscFPrintf(), which prints to a file.
  */
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %A iterations %d\n",norm,its);

  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(ksp);
  VecDestroy(u);  VecDestroy(x);
  VecDestroy(b);  MatDestroy(A);

  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_summary).
  */
  PetscFinalize();
  return 0;
}