Actual source code: ex4.c
static char help[] = "Solves a simple time-dependent linear PDE (the heat equation).\n\
Input parameters include:\n\
  -m <points>, where <points> = number of grid points\n\
  -time_dependent_rhs : Treat the problem as having a time-dependent right-hand side\n\
  -debug              : Activate debugging printouts\n\
  -nox                : Deactivate x-window graphics\n\n";
/* ------------------------------------------------------------------------

   This program solves the one-dimensional heat equation (also called the
   diffusion equation),
       u_t = u_xx,
   on the domain 0 <= x <= 1, with the boundary conditions
       u(t,0) = 0, u(t,1) = 0,
   and the initial condition
       u(0,x) = sin(6*pi*x) + 3*sin(2*pi*x).
   This is a linear, second-order, parabolic equation.

   We discretize the right-hand side using finite differences with
   uniform grid spacing h:
       u_xx = (u_{i+1} - 2u_{i} + u_{i-1})/(h^2)
   We then demonstrate time evolution using the various TS methods by
   running the program via
       mpiexec -n <procs> ex4 -ts_type <timestepping solver>

   We compare the approximate solution with the exact solution, given by
       u_exact(x,t) = exp(-36*pi*pi*t) * sin(6*pi*x) +
                      3*exp(-4*pi*pi*t) * sin(2*pi*x)

   Notes:
   This code demonstrates the TS solver interface to two variants of
   linear problems, u_t = f(u,t), namely
     - time-dependent f:   f(u,t) is a function of t
     - time-independent f: f(u,t) is simply f(u)

   The uniprocessor version of this code is ts/tutorials/ex3.c

  ------------------------------------------------------------------------- */
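/*
   For reference: with the interior stencil above and the identity rows that
   RHSMatrixHeat() below installs for the two boundary points, the spatially
   discretized problem is the linear ODE system

       u_t = A u,   interior rows of A = (1/h^2) * [ 1  -2  1 ],

   which the TS solvers then integrate in time.
*/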
/*
   Include "petscdmda.h" so that we can use distributed arrays (DMDAs) to manage
   the parallel grid. Include "petscts.h" so that we can use TS solvers.
   Note that this file automatically includes:
     petscsys.h    - base PETSc routines    petscvec.h  - vectors
     petscmat.h    - matrices
     petscis.h     - index sets             petscksp.h  - Krylov subspace methods
     petscviewer.h - viewers                petscpc.h   - preconditioners
     petscksp.h    - linear solvers         petscsnes.h - nonlinear solvers
*/
#include <petscdm.h>
#include <petscdmda.h>
#include <petscts.h>
#include <petscdraw.h>
/*
   User-defined application context - contains data needed by the
   application-provided call-back routines.
*/
typedef struct {
  MPI_Comm    comm;             /* communicator */
  DM          da;               /* distributed array data structure */
  Vec         localwork;        /* local ghosted work vector */
  Vec         u_local;          /* local ghosted approximate solution vector */
  Vec         solution;         /* global exact solution vector */
  PetscInt    m;                /* total number of grid points */
  PetscReal   h;                /* mesh width h = 1/(m-1) */
  PetscBool   debug;            /* flag (1 indicates activation of debugging printouts) */
  PetscViewer viewer1, viewer2; /* viewers for the solution and error */
  PetscReal   norm_2, norm_max; /* error norms */
} AppCtx;
/*
   User-defined routines
*/
extern PetscErrorCode InitialConditions(Vec, AppCtx *);
extern PetscErrorCode RHSMatrixHeat(TS, PetscReal, Vec, Mat, Mat, void *);
extern PetscErrorCode RHSFunctionHeat(TS, PetscReal, Vec, Vec, void *);
extern PetscErrorCode Monitor(TS, PetscInt, PetscReal, Vec, void *);
extern PetscErrorCode ExactSolution(PetscReal, Vec, AppCtx *);
int main(int argc, char **argv)
{
  AppCtx        appctx;               /* user-defined application context */
  TS            ts;                   /* timestepping context */
  Mat           A;                    /* matrix data structure */
  Vec           u;                    /* approximate solution vector */
  PetscReal     time_total_max = 1.0; /* default max total time */
  PetscInt      time_steps_max = 100; /* default max timesteps */
  PetscDraw     draw;                 /* drawing context */
  PetscInt      steps, m;
  PetscMPIInt   size;
  PetscReal     dt, ftime;
  PetscBool     flg;
  TSProblemType tsproblem = TS_LINEAR;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Initialize program and set problem parameters
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  PetscInitialize(&argc, &argv, (char *)0, help);
  appctx.comm = PETSC_COMM_WORLD;

  m = 60;
  PetscOptionsGetInt(NULL, NULL, "-m", &m, NULL);
  PetscOptionsHasName(NULL, NULL, "-debug", &appctx.debug);
  appctx.m        = m;
  appctx.h        = 1.0 / (m - 1.0);
  appctx.norm_2   = 0.0;
  appctx.norm_max = 0.0;

  MPI_Comm_size(PETSC_COMM_WORLD, &size);
  PetscPrintf(PETSC_COMM_WORLD, "Solving a linear TS problem, number of processors = %d\n", size);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create vector data structures
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create distributed array (DMDA) to manage parallel grid and vectors
     and to set up the ghost point communication pattern. There are M
     total grid values spread equally among all the processors.
  */
  DMDACreate1d(PETSC_COMM_WORLD, DM_BOUNDARY_NONE, m, 1, 1, NULL, &appctx.da);
  DMSetFromOptions(appctx.da);
  DMSetUp(appctx.da);

  /*
     Extract global and local vectors from DMDA; we use these to store the
     approximate solution. Then duplicate these for remaining vectors that
     have the same types.
  */
  DMCreateGlobalVector(appctx.da, &u);
  DMCreateLocalVector(appctx.da, &appctx.u_local);

  /*
     Create local work vector for use in evaluating right-hand-side function;
     create global work vector for storing exact solution.
  */
  VecDuplicate(appctx.u_local, &appctx.localwork);
  VecDuplicate(u, &appctx.solution);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set up displays to show graphs of the solution and error
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  PetscViewerDrawOpen(PETSC_COMM_WORLD, 0, "", 80, 380, 400, 160, &appctx.viewer1);
  PetscViewerDrawGetDraw(appctx.viewer1, 0, &draw);
  PetscDrawSetDoubleBuffer(draw);
  PetscViewerDrawOpen(PETSC_COMM_WORLD, 0, "", 80, 0, 400, 160, &appctx.viewer2);
  PetscViewerDrawGetDraw(appctx.viewer2, 0, &draw);
  PetscDrawSetDoubleBuffer(draw);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create timestepping solver context
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  TSCreate(PETSC_COMM_WORLD, &ts);

  flg = PETSC_FALSE;
  PetscOptionsGetBool(NULL, NULL, "-nonlinear", &flg, NULL);
  tsproblem = flg ? TS_NONLINEAR : TS_LINEAR; /* record the choice so the -nonlinear branch below is taken */
  TSSetProblemType(ts, tsproblem);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set optional user-defined monitoring routine
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  TSMonitorSet(ts, Monitor, &appctx, NULL);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create matrix data structure; set matrix evaluation routine.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  MatCreate(PETSC_COMM_WORLD, &A);
  MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, m, m);
  MatSetFromOptions(A);
  MatSetUp(A);

  flg = PETSC_FALSE;
  PetscOptionsGetBool(NULL, NULL, "-time_dependent_rhs", &flg, NULL);
  if (flg) {
    /*
       For linear problems with a time-dependent f(u,t) in the equation
       u_t = f(u,t), the user provides the discretized right-hand side
       as a time-dependent matrix.
    */
    TSSetRHSFunction(ts, NULL, TSComputeRHSFunctionLinear, &appctx);
    TSSetRHSJacobian(ts, A, A, RHSMatrixHeat, &appctx);
  } else {
    /*
       For linear problems with a time-independent f(u) in the equation
       u_t = f(u), the user provides the discretized right-hand side
       as a matrix only once, and then supplies TSComputeRHSJacobianConstant
       as the matrix evaluation routine so the assembled matrix is reused.
    */
    RHSMatrixHeat(ts, 0.0, u, A, A, &appctx);
    TSSetRHSFunction(ts, NULL, TSComputeRHSFunctionLinear, &appctx);
    TSSetRHSJacobian(ts, A, A, TSComputeRHSJacobianConstant, &appctx);
  }
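  /*
     Note on the calls above (see the corresponding TS manual pages): for a
     linear ODE u_t = A(t) u, TSComputeRHSFunctionLinear() evaluates the
     right-hand side by applying the matrix provided through
     TSSetRHSJacobian(), and TSComputeRHSJacobianConstant() simply reuses
     that matrix, so in the time-independent case A is assembled only once.
  */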
  if (tsproblem == TS_NONLINEAR) {
    SNES snes;
    TSSetRHSFunction(ts, NULL, RHSFunctionHeat, &appctx);
    TSGetSNES(ts, &snes);
    SNESSetJacobian(snes, NULL, NULL, SNESComputeJacobianDefault, NULL);
  }

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set solution vector and initial timestep
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  dt = appctx.h * appctx.h / 2.0;
  TSSetTimeStep(ts, dt);
  TSSetSolution(ts, u);
  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Customize timestepping solver:
       - Set timestepping duration info.
     Then set runtime options, which can override these defaults.
     For example,
         -ts_max_steps <maxsteps> -ts_max_time <maxtime>
     to override the defaults set by TSSetMaxSteps()/TSSetMaxTime().
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  TSSetMaxSteps(ts, time_steps_max);
  TSSetMaxTime(ts, time_total_max);
  TSSetExactFinalTime(ts, TS_EXACTFINALTIME_STEPOVER);
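  /*
     TS_EXACTFINALTIME_STEPOVER lets the last step run past the final time
     rather than shortening it (the alternatives are MATCHSTEP and
     INTERPOLATE); see TSSetExactFinalTime(), or use -ts_exact_final_time
     to change this at runtime.
  */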
  TSSetFromOptions(ts);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Solve the problem
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Evaluate initial conditions
  */
  InitialConditions(u, &appctx);

  /*
     Run the timestepping solver
  */
  TSSolve(ts, u);
  TSGetSolveTime(ts, &ftime);
  TSGetStepNumber(ts, &steps);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     View timestepping solver info
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  PetscPrintf(PETSC_COMM_WORLD, "Total timesteps %" PetscInt_FMT ", Final time %g\n", steps, (double)ftime);
  PetscPrintf(PETSC_COMM_WORLD, "Avg. error (2 norm) = %g Avg. error (max norm) = %g\n", (double)(appctx.norm_2 / steps), (double)(appctx.norm_max / steps));

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Free work space. All PETSc objects should be destroyed when they
     are no longer needed.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  TSDestroy(&ts);
  MatDestroy(&A);
  VecDestroy(&u);
  PetscViewerDestroy(&appctx.viewer1);
  PetscViewerDestroy(&appctx.viewer2);
  VecDestroy(&appctx.localwork);
  VecDestroy(&appctx.solution);
  VecDestroy(&appctx.u_local);
  DMDestroy(&appctx.da);

  /*
     Always call PetscFinalize() before exiting a program. This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_view).
  */
  PetscFinalize();
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   InitialConditions - Computes the solution at the initial time.

   Input Parameters:
   u - uninitialized solution vector (global)
   appctx - user-defined application context

   Output Parameter:
   u - vector with solution at initial time (global)
*/
PetscErrorCode InitialConditions(Vec u, AppCtx *appctx)
{
  PetscScalar *u_localptr, h = appctx->h;
  PetscInt     i, mybase, myend;

  /*
     Determine starting point of each processor's range of
     grid values.
  */
  VecGetOwnershipRange(u, &mybase, &myend);

  /*
     Get a pointer to vector data.
       - For default PETSc vectors, VecGetArray() returns a pointer to
         the data array. Otherwise, the routine is implementation dependent.
       - You MUST call VecRestoreArray() when you no longer need access to
         the array.
       - Note that the Fortran interface to VecGetArray() differs from the
         C version. See the users manual for details.
  */
  VecGetArray(u, &u_localptr);

  /*
     We initialize the solution array by simply writing the solution
     directly into the array locations. Alternatively, we could use
     VecSetValues() or VecSetValuesLocal().
  */
  for (i = mybase; i < myend; i++) u_localptr[i - mybase] = PetscSinScalar(PETSC_PI * i * 6. * h) + 3. * PetscSinScalar(PETSC_PI * i * 2. * h);

  /*
     Restore vector
  */
  VecRestoreArray(u, &u_localptr);

  /*
     Print debugging information if desired
  */
  if (appctx->debug) {
    PetscPrintf(appctx->comm, "initial guess vector\n");
    VecView(u, PETSC_VIEWER_STDOUT_WORLD);
  }

  return 0;
}
/* --------------------------------------------------------------------- */
/*
   ExactSolution - Computes the exact solution at a given time.

   Input Parameters:
   t - current time
   solution - vector in which exact solution will be computed
   appctx - user-defined application context

   Output Parameter:
   solution - vector with the newly computed exact solution
*/
PetscErrorCode ExactSolution(PetscReal t, Vec solution, AppCtx *appctx)
{
  PetscScalar *s_localptr, h = appctx->h, ex1, ex2, sc1, sc2;
  PetscInt     i, mybase, myend;

  /*
     Determine starting and ending points of each processor's
     range of grid values
  */
  VecGetOwnershipRange(solution, &mybase, &myend);

  /*
     Get a pointer to vector data.
  */
  VecGetArray(solution, &s_localptr);

  /*
     Simply write the solution directly into the array locations.
     Alternatively, we could use VecSetValues() or VecSetValuesLocal().
  */
  ex1 = PetscExpReal(-36. * PETSC_PI * PETSC_PI * t);
  ex2 = PetscExpReal(-4. * PETSC_PI * PETSC_PI * t);
  sc1 = PETSC_PI * 6. * h;
  sc2 = PETSC_PI * 2. * h;
  for (i = mybase; i < myend; i++) s_localptr[i - mybase] = PetscSinScalar(sc1 * (PetscReal)i) * ex1 + 3. * PetscSinScalar(sc2 * (PetscReal)i) * ex2;

  /*
     Restore vector
  */
  VecRestoreArray(solution, &s_localptr);
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   Monitor - User-provided routine to monitor the solution computed at
   each timestep. This example plots the solution and computes the
   error in two different norms.

   Input Parameters:
   ts - the timestep context
   step - the count of the current step (with 0 meaning the
          initial condition)
   time - the current time
   u - the solution at this timestep
   ctx - the user-provided context for this monitoring routine.
         In this case we use the application context which contains
         information about the problem size, workspace and the exact
         solution.
*/
PetscErrorCode Monitor(TS ts, PetscInt step, PetscReal time, Vec u, void *ctx)
{
  AppCtx   *appctx = (AppCtx *)ctx; /* user-defined application context */
  PetscReal norm_2, norm_max;

  /*
     View a graph of the current iterate
  */
  VecView(u, appctx->viewer2);

  /*
     Compute the exact solution
  */
  ExactSolution(time, appctx->solution, appctx);

  /*
     Print debugging information if desired
  */
  if (appctx->debug) {
    PetscPrintf(appctx->comm, "Computed solution vector\n");
    VecView(u, PETSC_VIEWER_STDOUT_WORLD);
    PetscPrintf(appctx->comm, "Exact solution vector\n");
    VecView(appctx->solution, PETSC_VIEWER_STDOUT_WORLD);
  }

  /*
     Compute the 2-norm and max-norm of the error
  */
  VecAXPY(appctx->solution, -1.0, u);
  VecNorm(appctx->solution, NORM_2, &norm_2);
  norm_2 = PetscSqrtReal(appctx->h) * norm_2;
  VecNorm(appctx->solution, NORM_MAX, &norm_max);
  if (norm_2 < 1e-14) norm_2 = 0;
  if (norm_max < 1e-14) norm_max = 0;
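  /*
     The sqrt(h) factor turns the discrete 2-norm into an approximation of
     the continuous L2(0,1) norm of the error; the 1e-14 cutoffs simply keep
     roundoff-level errors from cluttering the output below.
  */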
  /*
     PetscPrintf() causes only the first processor in this
     communicator to print the timestep information.
  */
  PetscPrintf(appctx->comm, "Timestep %" PetscInt_FMT ": time = %g 2-norm error = %g max norm error = %g\n", step, (double)time, (double)norm_2, (double)norm_max);
  appctx->norm_2 += norm_2;
  appctx->norm_max += norm_max;

  /*
     View a graph of the error
  */
  VecView(appctx->solution, appctx->viewer1);

  /*
     Print debugging information if desired
  */
  if (appctx->debug) {
    PetscPrintf(appctx->comm, "Error vector\n");
    VecView(appctx->solution, PETSC_VIEWER_STDOUT_WORLD);
  }

  return 0;
}
/* --------------------------------------------------------------------- */
/*
   RHSMatrixHeat - User-provided routine to compute the right-hand-side
   matrix for the heat equation.

   Input Parameters:
   ts - the TS context
   t - current time
   X - global input vector
   ctx - optional user-defined context, as set by TSSetRHSJacobian()

   Output Parameters:
   AA - Jacobian matrix
   BB - optionally different preconditioning matrix

   Notes:
   RHSMatrixHeat computes entries for the locally owned part of the system.
    - Currently, all PETSc parallel matrix formats are partitioned by
      contiguous chunks of rows across the processors.
    - Each processor needs to insert only elements that it owns
      locally (but any non-local elements will be sent to the
      appropriate processor during matrix assembly).
    - Always specify global row and column indices of matrix entries when
      using MatSetValues(); we could alternatively use MatSetValuesLocal().
    - Here, we set all entries for a particular row at once.
    - Note that MatSetValues() uses 0-based row and column numbers
      in Fortran as well as in C.
*/
PetscErrorCode RHSMatrixHeat(TS ts, PetscReal t, Vec X, Mat AA, Mat BB, void *ctx)
{
  Mat         A      = AA;            /* Jacobian matrix */
  AppCtx     *appctx = (AppCtx *)ctx; /* user-defined application context */
  PetscInt    i, mstart, mend, idx[3];
  PetscScalar v[3], stwo = -2. / (appctx->h * appctx->h), sone = -.5 * stwo;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Compute entries for the locally owned part of the matrix
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  MatGetOwnershipRange(A, &mstart, &mend);

  /*
     Set matrix rows corresponding to boundary data
  */
  if (mstart == 0) { /* first processor only */
    v[0] = 1.0;
    MatSetValues(A, 1, &mstart, 1, &mstart, v, INSERT_VALUES);
    mstart++;
  }

  if (mend == appctx->m) { /* last processor only */
    mend--;
    v[0] = 1.0;
    MatSetValues(A, 1, &mend, 1, &mend, v, INSERT_VALUES);
  }

  /*
     Set matrix rows corresponding to interior data. We construct the
     matrix one row at a time.
  */
  v[0] = sone;
  v[1] = stwo;
  v[2] = sone;
  for (i = mstart; i < mend; i++) {
    idx[0] = i - 1;
    idx[1] = i;
    idx[2] = i + 1;
    MatSetValues(A, 1, &i, 3, idx, v, INSERT_VALUES);
  }

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Complete the matrix assembly process and set some options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);

  /*
     Set an option to indicate that we will never add a new nonzero location
     to the matrix. If we do, it will generate an error.
  */
  MatSetOption(A, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);

  return 0;
}
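/*
   RHSFunctionHeat is used only when the program is run with -nonlinear: it
   rebuilds the (time-independent) matrix via RHSMatrixHeat() and applies it
   to the input vector, while SNESComputeJacobianDefault (set in main)
   approximates the Jacobian by finite-differencing this function.
*/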
PetscErrorCode RHSFunctionHeat(TS ts, PetscReal t, Vec globalin, Vec globalout, void *ctx)
{
  Mat A;

  TSGetRHSJacobian(ts, &A, NULL, NULL, &ctx);
  RHSMatrixHeat(ts, t, globalin, A, NULL, ctx);
  /* MatView(A,PETSC_VIEWER_STDOUT_WORLD); */
  MatMult(A, globalin, globalout);
  return 0;
}
/*TEST

   test:
     args: -ts_view -nox

   test:
     suffix: 2
     args: -ts_view -nox
     nsize: 3

   test:
     suffix: 3
     args: -ts_view -nox -nonlinear

   test:
     suffix: 4
     args: -ts_view -nox -nonlinear
     nsize: 3
     timeoutfactor: 3

   test:
     suffix: sundials
     requires: sundials2
     args: -nox -ts_type sundials -ts_max_steps 5 -nonlinear
     nsize: 4

   test:
     suffix: sundials_dense
     requires: sundials2
     args: -nox -ts_type sundials -ts_sundials_use_dense -ts_max_steps 5 -nonlinear
     nsize: 1

TEST*/