Actual source code: rosenbrock3.c
/* Program usage: mpiexec -n 1 rosenbrock3 [-help] [all TAO options] */

/* Include "petsctao.h" so we can use TAO solvers. */
#include <petsctao.h>

static char help[] = "This example demonstrates use of the TAO package to \n\
solve an unconstrained minimization problem on a single processor. We \n\
minimize the extended Rosenbrock function: \n\
sum_{i=0}^{n/2-1} (alpha*(x_{2i+1}-x_{2i}^2)^2 + (1-x_{2i})^2) \n\
or the chained Rosenbrock function:\n\
sum_{i=0}^{n-2} alpha*(x_{i+1} - x_i^2)^2 + (1 - x_i)^2\n";
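
/*
  Both variants are sums of squares that vanish only when every entry of x is 1,
  so the global minimum value is 0 at x = (1,...,1).
*/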

/*
  User-defined application context - contains data needed by the
  application-provided call-back routines that evaluate the function,
  gradient, and hessian.
*/
typedef struct {
  PetscInt  n;     /* dimension */
  PetscReal alpha; /* condition parameter */
  PetscBool chained;
} AppCtx;

/* -------------- User-defined routines ---------- */
PetscErrorCode FormFunctionGradient(Tao, Vec, PetscReal *, Vec, void *);
PetscErrorCode FormHessian(Tao, Vec, Mat, Mat, void *);

int main(int argc, char **argv)
{
  PetscReal          zero = 0.0;
  Vec                x; /* solution vector */
  Mat                H;
  Tao                tao; /* Tao solver context */
  PetscBool          flg, test_lmvm = PETSC_FALSE;
  PetscMPIInt        size; /* number of processes running */
  AppCtx             user; /* user-defined application context */
  TaoConvergedReason reason;
  PetscInt           its, recycled_its = 0, oneshot_its = 0;

  /* Initialize TAO and PETSc */
  PetscInitialize(&argc, &argv, (char *)0, help);
  MPI_Comm_size(PETSC_COMM_WORLD, &size);
  PetscCheck(size == 1, PETSC_COMM_WORLD, PETSC_ERR_WRONG_MPI_SIZE, "Incorrect number of processors");

  /* Initialize problem parameters */
  user.n       = 2;
  user.alpha   = 99.0;
  user.chained = PETSC_FALSE;
  /* Check for command line arguments to override defaults */
  PetscOptionsGetInt(NULL, NULL, "-n", &user.n, &flg);
  PetscOptionsGetReal(NULL, NULL, "-alpha", &user.alpha, &flg);
  PetscOptionsGetBool(NULL, NULL, "-chained", &user.chained, &flg);
  PetscOptionsGetBool(NULL, NULL, "-test_lmvm", &test_lmvm, &flg);

  /* Allocate the solution vector and the Hessian matrix */
  VecCreateSeq(PETSC_COMM_SELF, user.n, &x);
  MatCreateSeqBAIJ(PETSC_COMM_SELF, 2, user.n, user.n, 1, NULL, &H);
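  /* H is a sequential block AIJ matrix with 2x2 blocks and one block preallocated
     per block row, matching the block-diagonal Hessian of the (default) extended
     Rosenbrock function, in which variables are coupled only in pairs. */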

  /* The TAO code begins here */

  /* Create TAO solver with desired solution method */
  TaoCreate(PETSC_COMM_SELF, &tao);
  TaoSetType(tao, TAOBQNLS);
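  /* TAOBQNLS is a bounded quasi-Newton line-search method: it builds a
     limited-memory Hessian approximation from gradient information and does not
     call the user Hessian routine. FormHessian is still registered below so that
     a solver that needs the Hessian can be selected at run time via -tao_type. */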

  /* Set solution vec and an initial guess */
  VecSet(x, zero);
  TaoSetSolution(tao, x);

  /* Set routines for function, gradient, hessian evaluation */
  TaoSetObjectiveAndGradient(tao, NULL, FormFunctionGradient, &user);
  TaoSetHessian(tao, H, H, FormHessian, &user);

  /* Check for TAO command line options */
  TaoSetFromOptions(tao);

  /* Solve the problem */
  TaoSetTolerances(tao, 1.e-5, 0.0, 0.0);
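  /* The first tolerance is the absolute gradient tolerance (gatol); the relative
     and progress tolerances are set to zero, so the loop below keeps solving
     until TAO reports TAO_CONVERGED_GATOL. */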
  TaoSetMaximumIterations(tao, 5);
  TaoSetRecycleHistory(tao, PETSC_TRUE);
  reason = TAO_CONTINUE_ITERATING;
  flg    = PETSC_FALSE;
  TaoGetRecycleHistory(tao, &flg);
  if (flg) PetscPrintf(PETSC_COMM_SELF, "Recycle: enabled\n");
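  /* With history recycling enabled, each TaoSolve() call below is capped at 5
     iterations and re-uses iterate information from the previous call instead of
     starting from scratch, so the loop effectively continues one solve in short
     bursts while recycled_its accumulates the total iteration count. */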
  while (reason != TAO_CONVERGED_GATOL) {
    TaoSolve(tao);
    TaoGetConvergedReason(tao, &reason);
    TaoGetIterationNumber(tao, &its);
    recycled_its += its;
    PetscPrintf(PETSC_COMM_SELF, "-----------------------\n");
  }

  /* Disable recycling and solve again! */
  TaoSetMaximumIterations(tao, 100);
  TaoSetRecycleHistory(tao, PETSC_FALSE);
  VecSet(x, zero);
  TaoGetRecycleHistory(tao, &flg);
  if (!flg) PetscPrintf(PETSC_COMM_SELF, "Recycle: disabled\n");
  TaoSolve(tao);
  TaoGetConvergedReason(tao, &reason);

  TaoGetIterationNumber(tao, &oneshot_its);
  PetscPrintf(PETSC_COMM_SELF, "-----------------------\n");
  PetscPrintf(PETSC_COMM_SELF, "recycled its: %" PetscInt_FMT " | oneshot its: %" PetscInt_FMT "\n", recycled_its, oneshot_its);
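  /* The two counters compare total work: recycled_its sums the iterations of the
     short recycled solves above, while oneshot_its is a single solve from the
     same zero initial guess with recycling disabled. */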

  TaoDestroy(&tao);
  VecDestroy(&x);
  MatDestroy(&H);

  PetscFinalize();
  return 0;
}

/* -------------------------------------------------------------------- */
/*
  FormFunctionGradient - Evaluates the function, f(X), and gradient, G(X).

  Input Parameters:
. tao - the Tao context
. X   - input vector
. ptr - optional user-defined context, as set by TaoSetObjectiveAndGradient()

  Output Parameters:
. G - vector containing the newly evaluated gradient
. f - function value

  Note:
  Some optimization methods ask for the function and the gradient evaluation
  at the same time. Evaluating both at once may be more efficient than
  evaluating each separately.
*/
PetscErrorCode FormFunctionGradient(Tao tao, Vec X, PetscReal *f, Vec G, void *ptr)
{
  AppCtx            *user = (AppCtx *)ptr;
  PetscInt           i, nn = user->n / 2;
  PetscReal          ff = 0, t1, t2, alpha = user->alpha;
  PetscScalar       *g;
  const PetscScalar *x;

  /* Get pointers to vector data */
  VecGetArrayRead(X, &x);
  VecGetArrayWrite(G, &g);

  /* Compute G(X) */
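  /* For each extended Rosenbrock term alpha*t1^2 + t2^2, with t1 = x_{2i+1} - x_{2i}^2
     and t2 = 1 - x_{2i}, the partial derivatives are
       df/dx_{2i}   = -4*alpha*t1*x_{2i} - 2*t2
       df/dx_{2i+1} =  2*alpha*t1
     which the loops below accumulate; in the chained variant g[i] picks up
     contributions from both term i-1 and term i. */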
  if (user->chained) {
    g[0] = 0;
    for (i = 0; i < user->n - 1; i++) {
      t1 = x[i + 1] - x[i] * x[i];
      ff += PetscSqr(1 - x[i]) + alpha * t1 * t1;
      g[i] += -2 * (1 - x[i]) + 2 * alpha * t1 * (-2 * x[i]);
      g[i + 1] = 2 * alpha * t1;
    }
  } else {
    for (i = 0; i < nn; i++) {
      t1 = x[2 * i + 1] - x[2 * i] * x[2 * i];
      t2 = 1 - x[2 * i];
      ff += alpha * t1 * t1 + t2 * t2;
      g[2 * i]     = -4 * alpha * t1 * x[2 * i] - 2.0 * t2;
      g[2 * i + 1] = 2 * alpha * t1;
    }
  }

  /* Restore vectors */
  VecRestoreArrayRead(X, &x);
  VecRestoreArrayWrite(G, &g);
  *f = ff;

  PetscLogFlops(15.0 * nn);
  return 0;
}

/* ------------------------------------------------------------------- */
/*
  FormHessian - Evaluates Hessian matrix.

  Input Parameters:
. tao - the Tao context
. x   - input vector
. ptr - optional user-defined context, as set by TaoSetHessian()

  Output Parameters:
. H - Hessian matrix

  Note: Providing the Hessian may not be necessary. Only some solvers
  require this matrix.
*/
PetscErrorCode FormHessian(Tao tao, Vec X, Mat H, Mat Hpre, void *ptr)
{
  AppCtx            *user = (AppCtx *)ptr;
  PetscInt           i, ind[2];
  PetscReal          alpha = user->alpha;
  PetscReal          v[2][2];
  const PetscScalar *x;
  PetscBool          assembled;

  /* Zero existing matrix entries */
  MatAssembled(H, &assembled);
  if (assembled || user->chained) MatZeroEntries(H);

  /* Get a pointer to vector data */
  VecGetArrayRead(X, &x);

  /* Compute H(X) entries */
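  /* Each 2x2 diagonal block of the extended Rosenbrock Hessian is
       [ 2 + 4*alpha*(3*x_{2i}^2 - x_{2i+1})   -4*alpha*x_{2i} ]
       [           -4*alpha*x_{2i}                  2*alpha    ]
     the chained variant instead adds overlapping 2x2 blocks for the
     consecutive variable pairs (i, i+1). */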
  if (user->chained) {
    for (i = 0; i < user->n - 1; i++) {
      PetscScalar t1 = x[i + 1] - x[i] * x[i];
      /* second derivatives of alpha*t1^2 + (1 - x_i)^2 with respect to x_i and x_{i+1} */
      v[0][0] = 2 + 2 * alpha * (4 * x[i] * x[i] - 2 * t1);
      v[0][1] = 2 * alpha * (-2 * x[i]);
      v[1][0] = 2 * alpha * (-2 * x[i]);
      v[1][1] = 2 * alpha;
      ind[0]  = i;
      ind[1]  = i + 1;
      MatSetValues(H, 2, ind, 2, ind, v[0], ADD_VALUES);
    }
  } else {
    for (i = 0; i < user->n / 2; i++) {
      v[1][1] = 2 * alpha;
      v[0][0] = -4 * alpha * (x[2 * i + 1] - 3 * x[2 * i] * x[2 * i]) + 2;
      v[1][0] = v[0][1] = -4.0 * alpha * x[2 * i];
      ind[0]  = 2 * i;
      ind[1]  = 2 * i + 1;
      MatSetValues(H, 2, ind, 2, ind, v[0], INSERT_VALUES);
    }
  }
  VecRestoreArrayRead(X, &x);

  /* Assemble matrix */
  MatAssemblyBegin(H, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(H, MAT_FINAL_ASSEMBLY);
  PetscLogFlops(9.0 * user->n / 2.0);
  return 0;
}

/*TEST

   build:
      requires: !complex

   test:
      args: -tao_type bqnls -tao_monitor
      requires: !single

TEST*/