Actual source code: nls.c

  1: #include <petsctaolinesearch.h>
  2: #include <../src/tao/unconstrained/impls/nls/nlsimpl.h>

  4: #include <petscksp.h>

  6: #define NLS_INIT_CONSTANT      0
  7: #define NLS_INIT_DIRECTION     1
  8: #define NLS_INIT_INTERPOLATION 2
  9: #define NLS_INIT_TYPES         3

 11: #define NLS_UPDATE_STEP          0
 12: #define NLS_UPDATE_REDUCTION     1
 13: #define NLS_UPDATE_INTERPOLATION 2
 14: #define NLS_UPDATE_TYPES         3

 16: static const char *NLS_INIT[64] = {"constant", "direction", "interpolation"};

 18: static const char *NLS_UPDATE[64] = {"step", "reduction", "interpolation"};

 20: /*
 21:  Implements Newton's Method with a line search approach for solving
 22:  unconstrained minimization problems.  A Moré-Thuente line search
 23:  is used to guarantee that the BFGS preconditioner remains positive
 24:  definite.

 26:  The method can shift the Hessian matrix.  The shifting procedure is
 27:  adapted from the PATH algorithm for solving complementarity
 28:  problems.

 30:  The linear system solve should be done with a conjugate gradient
 31:  method, although any method can be used.
 32: */
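/*
 Editorial note (hedged sketch, not part of the original source): an outline of one
 iteration of TaoSolve_NLS below, assuming the default STCG Krylov solver and an
 LMVM (BFGS) preconditioner:

   compute the Hessian H(x); if pert > 0, shift it:  H <- H + pert*I
   solve the Newton system with the KSP:             H d = g, then d <- -d
   if d is not a descent direction (g'd >= 0):
     increase pert and fall back to the BFGS direction d = -M^{-1} g, or to the
     scaled gradient direction if the BFGS direction also fails
   apply the Moré-Thuente line search along d, then update x, pert, and the
   trust-region radius used by the NASH/STCG/GLTR solvers
*/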

 34: #define NLS_NEWTON   0
 35: #define NLS_BFGS     1
 36: #define NLS_GRADIENT 2

 38: static PetscErrorCode TaoSolve_NLS(Tao tao)
 39: {
 40:   TAO_NLS                     *nlsP = (TAO_NLS *)tao->data;
 41:   KSPType                      ksp_type;
 42:   PetscBool                    is_nash, is_stcg, is_gltr, is_bfgs, is_jacobi, is_symmetric, sym_set;
 43:   KSPConvergedReason           ksp_reason;
 44:   PC                           pc;
 45:   TaoLineSearchConvergedReason ls_reason;

 47:   PetscReal fmin, ftrial, f_full, prered, actred, kappa, sigma;
 48:   PetscReal tau, tau_1, tau_2, tau_max, tau_min, max_radius;
 49:   PetscReal f, fold, gdx, gnorm, pert;
 50:   PetscReal step   = 1.0;
 51:   PetscReal norm_d = 0.0, e_min;

 53:   PetscInt stepType;
 54:   PetscInt bfgsUpdates = 0;
 55:   PetscInt n, N, kspits;
 56:   PetscInt needH = 1;

 58:   PetscInt i_max = 5;
 59:   PetscInt j_max = 1;
 60:   PetscInt i, j;

 62:   if (tao->XL || tao->XU || tao->ops->computebounds) PetscInfo(tao, "WARNING: Variable bounds have been set but will be ignored by nls algorithm\n");

 64:   /* Initialize variables */
 65:   pert = nlsP->sval;

 67:   /* Number of times ksp stopped because of these reasons */
 68:   nlsP->ksp_atol = 0;
 69:   nlsP->ksp_rtol = 0;
 70:   nlsP->ksp_dtol = 0;
 71:   nlsP->ksp_ctol = 0;
 72:   nlsP->ksp_negc = 0;
 73:   nlsP->ksp_iter = 0;
 74:   nlsP->ksp_othr = 0;

 76:   /* Initialize trust-region radius when using nash, stcg, or gltr
 77:      Command automatically ignored for other methods
 78:      Will be reset during the first iteration
 79:   */
 80:   KSPGetType(tao->ksp, &ksp_type);
 81:   PetscStrcmp(ksp_type, KSPNASH, &is_nash);
 82:   PetscStrcmp(ksp_type, KSPSTCG, &is_stcg);
 83:   PetscStrcmp(ksp_type, KSPGLTR, &is_gltr);

 85:   KSPCGSetRadius(tao->ksp, nlsP->max_radius);

 87:   if (is_nash || is_stcg || is_gltr) {
 89:     tao->trust = tao->trust0;
 90:     tao->trust = PetscMax(tao->trust, nlsP->min_radius);
 91:     tao->trust = PetscMin(tao->trust, nlsP->max_radius);
 92:   }

 94:   /* Check convergence criteria */
 95:   TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
 96:   TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm);

 99:   tao->reason = TAO_CONTINUE_ITERATING;
100:   TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its);
101:   TaoMonitor(tao, tao->niter, f, gnorm, 0.0, step);
102:   PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
103:   if (tao->reason != TAO_CONTINUE_ITERATING) return 0;

105:   /* Allocate the vectors needed for the BFGS approximation */
106:   KSPGetPC(tao->ksp, &pc);
107:   PetscObjectTypeCompare((PetscObject)pc, PCLMVM, &is_bfgs);
108:   PetscObjectTypeCompare((PetscObject)pc, PCJACOBI, &is_jacobi);
109:   if (is_bfgs) {
110:     nlsP->bfgs_pre = pc;
111:     PCLMVMGetMatLMVM(nlsP->bfgs_pre, &nlsP->M);
112:     VecGetLocalSize(tao->solution, &n);
113:     VecGetSize(tao->solution, &N);
114:     MatSetSizes(nlsP->M, n, n, N, N);
115:     MatLMVMAllocate(nlsP->M, tao->solution, tao->gradient);
116:     MatIsSymmetricKnown(nlsP->M, &sym_set, &is_symmetric);
118:   } else if (is_jacobi) PCJacobiSetUseAbs(pc, PETSC_TRUE);
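  /* Editorial note: which KSP and preconditioner are examined above is normally chosen
     at runtime through the "tao_nls_" options prefix attached to the KSP in
     TaoCreate_NLS(), e.g. (illustrative command line, assuming no additional Tao prefix):

       -tao_nls_ksp_type stcg -tao_nls_pc_type lmvm

     selects the Steihaug-Toint CG solver together with the LMVM (BFGS) preconditioner
     handled by the is_bfgs branch above. */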

120:   /* Initialize trust-region radius.  The initialization is only performed
121:      when we are using Nash, Steihaug-Toint or the Generalized Lanczos method. */
122:   if (is_nash || is_stcg || is_gltr) {
123:     switch (nlsP->init_type) {
124:     case NLS_INIT_CONSTANT:
125:       /* Use the initial radius specified */
126:       break;

128:     case NLS_INIT_INTERPOLATION:
129:       /* Use interpolation to determine the initial radius */
130:       max_radius = 0.0;

132:       for (j = 0; j < j_max; ++j) {
133:         fmin  = f;
134:         sigma = 0.0;

136:         if (needH) {
137:           TaoComputeHessian(tao, tao->solution, tao->hessian, tao->hessian_pre);
138:           needH = 0;
139:         }

141:         for (i = 0; i < i_max; ++i) {
142:           VecCopy(tao->solution, nlsP->W);
143:           VecAXPY(nlsP->W, -tao->trust / gnorm, tao->gradient);
144:           TaoComputeObjective(tao, nlsP->W, &ftrial);
145:           if (PetscIsInfOrNanReal(ftrial)) {
146:             tau = nlsP->gamma1_i;
147:           } else {
148:             if (ftrial < fmin) {
149:               fmin  = ftrial;
150:               sigma = -tao->trust / gnorm;
151:             }

153:             MatMult(tao->hessian, tao->gradient, nlsP->D);
154:             VecDot(tao->gradient, nlsP->D, &prered);
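            /* Editorial note: at this point prered holds g'Hg.  The statement below
               evaluates the predicted reduction of the quadratic model along the
               trial step -(Delta/||g||)*g, namely
                 pred = Delta*||g|| - 0.5*Delta^2*(g'Hg)/||g||^2,
               with Delta = tao->trust and ||g|| = gnorm. */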

156:             prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
157:             actred = f - ftrial;
158:             if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
159:               kappa = 1.0;
160:             } else {
161:               kappa = actred / prered;
162:             }

164:             tau_1   = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust + (1.0 - nlsP->theta_i) * prered - actred);
165:             tau_2   = nlsP->theta_i * gnorm * tao->trust / (nlsP->theta_i * gnorm * tao->trust - (1.0 + nlsP->theta_i) * prered + actred);
166:             tau_min = PetscMin(tau_1, tau_2);
167:             tau_max = PetscMax(tau_1, tau_2);

169:             if (PetscAbsScalar(kappa - (PetscReal)1.0) <= nlsP->mu1_i) {
170:               /* Great agreement */
171:               max_radius = PetscMax(max_radius, tao->trust);

173:               if (tau_max < 1.0) {
174:                 tau = nlsP->gamma3_i;
175:               } else if (tau_max > nlsP->gamma4_i) {
176:                 tau = nlsP->gamma4_i;
177:               } else if (tau_1 >= 1.0 && tau_1 <= nlsP->gamma4_i && tau_2 < 1.0) {
178:                 tau = tau_1;
179:               } else if (tau_2 >= 1.0 && tau_2 <= nlsP->gamma4_i && tau_1 < 1.0) {
180:                 tau = tau_2;
181:               } else {
182:                 tau = tau_max;
183:               }
184:             } else if (PetscAbsScalar(kappa - (PetscReal)1.0) <= nlsP->mu2_i) {
185:               /* Good agreement */
186:               max_radius = PetscMax(max_radius, tao->trust);

188:               if (tau_max < nlsP->gamma2_i) {
189:                 tau = nlsP->gamma2_i;
190:               } else if (tau_max > nlsP->gamma3_i) {
191:                 tau = nlsP->gamma3_i;
192:               } else {
193:                 tau = tau_max;
194:               }
195:             } else {
196:               /* Not good agreement */
197:               if (tau_min > 1.0) {
198:                 tau = nlsP->gamma2_i;
199:               } else if (tau_max < nlsP->gamma1_i) {
200:                 tau = nlsP->gamma1_i;
201:               } else if ((tau_min < nlsP->gamma1_i) && (tau_max >= 1.0)) {
202:                 tau = nlsP->gamma1_i;
203:               } else if ((tau_1 >= nlsP->gamma1_i) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1_i) || (tau_2 >= 1.0))) {
204:                 tau = tau_1;
205:             } else if ((tau_2 >= nlsP->gamma1_i) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1_i) || (tau_1 >= 1.0))) {
206:                 tau = tau_2;
207:               } else {
208:                 tau = tau_max;
209:               }
210:             }
211:           }
212:           tao->trust = tau * tao->trust;
213:         }

215:         if (fmin < f) {
216:           f = fmin;
217:           VecAXPY(tao->solution, sigma, tao->gradient);
218:           TaoComputeGradient(tao, tao->solution, tao->gradient);

220:           TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm);
222:           needH = 1;

224:           TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its);
225:           TaoMonitor(tao, tao->niter, f, gnorm, 0.0, step);
226:           PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
227:           if (tao->reason != TAO_CONTINUE_ITERATING) return 0;
228:         }
229:       }
230:       tao->trust = PetscMax(tao->trust, max_radius);

232:       /* Modify the radius if it is too large or small */
233:       tao->trust = PetscMax(tao->trust, nlsP->min_radius);
234:       tao->trust = PetscMin(tao->trust, nlsP->max_radius);
235:       break;

237:     default:
238:       /* Norm of the first direction will initialize radius */
239:       tao->trust = 0.0;
240:       break;
241:     }
242:   }

244:   /* Reset the counters for Newton, BFGS, and gradient steps */
245:   nlsP->newt = 0;
246:   nlsP->bfgs = 0;
247:   nlsP->grad = 0;

249:   /* Have not converged; continue with Newton method */
250:   while (tao->reason == TAO_CONTINUE_ITERATING) {
251:     /* Call general purpose update function */
252:     PetscTryTypeMethod(tao, update, tao->niter, tao->user_update);
253:     ++tao->niter;
254:     tao->ksp_its = 0;

256:     /* Compute the Hessian */
257:     if (needH) TaoComputeHessian(tao, tao->solution, tao->hessian, tao->hessian_pre);

259:     /* Shift the Hessian matrix */
260:     if (pert > 0) {
261:       MatShift(tao->hessian, pert);
262:       if (tao->hessian != tao->hessian_pre) MatShift(tao->hessian_pre, pert);
263:     }

265:     if (nlsP->bfgs_pre) {
266:       MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
267:       ++bfgsUpdates;
268:     }

270:     /* Solve the Newton system of equations */
271:     KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre);
272:     if (is_nash || is_stcg || is_gltr) {
273:       KSPCGSetRadius(tao->ksp, nlsP->max_radius);
274:       KSPSolve(tao->ksp, tao->gradient, nlsP->D);
275:       KSPGetIterationNumber(tao->ksp, &kspits);
276:       tao->ksp_its += kspits;
277:       tao->ksp_tot_its += kspits;
278:       KSPCGGetNormD(tao->ksp, &norm_d);

280:       if (0.0 == tao->trust) {
281:         /* Radius was uninitialized; use the norm of the direction */
282:         if (norm_d > 0.0) {
283:           tao->trust = norm_d;

285:           /* Modify the radius if it is too large or small */
286:           tao->trust = PetscMax(tao->trust, nlsP->min_radius);
287:           tao->trust = PetscMin(tao->trust, nlsP->max_radius);
288:         } else {
289:           /* The direction was bad; set radius to default value and re-solve
290:              the trust-region subproblem to get a direction */
291:           tao->trust = tao->trust0;

293:           /* Modify the radius if it is too large or small */
294:           tao->trust = PetscMax(tao->trust, nlsP->min_radius);
295:           tao->trust = PetscMin(tao->trust, nlsP->max_radius);

297:           KSPCGSetRadius(tao->ksp, nlsP->max_radius);
298:           KSPSolve(tao->ksp, tao->gradient, nlsP->D);
299:           KSPGetIterationNumber(tao->ksp, &kspits);
300:           tao->ksp_its += kspits;
301:           tao->ksp_tot_its += kspits;
302:           KSPCGGetNormD(tao->ksp, &norm_d);

305:         }
306:       }
307:     } else {
308:       KSPSolve(tao->ksp, tao->gradient, nlsP->D);
309:       KSPGetIterationNumber(tao->ksp, &kspits);
310:       tao->ksp_its += kspits;
311:       tao->ksp_tot_its += kspits;
312:     }
313:     VecScale(nlsP->D, -1.0);
314:     KSPGetConvergedReason(tao->ksp, &ksp_reason);
315:     if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) && (nlsP->bfgs_pre)) {
316:       /* Preconditioner is numerically indefinite; reset the
317:          approximation if using BFGS preconditioning. */
318:       MatLMVMReset(nlsP->M, PETSC_FALSE);
319:       MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
320:       bfgsUpdates = 1;
321:     }

323:     if (KSP_CONVERGED_ATOL == ksp_reason) {
324:       ++nlsP->ksp_atol;
325:     } else if (KSP_CONVERGED_RTOL == ksp_reason) {
326:       ++nlsP->ksp_rtol;
327:     } else if (KSP_CONVERGED_CG_CONSTRAINED == ksp_reason) {
328:       ++nlsP->ksp_ctol;
329:     } else if (KSP_CONVERGED_CG_NEG_CURVE == ksp_reason) {
330:       ++nlsP->ksp_negc;
331:     } else if (KSP_DIVERGED_DTOL == ksp_reason) {
332:       ++nlsP->ksp_dtol;
333:     } else if (KSP_DIVERGED_ITS == ksp_reason) {
334:       ++nlsP->ksp_iter;
335:     } else {
336:       ++nlsP->ksp_othr;
337:     }

339:     /* Check for success (descent direction) */
340:     VecDot(nlsP->D, tao->gradient, &gdx);
341:     if ((gdx >= 0.0) || PetscIsInfOrNanReal(gdx)) {
342:       /* Newton step is not a descent direction or produced Inf or NaN;
343:          update the perturbation for next time */
344:       if (pert <= 0.0) {
345:         /* Initialize the perturbation */
346:         pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
347:         if (is_gltr) {
348:           KSPGLTRGetMinEig(tao->ksp, &e_min);
349:           pert = PetscMax(pert, -e_min);
350:         }
351:       } else {
352:         /* Increase the perturbation */
353:         pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
354:       }
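      /* Editorial note: with the defaults set in TaoCreate_NLS() (imfac = 0.1,
         imin = 1e-4, imax = 1e2, pgfac = 10, pmgfac = 0.1, pmax = 1e2), a unit
         gradient norm gives an initial perturbation of 0.1, and each subsequent
         failure multiplies the perturbation by 10 until it is capped at 1e2. */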

356:       if (!nlsP->bfgs_pre) {
357:         /* We don't have the BFGS matrix around and being updated;
358:            must use the gradient direction in this case */
359:         VecCopy(tao->gradient, nlsP->D);
360:         VecScale(nlsP->D, -1.0);
361:         ++nlsP->grad;
362:         stepType = NLS_GRADIENT;
363:       } else {
364:         /* Attempt to use the BFGS direction */
365:         MatSolve(nlsP->M, tao->gradient, nlsP->D);
366:         VecScale(nlsP->D, -1.0);

368:         /* Check for success (descent direction) */
369:         VecDot(tao->gradient, nlsP->D, &gdx);
370:         if ((gdx >= 0) || PetscIsInfOrNanReal(gdx)) {
371:           /* BFGS direction is not a descent direction or produced Inf or NaN.
372:              We can assert bfgsUpdates > 1 in this case because
373:              the first solve produces the scaled gradient direction,
374:              which is guaranteed to be a descent direction */

376:           /* Use steepest descent direction (scaled) */
377:           MatLMVMReset(nlsP->M, PETSC_FALSE);
378:           MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
379:           MatSolve(nlsP->M, tao->gradient, nlsP->D);
380:           VecScale(nlsP->D, -1.0);

382:           bfgsUpdates = 1;
383:           ++nlsP->grad;
384:           stepType = NLS_GRADIENT;
385:         } else {
386:           MatLMVMGetUpdateCount(nlsP->M, &bfgsUpdates);
387:           if (1 == bfgsUpdates) {
388:             /* The first BFGS direction is always the scaled gradient */
389:             ++nlsP->grad;
390:             stepType = NLS_GRADIENT;
391:           } else {
392:             ++nlsP->bfgs;
393:             stepType = NLS_BFGS;
394:           }
395:         }
396:       }
397:     } else {
398:       /* Computed Newton step is descent */
399:       switch (ksp_reason) {
400:       case KSP_DIVERGED_NANORINF:
401:       case KSP_DIVERGED_BREAKDOWN:
402:       case KSP_DIVERGED_INDEFINITE_MAT:
403:       case KSP_DIVERGED_INDEFINITE_PC:
404:       case KSP_CONVERGED_CG_NEG_CURVE:
405:         /* Matrix or preconditioner is indefinite; increase perturbation */
406:         if (pert <= 0.0) {
407:           /* Initialize the perturbation */
408:           pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
409:           if (is_gltr) {
410:             KSPGLTRGetMinEig(tao->ksp, &e_min);
411:             pert = PetscMax(pert, -e_min);
412:           }
413:         } else {
414:           /* Increase the perturbation */
415:           pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
416:         }
417:         break;

419:       default:
420:         /* Newton step computation is good; decrease perturbation */
421:         pert = PetscMin(nlsP->psfac * pert, nlsP->pmsfac * gnorm);
422:         if (pert < nlsP->pmin) pert = 0.0;
423:         break;
424:       }
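      /* Editorial note: with the defaults psfac = 0.4, pmsfac = 0.1, and pmin = 1e-12
         from TaoCreate_NLS(), a good Newton solve shrinks the perturbation to
         min(0.4*pert, 0.1*gnorm) and zeroes it once it drops below 1e-12. */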

426:       ++nlsP->newt;
427:       stepType = NLS_NEWTON;
428:     }

430:     /* Perform the linesearch */
431:     fold = f;
432:     VecCopy(tao->solution, nlsP->Xold);
433:     VecCopy(tao->gradient, nlsP->Gold);

435:     TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
436:     TaoAddLineSearchCounts(tao);

438:     while (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER && stepType != NLS_GRADIENT) { /* Linesearch failed */
439:       f = fold;
440:       VecCopy(nlsP->Xold, tao->solution);
441:       VecCopy(nlsP->Gold, tao->gradient);

443:       switch (stepType) {
444:       case NLS_NEWTON:
445:         /* Failed to obtain an acceptable iterate with the Newton step;
446:            update the perturbation for next time */
447:         if (pert <= 0.0) {
448:           /* Initialize the perturbation */
449:           pert = PetscMin(nlsP->imax, PetscMax(nlsP->imin, nlsP->imfac * gnorm));
450:           if (is_gltr) {
451:             KSPGLTRGetMinEig(tao->ksp, &e_min);
452:             pert = PetscMax(pert, -e_min);
453:           }
454:         } else {
455:           /* Increase the perturbation */
456:           pert = PetscMin(nlsP->pmax, PetscMax(nlsP->pgfac * pert, nlsP->pmgfac * gnorm));
457:         }

459:         if (!nlsP->bfgs_pre) {
460:           /* We don't have the BFGS matrix around and being updated;
461:              must use the gradient direction in this case */
462:           VecCopy(tao->gradient, nlsP->D);
463:           ++nlsP->grad;
464:           stepType = NLS_GRADIENT;
465:         } else {
466:           /* Attempt to use the BFGS direction */
467:           MatSolve(nlsP->M, tao->gradient, nlsP->D);
468:           /* Check for success (descent direction) */
469:           VecDot(tao->gradient, nlsP->D, &gdx);
470:           if ((gdx <= 0) || PetscIsInfOrNanReal(gdx)) {
471:             /* BFGS direction is not a descent direction or produced Inf or NaN.
472:                We can assert bfgsUpdates > 1 in this case;
473:                use the scaled steepest descent direction instead */
474:             MatLMVMReset(nlsP->M, PETSC_FALSE);
475:             MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
476:             MatSolve(nlsP->M, tao->gradient, nlsP->D);

478:             bfgsUpdates = 1;
479:             ++nlsP->grad;
480:             stepType = NLS_GRADIENT;
481:           } else {
482:             if (1 == bfgsUpdates) {
483:               /* The first BFGS direction is always the scaled gradient */
484:               ++nlsP->grad;
485:               stepType = NLS_GRADIENT;
486:             } else {
487:               ++nlsP->bfgs;
488:               stepType = NLS_BFGS;
489:             }
490:           }
491:         }
492:         break;

494:       case NLS_BFGS:
495:         /* Can only enter here if the BFGS preconditioner is in use.
496:            Failed to obtain an acceptable iterate with the BFGS step;
497:            attempt to use the scaled gradient direction */
498:         MatLMVMReset(nlsP->M, PETSC_FALSE);
499:         MatLMVMUpdate(nlsP->M, tao->solution, tao->gradient);
500:         MatSolve(nlsP->M, tao->gradient, nlsP->D);

502:         bfgsUpdates = 1;
503:         ++nlsP->grad;
504:         stepType = NLS_GRADIENT;
505:         break;
506:       }
507:       VecScale(nlsP->D, -1.0);

509:       TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, nlsP->D, &step, &ls_reason);
510:       TaoLineSearchGetFullStepObjective(tao->linesearch, &f_full);
511:       TaoAddLineSearchCounts(tao);
512:     }

514:     if (ls_reason != TAOLINESEARCH_SUCCESS && ls_reason != TAOLINESEARCH_SUCCESS_USER) {
515:       /* Failed to find an improving point */
516:       f = fold;
517:       VecCopy(nlsP->Xold, tao->solution);
518:       VecCopy(nlsP->Gold, tao->gradient);
519:       step        = 0.0;
520:       tao->reason = TAO_DIVERGED_LS_FAILURE;
521:       break;
522:     }

524:     /* Update trust region radius */
525:     if (is_nash || is_stcg || is_gltr) {
526:       switch (nlsP->update_type) {
527:       case NLS_UPDATE_STEP:
528:         if (stepType == NLS_NEWTON) {
529:           if (step < nlsP->nu1) {
530:             /* Very bad step taken; reduce radius */
531:             tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
532:           } else if (step < nlsP->nu2) {
533:             /* Reasonably bad step taken; reduce radius */
534:             tao->trust = nlsP->omega2 * PetscMin(norm_d, tao->trust);
535:           } else if (step < nlsP->nu3) {
536:             /*  Reasonable step was taken; leave radius alone */
537:             if (nlsP->omega3 < 1.0) {
538:               tao->trust = nlsP->omega3 * PetscMin(norm_d, tao->trust);
539:             } else if (nlsP->omega3 > 1.0) {
540:               tao->trust = PetscMax(nlsP->omega3 * norm_d, tao->trust);
541:             }
542:           } else if (step < nlsP->nu4) {
543:             /*  Full step taken; increase the radius */
544:             tao->trust = PetscMax(nlsP->omega4 * norm_d, tao->trust);
545:           } else {
546:             /*  More than full step taken; increase the radius */
547:             tao->trust = PetscMax(nlsP->omega5 * norm_d, tao->trust);
548:           }
549:         } else {
550:           /*  Newton step was not good; reduce the radius */
551:           tao->trust = nlsP->omega1 * PetscMin(norm_d, tao->trust);
552:         }
553:         break;
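      /* Editorial note: with the defaults nu1..nu4 = 0.25, 0.50, 1.00, 1.25 and
         omega1..omega5 = 0.25, 0.50, 1.00, 2.00, 4.00 set in TaoCreate_NLS(), a step
         shorter than 0.25 shrinks the radius to 0.25*min(||d||, radius), one shorter
         than 0.50 to 0.50*min(||d||, radius), one up to a full step leaves it unchanged,
         a full step raises it to at least 2*||d||, and a step beyond 1.25 raises it to
         at least 4*||d||. */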

555:       case NLS_UPDATE_REDUCTION:
556:         if (stepType == NLS_NEWTON) {
557:           /*  Get predicted reduction */

559:           KSPCGGetObjFcn(tao->ksp, &prered);
560:           if (prered >= 0.0) {
561:             /*  The predicted reduction has the wrong sign.  This cannot */
562:             /*  happen in infinite precision arithmetic.  Step should */
563:             /*  be rejected! */
564:             tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
565:           } else {
566:             if (PetscIsInfOrNanReal(f_full)) {
567:               tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
568:             } else {
569:               /*  Compute the actual reduction */
570:               actred = fold - f_full;
571:               prered = -prered;
572:               if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
573:                 kappa = 1.0;
574:               } else {
575:                 kappa = actred / prered;
576:               }

578:               /*  Accept or reject the step and update the radius */
579:               if (kappa < nlsP->eta1) {
580:                 /*  Very bad step */
581:                 tao->trust = nlsP->alpha1 * PetscMin(tao->trust, norm_d);
582:               } else if (kappa < nlsP->eta2) {
583:                 /*  Marginally bad step */
584:                 tao->trust = nlsP->alpha2 * PetscMin(tao->trust, norm_d);
585:               } else if (kappa < nlsP->eta3) {
586:                 /*  Reasonable step */
587:                 if (nlsP->alpha3 < 1.0) {
588:                   tao->trust = nlsP->alpha3 * PetscMin(norm_d, tao->trust);
589:                 } else if (nlsP->alpha3 > 1.0) {
590:                   tao->trust = PetscMax(nlsP->alpha3 * norm_d, tao->trust);
591:                 }
592:               } else if (kappa < nlsP->eta4) {
593:                 /*  Good step */
594:                 tao->trust = PetscMax(nlsP->alpha4 * norm_d, tao->trust);
595:               } else {
596:                 /*  Very good step */
597:                 tao->trust = PetscMax(nlsP->alpha5 * norm_d, tao->trust);
598:               }
599:             }
600:           }
601:         } else {
602:           /*  Newton step was not good; reduce the radius */
603:           tao->trust = nlsP->alpha1 * PetscMin(norm_d, tao->trust);
604:         }
605:         break;

607:       default:
608:         if (stepType == NLS_NEWTON) {
609:           KSPCGGetObjFcn(tao->ksp, &prered);
610:           if (prered >= 0.0) {
611:             /*  The predicted reduction has the wrong sign.  This cannot */
612:             /*  happen in infinite precision arithmetic.  Step should */
613:             /*  be rejected! */
614:             tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
615:           } else {
616:             if (PetscIsInfOrNanReal(f_full)) {
617:               tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
618:             } else {
619:               actred = fold - f_full;
620:               prered = -prered;
621:               if ((PetscAbsScalar(actred) <= nlsP->epsilon) && (PetscAbsScalar(prered) <= nlsP->epsilon)) {
622:                 kappa = 1.0;
623:               } else {
624:                 kappa = actred / prered;
625:               }

627:               tau_1   = nlsP->theta * gdx / (nlsP->theta * gdx - (1.0 - nlsP->theta) * prered + actred);
628:               tau_2   = nlsP->theta * gdx / (nlsP->theta * gdx + (1.0 + nlsP->theta) * prered - actred);
629:               tau_min = PetscMin(tau_1, tau_2);
630:               tau_max = PetscMax(tau_1, tau_2);

632:               if (kappa >= 1.0 - nlsP->mu1) {
633:                 /*  Great agreement */
634:                 if (tau_max < 1.0) {
635:                   tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
636:                 } else if (tau_max > nlsP->gamma4) {
637:                   tao->trust = PetscMax(tao->trust, nlsP->gamma4 * norm_d);
638:                 } else {
639:                   tao->trust = PetscMax(tao->trust, tau_max * norm_d);
640:                 }
641:               } else if (kappa >= 1.0 - nlsP->mu2) {
642:                 /*  Good agreement */

644:                 if (tau_max < nlsP->gamma2) {
645:                   tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
646:                 } else if (tau_max > nlsP->gamma3) {
647:                   tao->trust = PetscMax(tao->trust, nlsP->gamma3 * norm_d);
648:                 } else if (tau_max < 1.0) {
649:                   tao->trust = tau_max * PetscMin(tao->trust, norm_d);
650:                 } else {
651:                   tao->trust = PetscMax(tao->trust, tau_max * norm_d);
652:                 }
653:               } else {
654:                 /*  Not good agreement */
655:                 if (tau_min > 1.0) {
656:                   tao->trust = nlsP->gamma2 * PetscMin(tao->trust, norm_d);
657:                 } else if (tau_max < nlsP->gamma1) {
658:                   tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
659:                 } else if ((tau_min < nlsP->gamma1) && (tau_max >= 1.0)) {
660:                   tao->trust = nlsP->gamma1 * PetscMin(tao->trust, norm_d);
661:                 } else if ((tau_1 >= nlsP->gamma1) && (tau_1 < 1.0) && ((tau_2 < nlsP->gamma1) || (tau_2 >= 1.0))) {
662:                   tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
663:                 } else if ((tau_2 >= nlsP->gamma1) && (tau_2 < 1.0) && ((tau_1 < nlsP->gamma1) || (tau_1 >= 1.0))) {
664:                   tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
665:                 } else {
666:                   tao->trust = tau_max * PetscMin(tao->trust, norm_d);
667:                 }
668:               }
669:             }
670:           }
671:         } else {
672:           /*  Newton step was not good; reduce the radius */
673:           tao->trust = nlsP->gamma1 * PetscMin(norm_d, tao->trust);
674:         }
675:         break;
676:       }

678:       /*  The radius may have been increased; modify if it is too large */
679:       tao->trust = PetscMin(tao->trust, nlsP->max_radius);
680:     }

682:     /*  Check for termination */
683:     TaoGradientNorm(tao, tao->gradient, NORM_2, &gnorm);
685:     needH = 1;
686:     TaoLogConvergenceHistory(tao, f, gnorm, 0.0, tao->ksp_its);
687:     TaoMonitor(tao, tao->niter, f, gnorm, 0.0, step);
688:     PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
689:   }
690:   return 0;
691: }

693: /* ---------------------------------------------------------- */
694: static PetscErrorCode TaoSetUp_NLS(Tao tao)
695: {
696:   TAO_NLS *nlsP = (TAO_NLS *)tao->data;

698:   if (!tao->gradient) VecDuplicate(tao->solution, &tao->gradient);
699:   if (!tao->stepdirection) VecDuplicate(tao->solution, &tao->stepdirection);
700:   if (!nlsP->W) VecDuplicate(tao->solution, &nlsP->W);
701:   if (!nlsP->D) VecDuplicate(tao->solution, &nlsP->D);
702:   if (!nlsP->Xold) VecDuplicate(tao->solution, &nlsP->Xold);
703:   if (!nlsP->Gold) VecDuplicate(tao->solution, &nlsP->Gold);
704:   nlsP->M        = NULL;
705:   nlsP->bfgs_pre = NULL;
706:   return 0;
707: }

709: /*------------------------------------------------------------*/
710: static PetscErrorCode TaoDestroy_NLS(Tao tao)
711: {
712:   TAO_NLS *nlsP = (TAO_NLS *)tao->data;

714:   if (tao->setupcalled) {
715:     VecDestroy(&nlsP->D);
716:     VecDestroy(&nlsP->W);
717:     VecDestroy(&nlsP->Xold);
718:     VecDestroy(&nlsP->Gold);
719:   }
720:   KSPDestroy(&tao->ksp);
721:   PetscFree(tao->data);
722:   return 0;
723: }

725: /*------------------------------------------------------------*/
726: static PetscErrorCode TaoSetFromOptions_NLS(Tao tao, PetscOptionItems *PetscOptionsObject)
727: {
728:   TAO_NLS *nlsP = (TAO_NLS *)tao->data;

730:   PetscOptionsHeadBegin(PetscOptionsObject, "Newton line search method for unconstrained optimization");
731:   PetscOptionsEList("-tao_nls_init_type", "radius initialization type", "", NLS_INIT, NLS_INIT_TYPES, NLS_INIT[nlsP->init_type], &nlsP->init_type, NULL);
732:   PetscOptionsEList("-tao_nls_update_type", "radius update type", "", NLS_UPDATE, NLS_UPDATE_TYPES, NLS_UPDATE[nlsP->update_type], &nlsP->update_type, NULL);
733:   PetscOptionsReal("-tao_nls_sval", "perturbation starting value", "", nlsP->sval, &nlsP->sval, NULL);
734:   PetscOptionsReal("-tao_nls_imin", "minimum initial perturbation", "", nlsP->imin, &nlsP->imin, NULL);
735:   PetscOptionsReal("-tao_nls_imax", "maximum initial perturbation", "", nlsP->imax, &nlsP->imax, NULL);
736:   PetscOptionsReal("-tao_nls_imfac", "initial merit factor", "", nlsP->imfac, &nlsP->imfac, NULL);
737:   PetscOptionsReal("-tao_nls_pmin", "minimum perturbation", "", nlsP->pmin, &nlsP->pmin, NULL);
738:   PetscOptionsReal("-tao_nls_pmax", "maximum perturbation", "", nlsP->pmax, &nlsP->pmax, NULL);
739:   PetscOptionsReal("-tao_nls_pgfac", "growth factor", "", nlsP->pgfac, &nlsP->pgfac, NULL);
740:   PetscOptionsReal("-tao_nls_psfac", "shrink factor", "", nlsP->psfac, &nlsP->psfac, NULL);
741:   PetscOptionsReal("-tao_nls_pmgfac", "merit growth factor", "", nlsP->pmgfac, &nlsP->pmgfac, NULL);
742:   PetscOptionsReal("-tao_nls_pmsfac", "merit shrink factor", "", nlsP->pmsfac, &nlsP->pmsfac, NULL);
743:   PetscOptionsReal("-tao_nls_eta1", "poor steplength; reduce radius", "", nlsP->eta1, &nlsP->eta1, NULL);
744:   PetscOptionsReal("-tao_nls_eta2", "reasonable steplength; leave radius alone", "", nlsP->eta2, &nlsP->eta2, NULL);
745:   PetscOptionsReal("-tao_nls_eta3", "good steplength; increase radius", "", nlsP->eta3, &nlsP->eta3, NULL);
746:   PetscOptionsReal("-tao_nls_eta4", "excellent steplength; greatly increase radius", "", nlsP->eta4, &nlsP->eta4, NULL);
747:   PetscOptionsReal("-tao_nls_alpha1", "", "", nlsP->alpha1, &nlsP->alpha1, NULL);
748:   PetscOptionsReal("-tao_nls_alpha2", "", "", nlsP->alpha2, &nlsP->alpha2, NULL);
749:   PetscOptionsReal("-tao_nls_alpha3", "", "", nlsP->alpha3, &nlsP->alpha3, NULL);
750:   PetscOptionsReal("-tao_nls_alpha4", "", "", nlsP->alpha4, &nlsP->alpha4, NULL);
751:   PetscOptionsReal("-tao_nls_alpha5", "", "", nlsP->alpha5, &nlsP->alpha5, NULL);
752:   PetscOptionsReal("-tao_nls_nu1", "poor steplength; reduce radius", "", nlsP->nu1, &nlsP->nu1, NULL);
753:   PetscOptionsReal("-tao_nls_nu2", "reasonable steplength; leave radius alone", "", nlsP->nu2, &nlsP->nu2, NULL);
754:   PetscOptionsReal("-tao_nls_nu3", "good steplength; increase radius", "", nlsP->nu3, &nlsP->nu3, NULL);
755:   PetscOptionsReal("-tao_nls_nu4", "excellent steplength; greatly increase radius", "", nlsP->nu4, &nlsP->nu4, NULL);
756:   PetscOptionsReal("-tao_nls_omega1", "", "", nlsP->omega1, &nlsP->omega1, NULL);
757:   PetscOptionsReal("-tao_nls_omega2", "", "", nlsP->omega2, &nlsP->omega2, NULL);
758:   PetscOptionsReal("-tao_nls_omega3", "", "", nlsP->omega3, &nlsP->omega3, NULL);
759:   PetscOptionsReal("-tao_nls_omega4", "", "", nlsP->omega4, &nlsP->omega4, NULL);
760:   PetscOptionsReal("-tao_nls_omega5", "", "", nlsP->omega5, &nlsP->omega5, NULL);
761:   PetscOptionsReal("-tao_nls_mu1_i", "", "", nlsP->mu1_i, &nlsP->mu1_i, NULL);
762:   PetscOptionsReal("-tao_nls_mu2_i", "", "", nlsP->mu2_i, &nlsP->mu2_i, NULL);
763:   PetscOptionsReal("-tao_nls_gamma1_i", "", "", nlsP->gamma1_i, &nlsP->gamma1_i, NULL);
764:   PetscOptionsReal("-tao_nls_gamma2_i", "", "", nlsP->gamma2_i, &nlsP->gamma2_i, NULL);
765:   PetscOptionsReal("-tao_nls_gamma3_i", "", "", nlsP->gamma3_i, &nlsP->gamma3_i, NULL);
766:   PetscOptionsReal("-tao_nls_gamma4_i", "", "", nlsP->gamma4_i, &nlsP->gamma4_i, NULL);
767:   PetscOptionsReal("-tao_nls_theta_i", "", "", nlsP->theta_i, &nlsP->theta_i, NULL);
768:   PetscOptionsReal("-tao_nls_mu1", "", "", nlsP->mu1, &nlsP->mu1, NULL);
769:   PetscOptionsReal("-tao_nls_mu2", "", "", nlsP->mu2, &nlsP->mu2, NULL);
770:   PetscOptionsReal("-tao_nls_gamma1", "", "", nlsP->gamma1, &nlsP->gamma1, NULL);
771:   PetscOptionsReal("-tao_nls_gamma2", "", "", nlsP->gamma2, &nlsP->gamma2, NULL);
772:   PetscOptionsReal("-tao_nls_gamma3", "", "", nlsP->gamma3, &nlsP->gamma3, NULL);
773:   PetscOptionsReal("-tao_nls_gamma4", "", "", nlsP->gamma4, &nlsP->gamma4, NULL);
774:   PetscOptionsReal("-tao_nls_theta", "", "", nlsP->theta, &nlsP->theta, NULL);
775:   PetscOptionsReal("-tao_nls_min_radius", "lower bound on initial radius", "", nlsP->min_radius, &nlsP->min_radius, NULL);
776:   PetscOptionsReal("-tao_nls_max_radius", "upper bound on radius", "", nlsP->max_radius, &nlsP->max_radius, NULL);
777:   PetscOptionsReal("-tao_nls_epsilon", "tolerance used when computing actual and predicted reduction", "", nlsP->epsilon, &nlsP->epsilon, NULL);
778:   PetscOptionsHeadEnd();
779:   TaoLineSearchSetFromOptions(tao->linesearch);
780:   KSPSetFromOptions(tao->ksp);
781:   return 0;
782: }

784: /*------------------------------------------------------------*/
785: static PetscErrorCode TaoView_NLS(Tao tao, PetscViewer viewer)
786: {
787:   TAO_NLS  *nlsP = (TAO_NLS *)tao->data;
788:   PetscBool isascii;

790:   PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii);
791:   if (isascii) {
792:     PetscViewerASCIIPushTab(viewer);
793:     PetscViewerASCIIPrintf(viewer, "Newton steps: %" PetscInt_FMT "\n", nlsP->newt);
794:     PetscViewerASCIIPrintf(viewer, "BFGS steps: %" PetscInt_FMT "\n", nlsP->bfgs);
795:     PetscViewerASCIIPrintf(viewer, "Gradient steps: %" PetscInt_FMT "\n", nlsP->grad);

797:     PetscViewerASCIIPrintf(viewer, "nls ksp atol: %" PetscInt_FMT "\n", nlsP->ksp_atol);
798:     PetscViewerASCIIPrintf(viewer, "nls ksp rtol: %" PetscInt_FMT "\n", nlsP->ksp_rtol);
799:     PetscViewerASCIIPrintf(viewer, "nls ksp ctol: %" PetscInt_FMT "\n", nlsP->ksp_ctol);
800:     PetscViewerASCIIPrintf(viewer, "nls ksp negc: %" PetscInt_FMT "\n", nlsP->ksp_negc);
801:     PetscViewerASCIIPrintf(viewer, "nls ksp dtol: %" PetscInt_FMT "\n", nlsP->ksp_dtol);
802:     PetscViewerASCIIPrintf(viewer, "nls ksp iter: %" PetscInt_FMT "\n", nlsP->ksp_iter);
803:     PetscViewerASCIIPrintf(viewer, "nls ksp othr: %" PetscInt_FMT "\n", nlsP->ksp_othr);
804:     PetscViewerASCIIPopTab(viewer);
805:   }
806:   return 0;
807: }

809: /* ---------------------------------------------------------- */
810: /*MC
811:   TAONLS - Newton's method with line search for unconstrained minimization.
812:   At each iteration, the Newton line search method solves the symmetric
813:   system of equations to obtain the step direction dk:
814:               Hk dk = -gk
815:   A Moré-Thuente line search is then applied along the direction dk to
816:   approximately solve
817:               min_t f(xk + t dk)

819:     Options Database Keys:
820: + -tao_nls_init_type - "constant","direction","interpolation"
821: . -tao_nls_update_type - "step","reduction","interpolation"
822: . -tao_nls_sval - perturbation starting value
823: . -tao_nls_imin - minimum initial perturbation
824: . -tao_nls_imax - maximum initial perturbation
825: . -tao_nls_pmin - minimum perturbation
826: . -tao_nls_pmax - maximum perturbation
827: . -tao_nls_pgfac - growth factor
828: . -tao_nls_psfac - shrink factor
829: . -tao_nls_imfac - initial merit factor
830: . -tao_nls_pmgfac - merit growth factor
831: . -tao_nls_pmsfac - merit shrink factor
832: . -tao_nls_eta1 - poor steplength; reduce radius
833: . -tao_nls_eta2 - reasonable steplength; leave radius alone
834: . -tao_nls_eta3 - good steplength; increase radius
835: . -tao_nls_eta4 - excellent steplength; greatly increase radius
836: . -tao_nls_alpha1 - alpha1 reduction
837: . -tao_nls_alpha2 - alpha2 reduction
838: . -tao_nls_alpha3 - alpha3 reduction
839: . -tao_nls_alpha4 - alpha4 reduction
840: . -tao_nls_alpha5 - alpha5 reduction
841: . -tao_nls_mu1 - mu1 interpolation update
842: . -tao_nls_mu2 - mu2 interpolation update
843: . -tao_nls_gamma1 - gamma1 interpolation update
844: . -tao_nls_gamma2 - gamma2 interpolation update
845: . -tao_nls_gamma3 - gamma3 interpolation update
846: . -tao_nls_gamma4 - gamma4 interpolation update
847: . -tao_nls_theta - theta interpolation update
848: . -tao_nls_omega1 - omega1 step update
849: . -tao_nls_omega2 - omega2 step update
850: . -tao_nls_omega3 - omega3 step update
851: . -tao_nls_omega4 - omega4 step update
852: . -tao_nls_omega5 - omega5 step update
853: . -tao_nls_mu1_i -  mu1 interpolation init factor
854: . -tao_nls_mu2_i -  mu2 interpolation init factor
855: . -tao_nls_gamma1_i -  gamma1 interpolation init factor
856: . -tao_nls_gamma2_i -  gamma2 interpolation init factor
857: . -tao_nls_gamma3_i -  gamma3 interpolation init factor
858: . -tao_nls_gamma4_i -  gamma4 interpolation init factor
859: - -tao_nls_theta_i -  theta interpolation init factor

861:   Level: beginner
862: M*/
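/*
 Editorial note: a minimal usage sketch (illustrative, not part of this file).  Here
 FormFunctionGradient and FormHessian are placeholder user callbacks, H is a
 user-created Mat, x is a Vec holding the initial guess, and user is an application
 context:

   Tao tao;
   TaoCreate(PETSC_COMM_WORLD, &tao);
   TaoSetType(tao, TAONLS);
   TaoSetSolution(tao, x);
   TaoSetObjectiveAndGradient(tao, NULL, FormFunctionGradient, &user);
   TaoSetHessian(tao, H, H, FormHessian, &user);
   TaoSetFromOptions(tao);   // picks up the -tao_nls_* options listed above
   TaoSolve(tao);
   TaoDestroy(&tao);
*/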

864: PETSC_EXTERN PetscErrorCode TaoCreate_NLS(Tao tao)
865: {
866:   TAO_NLS    *nlsP;
867:   const char *morethuente_type = TAOLINESEARCHMT;

869:   PetscNew(&nlsP);

871:   tao->ops->setup          = TaoSetUp_NLS;
872:   tao->ops->solve          = TaoSolve_NLS;
873:   tao->ops->view           = TaoView_NLS;
874:   tao->ops->setfromoptions = TaoSetFromOptions_NLS;
875:   tao->ops->destroy        = TaoDestroy_NLS;

877:   /* Override default settings (unless already changed) */
878:   if (!tao->max_it_changed) tao->max_it = 50;
879:   if (!tao->trust0_changed) tao->trust0 = 100.0;

881:   tao->data = (void *)nlsP;

883:   nlsP->sval  = 0.0;
884:   nlsP->imin  = 1.0e-4;
885:   nlsP->imax  = 1.0e+2;
886:   nlsP->imfac = 1.0e-1;

888:   nlsP->pmin   = 1.0e-12;
889:   nlsP->pmax   = 1.0e+2;
890:   nlsP->pgfac  = 1.0e+1;
891:   nlsP->psfac  = 4.0e-1;
892:   nlsP->pmgfac = 1.0e-1;
893:   nlsP->pmsfac = 1.0e-1;

895:   /*  Default values for trust-region radius update based on steplength */
896:   nlsP->nu1 = 0.25;
897:   nlsP->nu2 = 0.50;
898:   nlsP->nu3 = 1.00;
899:   nlsP->nu4 = 1.25;

901:   nlsP->omega1 = 0.25;
902:   nlsP->omega2 = 0.50;
903:   nlsP->omega3 = 1.00;
904:   nlsP->omega4 = 2.00;
905:   nlsP->omega5 = 4.00;

907:   /*  Default values for trust-region radius update based on reduction */
908:   nlsP->eta1 = 1.0e-4;
909:   nlsP->eta2 = 0.25;
910:   nlsP->eta3 = 0.50;
911:   nlsP->eta4 = 0.90;

913:   nlsP->alpha1 = 0.25;
914:   nlsP->alpha2 = 0.50;
915:   nlsP->alpha3 = 1.00;
916:   nlsP->alpha4 = 2.00;
917:   nlsP->alpha5 = 4.00;

919:   /*  Default values for trust-region radius update based on interpolation */
920:   nlsP->mu1 = 0.10;
921:   nlsP->mu2 = 0.50;

923:   nlsP->gamma1 = 0.25;
924:   nlsP->gamma2 = 0.50;
925:   nlsP->gamma3 = 2.00;
926:   nlsP->gamma4 = 4.00;

928:   nlsP->theta = 0.05;

930:   /*  Default values for trust region initialization based on interpolation */
931:   nlsP->mu1_i = 0.35;
932:   nlsP->mu2_i = 0.50;

934:   nlsP->gamma1_i = 0.0625;
935:   nlsP->gamma2_i = 0.5;
936:   nlsP->gamma3_i = 2.0;
937:   nlsP->gamma4_i = 5.0;

939:   nlsP->theta_i = 0.25;

941:   /*  Remaining parameters */
942:   nlsP->min_radius = 1.0e-10;
943:   nlsP->max_radius = 1.0e10;
944:   nlsP->epsilon    = 1.0e-6;

946:   nlsP->init_type   = NLS_INIT_INTERPOLATION;
947:   nlsP->update_type = NLS_UPDATE_STEP;

949:   TaoLineSearchCreate(((PetscObject)tao)->comm, &tao->linesearch);
950:   PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1);
951:   TaoLineSearchSetType(tao->linesearch, morethuente_type);
952:   TaoLineSearchUseTaoRoutines(tao->linesearch, tao);
953:   TaoLineSearchSetOptionsPrefix(tao->linesearch, tao->hdr.prefix);

955:   /*  Set linear solver to default for symmetric matrices */
956:   KSPCreate(((PetscObject)tao)->comm, &tao->ksp);
957:   PetscObjectIncrementTabLevel((PetscObject)tao->ksp, (PetscObject)tao, 1);
958:   KSPSetOptionsPrefix(tao->ksp, tao->hdr.prefix);
959:   KSPAppendOptionsPrefix(tao->ksp, "tao_nls_");
960:   KSPSetType(tao->ksp, KSPSTCG);
961:   return 0;
962: }