Actual source code: taosolver.c
1: #include <petsc/private/taoimpl.h>
2: #include <petsc/private/snesimpl.h>
4: PetscBool TaoRegisterAllCalled = PETSC_FALSE;
5: PetscFunctionList TaoList = NULL;
7: PetscClassId TAO_CLASSID;
9: PetscLogEvent TAO_Solve;
10: PetscLogEvent TAO_ObjectiveEval;
11: PetscLogEvent TAO_GradientEval;
12: PetscLogEvent TAO_ObjGradEval;
13: PetscLogEvent TAO_HessianEval;
14: PetscLogEvent TAO_JacobianEval;
15: PetscLogEvent TAO_ConstraintsEval;
17: const char *TaoSubSetTypes[] = {"subvec", "mask", "matrixfree", "TaoSubSetType", "TAO_SUBSET_", NULL};
19: struct _n_TaoMonitorDrawCtx {
20: PetscViewer viewer;
21: PetscInt howoften; /* when > 0 uses iteration % howoften, when negative only final solution plotted */
22: };
24: static PetscErrorCode KSPPreSolve_TAOEW_Private(KSP ksp, Vec b, Vec x, Tao tao)
25: {
26: SNES snes_ewdummy = tao->snes_ewdummy;
28: if (!snes_ewdummy) return 0;
29: /* populate snes_ewdummy struct values used in KSPPreSolve_SNESEW */
30: snes_ewdummy->vec_func = b;
31: snes_ewdummy->rtol = tao->gttol;
32: snes_ewdummy->iter = tao->niter;
33: VecNorm(b, NORM_2, &snes_ewdummy->norm);
34: KSPPreSolve_SNESEW(ksp, b, x, snes_ewdummy);
35: snes_ewdummy->vec_func = NULL;
36: return 0;
37: }
39: static PetscErrorCode KSPPostSolve_TAOEW_Private(KSP ksp, Vec b, Vec x, Tao tao)
40: {
41: SNES snes_ewdummy = tao->snes_ewdummy;
43: if (!snes_ewdummy) return 0;
44: KSPPostSolve_SNESEW(ksp, b, x, snes_ewdummy);
45: return 0;
46: }
48: static PetscErrorCode TaoSetUpEW_Private(Tao tao)
49: {
50: SNESKSPEW *kctx;
51: const char *ewprefix;
53: if (!tao->ksp) return 0;
54: if (tao->ksp_ewconv) {
55: if (!tao->snes_ewdummy) SNESCreate(PetscObjectComm((PetscObject)tao), &tao->snes_ewdummy);
56: tao->snes_ewdummy->ksp_ewconv = PETSC_TRUE;
57: KSPSetPreSolve(tao->ksp, (PetscErrorCode(*)(KSP, Vec, Vec, void *))KSPPreSolve_TAOEW_Private, tao);
58: KSPSetPostSolve(tao->ksp, (PetscErrorCode(*)(KSP, Vec, Vec, void *))KSPPostSolve_TAOEW_Private, tao);
60: KSPGetOptionsPrefix(tao->ksp, &ewprefix);
61: kctx = (SNESKSPEW *)tao->snes_ewdummy->kspconvctx;
62: SNESEWSetFromOptions_Private(kctx, PetscObjectComm((PetscObject)tao), ewprefix);
63: } else SNESDestroy(&tao->snes_ewdummy);
64: return 0;
65: }
67: /*@
68: TaoCreate - Creates a Tao solver
70: Collective
72: Input Parameter:
73: . comm - MPI communicator
75: Output Parameter:
76: . newtao - the new Tao context
78: Available methods include:
79: + `TAONLS` - nls Newton's method with line search for unconstrained minimization
80: . `TAONTR` - ntr Newton's method with trust region for unconstrained minimization
81: . `TAONTL` - ntl Newton's method with trust region, line search for unconstrained minimization
82: . `TAOLMVM` - lmvm Limited memory variable metric method for unconstrained minimization
83: . `TAOCG` - cg Nonlinear conjugate gradient method for unconstrained minimization
84: . `TAONM` - nm Nelder-Mead algorithm for derivative-free unconstrained minimization
85: . `TAOTRON` - tron Newton Trust Region method for bound constrained minimization
86: . `TAOGPCG` - gpcg Newton Trust Region method for quadratic bound constrained minimization
87: . `TAOBLMVM` - blmvm Limited memory variable metric method for bound constrained minimization
88: . `TAOLCL` - lcl Linearly constrained Lagrangian method for pde-constrained minimization
89: - `TAOPOUNDERS` - pounders Model-based algorithm for nonlinear least squares
91: Options Database Keys:
92: . -tao_type - select which method Tao should use
94: Level: beginner
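Example Usage:
A minimal illustrative sketch; the solution vector x and any objective/gradient callbacks are assumed to be created elsewhere by the application.
.vb
  Tao tao;

  PetscCall(TaoCreate(PETSC_COMM_WORLD, &tao));
  PetscCall(TaoSetType(tao, TAOLMVM));
  PetscCall(TaoSetSolution(tao, x));
  PetscCall(TaoSetFromOptions(tao));
  PetscCall(TaoSolve(tao));
  PetscCall(TaoDestroy(&tao));
.ve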
96: .seealso: `Tao`, `TaoSolve()`, `TaoDestroy()`, `TaoSetFromOptions()`, `TaoSetType()`
97: @*/
98: PetscErrorCode TaoCreate(MPI_Comm comm, Tao *newtao)
99: {
100: Tao tao;
103: TaoInitializePackage();
104: TaoLineSearchInitializePackage();
105: PetscHeaderCreate(tao, TAO_CLASSID, "Tao", "Optimization solver", "Tao", comm, TaoDestroy, TaoView);
107: /* Set non-NULL defaults */
108: tao->ops->convergencetest = TaoDefaultConvergenceTest;
110: tao->max_it = 10000;
111: tao->max_funcs = -1;
112: #if defined(PETSC_USE_REAL_SINGLE)
113: tao->gatol = 1e-5;
114: tao->grtol = 1e-5;
115: tao->crtol = 1e-5;
116: tao->catol = 1e-5;
117: #else
118: tao->gatol = 1e-8;
119: tao->grtol = 1e-8;
120: tao->crtol = 1e-8;
121: tao->catol = 1e-8;
122: #endif
123: tao->gttol = 0.0;
124: tao->steptol = 0.0;
125: tao->trust0 = PETSC_INFINITY;
126: tao->fmin = PETSC_NINFINITY;
128: tao->hist_reset = PETSC_TRUE;
130: TaoResetStatistics(tao);
131: *newtao = tao;
132: return 0;
133: }
135: /*@
136: TaoSolve - Solves an optimization problem min F(x) s.t. l <= x <= u
138: Collective
140: Input Parameters:
141: . tao - the Tao context
143: Level: beginner
145: Notes:
146: The user must set up the Tao with calls to `TaoSetSolution()`, `TaoSetObjective()`, `TaoSetGradient()`, and (if using 2nd order method) `TaoSetHessian()`.
148: You should call `TaoGetConvergedReason()` or run with `-tao_converged_reason` to determine if the optimization algorithm actually succeeded or
149: why it failed.
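Example Usage:
An illustrative sketch that solves and then checks the converged reason; it assumes tao has already been configured with a solution vector and callbacks.
.vb
  TaoConvergedReason reason;

  PetscCall(TaoSolve(tao));
  PetscCall(TaoGetConvergedReason(tao, &reason));
  if (reason < 0) PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Tao failed to converge\n"));
.ve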
151: .seealso: `Tao`, `TaoCreate()`, `TaoSetObjective()`, `TaoSetGradient()`, `TaoSetHessian()`, `TaoGetConvergedReason()`, `TaoSetUp()`
152: @*/
153: PetscErrorCode TaoSolve(Tao tao)
154: {
155: static PetscBool set = PETSC_FALSE;
158: PetscCall(PetscCitationsRegister("@TechReport{tao-user-ref,\n"
159: "title = {Toolkit for Advanced Optimization (TAO) Users Manual},\n"
160: "author = {Todd Munson and Jason Sarich and Stefan Wild and Steve Benson and Lois Curfman McInnes},\n"
161: "Institution = {Argonne National Laboratory},\n"
162: "Year = 2014,\n"
163: "Number = {ANL/MCS-TM-322 - Revision 3.5},\n"
164: "url = {https://www.mcs.anl.gov/research/projects/tao/}\n}\n",
165: &set));
166: tao->header_printed = PETSC_FALSE;
167: TaoSetUp(tao);
168: TaoResetStatistics(tao);
169: if (tao->linesearch) TaoLineSearchReset(tao->linesearch);
171: PetscLogEventBegin(TAO_Solve, tao, 0, 0, 0);
172: PetscTryTypeMethod(tao, solve);
173: PetscLogEventEnd(TAO_Solve, tao, 0, 0, 0);
175: VecViewFromOptions(tao->solution, (PetscObject)tao, "-tao_view_solution");
177: tao->ntotalits += tao->niter;
179: if (tao->printreason) {
180: PetscViewer viewer = PETSC_VIEWER_STDOUT_(((PetscObject)tao)->comm);
181: PetscViewerASCIIAddTab(viewer, ((PetscObject)tao)->tablevel);
182: if (tao->reason > 0) {
183: PetscViewerASCIIPrintf(viewer, " TAO %s solve converged due to %s iterations %" PetscInt_FMT "\n", ((PetscObject)tao)->prefix ? ((PetscObject)tao)->prefix : "", TaoConvergedReasons[tao->reason], tao->niter);
184: } else {
185: PetscViewerASCIIPrintf(viewer, " TAO %s solve did not converge due to %s iteration %" PetscInt_FMT "\n", ((PetscObject)tao)->prefix ? ((PetscObject)tao)->prefix : "", TaoConvergedReasons[tao->reason], tao->niter);
186: }
187: PetscViewerASCIISubtractTab(viewer, ((PetscObject)tao)->tablevel);
188: }
189: TaoViewFromOptions(tao, NULL, "-tao_view");
190: return 0;
191: }
193: /*@
194: TaoSetUp - Sets up the internal data structures for the later use
195: of a Tao solver
197: Collective
199: Input Parameters:
200: . tao - the Tao context
202: Level: advanced
204: Notes:
205: The user will not need to explicitly call `TaoSetUp()`, as it will
206: automatically be called in `TaoSolve()`. However, if the user
207: desires to call it explicitly, it should come after `TaoCreate()`
208: and any TaoSetSomething() routines, but before `TaoSolve()`.
210: .seealso: `Tao`, `TaoCreate()`, `TaoSolve()`
211: @*/
212: PetscErrorCode TaoSetUp(Tao tao)
213: {
215: if (tao->setupcalled) return 0;
216: TaoSetUpEW_Private(tao);
218: PetscTryTypeMethod(tao, setup);
219: tao->setupcalled = PETSC_TRUE;
220: return 0;
221: }
223: /*@C
224: TaoDestroy - Destroys the Tao context that was created with `TaoCreate()`
226: Collective
228: Input Parameter:
229: . tao - the Tao context
231: Level: beginner
233: .seealso: `Tao`, `TaoCreate()`, `TaoSolve()`
234: @*/
235: PetscErrorCode TaoDestroy(Tao *tao)
236: {
237: if (!*tao) return 0;
239: if (--((PetscObject)*tao)->refct > 0) {
240: *tao = NULL;
241: return 0;
242: }
244: if ((*tao)->ops->destroy) (*((*tao))->ops->destroy)(*tao);
245: KSPDestroy(&(*tao)->ksp);
246: SNESDestroy(&(*tao)->snes_ewdummy);
247: TaoLineSearchDestroy(&(*tao)->linesearch);
249: if ((*tao)->ops->convergencedestroy) {
250: (*(*tao)->ops->convergencedestroy)((*tao)->cnvP);
251: if ((*tao)->jacobian_state_inv) MatDestroy(&(*tao)->jacobian_state_inv);
252: }
253: VecDestroy(&(*tao)->solution);
254: VecDestroy(&(*tao)->gradient);
255: VecDestroy(&(*tao)->ls_res);
257: if ((*tao)->gradient_norm) {
258: PetscObjectDereference((PetscObject)(*tao)->gradient_norm);
259: VecDestroy(&(*tao)->gradient_norm_tmp);
260: }
262: VecDestroy(&(*tao)->XL);
263: VecDestroy(&(*tao)->XU);
264: VecDestroy(&(*tao)->IL);
265: VecDestroy(&(*tao)->IU);
266: VecDestroy(&(*tao)->DE);
267: VecDestroy(&(*tao)->DI);
268: VecDestroy(&(*tao)->constraints);
269: VecDestroy(&(*tao)->constraints_equality);
270: VecDestroy(&(*tao)->constraints_inequality);
271: VecDestroy(&(*tao)->stepdirection);
272: MatDestroy(&(*tao)->hessian_pre);
273: MatDestroy(&(*tao)->hessian);
274: MatDestroy(&(*tao)->ls_jac);
275: MatDestroy(&(*tao)->ls_jac_pre);
276: MatDestroy(&(*tao)->jacobian_pre);
277: MatDestroy(&(*tao)->jacobian);
278: MatDestroy(&(*tao)->jacobian_state_pre);
279: MatDestroy(&(*tao)->jacobian_state);
280: MatDestroy(&(*tao)->jacobian_state_inv);
281: MatDestroy(&(*tao)->jacobian_design);
282: MatDestroy(&(*tao)->jacobian_equality);
283: MatDestroy(&(*tao)->jacobian_equality_pre);
284: MatDestroy(&(*tao)->jacobian_inequality);
285: MatDestroy(&(*tao)->jacobian_inequality_pre);
286: ISDestroy(&(*tao)->state_is);
287: ISDestroy(&(*tao)->design_is);
288: VecDestroy(&(*tao)->res_weights_v);
289: TaoCancelMonitors(*tao);
290: if ((*tao)->hist_malloc) PetscFree4((*tao)->hist_obj, (*tao)->hist_resid, (*tao)->hist_cnorm, (*tao)->hist_lits);
291: if ((*tao)->res_weights_n) {
292: PetscFree((*tao)->res_weights_rows);
293: PetscFree((*tao)->res_weights_cols);
294: PetscFree((*tao)->res_weights_w);
295: }
296: PetscHeaderDestroy(tao);
297: return 0;
298: }
300: /*@
301: TaoKSPSetUseEW - Sets `SNES` to use the Eisenstat-Walker method for
302: computing the relative tolerance used by the linear solvers.
304: Logically Collective
306: Input Parameters:
307: + tao - Tao context
308: - flag - `PETSC_TRUE` or `PETSC_FALSE`
310: Level: advanced
312: Notes:
313: See `SNESKSPSetUseEW()` for customization details.
315: Reference:
316: S. C. Eisenstat and H. F. Walker, "Choosing the forcing terms in an
317: inexact Newton method", SISC 17 (1), pp.16-32, 1996.
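Example Usage:
An illustrative sketch; the same behavior can be requested from the options database with -tao_ksp_ew.
.vb
  PetscCall(TaoKSPSetUseEW(tao, PETSC_TRUE));
.ve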
319: .seealso: `Tao`, `SNESKSPSetUseEW()`
320: @*/
321: PetscErrorCode TaoKSPSetUseEW(Tao tao, PetscBool flag)
322: {
325: tao->ksp_ewconv = flag;
326: return 0;
327: }
329: /*@
330: TaoSetFromOptions - Sets various Tao parameters from user
331: options.
333: Collective
335: Input Parameter:
336: . tao - the Tao solver context
338: Options Database Keys:
339: + -tao_type <type> - The algorithm that Tao uses (lmvm, nls, etc.)
340: . -tao_gatol <gatol> - absolute error tolerance for ||gradient||
341: . -tao_grtol <grtol> - relative error tolerance for ||gradient||
342: . -tao_gttol <gttol> - reduction of ||gradient|| relative to initial gradient
343: . -tao_max_it <max> - sets maximum number of iterations
344: . -tao_max_funcs <max> - sets maximum number of function evaluations
345: . -tao_fmin <fmin> - stop if function value reaches fmin
346: . -tao_steptol <tol> - stop if trust region radius less than <tol>
347: . -tao_trust0 <t> - initial trust region radius
348: . -tao_monitor - prints function value and residual at each iteration
349: . -tao_smonitor - same as tao_monitor, but truncates very small values
350: . -tao_cmonitor - prints function value, residual, and constraint norm at each iteration
351: . -tao_view_solution - prints solution vector at each iteration
352: . -tao_view_ls_residual - prints least-squares residual vector at each iteration
353: . -tao_view_stepdirection - prints step direction vector at each iteration
354: . -tao_view_gradient - prints gradient vector at each iteration
355: . -tao_draw_solution - graphically view solution vector at each iteration
356: . -tao_draw_step - graphically view step vector at each iteration
357: . -tao_draw_gradient - graphically view gradient at each iteration
358: . -tao_fd_gradient - use gradient computed with finite differences
359: . -tao_fd_hessian - use hessian computed with finite differences
360: . -tao_mf_hessian - use matrix-free hessian computed with finite differences
361: . -tao_cancelmonitors - cancels all monitors (except those set with command line)
362: . -tao_view - prints information about the Tao after solving
363: - -tao_converged_reason - prints the reason Tao stopped iterating
365: Level: beginner
367: Note:
368: To see all options, run your program with the -help option or consult the
369: user's manual. This routine should be called after `TaoCreate()` but before `TaoSolve()`.
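Example Usage:
An illustrative run line; ex_rosenbrock stands for any application executable that calls `TaoSetFromOptions()`.
.vb
  mpiexec -n 1 ./ex_rosenbrock -tao_type ntr -tao_gatol 1.e-8 -tao_monitor -tao_converged_reason
.ve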
371: .seealso: `Tao`, `TaoCreate()`, `TaoSolve()`
372: @*/
373: PetscErrorCode TaoSetFromOptions(Tao tao)
374: {
375: TaoType default_type = TAOLMVM;
376: char type[256], monfilename[PETSC_MAX_PATH_LEN];
377: PetscViewer monviewer;
378: PetscBool flg;
379: MPI_Comm comm;
382: PetscObjectGetComm((PetscObject)tao, &comm);
384: if (((PetscObject)tao)->type_name) default_type = ((PetscObject)tao)->type_name;
386: PetscObjectOptionsBegin((PetscObject)tao);
387: /* Check for type from options */
388: PetscOptionsFList("-tao_type", "Tao Solver type", "TaoSetType", TaoList, default_type, type, 256, &flg);
389: if (flg) {
390: TaoSetType(tao, type);
391: } else if (!((PetscObject)tao)->type_name) {
392: TaoSetType(tao, default_type);
393: }
395: /* Tao solvers do not set the prefix, set it here if not yet done
396: We do it after SetType since solver may have been changed */
397: if (tao->linesearch) {
398: const char *prefix;
399: TaoLineSearchGetOptionsPrefix(tao->linesearch, &prefix);
400: if (!prefix) TaoLineSearchSetOptionsPrefix(tao->linesearch, ((PetscObject)(tao))->prefix);
401: }
403: PetscOptionsReal("-tao_catol", "Stop if constraints violations within", "TaoSetConstraintTolerances", tao->catol, &tao->catol, &flg);
404: if (flg) tao->catol_changed = PETSC_TRUE;
405: PetscOptionsReal("-tao_crtol", "Stop if relative constraint violations within", "TaoSetConstraintTolerances", tao->crtol, &tao->crtol, &flg);
406: if (flg) tao->crtol_changed = PETSC_TRUE;
407: PetscOptionsReal("-tao_gatol", "Stop if norm of gradient less than", "TaoSetTolerances", tao->gatol, &tao->gatol, &flg);
408: if (flg) tao->gatol_changed = PETSC_TRUE;
409: PetscOptionsReal("-tao_grtol", "Stop if norm of gradient divided by the function value is less than", "TaoSetTolerances", tao->grtol, &tao->grtol, &flg);
410: if (flg) tao->grtol_changed = PETSC_TRUE;
411: PetscOptionsReal("-tao_gttol", "Stop if the norm of the gradient is less than the norm of the initial gradient times tol", "TaoSetTolerances", tao->gttol, &tao->gttol, &flg);
412: if (flg) tao->gttol_changed = PETSC_TRUE;
413: PetscOptionsInt("-tao_max_it", "Stop if iteration number exceeds", "TaoSetMaximumIterations", tao->max_it, &tao->max_it, &flg);
414: if (flg) tao->max_it_changed = PETSC_TRUE;
415: PetscOptionsInt("-tao_max_funcs", "Stop if number of function evaluations exceeds", "TaoSetMaximumFunctionEvaluations", tao->max_funcs, &tao->max_funcs, &flg);
416: if (flg) tao->max_funcs_changed = PETSC_TRUE;
417: PetscOptionsReal("-tao_fmin", "Stop if function less than", "TaoSetFunctionLowerBound", tao->fmin, &tao->fmin, &flg);
418: if (flg) tao->fmin_changed = PETSC_TRUE;
419: PetscOptionsReal("-tao_steptol", "Stop if step size or trust region radius less than", "", tao->steptol, &tao->steptol, &flg);
420: if (flg) tao->steptol_changed = PETSC_TRUE;
421: PetscOptionsReal("-tao_trust0", "Initial trust region radius", "TaoSetTrustRegionRadius", tao->trust0, &tao->trust0, &flg);
422: if (flg) tao->trust0_changed = PETSC_TRUE;
423: PetscOptionsString("-tao_view_solution", "view solution vector after each evaluation", "TaoSetMonitor", "stdout", monfilename, sizeof(monfilename), &flg);
424: if (flg) {
425: PetscViewerASCIIOpen(comm, monfilename, &monviewer);
426: TaoSetMonitor(tao, TaoSolutionMonitor, monviewer, (PetscErrorCode(*)(void **))PetscViewerDestroy);
427: }
429: PetscOptionsBool("-tao_converged_reason", "Print reason for Tao converged", "TaoSolve", tao->printreason, &tao->printreason, NULL);
430: PetscOptionsString("-tao_view_gradient", "view gradient vector after each evaluation", "TaoSetMonitor", "stdout", monfilename, sizeof(monfilename), &flg);
431: if (flg) {
432: PetscViewerASCIIOpen(comm, monfilename, &monviewer);
433: TaoSetMonitor(tao, TaoGradientMonitor, monviewer, (PetscErrorCode(*)(void **))PetscViewerDestroy);
434: }
436: PetscOptionsString("-tao_view_stepdirection", "view step direction vector after each iteration", "TaoSetMonitor", "stdout", monfilename, sizeof(monfilename), &flg);
437: if (flg) {
438: PetscViewerASCIIOpen(comm, monfilename, &monviewer);
439: TaoSetMonitor(tao, TaoStepDirectionMonitor, monviewer, (PetscErrorCode(*)(void **))PetscViewerDestroy);
440: }
442: PetscOptionsString("-tao_view_residual", "view least-squares residual vector after each evaluation", "TaoSetMonitor", "stdout", monfilename, sizeof(monfilename), &flg);
443: if (flg) {
444: PetscViewerASCIIOpen(comm, monfilename, &monviewer);
445: TaoSetMonitor(tao, TaoResidualMonitor, monviewer, (PetscErrorCode(*)(void **))PetscViewerDestroy);
446: }
448: PetscOptionsString("-tao_monitor", "Use the default convergence monitor", "TaoSetMonitor", "stdout", monfilename, sizeof(monfilename), &flg);
449: if (flg) {
450: PetscViewerASCIIOpen(comm, monfilename, &monviewer);
451: TaoSetMonitor(tao, TaoMonitorDefault, monviewer, (PetscErrorCode(*)(void **))PetscViewerDestroy);
452: }
454: PetscOptionsString("-tao_gmonitor", "Use the convergence monitor with extra globalization info", "TaoSetMonitor", "stdout", monfilename, sizeof(monfilename), &flg);
455: if (flg) {
456: PetscViewerASCIIOpen(comm, monfilename, &monviewer);
457: TaoSetMonitor(tao, TaoDefaultGMonitor, monviewer, (PetscErrorCode(*)(void **))PetscViewerDestroy);
458: }
460: PetscOptionsString("-tao_smonitor", "Use the short convergence monitor", "TaoSetMonitor", "stdout", monfilename, sizeof(monfilename), &flg);
461: if (flg) {
462: PetscViewerASCIIOpen(comm, monfilename, &monviewer);
463: TaoSetMonitor(tao, TaoDefaultSMonitor, monviewer, (PetscErrorCode(*)(void **))PetscViewerDestroy);
464: }
466: PetscOptionsString("-tao_cmonitor", "Use the default convergence monitor with constraint norm", "TaoSetMonitor", "stdout", monfilename, sizeof(monfilename), &flg);
467: if (flg) {
468: PetscViewerASCIIOpen(comm, monfilename, &monviewer);
469: TaoSetMonitor(tao, TaoDefaultCMonitor, monviewer, (PetscErrorCode(*)(void **))PetscViewerDestroy);
470: }
472: flg = PETSC_FALSE;
473: PetscOptionsBool("-tao_cancelmonitors", "cancel all monitors and call any registered destroy routines", "TaoCancelMonitors", flg, &flg, NULL);
474: if (flg) TaoCancelMonitors(tao);
476: flg = PETSC_FALSE;
477: PetscOptionsBool("-tao_draw_solution", "Plot solution vector at each iteration", "TaoSetMonitor", flg, &flg, NULL);
478: if (flg) {
479: TaoMonitorDrawCtx drawctx;
480: PetscInt howoften = 1;
481: TaoMonitorDrawCtxCreate(PetscObjectComm((PetscObject)tao), NULL, NULL, PETSC_DECIDE, PETSC_DECIDE, 300, 300, howoften, &drawctx);
482: TaoSetMonitor(tao, TaoDrawSolutionMonitor, drawctx, (PetscErrorCode(*)(void **))TaoMonitorDrawCtxDestroy);
483: }
485: flg = PETSC_FALSE;
486: PetscOptionsBool("-tao_draw_step", "plots step direction at each iteration", "TaoSetMonitor", flg, &flg, NULL);
487: if (flg) TaoSetMonitor(tao, TaoDrawStepMonitor, NULL, NULL);
489: flg = PETSC_FALSE;
490: PetscOptionsBool("-tao_draw_gradient", "plots gradient at each iteration", "TaoSetMonitor", flg, &flg, NULL);
491: if (flg) {
492: TaoMonitorDrawCtx drawctx;
493: PetscInt howoften = 1;
494: TaoMonitorDrawCtxCreate(PetscObjectComm((PetscObject)tao), NULL, NULL, PETSC_DECIDE, PETSC_DECIDE, 300, 300, howoften, &drawctx);
495: TaoSetMonitor(tao, TaoDrawGradientMonitor, drawctx, (PetscErrorCode(*)(void **))TaoMonitorDrawCtxDestroy);
496: }
497: flg = PETSC_FALSE;
498: PetscOptionsBool("-tao_fd_gradient", "compute gradient using finite differences", "TaoDefaultComputeGradient", flg, &flg, NULL);
499: if (flg) TaoSetGradient(tao, NULL, TaoDefaultComputeGradient, NULL);
500: flg = PETSC_FALSE;
501: PetscOptionsBool("-tao_fd_hessian", "compute hessian using finite differences", "TaoDefaultComputeHessian", flg, &flg, NULL);
502: if (flg) {
503: Mat H;
505: MatCreate(PetscObjectComm((PetscObject)tao), &H);
506: MatSetType(H, MATAIJ);
507: TaoSetHessian(tao, H, H, TaoDefaultComputeHessian, NULL);
508: MatDestroy(&H);
509: }
510: flg = PETSC_FALSE;
511: PetscOptionsBool("-tao_mf_hessian", "compute matrix-free hessian using finite differences", "TaoDefaultComputeHessianMFFD", flg, &flg, NULL);
512: if (flg) {
513: Mat H;
515: MatCreate(PetscObjectComm((PetscObject)tao), &H);
516: TaoSetHessian(tao, H, H, TaoDefaultComputeHessianMFFD, NULL);
517: MatDestroy(&H);
518: }
519: flg = PETSC_FALSE;
520: PetscOptionsBool("-tao_recycle_history", "enable recycling/re-using information from the previous TaoSolve() call for some algorithms", "TaoSetRecycleHistory", flg, &flg, NULL);
521: if (flg) TaoSetRecycleHistory(tao, PETSC_TRUE);
522: PetscOptionsEnum("-tao_subset_type", "subset type", "", TaoSubSetTypes, (PetscEnum)tao->subset_type, (PetscEnum *)&tao->subset_type, NULL);
524: if (tao->ksp) {
525: PetscOptionsBool("-tao_ksp_ew", "Use Eisenstat-Walker linear system convergence test", "TaoKSPSetUseEW", tao->ksp_ewconv, &tao->ksp_ewconv, NULL);
526: TaoKSPSetUseEW(tao, tao->ksp_ewconv);
527: }
529: PetscTryTypeMethod(tao, setfromoptions, PetscOptionsObject);
531: /* process any options handlers added with PetscObjectAddOptionsHandler() */
532: PetscObjectProcessOptionsHandlers((PetscObject)tao, PetscOptionsObject);
533: PetscOptionsEnd();
535: if (tao->linesearch) TaoLineSearchSetFromOptions(tao->linesearch);
536: return 0;
537: }
539: /*@C
540: TaoViewFromOptions - View a Tao object based on values in the options database
542: Collective
544: Input Parameters:
545: + A - the Tao context
546: . obj - Optional object
547: - name - command line option
549: Level: intermediate
551: .seealso: `Tao`, `TaoView()`, `PetscObjectViewFromOptions()`, `TaoCreate()`
552: @*/
553: PetscErrorCode TaoViewFromOptions(Tao A, PetscObject obj, const char name[])
554: {
556: PetscObjectViewFromOptions((PetscObject)A, obj, name);
557: return 0;
558: }
560: /*@C
561: TaoView - Prints information about the Tao object
563: Collective
565: Input Parameters:
566: + tao - the Tao context
567: - viewer - visualization context
569: Options Database Key:
570: . -tao_view - Calls `TaoView()` at the end of `TaoSolve()`
572: Level: beginner
574: Notes:
575: The available visualization contexts include
576: + `PETSC_VIEWER_STDOUT_SELF` - standard output (default)
577: - `PETSC_VIEWER_STDOUT_WORLD` - synchronized standard
578: output where only the first processor opens
579: the file. All other processors send their
580: data to the first processor to print.
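Example Usage:
An illustrative sketch that views the solver on the world communicator.
.vb
  PetscCall(TaoView(tao, PETSC_VIEWER_STDOUT_WORLD));
.ve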
582: .seealso: `PetscViewerASCIIOpen()`
583: @*/
584: PetscErrorCode TaoView(Tao tao, PetscViewer viewer)
585: {
586: PetscBool isascii, isstring;
587: TaoType type;
590: if (!viewer) PetscViewerASCIIGetStdout(((PetscObject)tao)->comm, &viewer);
594: PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii);
595: PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERSTRING, &isstring);
596: if (isascii) {
597: PetscObjectPrintClassNamePrefixType((PetscObject)tao, viewer);
599: if (tao->ops->view) {
600: PetscViewerASCIIPushTab(viewer);
601: PetscUseTypeMethod(tao, view, viewer);
602: PetscViewerASCIIPopTab(viewer);
603: }
604: if (tao->linesearch) {
605: PetscViewerASCIIPushTab(viewer);
606: TaoLineSearchView(tao->linesearch, viewer);
607: PetscViewerASCIIPopTab(viewer);
608: }
609: if (tao->ksp) {
610: PetscViewerASCIIPushTab(viewer);
611: KSPView(tao->ksp, viewer);
612: PetscViewerASCIIPrintf(viewer, "total KSP iterations: %" PetscInt_FMT "\n", tao->ksp_tot_its);
613: PetscViewerASCIIPopTab(viewer);
614: }
616: PetscViewerASCIIPushTab(viewer);
618: if (tao->XL || tao->XU) PetscViewerASCIIPrintf(viewer, "Active Set subset type: %s\n", TaoSubSetTypes[tao->subset_type]);
620: PetscViewerASCIIPrintf(viewer, "convergence tolerances: gatol=%g,", (double)tao->gatol);
621: PetscViewerASCIIPrintf(viewer, " steptol=%g,", (double)tao->steptol);
622: PetscViewerASCIIPrintf(viewer, " gttol=%g\n", (double)tao->gttol);
623: PetscViewerASCIIPrintf(viewer, "Residual in Function/Gradient:=%g\n", (double)tao->residual);
625: if (tao->constrained) {
626: PetscViewerASCIIPrintf(viewer, "convergence tolerances:");
627: PetscViewerASCIIPrintf(viewer, " catol=%g,", (double)tao->catol);
628: PetscViewerASCIIPrintf(viewer, " crtol=%g\n", (double)tao->crtol);
629: PetscViewerASCIIPrintf(viewer, "Residual in Constraints:=%g\n", (double)tao->cnorm);
630: }
632: if (tao->trust < tao->steptol) {
633: PetscViewerASCIIPrintf(viewer, "convergence tolerances: steptol=%g\n", (double)tao->steptol);
634: PetscViewerASCIIPrintf(viewer, "Final trust region radius:=%g\n", (double)tao->trust);
635: }
637: if (tao->fmin > -1.e25) PetscViewerASCIIPrintf(viewer, "convergence tolerances: function minimum=%g\n", (double)tao->fmin);
638: PetscViewerASCIIPrintf(viewer, "Objective value=%g\n", (double)tao->fc);
640: PetscViewerASCIIPrintf(viewer, "total number of iterations=%" PetscInt_FMT ", ", tao->niter);
641: PetscViewerASCIIPrintf(viewer, " (max: %" PetscInt_FMT ")\n", tao->max_it);
643: if (tao->nfuncs > 0) {
644: PetscViewerASCIIPrintf(viewer, "total number of function evaluations=%" PetscInt_FMT ",", tao->nfuncs);
645: PetscViewerASCIIPrintf(viewer, " max: %" PetscInt_FMT "\n", tao->max_funcs);
646: }
647: if (tao->ngrads > 0) {
648: PetscViewerASCIIPrintf(viewer, "total number of gradient evaluations=%" PetscInt_FMT ",", tao->ngrads);
649: PetscViewerASCIIPrintf(viewer, " max: %" PetscInt_FMT "\n", tao->max_funcs);
650: }
651: if (tao->nfuncgrads > 0) {
652: PetscViewerASCIIPrintf(viewer, "total number of function/gradient evaluations=%" PetscInt_FMT ",", tao->nfuncgrads);
653: PetscViewerASCIIPrintf(viewer, " (max: %" PetscInt_FMT ")\n", tao->max_funcs);
654: }
655: if (tao->nhess > 0) PetscViewerASCIIPrintf(viewer, "total number of Hessian evaluations=%" PetscInt_FMT "\n", tao->nhess);
656: if (tao->nconstraints > 0) PetscViewerASCIIPrintf(viewer, "total number of constraint function evaluations=%" PetscInt_FMT "\n", tao->nconstraints);
657: if (tao->njac > 0) PetscViewerASCIIPrintf(viewer, "total number of Jacobian evaluations=%" PetscInt_FMT "\n", tao->njac);
659: if (tao->reason > 0) {
660: PetscViewerASCIIPrintf(viewer, "Solution converged: ");
661: switch (tao->reason) {
662: case TAO_CONVERGED_GATOL:
663: PetscViewerASCIIPrintf(viewer, " ||g(X)|| <= gatol\n");
664: break;
665: case TAO_CONVERGED_GRTOL:
666: PetscViewerASCIIPrintf(viewer, " ||g(X)||/|f(X)| <= grtol\n");
667: break;
668: case TAO_CONVERGED_GTTOL:
669: PetscViewerASCIIPrintf(viewer, " ||g(X)||/||g(X0)|| <= gttol\n");
670: break;
671: case TAO_CONVERGED_STEPTOL:
672: PetscViewerASCIIPrintf(viewer, " Steptol -- step size small\n");
673: break;
674: case TAO_CONVERGED_MINF:
675: PetscViewerASCIIPrintf(viewer, " Minf -- f < fmin\n");
676: break;
677: case TAO_CONVERGED_USER:
678: PetscViewerASCIIPrintf(viewer, " User Terminated\n");
679: break;
680: default:
681: PetscViewerASCIIPrintf(viewer, "\n");
682: break;
683: }
684: } else {
685: PetscViewerASCIIPrintf(viewer, "Solver terminated: %d", tao->reason);
686: switch (tao->reason) {
687: case TAO_DIVERGED_MAXITS:
688: PetscViewerASCIIPrintf(viewer, " Maximum Iterations\n");
689: break;
690: case TAO_DIVERGED_NAN:
691: PetscViewerASCIIPrintf(viewer, " NAN or Inf encountered\n");
692: break;
693: case TAO_DIVERGED_MAXFCN:
694: PetscViewerASCIIPrintf(viewer, " Maximum Function Evaluations\n");
695: break;
696: case TAO_DIVERGED_LS_FAILURE:
697: PetscViewerASCIIPrintf(viewer, " Line Search Failure\n");
698: break;
699: case TAO_DIVERGED_TR_REDUCTION:
700: PetscViewerASCIIPrintf(viewer, " Trust Region too small\n");
701: break;
702: case TAO_DIVERGED_USER:
703: PetscViewerASCIIPrintf(viewer, " User Terminated\n");
704: break;
705: default:
706: PetscViewerASCIIPrintf(viewer, "\n");
707: break;
708: }
709: }
710: PetscViewerASCIIPopTab(viewer);
711: } else if (isstring) {
712: TaoGetType(tao, &type);
713: PetscViewerStringSPrintf(viewer, " %-3.3s", type);
714: }
715: return 0;
716: }
718: /*@
719: TaoSetRecycleHistory - Sets the boolean flag to enable/disable re-using
720: iterate information from the previous `TaoSolve()`. This feature is disabled by
721: default.
723: Logically Collective
725: Input Parameters:
726: + tao - the Tao context
727: - recycle - boolean flag
729: Options Database Keys:
730: . -tao_recycle_history <true,false> - reuse the history
732: Level: intermediate
734: Notes:
735: For conjugate gradient methods (`TAOBNCG`), this re-uses the latest search direction
736: from the previous `TaoSolve()` call when computing the first search direction in a
737: new solution. By default, CG methods set the first search direction to the
738: negative gradient.
740: For quasi-Newton family of methods (`TAOBQNLS`, `TAOBQNKLS`, `TAOBQNKTR`, `TAOBQNKTL`), this re-uses
741: the accumulated quasi-Newton Hessian approximation from the previous `TaoSolve()`
742: call. By default, QN family of methods reset the initial Hessian approximation to
743: the identity matrix.
745: For any other algorithm, this setting has no effect.
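Example Usage:
An illustrative sketch; the second `TaoSolve()` is warm-started with information retained from the first.
.vb
  PetscCall(TaoSetRecycleHistory(tao, PETSC_TRUE));
  PetscCall(TaoSolve(tao));
  PetscCall(TaoSolve(tao));
.ve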
747: .seealso: `TaoGetRecycleHistory()`, `TAOBNCG`, `TAOBQNLS`, `TAOBQNKLS`, `TAOBQNKTR`, `TAOBQNKTL`
748: @*/
749: PetscErrorCode TaoSetRecycleHistory(Tao tao, PetscBool recycle)
750: {
753: tao->recycle = recycle;
754: return 0;
755: }
757: /*@
758: TaoGetRecycleHistory - Retrieve the boolean flag for re-using iterate information
759: from the previous `TaoSolve()`. This feature is disabled by default.
761: Logically Collective
763: Input Parameters:
764: . tao - the Tao context
766: Output Parameters:
767: . recycle - boolean flag
769: Level: intermediate
771: .seealso: `TaoSetRecycleHistory()`, `TAOBNCG`, `TAOBQNLS`, `TAOBQNKLS`, `TAOBQNKTR`, `TAOBQNKTL`
772: @*/
773: PetscErrorCode TaoGetRecycleHistory(Tao tao, PetscBool *recycle)
774: {
777: *recycle = tao->recycle;
778: return 0;
779: }
781: /*@
782: TaoSetTolerances - Sets parameters used in Tao convergence tests
784: Logically Collective
786: Input Parameters:
787: + tao - the Tao context
788: . gatol - stop if norm of gradient is less than this
789: . grtol - stop if relative norm of gradient is less than this
790: - gttol - stop if norm of gradient is reduced by this factor
792: Options Database Keys:
793: + -tao_gatol <gatol> - Sets gatol
794: . -tao_grtol <grtol> - Sets grtol
795: - -tao_gttol <gttol> - Sets gttol
797: Stopping Criteria:
798: .vb
799: ||g(X)|| <= gatol
800: ||g(X)|| / |f(X)| <= grtol
801: ||g(X)|| / ||g(X0)|| <= gttol
802: .ve
804: Level: beginner
806: Note:
807: Use `PETSC_DEFAULT` to leave one or more tolerances unchanged.
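Example Usage:
An illustrative sketch that tightens the absolute gradient tolerance and leaves the other tolerances unchanged.
.vb
  PetscCall(TaoSetTolerances(tao, 1.e-10, PETSC_DEFAULT, PETSC_DEFAULT));
.ve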
809: .seealso: `TaoGetTolerances()`
810: @*/
811: PetscErrorCode TaoSetTolerances(Tao tao, PetscReal gatol, PetscReal grtol, PetscReal gttol)
812: {
818: if (gatol != PETSC_DEFAULT) {
819: if (gatol < 0) {
820: PetscInfo(tao, "Tried to set negative gatol -- ignored.\n");
821: } else {
822: tao->gatol = PetscMax(0, gatol);
823: tao->gatol_changed = PETSC_TRUE;
824: }
825: }
827: if (grtol != PETSC_DEFAULT) {
828: if (grtol < 0) {
829: PetscInfo(tao, "Tried to set negative grtol -- ignored.\n");
830: } else {
831: tao->grtol = PetscMax(0, grtol);
832: tao->grtol_changed = PETSC_TRUE;
833: }
834: }
836: if (gttol != PETSC_DEFAULT) {
837: if (gttol < 0) {
838: PetscInfo(tao, "Tried to set negative gttol -- ignored.\n");
839: } else {
840: tao->gttol = PetscMax(0, gttol);
841: tao->gttol_changed = PETSC_TRUE;
842: }
843: }
844: return 0;
845: }
847: /*@
848: TaoSetConstraintTolerances - Sets constraint tolerance parameters used in Tao convergence tests
850: Logically Collective
852: Input Parameters:
853: + tao - the Tao context
854: . catol - absolute constraint tolerance, the constraint norm must be less than `catol` for the gatol convergence criterion to be used
855: - crtol - relative constraint tolerance, the constraint norm must be less than `crtol` for the gatol and gttol convergence criteria to be used
857: Options Database Keys:
858: + -tao_catol <catol> - Sets catol
859: - -tao_crtol <crtol> - Sets crtol
861: Level: intermediate
863: Notes:
864: Use `PETSC_DEFAULT` to leave any tolerance unchanged.
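Example Usage:
An illustrative sketch that sets the absolute constraint tolerance and leaves the relative one unchanged.
.vb
  PetscCall(TaoSetConstraintTolerances(tao, 1.e-8, PETSC_DEFAULT));
.ve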
866: .seealso: `TaoGetTolerances()`, `TaoGetConstraintTolerances()`, `TaoSetTolerances()`
867: @*/
868: PetscErrorCode TaoSetConstraintTolerances(Tao tao, PetscReal catol, PetscReal crtol)
869: {
874: if (catol != PETSC_DEFAULT) {
875: if (catol < 0) {
876: PetscInfo(tao, "Tried to set negative catol -- ignored.\n");
877: } else {
878: tao->catol = PetscMax(0, catol);
879: tao->catol_changed = PETSC_TRUE;
880: }
881: }
883: if (crtol != PETSC_DEFAULT) {
884: if (crtol < 0) {
885: PetscInfo(tao, "Tried to set negative crtol -- ignored.\n");
886: } else {
887: tao->crtol = PetscMax(0, crtol);
888: tao->crtol_changed = PETSC_TRUE;
889: }
890: }
891: return 0;
892: }
894: /*@
895: TaoGetConstraintTolerances - Gets constraint tolerance parameters used in Tao convergence tests
897: Not Collective
899: Input Parameter:
900: . tao - the Tao context
902: Output Parameters:
903: + catol - absolute constraint tolerance, the constraint norm must be less than `catol` for the gatol convergence criterion to be used
904: - crtol - relative constraint tolerance, the constraint norm must be less than `crtol` for the gatol and gttol convergence criteria to be used
906: Level: intermediate
908: .seealso: `TaoGetTolerances()`, `TaoSetTolerances()`, `TaoSetConstraintTolerances()`
910: @*/
911: PetscErrorCode TaoGetConstraintTolerances(Tao tao, PetscReal *catol, PetscReal *crtol)
912: {
914: if (catol) *catol = tao->catol;
915: if (crtol) *crtol = tao->crtol;
916: return 0;
917: }
919: /*@
920: TaoSetFunctionLowerBound - Sets a bound on the solution objective value.
921: When an approximate solution with an objective value below this number
922: has been found, the solver will terminate.
924: Logically Collective
926: Input Parameters:
927: + tao - the Tao solver context
928: - fmin - the tolerance
930: Options Database Keys:
931: . -tao_fmin <fmin> - sets the minimum function value
933: Level: intermediate
935: .seealso: `TaoSetTolerances()`
936: @*/
937: PetscErrorCode TaoSetFunctionLowerBound(Tao tao, PetscReal fmin)
938: {
941: tao->fmin = fmin;
942: tao->fmin_changed = PETSC_TRUE;
943: return 0;
944: }
946: /*@
947: TaoGetFunctionLowerBound - Gets the bound on the solution objective value.
948: When an approximate solution with an objective value below this number
949: has been found, the solver will terminate.
951: Not Collective
953: Input Parameters:
954: . tao - the Tao solver context
956: Output Parameters:
957: . fmin - the minimum function value
959: Level: intermediate
961: .seealso: `TaoSetFunctionLowerBound()`
962: @*/
963: PetscErrorCode TaoGetFunctionLowerBound(Tao tao, PetscReal *fmin)
964: {
967: *fmin = tao->fmin;
968: return 0;
969: }
971: /*@
972: TaoSetMaximumFunctionEvaluations - Sets a maximum number of
973: function evaluations.
975: Logically Collective
977: Input Parameters:
978: + tao - the Tao solver context
979: - nfcn - the maximum number of function evaluations (>=0)
981: Options Database Keys:
982: . -tao_max_funcs <nfcn> - sets the maximum number of function evaluations
984: Level: intermediate
986: .seealso: `TaoSetTolerances()`, `TaoSetMaximumIterations()`
987: @*/
989: PetscErrorCode TaoSetMaximumFunctionEvaluations(Tao tao, PetscInt nfcn)
990: {
993: if (nfcn >= 0) {
994: tao->max_funcs = PetscMax(0, nfcn);
995: } else {
996: tao->max_funcs = -1;
997: }
998: tao->max_funcs_changed = PETSC_TRUE;
999: return 0;
1000: }
1002: /*@
1003: TaoGetMaximumFunctionEvaluations - Gets a maximum number of
1004: function evaluations.
1006: Logically Collective
1008: Input Parameters:
1009: . tao - the Tao solver context
1011: Output Parameters:
1012: . nfcn - the maximum number of function evaluations
1014: Level: intermediate
1016: .seealso: `TaoSetMaximumFunctionEvaluations()`, `TaoGetMaximumIterations()`
1017: @*/
1019: PetscErrorCode TaoGetMaximumFunctionEvaluations(Tao tao, PetscInt *nfcn)
1020: {
1023: *nfcn = tao->max_funcs;
1024: return 0;
1025: }
1027: /*@
1028: TaoGetCurrentFunctionEvaluations - Get current number of
1029: function evaluations.
1031: Not Collective
1033: Input Parameters:
1034: . tao - the Tao solver context
1036: Output Parameters:
1037: . nfuncs - the current number of function evaluations (the maximum of the separate function evaluations and the combined function/gradient evaluations)
1039: Level: intermediate
1041: .seealso: `TaoSetMaximumFunctionEvaluations()`, `TaoGetMaximumFunctionEvaluations()`, `TaoGetMaximumIterations()`
1042: @*/
1044: PetscErrorCode TaoGetCurrentFunctionEvaluations(Tao tao, PetscInt *nfuncs)
1045: {
1048: *nfuncs = PetscMax(tao->nfuncs, tao->nfuncgrads);
1049: return 0;
1050: }
1052: /*@
1053: TaoSetMaximumIterations - Sets a maximum number of iterates.
1055: Logically Collective
1057: Input Parameters:
1058: + tao - the Tao solver context
1059: - maxits - the maximum number of iterates (>=0)
1061: Options Database Keys:
1062: . -tao_max_it <its> - sets the maximum number of iterations
1064: Level: intermediate
1066: .seealso: `TaoSetTolerances()`, `TaoSetMaximumFunctionEvaluations()`
1067: @*/
1068: PetscErrorCode TaoSetMaximumIterations(Tao tao, PetscInt maxits)
1069: {
1072: tao->max_it = PetscMax(0, maxits);
1073: tao->max_it_changed = PETSC_TRUE;
1074: return 0;
1075: }
1077: /*@
1078: TaoGetMaximumIterations - Gets a maximum number of iterates that will be used
1080: Not Collective
1082: Input Parameters:
1083: . tao - the Tao solver context
1085: Output Parameters:
1086: . maxits - the maximum number of iterates
1088: Level: intermediate
1090: .seealso: `TaoSetMaximumIterations()`, `TaoGetMaximumFunctionEvaluations()`
1091: @*/
1092: PetscErrorCode TaoGetMaximumIterations(Tao tao, PetscInt *maxits)
1093: {
1096: *maxits = tao->max_it;
1097: return 0;
1098: }
1100: /*@
1101: TaoSetInitialTrustRegionRadius - Sets the initial trust region radius.
1103: Logically Collective
1105: Input Parameters:
1106: + tao - a Tao optimization solver
1107: - radius - the trust region radius
1109: Level: intermediate
1111: Options Database Key:
1112: . -tao_trust0 <t0> - sets initial trust region radius
1114: .seealso: `TaoGetTrustRegionRadius()`, `TaoSetTrustRegionTolerance()`, `TAONTR`
1115: @*/
1116: PetscErrorCode TaoSetInitialTrustRegionRadius(Tao tao, PetscReal radius)
1117: {
1120: tao->trust0 = PetscMax(0.0, radius);
1121: tao->trust0_changed = PETSC_TRUE;
1122: return 0;
1123: }
1125: /*@
1126: TaoGetInitialTrustRegionRadius - Gets the initial trust region radius.
1128: Not Collective
1130: Input Parameter:
1131: . tao - a Tao optimization solver
1133: Output Parameter:
1134: . radius - the trust region radius
1136: Level: intermediate
1138: .seealso: `TaoSetInitialTrustRegionRadius()`, `TaoGetCurrentTrustRegionRadius()`, `TAONTR`
1139: @*/
1140: PetscErrorCode TaoGetInitialTrustRegionRadius(Tao tao, PetscReal *radius)
1141: {
1144: *radius = tao->trust0;
1145: return 0;
1146: }
1148: /*@
1149: TaoGetCurrentTrustRegionRadius - Gets the current trust region radius.
1151: Not Collective
1153: Input Parameter:
1154: . tao - a Tao optimization solver
1156: Output Parameter:
1157: . radius - the trust region radius
1159: Level: intermediate
1161: .seealso: `TaoSetInitialTrustRegionRadius()`, `TaoGetInitialTrustRegionRadius()`, `TAONTR`
1162: @*/
1163: PetscErrorCode TaoGetCurrentTrustRegionRadius(Tao tao, PetscReal *radius)
1164: {
1167: *radius = tao->trust;
1168: return 0;
1169: }
1171: /*@
1172: TaoGetTolerances - gets the current values of tolerances
1174: Not Collective
1176: Input Parameter:
1177: . tao - the Tao context
1179: Output Parameters:
1180: + gatol - stop if norm of gradient is less than this
1181: . grtol - stop if relative norm of gradient is less than this
1182: - gttol - stop if norm of gradient is reduced by this factor
1184: Level: intermediate
1186: Note:
1187: NULL can be used as an argument if not all tolerance values are needed
1189: .seealso: `Tao`, `TaoSetTolerances()`
1190: @*/
1191: PetscErrorCode TaoGetTolerances(Tao tao, PetscReal *gatol, PetscReal *grtol, PetscReal *gttol)
1192: {
1194: if (gatol) *gatol = tao->gatol;
1195: if (grtol) *grtol = tao->grtol;
1196: if (gttol) *gttol = tao->gttol;
1197: return 0;
1198: }
1200: /*@
1201: TaoGetKSP - Gets the linear solver used by the optimization solver.
1202: Application writers should use `TaoGetKSP()` if they need direct access
1203: to the PETSc `KSP` object.
1205: Not Collective
1207: Input Parameters:
1208: . tao - the Tao solver
1210: Output Parameters:
1211: . ksp - the KSP linear solver used in the optimization solver
1213: Level: intermediate
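Example Usage:
An illustrative sketch that tightens the relative tolerance of the inner linear solver; not every Tao type creates a `KSP`, so the returned pointer is checked.
.vb
  KSP ksp;

  PetscCall(TaoGetKSP(tao, &ksp));
  if (ksp) PetscCall(KSPSetTolerances(ksp, 1.e-10, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT));
.ve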
1215: .seealso: `Tao`, `KSP`
1216: @*/
1217: PetscErrorCode TaoGetKSP(Tao tao, KSP *ksp)
1218: {
1221: *ksp = tao->ksp;
1222: return 0;
1223: }
1225: /*@
1226: TaoGetLinearSolveIterations - Gets the total number of linear iterations
1227: used by the Tao solver
1229: Not Collective
1231: Input Parameter:
1232: . tao - Tao context
1234: Output Parameter:
1235: . lits - number of linear iterations
1237: Notes:
1238: This counter is reset to zero for each successive call to `TaoSolve()`
1240: Level: intermediate
1242: .seealso: `Tao`, `TaoGetKSP()`
1243: @*/
1244: PetscErrorCode TaoGetLinearSolveIterations(Tao tao, PetscInt *lits)
1245: {
1248: *lits = tao->ksp_tot_its;
1249: return 0;
1250: }
1252: /*@
1253: TaoGetLineSearch - Gets the line search used by the optimization solver.
1254: Application writers should use `TaoGetLineSearch()` if they need direct access
1255: to the TaoLineSearch object.
1257: Not Collective
1259: Input Parameters:
1260: . tao - the Tao solver
1262: Output Parameters:
1263: . ls - the line search used in the optimization solver
1265: Level: intermediate
1267: @*/
1268: PetscErrorCode TaoGetLineSearch(Tao tao, TaoLineSearch *ls)
1269: {
1272: *ls = tao->linesearch;
1273: return 0;
1274: }
1276: /*@
1277: TaoAddLineSearchCounts - Adds the number of function evaluations spent
1278: in the line search to the running total.
1280: Input Parameter:
1281: . tao - the Tao solver
1284: Level: developer
1286: .seealso: `TaoGetLineSearch()`, `TaoLineSearchApply()`
1287: @*/
1288: PetscErrorCode TaoAddLineSearchCounts(Tao tao)
1289: {
1290: PetscBool flg;
1291: PetscInt nfeval, ngeval, nfgeval;
1294: if (tao->linesearch) {
1295: TaoLineSearchIsUsingTaoRoutines(tao->linesearch, &flg);
1296: if (!flg) {
1297: TaoLineSearchGetNumberFunctionEvaluations(tao->linesearch, &nfeval, &ngeval, &nfgeval);
1298: tao->nfuncs += nfeval;
1299: tao->ngrads += ngeval;
1300: tao->nfuncgrads += nfgeval;
1301: }
1302: }
1303: return 0;
1304: }
1306: /*@
1307: TaoGetSolution - Returns the vector with the current Tao solution
1309: Not Collective
1311: Input Parameter:
1312: . tao - the Tao context
1314: Output Parameter:
1315: . X - the current solution
1317: Level: intermediate
1319: Note:
1320: The returned vector will be the same object that was passed into `TaoSetSolution()`
1322: .seealso: `Tao`, `TaoSetSolution()`, `TaoSolve()`
1323: @*/
1324: PetscErrorCode TaoGetSolution(Tao tao, Vec *X)
1325: {
1328: *X = tao->solution;
1329: return 0;
1330: }
1332: /*@
1333: TaoResetStatistics - Initialize the statistics used by Tao for all of the solvers.
1334: These statistics include the iteration number, residual norms, and convergence status.
1335: This routine gets called before solving each optimization problem.
1337: Collective
1339: Input Parameters:
1340: . tao - the Tao context
1342: Level: developer
1344: .seealso: `Tao`, `TaoCreate()`, `TaoSolve()`
1345: @*/
1346: PetscErrorCode TaoResetStatistics(Tao tao)
1347: {
1349: tao->niter = 0;
1350: tao->nfuncs = 0;
1351: tao->nfuncgrads = 0;
1352: tao->ngrads = 0;
1353: tao->nhess = 0;
1354: tao->njac = 0;
1355: tao->nconstraints = 0;
1356: tao->ksp_its = 0;
1357: tao->ksp_tot_its = 0;
1358: tao->reason = TAO_CONTINUE_ITERATING;
1359: tao->residual = 0.0;
1360: tao->cnorm = 0.0;
1361: tao->step = 0.0;
1362: tao->lsflag = PETSC_FALSE;
1363: if (tao->hist_reset) tao->hist_len = 0;
1364: return 0;
1365: }
1367: /*@C
1368: TaoSetUpdate - Sets the general-purpose update function called
1369: at the beginning of every iteration of the optimization algorithm. Specifically
1370: it is called at the top of every iteration, after the new solution and the gradient
1371: are determined, but before the Hessian is computed (if applicable).
1373: Logically Collective
1375: Input Parameters:
1376: + tao  - The Tao solver context
1377: . func - The update function
1378: - ctx  - [optional] user-defined context for the update function (may be NULL)
1379: Calling sequence of func:
1380: $ PetscErrorCode func(Tao tao, PetscInt step, void *ctx);
1382: . step - The current step of the iteration
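Example Usage:
An illustrative sketch; MyUpdate is a hypothetical application routine matching the calling sequence above, here printing the norm of the new iterate.
.vb
  PetscErrorCode MyUpdate(Tao tao, PetscInt step, void *ctx)
  {
    Vec       X;
    PetscReal xnorm;

    PetscCall(TaoGetSolution(tao, &X));
    PetscCall(VecNorm(X, NORM_2, &xnorm));
    PetscCall(PetscPrintf(PETSC_COMM_WORLD, "iteration %" PetscInt_FMT ": ||X|| = %g\n", step, (double)xnorm));
    return 0;
  }

  PetscCall(TaoSetUpdate(tao, MyUpdate, NULL));
.ve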
1384: Level: advanced
1386: .seealso: `Tao`, `TaoSolve()`
1387: @*/
1388: PetscErrorCode TaoSetUpdate(Tao tao, PetscErrorCode (*func)(Tao, PetscInt, void *), void *ctx)
1389: {
1391: tao->ops->update = func;
1392: tao->user_update = ctx;
1393: return 0;
1394: }
1396: /*@C
1397: TaoSetConvergenceTest - Sets the function that is to be used to test
1398: for convergence of the iterative minimization solution. The new convergence
1399: testing routine will replace Tao's default convergence test.
1401: Logically Collective
1403: Input Parameters:
1404: + tao - the Tao object
1405: . conv - the routine to test for convergence
1406: - ctx - [optional] context for private data for the convergence routine
1407: (may be NULL)
1409: Calling sequence of conv:
1410: $ PetscErrorCode conv(Tao tao, void *ctx)
1412: + tao - the Tao object
1413: - ctx - [optional] convergence context
1415: Note:
1416: The new convergence testing routine should call `TaoSetConvergedReason()`.
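Example Usage:
An illustrative sketch; MyConvergenceTest is a hypothetical routine that stops once the gradient norm drops below a user-chosen threshold or the iteration count grows too large.
.vb
  PetscErrorCode MyConvergenceTest(Tao tao, void *ctx)
  {
    PetscInt  its;
    PetscReal f, gnorm;

    PetscCall(TaoGetSolutionStatus(tao, &its, &f, &gnorm, NULL, NULL, NULL));
    if (gnorm < 1.e-7) PetscCall(TaoSetConvergedReason(tao, TAO_CONVERGED_USER));
    else if (its >= 500) PetscCall(TaoSetConvergedReason(tao, TAO_DIVERGED_MAXITS));
    return 0;
  }

  PetscCall(TaoSetConvergenceTest(tao, MyConvergenceTest, NULL));
.ve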
1418: Level: advanced
1420: .seealso: `Tao`, `TaoSolve()`, `TaoSetConvergedReason()`, `TaoGetSolutionStatus()`, `TaoGetTolerances()`, `TaoSetMonitor()`
1422: @*/
1423: PetscErrorCode TaoSetConvergenceTest(Tao tao, PetscErrorCode (*conv)(Tao, void *), void *ctx)
1424: {
1426: tao->ops->convergencetest = conv;
1427: tao->cnvP = ctx;
1428: return 0;
1429: }
1431: /*@C
1432: TaoSetMonitor - Sets an additional function that is to be used at every
1433: iteration of the solver to display the iteration's
1434: progress.
1436: Logically Collective
1438: Input Parameters:
1439: + tao - the Tao solver context
1440: . mymonitor - monitoring routine
1441: . mctx - [optional] user-defined context for private data for the
1442: monitor routine (may be NULL)
1443: - dest - [optional] function to destroy the monitor context when the monitors are cancelled or the Tao is destroyed (may be NULL)
1444: Calling sequence of mymonitor:
1445: .vb
1446: PetscErrorCode mymonitor(Tao tao,void *mctx)
1447: .ve
1449: + tao - the Tao solver context
1450: - mctx - [optional] monitoring context
1452: Options Database Keys:
1453: + -tao_monitor - sets the default monitor `TaoMonitorDefault()`
1454: . -tao_smonitor - sets short monitor
1455: . -tao_cmonitor - same as smonitor plus constraint norm
1456: . -tao_view_solution - view solution at each iteration
1457: . -tao_view_gradient - view gradient at each iteration
1458: . -tao_view_ls_residual - view least-squares residual vector at each iteration
1459: - -tao_cancelmonitors - cancels all monitors that have been hardwired into a code by calls to TaoSetMonitor(), but does not cancel those set via the options database.
1461: Notes:
1462: Several different monitoring routines may be set by calling
1463: `TaoSetMonitor()` multiple times; all will be called in the
1464: order in which they were set.
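Example Usage:
An illustrative sketch; MyMonitor is a hypothetical user routine matching the calling sequence above, here printing the iteration number, objective value, and residual.
.vb
  PetscErrorCode MyMonitor(Tao tao, void *mctx)
  {
    PetscInt  its;
    PetscReal f, gnorm;

    PetscCall(TaoGetSolutionStatus(tao, &its, &f, &gnorm, NULL, NULL, NULL));
    PetscCall(PetscPrintf(PETSC_COMM_WORLD, "%3" PetscInt_FMT " TAO: f = %g, ||g|| = %g\n", its, (double)f, (double)gnorm));
    return 0;
  }

  PetscCall(TaoSetMonitor(tao, MyMonitor, NULL, NULL));
.ve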
1466: Fortran Note:
1467: Only one monitor function may be set
1469: Level: intermediate
1471: .seealso: `Tao`, `TaoSolve()`, `TaoMonitorDefault()`, `TaoCancelMonitors()`, `TaoSetDestroyRoutine()`, `TaoView()`
1472: @*/
1473: PetscErrorCode TaoSetMonitor(Tao tao, PetscErrorCode (*func)(Tao, void *), void *ctx, PetscErrorCode (*dest)(void **))
1474: {
1475: PetscInt i;
1476: PetscBool identical;
1481: for (i = 0; i < tao->numbermonitors; i++) {
1482: PetscMonitorCompare((PetscErrorCode(*)(void))func, ctx, dest, (PetscErrorCode(*)(void))tao->monitor[i], tao->monitorcontext[i], tao->monitordestroy[i], &identical);
1483: if (identical) return 0;
1484: }
1485: tao->monitor[tao->numbermonitors] = func;
1486: tao->monitorcontext[tao->numbermonitors] = (void *)ctx;
1487: tao->monitordestroy[tao->numbermonitors] = dest;
1488: ++tao->numbermonitors;
1489: return 0;
1490: }
1492: /*@
1493: TaoCancelMonitors - Clears all the monitor functions for a Tao object.
1495: Logically Collective
1497: Input Parameters:
1498: . tao - the Tao solver context
1500: Options Database Key:
1501: . -tao_cancelmonitors - cancels all monitors that have been hardwired
1502: into a code by calls to `TaoSetMonitor()`, but does not cancel those
1503: set via the options database
1505: Notes:
1506: There is no way to clear one specific monitor from a Tao object.
1508: Level: advanced
1510: .seealso: `Tao`, `TaoMonitorDefault()`, `TaoSetMonitor()`
1511: @*/
1512: PetscErrorCode TaoCancelMonitors(Tao tao)
1513: {
1514: PetscInt i;
1517: for (i = 0; i < tao->numbermonitors; i++) {
1518: if (tao->monitordestroy[i]) (*tao->monitordestroy[i])(&tao->monitorcontext[i]);
1519: }
1520: tao->numbermonitors = 0;
1521: return 0;
1522: }
1524: /*@
1525: TaoMonitorDefault - Default routine for monitoring progress of the
1526: Tao solvers (default). This monitor prints the function value and gradient
1527: norm at each iteration. It can be turned on from the command line using the
1528: -tao_monitor option
1530: Collective
1532: Input Parameters:
1533: + tao - the Tao context
1534: - ctx - `PetscViewer` context or NULL
1536: Options Database Keys:
1537: . -tao_monitor - turn on default monitoring
1539: Level: advanced
1541: .seealso: `TaoDefaultSMonitor()`, `TaoSetMonitor()`
1542: @*/
1543: PetscErrorCode TaoMonitorDefault(Tao tao, void *ctx)
1544: {
1545: PetscInt its, tabs;
1546: PetscReal fct, gnorm;
1547: PetscViewer viewer = (PetscViewer)ctx;
1551: its = tao->niter;
1552: fct = tao->fc;
1553: gnorm = tao->residual;
1554: PetscViewerASCIIGetTab(viewer, &tabs);
1555: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1556: if (its == 0 && ((PetscObject)tao)->prefix && !tao->header_printed) {
1557: PetscViewerASCIIPrintf(viewer, " Iteration information for %s solve.\n", ((PetscObject)tao)->prefix);
1558: tao->header_printed = PETSC_TRUE;
1559: }
1560: PetscViewerASCIIPrintf(viewer, "%3" PetscInt_FMT " TAO,", its);
1561: PetscViewerASCIIPrintf(viewer, " Function value: %g,", (double)fct);
1562: if (gnorm >= PETSC_INFINITY) {
1563: PetscViewerASCIIPrintf(viewer, " Residual: Inf \n");
1564: } else {
1565: PetscViewerASCIIPrintf(viewer, " Residual: %g \n", (double)gnorm);
1566: }
1567: PetscViewerASCIISetTab(viewer, tabs);
1568: return 0;
1569: }
1571: /*@
1572: TaoDefaultGMonitor - Default routine for monitoring progress of the
1573: Tao solvers (default) with extra detail on the globalization method.
1574: This monitor prints the function value and gradient norm at each
1575: iteration, as well as the step size and trust radius. Note that the
1576: step size and trust radius may be the same for some algorithms.
1577: It can be turned on from the command line using the
1578: -tao_gmonitor option
1580: Collective
1582: Input Parameters:
1583: + tao - the Tao context
1584: - ctx - `PetscViewer` context or NULL
1586: Options Database Keys:
1587: . -tao_gmonitor - turn on monitoring with globalization information
1589: Level: advanced
1591: .seealso: `TaoDefaultSMonitor()`, `TaoSetMonitor()`
1592: @*/
1593: PetscErrorCode TaoDefaultGMonitor(Tao tao, void *ctx)
1594: {
1595: PetscInt its, tabs;
1596: PetscReal fct, gnorm, stp, tr;
1597: PetscViewer viewer = (PetscViewer)ctx;
1601: its = tao->niter;
1602: fct = tao->fc;
1603: gnorm = tao->residual;
1604: stp = tao->step;
1605: tr = tao->trust;
1606: PetscViewerASCIIGetTab(viewer, &tabs);
1607: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1608: if (its == 0 && ((PetscObject)tao)->prefix && !tao->header_printed) {
1609: PetscViewerASCIIPrintf(viewer, " Iteration information for %s solve.\n", ((PetscObject)tao)->prefix);
1610: tao->header_printed = PETSC_TRUE;
1611: }
1612: PetscViewerASCIIPrintf(viewer, "%3" PetscInt_FMT " TAO,", its);
1613: PetscViewerASCIIPrintf(viewer, " Function value: %g,", (double)fct);
1614: if (gnorm >= PETSC_INFINITY) {
1615: PetscViewerASCIIPrintf(viewer, " Residual: Inf,");
1616: } else {
1617: PetscViewerASCIIPrintf(viewer, " Residual: %g,", (double)gnorm);
1618: }
1619: PetscViewerASCIIPrintf(viewer, " Step: %g, Trust: %g\n", (double)stp, (double)tr);
1620: PetscViewerASCIISetTab(viewer, tabs);
1621: return 0;
1622: }
1624: /*@
1625: TaoDefaultSMonitor - Default routine for monitoring progress of the
1626: solver. Same as `TaoMonitorDefault()` except
1627: it prints fewer digits of the residual as the residual gets smaller.
1628: This is because the later digits are meaningless and are often
1629: different on different machines; by using this routine different
1630: machines will usually generate the same output. It can be turned on
1631: by using the -tao_smonitor option
1633: Collective
1635: Input Parameters:
1636: + tao - the Tao context
1637: - ctx - PetscViewer context of type ASCII
1639: Options Database Keys:
1640: . -tao_smonitor - turn on default short monitoring
1642: Level: advanced
1644: .seealso: `TaoMonitorDefault()`, `TaoSetMonitor()`
1645: @*/
1646: PetscErrorCode TaoDefaultSMonitor(Tao tao, void *ctx)
1647: {
1648: PetscInt its, tabs;
1649: PetscReal fct, gnorm;
1650: PetscViewer viewer = (PetscViewer)ctx;
1654: its = tao->niter;
1655: fct = tao->fc;
1656: gnorm = tao->residual;
1657: PetscViewerASCIIGetTab(viewer, &tabs);
1658: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1659: PetscViewerASCIIPrintf(viewer, "iter = %3" PetscInt_FMT ",", its);
1660: PetscViewerASCIIPrintf(viewer, " Function value %g,", (double)fct);
1661: if (gnorm >= PETSC_INFINITY) {
1662: PetscViewerASCIIPrintf(viewer, " Residual: Inf \n");
1663: } else if (gnorm > 1.e-6) {
1664: PetscViewerASCIIPrintf(viewer, " Residual: %g \n", (double)gnorm);
1665: } else if (gnorm > 1.e-11) {
1666: PetscViewerASCIIPrintf(viewer, " Residual: < 1.0e-6 \n");
1667: } else {
1668: PetscViewerASCIIPrintf(viewer, " Residual: < 1.0e-11 \n");
1669: }
1670: PetscViewerASCIISetTab(viewer, tabs);
1671: return 0;
1672: }
1674: /*@
1675: TaoDefaultCMonitor - same as `TaoMonitorDefault()` except
1676: it prints the norm of the constraints function. It can be turned on
1677: from the command line using the -tao_cmonitor option
1679: Collective
1681: Input Parameters:
1682: + tao - the Tao context
1683: - ctx - `PetscViewer` context or NULL
1685: Options Database Keys:
1686: . -tao_cmonitor - monitor the constraints
1688: Level: advanced
1690: .seealso: `TaoMonitorDefault()`, `TaoSetMonitor()`
1691: @*/
1692: PetscErrorCode TaoDefaultCMonitor(Tao tao, void *ctx)
1693: {
1694: PetscInt its, tabs;
1695: PetscReal fct, gnorm;
1696: PetscViewer viewer = (PetscViewer)ctx;
1700: its = tao->niter;
1701: fct = tao->fc;
1702: gnorm = tao->residual;
1703: PetscViewerASCIIGetTab(viewer, &tabs);
1704: PetscViewerASCIISetTab(viewer, ((PetscObject)tao)->tablevel);
1705: PetscViewerASCIIPrintf(viewer, "iter = %" PetscInt_FMT ",", its);
1706: PetscViewerASCIIPrintf(viewer, " Function value: %g,", (double)fct);
1707: PetscViewerASCIIPrintf(viewer, " Residual: %g ", (double)gnorm);
1708: PetscViewerASCIIPrintf(viewer, " Constraint: %g \n", (double)tao->cnorm);
1709: PetscViewerASCIISetTab(viewer, tabs);
1710: return 0;
1711: }
1713: /*@C
1714: TaoSolutionMonitor - Views the solution at each iteration.
1715: It can be turned on from the command line using the
1716: -tao_view_solution option
1718: Collective
1720: Input Parameters:
1721: + tao - the Tao context
1722: - ctx - `PetscViewer` context or NULL
1724: Options Database Keys:
1725: . -tao_view_solution - view the solution
1727: Level: advanced
1729: .seealso: `TaoDefaultSMonitor()`, `TaoSetMonitor()`
1730: @*/
1731: PetscErrorCode TaoSolutionMonitor(Tao tao, void *ctx)
1732: {
1733: PetscViewer viewer = (PetscViewer)ctx;
1737: VecView(tao->solution, viewer);
1738: return 0;
1739: }
1741: /*@C
1742: TaoGradientMonitor - Views the gradient at each iteration.
1743: It can be turned on from the command line using the
1744: -tao_view_gradient option
1746: Collective
1748: Input Parameters:
1749: + tao - the Tao context
1750: - ctx - `PetscViewer` context or NULL
1752: Options Database Keys:
1753: . -tao_view_gradient - view the gradient at each iteration
1755: Level: advanced
1757: .seealso: `TaoDefaultSMonitor()`, `TaoSetMonitor()`
1758: @*/
1759: PetscErrorCode TaoGradientMonitor(Tao tao, void *ctx)
1760: {
1761: PetscViewer viewer = (PetscViewer)ctx;
1765: VecView(tao->gradient, viewer);
1766: return 0;
1767: }
1769: /*@C
1770: TaoStepDirectionMonitor - Views the step-direction at each iteration
1772: Collective
1774: Input Parameters:
1775: + tao - the Tao context
1776: - ctx - `PetscViewer` context or NULL
1778: Options Database Keys:
1779: . -tao_view_stepdirection - view the step direction at each iteration
1781: Level: advanced
1783: .seealso: `TaoDefaultSMonitor()`, `TaoSetMonitor()`
1784: @*/
1785: PetscErrorCode TaoStepDirectionMonitor(Tao tao, void *ctx)
1786: {
1787: PetscViewer viewer = (PetscViewer)ctx;
1791: VecView(tao->stepdirection, viewer);
1792: return 0;
1793: }
1795: /*@C
1796: TaoDrawSolutionMonitor - Plots the solution at each iteration.
1797: It can be turned on from the command line using the
1798: -tao_draw_solution option
1800: Collective
1802: Input Parameters:
1803: + tao - the Tao context
1804: - ctx - `TaoMonitorDrawCtx` context
1806: Options Database Keys:
1807: . -tao_draw_solution - draw the solution at each iteration
1809: Level: advanced
1811: .seealso: `TaoSolutionMonitor()`, `TaoSetMonitor()`, `TaoDrawGradientMonitor()`, `TaoMonitorDrawCtx`
1812: @*/
1813: PetscErrorCode TaoDrawSolutionMonitor(Tao tao, void *ctx)
1814: {
1815: TaoMonitorDrawCtx ictx = (TaoMonitorDrawCtx)ctx;
1818: if (!(((ictx->howoften > 0) && (!(tao->niter % ictx->howoften))) || ((ictx->howoften == -1) && tao->reason))) return 0;
1819: VecView(tao->solution, ictx->viewer);
1820: return 0;
1821: }
1823: /*@C
1824: TaoDrawGradientMonitor - Plots the gradient at each iteration.
1825: It can be turned on from the command line using the
1826: -tao_draw_gradient option
1828: Collective
1830: Input Parameters:
1831: + tao - the Tao context
1832: - ctx - `TaoMonitorDrawCtx` context
1834: Options Database Keys:
1835: . -tao_draw_gradient - draw the gradient at each iteration
1837: Level: advanced
1839: .seealso: `TaoGradientMonitor()`, `TaoSetMonitor()`, `TaoDrawSolutionMonitor`
1840: @*/
1841: PetscErrorCode TaoDrawGradientMonitor(Tao tao, void *ctx)
1842: {
1843: TaoMonitorDrawCtx ictx = (TaoMonitorDrawCtx)ctx;
1846: if (!(((ictx->howoften > 0) && (!(tao->niter % ictx->howoften))) || ((ictx->howoften == -1) && tao->reason))) return 0;
1847: VecView(tao->gradient, ictx->viewer);
1848: return 0;
1849: }
1851: /*@C
1852: TaoDrawStepMonitor - Plots the step direction at each iteration
1854: Collective
1856: Input Parameters:
1857: + tao - the Tao context
1858: - ctx - `PetscViewer` context
1860: Options Database Keys:
1861: . -tao_draw_step - draw the step direction at each iteration
1863: Level: advanced
1865: .seealso: `TaoSetMonitor()`, `TaoDrawSolutionMonitor`
1866: @*/
1867: PetscErrorCode TaoDrawStepMonitor(Tao tao, void *ctx)
1868: {
1869: PetscViewer viewer = (PetscViewer)ctx;
1873: VecView(tao->stepdirection, viewer);
1874: return 0;
1875: }
1877: /*@C
1878: TaoResidualMonitor - Views the least-squares residual at each iteration
1880: Collective
1882: Input Parameters:
1883: + tao - the Tao context
1884: - ctx - `PetscViewer` context or NULL
1886: Options Database Keys:
1887: . -tao_view_ls_residual - view the least-squares residual at each iteration
1889: Level: advanced
1891: .seealso: `TaoDefaultSMonitor()`, `TaoSetMonitor()`
1892: @*/
1893: PetscErrorCode TaoResidualMonitor(Tao tao, void *ctx)
1894: {
1895: PetscViewer viewer = (PetscViewer)ctx;
1899: VecView(tao->ls_res, viewer);
1900: return 0;
1901: }
1903: /*@
1904: TaoDefaultConvergenceTest - Determines whether the solver should continue iterating
1905: or terminate.
1907: Collective
1909: Input Parameters:
1910: + tao - the Tao context
1911: - dummy - unused dummy context
1913: Output Parameter:
1914: . reason - the termination reason, which is stored in the `Tao` context
1916: Notes:
1917: This routine checks the residual in the optimality conditions, the
1918: relative residual in the optimality conditions, the number of function
1919: evaluations, and the function value to test convergence. Some
1920: solvers may use different convergence routines.
1922: Level: developer
1924: .seealso: `TaoSetTolerances()`, `TaoGetConvergedReason()`, `TaoSetConvergedReason()`
1925: @*/
1927: PetscErrorCode TaoDefaultConvergenceTest(Tao tao, void *dummy)
1928: {
1929: PetscInt niter = tao->niter, nfuncs = PetscMax(tao->nfuncs, tao->nfuncgrads);
1930: PetscInt max_funcs = tao->max_funcs;
1931: PetscReal gnorm = tao->residual, gnorm0 = tao->gnorm0;
1932: PetscReal f = tao->fc, steptol = tao->steptol, trradius = tao->step;
1933: PetscReal gatol = tao->gatol, grtol = tao->grtol, gttol = tao->gttol;
1934: PetscReal catol = tao->catol, crtol = tao->crtol;
1935: PetscReal fmin = tao->fmin, cnorm = tao->cnorm;
1936: TaoConvergedReason reason = tao->reason;
1939: if (reason != TAO_CONTINUE_ITERATING) return 0;
1941: if (PetscIsInfOrNanReal(f)) {
1942: PetscInfo(tao, "Failed to converge, function value is Inf or NaN\n");
1943: reason = TAO_DIVERGED_NAN;
1944: } else if (f <= fmin && cnorm <= catol) {
1945: PetscInfo(tao, "Converged due to function value %g < minimum function value %g\n", (double)f, (double)fmin);
1946: reason = TAO_CONVERGED_MINF;
1947: } else if (gnorm <= gatol && cnorm <= catol) {
1948: PetscInfo(tao, "Converged due to residual norm ||g(X)||=%g < %g\n", (double)gnorm, (double)gatol);
1949: reason = TAO_CONVERGED_GATOL;
1950: } else if (f != 0 && PetscAbsReal(gnorm / f) <= grtol && cnorm <= crtol) {
1951: PetscInfo(tao, "Converged due to residual ||g(X)||/|f(X)| =%g < %g\n", (double)(gnorm / f), (double)grtol);
1952: reason = TAO_CONVERGED_GRTOL;
1953: } else if (gnorm0 != 0 && ((gttol == 0 && gnorm == 0) || gnorm / gnorm0 < gttol) && cnorm <= crtol) {
1954: PetscInfo(tao, "Converged due to relative residual norm ||g(X)||/||g(X0)|| = %g < %g\n", (double)(gnorm / gnorm0), (double)gttol);
1955: reason = TAO_CONVERGED_GTTOL;
1956: } else if (max_funcs >= 0 && nfuncs > max_funcs) {
1957: PetscInfo(tao, "Exceeded maximum number of function evaluations: %" PetscInt_FMT " > %" PetscInt_FMT "\n", nfuncs, max_funcs);
1958: reason = TAO_DIVERGED_MAXFCN;
1959: } else if (tao->lsflag != 0) {
1960: PetscInfo(tao, "Tao Line Search failure.\n");
1961: reason = TAO_DIVERGED_LS_FAILURE;
1962: } else if (trradius < steptol && niter > 0) {
1963: PetscInfo(tao, "Trust region/step size too small: %g < %g\n", (double)trradius, (double)steptol);
1964: reason = TAO_CONVERGED_STEPTOL;
1965: } else if (niter >= tao->max_it) {
1966: PetscInfo(tao, "Exceeded maximum number of iterations: %" PetscInt_FMT " > %" PetscInt_FMT "\n", niter, tao->max_it);
1967: reason = TAO_DIVERGED_MAXITS;
1968: } else {
1969: reason = TAO_CONTINUE_ITERATING;
1970: }
1971: tao->reason = reason;
1972: return 0;
1973: }
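/*
   Example usage (an illustrative sketch, not part of the original source; MyConvergenceTest
   and the target value are hypothetical): a user-defined test that stops once the objective
   drops below a target and otherwise falls back to the default test above.
.vb
   static PetscErrorCode MyConvergenceTest(Tao tao, void *ctx)
   {
     PetscReal f, target = *(PetscReal *)ctx;

     TaoGetSolutionStatus(tao, NULL, &f, NULL, NULL, NULL, NULL);
     if (f < target) TaoSetConvergedReason(tao, TAO_CONVERGED_USER);
     else TaoDefaultConvergenceTest(tao, NULL);
     return 0;
   }

   // registered before TaoSolve() with
   TaoSetConvergenceTest(tao, MyConvergenceTest, &target);
.ve
*/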
1975: /*@C
1976: TaoSetOptionsPrefix - Sets the prefix used for searching for all
1977: Tao options in the database.
1979: Logically Collective
1981: Input Parameters:
1982: + tao - the Tao context
1983: - prefix - the prefix string to prepend to all Tao option requests
1985: Notes:
1986: A hyphen (-) must NOT be given at the beginning of the prefix name.
1987: The first character of all runtime options is AUTOMATICALLY the hyphen.
1989: For example, to distinguish between the runtime options for two
1990: different Tao solvers, one could call
1991: .vb
1992: TaoSetOptionsPrefix(tao1,"sys1_")
1993: TaoSetOptionsPrefix(tao2,"sys2_")
1994: .ve
1996: This would enable use of different options for each system, such as
1997: .vb
1998: -sys1_tao_type blmvm -sys1_tao_grtol 1.e-3
1999: -sys2_tao_type lmvm -sys2_tao_grtol 1.e-4
2000: .ve
2002: Level: advanced
2004: .seealso: `TaoSetFromOptions()`, `TaoAppendOptionsPrefix()`, `TaoGetOptionsPrefix()`
2005: @*/
2007: PetscErrorCode TaoSetOptionsPrefix(Tao tao, const char p[])
2008: {
2010: PetscObjectSetOptionsPrefix((PetscObject)tao, p);
2011: if (tao->linesearch) TaoLineSearchSetOptionsPrefix(tao->linesearch, p);
2012: if (tao->ksp) KSPSetOptionsPrefix(tao->ksp, p);
2013: return 0;
2014: }
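/*
   Example usage (an illustrative sketch, not part of the original source): configure two
   independent solvers from the options database using distinct prefixes, as described above.
.vb
   Tao tao1, tao2;

   TaoCreate(PETSC_COMM_WORLD, &tao1);
   TaoCreate(PETSC_COMM_WORLD, &tao2);
   TaoSetOptionsPrefix(tao1, "sys1_");
   TaoSetOptionsPrefix(tao2, "sys2_");
   TaoSetFromOptions(tao1);   // picks up -sys1_tao_type, -sys1_tao_grtol, ...
   TaoSetFromOptions(tao2);   // picks up -sys2_tao_type, -sys2_tao_grtol, ...
.ve
*/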
2016: /*@C
2017: TaoAppendOptionsPrefix - Appends to the prefix used for searching for all
2018: Tao options in the database.
2020: Logically Collective
2022: Input Parameters:
2023: + tao - the Tao solver context
2024: - prefix - the prefix string to prepend to all Tao option requests
2026: Note:
2027: A hyphen (-) must NOT be given at the beginning of the prefix name.
2028: The first character of all runtime options is automatically the hyphen.
2030: Level: advanced
2032: .seealso: `TaoSetFromOptions()`, `TaoSetOptionsPrefix()`, `TaoGetOptionsPrefix()`
2033: @*/
2034: PetscErrorCode TaoAppendOptionsPrefix(Tao tao, const char p[])
2035: {
2037: PetscObjectAppendOptionsPrefix((PetscObject)tao, p);
2038: if (tao->linesearch) PetscObjectAppendOptionsPrefix((PetscObject)tao->linesearch, p);
2039: if (tao->ksp) KSPAppendOptionsPrefix(tao->ksp, p);
2040: return 0;
2041: }
2043: /*@C
2044: TaoGetOptionsPrefix - Gets the prefix used for searching for all
2045: Tao options in the database
2047: Not Collective
2049: Input Parameter:
2050: . tao - the Tao context
2052: Output Parameter:
2053: . prefix - pointer to the prefix string used
2055: Fortran Note:
2056: On the Fortran side, the user should pass in a string 'prefix' of
2057: sufficient length to hold the prefix.
2059: Level: advanced
2061: .seealso: `TaoSetFromOptions()`, `TaoSetOptionsPrefix()`, `TaoAppendOptionsPrefix()`
2062: @*/
2063: PetscErrorCode TaoGetOptionsPrefix(Tao tao, const char *p[])
2064: {
2066: PetscObjectGetOptionsPrefix((PetscObject)tao, p);
2067: return 0;
2068: }
2070: /*@C
2071: TaoSetType - Sets the method for the Tao optimization solver.
2073: Collective
2075: Input Parameters:
2076: + solver - the Tao solver context
2077: - type - a known method
2079: Options Database Key:
2080: . -tao_type <type> - Sets the method; use -help for a list
2081: of available methods (for instance, "-tao_type lmvm" or "-tao_type tron")
2083: Available methods include:
2084: + `TAONLS` - nls Newton's method with line search for unconstrained minimization
2085: . `TAONTR` - ntr Newton's method with trust region for unconstrained minimization
2086: . `TAONTL` - ntl Newton's method with trust region, line search for unconstrained minimization
2087: . `TAOLMVM` - lmvm Limited memory variable metric method for unconstrained minimization
2088: . `TAOCG` - cg Nonlinear conjugate gradient method for unconstrained minimization
2089: . `TAONM` - nm Nelder-Mead algorithm for derivative-free unconstrained minimization
2090: . `TAOTRON` - tron Newton Trust Region method for bound constrained minimization
2091: . `TAOGPCG` - gpcg Newton Trust Region method for quadratic bound constrained minimization
2092: . `TAOBLMVM` - blmvm Limited memory variable metric method for bound constrained minimization
2093: . `TAOLCL` - lcl Linearly constrained Lagrangian method for pde-constrained minimization
2094: - `TAOPOUNDERS` - pounders Model-based algorithm for nonlinear least squares
2096: Level: intermediate
2098: .seealso: `Tao`, `TaoCreate()`, `TaoGetType()`, `TaoType`
2100: @*/
2101: PetscErrorCode TaoSetType(Tao tao, TaoType type)
2102: {
2103: PetscErrorCode (*create_xxx)(Tao);
2104: PetscBool issame;
2108: PetscObjectTypeCompare((PetscObject)tao, type, &issame);
2109: if (issame) return 0;
2111: PetscFunctionListFind(TaoList, type, (void (**)(void))&create_xxx);
2114: /* Destroy the existing solver information */
2115: PetscTryTypeMethod(tao, destroy);
2116: KSPDestroy(&tao->ksp);
2117: TaoLineSearchDestroy(&tao->linesearch);
2118: tao->ops->setup = NULL;
2119: tao->ops->solve = NULL;
2120: tao->ops->view = NULL;
2121: tao->ops->setfromoptions = NULL;
2122: tao->ops->destroy = NULL;
2124: tao->setupcalled = PETSC_FALSE;
2126: (*create_xxx)(tao);
2127: PetscObjectChangeTypeName((PetscObject)tao, type);
2128: return 0;
2129: }
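/*
   Example usage (an illustrative sketch, not part of the original source; the solution
   vector and objective/gradient callbacks are assumed to be set elsewhere): select a
   default method in the code and let the command line override it.
.vb
   Tao                tao;
   TaoConvergedReason reason;

   TaoCreate(PETSC_COMM_WORLD, &tao);
   TaoSetType(tao, TAOLMVM);        // default choice
   TaoSetFromOptions(tao);          // -tao_type <type> overrides it
   // ... set the solution vector and objective/gradient callbacks here ...
   TaoSolve(tao);
   TaoGetConvergedReason(tao, &reason);
   TaoDestroy(&tao);
.ve
*/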
2131: /*@C
2132: TaoRegister - Adds a method to the Tao package for minimization.
2134: Synopsis:
2135: TaoRegister(const char sname[], PetscErrorCode (*func)(Tao))
2137: Not collective
2139: Input Parameters:
2140: + sname - name of a new user-defined solver
2141: - func - routine to Create method context
2143: Sample usage:
2144: .vb
2145: TaoRegister("my_solver",MySolverCreate);
2146: .ve
2148: Then, your solver can be chosen with the procedural interface via
2149: $ TaoSetType(tao,"my_solver")
2150: or at runtime via the option
2151: $ -tao_type my_solver
2153: Level: advanced
2155: Note:
2156: `TaoRegister()` may be called multiple times to add several user-defined solvers.
2158: .seealso: `Tao`, `TaoSetType()`, `TaoRegisterAll()`, `TaoRegisterDestroy()`
2159: @*/
2160: PetscErrorCode TaoRegister(const char sname[], PetscErrorCode (*func)(Tao))
2161: {
2162: TaoInitializePackage();
2163: PetscFunctionListAdd(&TaoList, sname, (void (*)(void))func);
2164: return 0;
2165: }
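/*
   Example usage (an illustrative sketch, not part of the original source; MySolver is
   hypothetical and the solve body is deliberately trivial): the create routine fills in
   the function table that TaoSetType() installs. Such a routine lives in a source file
   that, like this one, includes petsc/private/taoimpl.h so the ops table is visible.
.vb
   static PetscErrorCode TaoSolve_MySolver(Tao tao)
   {
     // a real implementation would iterate here, calling TaoMonitor()
     // and a convergence test on every pass
     tao->reason = TAO_CONVERGED_USER;
     return 0;
   }

   static PetscErrorCode TaoCreate_MySolver(Tao tao)
   {
     tao->ops->solve = TaoSolve_MySolver;
     return 0;
   }

   TaoRegister("my_solver", TaoCreate_MySolver);
   TaoSetType(tao, "my_solver");    // or -tao_type my_solver at runtime
.ve
*/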
2167: /*@C
2168: TaoRegisterDestroy - Frees the list of minimization solvers that were
2169: registered by `TaoRegister()`.
2171: Not Collective
2173: Level: advanced
2175: .seealso: `Tao`, `TaoRegisterAll()`, `TaoRegister()`
2176: @*/
2177: PetscErrorCode TaoRegisterDestroy(void)
2178: {
2179: PetscFunctionListDestroy(&TaoList);
2180: TaoRegisterAllCalled = PETSC_FALSE;
2181: return 0;
2182: }
2184: /*@
2185: TaoGetIterationNumber - Gets the number of Tao iterations completed
2186: at this time.
2188: Not Collective
2190: Input Parameter:
2191: . tao - Tao context
2193: Output Parameter:
2194: . iter - iteration number
2196: Notes:
2197: For example, during the computation of iteration 2 this would return 1.
2199: Level: intermediate
2201: .seealso: `Tao`, `TaoGetLinearSolveIterations()`, `TaoGetResidualNorm()`, `TaoGetObjective()`
2202: @*/
2203: PetscErrorCode TaoGetIterationNumber(Tao tao, PetscInt *iter)
2204: {
2207: *iter = tao->niter;
2208: return 0;
2209: }
2211: /*@
2212: TaoGetResidualNorm - Gets the current value of the norm of the residual
2213: at this time.
2215: Not Collective
2217: Input Parameter:
2218: . tao - Tao context
2220: Output Parameter:
2221: . value - the current value
2223: Level: intermediate
2225: Developer Note:
2226: This is the 2-norm of the residual; we cannot use `TaoGetGradientNorm()` because that has
2227: a different meaning. For some reason Tao sometimes calls the gradient the residual.
2229: .seealso: `Tao`, `TaoGetLinearSolveIterations()`, `TaoGetIterationNumber()`, `TaoGetObjective()`
2230: @*/
2231: PetscErrorCode TaoGetResidualNorm(Tao tao, PetscReal *value)
2232: {
2235: *value = tao->residual;
2236: return 0;
2237: }
2239: /*@
2240: TaoSetIterationNumber - Sets the current iteration number.
2242: Logically Collective
2244: Input Parameters:
2245: + tao - Tao context
2246: - iter - iteration number
2248: Level: developer
2250: .seealso: `Tao`, `TaoGetLinearSolveIterations()`
2251: @*/
2252: PetscErrorCode TaoSetIterationNumber(Tao tao, PetscInt iter)
2253: {
2256: PetscObjectSAWsTakeAccess((PetscObject)tao);
2257: tao->niter = iter;
2258: PetscObjectSAWsGrantAccess((PetscObject)tao);
2259: return 0;
2260: }
2262: /*@
2263: TaoGetTotalIterationNumber - Gets the total number of Tao iterations
2264: completed. This number keeps accumulating if multiple solves
2265: are called with the Tao object.
2267: Not Collective
2269: Input Parameter:
2270: . tao - Tao context
2272: Output Parameter:
2273: . iter - iteration number
2275: Level: intermediate
2277: Notes:
2278: The total iteration count is updated after each solve; if a `TaoSolve()` is
2279: currently in progress, those iterations are not yet counted.
2281: .seealso: `Tao`, `TaoGetLinearSolveIterations()`
2282: @*/
2283: PetscErrorCode TaoGetTotalIterationNumber(Tao tao, PetscInt *iter)
2284: {
2287: *iter = tao->ntotalits;
2288: return 0;
2289: }
2291: /*@
2292: TaoSetTotalIterationNumber - Sets the current total iteration number.
2294: Logically Collective
2296: Input Parameters:
2297: + tao - Tao context
2298: - iter - iteration number
2300: Level: developer
2302: .seealso: `Tao`, `TaoGetLinearSolveIterations()`
2303: @*/
2304: PetscErrorCode TaoSetTotalIterationNumber(Tao tao, PetscInt iter)
2305: {
2308: PetscObjectSAWsTakeAccess((PetscObject)tao);
2309: tao->ntotalits = iter;
2310: PetscObjectSAWsGrantAccess((PetscObject)tao);
2311: return 0;
2312: }
2314: /*@
2315: TaoSetConvergedReason - Sets the termination flag on a Tao object
2317: Logically Collective
2319: Input Parameters:
2320: + tao - the Tao context
2321: - reason - one of
2322: .vb
2323: TAO_CONVERGED_GATOL (3),
2324: TAO_CONVERGED_GRTOL (4),
2325: TAO_CONVERGED_GTTOL (5),
2326: TAO_CONVERGED_STEPTOL (6),
2327: TAO_CONVERGED_MINF (7),
      TAO_CONVERGED_USER (8),
2328: TAO_DIVERGED_MAXITS (-2),
2329: TAO_DIVERGED_NAN (-4),
2330: TAO_DIVERGED_MAXFCN (-5),
2331: TAO_DIVERGED_LS_FAILURE (-6),
2332: TAO_DIVERGED_TR_REDUCTION (-7),
2333: TAO_DIVERGED_USER (-8),
2334: TAO_CONTINUE_ITERATING (0)
2335: .ve
2337: Level: intermediate
2339: .seealso: `Tao`
2340: @*/
2341: PetscErrorCode TaoSetConvergedReason(Tao tao, TaoConvergedReason reason)
2342: {
2345: tao->reason = reason;
2346: return 0;
2347: }
2349: /*@
2350: TaoGetConvergedReason - Gets the reason the Tao iteration was stopped.
2352: Not Collective
2354: Input Parameter:
2355: . tao - the Tao solver context
2357: Output Parameter:
2358: . reason - one of
2359: .vb
2360: TAO_CONVERGED_GATOL (3) ||g(X)|| < gatol
2361: TAO_CONVERGED_GRTOL (4) ||g(X)|| / |f(X)| < grtol
2362: TAO_CONVERGED_GTTOL (5) ||g(X)|| / ||g(X0)|| < gttol
2363: TAO_CONVERGED_STEPTOL (6) step size small
2364: TAO_CONVERGED_MINF (7) F < F_min
2365: TAO_CONVERGED_USER (8) User defined
2366: TAO_DIVERGED_MAXITS (-2) its > maxits
2367: TAO_DIVERGED_NAN (-4) Numerical problems
2368: TAO_DIVERGED_MAXFCN (-5) fevals > max_funcs
2369: TAO_DIVERGED_LS_FAILURE (-6) line search failure
2370: TAO_DIVERGED_TR_REDUCTION (-7) trust region failure
2371: TAO_DIVERGED_USER (-8) (user defined)
2372: TAO_CONTINUE_ITERATING (0)
2373: .ve
2375: where
2376: + X - current solution
2377: . X0 - initial guess
2378: . f(X) - current function value
2379: . f(X*) - true solution (estimated)
2380: . g(X) - current gradient
2381: . its - current iterate number
2382: . maxits - maximum number of iterates
2383: . fevals - number of function evaluations
2384: - max_funcs - maximum number of function evaluations
2386: Level: intermediate
2388: .seealso: `TaoSetConvergenceTest()`, `TaoSetTolerances()`
2389: @*/
2390: PetscErrorCode TaoGetConvergedReason(Tao tao, TaoConvergedReason *reason)
2391: {
2394: *reason = tao->reason;
2395: return 0;
2396: }
2398: /*@
2399: TaoGetSolutionStatus - Gets the current iterate number, objective value,
2400: residual, infeasibility, and termination reason
2402: Not Collective
2404: Input Parameter:
2405: . tao - the Tao context
2407: Output Parameters:
2408: + its - the current iterate number (>=0)
2409: . f - the current function value
2410: . gnorm - the square of the gradient norm, duality gap, or other measure indicating distance from optimality.
2411: . cnorm - the infeasibility of the current solution with regard to the constraints.
2412: . xdiff - the step length or trust region radius of the most recent iterate.
2413: - reason - The termination reason, which can equal `TAO_CONTINUE_ITERATING`
2415: Level: intermediate
2417: Notes:
2418: Tao returns the values set by the solvers in the routine `TaoMonitor()`.
2420: If any of the output arguments are set to `NULL`, no corresponding value will be returned.
2422: .seealso: `TaoMonitor()`, `TaoGetConvergedReason()`
2423: @*/
2424: PetscErrorCode TaoGetSolutionStatus(Tao tao, PetscInt *its, PetscReal *f, PetscReal *gnorm, PetscReal *cnorm, PetscReal *xdiff, TaoConvergedReason *reason)
2425: {
2427: if (its) *its = tao->niter;
2428: if (f) *f = tao->fc;
2429: if (gnorm) *gnorm = tao->residual;
2430: if (cnorm) *cnorm = tao->cnorm;
2431: if (reason) *reason = tao->reason;
2432: if (xdiff) *xdiff = tao->step;
2433: return 0;
2434: }
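/*
   Example usage (an illustrative sketch, not part of the original source; MyMonitor is
   hypothetical): querying the solution status from inside a user monitor.
.vb
   static PetscErrorCode MyMonitor(Tao tao, void *ctx)
   {
     PetscInt  its;
     PetscReal f, gnorm;

     TaoGetSolutionStatus(tao, &its, &f, &gnorm, NULL, NULL, NULL);   // unused outputs may be NULL
     PetscPrintf(PetscObjectComm((PetscObject)tao), "iter %" PetscInt_FMT ": f = %g, residual = %g\n", its, (double)f, (double)gnorm);
     return 0;
   }
.ve
*/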
2436: /*@C
2437: TaoGetType - Gets the current Tao algorithm.
2439: Not Collective
2441: Input Parameter:
2442: . tao - the Tao solver context
2444: Output Parameter:
2445: . type - Tao method
2447: Level: intermediate
2449: .seealso: `Tao`, `TaoType`, `TaoSetType()`
2450: @*/
2451: PetscErrorCode TaoGetType(Tao tao, TaoType *type)
2452: {
2455: *type = ((PetscObject)tao)->type_name;
2456: return 0;
2457: }
2459: /*@C
2460: TaoMonitor - Monitor the solver and the current solution. This
2461: routine will record the iteration number and residual statistics,
2462: and call any monitors specified by the user.
2464: Input Parameters:
2465: + tao - the Tao context
2466: . its - the current iterate number (>=0)
2467: . f - the current objective function value
2468: . res - the gradient norm, square root of the duality gap, or other measure indicating distance from optimality. This measure will be recorded and
2469: used for some termination tests.
2470: . cnorm - the infeasibility of the current solution with regard to the constraints.
2471: - steplength - multiple of the step direction added to the previous iterate.
2473: Note:
2474: `TaoMonitor()` has no output arguments; the termination reason, which can equal `TAO_CONTINUE_ITERATING`, is stored in the `Tao` context.
2476: Options Database Key:
2477: . -tao_monitor - Use the default monitor, which prints statistics to standard output
2479: Level: developer
2481: .seealso: `Tao`, `TaoGetConvergedReason()`, `TaoMonitorDefault()`, `TaoSetMonitor()`
2482: @*/
2483: PetscErrorCode TaoMonitor(Tao tao, PetscInt its, PetscReal f, PetscReal res, PetscReal cnorm, PetscReal steplength)
2484: {
2485: PetscInt i;
2488: tao->fc = f;
2489: tao->residual = res;
2490: tao->cnorm = cnorm;
2491: tao->step = steplength;
2492: if (!its) {
2493: tao->cnorm0 = cnorm;
2494: tao->gnorm0 = res;
2495: }
2497: for (i = 0; i < tao->numbermonitors; i++) (*tao->monitor[i])(tao, tao->monitorcontext[i]);
2498: return 0;
2499: }
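/*
   Developer-level sketch (illustrative, not from the original source; f, gnorm and steplen
   stand for values computed by the iteration): a typical solver implementation records
   progress and then tests for convergence once per iteration. The infeasibility argument is
   passed as 0.0 for an unconstrained method.
.vb
   while (tao->reason == TAO_CONTINUE_ITERATING) {
     // ... take a step, producing the new objective value f,
     //     residual norm gnorm and step length steplen ...
     tao->niter++;
     TaoMonitor(tao, tao->niter, f, gnorm, 0.0, steplen);
     TaoDefaultConvergenceTest(tao, NULL);   // or the convergence test selected by the user
   }
.ve
*/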
2501: /*@
2502: TaoSetConvergenceHistory - Sets the array used to hold the convergence history.
2504: Logically Collective
2506: Input Parameters:
2507: + tao - the Tao solver context
2508: . obj - array to hold objective value history
2509: . resid - array to hold residual history
2510: . cnorm - array to hold constraint violation history
2511: . lits - integer array to hold the number of linear iterations for each Tao iteration
2512: . na - size of `obj`, `resid`, `cnorm`, and `lits`
2513: - reset - `PETSC_TRUE` indicates each new minimization resets the history counter to zero,
2514: else it continues storing new values for new minimizations after the old ones
2516: Level: intermediate
2518: Notes:
2519: If set, Tao will fill the given arrays with the indicated
2520: information at each iteration. If 'obj','resid','cnorm','lits' are
2521: *all* `NULL` then space (using size `na`, or 1000 if na is `PETSC_DECIDE` or
2522: `PETSC_DEFAULT`) is allocated for the history.
2523: If not all are `NULL`, then only the non-`NULL` information categories
2524: will be stored, the others will be ignored.
2526: Any convergence information after iteration number 'na' will not be stored.
2528: This routine is useful, e.g., when running a code for purposes
2529: of accurate performance monitoring, when no I/O should be done
2530: during the section of code that is being timed.
2532: .seealso: `TaoGetConvergenceHistory()`
2533: @*/
2534: PetscErrorCode TaoSetConvergenceHistory(Tao tao, PetscReal obj[], PetscReal resid[], PetscReal cnorm[], PetscInt lits[], PetscInt na, PetscBool reset)
2535: {
2542: if (na == PETSC_DECIDE || na == PETSC_DEFAULT) na = 1000;
2543: if (!obj && !resid && !cnorm && !lits) {
2544: PetscCalloc4(na, &obj, na, &resid, na, &cnorm, na, &lits);
2545: tao->hist_malloc = PETSC_TRUE;
2546: }
2548: tao->hist_obj = obj;
2549: tao->hist_resid = resid;
2550: tao->hist_cnorm = cnorm;
2551: tao->hist_lits = lits;
2552: tao->hist_max = na;
2553: tao->hist_reset = reset;
2554: tao->hist_len = 0;
2555: return 0;
2556: }
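/*
   Example usage (an illustrative sketch, not part of the original source; the array length
   of 100 is arbitrary): record the history during a solve and query its length afterwards.
.vb
   PetscReal obj[100], resid[100], cnorm[100];
   PetscInt  lits[100], nhist;

   TaoSetConvergenceHistory(tao, obj, resid, cnorm, lits, 100, PETSC_TRUE);
   TaoSolve(tao);
   TaoGetConvergenceHistory(tao, NULL, NULL, NULL, NULL, &nhist);
   // obj[0 .. nhist-1] now holds the objective value recorded at each iteration
.ve
*/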
2558: /*@C
2559: TaoGetConvergenceHistory - Gets the arrays used that hold the convergence history.
2561: Collective
2563: Input Parameter:
2564: . tao - the Tao context
2566: Output Parameters:
2567: + obj - array used to hold objective value history
2568: . resid - array used to hold residual history
2569: . cnorm - array used to hold constraint violation history
2570: . lits - integer array used to hold linear solver iteration count
2571: - nhist - size of `obj`, `resid`, `cnorm`, and `lits`
2573: Level: advanced
2575: Notes:
2576: This routine must be preceded by calls to `TaoSetConvergenceHistory()`
2577: and `TaoSolve()`, otherwise it returns useless information.
2579: This routine is useful, e.g., when running a code for purposes
2580: of accurate performance monitoring, when no I/O should be done
2581: during the section of code that is being timed.
2583: Fortran Note:
2584: The calling sequence is
2585: .vb
2586: call TaoGetConvergenceHistory(Tao tao, PetscInt nhist, PetscErrorCode ierr)
2587: .ve
2589: .seealso: `Tao`, `TaoSolve()`, `TaoSetConvergenceHistory()`
2590: @*/
2591: PetscErrorCode TaoGetConvergenceHistory(Tao tao, PetscReal **obj, PetscReal **resid, PetscReal **cnorm, PetscInt **lits, PetscInt *nhist)
2592: {
2594: if (obj) *obj = tao->hist_obj;
2595: if (cnorm) *cnorm = tao->hist_cnorm;
2596: if (resid) *resid = tao->hist_resid;
2597: if (lits) *lits = tao->hist_lits;
2598: if (nhist) *nhist = tao->hist_len;
2599: return 0;
2600: }
2602: /*@
2603: TaoSetApplicationContext - Sets the optional user-defined context for
2604: a solver.
2606: Logically Collective
2608: Input Parameters:
2609: + tao - the Tao context
2610: - usrP - optional user context
2612: Level: intermediate
2614: .seealso: `Tao`, `TaoGetApplicationContext()`, `TaoSetApplicationContext()`
2615: @*/
2616: PetscErrorCode TaoSetApplicationContext(Tao tao, void *usrP)
2617: {
2619: tao->user = usrP;
2620: return 0;
2621: }
2623: /*@
2624: TaoGetApplicationContext - Gets the user-defined context for a
2625: Tao solver.
2627: Not Collective
2629: Input Parameter:
2630: . tao - Tao context
2632: Output Parameter:
2633: . usrP - user context
2635: Level: intermediate
2637: .seealso: `TaoSetApplicationContext()`
2638: @*/
2639: PetscErrorCode TaoGetApplicationContext(Tao tao, void *usrP)
2640: {
2643: *(void **)usrP = tao->user;
2644: return 0;
2645: }
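/*
   Example usage (an illustrative sketch, not part of the original source; AppCtx and its
   field are hypothetical): pass problem data to callbacks through the Tao object rather
   than through global variables.
.vb
   typedef struct {
     PetscReal alpha;
   } AppCtx;

   AppCtx user = {.alpha = 99.0};
   TaoSetApplicationContext(tao, &user);

   // later, e.g. inside a routine that only receives the Tao object
   AppCtx *ctx;
   TaoGetApplicationContext(tao, &ctx);
.ve
*/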
2647: /*@
2648: TaoSetGradientNorm - Sets the matrix used to define the norm that measures the size of the gradient.
2650: Collective
2652: Input Parameters:
2653: + tao - the Tao context
2654: - M - matrix that defines the norm
2656: Level: beginner
2658: .seealso: `Tao`, `TaoGetGradientNorm()`, `TaoGradientNorm()`
2659: @*/
2660: PetscErrorCode TaoSetGradientNorm(Tao tao, Mat M)
2661: {
2664: PetscObjectReference((PetscObject)M);
2665: MatDestroy(&tao->gradient_norm);
2666: VecDestroy(&tao->gradient_norm_tmp);
2667: tao->gradient_norm = M;
2668: MatCreateVecs(M, NULL, &tao->gradient_norm_tmp);
2669: return 0;
2670: }
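/*
   Example usage (an illustrative sketch, not part of the original source; M is assumed to
   be a previously assembled symmetric positive definite matrix, for instance a mass matrix):
.vb
   TaoSetGradientNorm(tao, M);   // the gradient size is then measured as sqrt(g^T M g)
.ve
*/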
2672: /*@
2673: TaoGetGradientNorm - Returns the matrix used to define the norm used for measuring the size of the gradient.
2675: Not Collective
2677: Input Parameter:
2678: . tao - Tao context
2680: Output Parameter:
2681: . M - gradient norm
2683: Level: beginner
2685: .seealso: `Tao`, `TaoSetGradientNorm()`, `TaoGradientNorm()`
2686: @*/
2687: PetscErrorCode TaoGetGradientNorm(Tao tao, Mat *M)
2688: {
2691: *M = tao->gradient_norm;
2692: return 0;
2693: }
2695: /*@C
2696: TaoGradientNorm - Computes the norm of a gradient vector, using the matrix norm set with `TaoSetGradientNorm()` if one has been provided.
2698: Collective
2700: Input Parameters:
2701: + tao - the Tao context
2702: . gradient - the gradient vector whose norm is to be computed
2703: - type - the norm type to use when no gradient-norm matrix has been set
2705: Output Parameter:
2706: . gnorm - the gradient norm
2708: Level: developer
2710: .seealso: `Tao`, `TaoSetGradientNorm()`, `TaoGetGradientNorm()`
2711: @*/
2712: PetscErrorCode TaoGradientNorm(Tao tao, Vec gradient, NormType type, PetscReal *gnorm)
2713: {
2718: if (tao->gradient_norm) {
2719: PetscScalar gnorms;
2722: MatMult(tao->gradient_norm, gradient, tao->gradient_norm_tmp);
2723: VecDot(gradient, tao->gradient_norm_tmp, &gnorms);
2724: *gnorm = PetscRealPart(PetscSqrtScalar(gnorms));
2725: } else {
2726: VecNorm(gradient, type, gnorm);
2727: }
2728: return 0;
2729: }
2731: /*@C
2732: TaoMonitorDrawCtxCreate - Creates the monitor context used by `TaoDrawSolutionMonitor()`
2734: Collective
      Input Parameters:
      + comm - the MPI communicator
      . host - the X display on which to open the window (or NULL for the local machine), passed to `PetscViewerDrawOpen()`
      . label - the window title, passed to `PetscViewerDrawOpen()`
      . x, y, m, n - the window position and size, passed to `PetscViewerDrawOpen()`
      - howoften - plot every `howoften` iterations, or -1 to plot only once a convergence reason has been set

2736: Output Parameter:
2737: . ctx - the monitor context
2739: Options Database Key:
2740: . -tao_draw_solution_initial - show initial guess as well as current solution
2742: Level: intermediate
2744: .seealso: `Tao`, `TaoSetMonitor()`, `TaoMonitorDefault()`, `VecView()`, `TaoMonitorDrawCtxDestroy()`
2745: @*/
2746: PetscErrorCode TaoMonitorDrawCtxCreate(MPI_Comm comm, const char host[], const char label[], int x, int y, int m, int n, PetscInt howoften, TaoMonitorDrawCtx *ctx)
2747: {
2748: PetscNew(ctx);
2749: PetscViewerDrawOpen(comm, host, label, x, y, m, n, &(*ctx)->viewer);
2750: PetscViewerSetFromOptions((*ctx)->viewer);
2751: (*ctx)->howoften = howoften;
2752: return 0;
2753: }
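/*
   Example usage (an illustrative sketch, not part of the original source): create a context
   that plots every second iterate and hand it, together with its destructor, to the solver.
.vb
   TaoMonitorDrawCtx drawctx;

   TaoMonitorDrawCtxCreate(PETSC_COMM_WORLD, NULL, NULL, PETSC_DECIDE, PETSC_DECIDE, 300, 300, 2, &drawctx);
   TaoSetMonitor(tao, TaoDrawSolutionMonitor, drawctx, (PetscErrorCode (*)(void **))TaoMonitorDrawCtxDestroy);
.ve
*/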
2755: /*@C
2756: TaoMonitorDrawCtxDestroy - Destroys the monitor context for `TaoDrawSolutionMonitor()`
2758: Collective
2760: Input Parameter:
2761: . ctx - the monitor context
2763: Level: intermediate
2765: .seealso: `TaoSetMonitor()`, `TaoMonitorDefault()`, `VecView()`, `TaoDrawSolutionMonitor()`
2766: @*/
2767: PetscErrorCode TaoMonitorDrawCtxDestroy(TaoMonitorDrawCtx *ictx)
2768: {
2769: PetscViewerDestroy(&(*ictx)->viewer);
2770: PetscFree(*ictx);
2771: return 0;
2772: }