Actual source code: asils.c
#include <../src/tao/complementarity/impls/ssls/ssls.h>
/*
   Context for ASXLS (X = I for the infeasible variant, F for the feasible variant)
      -- active-set      - reduced matrices formed
                         - inherit properties of original system
      -- semismooth (S)  - function not differentiable
                         - merit function continuously differentiable
                         - Fischer-Burmeister reformulation of complementarity
                         - Billups composition for two finite bounds
      -- infeasible (I)  - iterates not guaranteed to remain within bounds
      -- feasible (F)    - iterates guaranteed to remain within bounds
      -- linesearch (LS) - Armijo rule on direction

   Many other reformulations are possible, as are combinations of
   feasible/infeasible iterates and linesearch/trust-region globalization.

   Basic theory
     The Fischer-Burmeister reformulation is semismooth with a continuously
     differentiable merit function, and it is strongly semismooth if F has
     Lipschitz continuous derivatives.

     Every accumulation point generated by the algorithm is a stationary
     point for the merit function.  Stationary points of the merit function
     are solutions of the complementarity problem if
       a.  the stationary point has a BD-regular subdifferential, or
       b.  the Schur complement F'/F'_ff is a P_0-matrix, where ff is the
           index set corresponding to the free variables.

     If one of the accumulation points has a BD-regular subdifferential, then
       a.  the entire sequence converges to this accumulation point at
           a local q-superlinear rate;
       b.  if, in addition, the reformulation is strongly semismooth near
           this accumulation point, then the algorithm converges at a
           local q-quadratic rate.

     The theory for the feasible version follows from the feasible descent
     algorithm framework.

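   For reference, the scalar Fischer-Burmeister function underlying the
   reformulation mentioned above is
      phi(a, b) = sqrt(a*a + b*b) - a - b,
   which vanishes exactly when a >= 0, b >= 0, and a*b = 0.  Applying it
   componentwise to (x - l, F(x)) therefore encodes the complementarity
   conditions for a lower-bounded variable; the Billups composition extends
   this to variables with two finite bounds.
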
   References:
+ * - Billups, "Algorithms for Complementarity Problems and Generalized
      Equations," Ph.D. thesis, University of Wisconsin-Madison, 1995.
. * - De Luca, Facchinei, Kanzow, "A Semismooth Equation Approach to the
      Solution of Nonlinear Complementarity Problems," Mathematical
      Programming, 75, 1996.
. * - Ferris, Kanzow, Munson, "Feasible Descent Algorithms for Mixed
      Complementarity Problems," Mathematical Programming, 86, 1999.
. * - Fischer, "A Special Newton-type Optimization Method," Optimization,
      24, 1992.
- * - Munson, Facchinei, Ferris, Fischer, Kanzow, "The Semismooth Algorithm
      for Large Scale Complementarity Problems," Technical Report,
      University of Wisconsin-Madison, 1999.
*/

static PetscErrorCode TaoSetUp_ASILS(Tao tao)
{
  TAO_SSLS *asls = (TAO_SSLS *)tao->data;

  VecDuplicate(tao->solution, &tao->gradient);
  VecDuplicate(tao->solution, &tao->stepdirection);
  VecDuplicate(tao->solution, &asls->ff);
  VecDuplicate(tao->solution, &asls->dpsi);
  VecDuplicate(tao->solution, &asls->da);
  VecDuplicate(tao->solution, &asls->db);
  VecDuplicate(tao->solution, &asls->t1);
  VecDuplicate(tao->solution, &asls->t2);
  asls->fixed    = NULL;
  asls->free     = NULL;
  asls->J_sub    = NULL;
  asls->Jpre_sub = NULL;
  asls->w        = NULL;
  asls->r1       = NULL;
  asls->r2       = NULL;
  asls->r3       = NULL;
  asls->dxfree   = NULL;
  return 0;
}

static PetscErrorCode Tao_ASLS_FunctionGradient(TaoLineSearch ls, Vec X, PetscReal *fcn, Vec G, void *ptr)
{
  Tao       tao  = (Tao)ptr;
  TAO_SSLS *asls = (TAO_SSLS *)tao->data;

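  /* ff holds the Fischer-Burmeister residual Phi(X) for the box [XL, XU], so the
     merit value is psi(X) = 0.5*||Phi(X)||^2.  MatDFischer supplies diagonals da and
     db for which diag(da) + diag(db)*J is an element of the generalized Jacobian of
     Phi, giving the gradient G = diag(da)*Phi + J^T*(diag(db)*Phi) assembled below. */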
  TaoComputeConstraints(tao, X, tao->constraints);
  VecFischer(X, tao->constraints, tao->XL, tao->XU, asls->ff);
  VecNorm(asls->ff, NORM_2, &asls->merit);
  *fcn = 0.5 * asls->merit * asls->merit;

  TaoComputeJacobian(tao, tao->solution, tao->jacobian, tao->jacobian_pre);
  MatDFischer(tao->jacobian, tao->solution, tao->constraints, tao->XL, tao->XU, asls->t1, asls->t2, asls->da, asls->db);
  VecPointwiseMult(asls->t1, asls->ff, asls->db);
  MatMultTranspose(tao->jacobian, asls->t1, G);
  VecPointwiseMult(asls->t1, asls->ff, asls->da);
  VecAXPY(G, 1.0, asls->t1);
  return 0;
}

static PetscErrorCode TaoDestroy_ASILS(Tao tao)
{
  TAO_SSLS *ssls = (TAO_SSLS *)tao->data;

  VecDestroy(&ssls->ff);
  VecDestroy(&ssls->dpsi);
  VecDestroy(&ssls->da);
  VecDestroy(&ssls->db);
  VecDestroy(&ssls->w);
  VecDestroy(&ssls->t1);
  VecDestroy(&ssls->t2);
  VecDestroy(&ssls->r1);
  VecDestroy(&ssls->r2);
  VecDestroy(&ssls->r3);
  VecDestroy(&ssls->dxfree);
  MatDestroy(&ssls->J_sub);
  MatDestroy(&ssls->Jpre_sub);
  ISDestroy(&ssls->fixed);
  ISDestroy(&ssls->free);
  KSPDestroy(&tao->ksp);
  PetscFree(tao->data);
  return 0;
}

static PetscErrorCode TaoSolve_ASILS(Tao tao)
{
  TAO_SSLS                    *asls = (TAO_SSLS *)tao->data;
  PetscReal                    psi, ndpsi, normd, innerd, t = 0;
  PetscInt                     nf;
  TaoLineSearchConvergedReason ls_reason;

  /* Assume that Setup has been called!
     Set the structure for the Jacobian and create a linear solver. */
  TaoComputeVariableBounds(tao);
  TaoLineSearchSetObjectiveAndGradientRoutine(tao->linesearch, Tao_ASLS_FunctionGradient, tao);
  TaoLineSearchSetObjectiveRoutine(tao->linesearch, Tao_SSLS_Function, tao);

  /* Calculate the function value and Fischer function value at the
     current iterate */
  TaoLineSearchComputeObjectiveAndGradient(tao->linesearch, tao->solution, &psi, asls->dpsi);
  VecNorm(asls->dpsi, NORM_2, &ndpsi);

  tao->reason = TAO_CONTINUE_ITERATING;
  while (1) {
    /* Check the termination criteria */
    PetscInfo(tao, "iter %" PetscInt_FMT ", merit: %g, ||dpsi||: %g\n", tao->niter, (double)asls->merit, (double)ndpsi);
    TaoLogConvergenceHistory(tao, asls->merit, ndpsi, 0.0, tao->ksp_its);
    TaoMonitor(tao, tao->niter, asls->merit, ndpsi, 0.0, t);
    PetscUseTypeMethod(tao, convergencetest, tao->cnvP);
    if (TAO_CONTINUE_ITERATING != tao->reason) break;

    /* Call general purpose update function */
    PetscTryTypeMethod(tao, update, tao->niter, tao->user_update);
    tao->niter++;

    /* We are going to solve a linear system of equations.  We need to set
       the tolerances for the solve so that we maintain an asymptotic rate
       of convergence that is superlinear.
       Note: these tolerances are for the reduced system.  We really need
       to make sure that the full system satisfies the full-space conditions.

       This rule gives superlinear asymptotic convergence:
         asls->atol = min(0.5, asls->merit*sqrt(asls->merit));
         asls->rtol = 0.0;

       This rule gives quadratic asymptotic convergence:
         asls->atol = min(0.5, asls->merit*asls->merit);
         asls->rtol = 0.0;

       Calculate a free and fixed set of variables.  The fixed set consists
       of the variables for which d_b is approximately equal to zero.  The
       definition of "approximately" changes as we approach the solution
       to the problem.

       No one rule is guaranteed to work in all cases.  The following
       definition is based on the norm of the Jacobian matrix.  If the
       norm is large, the tolerance becomes smaller. */
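    /* As a sketch only (not enabled in this code), the superlinear rule above
       would translate into something like
         KSPSetTolerances(tao->ksp, 0.0, PetscMin(0.5, asls->merit * PetscSqrtReal(asls->merit)), PETSC_DEFAULT, PETSC_DEFAULT);
       with the quadratic rule replacing the absolute tolerance by
       PetscMin(0.5, asls->merit * asls->merit). */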
    MatNorm(tao->jacobian, NORM_1, &asls->identifier);
    asls->identifier = PetscMin(asls->merit, 1e-2) / (1 + asls->identifier);

    VecSet(asls->t1, -asls->identifier);
    VecSet(asls->t2, asls->identifier);
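    /* With t1 = -identifier and t2 = +identifier, the fixed set computed below is
       { i : -identifier <= db_i <= identifier }, i.e. the components where d_b is
       approximately zero; the free set is its complement. */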
    ISDestroy(&asls->fixed);
    ISDestroy(&asls->free);
    VecWhichBetweenOrEqual(asls->t1, asls->db, asls->t2, &asls->fixed);
    ISComplementVec(asls->fixed, asls->t1, &asls->free);

    ISGetSize(asls->fixed, &nf);
    PetscInfo(tao, "Number of fixed variables: %" PetscInt_FMT "\n", nf);

    /* We now have our partition.  Now calculate the direction in the
       fixed variable space. */
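    /* Componentwise, the direction on the fixed set is d_i = ff_i / da_i. */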
    TaoVecGetSubVec(asls->ff, asls->fixed, tao->subset_type, 0.0, &asls->r1);
    TaoVecGetSubVec(asls->da, asls->fixed, tao->subset_type, 1.0, &asls->r2);
    VecPointwiseDivide(asls->r1, asls->r1, asls->r2);
    VecSet(tao->stepdirection, 0.0);
    VecISAXPY(tao->stepdirection, asls->fixed, 1.0, asls->r1);

    /* Our direction in the Fixed Variable Set is fixed.  Calculate the
       information needed for the step in the Free Variable Set.  To
       do this, we need to know the diagonal perturbation and the
       right hand side. */
    TaoVecGetSubVec(asls->da, asls->free, tao->subset_type, 0.0, &asls->r1);
    TaoVecGetSubVec(asls->ff, asls->free, tao->subset_type, 0.0, &asls->r2);
    TaoVecGetSubVec(asls->db, asls->free, tao->subset_type, 1.0, &asls->r3);
    VecPointwiseDivide(asls->r1, asls->r1, asls->r3);
    VecPointwiseDivide(asls->r2, asls->r2, asls->r3);

    /* r1 is the diagonal perturbation
       r2 is the right hand side
       r3 is no longer needed

       Now need to modify r2 for our direction choice in the fixed
       variable set:  calculate t1 = J*d, take the reduced vector
       of t1, and modify r2. */
    MatMult(tao->jacobian, tao->stepdirection, asls->t1);
    TaoVecGetSubVec(asls->t1, asls->free, tao->subset_type, 0.0, &asls->r3);
    VecAXPY(asls->r2, -1.0, asls->r3);

    /* Calculate the reduced problem matrix and the direction */
    if (!asls->w && (tao->subset_type == TAO_SUBSET_MASK || tao->subset_type == TAO_SUBSET_MATRIXFREE)) VecDuplicate(tao->solution, &asls->w);
    TaoMatGetSubMat(tao->jacobian, asls->free, asls->w, tao->subset_type, &asls->J_sub);
    if (tao->jacobian != tao->jacobian_pre) {
      TaoMatGetSubMat(tao->jacobian_pre, asls->free, asls->w, tao->subset_type, &asls->Jpre_sub);
    } else {
      MatDestroy(&asls->Jpre_sub);
      asls->Jpre_sub = asls->J_sub;
      PetscObjectReference((PetscObject)(asls->Jpre_sub));
    }
    MatDiagonalSet(asls->J_sub, asls->r1, ADD_VALUES);
    TaoVecGetSubVec(tao->stepdirection, asls->free, tao->subset_type, 0.0, &asls->dxfree);
    VecSet(asls->dxfree, 0.0);

    /* Calculate the reduced direction.  (Really negative of Newton
       direction.  Therefore, rest of the code uses -d.) */
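    /* The reduced system solved below is
         (J_ff + diag(da_f ./ db_f)) dx_f = ff_f ./ db_f - (J*d)_f,
       where the subscript f denotes restriction to the free variables and J_ff is
       the free-free block of the Jacobian extracted above. */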
    KSPReset(tao->ksp);
    KSPSetOperators(tao->ksp, asls->J_sub, asls->Jpre_sub);
    KSPSolve(tao->ksp, asls->r2, asls->dxfree);
    KSPGetIterationNumber(tao->ksp, &tao->ksp_its);
    tao->ksp_tot_its += tao->ksp_its;

    /* Add the direction in the free variables back into the real direction. */
    VecISAXPY(tao->stepdirection, asls->free, 1.0, asls->dxfree);

    /* Check the real direction for descent and if not, use the negative
       gradient direction. */
    VecNorm(tao->stepdirection, NORM_2, &normd);
    VecDot(tao->stepdirection, asls->dpsi, &innerd);

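    /* At this point d = stepdirection is the negated Newton step; it is kept only if
       <d, dpsi> > delta * ||d||^rho, which guarantees that -d is a direction of
       sufficient descent for the merit function.  Otherwise the gradient dpsi is
       used instead (and negated below, giving steepest descent). */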
    if (innerd <= asls->delta * PetscPowReal(normd, asls->rho)) {
      PetscInfo(tao, "Gradient direction: %5.4e.\n", (double)innerd);
      PetscInfo(tao, "Iteration %" PetscInt_FMT ": newton direction not descent\n", tao->niter);
      VecCopy(asls->dpsi, tao->stepdirection);
      VecDot(asls->dpsi, tao->stepdirection, &innerd);
    }

    VecScale(tao->stepdirection, -1.0);
    innerd = -innerd;

    /* We now have a correct descent direction.  Apply a linesearch to
       find the new iterate. */
    TaoLineSearchSetInitialStepLength(tao->linesearch, 1.0);
    TaoLineSearchApply(tao->linesearch, tao->solution, &psi, asls->dpsi, tao->stepdirection, &t, &ls_reason);
    VecNorm(asls->dpsi, NORM_2, &ndpsi);
  }
  return 0;
}

/* ---------------------------------------------------------- */
/*MC
  TAOASILS - Active-set infeasible linesearch algorithm for solving mixed
  complementarity problems

  Options Database Keys:
+ -tao_ssls_delta - descent test fraction
- -tao_ssls_rho - descent test power

  Level: beginner
M*/

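/*
   A minimal usage sketch for selecting this algorithm from application code.  It
   assumes the standard Tao setup interface; the user callbacks (UserConstraints,
   UserJacobian), the user context, and the vectors/matrix (x, xl, xu, c, J) are
   hypothetical placeholders.

     Tao tao;
     TaoCreate(PETSC_COMM_WORLD, &tao);
     TaoSetType(tao, TAOASILS);
     TaoSetSolution(tao, x);                                    // initial iterate x
     TaoSetVariableBounds(tao, xl, xu);                         // box [xl, xu]
     TaoSetConstraintsRoutine(tao, c, UserConstraints, &user);  // evaluates F(x) into c
     TaoSetJacobianRoutine(tao, J, J, UserJacobian, &user);     // evaluates F'(x) into J
     TaoSetFromOptions(tao);                                    // e.g. -tao_ssls_delta, -tao_ssls_rho
     TaoSolve(tao);
     TaoDestroy(&tao);
*/
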
PETSC_EXTERN PetscErrorCode TaoCreate_ASILS(Tao tao)
{
  TAO_SSLS   *asls;
  const char *armijo_type = TAOLINESEARCHARMIJO;

  PetscNew(&asls);
  tao->data                = (void *)asls;
  tao->ops->solve          = TaoSolve_ASILS;
  tao->ops->setup          = TaoSetUp_ASILS;
  tao->ops->view           = TaoView_SSLS;
  tao->ops->setfromoptions = TaoSetFromOptions_SSLS;
  tao->ops->destroy        = TaoDestroy_ASILS;
  tao->subset_type         = TAO_SUBSET_SUBVEC;
  asls->delta              = 1e-10;
  asls->rho                = 2.1;
  asls->fixed              = NULL;
  asls->free               = NULL;
  asls->J_sub              = NULL;
  asls->Jpre_sub           = NULL;
  asls->w                  = NULL;
  asls->r1                 = NULL;
  asls->r2                 = NULL;
  asls->r3                 = NULL;
  asls->t1                 = NULL;
  asls->t2                 = NULL;
  asls->dxfree             = NULL;

  asls->identifier = 1e-5;

  TaoLineSearchCreate(((PetscObject)tao)->comm, &tao->linesearch);
  PetscObjectIncrementTabLevel((PetscObject)tao->linesearch, (PetscObject)tao, 1);
  TaoLineSearchSetType(tao->linesearch, armijo_type);
  TaoLineSearchSetOptionsPrefix(tao->linesearch, tao->hdr.prefix);
  TaoLineSearchSetFromOptions(tao->linesearch);

  KSPCreate(((PetscObject)tao)->comm, &tao->ksp);
  PetscObjectIncrementTabLevel((PetscObject)tao->ksp, (PetscObject)tao, 1);
  KSPSetOptionsPrefix(tao->ksp, tao->hdr.prefix);
  KSPSetFromOptions(tao->ksp);

  /* Override default settings (unless already changed) */
  if (!tao->max_it_changed) tao->max_it = 2000;
  if (!tao->max_funcs_changed) tao->max_funcs = 4000;
  if (!tao->gttol_changed) tao->gttol = 0;
  if (!tao->grtol_changed) tao->grtol = 0;
#if defined(PETSC_USE_REAL_SINGLE)
  if (!tao->gatol_changed) tao->gatol = 1.0e-6;
  if (!tao->fmin_changed) tao->fmin = 1.0e-4;
#else
  if (!tao->gatol_changed) tao->gatol = 1.0e-16;
  if (!tao->fmin_changed) tao->fmin = 1.0e-8;
#endif
  return 0;
}