Actual source code: mkl_pardiso.c
1: #include <../src/mat/impls/aij/seq/aij.h>
2: #include <../src/mat/impls/sbaij/seq/sbaij.h>
3: #include <../src/mat/impls/dense/seq/dense.h>
5: #if defined(PETSC_HAVE_MKL_INTEL_ILP64)
6: #define MKL_ILP64
7: #endif
8: #include <mkl_pardiso.h>
10: PETSC_EXTERN void PetscSetMKL_PARDISOThreads(int);
12: /*
13: * Possible mkl_pardiso phases that control the execution of the solver.
14: * For more information check the mkl_pardiso manual.
15: */
16: #define JOB_ANALYSIS 11
17: #define JOB_ANALYSIS_NUMERICAL_FACTORIZATION 12
18: #define JOB_ANALYSIS_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT 13
19: #define JOB_NUMERICAL_FACTORIZATION 22
20: #define JOB_NUMERICAL_FACTORIZATION_SOLVE_ITERATIVE_REFINEMENT 23
21: #define JOB_SOLVE_ITERATIVE_REFINEMENT 33
22: #define JOB_SOLVE_FORWARD_SUBSTITUTION 331
23: #define JOB_SOLVE_DIAGONAL_SUBSTITUTION 332
24: #define JOB_SOLVE_BACKWARD_SUBSTITUTION 333
25: #define JOB_RELEASE_OF_LU_MEMORY 0
26: #define JOB_RELEASE_OF_ALL_MEMORY -1
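/*
 * A sketch of how this interface drives the phases above: the symbolic factorization runs JOB_ANALYSIS,
 * MatFactorNumeric_MKL_PARDISO() runs JOB_NUMERICAL_FACTORIZATION, MatSolve()/MatMatSolve() run
 * JOB_SOLVE_ITERATIVE_REFINEMENT (or the forward/backward substitution phases when a Schur complement is
 * attached), and MatDestroy_MKL_PARDISO() releases PARDISO's memory with JOB_RELEASE_OF_ALL_MEMORY.
 */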
28: #define IPARM_SIZE 64
30: #if defined(PETSC_USE_64BIT_INDICES)
31: #if defined(PETSC_HAVE_MKL_INTEL_ILP64)
32: #define INT_TYPE long long int
33: #define MKL_PARDISO pardiso
34: #define MKL_PARDISO_INIT pardisoinit
35: #else
36: /* this is the case where the MKL BLAS/LAPACK use 32-bit integers but the 64-bit integer version
37: of the Pardiso code is used; hence the need for the _64 suffixes below */
38: #define INT_TYPE long long int
39: #define MKL_PARDISO pardiso_64
40: #define MKL_PARDISO_INIT pardiso_64init
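/* In this configuration pardisoinit() takes 32-bit integers, so this shim calls it with temporary int
   storage and widens the results into the 64-bit iparm[] that pardiso_64() expects */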
41: void pardiso_64init(void *pt, INT_TYPE *mtype, INT_TYPE iparm[])
42: {
43: int iparm_copy[IPARM_SIZE], mtype_copy, i;
45: mtype_copy = *mtype;
46: pardisoinit(pt, &mtype_copy, iparm_copy);
47: for (i = 0; i < IPARM_SIZE; i++) iparm[i] = iparm_copy[i];
48: }
49: #endif
50: #else
51: #define INT_TYPE int
52: #define MKL_PARDISO pardiso
53: #define MKL_PARDISO_INIT pardisoinit
54: #endif
56: /*
57: * Internal data structure.
58: * For more information check the mkl_pardiso manual.
59: */
60: typedef struct {
61: /* Configuration vector*/
62: INT_TYPE iparm[IPARM_SIZE];
64: /*
65: * Internal mkl_pardiso memory location.
66: * After the first call to mkl_pardiso do not modify pt, as that could cause a serious memory leak.
67: */
68: void *pt[IPARM_SIZE];
70: /* Basic mkl_pardiso info*/
71: INT_TYPE phase, maxfct, mnum, mtype, n, nrhs, msglvl, err;
73: /* Matrix structure*/
74: void *a;
75: INT_TYPE *ia, *ja;
77: /* Number of non-zero elements*/
78: INT_TYPE nz;
80: /* Row permutation vector */
81: INT_TYPE *perm;
83: /* Defines whether the matrix preserves its sparse structure. */
84: MatStructure matstruc;
86: PetscBool needsym;
87: PetscBool freeaij;
89: /* Schur complement */
90: PetscScalar *schur;
91: PetscInt schur_size;
92: PetscInt *schur_idxs;
93: PetscScalar *schur_work;
94: PetscBLASInt schur_work_size;
95: PetscBool solve_interior;
97: /* True if mkl_pardiso functions have been used. */
98: PetscBool CleanUp;
100: /* Conversion to a format suitable for MKL */
101: PetscErrorCode (*Convert)(Mat, PetscBool, MatReuse, PetscBool *, INT_TYPE *, INT_TYPE **, INT_TYPE **, PetscScalar **);
102: } Mat_MKL_PARDISO;
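/* Note: iparm[] is indexed from 0 in this file while the MKL documentation numbers the parameters from 1;
   expressions such as iparm[12 - 1] below make that correspondence explicit */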
104: PetscErrorCode MatMKLPardiso_Convert_seqsbaij(Mat A, PetscBool sym, MatReuse reuse, PetscBool *free, INT_TYPE *nnz, INT_TYPE **r, INT_TYPE **c, PetscScalar **v)
105: {
106: Mat_SeqSBAIJ *aa = (Mat_SeqSBAIJ *)A->data;
107: PetscInt bs = A->rmap->bs, i;
110: *v = aa->a;
111: if (bs == 1) { /* already in the correct format */
112: /* though PetscInt and INT_TYPE are the same size, since they are defined differently the Intel compiler requires a cast */
113: *r = (INT_TYPE *)aa->i;
114: *c = (INT_TYPE *)aa->j;
115: *nnz = (INT_TYPE)aa->nz;
116: *free = PETSC_FALSE;
117: } else if (reuse == MAT_INITIAL_MATRIX) {
118: PetscInt m = A->rmap->n, nz = aa->nz;
119: PetscInt *row, *col;
120: PetscMalloc2(m + 1, &row, nz, &col);
121: for (i = 0; i < m + 1; i++) row[i] = aa->i[i] + 1;
122: for (i = 0; i < nz; i++) col[i] = aa->j[i] + 1;
123: *r = (INT_TYPE *)row;
124: *c = (INT_TYPE *)col;
125: *nnz = (INT_TYPE)nz;
126: *free = PETSC_TRUE;
127: }
128: return 0;
129: }
131: PetscErrorCode MatMKLPardiso_Convert_seqbaij(Mat A, PetscBool sym, MatReuse reuse, PetscBool *free, INT_TYPE *nnz, INT_TYPE **r, INT_TYPE **c, PetscScalar **v)
132: {
133: Mat_SeqBAIJ *aa = (Mat_SeqBAIJ *)A->data;
134: PetscInt bs = A->rmap->bs, i;
136: if (!sym) {
137: *v = aa->a;
138: if (bs == 1) { /* already in the correct format */
139: /* though PetscInt and INT_TYPE are the same size, since they are defined differently the Intel compiler requires a cast */
140: *r = (INT_TYPE *)aa->i;
141: *c = (INT_TYPE *)aa->j;
142: *nnz = (INT_TYPE)aa->nz;
143: *free = PETSC_FALSE;
144: return 0;
145: } else if (reuse == MAT_INITIAL_MATRIX) {
146: PetscInt m = A->rmap->n, nz = aa->nz;
147: PetscInt *row, *col;
148: PetscMalloc2(m + 1, &row, nz, &col);
149: for (i = 0; i < m + 1; i++) row[i] = aa->i[i] + 1;
150: for (i = 0; i < nz; i++) col[i] = aa->j[i] + 1;
151: *r = (INT_TYPE *)row;
152: *c = (INT_TYPE *)col;
153: *nnz = (INT_TYPE)nz;
154: }
155: *free = PETSC_TRUE;
156: } else {
157: SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_PLIB, "This should not happen");
158: }
159: return 0;
160: }
162: PetscErrorCode MatMKLPardiso_Convert_seqaij(Mat A, PetscBool sym, MatReuse reuse, PetscBool *free, INT_TYPE *nnz, INT_TYPE **r, INT_TYPE **c, PetscScalar **v)
163: {
164: Mat_SeqAIJ *aa = (Mat_SeqAIJ *)A->data;
165: PetscScalar *aav;
167: MatSeqAIJGetArrayRead(A, (const PetscScalar **)&aav);
168: if (!sym) { /* already in the correct format */
169: *v = aav;
170: *r = (INT_TYPE *)aa->i;
171: *c = (INT_TYPE *)aa->j;
172: *nnz = (INT_TYPE)aa->nz;
173: *free = PETSC_FALSE;
174: } else if (reuse == MAT_INITIAL_MATRIX) { /* need to get the triangular part */
175: PetscScalar *vals, *vv;
176: PetscInt *row, *col, *jj;
177: PetscInt m = A->rmap->n, nz, i;
179: nz = 0;
180: for (i = 0; i < m; i++) nz += aa->i[i + 1] - aa->diag[i];
181: PetscMalloc2(m + 1, &row, nz, &col);
182: PetscMalloc1(nz, &vals);
183: jj = col;
184: vv = vals;
186: row[0] = 0;
187: for (i = 0; i < m; i++) {
188: PetscInt *aj = aa->j + aa->diag[i];
189: PetscScalar *av = aav + aa->diag[i];
190: PetscInt rl = aa->i[i + 1] - aa->diag[i], j;
192: for (j = 0; j < rl; j++) {
193: *jj = *aj;
194: jj++;
195: aj++;
196: *vv = *av;
197: vv++;
198: av++;
199: }
200: row[i + 1] = row[i] + rl;
201: }
202: *v = vals;
203: *r = (INT_TYPE *)row;
204: *c = (INT_TYPE *)col;
205: *nnz = (INT_TYPE)nz;
206: *free = PETSC_TRUE;
207: } else {
208: PetscScalar *vv;
209: PetscInt m = A->rmap->n, i;
211: vv = *v;
212: for (i = 0; i < m; i++) {
213: PetscScalar *av = aav + aa->diag[i];
214: PetscInt rl = aa->i[i + 1] - aa->diag[i], j;
215: for (j = 0; j < rl; j++) {
216: *vv = *av;
217: vv++;
218: av++;
219: }
220: }
221: *free = PETSC_TRUE;
222: }
223: MatSeqAIJRestoreArrayRead(A, (const PetscScalar **)&aav);
224: return 0;
225: }
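/* Solve with the dense Schur complement: B and X are wrapped as dense matrices and either solved with
   MatMatSolve()/MatMatSolveTranspose() when the Schur complement is factored, or multiplied via MatProduct
   when it has been explicitly inverted; iparm[12 - 1] selects the transposed solve */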
227: static PetscErrorCode MatMKLPardisoSolveSchur_Private(Mat F, PetscScalar *B, PetscScalar *X)
228: {
229: Mat_MKL_PARDISO *mpardiso = (Mat_MKL_PARDISO *)F->data;
230: Mat S, Xmat, Bmat;
231: MatFactorSchurStatus schurstatus;
233: MatFactorGetSchurComplement(F, &S, &schurstatus);
235: MatCreateSeqDense(PETSC_COMM_SELF, mpardiso->schur_size, mpardiso->nrhs, B, &Bmat);
236: MatCreateSeqDense(PETSC_COMM_SELF, mpardiso->schur_size, mpardiso->nrhs, X, &Xmat);
237: MatSetType(Bmat, ((PetscObject)S)->type_name);
238: MatSetType(Xmat, ((PetscObject)S)->type_name);
239: #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
240: MatBindToCPU(Xmat, S->boundtocpu);
241: MatBindToCPU(Bmat, S->boundtocpu);
242: #endif
244: #if defined(PETSC_USE_COMPLEX)
246: #endif
248: switch (schurstatus) {
249: case MAT_FACTOR_SCHUR_FACTORED:
250: if (!mpardiso->iparm[12 - 1]) {
251: MatMatSolve(S, Bmat, Xmat);
252: } else { /* transpose solve */
253: MatMatSolveTranspose(S, Bmat, Xmat);
254: }
255: break;
256: case MAT_FACTOR_SCHUR_INVERTED:
257: MatProductCreateWithMat(S, Bmat, NULL, Xmat);
258: if (!mpardiso->iparm[12 - 1]) {
259: MatProductSetType(Xmat, MATPRODUCT_AB);
260: } else { /* transpose solve */
261: MatProductSetType(Xmat, MATPRODUCT_AtB);
262: }
263: MatProductSetFromOptions(Xmat);
264: MatProductSymbolic(Xmat);
265: MatProductNumeric(Xmat);
266: MatProductClear(Xmat);
267: break;
268: default:
269: SETERRQ(PetscObjectComm((PetscObject)F), PETSC_ERR_SUP, "Unhandled MatFactorSchurStatus %" PetscInt_FMT, F->schur_status);
270: break;
271: }
272: MatFactorRestoreSchurComplement(F, &S, schurstatus);
273: MatDestroy(&Bmat);
274: MatDestroy(&Xmat);
275: return 0;
276: }
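/* Record the Schur index set: allocates workspace and the dense F->schur, copies the indices, and marks the
   selected rows in perm so that the factorization phase returns the Schur complement (iparm[36 - 1] = 2) */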
278: PetscErrorCode MatFactorSetSchurIS_MKL_PARDISO(Mat F, IS is)
279: {
280: Mat_MKL_PARDISO *mpardiso = (Mat_MKL_PARDISO *)F->data;
281: const PetscScalar *arr;
282: const PetscInt *idxs;
283: PetscInt size, i;
284: PetscMPIInt csize;
285: PetscBool sorted;
287: MPI_Comm_size(PetscObjectComm((PetscObject)F), &csize);
289: ISSorted(is, &sorted);
291: ISGetLocalSize(is, &size);
292: PetscFree(mpardiso->schur_work);
293: PetscBLASIntCast(PetscMax(mpardiso->n, 2 * size), &mpardiso->schur_work_size);
294: PetscMalloc1(mpardiso->schur_work_size, &mpardiso->schur_work);
295: MatDestroy(&F->schur);
296: MatCreateSeqDense(PETSC_COMM_SELF, size, size, NULL, &F->schur);
297: MatDenseGetArrayRead(F->schur, &arr);
298: mpardiso->schur = (PetscScalar *)arr;
299: mpardiso->schur_size = size;
300: MatDenseRestoreArrayRead(F->schur, &arr);
301: if (mpardiso->mtype == 2) MatSetOption(F->schur, MAT_SPD, PETSC_TRUE);
303: PetscFree(mpardiso->schur_idxs);
304: PetscMalloc1(size, &mpardiso->schur_idxs);
305: PetscArrayzero(mpardiso->perm, mpardiso->n);
306: ISGetIndices(is, &idxs);
307: PetscArraycpy(mpardiso->schur_idxs, idxs, size);
308: for (i = 0; i < size; i++) mpardiso->perm[idxs[i]] = 1;
309: ISRestoreIndices(is, &idxs);
310: if (size) { /* turn on Schur switch if the set of indices is not empty */
311: mpardiso->iparm[36 - 1] = 2;
312: }
313: return 0;
314: }
316: PetscErrorCode MatDestroy_MKL_PARDISO(Mat A)
317: {
318: Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)A->data;
320: if (mat_mkl_pardiso->CleanUp) {
321: mat_mkl_pardiso->phase = JOB_RELEASE_OF_ALL_MEMORY;
323: MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, NULL, NULL, NULL, NULL, &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, NULL, NULL,
324: &mat_mkl_pardiso->err);
325: }
326: PetscFree(mat_mkl_pardiso->perm);
327: PetscFree(mat_mkl_pardiso->schur_work);
328: PetscFree(mat_mkl_pardiso->schur_idxs);
329: if (mat_mkl_pardiso->freeaij) {
330: PetscFree2(mat_mkl_pardiso->ia, mat_mkl_pardiso->ja);
331: if (mat_mkl_pardiso->iparm[34] == 1) PetscFree(mat_mkl_pardiso->a);
332: }
333: PetscFree(A->data);
335: /* clear composed functions */
336: PetscObjectComposeFunction((PetscObject)A, "MatFactorGetSolverType_C", NULL);
337: PetscObjectComposeFunction((PetscObject)A, "MatFactorSetSchurIS_C", NULL);
338: PetscObjectComposeFunction((PetscObject)A, "MatMkl_PardisoSetCntl_C", NULL);
339: return 0;
340: }
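/* Gather (reduce == PETSC_TRUE) the rows listed in schur_idxs from the full-size array `whole` into the
   compact array `schur`, or scatter them back (reduce == PETSC_FALSE), for each of the nrhs columns */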
342: static PetscErrorCode MatMKLPardisoScatterSchur_Private(Mat_MKL_PARDISO *mpardiso, PetscScalar *whole, PetscScalar *schur, PetscBool reduce)
343: {
344: if (reduce) { /* data given for the whole matrix */
345: PetscInt i, m = 0, p = 0;
346: for (i = 0; i < mpardiso->nrhs; i++) {
347: PetscInt j;
348: for (j = 0; j < mpardiso->schur_size; j++) schur[p + j] = whole[m + mpardiso->schur_idxs[j]];
349: m += mpardiso->n;
350: p += mpardiso->schur_size;
351: }
352: } else { /* from Schur to whole */
353: PetscInt i, m = 0, p = 0;
354: for (i = 0; i < mpardiso->nrhs; i++) {
355: PetscInt j;
356: for (j = 0; j < mpardiso->schur_size; j++) whole[m + mpardiso->schur_idxs[j]] = schur[p + j];
357: m += mpardiso->n;
358: p += mpardiso->schur_size;
359: }
360: }
361: return 0;
362: }
364: PetscErrorCode MatSolve_MKL_PARDISO(Mat A, Vec b, Vec x)
365: {
366: Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)A->data;
367: PetscScalar *xarray;
368: const PetscScalar *barray;
370: mat_mkl_pardiso->nrhs = 1;
371: VecGetArrayWrite(x, &xarray);
372: VecGetArrayRead(b, &barray);
374: if (!mat_mkl_pardiso->schur) mat_mkl_pardiso->phase = JOB_SOLVE_ITERATIVE_REFINEMENT;
375: else mat_mkl_pardiso->phase = JOB_SOLVE_FORWARD_SUBSTITUTION;
377: if (barray == xarray) { /* if the two vectors share the same memory */
378: PetscScalar *work;
379: if (!mat_mkl_pardiso->schur_work) {
380: PetscMalloc1(mat_mkl_pardiso->n, &work);
381: } else {
382: work = mat_mkl_pardiso->schur_work;
383: }
384: mat_mkl_pardiso->iparm[6 - 1] = 1;
385: MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, NULL, &mat_mkl_pardiso->nrhs,
386: mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, (void *)xarray, (void *)work, &mat_mkl_pardiso->err);
387: if (!mat_mkl_pardiso->schur_work) PetscFree(work);
388: } else {
389: mat_mkl_pardiso->iparm[6 - 1] = 0;
390: MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, mat_mkl_pardiso->perm,
391: &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, (void *)barray, (void *)xarray, &mat_mkl_pardiso->err);
392: }
393: VecRestoreArrayRead(b, &barray);
397: if (mat_mkl_pardiso->schur) { /* solve Schur complement and expand solution */
398: if (!mat_mkl_pardiso->solve_interior) {
399: PetscInt shift = mat_mkl_pardiso->schur_size;
401: MatFactorFactorizeSchurComplement(A);
402: /* if inverted, uses BLAS *MM subroutines, otherwise LAPACK *TRS */
403: if (A->schur_status != MAT_FACTOR_SCHUR_INVERTED) shift = 0;
405: /* solve Schur complement */
406: MatMKLPardisoScatterSchur_Private(mat_mkl_pardiso, xarray, mat_mkl_pardiso->schur_work, PETSC_TRUE);
407: MatMKLPardisoSolveSchur_Private(A, mat_mkl_pardiso->schur_work, mat_mkl_pardiso->schur_work + shift);
408: MatMKLPardisoScatterSchur_Private(mat_mkl_pardiso, xarray, mat_mkl_pardiso->schur_work + shift, PETSC_FALSE);
409: } else { /* if we are solving for the interior problem, any value in barray[schur] forward-substituted to xarray[schur] will be neglected */
410: PetscInt i;
411: for (i = 0; i < mat_mkl_pardiso->schur_size; i++) xarray[mat_mkl_pardiso->schur_idxs[i]] = 0.;
412: }
414: /* expansion phase */
415: mat_mkl_pardiso->iparm[6 - 1] = 1;
416: mat_mkl_pardiso->phase = JOB_SOLVE_BACKWARD_SUBSTITUTION;
417: MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, mat_mkl_pardiso->perm,
418: &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, (void *)xarray, (void *)mat_mkl_pardiso->schur_work, /* according to the specs, the solution vector is always used */
419: &mat_mkl_pardiso->err);
422: mat_mkl_pardiso->iparm[6 - 1] = 0;
423: }
424: VecRestoreArrayWrite(x, &xarray);
425: mat_mkl_pardiso->CleanUp = PETSC_TRUE;
426: return 0;
427: }
429: PetscErrorCode MatSolveTranspose_MKL_PARDISO(Mat A, Vec b, Vec x)
430: {
431: Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)A->data;
432: PetscInt oiparm12;
434: oiparm12 = mat_mkl_pardiso->iparm[12 - 1];
435: mat_mkl_pardiso->iparm[12 - 1] = 2;
436: MatSolve_MKL_PARDISO(A, b, x);
437: mat_mkl_pardiso->iparm[12 - 1] = oiparm12;
438: return 0;
439: }
441: PetscErrorCode MatMatSolve_MKL_PARDISO(Mat A, Mat B, Mat X)
442: {
443: Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)(A)->data;
444: const PetscScalar *barray;
445: PetscScalar *xarray;
446: PetscBool flg;
448: PetscObjectBaseTypeCompare((PetscObject)B, MATSEQDENSE, &flg);
450: if (X != B) {
451: PetscObjectBaseTypeCompare((PetscObject)X, MATSEQDENSE, &flg);
453: }
455: MatGetSize(B, NULL, (PetscInt *)&mat_mkl_pardiso->nrhs);
457: if (mat_mkl_pardiso->nrhs > 0) {
458: MatDenseGetArrayRead(B, &barray);
459: MatDenseGetArrayWrite(X, &xarray);
462: if (!mat_mkl_pardiso->schur) mat_mkl_pardiso->phase = JOB_SOLVE_ITERATIVE_REFINEMENT;
463: else mat_mkl_pardiso->phase = JOB_SOLVE_FORWARD_SUBSTITUTION;
465: MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, mat_mkl_pardiso->perm,
466: &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, (void *)barray, (void *)xarray, &mat_mkl_pardiso->err);
469: MatDenseRestoreArrayRead(B, &barray);
470: if (mat_mkl_pardiso->schur) { /* solve Schur complement and expand solution */
471: PetscScalar *o_schur_work = NULL;
473: /* solve Schur complement */
474: if (!mat_mkl_pardiso->solve_interior) {
475: PetscInt shift = mat_mkl_pardiso->schur_size * mat_mkl_pardiso->nrhs, scale;
476: PetscInt mem = mat_mkl_pardiso->n * mat_mkl_pardiso->nrhs;
478: MatFactorFactorizeSchurComplement(A);
479: /* allocate extra memory if it is needed */
480: scale = 1;
481: if (A->schur_status == MAT_FACTOR_SCHUR_INVERTED) scale = 2;
482: mem *= scale;
483: if (mem > mat_mkl_pardiso->schur_work_size) {
484: o_schur_work = mat_mkl_pardiso->schur_work;
485: PetscMalloc1(mem, &mat_mkl_pardiso->schur_work);
486: }
487: /* if inverted, uses BLAS *MM subroutines, otherwise LAPACK *TRS */
488: if (A->schur_status != MAT_FACTOR_SCHUR_INVERTED) shift = 0;
489: MatMKLPardisoScatterSchur_Private(mat_mkl_pardiso, xarray, mat_mkl_pardiso->schur_work, PETSC_TRUE);
490: MatMKLPardisoSolveSchur_Private(A, mat_mkl_pardiso->schur_work, mat_mkl_pardiso->schur_work + shift);
491: MatMKLPardisoScatterSchur_Private(mat_mkl_pardiso, xarray, mat_mkl_pardiso->schur_work + shift, PETSC_FALSE);
492: } else { /* if we are solving for the interior problem, any value in barray[schur,n] forward-substituted to xarray[schur,n] will be neglected */
493: PetscInt i, n, m = 0;
494: for (n = 0; n < mat_mkl_pardiso->nrhs; n++) {
495: for (i = 0; i < mat_mkl_pardiso->schur_size; i++) xarray[mat_mkl_pardiso->schur_idxs[i] + m] = 0.;
496: m += mat_mkl_pardiso->n;
497: }
498: }
500: /* expansion phase */
501: mat_mkl_pardiso->iparm[6 - 1] = 1;
502: mat_mkl_pardiso->phase = JOB_SOLVE_BACKWARD_SUBSTITUTION;
503: MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, mat_mkl_pardiso->perm,
504: &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, (void *)xarray, (void *)mat_mkl_pardiso->schur_work, /* according to the specs, the solution vector is always used */
505: &mat_mkl_pardiso->err);
506: if (o_schur_work) { /* restore original schur_work (minimal size) */
507: PetscFree(mat_mkl_pardiso->schur_work);
508: mat_mkl_pardiso->schur_work = o_schur_work;
509: }
511: mat_mkl_pardiso->iparm[6 - 1] = 0;
512: }
513: MatDenseRestoreArrayWrite(X, &xarray);
514: }
515: mat_mkl_pardiso->CleanUp = PETSC_TRUE;
516: return 0;
517: }
519: PetscErrorCode MatFactorNumeric_MKL_PARDISO(Mat F, Mat A, const MatFactorInfo *info)
520: {
521: Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)(F)->data;
523: mat_mkl_pardiso->matstruc = SAME_NONZERO_PATTERN;
524: (*mat_mkl_pardiso->Convert)(A, mat_mkl_pardiso->needsym, MAT_REUSE_MATRIX, &mat_mkl_pardiso->freeaij, &mat_mkl_pardiso->nz, &mat_mkl_pardiso->ia, &mat_mkl_pardiso->ja, (PetscScalar **)&mat_mkl_pardiso->a);
526: mat_mkl_pardiso->phase = JOB_NUMERICAL_FACTORIZATION;
527: MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, mat_mkl_pardiso->perm,
528: &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, NULL, (void *)mat_mkl_pardiso->schur, &mat_mkl_pardiso->err);
531: /* report flops */
532: if (mat_mkl_pardiso->iparm[18] > 0) PetscLogFlops(PetscPowRealInt(10., 6) * mat_mkl_pardiso->iparm[18]);
534: if (F->schur) { /* schur output from pardiso is in row major format */
535: #if defined(PETSC_HAVE_CUDA)
536: F->schur->offloadmask = PETSC_OFFLOAD_CPU;
537: #endif
538: MatFactorRestoreSchurComplement(F, NULL, MAT_FACTOR_SCHUR_UNFACTORED);
539: MatTranspose(F->schur, MAT_INPLACE_MATRIX, &F->schur);
540: }
541: mat_mkl_pardiso->matstruc = SAME_NONZERO_PATTERN;
542: mat_mkl_pardiso->CleanUp = PETSC_TRUE;
543: return 0;
544: }
546: PetscErrorCode MatSetFromOptions_MKL_PARDISO(Mat F, Mat A)
547: {
548: Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)F->data;
549: PetscInt icntl, bs, threads = 1;
550: PetscBool flg;
552: PetscOptionsBegin(PetscObjectComm((PetscObject)F), ((PetscObject)F)->prefix, "MKL_PARDISO Options", "Mat");
554: PetscOptionsInt("-mat_mkl_pardiso_65", "Suggested number of threads to use within PARDISO", "None", threads, &threads, &flg);
555: if (flg) PetscSetMKL_PARDISOThreads((int)threads);
557: PetscOptionsInt("-mat_mkl_pardiso_66", "Maximum number of factors with identical sparsity structure that must be kept in memory at the same time", "None", mat_mkl_pardiso->maxfct, &icntl, &flg);
558: if (flg) mat_mkl_pardiso->maxfct = icntl;
560: PetscOptionsInt("-mat_mkl_pardiso_67", "Indicates the actual matrix for the solution phase", "None", mat_mkl_pardiso->mnum, &icntl, &flg);
561: if (flg) mat_mkl_pardiso->mnum = icntl;
563: PetscOptionsInt("-mat_mkl_pardiso_68", "Message level information", "None", mat_mkl_pardiso->msglvl, &icntl, &flg);
564: if (flg) mat_mkl_pardiso->msglvl = icntl;
566: PetscOptionsInt("-mat_mkl_pardiso_69", "Defines the matrix type", "None", mat_mkl_pardiso->mtype, &icntl, &flg);
567: if (flg) {
568: void *pt[IPARM_SIZE];
569: mat_mkl_pardiso->mtype = icntl;
570: icntl = mat_mkl_pardiso->iparm[34];
571: bs = mat_mkl_pardiso->iparm[36];
572: MKL_PARDISO_INIT(pt, &mat_mkl_pardiso->mtype, mat_mkl_pardiso->iparm);
573: #if defined(PETSC_USE_REAL_SINGLE)
574: mat_mkl_pardiso->iparm[27] = 1;
575: #else
576: mat_mkl_pardiso->iparm[27] = 0;
577: #endif
578: mat_mkl_pardiso->iparm[34] = icntl;
579: mat_mkl_pardiso->iparm[36] = bs;
580: }
582: PetscOptionsInt("-mat_mkl_pardiso_1", "Use default values (if 0)", "None", mat_mkl_pardiso->iparm[0], &icntl, &flg);
583: if (flg) mat_mkl_pardiso->iparm[0] = icntl;
585: PetscOptionsInt("-mat_mkl_pardiso_2", "Fill-in reducing ordering for the input matrix", "None", mat_mkl_pardiso->iparm[1], &icntl, &flg);
586: if (flg) mat_mkl_pardiso->iparm[1] = icntl;
588: PetscOptionsInt("-mat_mkl_pardiso_4", "Preconditioned CGS/CG", "None", mat_mkl_pardiso->iparm[3], &icntl, &flg);
589: if (flg) mat_mkl_pardiso->iparm[3] = icntl;
591: PetscOptionsInt("-mat_mkl_pardiso_5", "User permutation", "None", mat_mkl_pardiso->iparm[4], &icntl, &flg);
592: if (flg) mat_mkl_pardiso->iparm[4] = icntl;
594: PetscOptionsInt("-mat_mkl_pardiso_6", "Write solution on x", "None", mat_mkl_pardiso->iparm[5], &icntl, &flg);
595: if (flg) mat_mkl_pardiso->iparm[5] = icntl;
597: PetscOptionsInt("-mat_mkl_pardiso_8", "Iterative refinement step", "None", mat_mkl_pardiso->iparm[7], &icntl, &flg);
598: if (flg) mat_mkl_pardiso->iparm[7] = icntl;
600: PetscOptionsInt("-mat_mkl_pardiso_10", "Pivoting perturbation", "None", mat_mkl_pardiso->iparm[9], &icntl, &flg);
601: if (flg) mat_mkl_pardiso->iparm[9] = icntl;
603: PetscOptionsInt("-mat_mkl_pardiso_11", "Scaling vectors", "None", mat_mkl_pardiso->iparm[10], &icntl, &flg);
604: if (flg) mat_mkl_pardiso->iparm[10] = icntl;
606: PetscOptionsInt("-mat_mkl_pardiso_12", "Solve with transposed or conjugate transposed matrix A", "None", mat_mkl_pardiso->iparm[11], &icntl, &flg);
607: if (flg) mat_mkl_pardiso->iparm[11] = icntl;
609: PetscOptionsInt("-mat_mkl_pardiso_13", "Improved accuracy using (non-) symmetric weighted matching", "None", mat_mkl_pardiso->iparm[12], &icntl, &flg);
610: if (flg) mat_mkl_pardiso->iparm[12] = icntl;
612: PetscOptionsInt("-mat_mkl_pardiso_18", "Number of non-zero elements", "None", mat_mkl_pardiso->iparm[17], &icntl, &flg);
613: if (flg) mat_mkl_pardiso->iparm[17] = icntl;
615: PetscOptionsInt("-mat_mkl_pardiso_19", "Report number of floating point operations (0 to disable)", "None", mat_mkl_pardiso->iparm[18], &icntl, &flg);
616: if (flg) mat_mkl_pardiso->iparm[18] = icntl;
618: PetscOptionsInt("-mat_mkl_pardiso_21", "Pivoting for symmetric indefinite matrices", "None", mat_mkl_pardiso->iparm[20], &icntl, &flg);
619: if (flg) mat_mkl_pardiso->iparm[20] = icntl;
621: PetscOptionsInt("-mat_mkl_pardiso_24", "Parallel factorization control", "None", mat_mkl_pardiso->iparm[23], &icntl, &flg);
622: if (flg) mat_mkl_pardiso->iparm[23] = icntl;
624: PetscOptionsInt("-mat_mkl_pardiso_25", "Parallel forward/backward solve control", "None", mat_mkl_pardiso->iparm[24], &icntl, &flg);
625: if (flg) mat_mkl_pardiso->iparm[24] = icntl;
627: PetscOptionsInt("-mat_mkl_pardiso_27", "Matrix checker", "None", mat_mkl_pardiso->iparm[26], &icntl, &flg);
628: if (flg) mat_mkl_pardiso->iparm[26] = icntl;
630: PetscOptionsInt("-mat_mkl_pardiso_31", "Partial solve and computing selected components of the solution vectors", "None", mat_mkl_pardiso->iparm[30], &icntl, &flg);
631: if (flg) mat_mkl_pardiso->iparm[30] = icntl;
633: PetscOptionsInt("-mat_mkl_pardiso_34", "Optimal number of threads for conditional numerical reproducibility (CNR) mode", "None", mat_mkl_pardiso->iparm[33], &icntl, &flg);
634: if (flg) mat_mkl_pardiso->iparm[33] = icntl;
636: PetscOptionsInt("-mat_mkl_pardiso_60", "Intel MKL_PARDISO mode", "None", mat_mkl_pardiso->iparm[59], &icntl, &flg);
637: if (flg) mat_mkl_pardiso->iparm[59] = icntl;
638: PetscOptionsEnd();
639: return 0;
640: }
642: PetscErrorCode MatFactorMKL_PARDISOInitialize_Private(Mat A, MatFactorType ftype, Mat_MKL_PARDISO *mat_mkl_pardiso)
643: {
644: PetscInt i, bs;
645: PetscBool match;
647: for (i = 0; i < IPARM_SIZE; i++) mat_mkl_pardiso->iparm[i] = 0;
648: for (i = 0; i < IPARM_SIZE; i++) mat_mkl_pardiso->pt[i] = 0;
649: #if defined(PETSC_USE_REAL_SINGLE)
650: mat_mkl_pardiso->iparm[27] = 1;
651: #else
652: mat_mkl_pardiso->iparm[27] = 0;
653: #endif
654: /* Default options for both sym and unsym */
655: mat_mkl_pardiso->iparm[0] = 1; /* Solver default parameters overridden with those provided in iparm */
656: mat_mkl_pardiso->iparm[1] = 2; /* Metis reordering */
657: mat_mkl_pardiso->iparm[5] = 0; /* Write solution into x */
658: mat_mkl_pardiso->iparm[7] = 0; /* Max number of iterative refinement steps */
659: mat_mkl_pardiso->iparm[17] = -1; /* Output: Number of nonzeros in the factor LU */
660: mat_mkl_pardiso->iparm[18] = -1; /* Output: Mflops for LU factorization */
661: #if 0
662: mat_mkl_pardiso->iparm[23] = 1; /* Parallel factorization control*/
663: #endif
664: PetscObjectTypeCompareAny((PetscObject)A, &match, MATSEQBAIJ, MATSEQSBAIJ, "");
665: MatGetBlockSize(A, &bs);
666: if (!match || bs == 1) {
667: mat_mkl_pardiso->iparm[34] = 1; /* Use C-style (zero-based) indexing for the ia and ja arrays */
668: mat_mkl_pardiso->n = A->rmap->N;
669: } else {
670: mat_mkl_pardiso->iparm[34] = 0; /* Use Fortran-style (one-based) indexing for the ia and ja arrays */
671: mat_mkl_pardiso->iparm[36] = bs;
672: mat_mkl_pardiso->n = A->rmap->N / bs;
673: }
674: mat_mkl_pardiso->iparm[39] = 0; /* Input: matrix/rhs/solution stored on rank-0 */
676: mat_mkl_pardiso->CleanUp = PETSC_FALSE;
677: mat_mkl_pardiso->maxfct = 1; /* Maximum number of numerical factorizations. */
678: mat_mkl_pardiso->mnum = 1; /* Which factorization to use. */
679: mat_mkl_pardiso->msglvl = 0; /* 0: do not print; 1: print statistical information to file */
680: mat_mkl_pardiso->phase = -1;
681: mat_mkl_pardiso->err = 0;
683: mat_mkl_pardiso->nrhs = 1;
684: mat_mkl_pardiso->err = 0;
685: mat_mkl_pardiso->phase = -1;
687: if (ftype == MAT_FACTOR_LU) {
688: mat_mkl_pardiso->iparm[9] = 13; /* Perturb the pivot elements with 1E-13 */
689: mat_mkl_pardiso->iparm[10] = 1; /* Use nonsymmetric permutation and scaling MPS */
690: mat_mkl_pardiso->iparm[12] = 1; /* Switch on Maximum Weighted Matching algorithm (default for non-symmetric) */
691: } else {
692: mat_mkl_pardiso->iparm[9] = 8; /* Perturb the pivot elements with 1E-8 */
693: mat_mkl_pardiso->iparm[10] = 0; /* Do not use nonsymmetric permutation and scaling MPS */
694: mat_mkl_pardiso->iparm[12] = 1; /* Switch on Maximum Weighted Matching algorithm (default for non-symmetric) */
695: #if defined(PETSC_USE_DEBUG)
696: mat_mkl_pardiso->iparm[26] = 1; /* Matrix checker */
697: #endif
698: }
699: PetscCalloc1(A->rmap->N * sizeof(INT_TYPE), &mat_mkl_pardiso->perm);
700: mat_mkl_pardiso->schur_size = 0;
701: return 0;
702: }
704: PetscErrorCode MatFactorSymbolic_AIJMKL_PARDISO_Private(Mat F, Mat A, const MatFactorInfo *info)
705: {
706: Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)F->data;
708: mat_mkl_pardiso->matstruc = DIFFERENT_NONZERO_PATTERN;
709: MatSetFromOptions_MKL_PARDISO(F, A);
710: /* throw away any previously computed structure */
711: if (mat_mkl_pardiso->freeaij) {
712: PetscFree2(mat_mkl_pardiso->ia, mat_mkl_pardiso->ja);
713: if (mat_mkl_pardiso->iparm[34] == 1) PetscFree(mat_mkl_pardiso->a);
714: }
715: (*mat_mkl_pardiso->Convert)(A, mat_mkl_pardiso->needsym, MAT_INITIAL_MATRIX, &mat_mkl_pardiso->freeaij, &mat_mkl_pardiso->nz, &mat_mkl_pardiso->ia, &mat_mkl_pardiso->ja, (PetscScalar **)&mat_mkl_pardiso->a);
716: if (mat_mkl_pardiso->iparm[34] == 1) mat_mkl_pardiso->n = A->rmap->N;
717: else mat_mkl_pardiso->n = A->rmap->N / A->rmap->bs;
719: mat_mkl_pardiso->phase = JOB_ANALYSIS;
721: /* reset flops counting if requested */
722: if (mat_mkl_pardiso->iparm[18]) mat_mkl_pardiso->iparm[18] = -1;
724: MKL_PARDISO(mat_mkl_pardiso->pt, &mat_mkl_pardiso->maxfct, &mat_mkl_pardiso->mnum, &mat_mkl_pardiso->mtype, &mat_mkl_pardiso->phase, &mat_mkl_pardiso->n, mat_mkl_pardiso->a, mat_mkl_pardiso->ia, mat_mkl_pardiso->ja, mat_mkl_pardiso->perm,
725: &mat_mkl_pardiso->nrhs, mat_mkl_pardiso->iparm, &mat_mkl_pardiso->msglvl, NULL, NULL, &mat_mkl_pardiso->err);
728: mat_mkl_pardiso->CleanUp = PETSC_TRUE;
730: if (F->factortype == MAT_FACTOR_LU) F->ops->lufactornumeric = MatFactorNumeric_MKL_PARDISO;
731: else F->ops->choleskyfactornumeric = MatFactorNumeric_MKL_PARDISO;
733: F->ops->solve = MatSolve_MKL_PARDISO;
734: F->ops->solvetranspose = MatSolveTranspose_MKL_PARDISO;
735: F->ops->matsolve = MatMatSolve_MKL_PARDISO;
736: return 0;
737: }
739: PetscErrorCode MatLUFactorSymbolic_AIJMKL_PARDISO(Mat F, Mat A, IS r, IS c, const MatFactorInfo *info)
740: {
741: MatFactorSymbolic_AIJMKL_PARDISO_Private(F, A, info);
742: return 0;
743: }
745: #if !defined(PETSC_USE_COMPLEX)
746: PetscErrorCode MatGetInertia_MKL_PARDISO(Mat F, PetscInt *nneg, PetscInt *nzero, PetscInt *npos)
747: {
748: Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)F->data;
750: if (nneg) *nneg = mat_mkl_pardiso->iparm[22];
751: if (npos) *npos = mat_mkl_pardiso->iparm[21];
752: if (nzero) *nzero = F->rmap->N - (mat_mkl_pardiso->iparm[22] + mat_mkl_pardiso->iparm[21]);
753: return 0;
754: }
755: #endif
757: PetscErrorCode MatCholeskyFactorSymbolic_AIJMKL_PARDISO(Mat F, Mat A, IS r, const MatFactorInfo *info)
758: {
759: MatFactorSymbolic_AIJMKL_PARDISO_Private(F, A, info);
760: #if defined(PETSC_USE_COMPLEX)
761: F->ops->getinertia = NULL;
762: #else
763: F->ops->getinertia = MatGetInertia_MKL_PARDISO;
764: #endif
765: return 0;
766: }
768: PetscErrorCode MatView_MKL_PARDISO(Mat A, PetscViewer viewer)
769: {
770: PetscBool iascii;
771: PetscViewerFormat format;
772: Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)A->data;
773: PetscInt i;
775: if (A->ops->solve != MatSolve_MKL_PARDISO) return 0;
777: PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &iascii);
778: if (iascii) {
779: PetscViewerGetFormat(viewer, &format);
780: if (format == PETSC_VIEWER_ASCII_INFO) {
781: PetscViewerASCIIPrintf(viewer, "MKL_PARDISO run parameters:\n");
782: PetscViewerASCIIPrintf(viewer, "MKL_PARDISO phase: %d \n", mat_mkl_pardiso->phase);
783: for (i = 1; i <= 64; i++) PetscViewerASCIIPrintf(viewer, "MKL_PARDISO iparm[%d]: %d \n", i, mat_mkl_pardiso->iparm[i - 1]);
784: PetscViewerASCIIPrintf(viewer, "MKL_PARDISO maxfct: %d \n", mat_mkl_pardiso->maxfct);
785: PetscViewerASCIIPrintf(viewer, "MKL_PARDISO mnum: %d \n", mat_mkl_pardiso->mnum);
786: PetscViewerASCIIPrintf(viewer, "MKL_PARDISO mtype: %d \n", mat_mkl_pardiso->mtype);
787: PetscViewerASCIIPrintf(viewer, "MKL_PARDISO n: %d \n", mat_mkl_pardiso->n);
788: PetscViewerASCIIPrintf(viewer, "MKL_PARDISO nrhs: %d \n", mat_mkl_pardiso->nrhs);
789: PetscViewerASCIIPrintf(viewer, "MKL_PARDISO msglvl: %d \n", mat_mkl_pardiso->msglvl);
790: }
791: }
792: return 0;
793: }
795: PetscErrorCode MatGetInfo_MKL_PARDISO(Mat A, MatInfoType flag, MatInfo *info)
796: {
797: Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)A->data;
799: info->block_size = 1.0;
800: info->nz_used = mat_mkl_pardiso->iparm[17];
801: info->nz_allocated = mat_mkl_pardiso->iparm[17];
802: info->nz_unneeded = 0.0;
803: info->assemblies = 0.0;
804: info->mallocs = 0.0;
805: info->memory = 0.0;
806: info->fill_ratio_given = 0;
807: info->fill_ratio_needed = 0;
808: info->factor_mallocs = 0;
809: return 0;
810: }
812: PetscErrorCode MatMkl_PardisoSetCntl_MKL_PARDISO(Mat F, PetscInt icntl, PetscInt ival)
813: {
814: PetscInt backup, bs;
815: Mat_MKL_PARDISO *mat_mkl_pardiso = (Mat_MKL_PARDISO *)F->data;
817: if (icntl <= 64) {
818: mat_mkl_pardiso->iparm[icntl - 1] = ival;
819: } else {
820: if (icntl == 65) PetscSetMKL_PARDISOThreads(ival);
821: else if (icntl == 66) mat_mkl_pardiso->maxfct = ival;
822: else if (icntl == 67) mat_mkl_pardiso->mnum = ival;
823: else if (icntl == 68) mat_mkl_pardiso->msglvl = ival;
824: else if (icntl == 69) {
825: void *pt[IPARM_SIZE];
826: backup = mat_mkl_pardiso->iparm[34];
827: bs = mat_mkl_pardiso->iparm[36];
828: mat_mkl_pardiso->mtype = ival;
829: MKL_PARDISO_INIT(pt, &mat_mkl_pardiso->mtype, mat_mkl_pardiso->iparm);
830: #if defined(PETSC_USE_REAL_SINGLE)
831: mat_mkl_pardiso->iparm[27] = 1;
832: #else
833: mat_mkl_pardiso->iparm[27] = 0;
834: #endif
835: mat_mkl_pardiso->iparm[34] = backup;
836: mat_mkl_pardiso->iparm[36] = bs;
837: } else if (icntl == 70) mat_mkl_pardiso->solve_interior = (PetscBool) !!ival;
838: }
839: return 0;
840: }
842: /*@
843: MatMkl_PardisoSetCntl - Set Mkl_Pardiso parameters
845: Logically Collective
847: Input Parameters:
848: + F - the factored matrix obtained by calling `MatGetFactor()`
849: . icntl - index of Mkl_Pardiso parameter
850: - ival - value of Mkl_Pardiso parameter
852: Options Database Key:
853: . -mat_mkl_pardiso_<icntl> <ival> - change the option numbered icntl to the value ival
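   Example Usage:
   A minimal sketch (the option values shown are only illustrative, and `A` is assumed to be an assembled sequential AIJ matrix); icntl 65 sets the suggested thread count and icntl 68 the message level, as handled by this interface:
.vb
   Mat F;
   MatGetFactor(A, MATSOLVERMKL_PARDISO, MAT_FACTOR_LU, &F);
   MatMkl_PardisoSetCntl(F, 65, 4);
   MatMkl_PardisoSetCntl(F, 68, 1);
.ve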
855: Level: beginner
857: References:
858: . * - Mkl_Pardiso Users' Guide
860: .seealso: `MATSOLVERMKL_PARDISO`, `MatGetFactor()`
861: @*/
862: PetscErrorCode MatMkl_PardisoSetCntl(Mat F, PetscInt icntl, PetscInt ival)
863: {
864: PetscTryMethod(F, "MatMkl_PardisoSetCntl_C", (Mat, PetscInt, PetscInt), (F, icntl, ival));
865: return 0;
866: }
868: /*MC
869: MATSOLVERMKL_PARDISO - A matrix type providing direct solvers (LU and Cholesky) for
870: `MATSEQAIJ`, `MATSEQBAIJ`, and `MATSEQSBAIJ` matrices via the external package MKL_PARDISO.
872: Use -pc_type lu -pc_factor_mat_solver_type mkl_pardiso to use this direct solver
874: Options Database Keys:
875: + -mat_mkl_pardiso_65 - Suggested number of threads to use within MKL_PARDISO
876: . -mat_mkl_pardiso_66 - Maximum number of factors with identical sparsity structure that must be kept in memory at the same time
877: . -mat_mkl_pardiso_67 - Indicates the actual matrix for the solution phase
878: . -mat_mkl_pardiso_68 - Message level information, use 1 to get detailed information on the solver options
879: . -mat_mkl_pardiso_69 - Defines the matrix type. IMPORTANT: When you set this flag, the iparm parameters are reset to the defaults for that matrix type
880: . -mat_mkl_pardiso_1 - Use default values
881: . -mat_mkl_pardiso_2 - Fill-in reducing ordering for the input matrix
882: . -mat_mkl_pardiso_4 - Preconditioned CGS/CG
883: . -mat_mkl_pardiso_5 - User permutation
884: . -mat_mkl_pardiso_6 - Write solution on x
885: . -mat_mkl_pardiso_8 - Iterative refinement step
886: . -mat_mkl_pardiso_10 - Pivoting perturbation
887: . -mat_mkl_pardiso_11 - Scaling vectors
888: . -mat_mkl_pardiso_12 - Solve with transposed or conjugate transposed matrix A
889: . -mat_mkl_pardiso_13 - Improved accuracy using (non-) symmetric weighted matching
890: . -mat_mkl_pardiso_18 - Number of non-zero elements
891: . -mat_mkl_pardiso_19 - Report number of floating point operations
892: . -mat_mkl_pardiso_21 - Pivoting for symmetric indefinite matrices
893: . -mat_mkl_pardiso_24 - Parallel factorization control
894: . -mat_mkl_pardiso_25 - Parallel forward/backward solve control
895: . -mat_mkl_pardiso_27 - Matrix checker
896: . -mat_mkl_pardiso_31 - Partial solve and computing selected components of the solution vectors
897: . -mat_mkl_pardiso_34 - Optimal number of threads for conditional numerical reproducibility (CNR) mode
898: - -mat_mkl_pardiso_60 - Intel MKL_PARDISO mode
900: Level: beginner
902: Notes:
903: Use -mat_mkl_pardiso_68 1 to display the number of threads the solver is using. MKL does not provide a way to directly access this
904: information.
906: For more information on the options, check the MKL PARDISO manual.
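   Example Usage:
   An illustrative command line (the program name is hypothetical) selecting this solver through the PC options and passing two of the keys listed above:
.vb
   ./myprog -pc_type lu -pc_factor_mat_solver_type mkl_pardiso -mat_mkl_pardiso_65 4 -mat_mkl_pardiso_68 1
.ve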
908: .seealso: `MATSEQAIJ`, `PCFactorSetMatSolverType()`, `MatSolverType`
909: M*/
910: static PetscErrorCode MatFactorGetSolverType_mkl_pardiso(Mat A, MatSolverType *type)
911: {
912: *type = MATSOLVERMKL_PARDISO;
913: return 0;
914: }
916: PETSC_EXTERN PetscErrorCode MatGetFactor_aij_mkl_pardiso(Mat A, MatFactorType ftype, Mat *F)
917: {
918: Mat B;
919: Mat_MKL_PARDISO *mat_mkl_pardiso;
920: PetscBool isSeqAIJ, isSeqBAIJ, isSeqSBAIJ;
922: PetscObjectBaseTypeCompare((PetscObject)A, MATSEQAIJ, &isSeqAIJ);
923: PetscObjectTypeCompare((PetscObject)A, MATSEQBAIJ, &isSeqBAIJ);
924: PetscObjectTypeCompare((PetscObject)A, MATSEQSBAIJ, &isSeqSBAIJ);
925: MatCreate(PetscObjectComm((PetscObject)A), &B);
926: MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N);
927: PetscStrallocpy("mkl_pardiso", &((PetscObject)B)->type_name);
928: MatSetUp(B);
930: PetscNew(&mat_mkl_pardiso);
931: B->data = mat_mkl_pardiso;
933: MatFactorMKL_PARDISOInitialize_Private(A, ftype, mat_mkl_pardiso);
934: if (ftype == MAT_FACTOR_LU) {
935: B->ops->lufactorsymbolic = MatLUFactorSymbolic_AIJMKL_PARDISO;
936: B->factortype = MAT_FACTOR_LU;
937: mat_mkl_pardiso->needsym = PETSC_FALSE;
938: if (isSeqAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqaij;
939: else if (isSeqBAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqbaij;
940: else {
942: SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "No support for PARDISO LU with %s format", ((PetscObject)A)->type_name);
943: }
944: #if defined(PETSC_USE_COMPLEX)
945: mat_mkl_pardiso->mtype = 13;
946: #else
947: mat_mkl_pardiso->mtype = 11;
948: #endif
949: } else {
950: B->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_AIJMKL_PARDISO;
951: B->factortype = MAT_FACTOR_CHOLESKY;
952: if (isSeqAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqaij;
953: else if (isSeqBAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqbaij;
954: else if (isSeqSBAIJ) mat_mkl_pardiso->Convert = MatMKLPardiso_Convert_seqsbaij;
955: else SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "No support for PARDISO CHOLESKY with %s format", ((PetscObject)A)->type_name);
957: mat_mkl_pardiso->needsym = PETSC_TRUE;
958: #if !defined(PETSC_USE_COMPLEX)
959: if (A->spd == PETSC_BOOL3_TRUE) mat_mkl_pardiso->mtype = 2;
960: else mat_mkl_pardiso->mtype = -2;
961: #else
962: mat_mkl_pardiso->mtype = 6;
964: #endif
965: }
966: B->ops->destroy = MatDestroy_MKL_PARDISO;
967: B->ops->view = MatView_MKL_PARDISO;
968: B->ops->getinfo = MatGetInfo_MKL_PARDISO;
969: B->factortype = ftype;
970: B->assembled = PETSC_TRUE;
972: PetscFree(B->solvertype);
973: PetscStrallocpy(MATSOLVERMKL_PARDISO, &B->solvertype);
975: PetscObjectComposeFunction((PetscObject)B, "MatFactorGetSolverType_C", MatFactorGetSolverType_mkl_pardiso);
976: PetscObjectComposeFunction((PetscObject)B, "MatFactorSetSchurIS_C", MatFactorSetSchurIS_MKL_PARDISO);
977: PetscObjectComposeFunction((PetscObject)B, "MatMkl_PardisoSetCntl_C", MatMkl_PardisoSetCntl_MKL_PARDISO);
979: *F = B;
980: return 0;
981: }
983: PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_MKL_Pardiso(void)
984: {
985: MatSolverTypeRegister(MATSOLVERMKL_PARDISO, MATSEQAIJ, MAT_FACTOR_LU, MatGetFactor_aij_mkl_pardiso);
986: MatSolverTypeRegister(MATSOLVERMKL_PARDISO, MATSEQAIJ, MAT_FACTOR_CHOLESKY, MatGetFactor_aij_mkl_pardiso);
987: MatSolverTypeRegister(MATSOLVERMKL_PARDISO, MATSEQBAIJ, MAT_FACTOR_LU, MatGetFactor_aij_mkl_pardiso);
988: MatSolverTypeRegister(MATSOLVERMKL_PARDISO, MATSEQSBAIJ, MAT_FACTOR_CHOLESKY, MatGetFactor_aij_mkl_pardiso);
989: return 0;
990: }