Actual source code: mpimatmatmult.c


  2: /*
  3:   Defines matrix-matrix product routines for pairs of MPIAIJ matrices
  4:           C = A * B
  5: */
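
/*
  Illustrative usage sketch (a minimal, non-compiled example): the kernels below are normally
  reached through the public MatProduct API or MatMatMult(), assuming A and B are assembled
  MATMPIAIJ matrices with compatible layouts:

    Mat C;
    MatProductCreate(A, B, NULL, &C);
    MatProductSetType(C, MATPRODUCT_AB);
    MatProductSetAlgorithm(C, "scalable");   (or "nonscalable", "seqmpi", "backend", ...)
    MatProductSetFromOptions(C);
    MatProductSymbolic(C);                   (typically dispatches to MatProductSymbolic_AB_MPIAIJ_MPIAIJ() below)
    MatProductNumeric(C);
    MatDestroy(&C);

  or, equivalently, MatMatMult(A, B, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &C).
*/
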
  6: #include <../src/mat/impls/aij/seq/aij.h>
  7: #include <../src/mat/utils/freespace.h>
  8: #include <../src/mat/impls/aij/mpi/mpiaij.h>
  9: #include <petscbt.h>
 10: #include <../src/mat/impls/dense/mpi/mpidense.h>
 11: #include <petsc/private/vecimpl.h>
 12: #include <petsc/private/sfimpl.h>

 14: #if defined(PETSC_HAVE_HYPRE)
 15: PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat, Mat, PetscReal, Mat);
 16: #endif
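
/*
  Handles C = A*B^T by explicitly forming B^T, making it the product's B, and then reusing the
  AB symbolic routine below; the reference previously held in product->B is released.
*/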

 18: PETSC_INTERN PetscErrorCode MatProductSymbolic_ABt_MPIAIJ_MPIAIJ(Mat C)
 19: {
 20:   Mat_Product *product = C->product;
 21:   Mat          B       = product->B;

 23:   MatTranspose(B, MAT_INITIAL_MATRIX, &product->B);
 24:   MatDestroy(&B);
 25:   MatProductSymbolic_AB_MPIAIJ_MPIAIJ(C);
 26:   return 0;
 27: }

 29: PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C)
 30: {
 31:   Mat_Product        *product = C->product;
 32:   Mat                 A = product->A, B = product->B;
 33:   MatProductAlgorithm alg  = product->alg;
 34:   PetscReal           fill = product->fill;
 35:   PetscBool           flg;

 37:   /* scalable */
 38:   PetscStrcmp(alg, "scalable", &flg);
 39:   if (flg) {
 40:     MatMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C);
 41:     return 0;
 42:   }

 44:   /* nonscalable */
 45:   PetscStrcmp(alg, "nonscalable", &flg);
 46:   if (flg) {
 47:     MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C);
 48:     return 0;
 49:   }

 51:   /* seqmpi */
 52:   PetscStrcmp(alg, "seqmpi", &flg);
 53:   if (flg) {
 54:     MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A, B, fill, C);
 55:     return 0;
 56:   }

 58:   /* backend general code */
 59:   PetscStrcmp(alg, "backend", &flg);
 60:   if (flg) {
 61:     MatProductSymbolic_MPIAIJBACKEND(C);
 62:     return 0;
 63:   }

 65: #if defined(PETSC_HAVE_HYPRE)
 66:   PetscStrcmp(alg, "hypre", &flg);
 67:   if (flg) {
 68:     MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A, B, fill, C);
 69:     return 0;
 70:   }
 71: #endif
 72:   SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_SUP, "Mat Product Algorithm is not supported");
 73: }

 75: PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(void *data)
 76: {
 77:   Mat_APMPI *ptap = (Mat_APMPI *)data;

 79:   PetscFree2(ptap->startsj_s, ptap->startsj_r);
 80:   PetscFree(ptap->bufa);
 81:   MatDestroy(&ptap->P_loc);
 82:   MatDestroy(&ptap->P_oth);
 83:   MatDestroy(&ptap->Pt);
 84:   PetscFree(ptap->api);
 85:   PetscFree(ptap->apj);
 86:   PetscFree(ptap->apa);
 87:   PetscFree(ptap);
 88:   return 0;
 89: }
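
/*
  Numeric phase of the "nonscalable" AB algorithm: each local row C[i,:] is accumulated in the
  dense work array apa of length P->cmap->N (allocated in the symbolic phase), then scattered
  into the diagonal and off-diagonal blocks of C.  Memory use grows with the global number of
  columns of P, which is what makes this variant "nonscalable".
*/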

 91: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, Mat C)
 92: {
 93:   Mat_MPIAIJ        *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data;
 94:   Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)(a->A)->data, *ao = (Mat_SeqAIJ *)(a->B)->data;
 95:   Mat_SeqAIJ        *cd = (Mat_SeqAIJ *)(c->A)->data, *co = (Mat_SeqAIJ *)(c->B)->data;
 96:   PetscScalar       *cda = cd->a, *coa = co->a;
 97:   Mat_SeqAIJ        *p_loc, *p_oth;
 98:   PetscScalar       *apa, *ca;
 99:   PetscInt           cm = C->rmap->n;
100:   Mat_APMPI         *ptap;
101:   PetscInt          *api, *apj, *apJ, i, k;
102:   PetscInt           cstart = C->cmap->rstart;
103:   PetscInt           cdnz, conz, k0, k1;
104:   const PetscScalar *dummy;
105:   MPI_Comm           comm;
106:   PetscMPIInt        size;

108:   MatCheckProduct(C, 3);
109:   ptap = (Mat_APMPI *)C->product->data;
111:   PetscObjectGetComm((PetscObject)A, &comm);
112:   MPI_Comm_size(comm, &size);

115:   /* flag CPU mask for C */
116: #if defined(PETSC_HAVE_DEVICE)
117:   if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
118:   if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
119:   if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
120: #endif

122:   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
123:   /*-----------------------------------------------------*/
124:   /* update numerical values of P_oth and P_loc */
125:   MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth);
126:   MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc);

128:   /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
129:   /*----------------------------------------------------------*/
130:   /* get data from symbolic products */
131:   p_loc = (Mat_SeqAIJ *)(ptap->P_loc)->data;
132:   p_oth = NULL;
133:   if (size > 1) p_oth = (Mat_SeqAIJ *)(ptap->P_oth)->data;

135:   /* get apa for storing dense row A[i,:]*P */
136:   apa = ptap->apa;

138:   api = ptap->api;
139:   apj = ptap->apj;
140:   /* trigger copy to CPU */
141:   MatSeqAIJGetArrayRead(a->A, &dummy);
142:   MatSeqAIJRestoreArrayRead(a->A, &dummy);
143:   MatSeqAIJGetArrayRead(a->B, &dummy);
144:   MatSeqAIJRestoreArrayRead(a->B, &dummy);
145:   for (i = 0; i < cm; i++) {
146:     /* compute apa = A[i,:]*P */
147:     AProw_nonscalable(i, ad, ao, p_loc, p_oth, apa);

149:     /* set values in C */
150:     apJ  = apj + api[i];
151:     cdnz = cd->i[i + 1] - cd->i[i];
152:     conz = co->i[i + 1] - co->i[i];

154:     /* 1st off-diagonal part of C */
155:     ca = coa + co->i[i];
156:     k  = 0;
157:     for (k0 = 0; k0 < conz; k0++) {
158:       if (apJ[k] >= cstart) break;
159:       ca[k0]        = apa[apJ[k]];
160:       apa[apJ[k++]] = 0.0;
161:     }

163:     /* diagonal part of C */
164:     ca = cda + cd->i[i];
165:     for (k1 = 0; k1 < cdnz; k1++) {
166:       ca[k1]        = apa[apJ[k]];
167:       apa[apJ[k++]] = 0.0;
168:     }

170:     /* 2nd off-diagonal part of C */
171:     ca = coa + co->i[i];
172:     for (; k0 < conz; k0++) {
173:       ca[k0]        = apa[apJ[k]];
174:       apa[apJ[k++]] = 0.0;
175:     }
176:   }
177:   MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY);
178:   MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY);
179:   return 0;
180: }
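
/*
  Symbolic phase of the "nonscalable" AB algorithm: gather the off-process rows of P needed by
  the local part of A (P_oth) and the local rows of P (P_loc), build the row structure api/apj
  of AP = A_diag*P_loc + A_off*P_oth with a condensed linked list, preallocate C from it, and
  allocate the dense work row apa of length P->cmap->N for the numeric phase.
*/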

182: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A, Mat P, PetscReal fill, Mat C)
183: {
184:   MPI_Comm           comm;
185:   PetscMPIInt        size;
186:   Mat_APMPI         *ptap;
187:   PetscFreeSpaceList free_space = NULL, current_space = NULL;
188:   Mat_MPIAIJ        *a  = (Mat_MPIAIJ *)A->data;
189:   Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)(a->A)->data, *ao = (Mat_SeqAIJ *)(a->B)->data, *p_loc, *p_oth;
190:   PetscInt          *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz;
191:   PetscInt          *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart;
192:   PetscInt          *lnk, i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi;
193:   PetscInt           am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n;
194:   PetscBT            lnkbt;
195:   PetscReal          afill;
196:   MatType            mtype;

198:   MatCheckProduct(C, 4);
200:   PetscObjectGetComm((PetscObject)A, &comm);
201:   MPI_Comm_size(comm, &size);

 203:   /* create struct Mat_APMPI and attach it to C later */
204:   PetscNew(&ptap);

206:   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
207:   MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth);

209:   /* get P_loc by taking all local rows of P */
210:   MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc);

212:   p_loc  = (Mat_SeqAIJ *)(ptap->P_loc)->data;
213:   pi_loc = p_loc->i;
214:   pj_loc = p_loc->j;
215:   if (size > 1) {
216:     p_oth  = (Mat_SeqAIJ *)(ptap->P_oth)->data;
217:     pi_oth = p_oth->i;
218:     pj_oth = p_oth->j;
219:   } else {
220:     p_oth  = NULL;
221:     pi_oth = NULL;
222:     pj_oth = NULL;
223:   }

225:   /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
226:   /*-------------------------------------------------------------------*/
227:   PetscMalloc1(am + 2, &api);
228:   ptap->api = api;
229:   api[0]    = 0;

231:   /* create and initialize a linked list */
232:   PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt);

234:   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
235:   PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space);
236:   current_space = free_space;

238:   MatPreallocateBegin(comm, am, pn, dnz, onz);
239:   for (i = 0; i < am; i++) {
240:     /* diagonal portion of A */
241:     nzi = adi[i + 1] - adi[i];
242:     for (j = 0; j < nzi; j++) {
243:       row  = *adj++;
244:       pnz  = pi_loc[row + 1] - pi_loc[row];
245:       Jptr = pj_loc + pi_loc[row];
246:       /* add non-zero cols of P into the sorted linked list lnk */
247:       PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt);
248:     }
249:     /* off-diagonal portion of A */
250:     nzi = aoi[i + 1] - aoi[i];
251:     for (j = 0; j < nzi; j++) {
252:       row  = *aoj++;
253:       pnz  = pi_oth[row + 1] - pi_oth[row];
254:       Jptr = pj_oth + pi_oth[row];
255:       PetscLLCondensedAddSorted(pnz, Jptr, lnk, lnkbt);
256:     }
257:     /* add possible missing diagonal entry */
258:     if (C->force_diagonals) {
259:       j = i + rstart; /* column index */
260:       PetscLLCondensedAddSorted(1, &j, lnk, lnkbt);
261:     }

263:     apnz       = lnk[0];
264:     api[i + 1] = api[i] + apnz;

266:     /* if free space is not available, double the total space in the list */
267:     if (current_space->local_remaining < apnz) {
268:       PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), &current_space);
269:       nspacedouble++;
270:     }

272:     /* Copy data into free space, then initialize lnk */
273:     PetscLLCondensedClean(pN, apnz, current_space->array, lnk, lnkbt);
274:     MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz);

276:     current_space->array += apnz;
277:     current_space->local_used += apnz;
278:     current_space->local_remaining -= apnz;
279:   }

281:   /* Allocate space for apj, initialize apj, and */
282:   /* destroy list of free space and other temporary array(s) */
283:   PetscMalloc1(api[am] + 1, &ptap->apj);
284:   apj = ptap->apj;
285:   PetscFreeSpaceContiguous(&free_space, ptap->apj);
286:   PetscLLDestroy(lnk, lnkbt);

288:   /* malloc apa to store dense row A[i,:]*P */
289:   PetscCalloc1(pN, &ptap->apa);

291:   /* set and assemble symbolic parallel matrix C */
292:   /*---------------------------------------------*/
293:   MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE);
294:   MatSetBlockSizesFromMats(C, A, P);

296:   MatGetType(A, &mtype);
297:   MatSetType(C, mtype);
298:   MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz);
299:   MatPreallocateEnd(dnz, onz);

301:   MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
302:   MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY);
303:   MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY);
304:   MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);

306:   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
307:   C->ops->productnumeric = MatProductNumeric_AB;

309:   /* attach the supporting struct to C for reuse */
310:   C->product->data    = ptap;
311:   C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;

313:   /* set MatInfo */
314:   afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
315:   if (afill < 1.0) afill = 1.0;
316:   C->info.mallocs           = nspacedouble;
317:   C->info.fill_ratio_given  = fill;
318:   C->info.fill_ratio_needed = afill;

320: #if defined(PETSC_USE_INFO)
321:   if (api[am]) {
322:     PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill);
 323:     PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill);
324:   } else {
325:     PetscInfo(C, "Empty matrix product\n");
326:   }
327: #endif
328:   return 0;
329: }

331: /* ------------------------------------------------------- */
332: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat, Mat, PetscReal, Mat);
333: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat, Mat, Mat);

335: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C)
336: {
337:   Mat_Product *product = C->product;
338:   Mat          A = product->A, B = product->B;

340:   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
341:     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->cmap->rstart, A->cmap->rend, B->rmap->rstart, B->rmap->rend);

343:   C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense;
344:   C->ops->productsymbolic = MatProductSymbolic_AB;
345:   return 0;
346: }
347: /* -------------------------------------------------------------------- */
348: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C)
349: {
350:   Mat_Product *product = C->product;
351:   Mat          A = product->A, B = product->B;

353:   if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend)
354:     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")", A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend);

356:   C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense;
357:   C->ops->productsymbolic          = MatProductSymbolic_AtB;
358:   return 0;
359: }

361: /* --------------------------------------------------------------------- */
362: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C)
363: {
364:   Mat_Product *product = C->product;

366:   switch (product->type) {
367:   case MATPRODUCT_AB:
368:     MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C);
369:     break;
370:   case MATPRODUCT_AtB:
371:     MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C);
372:     break;
373:   default:
374:     break;
375:   }
376:   return 0;
377: }
378: /* ------------------------------------------------------- */
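/*
  Context attached to C->product->data for MPIAIJ*MPIDense products (defined below):
    workB, workB1  - sequential dense work matrices holding the off-process rows of B needed
                     locally (workB1 covers the remaining columns when B is processed in
                     column blocks, see MatMatMultSymbolic_MPIAIJ_MPIDense())
    rwaits, swaits - receive/send requests reused in every numeric phase
    nsends, nrecvs - number of messages sent/received
    stype, rtype   - MPI derived datatypes describing the strided send/recv layouts
    blda           - leading dimension of the local array of B
*/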

380: typedef struct {
381:   Mat           workB, workB1;
382:   MPI_Request  *rwaits, *swaits;
383:   PetscInt      nsends, nrecvs;
384:   MPI_Datatype *stype, *rtype;
385:   PetscInt      blda;
386: } MPIAIJ_MPIDense;

388: PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
389: {
390:   MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense *)ctx;
391:   PetscInt         i;

393:   MatDestroy(&contents->workB);
394:   MatDestroy(&contents->workB1);
395:   for (i = 0; i < contents->nsends; i++) MPI_Type_free(&contents->stype[i]);
396:   for (i = 0; i < contents->nrecvs; i++) MPI_Type_free(&contents->rtype[i]);
397:   PetscFree4(contents->stype, contents->rtype, contents->rwaits, contents->swaits);
398:   PetscFree(contents);
399:   return 0;
400: }

402: static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A, Mat B, PetscReal fill, Mat C)
403: {
404:   Mat_MPIAIJ      *aij = (Mat_MPIAIJ *)A->data;
405:   PetscInt         nz  = aij->B->cmap->n, nsends, nrecvs, i, nrows_to, j, blda, m, M, n, N;
406:   MPIAIJ_MPIDense *contents;
407:   VecScatter       ctx = aij->Mvctx;
408:   PetscInt         Am = A->rmap->n, Bm = B->rmap->n, BN = B->cmap->N, Bbn, Bbn1, bs, nrows_from, numBb;
409:   MPI_Comm         comm;
410:   MPI_Datatype     type1, *stype, *rtype;
411:   const PetscInt  *sindices, *sstarts, *rstarts;
412:   PetscMPIInt     *disp;
413:   PetscBool        cisdense;

415:   MatCheckProduct(C, 4);
417:   PetscObjectGetComm((PetscObject)A, &comm);
418:   PetscObjectBaseTypeCompare((PetscObject)C, MATMPIDENSE, &cisdense);
419:   if (!cisdense) MatSetType(C, ((PetscObject)B)->type_name);
420:   MatGetLocalSize(C, &m, &n);
421:   MatGetSize(C, &M, &N);
422:   if (m == PETSC_DECIDE || n == PETSC_DECIDE || M == PETSC_DECIDE || N == PETSC_DECIDE) MatSetSizes(C, Am, B->cmap->n, A->rmap->N, BN);
423:   MatSetBlockSizesFromMats(C, A, B);
424:   MatSetUp(C);
425:   MatDenseGetLDA(B, &blda);
426:   PetscNew(&contents);

428:   VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL);
429:   VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL);

431:   /* Create column block of B and C for memory scalability when BN is too large */
432:   /* Estimate Bbn, column size of Bb */
433:   if (nz) {
434:     Bbn1 = 2 * Am * BN / nz;
435:     if (!Bbn1) Bbn1 = 1;
436:   } else Bbn1 = BN;

438:   bs   = PetscAbs(B->cmap->bs);
439:   Bbn1 = Bbn1 / bs * bs; /* Bbn1 is a multiple of bs */
440:   if (Bbn1 > BN) Bbn1 = BN;
441:   MPI_Allreduce(&Bbn1, &Bbn, 1, MPIU_INT, MPI_MAX, comm);

443:   /* Enable runtime option for Bbn */
444:   PetscOptionsBegin(comm, ((PetscObject)C)->prefix, "MatMatMult", "Mat");
445:   PetscOptionsInt("-matmatmult_Bbn", "Number of columns in Bb", "MatMatMult", Bbn, &Bbn, NULL);
446:   PetscOptionsEnd();
447:   Bbn = PetscMin(Bbn, BN);

449:   if (Bbn > 0 && Bbn < BN) {
450:     numBb = BN / Bbn;
451:     Bbn1  = BN - numBb * Bbn;
452:   } else numBb = 0;
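  /* Note: with the estimate Bbn1 = 2*Am*BN/nz, the work matrix workB (nz x Bbn) takes roughly
     twice the memory of the local rows of C (Am x BN).  For example Am=1000, BN=10000 and
     nz=50000 give an initial estimate of 400 columns per block, before rounding to the block
     size, the MPI_Allreduce, and the optional -matmatmult_Bbn override above. */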

454:   if (numBb) {
455:     PetscInfo(C, "use Bb, BN=%" PetscInt_FMT ", Bbn=%" PetscInt_FMT "; numBb=%" PetscInt_FMT "\n", BN, Bbn, numBb);
456:     if (Bbn1) { /* Create workB1 for the remaining columns */
457:       PetscInfo(C, "use Bb1, BN=%" PetscInt_FMT ", Bbn1=%" PetscInt_FMT "\n", BN, Bbn1);
458:       /* Create work matrix used to store off processor rows of B needed for local product */
459:       MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn1, NULL, &contents->workB1);
460:     } else contents->workB1 = NULL;
461:   }

463:   /* Create work matrix used to store off processor rows of B needed for local product */
464:   MatCreateSeqDense(PETSC_COMM_SELF, nz, Bbn, NULL, &contents->workB);

466:   /* Use MPI derived data type to reduce memory required by the send/recv buffers */
467:   PetscMalloc4(nsends, &stype, nrecvs, &rtype, nrecvs, &contents->rwaits, nsends, &contents->swaits);
468:   contents->stype  = stype;
469:   contents->nsends = nsends;

471:   contents->rtype  = rtype;
472:   contents->nrecvs = nrecvs;
473:   contents->blda   = blda;

475:   PetscMalloc1(Bm + 1, &disp);
476:   for (i = 0; i < nsends; i++) {
477:     nrows_to = sstarts[i + 1] - sstarts[i];
478:     for (j = 0; j < nrows_to; j++) disp[j] = sindices[sstarts[i] + j]; /* rowB to be sent */
479:     MPI_Type_create_indexed_block(nrows_to, 1, disp, MPIU_SCALAR, &type1);
480:     MPI_Type_create_resized(type1, 0, blda * sizeof(PetscScalar), &stype[i]);
481:     MPI_Type_commit(&stype[i]);
482:     MPI_Type_free(&type1);
483:   }

485:   for (i = 0; i < nrecvs; i++) {
486:     /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */
487:     nrows_from = rstarts[i + 1] - rstarts[i];
488:     disp[0]    = 0;
489:     MPI_Type_create_indexed_block(1, nrows_from, disp, MPIU_SCALAR, &type1);
490:     MPI_Type_create_resized(type1, 0, nz * sizeof(PetscScalar), &rtype[i]);
491:     MPI_Type_commit(&rtype[i]);
492:     MPI_Type_free(&type1);
493:   }
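
  /* Explanatory sketch of the datatypes built above: to send, say, rows {0,3,5} of B to one
     process, stype[i] selects 3 scalars of one column via an indexed block and is resized to an
     extent of blda*sizeof(PetscScalar), so an MPI_Isend() count of ncols walks through all
     columns of B's column-major array; rtype[i] is a contiguous block of nrows_from scalars
     resized to an extent of nz*sizeof(PetscScalar), so each received column lands in the
     matching column of workB (which has nz rows). */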

495:   PetscFree(disp);
496:   VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, NULL, NULL);
497:   VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, NULL, NULL);
498:   MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE);
499:   MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY);
500:   MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY);
501:   MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);

503:   C->product->data       = contents;
504:   C->product->destroy    = MatMPIAIJ_MPIDenseDestroy;
505:   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
506:   return 0;
507: }

509: PETSC_INTERN PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat, Mat, Mat, const PetscBool);

511: /*
512:     Performs an efficient scatter on the rows of B needed by this process; this is
513:     a modification of the VecScatterBegin_() routines.

 515:     Input: If Bbidx = 0, the scatter fills workB (the Bb block), otherwise workB1 (the Bb1 remainder block); see MatMatMultSymbolic_MPIAIJ_MPIDense()
516: */

518: PetscErrorCode MatMPIDenseScatter(Mat A, Mat B, PetscInt Bbidx, Mat C, Mat *outworkB)
519: {
520:   Mat_MPIAIJ        *aij = (Mat_MPIAIJ *)A->data;
521:   const PetscScalar *b;
522:   PetscScalar       *rvalues;
523:   VecScatter         ctx = aij->Mvctx;
524:   const PetscInt    *sindices, *sstarts, *rstarts;
525:   const PetscMPIInt *sprocs, *rprocs;
526:   PetscInt           i, nsends, nrecvs;
527:   MPI_Request       *swaits, *rwaits;
528:   MPI_Comm           comm;
529:   PetscMPIInt        tag = ((PetscObject)ctx)->tag, ncols = B->cmap->N, nrows = aij->B->cmap->n, nsends_mpi, nrecvs_mpi;
530:   MPIAIJ_MPIDense   *contents;
531:   Mat                workB;
532:   MPI_Datatype      *stype, *rtype;
533:   PetscInt           blda;

535:   MatCheckProduct(C, 4);
537:   contents = (MPIAIJ_MPIDense *)C->product->data;
538:   VecScatterGetRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL /*bs*/);
539:   VecScatterGetRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL /*bs*/);
540:   PetscMPIIntCast(nsends, &nsends_mpi);
541:   PetscMPIIntCast(nrecvs, &nrecvs_mpi);
542:   if (Bbidx == 0) workB = *outworkB = contents->workB;
543:   else workB = *outworkB = contents->workB1;
545:   swaits = contents->swaits;
546:   rwaits = contents->rwaits;

548:   MatDenseGetArrayRead(B, &b);
549:   MatDenseGetLDA(B, &blda);
551:   MatDenseGetArray(workB, &rvalues);

553:   /* Post recv, use MPI derived data type to save memory */
554:   PetscObjectGetComm((PetscObject)C, &comm);
555:   rtype = contents->rtype;
556:   for (i = 0; i < nrecvs; i++) MPI_Irecv(rvalues + (rstarts[i] - rstarts[0]), ncols, rtype[i], rprocs[i], tag, comm, rwaits + i);

558:   stype = contents->stype;
559:   for (i = 0; i < nsends; i++) MPI_Isend(b, ncols, stype[i], sprocs[i], tag, comm, swaits + i);

561:   if (nrecvs) MPI_Waitall(nrecvs_mpi, rwaits, MPI_STATUSES_IGNORE);
562:   if (nsends) MPI_Waitall(nsends_mpi, swaits, MPI_STATUSES_IGNORE);

564:   VecScatterRestoreRemote_Private(ctx, PETSC_TRUE /*send*/, &nsends, &sstarts, &sindices, &sprocs, NULL);
565:   VecScatterRestoreRemoteOrdered_Private(ctx, PETSC_FALSE /*recv*/, &nrecvs, &rstarts, NULL, &rprocs, NULL);
566:   MatDenseRestoreArrayRead(B, &b);
567:   MatDenseRestoreArray(workB, &rvalues);
568:   return 0;
569: }
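
/*
  Numeric phase for C = A*B with A MPIAIJ and B MPIDense: C_local = A_diag*B_local plus
  A_off*B_oth, where B_oth (workB) holds the off-process rows of B gathered by
  MatMPIDenseScatter().  When the symbolic phase chose a column block size smaller than
  B->cmap->N, the product is performed block by block through MatDenseGetSubMatrix().
*/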

571: static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A, Mat B, Mat C)
572: {
573:   Mat_MPIAIJ      *aij    = (Mat_MPIAIJ *)A->data;
574:   Mat_MPIDense    *bdense = (Mat_MPIDense *)B->data;
575:   Mat_MPIDense    *cdense = (Mat_MPIDense *)C->data;
576:   Mat              workB;
577:   MPIAIJ_MPIDense *contents;

579:   MatCheckProduct(C, 3);
581:   contents = (MPIAIJ_MPIDense *)C->product->data;
582:   /* diagonal block of A times all local rows of B */
583:   /* TODO: this calls a symbolic multiplication every time, which could be avoided */
584:   MatMatMult(aij->A, bdense->A, MAT_REUSE_MATRIX, PETSC_DEFAULT, &cdense->A);
585:   if (contents->workB->cmap->n == B->cmap->N) {
586:     /* get off processor parts of B needed to complete C=A*B */
587:     MatMPIDenseScatter(A, B, 0, C, &workB);

589:     /* off-diagonal block of A times nonlocal rows of B */
590:     MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE);
591:   } else {
592:     Mat       Bb, Cb;
593:     PetscInt  BN = B->cmap->N, n = contents->workB->cmap->n, i;
594:     PetscBool ccpu;

 597:     /* Prevent unneeded copies back and forth to the GPU
 598:        when getting and restoring the submatrices;
 599:        we still need proper GPU code for parallel AIJ * dense */
600:     MatBoundToCPU(C, &ccpu);
601:     MatBindToCPU(C, PETSC_TRUE);
602:     for (i = 0; i < BN; i += n) {
603:       MatDenseGetSubMatrix(B, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Bb);
604:       MatDenseGetSubMatrix(C, PETSC_DECIDE, PETSC_DECIDE, i, PetscMin(i + n, BN), &Cb);

606:       /* get off processor parts of B needed to complete C=A*B */
607:       MatMPIDenseScatter(A, Bb, (i + n) > BN, C, &workB);

609:       /* off-diagonal block of A times nonlocal rows of B */
610:       cdense = (Mat_MPIDense *)Cb->data;
611:       MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B, workB, cdense->A, PETSC_TRUE);
612:       MatDenseRestoreSubMatrix(B, &Bb);
613:       MatDenseRestoreSubMatrix(C, &Cb);
614:     }
615:     MatBindToCPU(C, ccpu);
616:   }
617:   return 0;
618: }
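
/*
  Numeric phase of the "scalable" AB algorithm: instead of a dense accumulator of length
  P->cmap->N, each row of C is accumulated in apa_sparse, which only has apnz_max entries
  (the longest row of AP found in the symbolic phase), using a sparse axpy against the
  precomputed column pattern apj.
*/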

620: PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A, Mat P, Mat C)
621: {
622:   Mat_MPIAIJ        *a = (Mat_MPIAIJ *)A->data, *c = (Mat_MPIAIJ *)C->data;
623:   Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)(a->A)->data, *ao = (Mat_SeqAIJ *)(a->B)->data;
624:   Mat_SeqAIJ        *cd = (Mat_SeqAIJ *)(c->A)->data, *co = (Mat_SeqAIJ *)(c->B)->data;
625:   PetscInt          *adi = ad->i, *adj, *aoi = ao->i, *aoj;
626:   PetscScalar       *ada, *aoa, *cda = cd->a, *coa = co->a;
627:   Mat_SeqAIJ        *p_loc, *p_oth;
628:   PetscInt          *pi_loc, *pj_loc, *pi_oth, *pj_oth, *pj;
629:   PetscScalar       *pa_loc, *pa_oth, *pa, valtmp, *ca;
630:   PetscInt           cm = C->rmap->n, anz, pnz;
631:   Mat_APMPI         *ptap;
632:   PetscScalar       *apa_sparse;
633:   const PetscScalar *dummy;
634:   PetscInt          *api, *apj, *apJ, i, j, k, row;
635:   PetscInt           cstart = C->cmap->rstart;
636:   PetscInt           cdnz, conz, k0, k1, nextp;
637:   MPI_Comm           comm;
638:   PetscMPIInt        size;

640:   MatCheckProduct(C, 3);
641:   ptap = (Mat_APMPI *)C->product->data;
643:   PetscObjectGetComm((PetscObject)C, &comm);
644:   MPI_Comm_size(comm, &size);

647:   /* flag CPU mask for C */
648: #if defined(PETSC_HAVE_DEVICE)
649:   if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
650:   if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
651:   if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
652: #endif
653:   apa_sparse = ptap->apa;

655:   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
656:   /*-----------------------------------------------------*/
657:   /* update numerical values of P_oth and P_loc */
658:   MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_REUSE_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth);
659:   MatMPIAIJGetLocalMat(P, MAT_REUSE_MATRIX, &ptap->P_loc);

661:   /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
662:   /*----------------------------------------------------------*/
663:   /* get data from symbolic products */
664:   p_loc  = (Mat_SeqAIJ *)(ptap->P_loc)->data;
665:   pi_loc = p_loc->i;
666:   pj_loc = p_loc->j;
667:   pa_loc = p_loc->a;
668:   if (size > 1) {
669:     p_oth  = (Mat_SeqAIJ *)(ptap->P_oth)->data;
670:     pi_oth = p_oth->i;
671:     pj_oth = p_oth->j;
672:     pa_oth = p_oth->a;
673:   } else {
674:     p_oth  = NULL;
675:     pi_oth = NULL;
676:     pj_oth = NULL;
677:     pa_oth = NULL;
678:   }

680:   /* trigger copy to CPU */
681:   MatSeqAIJGetArrayRead(a->A, &dummy);
682:   MatSeqAIJRestoreArrayRead(a->A, &dummy);
683:   MatSeqAIJGetArrayRead(a->B, &dummy);
684:   MatSeqAIJRestoreArrayRead(a->B, &dummy);
685:   api = ptap->api;
686:   apj = ptap->apj;
687:   for (i = 0; i < cm; i++) {
688:     apJ = apj + api[i];

690:     /* diagonal portion of A */
691:     anz = adi[i + 1] - adi[i];
692:     adj = ad->j + adi[i];
693:     ada = ad->a + adi[i];
694:     for (j = 0; j < anz; j++) {
695:       row = adj[j];
696:       pnz = pi_loc[row + 1] - pi_loc[row];
697:       pj  = pj_loc + pi_loc[row];
698:       pa  = pa_loc + pi_loc[row];
699:       /* perform sparse axpy */
700:       valtmp = ada[j];
701:       nextp  = 0;
702:       for (k = 0; nextp < pnz; k++) {
703:         if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
704:           apa_sparse[k] += valtmp * pa[nextp++];
705:         }
706:       }
707:       PetscLogFlops(2.0 * pnz);
708:     }

710:     /* off-diagonal portion of A */
711:     anz = aoi[i + 1] - aoi[i];
712:     aoj = ao->j + aoi[i];
713:     aoa = ao->a + aoi[i];
714:     for (j = 0; j < anz; j++) {
715:       row = aoj[j];
716:       pnz = pi_oth[row + 1] - pi_oth[row];
717:       pj  = pj_oth + pi_oth[row];
718:       pa  = pa_oth + pi_oth[row];
719:       /* perform sparse axpy */
720:       valtmp = aoa[j];
721:       nextp  = 0;
722:       for (k = 0; nextp < pnz; k++) {
723:         if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
724:           apa_sparse[k] += valtmp * pa[nextp++];
725:         }
726:       }
727:       PetscLogFlops(2.0 * pnz);
728:     }

730:     /* set values in C */
731:     cdnz = cd->i[i + 1] - cd->i[i];
732:     conz = co->i[i + 1] - co->i[i];

734:     /* 1st off-diagonal part of C */
735:     ca = coa + co->i[i];
736:     k  = 0;
737:     for (k0 = 0; k0 < conz; k0++) {
738:       if (apJ[k] >= cstart) break;
739:       ca[k0]        = apa_sparse[k];
740:       apa_sparse[k] = 0.0;
741:       k++;
742:     }

744:     /* diagonal part of C */
745:     ca = cda + cd->i[i];
746:     for (k1 = 0; k1 < cdnz; k1++) {
747:       ca[k1]        = apa_sparse[k];
748:       apa_sparse[k] = 0.0;
749:       k++;
750:     }

752:     /* 2nd off-diagonal part of C */
753:     ca = coa + co->i[i];
754:     for (; k0 < conz; k0++) {
755:       ca[k0]        = apa_sparse[k];
756:       apa_sparse[k] = 0.0;
757:       k++;
758:     }
759:   }
760:   MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY);
761:   MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY);
762:   return 0;
763: }

 765: /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except it uses the scalable condensed linked list to avoid the O(P->cmap->N) dense-row memory requirement */
766: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A, Mat P, PetscReal fill, Mat C)
767: {
768:   MPI_Comm           comm;
769:   PetscMPIInt        size;
770:   Mat_APMPI         *ptap;
771:   PetscFreeSpaceList free_space = NULL, current_space = NULL;
772:   Mat_MPIAIJ        *a  = (Mat_MPIAIJ *)A->data;
773:   Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)(a->A)->data, *ao = (Mat_SeqAIJ *)(a->B)->data, *p_loc, *p_oth;
774:   PetscInt          *pi_loc, *pj_loc, *pi_oth, *pj_oth, *dnz, *onz;
775:   PetscInt          *adi = ad->i, *adj = ad->j, *aoi = ao->i, *aoj = ao->j, rstart = A->rmap->rstart;
776:   PetscInt           i, pnz, row, *api, *apj, *Jptr, apnz, nspacedouble = 0, j, nzi, *lnk, apnz_max = 1;
777:   PetscInt           am = A->rmap->n, pn = P->cmap->n, pm = P->rmap->n, lsize = pn + 20;
778:   PetscReal          afill;
779:   MatType            mtype;

781:   MatCheckProduct(C, 4);
783:   PetscObjectGetComm((PetscObject)A, &comm);
784:   MPI_Comm_size(comm, &size);

 786:   /* create struct Mat_APMPI and attach it to C later */
787:   PetscNew(&ptap);

789:   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
790:   MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth);

792:   /* get P_loc by taking all local rows of P */
793:   MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc);

795:   p_loc  = (Mat_SeqAIJ *)(ptap->P_loc)->data;
796:   pi_loc = p_loc->i;
797:   pj_loc = p_loc->j;
798:   if (size > 1) {
799:     p_oth  = (Mat_SeqAIJ *)(ptap->P_oth)->data;
800:     pi_oth = p_oth->i;
801:     pj_oth = p_oth->j;
802:   } else {
803:     p_oth  = NULL;
804:     pi_oth = NULL;
805:     pj_oth = NULL;
806:   }

808:   /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
809:   /*-------------------------------------------------------------------*/
810:   PetscMalloc1(am + 2, &api);
811:   ptap->api = api;
812:   api[0]    = 0;

814:   PetscLLCondensedCreate_Scalable(lsize, &lnk);

816:   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
817:   PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space);
818:   current_space = free_space;
819:   MatPreallocateBegin(comm, am, pn, dnz, onz);
820:   for (i = 0; i < am; i++) {
821:     /* diagonal portion of A */
822:     nzi = adi[i + 1] - adi[i];
823:     for (j = 0; j < nzi; j++) {
824:       row  = *adj++;
825:       pnz  = pi_loc[row + 1] - pi_loc[row];
826:       Jptr = pj_loc + pi_loc[row];
827:       /* Expand list if it is not long enough */
828:       if (pnz + apnz_max > lsize) {
829:         lsize = pnz + apnz_max;
830:         PetscLLCondensedExpand_Scalable(lsize, &lnk);
831:       }
832:       /* add non-zero cols of P into the sorted linked list lnk */
833:       PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk);
834:       apnz       = *lnk; /* The first element in the list is the number of items in the list */
835:       api[i + 1] = api[i] + apnz;
836:       if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
837:     }
838:     /* off-diagonal portion of A */
839:     nzi = aoi[i + 1] - aoi[i];
840:     for (j = 0; j < nzi; j++) {
841:       row  = *aoj++;
842:       pnz  = pi_oth[row + 1] - pi_oth[row];
843:       Jptr = pj_oth + pi_oth[row];
844:       /* Expand list if it is not long enough */
845:       if (pnz + apnz_max > lsize) {
846:         lsize = pnz + apnz_max;
847:         PetscLLCondensedExpand_Scalable(lsize, &lnk);
848:       }
849:       /* add non-zero cols of P into the sorted linked list lnk */
850:       PetscLLCondensedAddSorted_Scalable(pnz, Jptr, lnk);
851:       apnz       = *lnk; /* The first element in the list is the number of items in the list */
852:       api[i + 1] = api[i] + apnz;
853:       if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
854:     }

856:     /* add missing diagonal entry */
857:     if (C->force_diagonals) {
858:       j = i + rstart; /* column index */
859:       PetscLLCondensedAddSorted_Scalable(1, &j, lnk);
860:     }

862:     apnz       = *lnk;
863:     api[i + 1] = api[i] + apnz;
864:     if (apnz > apnz_max) apnz_max = apnz;

866:     /* if free space is not available, double the total space in the list */
867:     if (current_space->local_remaining < apnz) {
868:       PetscFreeSpaceGet(PetscIntSumTruncate(apnz, current_space->total_array_size), &current_space);
869:       nspacedouble++;
870:     }

872:     /* Copy data into free space, then initialize lnk */
873:     PetscLLCondensedClean_Scalable(apnz, current_space->array, lnk);
874:     MatPreallocateSet(i + rstart, apnz, current_space->array, dnz, onz);

876:     current_space->array += apnz;
877:     current_space->local_used += apnz;
878:     current_space->local_remaining -= apnz;
879:   }

881:   /* Allocate space for apj, initialize apj, and */
882:   /* destroy list of free space and other temporary array(s) */
883:   PetscMalloc1(api[am] + 1, &ptap->apj);
884:   apj = ptap->apj;
885:   PetscFreeSpaceContiguous(&free_space, ptap->apj);
886:   PetscLLCondensedDestroy_Scalable(lnk);

888:   /* create and assemble symbolic parallel matrix C */
889:   /*----------------------------------------------------*/
890:   MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE);
891:   MatSetBlockSizesFromMats(C, A, P);
892:   MatGetType(A, &mtype);
893:   MatSetType(C, mtype);
894:   MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz);
895:   MatPreallocateEnd(dnz, onz);

 897:   /* malloc apa, the sparse accumulator used in the numeric phase */
898:   PetscCalloc1(apnz_max, &ptap->apa);

900:   MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
901:   MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY);
902:   MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY);
903:   MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);

905:   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
906:   C->ops->productnumeric = MatProductNumeric_AB;

908:   /* attach the supporting struct to C for reuse */
909:   C->product->data    = ptap;
910:   C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;

912:   /* set MatInfo */
913:   afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
914:   if (afill < 1.0) afill = 1.0;
915:   C->info.mallocs           = nspacedouble;
916:   C->info.fill_ratio_given  = fill;
917:   C->info.fill_ratio_needed = afill;

919: #if defined(PETSC_USE_INFO)
920:   if (api[am]) {
921:     PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill);
 922:     PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill);
923:   } else {
924:     PetscInfo(C, "Empty matrix product\n");
925:   }
926: #endif
927:   return 0;
928: }

 930: /* This function is needed for the seqMPI matrix-matrix multiplication.   */
 931: /* Three sorted input arrays are merged into one sorted output array; its */
 932: /* size is returned in size4. Duplicate entries only show up once.        */
933: static void Merge3SortedArrays(PetscInt size1, PetscInt *in1, PetscInt size2, PetscInt *in2, PetscInt size3, PetscInt *in3, PetscInt *size4, PetscInt *out)
934: {
 935:   PetscInt i = 0, j = 0, k = 0, l = 0;

937:   /* Traverse all three arrays */
938:   while (i < size1 && j < size2 && k < size3) {
939:     if (in1[i] < in2[j] && in1[i] < in3[k]) {
940:       out[l++] = in1[i++];
941:     } else if (in2[j] < in1[i] && in2[j] < in3[k]) {
942:       out[l++] = in2[j++];
943:     } else if (in3[k] < in1[i] && in3[k] < in2[j]) {
944:       out[l++] = in3[k++];
945:     } else if (in1[i] == in2[j] && in1[i] < in3[k]) {
946:       out[l++] = in1[i];
947:       i++, j++;
948:     } else if (in1[i] == in3[k] && in1[i] < in2[j]) {
949:       out[l++] = in1[i];
950:       i++, k++;
951:     } else if (in3[k] == in2[j] && in2[j] < in1[i]) {
952:       out[l++] = in2[j];
953:       k++, j++;
954:     } else if (in1[i] == in2[j] && in1[i] == in3[k]) {
955:       out[l++] = in1[i];
956:       i++, j++, k++;
957:     }
958:   }

960:   /* Traverse two remaining arrays */
961:   while (i < size1 && j < size2) {
962:     if (in1[i] < in2[j]) {
963:       out[l++] = in1[i++];
964:     } else if (in1[i] > in2[j]) {
965:       out[l++] = in2[j++];
966:     } else {
967:       out[l++] = in1[i];
968:       i++, j++;
969:     }
970:   }

972:   while (i < size1 && k < size3) {
973:     if (in1[i] < in3[k]) {
974:       out[l++] = in1[i++];
975:     } else if (in1[i] > in3[k]) {
976:       out[l++] = in3[k++];
977:     } else {
978:       out[l++] = in1[i];
979:       i++, k++;
980:     }
981:   }

983:   while (k < size3 && j < size2) {
984:     if (in3[k] < in2[j]) {
985:       out[l++] = in3[k++];
986:     } else if (in3[k] > in2[j]) {
987:       out[l++] = in2[j++];
988:     } else {
989:       out[l++] = in3[k];
990:       k++, j++;
991:     }
992:   }

994:   /* Traverse one remaining array */
995:   while (i < size1) out[l++] = in1[i++];
996:   while (j < size2) out[l++] = in2[j++];
997:   while (k < size3) out[l++] = in3[k++];

999:   *size4 = l;
1000: }
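
/* Example (illustrative only, assuming each input is sorted with no repeated entries within an
   array): in1 = {1,4,7}, in2 = {2,4,8}, in3 = {4,9} merge to out = {1,2,4,7,8,9} with *size4 = 6. */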

1002: /* This matrix-matrix multiplication algorithm splits the product into three products and adds them up:   */
1003: /* C = A*P = A_diag*P_loc_diag + A_diag*P_loc_off + A_off*P_oth. Two of the three (A_diag*P_loc_diag and   */
1004: /* A_off*P_oth) are computed with the existing sequential matrix-matrix multiplication routines.           */
1005: PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C)
1006: {
1007:   MPI_Comm           comm;
1008:   PetscMPIInt        size;
1009:   Mat_APMPI         *ptap;
1010:   PetscFreeSpaceList free_space_diag = NULL, current_space = NULL;
1011:   Mat_MPIAIJ        *a  = (Mat_MPIAIJ *)A->data;
1012:   Mat_SeqAIJ        *ad = (Mat_SeqAIJ *)(a->A)->data, *ao = (Mat_SeqAIJ *)(a->B)->data, *p_loc;
1013:   Mat_MPIAIJ        *p = (Mat_MPIAIJ *)P->data;
1014:   Mat_SeqAIJ        *adpd_seq, *p_off, *aopoth_seq;
1015:   PetscInt           adponz, adpdnz;
1016:   PetscInt          *pi_loc, *dnz, *onz;
1017:   PetscInt          *adi = ad->i, *adj = ad->j, *aoi = ao->i, rstart = A->rmap->rstart;
1018:   PetscInt          *lnk, i, i1 = 0, pnz, row, *adpoi, *adpoj, *api, *adpoJ, *aopJ, *apJ, *Jptr, aopnz, nspacedouble = 0, j, nzi, *apj, apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
1019:   PetscInt           am = A->rmap->n, pN = P->cmap->N, pn = P->cmap->n, pm = P->rmap->n, p_colstart, p_colend;
1020:   PetscBT            lnkbt;
1021:   PetscReal          afill;
1022:   PetscMPIInt        rank;
1023:   Mat                adpd, aopoth;
1024:   MatType            mtype;
1025:   const char        *prefix;

1027:   MatCheckProduct(C, 4);
1029:   PetscObjectGetComm((PetscObject)A, &comm);
1030:   MPI_Comm_size(comm, &size);
1031:   MPI_Comm_rank(comm, &rank);
1032:   MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend);

1034:   /* create struct Mat_APMPI and attach it to C later */
1035:   PetscNew(&ptap);

1037:   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
1038:   MatGetBrowsOfAoCols_MPIAIJ(A, P, MAT_INITIAL_MATRIX, &ptap->startsj_s, &ptap->startsj_r, &ptap->bufa, &ptap->P_oth);

1040:   /* get P_loc by taking all local rows of P */
1041:   MatMPIAIJGetLocalMat(P, MAT_INITIAL_MATRIX, &ptap->P_loc);

1043:   p_loc  = (Mat_SeqAIJ *)(ptap->P_loc)->data;
1044:   pi_loc = p_loc->i;

1046:   /* Allocate memory for the i arrays of A*P (api) and A_diag*P_loc_off (adpoi) */
1047:   PetscMalloc1(am + 2, &api);
1048:   PetscMalloc1(am + 2, &adpoi);

1050:   adpoi[0]  = 0;
1051:   ptap->api = api;
1052:   api[0]    = 0;

1054:   /* create and initialize a linked list; used to build the row structure of A_diag * P_loc_off */
1055:   PetscLLCondensedCreate(pN, pN, &lnk, &lnkbt);
1056:   MatPreallocateBegin(comm, am, pn, dnz, onz);

1058:   /* Symbolic calc of A_loc_diag * P_loc_diag */
1059:   MatGetOptionsPrefix(A, &prefix);
1060:   MatProductCreate(a->A, p->A, NULL, &adpd);
1061:   MatGetOptionsPrefix(A, &prefix);
1062:   MatSetOptionsPrefix(adpd, prefix);
1063:   MatAppendOptionsPrefix(adpd, "inner_diag_");

1065:   MatProductSetType(adpd, MATPRODUCT_AB);
1066:   MatProductSetAlgorithm(adpd, "sorted");
1067:   MatProductSetFill(adpd, fill);
1068:   MatProductSetFromOptions(adpd);

1070:   adpd->force_diagonals = C->force_diagonals;
1071:   MatProductSymbolic(adpd);

1073:   adpd_seq = (Mat_SeqAIJ *)((adpd)->data);
1074:   adpdi    = adpd_seq->i;
1075:   adpdj    = adpd_seq->j;
1076:   p_off    = (Mat_SeqAIJ *)((p->B)->data);
1077:   poff_i   = p_off->i;
1078:   poff_j   = p_off->j;

1080:   /* j_temp stores indices of a result row before they are added to the linked list */
1081:   PetscMalloc1(pN + 2, &j_temp);

1083:   /* Symbolic calc of the A_diag * p_loc_off */
1084:   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
1085:   PetscFreeSpaceGet(PetscRealIntMultTruncate(fill, PetscIntSumTruncate(adi[am], PetscIntSumTruncate(aoi[am], pi_loc[pm]))), &free_space_diag);
1086:   current_space = free_space_diag;

1088:   for (i = 0; i < am; i++) {
1089:     /* A_diag * P_loc_off */
1090:     nzi = adi[i + 1] - adi[i];
1091:     for (j = 0; j < nzi; j++) {
1092:       row  = *adj++;
1093:       pnz  = poff_i[row + 1] - poff_i[row];
1094:       Jptr = poff_j + poff_i[row];
1095:       for (i1 = 0; i1 < pnz; i1++) j_temp[i1] = p->garray[Jptr[i1]];
1096:       /* add non-zero cols of P into the sorted linked list lnk */
1097:       PetscLLCondensedAddSorted(pnz, j_temp, lnk, lnkbt);
1098:     }

1100:     adponz       = lnk[0];
1101:     adpoi[i + 1] = adpoi[i] + adponz;

1103:     /* if free space is not available, double the total space in the list */
1104:     if (current_space->local_remaining < adponz) {
1105:       PetscFreeSpaceGet(PetscIntSumTruncate(adponz, current_space->total_array_size), &current_space);
1106:       nspacedouble++;
1107:     }

1109:     /* Copy data into free space, then initialize lnk */
1110:     PetscLLCondensedClean(pN, adponz, current_space->array, lnk, lnkbt);

1112:     current_space->array += adponz;
1113:     current_space->local_used += adponz;
1114:     current_space->local_remaining -= adponz;
1115:   }

1117:   /* Symbolic calc of A_off * P_oth */
1118:   MatSetOptionsPrefix(a->B, prefix);
1119:   MatAppendOptionsPrefix(a->B, "inner_offdiag_");
1120:   MatCreate(PETSC_COMM_SELF, &aopoth);
1121:   MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth);
1122:   aopoth_seq = (Mat_SeqAIJ *)((aopoth)->data);
1123:   aopothi    = aopoth_seq->i;
1124:   aopothj    = aopoth_seq->j;

1126:   /* Allocate space for apj, adpj, aopj, ... */
1127:   /* destroy lists of free space and other temporary array(s) */

1129:   PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am] + 2, &ptap->apj);
1130:   PetscMalloc1(adpoi[am] + 2, &adpoj);

1132:   /* Copy from linked list to j-array */
1133:   PetscFreeSpaceContiguous(&free_space_diag, adpoj);
1134:   PetscLLDestroy(lnk, lnkbt);

1136:   adpoJ = adpoj;
1137:   adpdJ = adpdj;
1138:   aopJ  = aopothj;
1139:   apj   = ptap->apj;
1140:   apJ   = apj; /* still empty */

1142:   /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
1143:   /* A_diag * P_loc_diag to get A*P */
1144:   for (i = 0; i < am; i++) {
1145:     aopnz  = aopothi[i + 1] - aopothi[i];
1146:     adponz = adpoi[i + 1] - adpoi[i];
1147:     adpdnz = adpdi[i + 1] - adpdi[i];

1149:     /* Correct indices from A_diag*P_diag */
1150:     for (i1 = 0; i1 < adpdnz; i1++) adpdJ[i1] += p_colstart;
1151:     /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
1152:     Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
1153:     MatPreallocateSet(i + rstart, apnz, apJ, dnz, onz);

1155:     aopJ += aopnz;
1156:     adpoJ += adponz;
1157:     adpdJ += adpdnz;
1158:     apJ += apnz;
1159:     api[i + 1] = api[i] + apnz;
1160:   }

1162:   /* malloc apa to store dense row A[i,:]*P */
1163:   PetscCalloc1(pN + 2, &ptap->apa);

1165:   /* create and assemble symbolic parallel matrix C */
1166:   MatSetSizes(C, am, pn, PETSC_DETERMINE, PETSC_DETERMINE);
1167:   MatSetBlockSizesFromMats(C, A, P);
1168:   MatGetType(A, &mtype);
1169:   MatSetType(C, mtype);
1170:   MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz);
1171:   MatPreallocateEnd(dnz, onz);

1173:   MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);
1174:   MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY);
1175:   MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY);
1176:   MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);

1178:   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1179:   C->ops->productnumeric = MatProductNumeric_AB;

1181:   /* attach the supporting struct to C for reuse */
1182:   C->product->data    = ptap;
1183:   C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;

1185:   /* set MatInfo */
1186:   afill = (PetscReal)api[am] / (adi[am] + aoi[am] + pi_loc[pm] + 1) + 1.e-5;
1187:   if (afill < 1.0) afill = 1.0;
1188:   C->info.mallocs           = nspacedouble;
1189:   C->info.fill_ratio_given  = fill;
1190:   C->info.fill_ratio_needed = afill;

1192: #if defined(PETSC_USE_INFO)
1193:   if (api[am]) {
1194:     PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill);
1195:     PetscInfo(C, "Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill);
1196:   } else {
1197:     PetscInfo(C, "Empty matrix product\n");
1198:   }
1199: #endif

1201:   MatDestroy(&aopoth);
1202:   MatDestroy(&adpd);
1203:   PetscFree(j_temp);
1204:   PetscFree(adpoj);
1205:   PetscFree(adpoi);
1206:   return 0;
1207: }

1209: /*-------------------------------------------------------------------------*/
1210: /* This routine only works when scall=MAT_REUSE_MATRIX! */
1211: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P, Mat A, Mat C)
1212: {
1213:   Mat_APMPI *ptap;
1214:   Mat        Pt;

1216:   MatCheckProduct(C, 3);
1217:   ptap = (Mat_APMPI *)C->product->data;

1221:   Pt = ptap->Pt;
1222:   MatTransposeSetPrecursor(P, Pt);
1223:   MatTranspose(P, MAT_REUSE_MATRIX, &Pt);
1224:   MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt, A, C);
1225:   return 0;
1226: }

1228: /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
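/*
  Outline of the symbolic phase of C = P^T * A:
    (0) form Rd = (P_diag)^T and Ro = (P_off)^T locally;
    (1) gather A_loc, all local rows of A;
    (2) compute the sequential symbolic products C_loc = Rd*A_loc and C_oth = Ro*A_loc;
    (3)-(4) send the row structure (coi, coj) of C_oth to the processes owning those rows of C;
    (5) merge C_loc with the received pieces to preallocate and assemble the parallel C.
*/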
1229: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, PetscReal fill, Mat C)
1230: {
1231:   Mat_APMPI               *ptap;
1232:   Mat_MPIAIJ              *p = (Mat_MPIAIJ *)P->data;
1233:   MPI_Comm                 comm;
1234:   PetscMPIInt              size, rank;
1235:   PetscFreeSpaceList       free_space = NULL, current_space = NULL;
1236:   PetscInt                 pn = P->cmap->n, aN = A->cmap->N, an = A->cmap->n;
1237:   PetscInt                *lnk, i, k, nsend, rstart;
1238:   PetscBT                  lnkbt;
1239:   PetscMPIInt              tagi, tagj, *len_si, *len_s, *len_ri, nrecv;
1240:   PETSC_UNUSED PetscMPIInt icompleted = 0;
1241:   PetscInt               **buf_rj, **buf_ri, **buf_ri_k, row, ncols, *cols;
1242:   PetscInt                 len, proc, *dnz, *onz, *owners, nzi;
1243:   PetscInt                 nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci;
1244:   MPI_Request             *swaits, *rwaits;
1245:   MPI_Status              *sstatus, rstatus;
1246:   PetscLayout              rowmap;
1247:   PetscInt                *owners_co, *coi, *coj; /* i and j arrays of C_oth = (p->B)^T*A_loc - used in the communication */
1248:   PetscMPIInt             *len_r, *id_r;          /* lengths and source ranks of the messages to be received */
1249:   PetscInt                *Jptr, *prmap = p->garray, con, j, Crmax;
1250:   Mat_SeqAIJ              *a_loc, *c_loc, *c_oth;
1251:   PetscTable               ta;
1252:   MatType                  mtype;
1253:   const char              *prefix;

1255:   PetscObjectGetComm((PetscObject)A, &comm);
1256:   MPI_Comm_size(comm, &size);
1257:   MPI_Comm_rank(comm, &rank);

1259:   /* create symbolic parallel matrix C */
1260:   MatGetType(A, &mtype);
1261:   MatSetType(C, mtype);

1263:   C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;

1265:   /* create struct Mat_APMPI and attach it to C later */
1266:   PetscNew(&ptap);

1268:   /* (0) compute Rd = Pd^T, Ro = Po^T  */
1269:   /* --------------------------------- */
1270:   MatTranspose(p->A, MAT_INITIAL_MATRIX, &ptap->Rd);
1271:   MatTranspose(p->B, MAT_INITIAL_MATRIX, &ptap->Ro);

1273:   /* (1) compute symbolic A_loc */
1274:   /* ---------------------------*/
1275:   MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &ptap->A_loc);

1277:   /* (2-1) compute symbolic C_oth = Ro*A_loc  */
1278:   /* ------------------------------------ */
1279:   MatGetOptionsPrefix(A, &prefix);
1280:   MatSetOptionsPrefix(ptap->Ro, prefix);
1281:   MatAppendOptionsPrefix(ptap->Ro, "inner_offdiag_");
1282:   MatCreate(PETSC_COMM_SELF, &ptap->C_oth);
1283:   MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro, ptap->A_loc, fill, ptap->C_oth);

1285:   /* (3) send coj of C_oth to other processors  */
1286:   /* ------------------------------------------ */
1287:   /* determine row ownership */
1288:   PetscLayoutCreate(comm, &rowmap);
1289:   rowmap->n  = pn;
1290:   rowmap->bs = 1;
1291:   PetscLayoutSetUp(rowmap);
1292:   owners = rowmap->range;

1294:   /* determine the number of messages to send, their lengths */
1295:   PetscMalloc4(size, &len_s, size, &len_si, size, &sstatus, size + 2, &owners_co);
1296:   PetscArrayzero(len_s, size);
1297:   PetscArrayzero(len_si, size);

1299:   c_oth = (Mat_SeqAIJ *)ptap->C_oth->data;
1300:   coi   = c_oth->i;
1301:   coj   = c_oth->j;
1302:   con   = ptap->C_oth->rmap->n;
1303:   proc  = 0;
1304:   for (i = 0; i < con; i++) {
1305:     while (prmap[i] >= owners[proc + 1]) proc++;
1306:     len_si[proc]++;                     /* num of rows in Co(=Pt*A) to be sent to [proc] */
1307:     len_s[proc] += coi[i + 1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
1308:   }

1310:   len          = 0; /* total length of buf_s[], which holds all outgoing buf_si messages, see (4) */
1311:   owners_co[0] = 0;
1312:   nsend        = 0;
1313:   for (proc = 0; proc < size; proc++) {
1314:     owners_co[proc + 1] = owners_co[proc] + len_si[proc];
1315:     if (len_s[proc]) {
1316:       nsend++;
1317:       len_si[proc] = 2 * (len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
1318:       len += len_si[proc];
1319:     }
1320:   }

1322:   /* determine the number and length of messages to receive for coi and coj  */
1323:   PetscGatherNumberOfMessages(comm, NULL, len_s, &nrecv);
1324:   PetscGatherMessageLengths2(comm, nsend, nrecv, len_s, len_si, &id_r, &len_r, &len_ri);

1326:   /* post the Irecv and Isend of coj */
1327:   PetscCommGetNewTag(comm, &tagj);
1328:   PetscPostIrecvInt(comm, tagj, nrecv, id_r, len_r, &buf_rj, &rwaits);
1329:   PetscMalloc1(nsend + 1, &swaits);
1330:   for (proc = 0, k = 0; proc < size; proc++) {
1331:     if (!len_s[proc]) continue;
1332:     i = owners_co[proc];
1333:     MPI_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k);
1334:     k++;
1335:   }

1337:   /* (2-2) compute symbolic C_loc = Rd*A_loc */
1338:   /* ---------------------------------------- */
1339:   MatSetOptionsPrefix(ptap->Rd, prefix);
1340:   MatAppendOptionsPrefix(ptap->Rd, "inner_diag_");
1341:   MatCreate(PETSC_COMM_SELF, &ptap->C_loc);
1342:   MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd, ptap->A_loc, fill, ptap->C_loc);
1343:   c_loc = (Mat_SeqAIJ *)ptap->C_loc->data;

1345:   /* receives coj are complete */
1346:   for (i = 0; i < nrecv; i++) MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus);
1347:   PetscFree(rwaits);
1348:   if (nsend) MPI_Waitall(nsend, swaits, sstatus);

1350:   /* add received column indices into ta to update Crmax */
1351:   a_loc = (Mat_SeqAIJ *)(ptap->A_loc)->data;

1353:   /* create and initialize a table used to compute Crmax, an upper bound on the nonzeros in any row of C */
1354:   PetscTableCreate(an, aN, &ta);
1355:   MatRowMergeMax_SeqAIJ(a_loc, ptap->A_loc->rmap->N, ta);

1357:   for (k = 0; k < nrecv; k++) { /* k-th received message */
1358:     Jptr = buf_rj[k];
1359:     for (j = 0; j < len_r[k]; j++) PetscTableAdd(ta, *(Jptr + j) + 1, 1, INSERT_VALUES);
1360:   }
1361:   PetscTableGetCount(ta, &Crmax);
1362:   PetscTableDestroy(&ta);

1364:   /* (4) send and recv coi */
1365:   /*-----------------------*/
1366:   PetscCommGetNewTag(comm, &tagi);
1367:   PetscPostIrecvInt(comm, tagi, nrecv, id_r, len_ri, &buf_ri, &rwaits);
1368:   PetscMalloc1(len + 1, &buf_s);
1369:   buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1370:   for (proc = 0, k = 0; proc < size; proc++) {
1371:     if (!len_s[proc]) continue;
1372:     /* form outgoing message for i-structure:
1373:          buf_si[0]:                 nrows to be sent
1374:                [1:nrows]:           row index (global)
1375:                [nrows+1:2*nrows+1]: i-structure index
1376:     */
1377:     /*-------------------------------------------*/
1378:     nrows       = len_si[proc] / 2 - 1; /* num of rows in Co to be sent to [proc] */
1379:     buf_si_i    = buf_si + nrows + 1;
1380:     buf_si[0]   = nrows;
1381:     buf_si_i[0] = 0;
1382:     nrows       = 0;
1383:     for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) {
1384:       nzi                 = coi[i + 1] - coi[i];
1385:       buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi;   /* i-structure */
1386:       buf_si[nrows + 1]   = prmap[i] - owners[proc]; /* local row index */
1387:       nrows++;
1388:     }
1389:     MPI_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k);
1390:     k++;
1391:     buf_si += len_si[proc];
1392:   }
1393:   for (i = 0; i < nrecv; i++) MPI_Waitany(nrecv, rwaits, &icompleted, &rstatus);
1394:   PetscFree(rwaits);
1395:   if (nsend) MPI_Waitall(nsend, swaits, sstatus);

1397:   PetscFree4(len_s, len_si, sstatus, owners_co);
1398:   PetscFree(len_ri);
1399:   PetscFree(swaits);
1400:   PetscFree(buf_s);

1402:   /* (5) compute the local portion of C      */
1403:   /* ------------------------------------------ */
1404:   /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of C */
1405:   PetscFreeSpaceGet(Crmax, &free_space);
1406:   current_space = free_space;

1408:   PetscMalloc3(nrecv, &buf_ri_k, nrecv, &nextrow, nrecv, &nextci);
1409:   for (k = 0; k < nrecv; k++) {
1410:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1411:     nrows       = *buf_ri_k[k];
1412:     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
1413:     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
1414:   }

1416:   MatPreallocateBegin(comm, pn, an, dnz, onz);
1417:   PetscLLCondensedCreate(Crmax, aN, &lnk, &lnkbt);
1418:   for (i = 0; i < pn; i++) { /* for each local row of C */
1419:     /* add C_loc into C */
1420:     nzi  = c_loc->i[i + 1] - c_loc->i[i];
1421:     Jptr = c_loc->j + c_loc->i[i];
1422:     PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt);

1424:     /* add received col data into lnk */
1425:     for (k = 0; k < nrecv; k++) { /* k-th received message */
1426:       if (i == *nextrow[k]) {     /* i-th row */
1427:         nzi  = *(nextci[k] + 1) - *nextci[k];
1428:         Jptr = buf_rj[k] + *nextci[k];
1429:         PetscLLCondensedAddSorted(nzi, Jptr, lnk, lnkbt);
1430:         nextrow[k]++;
1431:         nextci[k]++;
1432:       }
1433:     }

1435:     /* add missing diagonal entry */
1436:     if (C->force_diagonals) {
1437:       k = i + owners[rank]; /* column index */
1438:       PetscLLCondensedAddSorted(1, &k, lnk, lnkbt);
1439:     }

1441:     nzi = lnk[0];

1443:     /* copy data into free space, then initialize lnk */
1444:     PetscLLCondensedClean(aN, nzi, current_space->array, lnk, lnkbt);
1445:     MatPreallocateSet(i + owners[rank], nzi, current_space->array, dnz, onz);
1446:   }
1447:   PetscFree3(buf_ri_k, nextrow, nextci);
1448:   PetscLLDestroy(lnk, lnkbt);
1449:   PetscFreeSpaceDestroy(free_space);

1451:   /* local sizes and preallocation */
1452:   MatSetSizes(C, pn, an, PETSC_DETERMINE, PETSC_DETERMINE);
1453:   if (P->cmap->bs > 0) PetscLayoutSetBlockSize(C->rmap, P->cmap->bs);
1454:   if (A->cmap->bs > 0) PetscLayoutSetBlockSize(C->cmap, A->cmap->bs);
1455:   MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz);
1456:   MatPreallocateEnd(dnz, onz);

1458:   /* add C_loc and C_oth to C */
1459:   MatGetOwnershipRange(C, &rstart, NULL);
1460:   for (i = 0; i < pn; i++) {
1461:     ncols = c_loc->i[i + 1] - c_loc->i[i];
1462:     cols  = c_loc->j + c_loc->i[i];
1463:     row   = rstart + i;
1464:     MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES);

1466:     if (C->force_diagonals) MatSetValues(C, 1, (const PetscInt *)&row, 1, (const PetscInt *)&row, NULL, INSERT_VALUES);
1467:   }
1468:   for (i = 0; i < con; i++) {
1469:     ncols = c_oth->i[i + 1] - c_oth->i[i];
1470:     cols  = c_oth->j + c_oth->i[i];
1471:     row   = prmap[i];
1472:     MatSetValues(C, 1, (const PetscInt *)&row, ncols, (const PetscInt *)cols, NULL, INSERT_VALUES);
1473:   }
1474:   MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY);
1475:   MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY);
1476:   MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);

1478:   /* members in merge */
1479:   PetscFree(id_r);
1480:   PetscFree(len_r);
1481:   PetscFree(buf_ri[0]);
1482:   PetscFree(buf_ri);
1483:   PetscFree(buf_rj[0]);
1484:   PetscFree(buf_rj);
1485:   PetscLayoutDestroy(&rowmap);

1487:   /* attach the supporting struct to C for reuse */
1488:   C->product->data    = ptap;
1489:   C->product->destroy = MatDestroy_MPIAIJ_PtAP;
1490:   return 0;
1491: }
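
The product data (ptap) attached to C->product above is what lets later numeric phases skip the symbolic analysis. Below is a minimal sketch, not part of this file, of driving that symbolic/numeric split through the public MatProduct interface; the helper name is ours, and P and A are assumed to be assembled MPIAIJ matrices.

#include <petscmat.h>

/* Sketch: one symbolic pass, repeated numeric passes. */
static PetscErrorCode ptA_nonscalable_sketch(Mat P, Mat A, Mat *C)
{
  MatProductCreate(P, A, NULL, C);
  MatProductSetType(*C, MATPRODUCT_AtB);     /* C = P^T * A */
  MatProductSetAlgorithm(*C, "nonscalable"); /* select the routine above */
  MatProductSetFill(*C, PETSC_DEFAULT);
  MatProductSetFromOptions(*C);
  MatProductSymbolic(*C); /* builds and caches the product data on C->product */
  MatProductNumeric(*C);  /* may be repeated whenever P or A change values */
  return 0;
}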

1493: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P, Mat A, Mat C)
1494: {
1495:   Mat_MPIAIJ        *p = (Mat_MPIAIJ *)P->data;
1496:   Mat_SeqAIJ        *c_seq;
1497:   Mat_APMPI         *ptap;
1498:   Mat                A_loc, C_loc, C_oth;
1499:   PetscInt           i, rstart, rend, cm, ncols, row;
1500:   const PetscInt    *cols;
1501:   const PetscScalar *vals;

1503:   MatCheckProduct(C, 3);
1504:   ptap = (Mat_APMPI *)C->product->data;
1507:   MatZeroEntries(C);

1509:   /* These matrices are obtained in MatTransposeMatMultSymbolic() */
1510:   /* 1) get R = Pd^T, Ro = Po^T */
1511:   /*----------------------------*/
1512:   MatTransposeSetPrecursor(p->A, ptap->Rd);
1513:   MatTranspose(p->A, MAT_REUSE_MATRIX, &ptap->Rd);
1514:   MatTransposeSetPrecursor(p->B, ptap->Ro);
1515:   MatTranspose(p->B, MAT_REUSE_MATRIX, &ptap->Ro);

1517:   /* 2) compute numeric A_loc */
1518:   /*--------------------------*/
1519:   MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &ptap->A_loc);

1521:   /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
1522:   A_loc = ptap->A_loc;
1523:   ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd, A_loc, ptap->C_loc);
1524:   ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro, A_loc, ptap->C_oth);
1525:   C_loc = ptap->C_loc;
1526:   C_oth = ptap->C_oth;

1528:   /* add C_loc and C_oth to C */
1529:   MatGetOwnershipRange(C, &rstart, &rend);

1531:   /* C_loc -> C */
1532:   cm    = C_loc->rmap->N;
1533:   c_seq = (Mat_SeqAIJ *)C_loc->data;
1534:   cols  = c_seq->j;
1535:   vals  = c_seq->a;
1536:   for (i = 0; i < cm; i++) {
1537:     ncols = c_seq->i[i + 1] - c_seq->i[i];
1538:     row   = rstart + i;
1539:     MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES);
1540:     cols += ncols;
1541:     vals += ncols;
1542:   }

1544:   /* Co -> C, off-processor part */
1545:   cm    = C_oth->rmap->N;
1546:   c_seq = (Mat_SeqAIJ *)C_oth->data;
1547:   cols  = c_seq->j;
1548:   vals  = c_seq->a;
1549:   for (i = 0; i < cm; i++) {
1550:     ncols = c_seq->i[i + 1] - c_seq->i[i];
1551:     row   = p->garray[i];
1552:     MatSetValues(C, 1, &row, ncols, cols, vals, ADD_VALUES);
1553:     cols += ncols;
1554:     vals += ncols;
1555:   }
1556:   MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY);
1557:   MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY);
1558:   MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);
1559:   return 0;
1560: }
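
Callers that do not use the MatProduct interface reach the same symbolic/numeric pair through MatTransposeMatMult(). A small sketch of the usual create-then-reuse pattern follows; the function name is ours and the fill value 2.0 is only an illustrative estimate.

#include <petscmat.h>

static PetscErrorCode transpose_matmult_reuse_sketch(Mat P, Mat A)
{
  Mat C;

  MatTransposeMatMult(P, A, MAT_INITIAL_MATRIX, 2.0, &C); /* symbolic + first numeric phase */
  /* ... update the values (but not the nonzero pattern) of P or A ... */
  MatTransposeMatMult(P, A, MAT_REUSE_MATRIX, 2.0, &C);   /* numeric phase only */
  MatDestroy(&C);
  return 0;
}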

1562: PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P, Mat A, Mat C)
1563: {
1564:   Mat_Merge_SeqsToMPI *merge;
1565:   Mat_MPIAIJ          *p  = (Mat_MPIAIJ *)P->data;
1566:   Mat_SeqAIJ          *pd = (Mat_SeqAIJ *)(p->A)->data, *po = (Mat_SeqAIJ *)(p->B)->data;
1567:   Mat_APMPI           *ap;
1568:   PetscInt            *adj;
1569:   PetscInt             i, j, k, anz, pnz, row, *cj, nexta;
1570:   MatScalar           *ada, *ca, valtmp;
1571:   PetscInt             am = A->rmap->n, cm = C->rmap->n, pon = (p->B)->cmap->n;
1572:   MPI_Comm             comm;
1573:   PetscMPIInt          size, rank, taga, *len_s;
1574:   PetscInt            *owners, proc, nrows, **buf_ri_k, **nextrow, **nextci;
1575:   PetscInt           **buf_ri, **buf_rj;
1576:   PetscInt             cnz = 0, *bj_i, *bi, *bj, bnz, nextcj; /* bi,bj,ba: local array of C(mpi mat) */
1577:   MPI_Request         *s_waits, *r_waits;
1578:   MPI_Status          *status;
1579:   MatScalar          **abuf_r, *ba_i, *pA, *coa, *ba;
1580:   const PetscScalar   *dummy;
1581:   PetscInt            *ai, *aj, *coi, *coj, *poJ, *pdJ;
1582:   Mat                  A_loc;
1583:   Mat_SeqAIJ          *a_loc;

1585:   MatCheckProduct(C, 3);
1586:   ap = (Mat_APMPI *)C->product->data;
1589:   PetscObjectGetComm((PetscObject)C, &comm);
1590:   MPI_Comm_size(comm, &size);
1591:   MPI_Comm_rank(comm, &rank);

1593:   merge = ap->merge;

1595:   /* 2) compute numeric C_seq = P_loc^T*A_loc */
1596:   /*------------------------------------------*/
1597:   /* get data from symbolic products */
1598:   coi = merge->coi;
1599:   coj = merge->coj;
1600:   PetscCalloc1(coi[pon] + 1, &coa);
1601:   bi     = merge->bi;
1602:   bj     = merge->bj;
1603:   owners = merge->rowmap->range;
1604:   PetscCalloc1(bi[cm] + 1, &ba);

1606:   /* get A_loc by taking all local rows of A */
1607:   A_loc = ap->A_loc;
1608:   MatMPIAIJGetLocalMat(A, MAT_REUSE_MATRIX, &A_loc);
1609:   a_loc = (Mat_SeqAIJ *)(A_loc)->data;
1610:   ai    = a_loc->i;
1611:   aj    = a_loc->j;

1613:   /* trigger copy to CPU */
1614:   MatSeqAIJGetArrayRead(p->A, &dummy);
1615:   MatSeqAIJRestoreArrayRead(p->A, &dummy);
1616:   MatSeqAIJGetArrayRead(p->B, &dummy);
1617:   MatSeqAIJRestoreArrayRead(p->B, &dummy);
1618:   for (i = 0; i < am; i++) {
1619:     anz = ai[i + 1] - ai[i];
1620:     adj = aj + ai[i];
1621:     ada = a_loc->a + ai[i];

1623:     /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1624:     /*-------------------------------------------------------------*/
1625:     /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1626:     pnz = po->i[i + 1] - po->i[i];
1627:     poJ = po->j + po->i[i];
1628:     pA  = po->a + po->i[i];
1629:     for (j = 0; j < pnz; j++) {
1630:       row = poJ[j];
1631:       cj  = coj + coi[row];
1632:       ca  = coa + coi[row];
1633:       /* perform sparse axpy */
1634:       nexta  = 0;
1635:       valtmp = pA[j];
1636:       for (k = 0; nexta < anz; k++) {
1637:         if (cj[k] == adj[nexta]) {
1638:           ca[k] += valtmp * ada[nexta];
1639:           nexta++;
1640:         }
1641:       }
1642:       PetscLogFlops(2.0 * anz);
1643:     }

1645:     /* put the value into Cd (diagonal part) */
1646:     pnz = pd->i[i + 1] - pd->i[i];
1647:     pdJ = pd->j + pd->i[i];
1648:     pA  = pd->a + pd->i[i];
1649:     for (j = 0; j < pnz; j++) {
1650:       row = pdJ[j];
1651:       cj  = bj + bi[row];
1652:       ca  = ba + bi[row];
1653:       /* perform sparse axpy */
1654:       nexta  = 0;
1655:       valtmp = pA[j];
1656:       for (k = 0; nexta < anz; k++) {
1657:         if (cj[k] == adj[nexta]) {
1658:           ca[k] += valtmp * ada[nexta];
1659:           nexta++;
1660:         }
1661:       }
1662:       PetscLogFlops(2.0 * anz);
1663:     }
1664:   }

1666:   /* 3) send and recv matrix values coa */
1667:   /*------------------------------------*/
1668:   buf_ri = merge->buf_ri;
1669:   buf_rj = merge->buf_rj;
1670:   len_s  = merge->len_s;
1671:   PetscCommGetNewTag(comm, &taga);
1672:   PetscPostIrecvScalar(comm, taga, merge->nrecv, merge->id_r, merge->len_r, &abuf_r, &r_waits);

1674:   PetscMalloc2(merge->nsend + 1, &s_waits, size, &status);
1675:   for (proc = 0, k = 0; proc < size; proc++) {
1676:     if (!len_s[proc]) continue;
1677:     i = merge->owners_co[proc];
1678:     MPI_Isend(coa + coi[i], len_s[proc], MPIU_MATSCALAR, proc, taga, comm, s_waits + k);
1679:     k++;
1680:   }
1681:   if (merge->nrecv) MPI_Waitall(merge->nrecv, r_waits, status);
1682:   if (merge->nsend) MPI_Waitall(merge->nsend, s_waits, status);

1684:   PetscFree2(s_waits, status);
1685:   PetscFree(r_waits);
1686:   PetscFree(coa);

1688:   /* 4) insert local Cseq and received values into Cmpi */
1689:   /*----------------------------------------------------*/
1690:   PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci);
1691:   for (k = 0; k < merge->nrecv; k++) {
1692:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1693:     nrows       = *(buf_ri_k[k]);
1694:     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
1695:     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
1696:   }

1698:   for (i = 0; i < cm; i++) {
1699:     row  = owners[rank] + i; /* global row index of C_seq */
1700:     bj_i = bj + bi[i];       /* col indices of the i-th row of C */
1701:     ba_i = ba + bi[i];
1702:     bnz  = bi[i + 1] - bi[i];
1703:     /* add received vals into ba */
1704:     for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1705:       /* i-th row */
1706:       if (i == *nextrow[k]) {
1707:         cnz    = *(nextci[k] + 1) - *nextci[k];
1708:         cj     = buf_rj[k] + *(nextci[k]);
1709:         ca     = abuf_r[k] + *(nextci[k]);
1710:         nextcj = 0;
1711:         for (j = 0; nextcj < cnz; j++) {
1712:           if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1713:             ba_i[j] += ca[nextcj++];
1714:           }
1715:         }
1716:         nextrow[k]++;
1717:         nextci[k]++;
1718:         PetscLogFlops(2.0 * cnz);
1719:       }
1720:     }
1721:     MatSetValues(C, 1, &row, bnz, bj_i, ba_i, INSERT_VALUES);
1722:   }
1723:   MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY);
1724:   MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY);

1726:   PetscFree(ba);
1727:   PetscFree(abuf_r[0]);
1728:   PetscFree(abuf_r);
1729:   PetscFree3(buf_ri_k, nextrow, nextci);
1730:   return 0;
1731: }
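
The two inner loops above are instances of the same "sparse axpy" pattern: a scaled sparse row of A is merged into a destination row whose sorted column pattern, fixed in the symbolic phase, is a superset of the source pattern. A standalone sketch of that two-pointer kernel, with names of our choosing:

#include <petscsys.h>

/* Accumulate valtmp*ada[] into the destination row (cj, ca): cj is sorted and is a
   superset of adj, so the scan over cj cannot run past the last matching column. */
static void sparse_axpy_sketch(PetscScalar valtmp, PetscInt anz, const PetscInt *adj, const PetscScalar *ada, const PetscInt *cj, PetscScalar *ca)
{
  PetscInt k, nexta = 0;

  for (k = 0; nexta < anz; k++) {
    if (cj[k] == adj[nexta]) {        /* columns match: take this contribution */
      ca[k] += valtmp * ada[nexta++]; /* and advance to the next source entry */
    }
  }
}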

1733: PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P, Mat A, PetscReal fill, Mat C)
1734: {
1735:   Mat                  A_loc;
1736:   Mat_APMPI           *ap;
1737:   PetscFreeSpaceList   free_space = NULL, current_space = NULL;
1738:   Mat_MPIAIJ          *p = (Mat_MPIAIJ *)P->data, *a = (Mat_MPIAIJ *)A->data;
1739:   PetscInt            *pdti, *pdtj, *poti, *potj, *ptJ;
1740:   PetscInt             nnz;
1741:   PetscInt            *lnk, *owners_co, *coi, *coj, i, k, pnz, row;
1742:   PetscInt             am = A->rmap->n, pn = P->cmap->n;
1743:   MPI_Comm             comm;
1744:   PetscMPIInt          size, rank, tagi, tagj, *len_si, *len_s, *len_ri;
1745:   PetscInt           **buf_rj, **buf_ri, **buf_ri_k;
1746:   PetscInt             len, proc, *dnz, *onz, *owners;
1747:   PetscInt             nzi, *bi, *bj;
1748:   PetscInt             nrows, *buf_s, *buf_si, *buf_si_i, **nextrow, **nextci;
1749:   MPI_Request         *swaits, *rwaits;
1750:   MPI_Status          *sstatus, rstatus;
1751:   Mat_Merge_SeqsToMPI *merge;
1752:   PetscInt            *ai, *aj, *Jptr, anz, *prmap = p->garray, pon, nspacedouble = 0, j;
1753:   PetscReal            afill  = 1.0, afill_tmp;
1754:   PetscInt             rstart = P->cmap->rstart, rmax, aN = A->cmap->N, Armax;
1755:   Mat_SeqAIJ          *a_loc;
1756:   PetscTable           ta;
1757:   MatType              mtype;

1759:   PetscObjectGetComm((PetscObject)A, &comm);
1760:   /* check if matrix local sizes are compatible */
1761:   PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, comm, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Arow (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ", %" PetscInt_FMT ")", A->rmap->rstart,
1762:              A->rmap->rend, P->rmap->rstart, P->rmap->rend);

1764:   MPI_Comm_size(comm, &size);
1765:   MPI_Comm_rank(comm, &rank);

1767:   /* create struct Mat_APMPI and attach it to C later */
1768:   PetscNew(&ap);

1770:   /* get A_loc by taking all local rows of A */
1771:   MatMPIAIJGetLocalMat(A, MAT_INITIAL_MATRIX, &A_loc);

1773:   ap->A_loc = A_loc;
1774:   a_loc     = (Mat_SeqAIJ *)(A_loc)->data;
1775:   ai        = a_loc->i;
1776:   aj        = a_loc->j;

1778:   /* determine symbolic Co=(p->B)^T*A - send to others */
1779:   /*----------------------------------------------------*/
1780:   MatGetSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj);
1781:   MatGetSymbolicTranspose_SeqAIJ(p->B, &poti, &potj);
1782:   pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1783:                          >= (num of nonzero rows of C_seq) - pn */
1784:   PetscMalloc1(pon + 1, &coi);
1785:   coi[0] = 0;

1787:   /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1788:   nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(poti[pon], ai[am]));
1789:   PetscFreeSpaceGet(nnz, &free_space);
1790:   current_space = free_space;

1792:   /* create and initialize a linked list */
1793:   PetscTableCreate(A->cmap->n + a->B->cmap->N, aN, &ta);
1794:   MatRowMergeMax_SeqAIJ(a_loc, am, ta);
1795:   PetscTableGetCount(ta, &Armax);

1797:   PetscLLCondensedCreate_Scalable(Armax, &lnk);

1799:   for (i = 0; i < pon; i++) {
1800:     pnz = poti[i + 1] - poti[i];
1801:     ptJ = potj + poti[i];
1802:     for (j = 0; j < pnz; j++) {
1803:       row  = ptJ[j]; /* row of A_loc == col of Pot */
1804:       anz  = ai[row + 1] - ai[row];
1805:       Jptr = aj + ai[row];
1806:       /* add non-zero cols of AP into the sorted linked list lnk */
1807:       PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk);
1808:     }
1809:     nnz = lnk[0];

1811:     /* If free space is not available, double the total space in the list */
1812:     if (current_space->local_remaining < nnz) {
1813:       PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), &current_space);
1814:       nspacedouble++;
1815:     }

1817:     /* Copy data into free space, and zero out dense rows */
1818:     PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk);

1820:     current_space->array += nnz;
1821:     current_space->local_used += nnz;
1822:     current_space->local_remaining -= nnz;

1824:     coi[i + 1] = coi[i] + nnz;
1825:   }

1827:   PetscMalloc1(coi[pon] + 1, &coj);
1828:   PetscFreeSpaceContiguous(&free_space, coj);
1829:   PetscLLCondensedDestroy_Scalable(lnk); /* must destroy to get a new one for C */

1831:   afill_tmp = (PetscReal)coi[pon] / (poti[pon] + ai[am] + 1);
1832:   if (afill_tmp > afill) afill = afill_tmp;

1834:   /* send j-array (coj) of Co to other processors */
1835:   /*----------------------------------------------*/
1836:   /* determine row ownership */
1837:   PetscNew(&merge);
1838:   PetscLayoutCreate(comm, &merge->rowmap);

1840:   merge->rowmap->n  = pn;
1841:   merge->rowmap->bs = 1;

1843:   PetscLayoutSetUp(merge->rowmap);
1844:   owners = merge->rowmap->range;

1846:   /* determine the number of messages to send, their lengths */
1847:   PetscCalloc1(size, &len_si);
1848:   PetscCalloc1(size, &merge->len_s);

1850:   len_s        = merge->len_s;
1851:   merge->nsend = 0;

1853:   PetscMalloc1(size + 2, &owners_co);

1855:   proc = 0;
1856:   for (i = 0; i < pon; i++) {
1857:     while (prmap[i] >= owners[proc + 1]) proc++;
1858:     len_si[proc]++; /* num of rows in Co to be sent to [proc] */
1859:     len_s[proc] += coi[i + 1] - coi[i];
1860:   }

1862:   len          = 0; /* max length of buf_si[] */
1863:   owners_co[0] = 0;
1864:   for (proc = 0; proc < size; proc++) {
1865:     owners_co[proc + 1] = owners_co[proc] + len_si[proc];
1866:     if (len_si[proc]) {
1867:       merge->nsend++;
1868:       len_si[proc] = 2 * (len_si[proc] + 1);
1869:       len += len_si[proc];
1870:     }
1871:   }

1873:   /* determine the number and length of messages to receive for coi and coj  */
1874:   PetscGatherNumberOfMessages(comm, NULL, len_s, &merge->nrecv);
1875:   PetscGatherMessageLengths2(comm, merge->nsend, merge->nrecv, len_s, len_si, &merge->id_r, &merge->len_r, &len_ri);

1877:   /* post the Irecv and Isend of coj */
1878:   PetscCommGetNewTag(comm, &tagj);
1879:   PetscPostIrecvInt(comm, tagj, merge->nrecv, merge->id_r, merge->len_r, &buf_rj, &rwaits);
1880:   PetscMalloc1(merge->nsend + 1, &swaits);
1881:   for (proc = 0, k = 0; proc < size; proc++) {
1882:     if (!len_s[proc]) continue;
1883:     i = owners_co[proc];
1884:     MPI_Isend(coj + coi[i], len_s[proc], MPIU_INT, proc, tagj, comm, swaits + k);
1885:     k++;
1886:   }

1888:   /* receives and sends of coj are complete */
1889:   PetscMalloc1(size, &sstatus);
1890:   for (i = 0; i < merge->nrecv; i++) {
1891:     PETSC_UNUSED PetscMPIInt icompleted;
1892:     MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus);
1893:   }
1894:   PetscFree(rwaits);
1895:   if (merge->nsend) MPI_Waitall(merge->nsend, swaits, sstatus);

1897:   /* add received column indices into table to update Armax */
1898:   /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! */
1899:   for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1900:     Jptr = buf_rj[k];
1901:     for (j = 0; j < merge->len_r[k]; j++) PetscTableAdd(ta, *(Jptr + j) + 1, 1, INSERT_VALUES);
1902:   }
1903:   PetscTableGetCount(ta, &Armax);

1905:   /* send and recv coi */
1906:   /*-------------------*/
1907:   PetscCommGetNewTag(comm, &tagi);
1908:   PetscPostIrecvInt(comm, tagi, merge->nrecv, merge->id_r, len_ri, &buf_ri, &rwaits);
1909:   PetscMalloc1(len + 1, &buf_s);
1910:   buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
1911:   for (proc = 0, k = 0; proc < size; proc++) {
1912:     if (!len_s[proc]) continue;
1913:     /* form outgoing message for i-structure:
1914:          buf_si[0]:                 nrows to be sent
1915:                [1:nrows]:           row index (global)
1916:                [nrows+1:2*nrows+1]: i-structure index
1917:     */
1918:     /*-------------------------------------------*/
1919:     nrows       = len_si[proc] / 2 - 1;
1920:     buf_si_i    = buf_si + nrows + 1;
1921:     buf_si[0]   = nrows;
1922:     buf_si_i[0] = 0;
1923:     nrows       = 0;
1924:     for (i = owners_co[proc]; i < owners_co[proc + 1]; i++) {
1925:       nzi                 = coi[i + 1] - coi[i];
1926:       buf_si_i[nrows + 1] = buf_si_i[nrows] + nzi;   /* i-structure */
1927:       buf_si[nrows + 1]   = prmap[i] - owners[proc]; /* local row index */
1928:       nrows++;
1929:     }
1930:     MPI_Isend(buf_si, len_si[proc], MPIU_INT, proc, tagi, comm, swaits + k);
1931:     k++;
1932:     buf_si += len_si[proc];
1933:   }
1934:   i = merge->nrecv;
1935:   while (i--) {
1936:     PETSC_UNUSED PetscMPIInt icompleted;
1937:     MPI_Waitany(merge->nrecv, rwaits, &icompleted, &rstatus);
1938:   }
1939:   PetscFree(rwaits);
1940:   if (merge->nsend) MPI_Waitall(merge->nsend, swaits, sstatus);
1941:   PetscFree(len_si);
1942:   PetscFree(len_ri);
1943:   PetscFree(swaits);
1944:   PetscFree(sstatus);
1945:   PetscFree(buf_s);

1947:   /* compute the local portion of C (mpi mat) */
1948:   /*------------------------------------------*/
1949:   /* allocate bi array and free space for accumulating nonzero column info */
1950:   PetscMalloc1(pn + 1, &bi);
1951:   bi[0] = 0;

1953:   /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
1954:   nnz = PetscRealIntMultTruncate(fill, PetscIntSumTruncate(pdti[pn], PetscIntSumTruncate(poti[pon], ai[am])));
1955:   PetscFreeSpaceGet(nnz, &free_space);
1956:   current_space = free_space;

1958:   PetscMalloc3(merge->nrecv, &buf_ri_k, merge->nrecv, &nextrow, merge->nrecv, &nextci);
1959:   for (k = 0; k < merge->nrecv; k++) {
1960:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1961:     nrows       = *buf_ri_k[k];
1962:     nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
1963:     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure  */
1964:   }

1966:   PetscLLCondensedCreate_Scalable(Armax, &lnk);
1967:   MatPreallocateBegin(comm, pn, A->cmap->n, dnz, onz);
1968:   rmax = 0;
1969:   for (i = 0; i < pn; i++) {
1970:     /* add pdt[i,:]*AP into lnk */
1971:     pnz = pdti[i + 1] - pdti[i];
1972:     ptJ = pdtj + pdti[i];
1973:     for (j = 0; j < pnz; j++) {
1974:       row  = ptJ[j]; /* row of AP == col of Pt */
1975:       anz  = ai[row + 1] - ai[row];
1976:       Jptr = aj + ai[row];
1977:       /* add non-zero cols of AP into the sorted linked list lnk */
1978:       PetscLLCondensedAddSorted_Scalable(anz, Jptr, lnk);
1979:     }

1981:     /* add received col data into lnk */
1982:     for (k = 0; k < merge->nrecv; k++) { /* k-th received message */
1983:       if (i == *nextrow[k]) {            /* i-th row */
1984:         nzi  = *(nextci[k] + 1) - *nextci[k];
1985:         Jptr = buf_rj[k] + *nextci[k];
1986:         PetscLLCondensedAddSorted_Scalable(nzi, Jptr, lnk);
1987:         nextrow[k]++;
1988:         nextci[k]++;
1989:       }
1990:     }

1992:     /* add missing diagonal entry */
1993:     if (C->force_diagonals) {
1994:       k = i + owners[rank]; /* column index */
1995:       PetscLLCondensedAddSorted_Scalable(1, &k, lnk);
1996:     }

1998:     nnz = lnk[0];

2000:     /* if free space is not available, make more free space */
2001:     if (current_space->local_remaining < nnz) {
2002:       PetscFreeSpaceGet(PetscIntSumTruncate(nnz, current_space->total_array_size), &current_space);
2003:       nspacedouble++;
2004:     }
2005:     /* copy data into free space, then initialize lnk */
2006:     PetscLLCondensedClean_Scalable(nnz, current_space->array, lnk);
2007:     MatPreallocateSet(i + owners[rank], nnz, current_space->array, dnz, onz);

2009:     current_space->array += nnz;
2010:     current_space->local_used += nnz;
2011:     current_space->local_remaining -= nnz;

2013:     bi[i + 1] = bi[i] + nnz;
2014:     if (nnz > rmax) rmax = nnz;
2015:   }
2016:   PetscFree3(buf_ri_k, nextrow, nextci);

2018:   PetscMalloc1(bi[pn] + 1, &bj);
2019:   PetscFreeSpaceContiguous(&free_space, bj);
2020:   afill_tmp = (PetscReal)bi[pn] / (pdti[pn] + poti[pon] + ai[am] + 1);
2021:   if (afill_tmp > afill) afill = afill_tmp;
2022:   PetscLLCondensedDestroy_Scalable(lnk);
2023:   PetscTableDestroy(&ta);
2024:   MatRestoreSymbolicTranspose_SeqAIJ(p->A, &pdti, &pdtj);
2025:   MatRestoreSymbolicTranspose_SeqAIJ(p->B, &poti, &potj);

2027:   /* create symbolic parallel matrix C - why can it not be assembled in the Numeric part? */
2028:   /*-------------------------------------------------------------------------------*/
2029:   MatSetSizes(C, pn, A->cmap->n, PETSC_DETERMINE, PETSC_DETERMINE);
2030:   MatSetBlockSizes(C, PetscAbs(P->cmap->bs), PetscAbs(A->cmap->bs));
2031:   MatGetType(A, &mtype);
2032:   MatSetType(C, mtype);
2033:   MatMPIAIJSetPreallocation(C, 0, dnz, 0, onz);
2034:   MatPreallocateEnd(dnz, onz);
2035:   MatSetBlockSize(C, 1);
2036:   MatSetOption(C, MAT_NO_OFF_PROC_ENTRIES, PETSC_TRUE);
2037:   for (i = 0; i < pn; i++) {
2038:     row  = i + rstart;
2039:     nnz  = bi[i + 1] - bi[i];
2040:     Jptr = bj + bi[i];
2041:     MatSetValues(C, 1, &row, nnz, Jptr, NULL, INSERT_VALUES);
2042:   }
2043:   MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY);
2044:   MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY);
2045:   MatSetOption(C, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);
2046:   merge->bi        = bi;
2047:   merge->bj        = bj;
2048:   merge->coi       = coi;
2049:   merge->coj       = coj;
2050:   merge->buf_ri    = buf_ri;
2051:   merge->buf_rj    = buf_rj;
2052:   merge->owners_co = owners_co;

2054:   /* attach the supporting struct to C for reuse */
2055:   C->product->data    = ap;
2056:   C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2057:   ap->merge           = merge;

2059:   C->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;

2061: #if defined(PETSC_USE_INFO)
2062:   if (bi[pn] != 0) {
2063:     PetscInfo(C, "Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n", nspacedouble, (double)fill, (double)afill);
2064:     PetscInfo(C, "Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n", (double)afill);
2065:   } else {
2066:     PetscInfo(C, "Empty matrix product\n");
2067:   }
2068: #endif
2069:   return 0;
2070: }
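
Running with -info makes the PetscInfo calls above report the fill ratio that was actually needed; passing that value back in avoids the reallocations counted by nspacedouble. A sketch, assuming the option names registered in this file and an illustrative fill of 3.0:

#include <petscmat.h>

/* Run once with:  ./app -mattransposematmult_via scalable -info
   then read the reported "Fill ratio: given ... needed F" and hard-wire F here. */
static PetscErrorCode scalable_AtB_fill_sketch(Mat P, Mat A, Mat *C)
{
  MatTransposeMatMult(P, A, MAT_INITIAL_MATRIX, 3.0, C); /* 3.0 stands in for the measured fill */
  return 0;
}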

2072: /* ---------------------------------------------------------------- */
2073: static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
2074: {
2075:   Mat_Product *product = C->product;
2076:   Mat          A = product->A, B = product->B;
2077:   PetscReal    fill = product->fill;
2078:   PetscBool    flg;

2080:   /* scalable */
2081:   PetscStrcmp(product->alg, "scalable", &flg);
2082:   if (flg) {
2083:     MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A, B, fill, C);
2084:     goto next;
2085:   }

2087:   /* nonscalable */
2088:   PetscStrcmp(product->alg, "nonscalable", &flg);
2089:   if (flg) {
2090:     MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A, B, fill, C);
2091:     goto next;
2092:   }

2094:   /* matmatmult */
2095:   PetscStrcmp(product->alg, "at*b", &flg);
2096:   if (flg) {
2097:     Mat        At;
2098:     Mat_APMPI *ptap;

2100:     MatTranspose(A, MAT_INITIAL_MATRIX, &At);
2101:     MatMatMultSymbolic_MPIAIJ_MPIAIJ(At, B, fill, C);
2102:     ptap = (Mat_APMPI *)C->product->data;
2103:     if (ptap) {
2104:       ptap->Pt            = At;
2105:       C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2106:     }
2107:     C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
2108:     goto next;
2109:   }

2111:   /* backend general code */
2112:   PetscStrcmp(product->alg, "backend", &flg);
2113:   if (flg) {
2114:     MatProductSymbolic_MPIAIJBACKEND(C);
2115:     return 0;
2116:   }

2118:   SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Mat Product Algorithm is not supported");

2120: next:
2121:   C->ops->productnumeric = MatProductNumeric_AtB;
2122:   return 0;
2123: }

2125: /* ---------------------------------------------------------------- */
2126: /* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
2127: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
2128: {
2129:   Mat_Product *product = C->product;
2130:   Mat          A = product->A, B = product->B;
2131: #if defined(PETSC_HAVE_HYPRE)
2132:   const char *algTypes[5] = {"scalable", "nonscalable", "seqmpi", "backend", "hypre"};
2133:   PetscInt    nalg        = 5;
2134: #else
2135:   const char *algTypes[4] = {
2136:     "scalable",
2137:     "nonscalable",
2138:     "seqmpi",
2139:     "backend",
2140:   };
2141:   PetscInt    nalg        = 4;
2142: #endif
2143:   PetscInt  alg = 1; /* set nonscalable algorithm as default */
2144:   PetscBool flg;
2145:   MPI_Comm  comm;

2147:   PetscObjectGetComm((PetscObject)C, &comm);

2149:   /* Set "nonscalable" as default algorithm */
2150:   PetscStrcmp(C->product->alg, "default", &flg);
2151:   if (flg) {
2152:     MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]);

2154:     /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2155:     if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2156:       MatInfo   Ainfo, Binfo;
2157:       PetscInt  nz_local;
2158:       PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;

2160:       MatGetInfo(A, MAT_LOCAL, &Ainfo);
2161:       MatGetInfo(B, MAT_LOCAL, &Binfo);
2162:       nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);

2164:       if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2165:       MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPIU_BOOL, MPI_LOR, comm);

2167:       if (alg_scalable) {
2168:         alg = 0; /* the scalable algorithm would be ~50% slower than the nonscalable one */
2169:         MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]);
2170:         PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local));
2171:       }
2172:     }
2173:   }

2175:   /* Get runtime option */
2176:   if (product->api_user) {
2177:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMult", "Mat");
2178:     PetscOptionsEList("-matmatmult_via", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg);
2179:     PetscOptionsEnd();
2180:   } else {
2181:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AB", "Mat");
2182:     PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg);
2183:     PetscOptionsEnd();
2184:   }
2185:   if (flg) MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]);

2187:   C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
2188:   return 0;
2189: }
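
The routine above only picks a default; either of the option names it registers can override the choice at run time. A sketch of an AB product that goes through this selection; the helper name is ours:

#include <petscmat.h>

/* Algorithm is runtime-selectable, e.g.
     ./app -matmatmult_via scalable        (when called through MatMatMult)
     ./app -mat_product_algorithm seqmpi   (when called through MatProduct)  */
static PetscErrorCode ab_product_sketch(Mat A, Mat B, Mat *C)
{
  MatProductCreate(A, B, NULL, C);
  MatProductSetType(*C, MATPRODUCT_AB);
  MatProductSetFromOptions(*C); /* for MPIAIJ operands this eventually reaches the routine above */
  MatProductSymbolic(*C);
  MatProductNumeric(*C);
  return 0;
}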

2191: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABt(Mat C)
2192: {
2193:   MatProductSetFromOptions_MPIAIJ_AB(C);
2194:   C->ops->productsymbolic = MatProductSymbolic_ABt_MPIAIJ_MPIAIJ;
2195:   return 0;
2196: }

2198: /* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
2199: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
2200: {
2201:   Mat_Product *product = C->product;
2202:   Mat          A = product->A, B = product->B;
2203:   const char  *algTypes[4] = {"scalable", "nonscalable", "at*b", "backend"};
2204:   PetscInt     nalg        = 4;
2205:   PetscInt     alg         = 1; /* set default algorithm  */
2206:   PetscBool    flg;
2207:   MPI_Comm     comm;

2209:   /* Check matrix local sizes */
2210:   PetscObjectGetComm((PetscObject)C, &comm);
2211:   PetscCheck(A->rmap->rstart == B->rmap->rstart && A->rmap->rend == B->rmap->rend, comm, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ", %" PetscInt_FMT ")",
2212:              A->rmap->rstart, A->rmap->rend, B->rmap->rstart, B->rmap->rend);

2214:   /* Set default algorithm */
2215:   PetscStrcmp(C->product->alg, "default", &flg);
2216:   if (flg) MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]);

2218:   /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2219:   if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2220:     MatInfo   Ainfo, Binfo;
2221:     PetscInt  nz_local;
2222:     PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;

2224:     MatGetInfo(A, MAT_LOCAL, &Ainfo);
2225:     MatGetInfo(B, MAT_LOCAL, &Binfo);
2226:     nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);

2228:     if (B->cmap->N > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2229:     MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPIU_BOOL, MPI_LOR, comm);

2231:     if (alg_scalable) {
2232:       alg = 0; /* the scalable algorithm would be ~50% slower than the nonscalable one */
2233:       MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]);
2234:       PetscInfo(B, "Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n", B->cmap->N, (double)(product->fill * nz_local));
2235:     }
2236:   }

2238:   /* Get runtime option */
2239:   if (product->api_user) {
2240:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatTransposeMatMult", "Mat");
2241:     PetscOptionsEList("-mattransposematmult_via", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg);
2242:     PetscOptionsEnd();
2243:   } else {
2244:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_AtB", "Mat");
2245:     PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatTransposeMatMult", algTypes, nalg, algTypes[alg], &alg, &flg);
2246:     PetscOptionsEnd();
2247:   }
2248:   if (flg) MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]);

2250:   C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
2251:   return 0;
2252: }
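
Besides "scalable" and "nonscalable", the "at*b" entry above explicitly forms A^T once (kept for reuse, see the AtB dispatcher earlier in this section) and then falls back on the AB kernels. A sketch of requesting it; the helper name is ours:

#include <petscmat.h>

/* Equivalent command-line forms registered above:
     -mattransposematmult_via at*b      or      -mat_product_algorithm at*b */
static PetscErrorCode atb_explicit_transpose_sketch(Mat A, Mat B, Mat *C)
{
  MatProductCreate(A, B, NULL, C);
  MatProductSetType(*C, MATPRODUCT_AtB);
  MatProductSetAlgorithm(*C, "at*b"); /* A^T is formed once and kept for reuse */
  MatProductSetFromOptions(*C);
  MatProductSymbolic(*C);
  MatProductNumeric(*C);
  return 0;
}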

2254: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
2255: {
2256:   Mat_Product *product = C->product;
2257:   Mat          A = product->A, P = product->B;
2258:   MPI_Comm     comm;
2259:   PetscBool    flg;
2260:   PetscInt     alg = 1; /* set default algorithm */
2261: #if !defined(PETSC_HAVE_HYPRE)
2262:   const char *algTypes[5] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend"};
2263:   PetscInt    nalg        = 5;
2264: #else
2265:   const char *algTypes[6] = {"scalable", "nonscalable", "allatonce", "allatonce_merged", "backend", "hypre"};
2266:   PetscInt    nalg        = 6;
2267: #endif
2268:   PetscInt pN = P->cmap->N;

2270:   /* Check matrix local sizes */
2271:   PetscObjectGetComm((PetscObject)C, &comm);
2272:   PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend, comm, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, Arow (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ", %" PetscInt_FMT ")",
2273:              A->rmap->rstart, A->rmap->rend, P->rmap->rstart, P->rmap->rend);
2274:   PetscCheck(A->cmap->rstart == P->rmap->rstart && A->cmap->rend == P->rmap->rend, comm, PETSC_ERR_ARG_SIZ, "Matrix local column dimensions are incompatible, Acol (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ", %" PetscInt_FMT ")",
2275:              A->cmap->rstart, A->cmap->rend, P->rmap->rstart, P->rmap->rend);

2277:   /* Set "nonscalable" as default algorithm */
2278:   PetscStrcmp(C->product->alg, "default", &flg);
2279:   if (flg) {
2280:     MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]);

2282:     /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2283:     if (pN > 100000) {
2284:       MatInfo   Ainfo, Pinfo;
2285:       PetscInt  nz_local;
2286:       PetscBool alg_scalable_loc = PETSC_FALSE, alg_scalable;

2288:       MatGetInfo(A, MAT_LOCAL, &Ainfo);
2289:       MatGetInfo(P, MAT_LOCAL, &Pinfo);
2290:       nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);

2292:       if (pN > product->fill * nz_local) alg_scalable_loc = PETSC_TRUE;
2293:       MPIU_Allreduce(&alg_scalable_loc, &alg_scalable, 1, MPIU_BOOL, MPI_LOR, comm);

2295:       if (alg_scalable) {
2296:         alg = 0; /* the scalable algorithm would be ~50% slower than the nonscalable one */
2297:         MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]);
2298:       }
2299:     }
2300:   }

2302:   /* Get runtime option */
2303:   if (product->api_user) {
2304:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatPtAP", "Mat");
2305:     PetscOptionsEList("-matptap_via", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg);
2306:     PetscOptionsEnd();
2307:   } else {
2308:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_PtAP", "Mat");
2309:     PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatPtAP", algTypes, nalg, algTypes[alg], &alg, &flg);
2310:     PetscOptionsEnd();
2311:   }
2312:   if (flg) MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]);

2314:   C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
2315:   return 0;
2316: }
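
PtAP is typically reused many times (for example in multigrid setup); the convenience call below routes through the selection above. A sketch with our names and an illustrative fill of 2.0:

#include <petscmat.h>

/* Algorithm can be switched at run time, e.g.
     -matptap_via allatonce     or     -mat_product_algorithm scalable */
static PetscErrorCode ptap_sketch(Mat A, Mat P, Mat *C)
{
  MatPtAP(A, P, MAT_INITIAL_MATRIX, 2.0, C); /* C = P^T * A * P */
  /* ... after A changes values (same nonzero pattern) ... */
  MatPtAP(A, P, MAT_REUSE_MATRIX, 2.0, C);
  return 0;
}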

2318: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
2319: {
2320:   Mat_Product *product = C->product;
2321:   Mat          A = product->A, R = product->B;

2323:   /* Check matrix local sizes */
2324:   PetscCheck(A->cmap->n == R->cmap->n && A->rmap->n == R->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Matrix local dimensions are incompatible, A local rows %" PetscInt_FMT ", R local rows %" PetscInt_FMT ", R local columns %" PetscInt_FMT,
2325:              A->rmap->n, R->rmap->n, R->cmap->n);

2327:   C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
2328:   return 0;
2329: }
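
RARt is the same triple product written with the restriction matrix R in place of P^T. A minimal sketch of the corresponding convenience call, with our names and an illustrative fill:

#include <petscmat.h>

static PetscErrorCode rart_sketch(Mat A, Mat R, Mat *C)
{
  MatRARt(A, R, MAT_INITIAL_MATRIX, 2.0, C); /* C = R * A * R^T */
  return 0;
}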

2331: /*
2332:  Set options for ABC = A*B*C = A*(B*C); ABC's algorithm must be chosen from AB's algorithm
2333: */
2334: static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
2335: {
2336:   Mat_Product *product     = C->product;
2337:   PetscBool    flg         = PETSC_FALSE;
2338:   PetscInt     alg         = 1; /* default algorithm */
2339:   const char  *algTypes[3] = {"scalable", "nonscalable", "seqmpi"};
2340:   PetscInt     nalg        = 3;

2342:   /* Set default algorithm */
2343:   PetscStrcmp(C->product->alg, "default", &flg);
2344:   if (flg) MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]);

2346:   /* Get runtime option */
2347:   if (product->api_user) {
2348:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatMatMatMult", "Mat");
2349:     PetscOptionsEList("-matmatmatmult_via", "Algorithmic approach", "MatMatMatMult", algTypes, nalg, algTypes[alg], &alg, &flg);
2350:     PetscOptionsEnd();
2351:   } else {
2352:     PetscOptionsBegin(PetscObjectComm((PetscObject)C), ((PetscObject)C)->prefix, "MatProduct_ABC", "Mat");
2353:     PetscOptionsEList("-mat_product_algorithm", "Algorithmic approach", "MatProduct_ABC", algTypes, nalg, algTypes[alg], &alg, &flg);
2354:     PetscOptionsEnd();
2355:   }
2356:   if (flg) MatProductSetAlgorithm(C, (MatProductAlgorithm)algTypes[alg]);

2358:   C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
2359:   C->ops->productsymbolic    = MatProductSymbolic_ABC;
2360:   return 0;
2361: }
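
As the comment above notes, the triple product is computed as A*(B*C), so only AB-style algorithms apply. A sketch of the convenience call, with our names:

#include <petscmat.h>

/* Algorithm selectable via -matmatmatmult_via or -mat_product_algorithm (see above). */
static PetscErrorCode abc_sketch(Mat A, Mat B, Mat C, Mat *D)
{
  MatMatMatMult(A, B, C, MAT_INITIAL_MATRIX, PETSC_DEFAULT, D); /* D = A*B*C */
  return 0;
}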

2363: PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
2364: {
2365:   Mat_Product *product = C->product;

2367:   switch (product->type) {
2368:   case MATPRODUCT_AB:
2369:     MatProductSetFromOptions_MPIAIJ_AB(C);
2370:     break;
2371:   case MATPRODUCT_ABt:
2372:     MatProductSetFromOptions_MPIAIJ_ABt(C);
2373:     break;
2374:   case MATPRODUCT_AtB:
2375:     MatProductSetFromOptions_MPIAIJ_AtB(C);
2376:     break;
2377:   case MATPRODUCT_PtAP:
2378:     MatProductSetFromOptions_MPIAIJ_PtAP(C);
2379:     break;
2380:   case MATPRODUCT_RARt:
2381:     MatProductSetFromOptions_MPIAIJ_RARt(C);
2382:     break;
2383:   case MATPRODUCT_ABC:
2384:     MatProductSetFromOptions_MPIAIJ_ABC(C);
2385:     break;
2386:   default:
2387:     break;
2388:   }
2389:   return 0;
2390: }
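
Finally, a self-contained driver that exercises the dispatch above end to end. Everything here is a sketch, not part of mpimatmatmult.c: matrix contents and sizes are arbitrary, but the call sequence is the standard MatProduct workflow.

#include <petscmat.h>

int main(int argc, char **argv)
{
  Mat      A, B, C;
  PetscInt i, rstart, rend, N = 64;

  PetscInitialize(&argc, &argv, NULL, NULL);

  /* A: distributed N x N tridiagonal AIJ matrix; B: a copy of A */
  MatCreateAIJ(PETSC_COMM_WORLD, PETSC_DECIDE, PETSC_DECIDE, N, N, 3, NULL, 2, NULL, &A);
  MatGetOwnershipRange(A, &rstart, &rend);
  for (i = rstart; i < rend; i++) {
    MatSetValue(A, i, i, 2.0, INSERT_VALUES);
    if (i > 0) MatSetValue(A, i, i - 1, -1.0, INSERT_VALUES);
    if (i < N - 1) MatSetValue(A, i, i + 1, -1.0, INSERT_VALUES);
  }
  MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
  MatDuplicate(A, MAT_COPY_VALUES, &B);

  /* product type is fixed here; the algorithm remains runtime-selectable, e.g.
       -mat_product_algorithm scalable
     With more than one MPI rank, A and B are MPIAIJ and the dispatch above is used. */
  MatProductCreate(A, B, NULL, &C);
  MatProductSetType(C, MATPRODUCT_AtB);
  MatProductSetFromOptions(C);
  MatProductSymbolic(C);
  MatProductNumeric(C);
  MatViewFromOptions(C, NULL, "-c_view");

  MatDestroy(&C);
  MatDestroy(&B);
  MatDestroy(&A);
  PetscFinalize();
  return 0;
}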