Actual source code: baijov.c
2: /*
3: Routines to compute overlapping regions of a parallel MPI matrix
4: and to extract submatrices that are shared across processors.
5: */
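/*
   A minimal usage sketch (illustrative only; A, rowis, colis and submat are
   hypothetical caller variables, not part of this file) of the public entry
   points that dispatch to the routines below for a MATMPIBAIJ matrix:

     IS  rowis, colis;
     Mat *submat;
     ISCreateStride(PETSC_COMM_WORLD, 10, 0, 1, &rowis);
     MatIncreaseOverlap(A, 1, &rowis, 2);                      // two levels of overlap
     ISDuplicate(rowis, &colis);
     MatCreateSubMatrices(A, 1, &rowis, &colis, MAT_INITIAL_MATRIX, &submat);
     MatDestroySubMatrices(1, &submat);
     ISDestroy(&colis);
     ISDestroy(&rowis);
*/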
6: #include <../src/mat/impls/baij/mpi/mpibaij.h>
7: #include <petscbt.h>
9: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat, PetscInt, char **, PetscInt *, PetscInt **);
10: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat, PetscInt, PetscInt **, PetscInt **, PetscInt *);
11: extern PetscErrorCode MatGetRow_MPIBAIJ(Mat, PetscInt, PetscInt *, PetscInt **, PetscScalar **);
12: extern PetscErrorCode MatRestoreRow_MPIBAIJ(Mat, PetscInt, PetscInt *, PetscInt **, PetscScalar **);
14: PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat C, PetscInt imax, IS is[], PetscInt ov)
15: {
16: PetscInt i, N = C->cmap->N, bs = C->rmap->bs;
17: IS *is_new;
19: PetscMalloc1(imax, &is_new);
20: /* Convert the indices into block format */
21: ISCompressIndicesGeneral(N, C->rmap->n, bs, imax, is, is_new);
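/* For example (illustrative): with bs = 2, the point indices {0, 1, 4, 5, 6}
   compress to the block indices {0, 2, 3}; ISExpandIndicesGeneral below
   performs the inverse mapping once the overlap has been computed. */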
23: for (i = 0; i < ov; ++i) MatIncreaseOverlap_MPIBAIJ_Once(C, imax, is_new);
24: for (i = 0; i < imax; i++) ISDestroy(&is[i]);
25: ISExpandIndicesGeneral(N, N, bs, imax, is_new, is);
26: for (i = 0; i < imax; i++) ISDestroy(&is_new[i]);
27: PetscFree(is_new);
28: return 0;
29: }
31: /*
32: Sample message format:
33: If a processor A wants processor B to process some elements corresponding
34: to index sets is[1], is[5]
35: mesg [0] = 2 (number of index sets in the message)
36: -----------
37: mesg [1] = 1 => is[1]
38: mesg [2] = sizeof(is[1]);
39: -----------
40: mesg [3] = 5 => is[5]
41: mesg [4] = sizeof(is[5]);
42: -----------
43: mesg [5]
44: mesg [n] data(is[1])
45: -----------
46: mesg[n+1]
47: mesg[m] data(is[5])
48: -----------
50: Notes:
51: nrqs - number of requests sent (or to be sent out)
52: nrqr - number of requests received (which have to be, or have been, processed)
53: */
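/*
   A sketch (assumed helper code, not part of this file) of how a receiver walks
   one such message, matching the header layout documented above:

     const PetscInt nis = mesg[0];             // number of index sets in the message
     PetscInt       off = 1 + 2 * nis;         // data starts right after the header
     for (PetscInt s = 1; s <= nis; s++) {
       const PetscInt is_no = mesg[2 * s - 1]; // which IS this block belongs to
       const PetscInt sz    = mesg[2 * s];     // number of indices sent for it
       // the indices for is[is_no] occupy mesg[off] .. mesg[off + sz - 1]
       off += sz;
     }
*/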
54: PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat C, PetscInt imax, IS is[])
55: {
56: Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
57: const PetscInt **idx, *idx_i;
58: PetscInt *n, *w3, *w4, **data, len;
59: PetscMPIInt size, rank, tag1, tag2, *w2, *w1, nrqr;
60: PetscInt Mbs, i, j, k, **rbuf, row, nrqs, msz, **outdat, **ptr;
61: PetscInt *ctr, *pa, *tmp, *isz, *isz1, **xdata, **rbuf2, *d_p;
62: PetscMPIInt *onodes1, *olengths1, *onodes2, *olengths2, proc = -1;
63: PetscBT *table;
64: MPI_Comm comm, *iscomms;
65: MPI_Request *s_waits1, *r_waits1, *s_waits2, *r_waits2;
66: char *t_p;
68: PetscObjectGetComm((PetscObject)C, &comm);
69: size = c->size;
70: rank = c->rank;
71: Mbs = c->Mbs;
73: PetscObjectGetNewTag((PetscObject)C, &tag1);
74: PetscObjectGetNewTag((PetscObject)C, &tag2);
76: PetscMalloc2(imax + 1, (PetscInt ***)&idx, imax, &n);
78: for (i = 0; i < imax; i++) {
79: ISGetIndices(is[i], &idx[i]);
80: ISGetLocalSize(is[i], &n[i]);
81: }
83: /* Evaluate communication - message to whom, length of message, and buffer space
84: required. Based on this, buffers are allocated, and data copied into them */
85: PetscCalloc4(size, &w1, size, &w2, size, &w3, size, &w4);
86: for (i = 0; i < imax; i++) {
87: PetscArrayzero(w4, size); /* initialise work vector*/
88: idx_i = idx[i];
89: len = n[i];
90: for (j = 0; j < len; j++) {
91: row = idx_i[j];
93: PetscLayoutFindOwner(C->rmap, row * C->rmap->bs, &proc);
94: w4[proc]++;
95: }
96: for (j = 0; j < size; j++) {
97: if (w4[j]) {
98: w1[j] += w4[j];
99: w3[j]++;
100: }
101: }
102: }
104: nrqs = 0; /* number of outgoing messages */
105: msz = 0; /* total message length (over all procs) */
106: w1[rank] = 0; /* no message sent to itself */
107: w3[rank] = 0;
108: for (i = 0; i < size; i++) {
109: if (w1[i]) {
110: w2[i] = 1;
111: nrqs++;
112: } /* there exists a message to proc i */
113: }
114: /* pa - list of processors to communicate with */
115: PetscMalloc1(nrqs, &pa);
116: for (i = 0, j = 0; i < size; i++) {
117: if (w1[i]) {
118: pa[j] = i;
119: j++;
120: }
121: }
123: /* Each message has a header = 1 + 2*(number of ISs) + data */
124: for (i = 0; i < nrqs; i++) {
125: j = pa[i];
126: w1[j] += w2[j] + 2 * w3[j];
127: msz += w1[j];
128: }
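/* Worked example (illustrative): if two index sets send 3 and 4 block rows to
   proc j, then w3[j] = 2 and the data part has 7 entries, so the message
   length becomes w1[j] = 7 + 1 + 2*2 = 12 integers. */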
130: /* Determine the number of messages to expect, their lengths, and their from-ids */
131: PetscGatherNumberOfMessages(comm, w2, w1, &nrqr);
132: PetscGatherMessageLengths(comm, nrqs, nrqr, w1, &onodes1, &olengths1);
134: /* Now post the Irecvs corresponding to these messages */
135: PetscPostIrecvInt(comm, tag1, nrqr, onodes1, olengths1, &rbuf, &r_waits1);
137: /* Allocate Memory for outgoing messages */
138: PetscMalloc4(size, &outdat, size, &ptr, msz, &tmp, size, &ctr);
139: PetscArrayzero(outdat, size);
140: PetscArrayzero(ptr, size);
141: {
142: PetscInt *iptr = tmp, ict = 0;
143: for (i = 0; i < nrqs; i++) {
144: j = pa[i];
145: iptr += ict;
146: outdat[j] = iptr;
147: ict = w1[j];
148: }
149: }
151: /* Form the outgoing messages */
152: /*plug in the headers*/
153: for (i = 0; i < nrqs; i++) {
154: j = pa[i];
155: outdat[j][0] = 0;
156: PetscArrayzero(outdat[j] + 1, 2 * w3[j]);
157: ptr[j] = outdat[j] + 2 * w3[j] + 1;
158: }
160: /* Memory for doing local proc's work*/
161: {
162: PetscCalloc5(imax, &table, imax, &data, imax, &isz, Mbs * imax, &d_p, (Mbs / PETSC_BITS_PER_BYTE + 1) * imax, &t_p);
164: for (i = 0; i < imax; i++) {
165: table[i] = t_p + (Mbs / PETSC_BITS_PER_BYTE + 1) * i;
166: data[i] = d_p + (Mbs)*i;
167: }
168: }
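/* PetscBT is a bit table: PetscBTLookupSet(bt, k) returns the previous bit at
   position k and then sets it, which is how each block row is appended at most
   once below. A standalone sketch (illustrative values, not part of this file):

     PetscBT  seen;
     PetscInt kept[4], cnt = 0, vals[] = {5, 7, 5, 2};
     PetscBTCreate(Mbs, &seen);
     for (PetscInt t = 0; t < 4; t++)
       if (!PetscBTLookupSet(seen, vals[t])) kept[cnt++] = vals[t]; // keeps 5, 7, 2
     PetscBTDestroy(&seen);
*/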
170: /* Parse the IS and update local tables and the outgoing buf with the data*/
171: {
172: PetscInt n_i, *data_i, isz_i, *outdat_j, ctr_j;
173: PetscBT table_i;
175: for (i = 0; i < imax; i++) {
176: PetscArrayzero(ctr, size);
177: n_i = n[i];
178: table_i = table[i];
179: idx_i = idx[i];
180: data_i = data[i];
181: isz_i = isz[i];
182: for (j = 0; j < n_i; j++) { /* parse the indices of each IS */
183: row = idx_i[j];
184: PetscLayoutFindOwner(C->rmap, row * C->rmap->bs, &proc);
185: if (proc != rank) { /* copy to the outgoing buffer */
186: ctr[proc]++;
187: *ptr[proc] = row;
188: ptr[proc]++;
189: } else { /* Update the local table */
190: if (!PetscBTLookupSet(table_i, row)) data_i[isz_i++] = row;
191: }
192: }
193: /* Update the headers for the current IS */
194: for (j = 0; j < size; j++) { /* Can Optimise this loop by using pa[] */
195: if ((ctr_j = ctr[j])) {
196: outdat_j = outdat[j];
197: k = ++outdat_j[0];
198: outdat_j[2 * k] = ctr_j;
199: outdat_j[2 * k - 1] = i;
200: }
201: }
202: isz[i] = isz_i;
203: }
204: }
206: /* Now post the sends */
207: PetscMalloc1(nrqs, &s_waits1);
208: for (i = 0; i < nrqs; ++i) {
209: j = pa[i];
210: MPI_Isend(outdat[j], w1[j], MPIU_INT, j, tag1, comm, s_waits1 + i);
211: }
213: /* No longer need the original indices*/
214: for (i = 0; i < imax; ++i) ISRestoreIndices(is[i], idx + i);
215: PetscFree2(*(PetscInt ***)&idx, n);
217: PetscMalloc1(imax, &iscomms);
218: for (i = 0; i < imax; ++i) {
219: PetscCommDuplicate(PetscObjectComm((PetscObject)is[i]), &iscomms[i], NULL);
220: ISDestroy(&is[i]);
221: }
223: /* Do Local work*/
224: MatIncreaseOverlap_MPIBAIJ_Local(C, imax, table, isz, data);
226: /* Receive messages*/
227: MPI_Waitall(nrqr, r_waits1, MPI_STATUSES_IGNORE);
228: MPI_Waitall(nrqs, s_waits1, MPI_STATUSES_IGNORE);
230: /* Phase 1 sends are complete - deallocate buffers */
231: PetscFree4(outdat, ptr, tmp, ctr);
232: PetscFree4(w1, w2, w3, w4);
234: PetscMalloc1(nrqr, &xdata);
235: PetscMalloc1(nrqr, &isz1);
236: MatIncreaseOverlap_MPIBAIJ_Receive(C, nrqr, rbuf, xdata, isz1);
237: if (rbuf) {
238: PetscFree(rbuf[0]);
239: PetscFree(rbuf);
240: }
242: /* Send the data back*/
243: /* Do a global reduction to know the buffer space req for incoming messages*/
244: {
245: PetscMPIInt *rw1;
247: PetscCalloc1(size, &rw1);
249: for (i = 0; i < nrqr; ++i) {
250: proc = onodes1[i];
251: rw1[proc] = isz1[i];
252: }
254: /* Determine the number of messages to expect, their lengths, and their from-ids */
255: PetscGatherMessageLengths(comm, nrqr, nrqs, rw1, &onodes2, &olengths2);
256: PetscFree(rw1);
257: }
258: /* Now post the Irecvs corresponding to these messages */
259: PetscPostIrecvInt(comm, tag2, nrqs, onodes2, olengths2, &rbuf2, &r_waits2);
261: /* Now post the sends */
262: PetscMalloc1(nrqr, &s_waits2);
263: for (i = 0; i < nrqr; ++i) {
264: j = onodes1[i];
265: MPI_Isend(xdata[i], isz1[i], MPIU_INT, j, tag2, comm, s_waits2 + i);
266: }
268: PetscFree(onodes1);
269: PetscFree(olengths1);
271: /* receive work done on other processors*/
272: {
273: PetscMPIInt idex;
274: PetscInt is_no, ct1, max, *rbuf2_i, isz_i, *data_i, jmax;
275: PetscBT table_i;
277: for (i = 0; i < nrqs; ++i) {
278: MPI_Waitany(nrqs, r_waits2, &idex, MPI_STATUS_IGNORE);
279: /* Process the message*/
280: rbuf2_i = rbuf2[idex];
281: ct1 = 2 * rbuf2_i[0] + 1;
282: jmax = rbuf2[idex][0];
283: for (j = 1; j <= jmax; j++) {
284: max = rbuf2_i[2 * j];
285: is_no = rbuf2_i[2 * j - 1];
286: isz_i = isz[is_no];
287: data_i = data[is_no];
288: table_i = table[is_no];
289: for (k = 0; k < max; k++, ct1++) {
290: row = rbuf2_i[ct1];
291: if (!PetscBTLookupSet(table_i, row)) data_i[isz_i++] = row;
292: }
293: isz[is_no] = isz_i;
294: }
295: }
296: MPI_Waitall(nrqr, s_waits2, MPI_STATUSES_IGNORE);
297: }
299: for (i = 0; i < imax; ++i) {
300: ISCreateGeneral(iscomms[i], isz[i], data[i], PETSC_COPY_VALUES, is + i);
301: PetscCommDestroy(&iscomms[i]);
302: }
304: PetscFree(iscomms);
305: PetscFree(onodes2);
306: PetscFree(olengths2);
308: PetscFree(pa);
309: if (rbuf2) {
310: PetscFree(rbuf2[0]);
311: PetscFree(rbuf2);
312: }
313: PetscFree(s_waits1);
314: PetscFree(r_waits1);
315: PetscFree(s_waits2);
316: PetscFree(r_waits2);
317: PetscFree5(table, data, isz, d_p, t_p);
318: if (xdata) {
319: PetscFree(xdata[0]);
320: PetscFree(xdata);
321: }
322: PetscFree(isz1);
323: return 0;
324: }
326: /*
327: MatIncreaseOverlap_MPIBAIJ_Local - Called by MatIncreaseOverlap, to do
328: the work on the local processor.
330: Inputs:
331: C - the matrix (MATMPIBAIJ);
332: imax - total number of index sets processed at a time;
333: table - array of bit tables (PetscBT), each Mbs bits in size.
335: Output:
336: isz - array containing the count of the solution elements corresponding
337: to each index set;
338: data - pointer to the solutions
339: */
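/*
   Worked example (illustrative numbers): with cstart = 10 and garray = {3, 42},
   a locally owned block row whose diagonal (A) part has local block columns
   {0, 2} and whose off-diagonal (B) part has local block columns {0, 1}
   contributes the global block columns {10, 12, 3, 42}; the bit table skips any
   of them that the index set already contains.
*/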
340: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat C, PetscInt imax, PetscBT *table, PetscInt *isz, PetscInt **data)
341: {
342: Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
343: Mat A = c->A, B = c->B;
344: Mat_SeqBAIJ *a = (Mat_SeqBAIJ *)A->data, *b = (Mat_SeqBAIJ *)B->data;
345: PetscInt start, end, val, max, rstart, cstart, *ai, *aj;
346: PetscInt *bi, *bj, *garray, i, j, k, row, *data_i, isz_i;
347: PetscBT table_i;
349: rstart = c->rstartbs;
350: cstart = c->cstartbs;
351: ai = a->i;
352: aj = a->j;
353: bi = b->i;
354: bj = b->j;
355: garray = c->garray;
357: for (i = 0; i < imax; i++) {
358: data_i = data[i];
359: table_i = table[i];
360: isz_i = isz[i];
361: for (j = 0, max = isz[i]; j < max; j++) {
362: row = data_i[j] - rstart;
363: start = ai[row];
364: end = ai[row + 1];
365: for (k = start; k < end; k++) { /* Amat */
366: val = aj[k] + cstart;
367: if (!PetscBTLookupSet(table_i, val)) data_i[isz_i++] = val;
368: }
369: start = bi[row];
370: end = bi[row + 1];
371: for (k = start; k < end; k++) { /* Bmat */
372: val = garray[bj[k]];
373: if (!PetscBTLookupSet(table_i, val)) data_i[isz_i++] = val;
374: }
375: }
376: isz[i] = isz_i;
377: }
378: return 0;
379: }
380: /*
381: MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
382: and return the output
384: Input:
385: C - the matrix
386: nrqr - number of messages being processed.
387: rbuf - an array of pointers to the received requests
389: Output:
390: xdata - array of messages to be sent back
391: isz1 - size of each message
393: For better efficiency we could perhaps allocate each xdata[i] separately;
394: then, if a reallocation is required, only the data for that one message
395: would need to be copied, rather than for all previous messages as happens
396: now, where a single large chunk of memory is used.
398: */
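/*
   Summary of the growth rule implemented below (descriptive, no new behaviour):
   all replies share the single allocation xdata[0]; whenever the running count
   ct3 reaches mem_estimate, the buffer is reallocated to
   (PetscInt)(1.5 * mem_estimate) + 1 entries, the old contents are copied over,
   and the per-message pointers are re-derived as
   xdata[ctr] = xdata[ctr-1] + isz1[ctr-1] so they keep pointing into the one
   shared chunk.
*/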
399: static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat C, PetscInt nrqr, PetscInt **rbuf, PetscInt **xdata, PetscInt *isz1)
400: {
401: Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
402: Mat A = c->A, B = c->B;
403: Mat_SeqBAIJ *a = (Mat_SeqBAIJ *)A->data, *b = (Mat_SeqBAIJ *)B->data;
404: PetscInt rstart, cstart, *ai, *aj, *bi, *bj, *garray, i, j, k;
405: PetscInt row, total_sz, ct, ct1, ct2, ct3, mem_estimate, oct2, l, start, end;
406: PetscInt val, max1, max2, Mbs, no_malloc = 0, *tmp, new_estimate, ctr;
407: PetscInt *rbuf_i, kmax, rbuf_0;
408: PetscBT xtable;
410: Mbs = c->Mbs;
411: rstart = c->rstartbs;
412: cstart = c->cstartbs;
413: ai = a->i;
414: aj = a->j;
415: bi = b->i;
416: bj = b->j;
417: garray = c->garray;
419: for (i = 0, ct = 0, total_sz = 0; i < nrqr; ++i) {
420: rbuf_i = rbuf[i];
421: rbuf_0 = rbuf_i[0];
422: ct += rbuf_0;
423: for (j = 1; j <= rbuf_0; j++) total_sz += rbuf_i[2 * j];
424: }
426: if (c->Mbs) max1 = ct * (a->nz + b->nz) / c->Mbs;
427: else max1 = 1;
428: mem_estimate = 3 * ((total_sz > max1 ? total_sz : max1) + 1);
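/* Worked example (illustrative): with ct = 8 requested block rows, an average
   of (a->nz + b->nz)/Mbs = 5 nonzero blocks per block row, and total_sz = 30,
   max1 = 40 and mem_estimate = 3 * (40 + 1) = 123 PetscInt entries. */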
429: if (nrqr) {
430: PetscMalloc1(mem_estimate, &xdata[0]);
431: ++no_malloc;
432: }
433: PetscBTCreate(Mbs, &xtable);
434: PetscArrayzero(isz1, nrqr);
436: ct3 = 0;
437: for (i = 0; i < nrqr; i++) { /* for each message from proc i */
438: rbuf_i = rbuf[i];
439: rbuf_0 = rbuf_i[0];
440: ct1 = 2 * rbuf_0 + 1;
441: ct2 = ct1;
442: ct3 += ct1;
443: for (j = 1; j <= rbuf_0; j++) { /* for each IS from proc i */
444: PetscBTMemzero(Mbs, xtable);
445: oct2 = ct2;
446: kmax = rbuf_i[2 * j];
447: for (k = 0; k < kmax; k++, ct1++) {
448: row = rbuf_i[ct1];
449: if (!PetscBTLookupSet(xtable, row)) {
450: if (!(ct3 < mem_estimate)) {
451: new_estimate = (PetscInt)(1.5 * mem_estimate) + 1;
452: PetscMalloc1(new_estimate, &tmp);
453: PetscArraycpy(tmp, xdata[0], mem_estimate);
454: PetscFree(xdata[0]);
455: xdata[0] = tmp;
456: mem_estimate = new_estimate;
457: ++no_malloc;
458: for (ctr = 1; ctr <= i; ctr++) xdata[ctr] = xdata[ctr - 1] + isz1[ctr - 1];
459: }
460: xdata[i][ct2++] = row;
461: ct3++;
462: }
463: }
464: for (k = oct2, max2 = ct2; k < max2; k++) {
465: row = xdata[i][k] - rstart;
466: start = ai[row];
467: end = ai[row + 1];
468: for (l = start; l < end; l++) {
469: val = aj[l] + cstart;
470: if (!PetscBTLookupSet(xtable, val)) {
471: if (!(ct3 < mem_estimate)) {
472: new_estimate = (PetscInt)(1.5 * mem_estimate) + 1;
473: PetscMalloc1(new_estimate, &tmp);
474: PetscArraycpy(tmp, xdata[0], mem_estimate);
475: PetscFree(xdata[0]);
476: xdata[0] = tmp;
477: mem_estimate = new_estimate;
478: ++no_malloc;
479: for (ctr = 1; ctr <= i; ctr++) xdata[ctr] = xdata[ctr - 1] + isz1[ctr - 1];
480: }
481: xdata[i][ct2++] = val;
482: ct3++;
483: }
484: }
485: start = bi[row];
486: end = bi[row + 1];
487: for (l = start; l < end; l++) {
488: val = garray[bj[l]];
489: if (!PetscBTLookupSet(xtable, val)) {
490: if (!(ct3 < mem_estimate)) {
491: new_estimate = (PetscInt)(1.5 * mem_estimate) + 1;
492: PetscMalloc1(new_estimate, &tmp);
493: PetscArraycpy(tmp, xdata[0], mem_estimate);
494: PetscFree(xdata[0]);
495: xdata[0] = tmp;
496: mem_estimate = new_estimate;
497: ++no_malloc;
498: for (ctr = 1; ctr <= i; ctr++) xdata[ctr] = xdata[ctr - 1] + isz1[ctr - 1];
499: }
500: xdata[i][ct2++] = val;
501: ct3++;
502: }
503: }
504: }
505: /* Update the header*/
506: xdata[i][2 * j] = ct2 - oct2; /* Undo the vector isz1 and use only a var*/
507: xdata[i][2 * j - 1] = rbuf_i[2 * j - 1];
508: }
509: xdata[i][0] = rbuf_0;
510: if (i + 1 < nrqr) xdata[i + 1] = xdata[i] + ct2;
511: isz1[i] = ct2; /* size of each message */
512: }
513: PetscBTDestroy(&xtable);
514: PetscInfo(C, "Allocated %" PetscInt_FMT " bytes, required %" PetscInt_FMT ", no of mallocs = %" PetscInt_FMT "\n", mem_estimate, ct3, no_malloc);
515: return 0;
516: }
518: PetscErrorCode MatCreateSubMatrices_MPIBAIJ(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submat[])
519: {
520: IS *isrow_block, *iscol_block;
521: Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
522: PetscInt nmax, nstages_local, nstages, i, pos, max_no, N = C->cmap->N, bs = C->rmap->bs;
523: Mat_SeqBAIJ *subc;
524: Mat_SubSppt *smat;
526: /* The compression and expansion should be avoided. It does not point
527: out errors and might change the indices, hence it is potentially buggy */
528: PetscMalloc2(ismax + 1, &isrow_block, ismax + 1, &iscol_block);
529: ISCompressIndicesGeneral(N, C->rmap->n, bs, ismax, isrow, isrow_block);
530: ISCompressIndicesGeneral(N, C->cmap->n, bs, ismax, iscol, iscol_block);
532: /* Determine the number of stages through which submatrices are done */
533: if (!C->cmap->N) nmax = 20 * 1000000 / sizeof(PetscInt);
534: else nmax = 20 * 1000000 / (c->Nbs * sizeof(PetscInt));
535: if (!nmax) nmax = 1;
537: if (scall == MAT_INITIAL_MATRIX) {
538: nstages_local = ismax / nmax + ((ismax % nmax) ? 1 : 0); /* local nstages */
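/* Worked example (illustrative): with c->Nbs = 1000000 block columns and an
   8-byte PetscInt, nmax = 20*1000000/(1000000*8) = 2, so a request for
   ismax = 5 submatrices is processed in nstages_local = 5/2 + 1 = 3 stages
   of at most 2 submatrices each. */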
540: /* Make sure every processor loops through the nstages */
541: MPIU_Allreduce(&nstages_local, &nstages, 1, MPIU_INT, MPI_MAX, PetscObjectComm((PetscObject)C));
543: /* Allocate memory to hold all the submatrices and dummy submatrices */
544: PetscCalloc1(ismax + nstages, submat);
545: } else { /* MAT_REUSE_MATRIX */
546: if (ismax) {
547: subc = (Mat_SeqBAIJ *)((*submat)[0]->data);
548: smat = subc->submatis1;
549: } else { /* (*submat)[0] is a dummy matrix */
550: smat = (Mat_SubSppt *)(*submat)[0]->data;
551: }
553: nstages = smat->nstages;
554: }
556: for (i = 0, pos = 0; i < nstages; i++) {
557: if (pos + nmax <= ismax) max_no = nmax;
558: else if (pos >= ismax) max_no = 0;
559: else max_no = ismax - pos;
561: MatCreateSubMatrices_MPIBAIJ_local(C, max_no, isrow_block + pos, iscol_block + pos, scall, *submat + pos);
562: if (!max_no) {
563: if (scall == MAT_INITIAL_MATRIX) { /* submat[pos] is a dummy matrix */
564: smat = (Mat_SubSppt *)(*submat)[pos]->data;
565: smat->nstages = nstages;
566: }
567: pos++; /* advance to next dummy matrix if any */
568: } else pos += max_no;
569: }
571: if (scall == MAT_INITIAL_MATRIX && ismax) {
572: /* save nstages for reuse */
573: subc = (Mat_SeqBAIJ *)((*submat)[0]->data);
574: smat = subc->submatis1;
575: smat->nstages = nstages;
576: }
578: for (i = 0; i < ismax; i++) {
579: ISDestroy(&isrow_block[i]);
580: ISDestroy(&iscol_block[i]);
581: }
582: PetscFree2(isrow_block, iscol_block);
583: return 0;
584: }
586: #if defined(PETSC_USE_CTABLE)
587: PetscErrorCode PetscGetProc(const PetscInt row, const PetscMPIInt size, const PetscInt proc_gnode[], PetscMPIInt *rank)
588: {
589: PetscInt nGlobalNd = proc_gnode[size];
590: PetscMPIInt fproc;
592: PetscMPIIntCast((PetscInt)(((float)row * (float)size / (float)nGlobalNd + 0.5)), &fproc);
593: if (fproc > size) fproc = size;
594: while (row < proc_gnode[fproc] || row >= proc_gnode[fproc + 1]) {
595: if (row < proc_gnode[fproc]) fproc--;
596: else fproc++;
597: }
598: *rank = fproc;
599: return 0;
600: }
601: #endif
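/*
   Worked example (illustrative) for PetscGetProc above: with size = 4 and
   proc_gnode = {0, 10, 20, 30, 40}, a query for row = 27 first guesses
   fproc = (PetscInt)(27*4/40.0 + 0.5) = 3, finds 27 < proc_gnode[3] = 30, steps
   down to fproc = 2, and returns rank 2 because 20 <= 27 < 30.
*/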
603: /* -------------------------------------------------------------------------*/
604: /* This code is used for BAIJ and SBAIJ matrices (unfortunate dependency) */
605: PetscErrorCode MatCreateSubMatrices_MPIBAIJ_local(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submats)
606: {
607: Mat_MPIBAIJ *c = (Mat_MPIBAIJ *)C->data;
608: Mat A = c->A;
609: Mat_SeqBAIJ *a = (Mat_SeqBAIJ *)A->data, *b = (Mat_SeqBAIJ *)c->B->data, *subc;
610: const PetscInt **icol, **irow;
611: PetscInt *nrow, *ncol, start;
612: PetscMPIInt rank, size, tag0, tag2, tag3, tag4, *w1, *w2, *w3, *w4, nrqr;
613: PetscInt **sbuf1, **sbuf2, *sbuf2_i, i, j, k, l, ct1, ct2, **rbuf1, row, proc = -1;
614: PetscInt nrqs = 0, msz, **ptr = NULL, *req_size = NULL, *ctr = NULL, *pa, *tmp = NULL, tcol;
615: PetscInt **rbuf3 = NULL, *req_source1 = NULL, *req_source2, **sbuf_aj, **rbuf2 = NULL, max1, max2;
616: PetscInt **lens, is_no, ncols, *cols, mat_i, *mat_j, tmp2, jmax;
617: #if defined(PETSC_USE_CTABLE)
618: PetscTable *cmap, cmap_i = NULL, *rmap, rmap_i;
619: #else
620: PetscInt **cmap, *cmap_i = NULL, **rmap, *rmap_i;
621: #endif
622: const PetscInt *irow_i, *icol_i;
623: PetscInt ctr_j, *sbuf1_j, *sbuf_aj_i, *rbuf1_i, kmax, *lens_i;
624: MPI_Request *s_waits1, *r_waits1, *s_waits2, *r_waits2, *r_waits3;
625: MPI_Request *r_waits4, *s_waits3, *s_waits4;
626: MPI_Comm comm;
627: PetscScalar **rbuf4, *rbuf4_i = NULL, **sbuf_aa, *vals, *mat_a = NULL, *imat_a = NULL, *sbuf_aa_i;
628: PetscMPIInt *onodes1, *olengths1, end;
629: PetscInt **row2proc, *row2proc_i, *imat_ilen, *imat_j, *imat_i;
630: Mat_SubSppt *smat_i;
631: PetscBool *issorted, colflag, iscsorted = PETSC_TRUE;
632: PetscInt *sbuf1_i, *rbuf2_i, *rbuf3_i, ilen;
633: PetscInt bs = C->rmap->bs, bs2 = c->bs2, rstart = c->rstartbs;
634: PetscBool ijonly = c->ijonly; /* private flag indicates only matrix data structures are requested */
635: PetscInt nzA, nzB, *a_i = a->i, *b_i = b->i, *a_j = a->j, *b_j = b->j, ctmp, imark, *cworkA, *cworkB;
636: PetscScalar *vworkA = NULL, *vworkB = NULL, *a_a = a->a, *b_a = b->a;
637: PetscInt cstart = c->cstartbs, *bmap = c->garray;
638: PetscBool *allrows, *allcolumns;
640: PetscObjectGetComm((PetscObject)C, &comm);
641: size = c->size;
642: rank = c->rank;
644: PetscMalloc5(ismax, &row2proc, ismax, &cmap, ismax, &rmap, ismax + 1, &allcolumns, ismax, &allrows);
645: PetscMalloc5(ismax, (PetscInt ***)&irow, ismax, (PetscInt ***)&icol, ismax, &nrow, ismax, &ncol, ismax, &issorted);
647: for (i = 0; i < ismax; i++) {
648: ISSorted(iscol[i], &issorted[i]);
649: if (!issorted[i]) iscsorted = issorted[i]; /* columns are not sorted! */
650: ISSorted(isrow[i], &issorted[i]);
652: /* Check for special case: allcolumns */
653: ISIdentity(iscol[i], &colflag);
654: ISGetLocalSize(iscol[i], &ncol[i]);
656: if (colflag && ncol[i] == c->Nbs) {
657: allcolumns[i] = PETSC_TRUE;
658: icol[i] = NULL;
659: } else {
660: allcolumns[i] = PETSC_FALSE;
661: ISGetIndices(iscol[i], &icol[i]);
662: }
664: /* Check for special case: allrows */
665: ISIdentity(isrow[i], &colflag);
666: ISGetLocalSize(isrow[i], &nrow[i]);
667: if (colflag && nrow[i] == c->Mbs) {
668: allrows[i] = PETSC_TRUE;
669: irow[i] = NULL;
670: } else {
671: allrows[i] = PETSC_FALSE;
672: ISGetIndices(isrow[i], &irow[i]);
673: }
674: }
676: if (scall == MAT_REUSE_MATRIX) {
677: /* Assumes the new rows have the same lengths as the old rows */
678: for (i = 0; i < ismax; i++) {
679: subc = (Mat_SeqBAIJ *)(submats[i]->data);
682: /* Initialize the matrix as if empty */
683: PetscArrayzero(subc->ilen, subc->mbs);
686: submats[i]->factortype = C->factortype;
688: smat_i = subc->submatis1;
690: nrqs = smat_i->nrqs;
691: nrqr = smat_i->nrqr;
692: rbuf1 = smat_i->rbuf1;
693: rbuf2 = smat_i->rbuf2;
694: rbuf3 = smat_i->rbuf3;
695: req_source2 = smat_i->req_source2;
697: sbuf1 = smat_i->sbuf1;
698: sbuf2 = smat_i->sbuf2;
699: ptr = smat_i->ptr;
700: tmp = smat_i->tmp;
701: ctr = smat_i->ctr;
703: pa = smat_i->pa;
704: req_size = smat_i->req_size;
705: req_source1 = smat_i->req_source1;
707: allcolumns[i] = smat_i->allcolumns;
708: allrows[i] = smat_i->allrows;
709: row2proc[i] = smat_i->row2proc;
710: rmap[i] = smat_i->rmap;
711: cmap[i] = smat_i->cmap;
712: }
714: if (!ismax) { /* Get dummy submatrices and retrieve struct submatis1 */
716: smat_i = (Mat_SubSppt *)submats[0]->data;
718: nrqs = smat_i->nrqs;
719: nrqr = smat_i->nrqr;
720: rbuf1 = smat_i->rbuf1;
721: rbuf2 = smat_i->rbuf2;
722: rbuf3 = smat_i->rbuf3;
723: req_source2 = smat_i->req_source2;
725: sbuf1 = smat_i->sbuf1;
726: sbuf2 = smat_i->sbuf2;
727: ptr = smat_i->ptr;
728: tmp = smat_i->tmp;
729: ctr = smat_i->ctr;
731: pa = smat_i->pa;
732: req_size = smat_i->req_size;
733: req_source1 = smat_i->req_source1;
735: allcolumns[0] = PETSC_FALSE;
736: }
737: } else { /* scall == MAT_INITIAL_MATRIX */
738: /* Get some new tags to keep the communication clean */
739: PetscObjectGetNewTag((PetscObject)C, &tag2);
740: PetscObjectGetNewTag((PetscObject)C, &tag3);
742: /* Evaluate communication - message to whom, length of message, and buffer space
743: required. Based on this, buffers are allocated, and data copied into them */
744: PetscCalloc4(size, &w1, size, &w2, size, &w3, size, &w4); /* mesg size, initialize work vectors */
746: for (i = 0; i < ismax; i++) {
747: jmax = nrow[i];
748: irow_i = irow[i];
750: PetscMalloc1(jmax, &row2proc_i);
751: row2proc[i] = row2proc_i;
753: if (issorted[i]) proc = 0;
754: for (j = 0; j < jmax; j++) {
755: if (!issorted[i]) proc = 0;
756: if (allrows[i]) row = j;
757: else row = irow_i[j];
759: while (row >= c->rangebs[proc + 1]) proc++;
760: w4[proc]++;
761: row2proc_i[j] = proc; /* map row index to proc */
762: }
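/* Illustrative: c->rangebs holds the block-row ownership ranges, e.g. with
   rangebs = {0, 10, 20, 30} on three processes, global block row 17 maps to
   proc 1; when isrow[i] is sorted, proc is not reset for each row, so the
   scan resumes where the previous row left off. */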
763: for (j = 0; j < size; j++) {
764: if (w4[j]) {
765: w1[j] += w4[j];
766: w3[j]++;
767: w4[j] = 0;
768: }
769: }
770: }
772: nrqs = 0; /* no of outgoing messages */
773: msz = 0; /* total mesg length (for all procs) */
774: w1[rank] = 0; /* no mesg sent to self */
775: w3[rank] = 0;
776: for (i = 0; i < size; i++) {
777: if (w1[i]) {
778: w2[i] = 1;
779: nrqs++;
780: } /* there exists a message to proc i */
781: }
782: PetscMalloc1(nrqs, &pa); /* pa - list of processors to communicate with */
783: for (i = 0, j = 0; i < size; i++) {
784: if (w1[i]) {
785: pa[j] = i;
786: j++;
787: }
788: }
790: /* Each message would have a header = 1 + 2*(no of IS) + data */
791: for (i = 0; i < nrqs; i++) {
792: j = pa[i];
793: w1[j] += w2[j] + 2 * w3[j];
794: msz += w1[j];
795: }
796: PetscInfo(0, "Number of outgoing messages %" PetscInt_FMT " Total message length %" PetscInt_FMT "\n", nrqs, msz);
798: /* Determine the number of messages to expect, their lengths, and their from-ids */
799: PetscGatherNumberOfMessages(comm, w2, w1, &nrqr);
800: PetscGatherMessageLengths(comm, nrqs, nrqr, w1, &onodes1, &olengths1);
802: /* Now post the Irecvs corresponding to these messages */
803: PetscObjectGetNewTag((PetscObject)C, &tag0);
804: PetscPostIrecvInt(comm, tag0, nrqr, onodes1, olengths1, &rbuf1, &r_waits1);
806: /* Allocate Memory for outgoing messages */
807: PetscMalloc4(size, &sbuf1, size, &ptr, 2 * msz, &tmp, size, &ctr);
808: PetscArrayzero(sbuf1, size);
809: PetscArrayzero(ptr, size);
811: {
812: PetscInt *iptr = tmp;
813: k = 0;
814: for (i = 0; i < nrqs; i++) {
815: j = pa[i];
816: iptr += k;
817: sbuf1[j] = iptr;
818: k = w1[j];
819: }
820: }
822: /* Form the outgoing messages. Initialize the header space */
823: for (i = 0; i < nrqs; i++) {
824: j = pa[i];
825: sbuf1[j][0] = 0;
826: PetscArrayzero(sbuf1[j] + 1, 2 * w3[j]);
827: ptr[j] = sbuf1[j] + 2 * w3[j] + 1;
828: }
830: /* Parse the isrow and copy data into outbuf */
831: for (i = 0; i < ismax; i++) {
832: row2proc_i = row2proc[i];
833: PetscArrayzero(ctr, size);
834: irow_i = irow[i];
835: jmax = nrow[i];
836: for (j = 0; j < jmax; j++) { /* parse the indices of each IS */
837: proc = row2proc_i[j];
838: if (allrows[i]) row = j;
839: else row = irow_i[j];
841: if (proc != rank) { /* copy to the outgoing buf*/
842: ctr[proc]++;
843: *ptr[proc] = row;
844: ptr[proc]++;
845: }
846: }
847: /* Update the headers for the current IS */
848: for (j = 0; j < size; j++) { /* Can Optimise this loop too */
849: if ((ctr_j = ctr[j])) {
850: sbuf1_j = sbuf1[j];
851: k = ++sbuf1_j[0];
852: sbuf1_j[2 * k] = ctr_j;
853: sbuf1_j[2 * k - 1] = i;
854: }
855: }
856: }
858: /* Now post the sends */
859: PetscMalloc1(nrqs, &s_waits1);
860: for (i = 0; i < nrqs; ++i) {
861: j = pa[i];
862: MPI_Isend(sbuf1[j], w1[j], MPIU_INT, j, tag0, comm, s_waits1 + i);
863: }
865: /* Post Receives to capture the buffer size */
866: PetscMalloc1(nrqs, &r_waits2);
867: PetscMalloc3(nrqs, &req_source2, nrqs, &rbuf2, nrqs, &rbuf3);
868: if (nrqs) rbuf2[0] = tmp + msz;
869: for (i = 1; i < nrqs; ++i) rbuf2[i] = rbuf2[i - 1] + w1[pa[i - 1]];
870: for (i = 0; i < nrqs; ++i) {
871: j = pa[i];
872: MPI_Irecv(rbuf2[i], w1[j], MPIU_INT, j, tag2, comm, r_waits2 + i);
873: }
875: /* Send to other procs the buf size they should allocate */
876: /* Receive messages*/
877: PetscMalloc1(nrqr, &s_waits2);
878: PetscMalloc3(nrqr, &sbuf2, nrqr, &req_size, nrqr, &req_source1);
880: MPI_Waitall(nrqr, r_waits1, MPI_STATUSES_IGNORE);
881: for (i = 0; i < nrqr; ++i) {
882: req_size[i] = 0;
883: rbuf1_i = rbuf1[i];
884: start = 2 * rbuf1_i[0] + 1;
885: end = olengths1[i];
886: PetscMalloc1(end, &sbuf2[i]);
887: sbuf2_i = sbuf2[i];
888: for (j = start; j < end; j++) {
889: row = rbuf1_i[j] - rstart;
890: ncols = a_i[row + 1] - a_i[row] + b_i[row + 1] - b_i[row];
891: sbuf2_i[j] = ncols;
892: req_size[i] += ncols;
893: }
894: req_source1[i] = onodes1[i];
895: /* form the header */
896: sbuf2_i[0] = req_size[i];
897: for (j = 1; j < start; j++) sbuf2_i[j] = rbuf1_i[j];
899: MPI_Isend(sbuf2_i, end, MPIU_INT, req_source1[i], tag2, comm, s_waits2 + i);
900: }
902: PetscFree(onodes1);
903: PetscFree(olengths1);
905: PetscFree(r_waits1);
906: PetscFree4(w1, w2, w3, w4);
908: /* Receive messages*/
909: PetscMalloc1(nrqs, &r_waits3);
911: MPI_Waitall(nrqs, r_waits2, MPI_STATUSES_IGNORE);
912: for (i = 0; i < nrqs; ++i) {
913: PetscMalloc1(rbuf2[i][0], &rbuf3[i]);
914: req_source2[i] = pa[i];
915: MPI_Irecv(rbuf3[i], rbuf2[i][0], MPIU_INT, req_source2[i], tag3, comm, r_waits3 + i);
916: }
917: PetscFree(r_waits2);
919: /* Wait on sends1 and sends2 */
920: MPI_Waitall(nrqs, s_waits1, MPI_STATUSES_IGNORE);
921: MPI_Waitall(nrqr, s_waits2, MPI_STATUSES_IGNORE);
922: PetscFree(s_waits1);
923: PetscFree(s_waits2);
925: /* Now allocate sending buffers for a->j, and send them off */
926: PetscMalloc1(nrqr, &sbuf_aj);
927: for (i = 0, j = 0; i < nrqr; i++) j += req_size[i];
928: if (nrqr) PetscMalloc1(j, &sbuf_aj[0]);
929: for (i = 1; i < nrqr; i++) sbuf_aj[i] = sbuf_aj[i - 1] + req_size[i - 1];
931: PetscMalloc1(nrqr, &s_waits3);
932: {
933: for (i = 0; i < nrqr; i++) {
934: rbuf1_i = rbuf1[i];
935: sbuf_aj_i = sbuf_aj[i];
936: ct1 = 2 * rbuf1_i[0] + 1;
937: ct2 = 0;
938: for (j = 1, max1 = rbuf1_i[0]; j <= max1; j++) {
939: kmax = rbuf1[i][2 * j];
940: for (k = 0; k < kmax; k++, ct1++) {
941: row = rbuf1_i[ct1] - rstart;
942: nzA = a_i[row + 1] - a_i[row];
943: nzB = b_i[row + 1] - b_i[row];
944: ncols = nzA + nzB;
945: cworkA = a_j + a_i[row];
946: cworkB = b_j + b_i[row];
948: /* load the column indices for this row into cols */
949: cols = sbuf_aj_i + ct2;
950: for (l = 0; l < nzB; l++) {
951: if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp;
952: else break;
953: }
954: imark = l;
955: for (l = 0; l < nzA; l++) cols[imark + l] = cstart + cworkA[l];
956: for (l = imark; l < nzB; l++) cols[nzA + l] = bmap[cworkB[l]];
957: ct2 += ncols;
958: }
959: }
960: MPI_Isend(sbuf_aj_i, req_size[i], MPIU_INT, req_source1[i], tag3, comm, s_waits3 + i);
961: }
962: }
964: /* create col map: global col of C -> local col of submatrices */
965: #if defined(PETSC_USE_CTABLE)
966: for (i = 0; i < ismax; i++) {
967: if (!allcolumns[i]) {
968: PetscTableCreate(ncol[i], c->Nbs, &cmap[i]);
970: jmax = ncol[i];
971: icol_i = icol[i];
972: cmap_i = cmap[i];
973: for (j = 0; j < jmax; j++) PetscTableAdd(cmap[i], icol_i[j] + 1, j + 1, INSERT_VALUES);
974: } else cmap[i] = NULL;
975: }
976: #else
977: for (i = 0; i < ismax; i++) {
978: if (!allcolumns[i]) {
979: PetscCalloc1(c->Nbs, &cmap[i]);
980: jmax = ncol[i];
981: icol_i = icol[i];
982: cmap_i = cmap[i];
983: for (j = 0; j < jmax; j++) cmap_i[icol_i[j]] = j + 1;
984: } else cmap[i] = NULL;
985: }
986: #endif
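/*
   Note on the shift-by-one convention (illustrative example): cmap[i] maps a
   global block column of C to 1 + its local column in submats[i], with 0
   meaning "not selected". For icol[i] = {7, 3, 9} this gives cmap_i[7] = 1,
   cmap_i[3] = 2, cmap_i[9] = 3 and 0 everywhere else, which is why the code
   below tests "if (tcol)" and then stores tcol - 1. The row maps rmap[i]
   created next use the same idea for rows (without the shift in the
   non-CTABLE branch).
*/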
988: /* Create lens which is required for MatCreate... */
989: for (i = 0, j = 0; i < ismax; i++) j += nrow[i];
990: PetscMalloc1(ismax, &lens);
992: if (ismax) PetscCalloc1(j, &lens[0]);
993: for (i = 1; i < ismax; i++) lens[i] = lens[i - 1] + nrow[i - 1];
995: /* Update lens from local data */
996: for (i = 0; i < ismax; i++) {
997: row2proc_i = row2proc[i];
998: jmax = nrow[i];
999: if (!allcolumns[i]) cmap_i = cmap[i];
1000: irow_i = irow[i];
1001: lens_i = lens[i];
1002: for (j = 0; j < jmax; j++) {
1003: if (allrows[i]) row = j;
1004: else row = irow_i[j]; /* global blocked row of C */
1006: proc = row2proc_i[j];
1007: if (proc == rank) {
1008: /* Get indices from matA and then from matB */
1009: #if defined(PETSC_USE_CTABLE)
1010: PetscInt tt;
1011: #endif
1012: row = row - rstart;
1013: nzA = a_i[row + 1] - a_i[row];
1014: nzB = b_i[row + 1] - b_i[row];
1015: cworkA = a_j + a_i[row];
1016: cworkB = b_j + b_i[row];
1018: if (!allcolumns[i]) {
1019: #if defined(PETSC_USE_CTABLE)
1020: for (k = 0; k < nzA; k++) {
1021: PetscTableFind(cmap_i, cstart + cworkA[k] + 1, &tt);
1022: if (tt) lens_i[j]++;
1023: }
1024: for (k = 0; k < nzB; k++) {
1025: PetscTableFind(cmap_i, bmap[cworkB[k]] + 1, &tt);
1026: if (tt) lens_i[j]++;
1027: }
1029: #else
1030: for (k = 0; k < nzA; k++) {
1031: if (cmap_i[cstart + cworkA[k]]) lens_i[j]++;
1032: }
1033: for (k = 0; k < nzB; k++) {
1034: if (cmap_i[bmap[cworkB[k]]]) lens_i[j]++;
1035: }
1036: #endif
1037: } else { /* allcolumns */
1038: lens_i[j] = nzA + nzB;
1039: }
1040: }
1041: }
1042: }
1044: /* Create row map: global row of C -> local row of submatrices */
1045: for (i = 0; i < ismax; i++) {
1046: if (!allrows[i]) {
1047: #if defined(PETSC_USE_CTABLE)
1048: PetscTableCreate(nrow[i], c->Mbs, &rmap[i]);
1049: irow_i = irow[i];
1050: jmax = nrow[i];
1051: for (j = 0; j < jmax; j++) {
1052: if (allrows[i]) {
1053: PetscTableAdd(rmap[i], j + 1, j + 1, INSERT_VALUES);
1054: } else {
1055: PetscTableAdd(rmap[i], irow_i[j] + 1, j + 1, INSERT_VALUES);
1056: }
1057: }
1058: #else
1059: PetscCalloc1(c->Mbs, &rmap[i]);
1060: rmap_i = rmap[i];
1061: irow_i = irow[i];
1062: jmax = nrow[i];
1063: for (j = 0; j < jmax; j++) {
1064: if (allrows[i]) rmap_i[j] = j;
1065: else rmap_i[irow_i[j]] = j;
1066: }
1067: #endif
1068: } else rmap[i] = NULL;
1069: }
1071: /* Update lens from offproc data */
1072: {
1073: PetscInt *rbuf2_i, *rbuf3_i, *sbuf1_i;
1075: MPI_Waitall(nrqs, r_waits3, MPI_STATUSES_IGNORE);
1076: for (tmp2 = 0; tmp2 < nrqs; tmp2++) {
1077: sbuf1_i = sbuf1[pa[tmp2]];
1078: jmax = sbuf1_i[0];
1079: ct1 = 2 * jmax + 1;
1080: ct2 = 0;
1081: rbuf2_i = rbuf2[tmp2];
1082: rbuf3_i = rbuf3[tmp2];
1083: for (j = 1; j <= jmax; j++) {
1084: is_no = sbuf1_i[2 * j - 1];
1085: max1 = sbuf1_i[2 * j];
1086: lens_i = lens[is_no];
1087: if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1088: rmap_i = rmap[is_no];
1089: for (k = 0; k < max1; k++, ct1++) {
1090: if (allrows[is_no]) {
1091: row = sbuf1_i[ct1];
1092: } else {
1093: #if defined(PETSC_USE_CTABLE)
1094: PetscTableFind(rmap_i, sbuf1_i[ct1] + 1, &row);
1095: row--;
1097: #else
1098: row = rmap_i[sbuf1_i[ct1]]; /* the row index in the new submatrix */
1099: #endif
1100: }
1101: max2 = rbuf2_i[ct1];
1102: for (l = 0; l < max2; l++, ct2++) {
1103: if (!allcolumns[is_no]) {
1104: #if defined(PETSC_USE_CTABLE)
1105: PetscTableFind(cmap_i, rbuf3_i[ct2] + 1, &tcol);
1106: #else
1107: tcol = cmap_i[rbuf3_i[ct2]];
1108: #endif
1109: if (tcol) lens_i[row]++;
1110: } else { /* allcolumns */
1111: lens_i[row]++; /* lens_i[row] += max2 ? */
1112: }
1113: }
1114: }
1115: }
1116: }
1117: }
1118: PetscFree(r_waits3);
1119: MPI_Waitall(nrqr, s_waits3, MPI_STATUSES_IGNORE);
1120: PetscFree(s_waits3);
1122: /* Create the submatrices */
1123: for (i = 0; i < ismax; i++) {
1124: PetscInt bs_tmp;
1125: if (ijonly) bs_tmp = 1;
1126: else bs_tmp = bs;
1128: MatCreate(PETSC_COMM_SELF, submats + i);
1129: MatSetSizes(submats[i], nrow[i] * bs_tmp, ncol[i] * bs_tmp, PETSC_DETERMINE, PETSC_DETERMINE);
1131: MatSetType(submats[i], ((PetscObject)A)->type_name);
1132: MatSeqBAIJSetPreallocation(submats[i], bs_tmp, 0, lens[i]);
1133: MatSeqSBAIJSetPreallocation(submats[i], bs_tmp, 0, lens[i]); /* this subroutine is used by SBAIJ routines */
1135: /* create struct Mat_SubSppt and attach it to submat */
1136: PetscNew(&smat_i);
1137: subc = (Mat_SeqBAIJ *)submats[i]->data;
1138: subc->submatis1 = smat_i;
1140: smat_i->destroy = submats[i]->ops->destroy;
1141: submats[i]->ops->destroy = MatDestroySubMatrix_SeqBAIJ;
1142: submats[i]->factortype = C->factortype;
1144: smat_i->id = i;
1145: smat_i->nrqs = nrqs;
1146: smat_i->nrqr = nrqr;
1147: smat_i->rbuf1 = rbuf1;
1148: smat_i->rbuf2 = rbuf2;
1149: smat_i->rbuf3 = rbuf3;
1150: smat_i->sbuf2 = sbuf2;
1151: smat_i->req_source2 = req_source2;
1153: smat_i->sbuf1 = sbuf1;
1154: smat_i->ptr = ptr;
1155: smat_i->tmp = tmp;
1156: smat_i->ctr = ctr;
1158: smat_i->pa = pa;
1159: smat_i->req_size = req_size;
1160: smat_i->req_source1 = req_source1;
1162: smat_i->allcolumns = allcolumns[i];
1163: smat_i->allrows = allrows[i];
1164: smat_i->singleis = PETSC_FALSE;
1165: smat_i->row2proc = row2proc[i];
1166: smat_i->rmap = rmap[i];
1167: smat_i->cmap = cmap[i];
1168: }
1170: if (!ismax) { /* Create a dummy submats[0] to carry the reuse struct Mat_SubSppt */
1171: MatCreate(PETSC_COMM_SELF, &submats[0]);
1172: MatSetSizes(submats[0], 0, 0, PETSC_DETERMINE, PETSC_DETERMINE);
1173: MatSetType(submats[0], MATDUMMY);
1175: /* create struct Mat_SubSppt and attach it to submat */
1176: PetscNew(&smat_i);
1177: submats[0]->data = (void *)smat_i;
1179: smat_i->destroy = submats[0]->ops->destroy;
1180: submats[0]->ops->destroy = MatDestroySubMatrix_Dummy;
1181: submats[0]->factortype = C->factortype;
1183: smat_i->id = 0;
1184: smat_i->nrqs = nrqs;
1185: smat_i->nrqr = nrqr;
1186: smat_i->rbuf1 = rbuf1;
1187: smat_i->rbuf2 = rbuf2;
1188: smat_i->rbuf3 = rbuf3;
1189: smat_i->sbuf2 = sbuf2;
1190: smat_i->req_source2 = req_source2;
1192: smat_i->sbuf1 = sbuf1;
1193: smat_i->ptr = ptr;
1194: smat_i->tmp = tmp;
1195: smat_i->ctr = ctr;
1197: smat_i->pa = pa;
1198: smat_i->req_size = req_size;
1199: smat_i->req_source1 = req_source1;
1201: smat_i->allcolumns = PETSC_FALSE;
1202: smat_i->singleis = PETSC_FALSE;
1203: smat_i->row2proc = NULL;
1204: smat_i->rmap = NULL;
1205: smat_i->cmap = NULL;
1206: }
1208: if (ismax) PetscFree(lens[0]);
1209: PetscFree(lens);
1210: if (sbuf_aj) {
1211: PetscFree(sbuf_aj[0]);
1212: PetscFree(sbuf_aj);
1213: }
1215: } /* end of scall == MAT_INITIAL_MATRIX */
1217: /* Post recv matrix values */
1218: if (!ijonly) {
1219: PetscObjectGetNewTag((PetscObject)C, &tag4);
1220: PetscMalloc1(nrqs, &rbuf4);
1221: PetscMalloc1(nrqs, &r_waits4);
1222: for (i = 0; i < nrqs; ++i) {
1223: PetscMalloc1(rbuf2[i][0] * bs2, &rbuf4[i]);
1224: MPI_Irecv(rbuf4[i], rbuf2[i][0] * bs2, MPIU_SCALAR, req_source2[i], tag4, comm, r_waits4 + i);
1225: }
1227: /* Allocate sending buffers for a->a, and send them off */
1228: PetscMalloc1(nrqr, &sbuf_aa);
1229: for (i = 0, j = 0; i < nrqr; i++) j += req_size[i];
1231: if (nrqr) PetscMalloc1(j * bs2, &sbuf_aa[0]);
1232: for (i = 1; i < nrqr; i++) sbuf_aa[i] = sbuf_aa[i - 1] + req_size[i - 1] * bs2;
1234: PetscMalloc1(nrqr, &s_waits4);
1236: for (i = 0; i < nrqr; i++) {
1237: rbuf1_i = rbuf1[i];
1238: sbuf_aa_i = sbuf_aa[i];
1239: ct1 = 2 * rbuf1_i[0] + 1;
1240: ct2 = 0;
1241: for (j = 1, max1 = rbuf1_i[0]; j <= max1; j++) {
1242: kmax = rbuf1_i[2 * j];
1243: for (k = 0; k < kmax; k++, ct1++) {
1244: row = rbuf1_i[ct1] - rstart;
1245: nzA = a_i[row + 1] - a_i[row];
1246: nzB = b_i[row + 1] - b_i[row];
1247: ncols = nzA + nzB;
1248: cworkB = b_j + b_i[row];
1249: vworkA = a_a + a_i[row] * bs2;
1250: vworkB = b_a + b_i[row] * bs2;
1252: /* load the column values for this row into vals*/
1253: vals = sbuf_aa_i + ct2 * bs2;
1254: for (l = 0; l < nzB; l++) {
1255: if ((bmap[cworkB[l]]) < cstart) {
1256: PetscArraycpy(vals + l * bs2, vworkB + l * bs2, bs2);
1257: } else break;
1258: }
1259: imark = l;
1260: for (l = 0; l < nzA; l++) PetscArraycpy(vals + (imark + l) * bs2, vworkA + l * bs2, bs2);
1261: for (l = imark; l < nzB; l++) PetscArraycpy(vals + (nzA + l) * bs2, vworkB + l * bs2, bs2);
1263: ct2 += ncols;
1264: }
1265: }
1266: MPI_Isend(sbuf_aa_i, req_size[i] * bs2, MPIU_SCALAR, req_source1[i], tag4, comm, s_waits4 + i);
1267: }
1268: }
1270: /* Assemble the matrices */
1271: /* First assemble the local rows */
1272: for (i = 0; i < ismax; i++) {
1273: row2proc_i = row2proc[i];
1274: subc = (Mat_SeqBAIJ *)submats[i]->data;
1275: imat_ilen = subc->ilen;
1276: imat_j = subc->j;
1277: imat_i = subc->i;
1278: imat_a = subc->a;
1280: if (!allcolumns[i]) cmap_i = cmap[i];
1281: rmap_i = rmap[i];
1282: irow_i = irow[i];
1283: jmax = nrow[i];
1284: for (j = 0; j < jmax; j++) {
1285: if (allrows[i]) row = j;
1286: else row = irow_i[j];
1287: proc = row2proc_i[j];
1289: if (proc == rank) {
1290: row = row - rstart;
1291: nzA = a_i[row + 1] - a_i[row];
1292: nzB = b_i[row + 1] - b_i[row];
1293: cworkA = a_j + a_i[row];
1294: cworkB = b_j + b_i[row];
1295: if (!ijonly) {
1296: vworkA = a_a + a_i[row] * bs2;
1297: vworkB = b_a + b_i[row] * bs2;
1298: }
1300: if (allrows[i]) {
1301: row = row + rstart;
1302: } else {
1303: #if defined(PETSC_USE_CTABLE)
1304: PetscTableFind(rmap_i, row + rstart + 1, &row);
1305: row--;
1308: #else
1309: row = rmap_i[row + rstart];
1310: #endif
1311: }
1312: mat_i = imat_i[row];
1313: if (!ijonly) mat_a = imat_a + mat_i * bs2;
1314: mat_j = imat_j + mat_i;
1315: ilen = imat_ilen[row];
1317: /* load the column indices for this row into cols*/
1318: if (!allcolumns[i]) {
1319: for (l = 0; l < nzB; l++) {
1320: if ((ctmp = bmap[cworkB[l]]) < cstart) {
1321: #if defined(PETSC_USE_CTABLE)
1322: PetscTableFind(cmap_i, ctmp + 1, &tcol);
1323: if (tcol) {
1324: #else
1325: if ((tcol = cmap_i[ctmp])) {
1326: #endif
1327: *mat_j++ = tcol - 1;
1328: PetscArraycpy(mat_a, vworkB + l * bs2, bs2);
1329: mat_a += bs2;
1330: ilen++;
1331: }
1332: } else break;
1333: }
1334: imark = l;
1335: for (l = 0; l < nzA; l++) {
1336: #if defined(PETSC_USE_CTABLE)
1337: PetscTableFind(cmap_i, cstart + cworkA[l] + 1, &tcol);
1338: if (tcol) {
1339: #else
1340: if ((tcol = cmap_i[cstart + cworkA[l]])) {
1341: #endif
1342: *mat_j++ = tcol - 1;
1343: if (!ijonly) {
1344: PetscArraycpy(mat_a, vworkA + l * bs2, bs2);
1345: mat_a += bs2;
1346: }
1347: ilen++;
1348: }
1349: }
1350: for (l = imark; l < nzB; l++) {
1351: #if defined(PETSC_USE_CTABLE)
1352: PetscTableFind(cmap_i, bmap[cworkB[l]] + 1, &tcol);
1353: if (tcol) {
1354: #else
1355: if ((tcol = cmap_i[bmap[cworkB[l]]])) {
1356: #endif
1357: *mat_j++ = tcol - 1;
1358: if (!ijonly) {
1359: PetscArraycpy(mat_a, vworkB + l * bs2, bs2);
1360: mat_a += bs2;
1361: }
1362: ilen++;
1363: }
1364: }
1365: } else { /* allcolumns */
1366: for (l = 0; l < nzB; l++) {
1367: if ((ctmp = bmap[cworkB[l]]) < cstart) {
1368: *mat_j++ = ctmp;
1369: PetscArraycpy(mat_a, vworkB + l * bs2, bs2);
1370: mat_a += bs2;
1371: ilen++;
1372: } else break;
1373: }
1374: imark = l;
1375: for (l = 0; l < nzA; l++) {
1376: *mat_j++ = cstart + cworkA[l];
1377: if (!ijonly) {
1378: PetscArraycpy(mat_a, vworkA + l * bs2, bs2);
1379: mat_a += bs2;
1380: }
1381: ilen++;
1382: }
1383: for (l = imark; l < nzB; l++) {
1384: *mat_j++ = bmap[cworkB[l]];
1385: if (!ijonly) {
1386: PetscArraycpy(mat_a, vworkB + l * bs2, bs2);
1387: mat_a += bs2;
1388: }
1389: ilen++;
1390: }
1391: }
1392: imat_ilen[row] = ilen;
1393: }
1394: }
1395: }
1397: /* Now assemble the off-process rows */
1398: if (!ijonly) MPI_Waitall(nrqs, r_waits4, MPI_STATUSES_IGNORE);
1399: for (tmp2 = 0; tmp2 < nrqs; tmp2++) {
1400: sbuf1_i = sbuf1[pa[tmp2]];
1401: jmax = sbuf1_i[0];
1402: ct1 = 2 * jmax + 1;
1403: ct2 = 0;
1404: rbuf2_i = rbuf2[tmp2];
1405: rbuf3_i = rbuf3[tmp2];
1406: if (!ijonly) rbuf4_i = rbuf4[tmp2];
1407: for (j = 1; j <= jmax; j++) {
1408: is_no = sbuf1_i[2 * j - 1];
1409: rmap_i = rmap[is_no];
1410: if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1411: subc = (Mat_SeqBAIJ *)submats[is_no]->data;
1412: imat_ilen = subc->ilen;
1413: imat_j = subc->j;
1414: imat_i = subc->i;
1415: if (!ijonly) imat_a = subc->a;
1416: max1 = sbuf1_i[2 * j];
1417: for (k = 0; k < max1; k++, ct1++) { /* for each received block row */
1418: row = sbuf1_i[ct1];
1420: if (allrows[is_no]) {
1421: row = sbuf1_i[ct1];
1422: } else {
1423: #if defined(PETSC_USE_CTABLE)
1424: PetscTableFind(rmap_i, row + 1, &row);
1425: row--;
1427: #else
1428: row = rmap_i[row];
1429: #endif
1430: }
1431: ilen = imat_ilen[row];
1432: mat_i = imat_i[row];
1433: if (!ijonly) mat_a = imat_a + mat_i * bs2;
1434: mat_j = imat_j + mat_i;
1435: max2 = rbuf2_i[ct1];
1436: if (!allcolumns[is_no]) {
1437: for (l = 0; l < max2; l++, ct2++) {
1438: #if defined(PETSC_USE_CTABLE)
1439: PetscTableFind(cmap_i, rbuf3_i[ct2] + 1, &tcol);
1440: #else
1441: tcol = cmap_i[rbuf3_i[ct2]];
1442: #endif
1443: if (tcol) {
1444: *mat_j++ = tcol - 1;
1445: if (!ijonly) {
1446: PetscArraycpy(mat_a, rbuf4_i + ct2 * bs2, bs2);
1447: mat_a += bs2;
1448: }
1449: ilen++;
1450: }
1451: }
1452: } else { /* allcolumns */
1453: for (l = 0; l < max2; l++, ct2++) {
1454: *mat_j++ = rbuf3_i[ct2]; /* same global column index of C */
1455: if (!ijonly) {
1456: PetscArraycpy(mat_a, rbuf4_i + ct2 * bs2, bs2);
1457: mat_a += bs2;
1458: }
1459: ilen++;
1460: }
1461: }
1462: imat_ilen[row] = ilen;
1463: }
1464: }
1465: }
1467: if (!iscsorted) { /* sort column indices of the rows */
1468: MatScalar *work;
1470: PetscMalloc1(bs2, &work);
1471: for (i = 0; i < ismax; i++) {
1472: subc = (Mat_SeqBAIJ *)submats[i]->data;
1473: imat_ilen = subc->ilen;
1474: imat_j = subc->j;
1475: imat_i = subc->i;
1476: if (!ijonly) imat_a = subc->a;
1477: if (allcolumns[i]) continue;
1479: jmax = nrow[i];
1480: for (j = 0; j < jmax; j++) {
1481: mat_i = imat_i[j];
1482: mat_j = imat_j + mat_i;
1483: ilen = imat_ilen[j];
1484: if (ijonly) {
1485: PetscSortInt(ilen, mat_j);
1486: } else {
1487: mat_a = imat_a + mat_i * bs2;
1488: PetscSortIntWithDataArray(ilen, mat_j, mat_a, bs2 * sizeof(MatScalar), work);
1489: }
1490: }
1491: }
1492: PetscFree(work);
1493: }
1495: if (!ijonly) {
1496: PetscFree(r_waits4);
1497: MPI_Waitall(nrqr, s_waits4, MPI_STATUSES_IGNORE);
1498: PetscFree(s_waits4);
1499: }
1501: /* Restore the indices */
1502: for (i = 0; i < ismax; i++) {
1503: if (!allrows[i]) ISRestoreIndices(isrow[i], irow + i);
1504: if (!allcolumns[i]) ISRestoreIndices(iscol[i], icol + i);
1505: }
1507: for (i = 0; i < ismax; i++) {
1508: MatAssemblyBegin(submats[i], MAT_FINAL_ASSEMBLY);
1509: MatAssemblyEnd(submats[i], MAT_FINAL_ASSEMBLY);
1510: }
1512: PetscFree5(*(PetscInt ***)&irow, *(PetscInt ***)&icol, nrow, ncol, issorted);
1513: PetscFree5(row2proc, cmap, rmap, allcolumns, allrows);
1515: if (!ijonly) {
1516: if (sbuf_aa) {
1517: PetscFree(sbuf_aa[0]);
1518: PetscFree(sbuf_aa);
1519: }
1521: for (i = 0; i < nrqs; ++i) PetscFree(rbuf4[i]);
1522: PetscFree(rbuf4);
1523: }
1524: c->ijonly = PETSC_FALSE; /* set back to the default */
1525: return 0;
1526: }