Actual source code: plexdistribute.c

  1: #include <petsc/private/dmpleximpl.h>
  2: #include <petsc/private/dmlabelimpl.h>

  4: /*@C
  5:   DMPlexSetAdjacencyUser - Define adjacency in the mesh using a user-provided callback

  7:   Input Parameters:
  8: + dm      - The DM object
  9: . user    - The user callback, may be NULL (to clear the callback)
 10: - ctx     - context for callback evaluation, may be NULL

 12:   Level: advanced

 14:   Notes:
 15:      When a user callback is set, the caller of DMPlexGetAdjacency() may need to supply an adjacency array large enough to hold the callback's output.

 17:      Any setting here overrides other configuration of DMPlex adjacency determination.

 19: .seealso: `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexGetAdjacencyUser()`
 20: @*/
 21: PetscErrorCode DMPlexSetAdjacencyUser(DM dm, PetscErrorCode (*user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void *ctx)
 22: {
 23:   DM_Plex *mesh = (DM_Plex *)dm->data;

 26:   mesh->useradjacency    = user;
 27:   mesh->useradjacencyctx = ctx;
 28:   return 0;
 29: }
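
/* Editor's note: a hedged usage sketch for DMPlexSetAdjacencyUser(), not part of the original
   source. MyAdjacency is a hypothetical callback that reports only the point itself as adjacent;
   adjSize carries the capacity of adj[] on input and the number of entries written on output. */
#if 0
static PetscErrorCode MyAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[], void *ctx)
{
  if (*adjSize > 0) {
    adj[0]   = p; /* the point is adjacent to itself */
    *adjSize = 1;
  } else *adjSize = 0;
  return 0;
}

static PetscErrorCode ExampleSetUserAdjacency(DM dm)
{
  DMPlexSetAdjacencyUser(dm, MyAdjacency, NULL); /* register the callback */
  /* ... DMPlexGetAdjacency(), DMPlexDistribute(), etc. now use MyAdjacency ... */
  DMPlexSetAdjacencyUser(dm, NULL, NULL);        /* clear it again */
  return 0;
}
#endif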

 31: /*@C
 32:   DMPlexGetAdjacencyUser - Get the user-defined adjacency callback

 34:   Input Parameter:
 35: . dm      - The DM object

 37:   Output Parameters:
 38: + user    - The callback
 39: - ctx     - context for callback evaluation

 41:   Level: advanced

 43: .seealso: `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexGetAdjacency()`, `DMPlexSetAdjacencyUser()`
 44: @*/
 45: PetscErrorCode DMPlexGetAdjacencyUser(DM dm, PetscErrorCode (**user)(DM, PetscInt, PetscInt *, PetscInt[], void *), void **ctx)
 46: {
 47:   DM_Plex *mesh = (DM_Plex *)dm->data;

 50:   if (user) *user = mesh->useradjacency;
 51:   if (ctx) *ctx = mesh->useradjacencyctx;
 52:   return 0;
 53: }

 55: /*@
 56:   DMPlexSetAdjacencyUseAnchors - Define adjacency in the mesh using the point-to-point constraints.

 58:   Input Parameters:
 59: + dm      - The DM object
 60: - useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

 62:   Level: intermediate

 64: .seealso: `DMGetAdjacency()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
 65: @*/
 66: PetscErrorCode DMPlexSetAdjacencyUseAnchors(DM dm, PetscBool useAnchors)
 67: {
 68:   DM_Plex *mesh = (DM_Plex *)dm->data;

 71:   mesh->useAnchors = useAnchors;
 72:   return 0;
 73: }

 75: /*@
 76:   DMPlexGetAdjacencyUseAnchors - Query whether adjacency in the mesh uses the point-to-point constraints.

 78:   Input Parameter:
 79: . dm      - The DM object

 81:   Output Parameter:
 82: . useAnchors - Flag to use the constraints.  If PETSC_TRUE, then constrained points are omitted from DMPlexGetAdjacency(), and their anchor points appear in their place.

 84:   Level: intermediate

 86: .seealso: `DMPlexSetAdjacencyUseAnchors()`, `DMSetAdjacency()`, `DMGetAdjacency()`, `DMPlexDistribute()`, `DMPlexPreallocateOperator()`, `DMPlexSetAnchors()`
 87: @*/
 88: PetscErrorCode DMPlexGetAdjacencyUseAnchors(DM dm, PetscBool *useAnchors)
 89: {
 90:   DM_Plex *mesh = (DM_Plex *)dm->data;

 94:   *useAnchors = mesh->useAnchors;
 95:   return 0;
 96: }
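
/* Editor's note: a hedged sketch, not part of the original source, showing the set/query pair
   for anchor-based adjacency on a mesh whose constraints were declared with DMPlexSetAnchors(). */
#if 0
static PetscErrorCode ExampleUseAnchors(DM dm)
{
  PetscBool useAnchors;

  DMPlexSetAdjacencyUseAnchors(dm, PETSC_TRUE);  /* constrained points are replaced by their anchors */
  DMPlexGetAdjacencyUseAnchors(dm, &useAnchors); /* useAnchors is now PETSC_TRUE */
  return 0;
}
#endif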

 98: static PetscErrorCode DMPlexGetAdjacency_Cone_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
 99: {
100:   const PetscInt *cone   = NULL;
101:   PetscInt        numAdj = 0, maxAdjSize = *adjSize, coneSize, c;

104:   DMPlexGetConeSize(dm, p, &coneSize);
105:   DMPlexGetCone(dm, p, &cone);
106:   for (c = 0; c <= coneSize; ++c) {
107:     const PetscInt  point   = !c ? p : cone[c - 1];
108:     const PetscInt *support = NULL;
109:     PetscInt        supportSize, s, q;

111:     DMPlexGetSupportSize(dm, point, &supportSize);
112:     DMPlexGetSupport(dm, point, &support);
113:     for (s = 0; s < supportSize; ++s) {
114:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = support[s]), 0); ++q) {
115:         if (support[s] == adj[q]) break;
116:       }
118:     }
119:   }
120:   *adjSize = numAdj;
121:   return 0;
122: }

124: static PetscErrorCode DMPlexGetAdjacency_Support_Internal(DM dm, PetscInt p, PetscInt *adjSize, PetscInt adj[])
125: {
126:   const PetscInt *support = NULL;
127:   PetscInt        numAdj = 0, maxAdjSize = *adjSize, supportSize, s;

130:   DMPlexGetSupportSize(dm, p, &supportSize);
131:   DMPlexGetSupport(dm, p, &support);
132:   for (s = 0; s <= supportSize; ++s) {
133:     const PetscInt  point = !s ? p : support[s - 1];
134:     const PetscInt *cone  = NULL;
135:     PetscInt        coneSize, c, q;

137:     DMPlexGetConeSize(dm, point, &coneSize);
138:     DMPlexGetCone(dm, point, &cone);
139:     for (c = 0; c < coneSize; ++c) {
140:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = cone[c]), 0); ++q) {
141:         if (cone[c] == adj[q]) break;
142:       }
144:     }
145:   }
146:   *adjSize = numAdj;
147:   return 0;
148: }

150: static PetscErrorCode DMPlexGetAdjacency_Transitive_Internal(DM dm, PetscInt p, PetscBool useClosure, PetscInt *adjSize, PetscInt adj[])
151: {
152:   PetscInt *star   = NULL;
153:   PetscInt  numAdj = 0, maxAdjSize = *adjSize, starSize, s;

156:   DMPlexGetTransitiveClosure(dm, p, useClosure, &starSize, &star);
157:   for (s = 0; s < starSize * 2; s += 2) {
158:     const PetscInt *closure = NULL;
159:     PetscInt        closureSize, c, q;

161:     DMPlexGetTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure);
162:     for (c = 0; c < closureSize * 2; c += 2) {
163:       for (q = 0; q < numAdj || ((void)(adj[numAdj++] = closure[c]), 0); ++q) {
164:         if (closure[c] == adj[q]) break;
165:       }
167:     }
168:     DMPlexRestoreTransitiveClosure(dm, star[s], (PetscBool)!useClosure, &closureSize, (PetscInt **)&closure);
169:   }
170:   DMPlexRestoreTransitiveClosure(dm, p, useClosure, &starSize, &star);
171:   *adjSize = numAdj;
172:   return 0;
173: }

175: PetscErrorCode DMPlexGetAdjacency_Internal(DM dm, PetscInt p, PetscBool useCone, PetscBool useTransitiveClosure, PetscBool useAnchors, PetscInt *adjSize, PetscInt *adj[])
176: {
177:   static PetscInt asiz       = 0;
178:   PetscInt        maxAnchors = 1;
179:   PetscInt        aStart = -1, aEnd = -1;
180:   PetscInt        maxAdjSize;
181:   PetscSection    aSec = NULL;
182:   IS              aIS  = NULL;
183:   const PetscInt *anchors;
184:   DM_Plex        *mesh = (DM_Plex *)dm->data;

187:   if (useAnchors) {
188:     DMPlexGetAnchors(dm, &aSec, &aIS);
189:     if (aSec) {
190:       PetscSectionGetMaxDof(aSec, &maxAnchors);
191:       maxAnchors = PetscMax(1, maxAnchors);
192:       PetscSectionGetChart(aSec, &aStart, &aEnd);
193:       ISGetIndices(aIS, &anchors);
194:     }
195:   }
196:   if (!*adj) {
197:     PetscInt depth, coneSeries, supportSeries, maxC, maxS, pStart, pEnd;

199:     DMPlexGetChart(dm, &pStart, &pEnd);
200:     DMPlexGetDepth(dm, &depth);
201:     depth = PetscMax(depth, -depth);
202:     DMPlexGetMaxSizes(dm, &maxC, &maxS);
203:     coneSeries    = (maxC > 1) ? ((PetscPowInt(maxC, depth + 1) - 1) / (maxC - 1)) : depth + 1;
204:     supportSeries = (maxS > 1) ? ((PetscPowInt(maxS, depth + 1) - 1) / (maxS - 1)) : depth + 1;
205:     asiz          = PetscMax(PetscPowInt(maxS, depth) * coneSeries, PetscPowInt(maxC, depth) * supportSeries);
206:     asiz *= maxAnchors;
207:     asiz = PetscMin(asiz, pEnd - pStart);
208:     PetscMalloc1(asiz, adj);
209:   }
210:   if (*adjSize < 0) *adjSize = asiz;
211:   maxAdjSize = *adjSize;
212:   if (mesh->useradjacency) {
213:     (*mesh->useradjacency)(dm, p, adjSize, *adj, mesh->useradjacencyctx);
214:   } else if (useTransitiveClosure) {
215:     DMPlexGetAdjacency_Transitive_Internal(dm, p, useCone, adjSize, *adj);
216:   } else if (useCone) {
217:     DMPlexGetAdjacency_Cone_Internal(dm, p, adjSize, *adj);
218:   } else {
219:     DMPlexGetAdjacency_Support_Internal(dm, p, adjSize, *adj);
220:   }
221:   if (useAnchors && aSec) {
222:     PetscInt  origSize = *adjSize;
223:     PetscInt  numAdj   = origSize;
224:     PetscInt  i        = 0, j;
225:     PetscInt *orig     = *adj;

227:     while (i < origSize) {
228:       PetscInt p    = orig[i];
229:       PetscInt aDof = 0;

231:       if (p >= aStart && p < aEnd) PetscSectionGetDof(aSec, p, &aDof);
232:       if (aDof) {
233:         PetscInt aOff;
234:         PetscInt s, q;

236:         for (j = i + 1; j < numAdj; j++) orig[j - 1] = orig[j];
237:         origSize--;
238:         numAdj--;
239:         PetscSectionGetOffset(aSec, p, &aOff);
240:         for (s = 0; s < aDof; ++s) {
241:           for (q = 0; q < numAdj || ((void)(orig[numAdj++] = anchors[aOff + s]), 0); ++q) {
242:             if (anchors[aOff + s] == orig[q]) break;
243:           }
245:         }
246:       } else {
247:         i++;
248:       }
249:     }
250:     *adjSize = numAdj;
251:     ISRestoreIndices(aIS, &anchors);
252:   }
253:   return 0;
254: }

256: /*@
257:   DMPlexGetAdjacency - Return all points adjacent to the given point

259:   Input Parameters:
260: + dm - The DM object
261: - p  - The point

263:   Input/Output Parameters:
264: + adjSize - The maximum size of adj if it is non-NULL, or PETSC_DETERMINE;
265:             on output the number of adjacent points
266: - adj - Either NULL so that the array is allocated, or an existing array with size adjSize;
267:         on output contains the adjacent points

269:   Level: advanced

271:   Notes:
272:     The user must PetscFree the adj array if it was not passed in.

274: .seealso: `DMSetAdjacency()`, `DMPlexDistribute()`, `DMCreateMatrix()`, `DMPlexPreallocateOperator()`
275: @*/
276: PetscErrorCode DMPlexGetAdjacency(DM dm, PetscInt p, PetscInt *adjSize, PetscInt *adj[])
277: {
278:   PetscBool useCone, useClosure, useAnchors;

284:   DMGetBasicAdjacency(dm, &useCone, &useClosure);
285:   DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
286:   DMPlexGetAdjacency_Internal(dm, p, useCone, useClosure, useAnchors, adjSize, adj);
287:   return 0;
288: }
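
/* Editor's note: a hedged sketch, not part of the original source. Passing adj = NULL lets
   DMPlexGetAdjacency() size and allocate the array; the caller must PetscFree() it, and must
   reset adjSize to the array capacity before reusing the same array for another point. */
#if 0
static PetscErrorCode ExampleGetAdjacency(DM dm, PetscInt p)
{
  PetscInt *adj = NULL, adjSize = PETSC_DETERMINE, a;

  DMPlexGetAdjacency(dm, p, &adjSize, &adj);
  for (a = 0; a < adjSize; ++a) PetscPrintf(PETSC_COMM_SELF, "%" PetscInt_FMT " is adjacent to %" PetscInt_FMT "\n", p, adj[a]);
  PetscFree(adj);
  return 0;
}
#endif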

290: /*@
291:   DMPlexCreateTwoSidedProcessSF - Create an SF which just has process connectivity

293:   Collective on dm

295:   Input Parameters:
296: + dm      - The DM
297: . sfPoint - The PetscSF which encodes point connectivity
298: . rootRankSection - The number of leaves for a given root point
299: . rootRanks - The rank of each edge into the root point
300: . leafRankSection - The number of processes sharing a given leaf point
301: - leafRanks - The rank of each process sharing a leaf point

303:   Output Parameters:
304: + processRanks - A list of process neighbors, or NULL
305: - sfProcess    - An SF encoding the two-sided process connectivity, or NULL

307:   Level: developer

309: .seealso: `PetscSFCreate()`, `DMPlexCreateProcessSF()`
310: @*/
311: PetscErrorCode DMPlexCreateTwoSidedProcessSF(DM dm, PetscSF sfPoint, PetscSection rootRankSection, IS rootRanks, PetscSection leafRankSection, IS leafRanks, IS *processRanks, PetscSF *sfProcess)
312: {
313:   const PetscSFNode *remotePoints;
314:   PetscInt          *localPointsNew;
315:   PetscSFNode       *remotePointsNew;
316:   const PetscInt    *nranks;
317:   PetscInt          *ranksNew;
318:   PetscBT            neighbors;
319:   PetscInt           pStart, pEnd, p, numLeaves, l, numNeighbors, n;
320:   PetscMPIInt        size, proc, rank;

326:   MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size);
327:   MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank);
328:   PetscSFGetGraph(sfPoint, NULL, &numLeaves, NULL, &remotePoints);
329:   PetscBTCreate(size, &neighbors);
330:   PetscBTMemzero(size, neighbors);
331:   /* Compute root-to-leaf process connectivity */
332:   PetscSectionGetChart(rootRankSection, &pStart, &pEnd);
333:   ISGetIndices(rootRanks, &nranks);
334:   for (p = pStart; p < pEnd; ++p) {
335:     PetscInt ndof, noff, n;

337:     PetscSectionGetDof(rootRankSection, p, &ndof);
338:     PetscSectionGetOffset(rootRankSection, p, &noff);
339:     for (n = 0; n < ndof; ++n) PetscBTSet(neighbors, nranks[noff + n]);
340:   }
341:   ISRestoreIndices(rootRanks, &nranks);
342:   /* Compute leaf-to-neighbor process connectivity */
343:   PetscSectionGetChart(leafRankSection, &pStart, &pEnd);
344:   ISGetIndices(leafRanks, &nranks);
345:   for (p = pStart; p < pEnd; ++p) {
346:     PetscInt ndof, noff, n;

348:     PetscSectionGetDof(leafRankSection, p, &ndof);
349:     PetscSectionGetOffset(leafRankSection, p, &noff);
350:     for (n = 0; n < ndof; ++n) PetscBTSet(neighbors, nranks[noff + n]);
351:   }
352:   ISRestoreIndices(leafRanks, &nranks);
353:   /* Compute leaf-to-root process connectivity */
354:   for (l = 0; l < numLeaves; ++l) PetscBTSet(neighbors, remotePoints[l].rank);
355:   /* Calculate edges */
356:   PetscBTClear(neighbors, rank);
357:   for (proc = 0, numNeighbors = 0; proc < size; ++proc) {
358:     if (PetscBTLookup(neighbors, proc)) ++numNeighbors;
359:   }
360:   PetscMalloc1(numNeighbors, &ranksNew);
361:   PetscMalloc1(numNeighbors, &localPointsNew);
362:   PetscMalloc1(numNeighbors, &remotePointsNew);
363:   for (proc = 0, n = 0; proc < size; ++proc) {
364:     if (PetscBTLookup(neighbors, proc)) {
365:       ranksNew[n]              = proc;
366:       localPointsNew[n]        = proc;
367:       remotePointsNew[n].index = rank;
368:       remotePointsNew[n].rank  = proc;
369:       ++n;
370:     }
371:   }
372:   PetscBTDestroy(&neighbors);
373:   if (processRanks) ISCreateGeneral(PetscObjectComm((PetscObject)dm), numNeighbors, ranksNew, PETSC_OWN_POINTER, processRanks);
374:   else PetscFree(ranksNew);
375:   if (sfProcess) {
376:     PetscSFCreate(PetscObjectComm((PetscObject)dm), sfProcess);
377:     PetscObjectSetName((PetscObject)*sfProcess, "Two-Sided Process SF");
378:     PetscSFSetFromOptions(*sfProcess);
379:     PetscSFSetGraph(*sfProcess, size, numNeighbors, localPointsNew, PETSC_OWN_POINTER, remotePointsNew, PETSC_OWN_POINTER);
380:   }
381:   return 0;
382: }
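
/* Editor's note: a hedged sketch, not part of the original source. It assumes the sections and
   ranks have already been computed with DMPlexDistributeOwnership() below; the resulting IS
   lists this rank's process neighbors. */
#if 0
static PetscErrorCode ExampleTwoSidedProcessSF(DM dm, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank)
{
  PetscSF sfPoint, sfProcess;
  IS      procRanks;

  DMGetPointSF(dm, &sfPoint);
  DMPlexCreateTwoSidedProcessSF(dm, sfPoint, rootSection, rootrank, leafSection, leafrank, &procRanks, &sfProcess);
  ISView(procRanks, PETSC_VIEWER_STDOUT_WORLD); /* list of neighboring ranks */
  ISDestroy(&procRanks);
  PetscSFDestroy(&sfProcess);
  return 0;
}
#endif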

384: /*@
385:   DMPlexDistributeOwnership - Compute owner information for shared points. This essentially makes the one-sided information in the point SF two-sided, so that owners also know which processes share each point.

387:   Collective on dm

389:   Input Parameter:
390: . dm - The DM

392:   Output Parameters:
393: + rootSection - The number of leaves for a given root point
394: . rootrank    - The rank of each edge into the root point
395: . leafSection - The number of processes sharing a given leaf point
396: - leafrank    - The rank of each process sharing a leaf point

398:   Level: developer

400: .seealso: `DMPlexCreateOverlapLabel()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
401: @*/
402: PetscErrorCode DMPlexDistributeOwnership(DM dm, PetscSection rootSection, IS *rootrank, PetscSection leafSection, IS *leafrank)
403: {
404:   MPI_Comm        comm;
405:   PetscSF         sfPoint;
406:   const PetscInt *rootdegree;
407:   PetscInt       *myrank, *remoterank;
408:   PetscInt        pStart, pEnd, p, nedges;
409:   PetscMPIInt     rank;

411:   PetscObjectGetComm((PetscObject)dm, &comm);
412:   MPI_Comm_rank(comm, &rank);
413:   DMPlexGetChart(dm, &pStart, &pEnd);
414:   DMGetPointSF(dm, &sfPoint);
415:   /* Compute number of leaves for each root */
416:   PetscObjectSetName((PetscObject)rootSection, "Root Section");
417:   PetscSectionSetChart(rootSection, pStart, pEnd);
418:   PetscSFComputeDegreeBegin(sfPoint, &rootdegree);
419:   PetscSFComputeDegreeEnd(sfPoint, &rootdegree);
420:   for (p = pStart; p < pEnd; ++p) PetscSectionSetDof(rootSection, p, rootdegree[p - pStart]);
421:   PetscSectionSetUp(rootSection);
422:   /* Gather rank of each leaf to root */
423:   PetscSectionGetStorageSize(rootSection, &nedges);
424:   PetscMalloc1(pEnd - pStart, &myrank);
425:   PetscMalloc1(nedges, &remoterank);
426:   for (p = 0; p < pEnd - pStart; ++p) myrank[p] = rank;
427:   PetscSFGatherBegin(sfPoint, MPIU_INT, myrank, remoterank);
428:   PetscSFGatherEnd(sfPoint, MPIU_INT, myrank, remoterank);
429:   PetscFree(myrank);
430:   ISCreateGeneral(comm, nedges, remoterank, PETSC_OWN_POINTER, rootrank);
431:   /* Distribute remote ranks to leaves */
432:   PetscObjectSetName((PetscObject)leafSection, "Leaf Section");
433:   DMPlexDistributeFieldIS(dm, sfPoint, rootSection, *rootrank, leafSection, leafrank);
434:   return 0;
435: }
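
/* Editor's note: a hedged sketch, not part of the original source, pairing
   DMPlexDistributeOwnership() with DMPlexCreateOverlapLabel() below, roughly as
   DMPlexDistributeOverlap() does internally. */
#if 0
static PetscErrorCode ExampleOwnershipAndOverlap(DM dm, DMLabel *ovLabel)
{
  MPI_Comm     comm;
  PetscSection rootSection, leafSection;
  IS           rootrank, leafrank;

  PetscObjectGetComm((PetscObject)dm, &comm);
  PetscSectionCreate(comm, &rootSection);
  PetscSectionCreate(comm, &leafSection);
  DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
  DMPlexCreateOverlapLabel(dm, 1, rootSection, rootrank, leafSection, leafrank, ovLabel);
  ISDestroy(&rootrank);
  ISDestroy(&leafrank);
  PetscSectionDestroy(&rootSection);
  PetscSectionDestroy(&leafSection);
  return 0;
}
#endif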

437: #if 0
438: static PetscErrorCode DMPlexCopyOverlapLabels(DM dm, DM ndm)
439: {
440:   DM_Plex *mesh  = (DM_Plex *) dm->data;
441:   DM_Plex *nmesh = (DM_Plex *) ndm->data;

443:   if (mesh->numOvLabels) {
444:     const char *name;
445:     PetscInt    l;

447:     nmesh->numOvLabels = mesh->numOvLabels;
448:     for (l = 0; l < mesh->numOvLabels; ++l) {
449:       PetscObjectGetName((PetscObject) mesh->ovLabels[l], &name);
450:       DMGetLabel(ndm, name, &nmesh->ovLabels[l]);
451:       nmesh->ovValues[l] = mesh->ovValues[l];
452:     }
453:     PetscObjectGetName((PetscObject) mesh->ovExLabel, &name);
454:     DMGetLabel(ndm, name, &nmesh->ovExLabel);
455:     nmesh->ovExValue = mesh->ovExValue;
456:   }
457:   return 0;
458: }
459: #endif

461: /*@C
462:   DMPlexCreateOverlapLabel - Compute a label indicating what overlap points should be sent to new processes

464:   Collective on dm

466:   Input Parameters:
467: + dm          - The DM
468: . levels      - Number of overlap levels
469: . rootSection - The number of leaves for a given root point
470: . rootrank    - The rank of each edge into the root point
471: . leafSection - The number of processes sharing a given leaf point
472: - leafrank    - The rank of each process sharing a leaf point

474:   Output Parameter:
475: . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings

477:   Level: developer

479: .seealso: `DMPlexCreateOverlapLabelFromLabels()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
480: @*/
481: PetscErrorCode DMPlexCreateOverlapLabel(DM dm, PetscInt levels, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
482: {
483:   MPI_Comm           comm;
484:   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
485:   PetscSF            sfPoint;
486:   const PetscSFNode *remote;
487:   const PetscInt    *local;
488:   const PetscInt    *nrank, *rrank;
489:   PetscInt          *adj = NULL;
490:   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l;
491:   PetscMPIInt        rank, size;
492:   PetscBool          flg;

494:   *ovLabel = NULL;
495:   PetscObjectGetComm((PetscObject)dm, &comm);
496:   MPI_Comm_size(comm, &size);
497:   MPI_Comm_rank(comm, &rank);
498:   if (size == 1) return 0;
499:   DMGetPointSF(dm, &sfPoint);
500:   DMPlexGetChart(dm, &pStart, &pEnd);
501:   if (!levels) {
502:     /* Add owned points */
503:     DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
504:     for (p = pStart; p < pEnd; ++p) DMLabelSetValue(*ovLabel, p, rank);
505:     return 0;
506:   }
507:   PetscSectionGetChart(leafSection, &sStart, &sEnd);
508:   PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
509:   DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);
510:   /* Handle leaves: shared with the root point */
511:   for (l = 0; l < nleaves; ++l) {
512:     PetscInt adjSize = PETSC_DETERMINE, a;

514:     DMPlexGetAdjacency(dm, local ? local[l] : l, &adjSize, &adj);
515:     for (a = 0; a < adjSize; ++a) DMLabelSetValue(ovAdjByRank, adj[a], remote[l].rank);
516:   }
517:   ISGetIndices(rootrank, &rrank);
518:   ISGetIndices(leafrank, &nrank);
519:   /* Handle roots */
520:   for (p = pStart; p < pEnd; ++p) {
521:     PetscInt adjSize = PETSC_DETERMINE, neighbors = 0, noff, n, a;

523:     if ((p >= sStart) && (p < sEnd)) {
524:       /* Some leaves share a root with other leaves on different processes */
525:       PetscSectionGetDof(leafSection, p, &neighbors);
526:       if (neighbors) {
527:         PetscSectionGetOffset(leafSection, p, &noff);
528:         DMPlexGetAdjacency(dm, p, &adjSize, &adj);
529:         for (n = 0; n < neighbors; ++n) {
530:           const PetscInt remoteRank = nrank[noff + n];

532:           if (remoteRank == rank) continue;
533:           for (a = 0; a < adjSize; ++a) DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);
534:         }
535:       }
536:     }
537:     /* Roots are shared with leaves */
538:     PetscSectionGetDof(rootSection, p, &neighbors);
539:     if (!neighbors) continue;
540:     PetscSectionGetOffset(rootSection, p, &noff);
541:     DMPlexGetAdjacency(dm, p, &adjSize, &adj);
542:     for (n = 0; n < neighbors; ++n) {
543:       const PetscInt remoteRank = rrank[noff + n];

545:       if (remoteRank == rank) continue;
546:       for (a = 0; a < adjSize; ++a) DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);
547:     }
548:   }
549:   PetscFree(adj);
550:   ISRestoreIndices(rootrank, &rrank);
551:   ISRestoreIndices(leafrank, &nrank);
552:   /* Add additional overlap levels */
553:   for (l = 1; l < levels; l++) {
554:     /* Propagate point donations over SF to capture remote connections */
555:     DMPlexPartitionLabelPropagate(dm, ovAdjByRank);
556:     /* Add next level of point donations to the label */
557:     DMPlexPartitionLabelAdjacency(dm, ovAdjByRank);
558:   }
559:   /* We require the closure in the overlap */
560:   DMPlexPartitionLabelClosure(dm, ovAdjByRank);
561:   PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg);
562:   if (flg) {
563:     PetscViewer viewer;
564:     PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer);
565:     DMLabelView(ovAdjByRank, viewer);
566:   }
567:   /* Invert sender to receiver label */
568:   DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
569:   DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel);
570:   /* Add owned points, except for shared local points */
571:   for (p = pStart; p < pEnd; ++p) DMLabelSetValue(*ovLabel, p, rank);
572:   for (l = 0; l < nleaves; ++l) {
573:     DMLabelClearValue(*ovLabel, local[l], rank);
574:     DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
575:   }
576:   /* Clean up */
577:   DMLabelDestroy(&ovAdjByRank);
578:   return 0;
579: }
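
/* Editor's note: a hedged sketch, not part of the original source. Most callers reach this
   machinery through the higher-level entry points; one overlap level can be added to an
   already distributed mesh as follows. */
#if 0
static PetscErrorCode ExampleAddOverlap(DM dm, DM *dmOverlap)
{
  PetscSF sfOverlap = NULL;

  DMPlexDistributeOverlap(dm, 1, &sfOverlap, dmOverlap);
  /* *dmOverlap may be NULL if nothing was done (e.g. on a single process); otherwise it replaces dm */
  PetscSFDestroy(&sfOverlap);
  return 0;
}
#endif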

581: static PetscErrorCode HandlePoint_Private(DM dm, PetscInt p, PetscSection section, const PetscInt ranks[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], DMLabel ovAdjByRank)
582: {
583:   PetscInt neighbors, el;

585:   PetscSectionGetDof(section, p, &neighbors);
586:   if (neighbors) {
587:     PetscInt   *adj     = NULL;
588:     PetscInt    adjSize = PETSC_DETERMINE, noff, n, a;
589:     PetscMPIInt rank;

591:     MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank);
592:     PetscSectionGetOffset(section, p, &noff);
593:     DMPlexGetAdjacency(dm, p, &adjSize, &adj);
594:     for (n = 0; n < neighbors; ++n) {
595:       const PetscInt remoteRank = ranks[noff + n];

597:       if (remoteRank == rank) continue;
598:       for (a = 0; a < adjSize; ++a) {
599:         PetscBool insert = PETSC_TRUE;

601:         for (el = 0; el < numExLabels; ++el) {
602:           PetscInt exVal;
603:           DMLabelGetValue(exLabel[el], adj[a], &exVal);
604:           if (exVal == exValue[el]) {
605:             insert = PETSC_FALSE;
606:             break;
607:           }
608:         }
609:         if (insert) DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);
610:       }
611:     }
612:     PetscFree(adj);
613:   }
614:   return 0;
615: }

617: /*@C
618:   DMPlexCreateOverlapLabelFromLabels - Compute a label indicating what overlap points should be sent to new processes

620:   Collective on dm

622:   Input Parameters:
623: + dm          - The DM
624: . numLabels   - The number of labels to draw candidate points from
625: . label       - An array of labels containing candidate points
626: . value       - An array of label values marking the candidate points
627: . numExLabels - The number of labels to use for exclusion
628: . exLabel     - An array of labels indicating points to be excluded, or NULL
629: . exValue     - An array of label values to be excluded, or NULL
630: . rootSection - The number of leaves for a given root point
631: . rootrank    - The rank of each edge into the root point
632: . leafSection - The number of processes sharing a given leaf point
633: - leafrank    - The rank of each process sharing a leaf point

635:   Output Parameter:
636: . ovLabel     - DMLabel containing remote overlap contributions as point/rank pairings

638:   Note:
639:   The candidate points are only added to the overlap if they are adjacent to a shared point

641:   Level: developer

643: .seealso: `DMPlexCreateOverlapLabel()`, `DMPlexGetAdjacency()`, `DMPlexDistributeOwnership()`, `DMPlexDistribute()`
644: @*/
645: PetscErrorCode DMPlexCreateOverlapLabelFromLabels(DM dm, PetscInt numLabels, const DMLabel label[], const PetscInt value[], PetscInt numExLabels, const DMLabel exLabel[], const PetscInt exValue[], PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
646: {
647:   MPI_Comm           comm;
648:   DMLabel            ovAdjByRank; /* A DMLabel containing all points adjacent to shared points, separated by rank (value in label) */
649:   PetscSF            sfPoint;
650:   const PetscSFNode *remote;
651:   const PetscInt    *local;
652:   const PetscInt    *nrank, *rrank;
653:   PetscInt          *adj = NULL;
654:   PetscInt           pStart, pEnd, p, sStart, sEnd, nleaves, l, el;
655:   PetscMPIInt        rank, size;
656:   PetscBool          flg;

658:   *ovLabel = NULL;
659:   PetscObjectGetComm((PetscObject)dm, &comm);
660:   MPI_Comm_size(comm, &size);
661:   MPI_Comm_rank(comm, &rank);
662:   if (size == 1) return 0;
663:   DMGetPointSF(dm, &sfPoint);
664:   DMPlexGetChart(dm, &pStart, &pEnd);
665:   PetscSectionGetChart(leafSection, &sStart, &sEnd);
666:   PetscSFGetGraph(sfPoint, NULL, &nleaves, &local, &remote);
667:   DMLabelCreate(PETSC_COMM_SELF, "Overlap adjacency", &ovAdjByRank);
668:   ISGetIndices(rootrank, &rrank);
669:   ISGetIndices(leafrank, &nrank);
670:   for (l = 0; l < numLabels; ++l) {
671:     IS              valIS;
672:     const PetscInt *points;
673:     PetscInt        n;

675:     DMLabelGetStratumIS(label[l], value[l], &valIS);
676:     if (!valIS) continue;
677:     ISGetIndices(valIS, &points);
678:     ISGetLocalSize(valIS, &n);
679:     for (PetscInt i = 0; i < n; ++i) {
680:       const PetscInt p = points[i];

682:       if ((p >= sStart) && (p < sEnd)) {
683:         PetscInt loc, adjSize = PETSC_DETERMINE;

685:         /* Handle leaves: shared with the root point */
686:         if (local) PetscFindInt(p, nleaves, local, &loc);
687:         else loc = (p >= 0 && p < nleaves) ? p : -1;
688:         if (loc >= 0) {
689:           const PetscInt remoteRank = remote[loc].rank;

691:           DMPlexGetAdjacency(dm, p, &adjSize, &adj);
692:           for (PetscInt a = 0; a < adjSize; ++a) {
693:             PetscBool insert = PETSC_TRUE;

695:             for (el = 0; el < numExLabels; ++el) {
696:               PetscInt exVal;
697:               DMLabelGetValue(exLabel[el], adj[a], &exVal);
698:               if (exVal == exValue[el]) {
699:                 insert = PETSC_FALSE;
700:                 break;
701:               }
702:             }
703:             if (insert) DMLabelSetValue(ovAdjByRank, adj[a], remoteRank);
704:           }
705:         }
706:         /* Some leaves share a root with other leaves on different processes */
707:         HandlePoint_Private(dm, p, leafSection, nrank, numExLabels, exLabel, exValue, ovAdjByRank);
708:       }
709:       /* Roots are shared with leaves */
710:       HandlePoint_Private(dm, p, rootSection, rrank, numExLabels, exLabel, exValue, ovAdjByRank);
711:     }
712:     ISRestoreIndices(valIS, &points);
713:     ISDestroy(&valIS);
714:   }
715:   PetscFree(adj);
716:   ISRestoreIndices(rootrank, &rrank);
717:   ISRestoreIndices(leafrank, &nrank);
718:   /* We require the closure in the overlap */
719:   DMPlexPartitionLabelClosure(dm, ovAdjByRank);
720:   PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-overlap_view", &flg);
721:   if (flg) {
722:     PetscViewer viewer;
723:     PetscViewerASCIIGetStdout(PetscObjectComm((PetscObject)dm), &viewer);
724:     DMLabelView(ovAdjByRank, viewer);
725:   }
726:   /* Invert sender to receiver label */
727:   DMLabelCreate(PETSC_COMM_SELF, "Overlap label", ovLabel);
728:   DMPlexPartitionLabelInvert(dm, ovAdjByRank, NULL, *ovLabel);
729:   /* Add owned points, except for shared local points */
730:   for (p = pStart; p < pEnd; ++p) DMLabelSetValue(*ovLabel, p, rank);
731:   for (l = 0; l < nleaves; ++l) {
732:     DMLabelClearValue(*ovLabel, local[l], rank);
733:     DMLabelSetValue(*ovLabel, remote[l].index, remote[l].rank);
734:   }
735:   /* Clean up */
736:   DMLabelDestroy(&ovAdjByRank);
737:   return 0;
738: }
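
/* Editor's note: a hedged sketch, not part of the original source. It assumes a hypothetical
   label "candidates" whose value-1 points are the overlap candidates, ownership data from
   DMPlexDistributeOwnership(), and no exclusion labels. */
#if 0
static PetscErrorCode ExampleOverlapFromLabels(DM dm, PetscSection rootSection, IS rootrank, PetscSection leafSection, IS leafrank, DMLabel *ovLabel)
{
  DMLabel  candLabel;
  PetscInt candValue = 1;

  DMGetLabel(dm, "candidates", &candLabel);
  DMPlexCreateOverlapLabelFromLabels(dm, 1, &candLabel, &candValue, 0, NULL, NULL, rootSection, rootrank, leafSection, leafrank, ovLabel);
  return 0;
}
#endif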

740: /*@C
741:   DMPlexCreateOverlapMigrationSF - Create an SF describing the new mesh distribution that realizes the overlap described by the input SF

743:   Collective on dm

745:   Input Parameters:
746: + dm          - The DM
747: - overlapSF   - The SF mapping ghost points in overlap to owner points on other processes

749:   Output Parameter:
750: . migrationSF - An SF that maps original points in old locations to points in new locations

752:   Level: developer

754: .seealso: `DMPlexCreateOverlapLabel()`, `DMPlexDistributeOverlap()`, `DMPlexDistribute()`
755: @*/
756: PetscErrorCode DMPlexCreateOverlapMigrationSF(DM dm, PetscSF overlapSF, PetscSF *migrationSF)
757: {
758:   MPI_Comm           comm;
759:   PetscMPIInt        rank, size;
760:   PetscInt           d, dim, p, pStart, pEnd, nroots, nleaves, newLeaves, point, numSharedPoints;
761:   PetscInt          *pointDepths, *remoteDepths, *ilocal;
762:   PetscInt          *depthRecv, *depthShift, *depthIdx;
763:   PetscSFNode       *iremote;
764:   PetscSF            pointSF;
765:   const PetscInt    *sharedLocal;
766:   const PetscSFNode *overlapRemote, *sharedRemote;

769:   PetscObjectGetComm((PetscObject)dm, &comm);
770:   MPI_Comm_rank(comm, &rank);
771:   MPI_Comm_size(comm, &size);
772:   DMGetDimension(dm, &dim);

774:   /* Before building the migration SF we need to know the new stratum offsets */
775:   PetscSFGetGraph(overlapSF, &nroots, &nleaves, NULL, &overlapRemote);
776:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
777:   for (d = 0; d < dim + 1; d++) {
778:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
779:     for (p = pStart; p < pEnd; p++) pointDepths[p] = d;
780:   }
781:   for (p = 0; p < nleaves; p++) remoteDepths[p] = -1;
782:   PetscSFBcastBegin(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE);
783:   PetscSFBcastEnd(overlapSF, MPIU_INT, pointDepths, remoteDepths, MPI_REPLACE);

785:   /* Count received points in each stratum and compute the internal strata shift */
786:   PetscMalloc3(dim + 1, &depthRecv, dim + 1, &depthShift, dim + 1, &depthIdx);
787:   for (d = 0; d < dim + 1; d++) depthRecv[d] = 0;
788:   for (p = 0; p < nleaves; p++) depthRecv[remoteDepths[p]]++;
789:   depthShift[dim] = 0;
790:   for (d = 0; d < dim; d++) depthShift[d] = depthRecv[dim];
791:   for (d = 1; d < dim; d++) depthShift[d] += depthRecv[0];
792:   for (d = dim - 2; d > 0; d--) depthShift[d] += depthRecv[d + 1];
793:   for (d = 0; d < dim + 1; d++) {
794:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
795:     depthIdx[d] = pStart + depthShift[d];
796:   }

798:   /* Build an SF that describes the full overlap migration */
799:   DMPlexGetChart(dm, &pStart, &pEnd);
800:   newLeaves = pEnd - pStart + nleaves;
801:   PetscMalloc1(newLeaves, &ilocal);
802:   PetscMalloc1(newLeaves, &iremote);
803:   /* First map local points to themselves */
804:   for (d = 0; d < dim + 1; d++) {
805:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
806:     for (p = pStart; p < pEnd; p++) {
807:       point                = p + depthShift[d];
808:       ilocal[point]        = point;
809:       iremote[point].index = p;
810:       iremote[point].rank  = rank;
811:       depthIdx[d]++;
812:     }
813:   }

815:   /* Add in the remote roots for currently shared points */
816:   DMGetPointSF(dm, &pointSF);
817:   PetscSFGetGraph(pointSF, NULL, &numSharedPoints, &sharedLocal, &sharedRemote);
818:   for (d = 0; d < dim + 1; d++) {
819:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
820:     for (p = 0; p < numSharedPoints; p++) {
821:       if (pStart <= sharedLocal[p] && sharedLocal[p] < pEnd) {
822:         point                = sharedLocal[p] + depthShift[d];
823:         iremote[point].index = sharedRemote[p].index;
824:         iremote[point].rank  = sharedRemote[p].rank;
825:       }
826:     }
827:   }

829:   /* Now add the incoming overlap points */
830:   for (p = 0; p < nleaves; p++) {
831:     point                = depthIdx[remoteDepths[p]];
832:     ilocal[point]        = point;
833:     iremote[point].index = overlapRemote[p].index;
834:     iremote[point].rank  = overlapRemote[p].rank;
835:     depthIdx[remoteDepths[p]]++;
836:   }
837:   PetscFree2(pointDepths, remoteDepths);

839:   PetscSFCreate(comm, migrationSF);
840:   PetscObjectSetName((PetscObject)*migrationSF, "Overlap Migration SF");
841:   PetscSFSetFromOptions(*migrationSF);
842:   DMPlexGetChart(dm, &pStart, &pEnd);
843:   PetscSFSetGraph(*migrationSF, pEnd - pStart, newLeaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);

845:   PetscFree3(depthRecv, depthShift, depthIdx);
846:   return 0;
847: }

849: /*@
850:   DMPlexStratifyMigrationSF - Rearrange the leaves of a migration SF for stratification.

852:   Input Parameters:
853: + dm          - The DM
854: - sf          - A star forest with unordered leaves, usually defining a DM point migration

856:   Output Parameter:
857: . migrationSF - A star forest with added leaf indirection that ensures the resulting DM is stratified

859:   Note:
860:   This lexicographically sorts by (depth, cellType)

862:   Level: developer

864: .seealso: `DMPlexPartitionLabelCreateSF()`, `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
865: @*/
866: PetscErrorCode DMPlexStratifyMigrationSF(DM dm, PetscSF sf, PetscSF *migrationSF)
867: {
868:   MPI_Comm           comm;
869:   PetscMPIInt        rank, size;
870:   PetscInt           d, ldepth, depth, dim, p, pStart, pEnd, nroots, nleaves;
871:   PetscSFNode       *pointDepths, *remoteDepths;
872:   PetscInt          *ilocal;
873:   PetscInt          *depthRecv, *depthShift, *depthIdx;
874:   PetscInt          *ctRecv, *ctShift, *ctIdx;
875:   const PetscSFNode *iremote;

878:   PetscObjectGetComm((PetscObject)dm, &comm);
879:   MPI_Comm_rank(comm, &rank);
880:   MPI_Comm_size(comm, &size);
881:   DMPlexGetDepth(dm, &ldepth);
882:   DMGetDimension(dm, &dim);
883:   MPIU_Allreduce(&ldepth, &depth, 1, MPIU_INT, MPI_MAX, comm);
885:   PetscLogEventBegin(DMPLEX_PartStratSF, dm, 0, 0, 0);

887:   /* Before building the migration SF we need to know the new stratum offsets */
888:   PetscSFGetGraph(sf, &nroots, &nleaves, NULL, &iremote);
889:   PetscMalloc2(nroots, &pointDepths, nleaves, &remoteDepths);
890:   for (d = 0; d < depth + 1; ++d) {
891:     DMPlexGetDepthStratum(dm, d, &pStart, &pEnd);
892:     for (p = pStart; p < pEnd; ++p) {
893:       DMPolytopeType ct;

895:       DMPlexGetCellType(dm, p, &ct);
896:       pointDepths[p].index = d;
897:       pointDepths[p].rank  = ct;
898:     }
899:   }
900:   for (p = 0; p < nleaves; ++p) {
901:     remoteDepths[p].index = -1;
902:     remoteDepths[p].rank  = -1;
903:   }
904:   PetscSFBcastBegin(sf, MPIU_2INT, pointDepths, remoteDepths, MPI_REPLACE);
905:   PetscSFBcastEnd(sf, MPIU_2INT, pointDepths, remoteDepths, MPI_REPLACE);
906:   /* Count received points in each stratum and compute the internal strata shift */
907:   PetscCalloc6(depth + 1, &depthRecv, depth + 1, &depthShift, depth + 1, &depthIdx, DM_NUM_POLYTOPES, &ctRecv, DM_NUM_POLYTOPES, &ctShift, DM_NUM_POLYTOPES, &ctIdx);
908:   for (p = 0; p < nleaves; ++p) {
909:     if (remoteDepths[p].rank < 0) {
910:       ++depthRecv[remoteDepths[p].index];
911:     } else {
912:       ++ctRecv[remoteDepths[p].rank];
913:     }
914:   }
915:   {
916:     PetscInt depths[4], dims[4], shift = 0, i, c;

918:     /* Cells (depth), Vertices (0), Faces (depth-1), Edges (1)
919:          Consider DM_POLYTOPE_FV_GHOST and DM_POLYTOPE_INTERIOR_GHOST as cells
920:      */
921:     depths[0] = depth;
922:     depths[1] = 0;
923:     depths[2] = depth - 1;
924:     depths[3] = 1;
925:     dims[0]   = dim;
926:     dims[1]   = 0;
927:     dims[2]   = dim - 1;
928:     dims[3]   = 1;
929:     for (i = 0; i <= depth; ++i) {
930:       const PetscInt dep = depths[i];
931:       const PetscInt dim = dims[i];

933:       for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
934:         if (DMPolytopeTypeGetDim((DMPolytopeType)c) != dim && !(i == 0 && (c == DM_POLYTOPE_FV_GHOST || c == DM_POLYTOPE_INTERIOR_GHOST))) continue;
935:         ctShift[c] = shift;
936:         shift += ctRecv[c];
937:       }
938:       depthShift[dep] = shift;
939:       shift += depthRecv[dep];
940:     }
941:     for (c = 0; c < DM_NUM_POLYTOPES; ++c) {
942:       const PetscInt ctDim = DMPolytopeTypeGetDim((DMPolytopeType)c);

944:       if ((ctDim < 0 || ctDim > dim) && (c != DM_POLYTOPE_FV_GHOST && c != DM_POLYTOPE_INTERIOR_GHOST)) {
945:         ctShift[c] = shift;
946:         shift += ctRecv[c];
947:       }
948:     }
949:   }
950:   /* Derive a new local permutation based on stratified indices */
951:   PetscMalloc1(nleaves, &ilocal);
952:   for (p = 0; p < nleaves; ++p) {
953:     const PetscInt       dep = remoteDepths[p].index;
954:     const DMPolytopeType ct  = (DMPolytopeType)remoteDepths[p].rank;

956:     if ((PetscInt)ct < 0) {
957:       ilocal[p] = depthShift[dep] + depthIdx[dep];
958:       ++depthIdx[dep];
959:     } else {
960:       ilocal[p] = ctShift[ct] + ctIdx[ct];
961:       ++ctIdx[ct];
962:     }
963:   }
964:   PetscSFCreate(comm, migrationSF);
965:   PetscObjectSetName((PetscObject)*migrationSF, "Migration SF");
966:   PetscSFSetGraph(*migrationSF, nroots, nleaves, ilocal, PETSC_OWN_POINTER, (PetscSFNode *)iremote, PETSC_COPY_VALUES);
967:   PetscFree2(pointDepths, remoteDepths);
968:   PetscFree6(depthRecv, depthShift, depthIdx, ctRecv, ctShift, ctIdx);
969:   PetscLogEventEnd(DMPLEX_PartStratSF, dm, 0, 0, 0);
970:   return 0;
971: }
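
/* Editor's note: a hedged sketch, not part of the original source: replace an unordered
   point-migration SF by a leaf-ordered one whose receive order is grouped by
   (depth, cell type), as required for a stratified DMPlex. */
#if 0
static PetscErrorCode ExampleStratifyMigration(DM dm, PetscSF *sfMigration)
{
  PetscSF sfStratified;

  DMPlexStratifyMigrationSF(dm, *sfMigration, &sfStratified);
  PetscSFDestroy(sfMigration);
  *sfMigration = sfStratified;
  return 0;
}
#endif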

973: /*@
974:   DMPlexDistributeField - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

976:   Collective on dm

978:   Input Parameters:
979: + dm - The DMPlex object
980: . pointSF - The PetscSF describing the communication pattern
981: . originalSection - The PetscSection for existing data layout
982: - originalVec - The existing data in a local vector

984:   Output Parameters:
985: + newSection - The PetscSection describing the new data layout
986: - newVec - The new data in a local vector

988:   Level: developer

990: .seealso: `DMPlexDistribute()`, `DMPlexDistributeFieldIS()`, `DMPlexDistributeData()`
991: @*/
992: PetscErrorCode DMPlexDistributeField(DM dm, PetscSF pointSF, PetscSection originalSection, Vec originalVec, PetscSection newSection, Vec newVec)
993: {
994:   PetscSF      fieldSF;
995:   PetscInt    *remoteOffsets, fieldSize;
996:   PetscScalar *originalValues, *newValues;

998:   PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0);
999:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

1001:   PetscSectionGetStorageSize(newSection, &fieldSize);
1002:   VecSetSizes(newVec, fieldSize, PETSC_DETERMINE);
1003:   VecSetType(newVec, dm->vectype);

1005:   VecGetArray(originalVec, &originalValues);
1006:   VecGetArray(newVec, &newValues);
1007:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
1008:   PetscFree(remoteOffsets);
1009:   PetscSFBcastBegin(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE);
1010:   PetscSFBcastEnd(fieldSF, MPIU_SCALAR, originalValues, newValues, MPI_REPLACE);
1011:   PetscSFDestroy(&fieldSF);
1012:   VecRestoreArray(newVec, &newValues);
1013:   VecRestoreArray(originalVec, &originalValues);
1014:   PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0);
1015:   return 0;
1016: }
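
/* Editor's note: a hedged sketch, not part of the original source, following the same pattern
   DMPlexDistributeCoordinates() uses later in this file. sfMigration is the SF returned by
   DMPlexDistribute(); origSection/origVec hold the undistributed data. */
#if 0
static PetscErrorCode ExampleDistributeField(DM dm, PetscSF sfMigration, PetscSection origSection, Vec origVec, PetscSection *newSection, Vec *newVec)
{
  PetscSectionCreate(PETSC_COMM_SELF, newSection);
  VecCreate(PETSC_COMM_SELF, newVec); /* sizes and type are set inside DMPlexDistributeField() */
  DMPlexDistributeField(dm, sfMigration, origSection, origVec, *newSection, *newVec);
  return 0;
}
#endif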

1018: /*@
1019:   DMPlexDistributeFieldIS - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

1021:   Collective on dm

1023:   Input Parameters:
1024: + dm - The DMPlex object
1025: . pointSF - The PetscSF describing the communication pattern
1026: . originalSection - The PetscSection for existing data layout
1027: - originalIS - The existing data

1029:   Output Parameters:
1030: + newSection - The PetscSection describing the new data layout
1031: - newIS - The new data

1033:   Level: developer

1035: .seealso: `DMPlexDistribute()`, `DMPlexDistributeField()`, `DMPlexDistributeData()`
1036: @*/
1037: PetscErrorCode DMPlexDistributeFieldIS(DM dm, PetscSF pointSF, PetscSection originalSection, IS originalIS, PetscSection newSection, IS *newIS)
1038: {
1039:   PetscSF         fieldSF;
1040:   PetscInt       *newValues, *remoteOffsets, fieldSize;
1041:   const PetscInt *originalValues;

1043:   PetscLogEventBegin(DMPLEX_DistributeField, dm, 0, 0, 0);
1044:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

1046:   PetscSectionGetStorageSize(newSection, &fieldSize);
1047:   PetscMalloc1(fieldSize, &newValues);

1049:   ISGetIndices(originalIS, &originalValues);
1050:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
1051:   PetscFree(remoteOffsets);
1052:   PetscSFBcastBegin(fieldSF, MPIU_INT, (PetscInt *)originalValues, newValues, MPI_REPLACE);
1053:   PetscSFBcastEnd(fieldSF, MPIU_INT, (PetscInt *)originalValues, newValues, MPI_REPLACE);
1054:   PetscSFDestroy(&fieldSF);
1055:   ISRestoreIndices(originalIS, &originalValues);
1056:   ISCreateGeneral(PetscObjectComm((PetscObject)pointSF), fieldSize, newValues, PETSC_OWN_POINTER, newIS);
1057:   PetscLogEventEnd(DMPLEX_DistributeField, dm, 0, 0, 0);
1058:   return 0;
1059: }

1061: /*@
1062:   DMPlexDistributeData - Distribute field data to match a given PetscSF, usually the SF from mesh distribution

1064:   Collective on dm

1066:   Input Parameters:
1067: + dm - The DMPlex object
1068: . pointSF - The PetscSF describing the communication pattern
1069: . originalSection - The PetscSection for existing data layout
1070: . datatype - The type of data
1071: - originalData - The existing data

1073:   Output Parameters:
1074: + newSection - The PetscSection describing the new data layout
1075: - newData - The new data

1077:   Level: developer

1079: .seealso: `DMPlexDistribute()`, `DMPlexDistributeField()`
1080: @*/
1081: PetscErrorCode DMPlexDistributeData(DM dm, PetscSF pointSF, PetscSection originalSection, MPI_Datatype datatype, void *originalData, PetscSection newSection, void **newData)
1082: {
1083:   PetscSF     fieldSF;
1084:   PetscInt   *remoteOffsets, fieldSize;
1085:   PetscMPIInt dataSize;

1087:   PetscLogEventBegin(DMPLEX_DistributeData, dm, 0, 0, 0);
1088:   PetscSFDistributeSection(pointSF, originalSection, &remoteOffsets, newSection);

1090:   PetscSectionGetStorageSize(newSection, &fieldSize);
1091:   MPI_Type_size(datatype, &dataSize);
1092:   PetscMalloc(fieldSize * dataSize, newData);

1094:   PetscSFCreateSectionSF(pointSF, originalSection, remoteOffsets, newSection, &fieldSF);
1095:   PetscFree(remoteOffsets);
1096:   PetscSFBcastBegin(fieldSF, datatype, originalData, *newData, MPI_REPLACE);
1097:   PetscSFBcastEnd(fieldSF, datatype, originalData, *newData, MPI_REPLACE);
1098:   PetscSFDestroy(&fieldSF);
1099:   PetscLogEventEnd(DMPLEX_DistributeData, dm, 0, 0, 0);
1100:   return 0;
1101: }
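
/* Editor's note: a hedged sketch, not part of the original source: distribute raw integer data
   attached to points with an explicit MPI datatype. newData is allocated by the call and must
   be freed by the caller with PetscFree(). */
#if 0
static PetscErrorCode ExampleDistributeData(DM dm, PetscSF sfMigration, PetscSection origSection, PetscInt *origData, PetscSection *newSection, PetscInt **newData)
{
  PetscSectionCreate(PETSC_COMM_SELF, newSection);
  DMPlexDistributeData(dm, sfMigration, origSection, MPIU_INT, origData, *newSection, (void **)newData);
  return 0;
}
#endif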

1103: static PetscErrorCode DMPlexDistributeCones(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1104: {
1105:   DM_Plex     *pmesh = (DM_Plex *)(dmParallel)->data;
1106:   MPI_Comm     comm;
1107:   PetscSF      coneSF;
1108:   PetscSection originalConeSection, newConeSection;
1109:   PetscInt    *remoteOffsets, *cones, *globCones, *newCones, newConesSize;
1110:   PetscBool    flg;

1114:   PetscLogEventBegin(DMPLEX_DistributeCones, dm, 0, 0, 0);
1115:   /* Distribute cone section */
1116:   PetscObjectGetComm((PetscObject)dm, &comm);
1117:   DMPlexGetConeSection(dm, &originalConeSection);
1118:   DMPlexGetConeSection(dmParallel, &newConeSection);
1119:   PetscSFDistributeSection(migrationSF, originalConeSection, &remoteOffsets, newConeSection);
1120:   DMSetUp(dmParallel);
1121:   /* Communicate and renumber cones */
1122:   PetscSFCreateSectionSF(migrationSF, originalConeSection, remoteOffsets, newConeSection, &coneSF);
1123:   PetscFree(remoteOffsets);
1124:   DMPlexGetCones(dm, &cones);
1125:   if (original) {
1126:     PetscInt numCones;

1128:     PetscSectionGetStorageSize(originalConeSection, &numCones);
1129:     PetscMalloc1(numCones, &globCones);
1130:     ISLocalToGlobalMappingApplyBlock(original, numCones, cones, globCones);
1131:   } else {
1132:     globCones = cones;
1133:   }
1134:   DMPlexGetCones(dmParallel, &newCones);
1135:   PetscSFBcastBegin(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE);
1136:   PetscSFBcastEnd(coneSF, MPIU_INT, globCones, newCones, MPI_REPLACE);
1137:   if (original) PetscFree(globCones);
1138:   PetscSectionGetStorageSize(newConeSection, &newConesSize);
1139:   ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newConesSize, newCones, NULL, newCones);
1140:   if (PetscDefined(USE_DEBUG)) {
1141:     PetscInt  p;
1142:     PetscBool valid = PETSC_TRUE;
1143:     for (p = 0; p < newConesSize; ++p) {
1144:       if (newCones[p] < 0) {
1145:         valid = PETSC_FALSE;
1146:         PetscPrintf(PETSC_COMM_SELF, "[%d] Point %" PetscInt_FMT " not in overlap SF\n", PetscGlobalRank, p);
1147:       }
1148:     }
1150:   }
1151:   PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-cones_view", &flg);
1152:   if (flg) {
1153:     PetscPrintf(comm, "Serial Cone Section:\n");
1154:     PetscSectionView(originalConeSection, PETSC_VIEWER_STDOUT_(comm));
1155:     PetscPrintf(comm, "Parallel Cone Section:\n");
1156:     PetscSectionView(newConeSection, PETSC_VIEWER_STDOUT_(comm));
1157:     PetscSFView(coneSF, NULL);
1158:   }
1159:   DMPlexGetConeOrientations(dm, &cones);
1160:   DMPlexGetConeOrientations(dmParallel, &newCones);
1161:   PetscSFBcastBegin(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE);
1162:   PetscSFBcastEnd(coneSF, MPIU_INT, cones, newCones, MPI_REPLACE);
1163:   PetscSFDestroy(&coneSF);
1164:   PetscLogEventEnd(DMPLEX_DistributeCones, dm, 0, 0, 0);
1165:   /* Create supports and stratify DMPlex */
1166:   {
1167:     PetscInt pStart, pEnd;

1169:     PetscSectionGetChart(pmesh->coneSection, &pStart, &pEnd);
1170:     PetscSectionSetChart(pmesh->supportSection, pStart, pEnd);
1171:   }
1172:   DMPlexSymmetrize(dmParallel);
1173:   DMPlexStratify(dmParallel);
1174:   {
1175:     PetscBool useCone, useClosure, useAnchors;

1177:     DMGetBasicAdjacency(dm, &useCone, &useClosure);
1178:     DMSetBasicAdjacency(dmParallel, useCone, useClosure);
1179:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1180:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1181:   }
1182:   return 0;
1183: }

1185: static PetscErrorCode DMPlexDistributeCoordinates(DM dm, PetscSF migrationSF, DM dmParallel)
1186: {
1187:   MPI_Comm         comm;
1188:   DM               cdm, cdmParallel;
1189:   PetscSection     originalCoordSection, newCoordSection;
1190:   Vec              originalCoordinates, newCoordinates;
1191:   PetscInt         bs;
1192:   const char      *name;
1193:   const PetscReal *maxCell, *Lstart, *L;


1198:   PetscObjectGetComm((PetscObject)dm, &comm);
1199:   DMGetCoordinateDM(dm, &cdm);
1200:   DMGetCoordinateDM(dmParallel, &cdmParallel);
1201:   DMCopyDisc(cdm, cdmParallel);
1202:   DMGetCoordinateSection(dm, &originalCoordSection);
1203:   DMGetCoordinateSection(dmParallel, &newCoordSection);
1204:   DMGetCoordinatesLocal(dm, &originalCoordinates);
1205:   if (originalCoordinates) {
1206:     VecCreate(PETSC_COMM_SELF, &newCoordinates);
1207:     PetscObjectGetName((PetscObject)originalCoordinates, &name);
1208:     PetscObjectSetName((PetscObject)newCoordinates, name);

1210:     DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1211:     DMSetCoordinatesLocal(dmParallel, newCoordinates);
1212:     VecGetBlockSize(originalCoordinates, &bs);
1213:     VecSetBlockSize(newCoordinates, bs);
1214:     VecDestroy(&newCoordinates);
1215:   }

1217:   PetscObjectGetComm((PetscObject)dm, &comm);
1218:   DMGetPeriodicity(dm, &maxCell, &Lstart, &L);
1219:   DMSetPeriodicity(dmParallel, maxCell, Lstart, L);
1220:   DMGetCellCoordinateDM(dm, &cdm);
1221:   if (cdm) {
1222:     DMClone(dmParallel, &cdmParallel);
1223:     DMSetCellCoordinateDM(dmParallel, cdmParallel);
1224:     DMCopyDisc(cdm, cdmParallel);
1225:     DMDestroy(&cdmParallel);
1226:     DMGetCellCoordinateSection(dm, &originalCoordSection);
1227:     DMGetCellCoordinatesLocal(dm, &originalCoordinates);
1228:     PetscSectionCreate(comm, &newCoordSection);
1229:     if (originalCoordinates) {
1230:       VecCreate(PETSC_COMM_SELF, &newCoordinates);
1231:       PetscObjectGetName((PetscObject)originalCoordinates, &name);
1232:       PetscObjectSetName((PetscObject)newCoordinates, name);

1234:       DMPlexDistributeField(dm, migrationSF, originalCoordSection, originalCoordinates, newCoordSection, newCoordinates);
1235:       VecGetBlockSize(originalCoordinates, &bs);
1236:       VecSetBlockSize(newCoordinates, bs);
1237:       DMSetCellCoordinateSection(dmParallel, bs, newCoordSection);
1238:       DMSetCellCoordinatesLocal(dmParallel, newCoordinates);
1239:       VecDestroy(&newCoordinates);
1240:     }
1241:     PetscSectionDestroy(&newCoordSection);
1242:   }
1243:   return 0;
1244: }

1246: static PetscErrorCode DMPlexDistributeLabels(DM dm, PetscSF migrationSF, DM dmParallel)
1247: {
1248:   DM_Plex         *mesh = (DM_Plex *)dm->data;
1249:   MPI_Comm         comm;
1250:   DMLabel          depthLabel;
1251:   PetscMPIInt      rank;
1252:   PetscInt         depth, d, numLabels, numLocalLabels, l;
1253:   PetscBool        hasLabels  = PETSC_FALSE, lsendDepth, sendDepth;
1254:   PetscObjectState depthState = -1;


1259:   PetscLogEventBegin(DMPLEX_DistributeLabels, dm, 0, 0, 0);
1260:   PetscObjectGetComm((PetscObject)dm, &comm);
1261:   MPI_Comm_rank(comm, &rank);

1263:   /* If the user has changed the depth label, communicate it instead */
1264:   DMPlexGetDepth(dm, &depth);
1265:   DMPlexGetDepthLabel(dm, &depthLabel);
1266:   if (depthLabel) PetscObjectStateGet((PetscObject)depthLabel, &depthState);
1267:   lsendDepth = mesh->depthState != depthState ? PETSC_TRUE : PETSC_FALSE;
1268:   MPIU_Allreduce(&lsendDepth, &sendDepth, 1, MPIU_BOOL, MPI_LOR, comm);
1269:   if (sendDepth) {
1270:     DMPlexGetDepthLabel(dmParallel, &dmParallel->depthLabel);
1271:     DMRemoveLabelBySelf(dmParallel, &dmParallel->depthLabel, PETSC_FALSE);
1272:   }
1273:   /* Everyone must have either the same number of labels, or none */
1274:   DMGetNumLabels(dm, &numLocalLabels);
1275:   numLabels = numLocalLabels;
1276:   MPI_Bcast(&numLabels, 1, MPIU_INT, 0, comm);
1277:   if (numLabels == numLocalLabels) hasLabels = PETSC_TRUE;
1278:   for (l = 0; l < numLabels; ++l) {
1279:     DMLabel     label = NULL, labelNew = NULL;
1280:     PetscBool   isDepth, lisOutput     = PETSC_TRUE, isOutput;
1281:     const char *name = NULL;

1283:     if (hasLabels) {
1284:       DMGetLabelByNum(dm, l, &label);
1285:       /* Skip "depth" because it is recreated */
1286:       PetscObjectGetName((PetscObject)label, &name);
1287:       PetscStrcmp(name, "depth", &isDepth);
1288:     }
1289:     MPI_Bcast(&isDepth, 1, MPIU_BOOL, 0, comm);
1290:     if (isDepth && !sendDepth) continue;
1291:     DMLabelDistribute(label, migrationSF, &labelNew);
1292:     if (isDepth) {
1293:       /* Put in any missing strata which can occur if users are managing the depth label themselves */
1294:       PetscInt gdepth;

1296:       MPIU_Allreduce(&depth, &gdepth, 1, MPIU_INT, MPI_MAX, comm);
1298:       for (d = 0; d <= gdepth; ++d) {
1299:         PetscBool has;

1301:         DMLabelHasStratum(labelNew, d, &has);
1302:         if (!has) DMLabelAddStratum(labelNew, d);
1303:       }
1304:     }
1305:     DMAddLabel(dmParallel, labelNew);
1306:     /* Put the output flag in the new label */
1307:     if (hasLabels) DMGetLabelOutput(dm, name, &lisOutput);
1308:     MPIU_Allreduce(&lisOutput, &isOutput, 1, MPIU_BOOL, MPI_LAND, comm);
1309:     PetscObjectGetName((PetscObject)labelNew, &name);
1310:     DMSetLabelOutput(dmParallel, name, isOutput);
1311:     DMLabelDestroy(&labelNew);
1312:   }
1313:   {
1314:     DMLabel ctLabel;

1316:     // Reset label for fast lookup
1317:     DMPlexGetCellTypeLabel(dmParallel, &ctLabel);
1318:     DMLabelMakeAllInvalid_Internal(ctLabel);
1319:   }
1320:   PetscLogEventEnd(DMPLEX_DistributeLabels, dm, 0, 0, 0);
1321:   return 0;
1322: }

1324: static PetscErrorCode DMPlexDistributeSetupTree(DM dm, PetscSF migrationSF, ISLocalToGlobalMapping original, ISLocalToGlobalMapping renumbering, DM dmParallel)
1325: {
1326:   DM_Plex     *mesh  = (DM_Plex *)dm->data;
1327:   DM_Plex     *pmesh = (DM_Plex *)(dmParallel)->data;
1328:   MPI_Comm     comm;
1329:   DM           refTree;
1330:   PetscSection origParentSection, newParentSection;
1331:   PetscInt    *origParents, *origChildIDs;
1332:   PetscBool    flg;

1336:   PetscObjectGetComm((PetscObject)dm, &comm);

1338:   /* Set up tree */
1339:   DMPlexGetReferenceTree(dm, &refTree);
1340:   DMPlexSetReferenceTree(dmParallel, refTree);
1341:   DMPlexGetTree(dm, &origParentSection, &origParents, &origChildIDs, NULL, NULL);
1342:   if (origParentSection) {
1343:     PetscInt  pStart, pEnd;
1344:     PetscInt *newParents, *newChildIDs, *globParents;
1345:     PetscInt *remoteOffsetsParents, newParentSize;
1346:     PetscSF   parentSF;

1348:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1349:     PetscSectionCreate(PetscObjectComm((PetscObject)dmParallel), &newParentSection);
1350:     PetscSectionSetChart(newParentSection, pStart, pEnd);
1351:     PetscSFDistributeSection(migrationSF, origParentSection, &remoteOffsetsParents, newParentSection);
1352:     PetscSFCreateSectionSF(migrationSF, origParentSection, remoteOffsetsParents, newParentSection, &parentSF);
1353:     PetscFree(remoteOffsetsParents);
1354:     PetscSectionGetStorageSize(newParentSection, &newParentSize);
1355:     PetscMalloc2(newParentSize, &newParents, newParentSize, &newChildIDs);
1356:     if (original) {
1357:       PetscInt numParents;

1359:       PetscSectionGetStorageSize(origParentSection, &numParents);
1360:       PetscMalloc1(numParents, &globParents);
1361:       ISLocalToGlobalMappingApplyBlock(original, numParents, origParents, globParents);
1362:     } else {
1363:       globParents = origParents;
1364:     }
1365:     PetscSFBcastBegin(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE);
1366:     PetscSFBcastEnd(parentSF, MPIU_INT, globParents, newParents, MPI_REPLACE);
1367:     if (original) PetscFree(globParents);
1368:     PetscSFBcastBegin(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE);
1369:     PetscSFBcastEnd(parentSF, MPIU_INT, origChildIDs, newChildIDs, MPI_REPLACE);
1370:     ISGlobalToLocalMappingApplyBlock(renumbering, IS_GTOLM_MASK, newParentSize, newParents, NULL, newParents);
1371:     if (PetscDefined(USE_DEBUG)) {
1372:       PetscInt  p;
1373:       PetscBool valid = PETSC_TRUE;
1374:       for (p = 0; p < newParentSize; ++p) {
1375:         if (newParents[p] < 0) {
1376:           valid = PETSC_FALSE;
1377:           PetscPrintf(PETSC_COMM_SELF, "Point %" PetscInt_FMT " not in overlap SF\n", p);
1378:         }
1379:       }
1381:     }
1382:     PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-parents_view", &flg);
1383:     if (flg) {
1384:       PetscPrintf(comm, "Serial Parent Section: \n");
1385:       PetscSectionView(origParentSection, PETSC_VIEWER_STDOUT_(comm));
1386:       PetscPrintf(comm, "Parallel Parent Section: \n");
1387:       PetscSectionView(newParentSection, PETSC_VIEWER_STDOUT_(comm));
1388:       PetscSFView(parentSF, NULL);
1389:     }
1390:     DMPlexSetTree(dmParallel, newParentSection, newParents, newChildIDs);
1391:     PetscSectionDestroy(&newParentSection);
1392:     PetscFree2(newParents, newChildIDs);
1393:     PetscSFDestroy(&parentSF);
1394:   }
1395:   pmesh->useAnchors = mesh->useAnchors;
1396:   return 0;
1397: }
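
/*
  Editorial sketch: the routine above uses a general pattern for moving per-point data attached to a
  PetscSection across a migration SF. A minimal, hedged sketch of that pattern for a user-held integer
  array follows; "origSection", "origData", "migrationSF", and the target chart [pStart, pEnd) are
  assumed to exist already, and all names are illustrative only.

    PetscSection newSection;
    PetscSF      dataSF;
    PetscInt    *remoteOffsets, *newData, newSize;

    PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)migrationSF), &newSection));
    PetscCall(PetscSectionSetChart(newSection, pStart, pEnd)); // chart of the target point range, as above
    PetscCall(PetscSFDistributeSection(migrationSF, origSection, &remoteOffsets, newSection));
    PetscCall(PetscSFCreateSectionSF(migrationSF, origSection, remoteOffsets, newSection, &dataSF));
    PetscCall(PetscFree(remoteOffsets));
    PetscCall(PetscSectionGetStorageSize(newSection, &newSize));
    PetscCall(PetscMalloc1(newSize, &newData));
    PetscCall(PetscSFBcastBegin(dataSF, MPIU_INT, origData, newData, MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(dataSF, MPIU_INT, origData, newData, MPI_REPLACE));
    PetscCall(PetscSFDestroy(&dataSF));
    // ... use newData; free it and destroy newSection when done ...
*/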

1399: PETSC_UNUSED static PetscErrorCode DMPlexDistributeSF(DM dm, PetscSF migrationSF, DM dmParallel)
1400: {
1401:   PetscMPIInt rank, size;
1402:   MPI_Comm    comm;


1407:   /* Create point SF for parallel mesh */
1408:   PetscLogEventBegin(DMPLEX_DistributeSF, dm, 0, 0, 0);
1409:   PetscObjectGetComm((PetscObject)dm, &comm);
1410:   MPI_Comm_rank(comm, &rank);
1411:   MPI_Comm_size(comm, &size);
1412:   {
1413:     const PetscInt *leaves;
1414:     PetscSFNode    *remotePoints, *rowners, *lowners;
1415:     PetscInt        numRoots, numLeaves, numGhostPoints = 0, p, gp, *ghostPoints;
1416:     PetscInt        pStart, pEnd;

1418:     DMPlexGetChart(dmParallel, &pStart, &pEnd);
1419:     PetscSFGetGraph(migrationSF, &numRoots, &numLeaves, &leaves, NULL);
1420:     PetscMalloc2(numRoots, &rowners, numLeaves, &lowners);
1421:     for (p = 0; p < numRoots; p++) {
1422:       rowners[p].rank  = -1;
1423:       rowners[p].index = -1;
1424:     }
1425:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners, MPI_REPLACE);
1426:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners, MPI_REPLACE);
1427:     for (p = 0; p < numLeaves; ++p) {
1428:       if (lowners[p].rank < 0 || lowners[p].rank == rank) { /* Either put in a bid or we know we own it */
1429:         lowners[p].rank  = rank;
1430:         lowners[p].index = leaves ? leaves[p] : p;
1431:       } else if (lowners[p].rank >= 0) { /* Point already claimed, so flag it so that MAXLOC ignores us */
1432:         lowners[p].rank  = -2;
1433:         lowners[p].index = -2;
1434:       }
1435:     }
1436:     for (p = 0; p < numRoots; p++) { /* Roots must not participate in the reduction, so flag them so that MAXLOC ignores them */
1437:       rowners[p].rank  = -3;
1438:       rowners[p].index = -3;
1439:     }
1440:     PetscSFReduceBegin(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1441:     PetscSFReduceEnd(migrationSF, MPIU_2INT, lowners, rowners, MPI_MAXLOC);
1442:     PetscSFBcastBegin(migrationSF, MPIU_2INT, rowners, lowners, MPI_REPLACE);
1443:     PetscSFBcastEnd(migrationSF, MPIU_2INT, rowners, lowners, MPI_REPLACE);
1444:     for (p = 0; p < numLeaves; ++p) {
1446:       if (lowners[p].rank != rank) ++numGhostPoints;
1447:     }
1448:     PetscMalloc1(numGhostPoints, &ghostPoints);
1449:     PetscMalloc1(numGhostPoints, &remotePoints);
1450:     for (p = 0, gp = 0; p < numLeaves; ++p) {
1451:       if (lowners[p].rank != rank) {
1452:         ghostPoints[gp]        = leaves ? leaves[p] : p;
1453:         remotePoints[gp].rank  = lowners[p].rank;
1454:         remotePoints[gp].index = lowners[p].index;
1455:         ++gp;
1456:       }
1457:     }
1458:     PetscFree2(rowners, lowners);
1459:     PetscSFSetGraph((dmParallel)->sf, pEnd - pStart, numGhostPoints, ghostPoints, PETSC_OWN_POINTER, remotePoints, PETSC_OWN_POINTER);
1460:     PetscSFSetFromOptions((dmParallel)->sf);
1461:   }
1462:   {
1463:     PetscBool useCone, useClosure, useAnchors;

1465:     DMGetBasicAdjacency(dm, &useCone, &useClosure);
1466:     DMSetBasicAdjacency(dmParallel, useCone, useClosure);
1467:     DMPlexGetAdjacencyUseAnchors(dm, &useAnchors);
1468:     DMPlexSetAdjacencyUseAnchors(dmParallel, useAnchors);
1469:   }
1470:   PetscLogEventEnd(DMPLEX_DistributeSF, dm, 0, 0, 0);
1471:   return 0;
1472: }

1474: /*@
1475:   DMPlexSetPartitionBalance - Should distribution of the DM attempt to balance the shared point partition?

1477:   Input Parameters:
1478: + dm - The DMPlex object
1479: - flg - Balance the partition?

1481:   Level: intermediate

1483: .seealso: `DMPlexDistribute()`, `DMPlexGetPartitionBalance()`
1484: @*/
1485: PetscErrorCode DMPlexSetPartitionBalance(DM dm, PetscBool flg)
1486: {
1487:   DM_Plex *mesh = (DM_Plex *)dm->data;

1489:   mesh->partitionBalance = flg;
1490:   return 0;
1491: }

1493: /*@
1494:   DMPlexGetPartitionBalance - Does distribution of the DM attempt to balance the shared point partition?

1496:   Input Parameter:
1497: . dm - The DMPlex object

1499:   Output Parameter:
1500: . flg - Balance the partition?

1502:   Level: intermediate

1504: .seealso: `DMPlexDistribute()`, `DMPlexSetPartitionBalance()`
1505: @*/
1506: PetscErrorCode DMPlexGetPartitionBalance(DM dm, PetscBool *flg)
1507: {
1508:   DM_Plex *mesh = (DM_Plex *)dm->data;

1512:   *flg = mesh->partitionBalance;
1513:   return 0;
1514: }
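
/*
  Editorial sketch: partition balancing is chosen on the serial mesh before distribution and is then
  propagated to the parallel mesh by DMPlexDistribute() below. The DM "dm" and the flag value are
  illustrative.

    PetscBool balance;

    PetscCall(DMPlexSetPartitionBalance(dm, PETSC_TRUE));
    PetscCall(DMPlexGetPartitionBalance(dm, &balance)); // balance == PETSC_TRUE
*/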

1516: typedef struct {
1517:   PetscInt vote, rank, index;
1518: } Petsc3Int;

1520: /* MAXLOC on the vote (ties broken by the lower rank), carrying the point index along */
1521: static void MPIAPI MaxLocCarry(void *in_, void *inout_, PetscMPIInt *len_, MPI_Datatype *dtype)
1522: {
1523:   Petsc3Int *a = (Petsc3Int *)inout_;
1524:   Petsc3Int *b = (Petsc3Int *)in_;
1525:   PetscInt   i, len = *len_;
1526:   for (i = 0; i < len; i++) {
1527:     if (a[i].vote < b[i].vote) {
1528:       a[i].vote  = b[i].vote;
1529:       a[i].rank  = b[i].rank;
1530:       a[i].index = b[i].index;
1531:     } else if (a[i].vote <= b[i].vote) {
1532:       if (a[i].rank >= b[i].rank) {
1533:         a[i].rank  = b[i].rank;
1534:         a[i].index = b[i].index;
1535:       }
1536:     }
1537:   }
1538: }

1540: /*@C
1541:   DMPlexCreatePointSF - Build a point SF from an SF describing a point migration

1543:   Input Parameters:
1544: + dm          - The source DMPlex object
1545: . migrationSF - The star forest that describes the parallel point remapping
1546: - ownership   - Flag causing a vote to determine point ownership

1548:   Output Parameter:
1549: . pointSF     - The star forest describing the point overlap in the remapped DM

1551:   Notes:
1552:   The output pointSF is guaranteed to have its array of local indices (leaves) sorted.

1554:   Level: developer

1556: .seealso: `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1557: @*/
1558: PetscErrorCode DMPlexCreatePointSF(DM dm, PetscSF migrationSF, PetscBool ownership, PetscSF *pointSF)
1559: {
1560:   PetscMPIInt        rank, size;
1561:   PetscInt           p, nroots, nleaves, idx, npointLeaves;
1562:   PetscInt          *pointLocal;
1563:   const PetscInt    *leaves;
1564:   const PetscSFNode *roots;
1565:   PetscSFNode       *rootNodes, *leafNodes, *pointRemote;
1566:   Vec                shifts;
1567:   const PetscInt     numShifts  = 13759;
1568:   const PetscScalar *shift      = NULL;
1569:   const PetscBool    shiftDebug = PETSC_FALSE;
1570:   PetscBool          balance;

1573:   MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank);
1574:   MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size);
1575:   PetscLogEventBegin(DMPLEX_CreatePointSF, dm, 0, 0, 0);

1577:   DMPlexGetPartitionBalance(dm, &balance);
1578:   PetscSFGetGraph(migrationSF, &nroots, &nleaves, &leaves, &roots);
1579:   PetscMalloc2(nroots, &rootNodes, nleaves, &leafNodes);
1580:   if (ownership) {
1581:     MPI_Op       op;
1582:     MPI_Datatype datatype;
1583:     Petsc3Int   *rootVote = NULL, *leafVote = NULL;
1584:     /* If balancing, we compute a random cyclic shift of the rank for each remote point. That way, the max will evenly distribute among ranks. */
1585:     if (balance) {
1586:       PetscRandom r;

1588:       PetscRandomCreate(PETSC_COMM_SELF, &r);
1589:       PetscRandomSetInterval(r, 0, 2467 * size);
1590:       VecCreate(PETSC_COMM_SELF, &shifts);
1591:       VecSetSizes(shifts, numShifts, numShifts);
1592:       VecSetType(shifts, VECSTANDARD);
1593:       VecSetRandom(shifts, r);
1594:       PetscRandomDestroy(&r);
1595:       VecGetArrayRead(shifts, &shift);
1596:     }

1598:     PetscMalloc1(nroots, &rootVote);
1599:     PetscMalloc1(nleaves, &leafVote);
1600:     /* Point ownership vote: Process with highest rank owns shared points */
1601:     for (p = 0; p < nleaves; ++p) {
1602:       if (shiftDebug) {
1603:         PetscCall(PetscSynchronizedPrintf(PetscObjectComm((PetscObject)dm), "[%d] Point %" PetscInt_FMT " RemotePoint %" PetscInt_FMT " Shift %" PetscInt_FMT " MyRank %" PetscInt_FMT "\n", rank, leaves ? leaves[p] : p, roots[p].index,
1604:                                           (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]), (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size));
1605:       }
1606:       /* Either put in a bid or we know we own it */
1607:       leafVote[p].vote  = (rank + (shift ? (PetscInt)PetscRealPart(shift[roots[p].index % numShifts]) : 0)) % size;
1608:       leafVote[p].rank  = rank;
1609:       leafVote[p].index = p;
1610:     }
1611:     for (p = 0; p < nroots; p++) {
1612:       /* Roots must not participate in the reduction, so flag them so that MAXLOC ignores them */
1613:       rootVote[p].vote  = -3;
1614:       rootVote[p].rank  = -3;
1615:       rootVote[p].index = -3;
1616:     }
1617:     MPI_Type_contiguous(3, MPIU_INT, &datatype);
1618:     MPI_Type_commit(&datatype);
1619:     MPI_Op_create(&MaxLocCarry, 1, &op);
1620:     PetscSFReduceBegin(migrationSF, datatype, leafVote, rootVote, op);
1621:     PetscSFReduceEnd(migrationSF, datatype, leafVote, rootVote, op);
1622:     MPI_Op_free(&op);
1623:     MPI_Type_free(&datatype);
1624:     for (p = 0; p < nroots; p++) {
1625:       rootNodes[p].rank  = rootVote[p].rank;
1626:       rootNodes[p].index = rootVote[p].index;
1627:     }
1628:     PetscFree(leafVote);
1629:     PetscFree(rootVote);
1630:   } else {
1631:     for (p = 0; p < nroots; p++) {
1632:       rootNodes[p].index = -1;
1633:       rootNodes[p].rank  = rank;
1634:     }
1635:     for (p = 0; p < nleaves; p++) {
1636:       /* Write new local id into old location */
1637:       if (roots[p].rank == rank) rootNodes[roots[p].index].index = leaves ? leaves[p] : p;
1638:     }
1639:   }
1640:   PetscSFBcastBegin(migrationSF, MPIU_2INT, rootNodes, leafNodes, MPI_REPLACE);
1641:   PetscSFBcastEnd(migrationSF, MPIU_2INT, rootNodes, leafNodes, MPI_REPLACE);

1643:   for (npointLeaves = 0, p = 0; p < nleaves; p++) {
1644:     if (leafNodes[p].rank != rank) npointLeaves++;
1645:   }
1646:   PetscMalloc1(npointLeaves, &pointLocal);
1647:   PetscMalloc1(npointLeaves, &pointRemote);
1648:   for (idx = 0, p = 0; p < nleaves; p++) {
1649:     if (leafNodes[p].rank != rank) {
1650:       /* Note that pointLocal is automatically sorted as it is a sublist of 0, ..., nleaves-1 */
1651:       pointLocal[idx]  = p;
1652:       pointRemote[idx] = leafNodes[p];
1653:       idx++;
1654:     }
1655:   }
1656:   if (shift) {
1657:     VecRestoreArrayRead(shifts, &shift);
1658:     VecDestroy(&shifts);
1659:   }
1660:   if (shiftDebug) PetscSynchronizedFlush(PetscObjectComm((PetscObject)dm), PETSC_STDOUT);
1661:   PetscSFCreate(PetscObjectComm((PetscObject)dm), pointSF);
1662:   PetscSFSetFromOptions(*pointSF);
1663:   PetscSFSetGraph(*pointSF, nleaves, npointLeaves, pointLocal, PETSC_OWN_POINTER, pointRemote, PETSC_OWN_POINTER);
1664:   PetscFree2(rootNodes, leafNodes);
1665:   PetscLogEventEnd(DMPLEX_CreatePointSF, dm, 0, 0, 0);
1666:   if (PetscDefined(USE_DEBUG)) DMPlexCheckPointSF(dm, *pointSF, PETSC_FALSE);
1667:   return 0;
1668: }
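
/*
  Editorial sketch (developer level): after migrating a mesh, the point SF of the target DM is typically
  rebuilt from the migration SF and installed, mirroring what DMPlexDistribute() does below. "dmParallel"
  and "sfMigration" are assumed to exist; the coordinate DM usually receives the same point SF.

    PetscSF sfPoint;

    PetscCall(DMPlexCreatePointSF(dmParallel, sfMigration, PETSC_TRUE, &sfPoint));
    PetscCall(DMSetPointSF(dmParallel, sfPoint));
    PetscCall(PetscSFDestroy(&sfPoint));
*/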

1670: /*@C
1671:   DMPlexMigrate  - Migrates internal DM data over the supplied star forest

1673:   Collective on dm

1675:   Input Parameters:
1676: + dm       - The source DMPlex object
1677: - sf       - The star forest communication context describing the migration pattern

1679:   Output Parameter:
1680: . targetDM - The target DMPlex object

1682:   Level: intermediate

1684: .seealso: `DMPlexDistribute()`, `DMPlexDistributeOverlap()`
1685: @*/
1686: PetscErrorCode DMPlexMigrate(DM dm, PetscSF sf, DM targetDM)
1687: {
1688:   MPI_Comm               comm;
1689:   PetscInt               dim, cdim, nroots;
1690:   PetscSF                sfPoint;
1691:   ISLocalToGlobalMapping ltogMigration;
1692:   ISLocalToGlobalMapping ltogOriginal = NULL;

1695:   PetscLogEventBegin(DMPLEX_Migrate, dm, 0, 0, 0);
1696:   PetscObjectGetComm((PetscObject)dm, &comm);
1697:   DMGetDimension(dm, &dim);
1698:   DMSetDimension(targetDM, dim);
1699:   DMGetCoordinateDim(dm, &cdim);
1700:   DMSetCoordinateDim(targetDM, cdim);

1702:   /* Check for a one-to-all distribution pattern */
1703:   DMGetPointSF(dm, &sfPoint);
1704:   PetscSFGetGraph(sfPoint, &nroots, NULL, NULL, NULL);
1705:   if (nroots >= 0) {
1706:     IS        isOriginal;
1707:     PetscInt  n, size, nleaves;
1708:     PetscInt *numbering_orig, *numbering_new;

1710:     /* Get the original point numbering */
1711:     DMPlexCreatePointNumbering(dm, &isOriginal);
1712:     ISLocalToGlobalMappingCreateIS(isOriginal, &ltogOriginal);
1713:     ISLocalToGlobalMappingGetSize(ltogOriginal, &size);
1714:     ISLocalToGlobalMappingGetBlockIndices(ltogOriginal, (const PetscInt **)&numbering_orig);
1715:     /* Convert to positive global numbers */
1716:     for (n = 0; n < size; n++) {
1717:       if (numbering_orig[n] < 0) numbering_orig[n] = -(numbering_orig[n] + 1);
1718:     }
1719:     /* Derive the new local-to-global mapping from the old one */
1720:     PetscSFGetGraph(sf, NULL, &nleaves, NULL, NULL);
1721:     PetscMalloc1(nleaves, &numbering_new);
1722:     PetscSFBcastBegin(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE);
1723:     PetscSFBcastEnd(sf, MPIU_INT, numbering_orig, numbering_new, MPI_REPLACE);
1724:     ISLocalToGlobalMappingCreate(comm, 1, nleaves, numbering_new, PETSC_OWN_POINTER, &ltogMigration);
1725:     ISLocalToGlobalMappingRestoreIndices(ltogOriginal, (const PetscInt **)&numbering_orig);
1726:     ISDestroy(&isOriginal);
1727:   } else {
1728:     /* One-to-all distribution pattern: We can derive LToG from SF */
1729:     ISLocalToGlobalMappingCreateSF(sf, 0, &ltogMigration);
1730:   }
1731:   PetscObjectSetName((PetscObject)ltogMigration, "Point renumbering for DM migration");
1732:   ISLocalToGlobalMappingViewFromOptions(ltogMigration, (PetscObject)dm, "-partition_view");
1733:   /* Migrate DM data to target DM */
1734:   DMPlexDistributeCones(dm, sf, ltogOriginal, ltogMigration, targetDM);
1735:   DMPlexDistributeLabels(dm, sf, targetDM);
1736:   DMPlexDistributeCoordinates(dm, sf, targetDM);
1737:   DMPlexDistributeSetupTree(dm, sf, ltogOriginal, ltogMigration, targetDM);
1738:   ISLocalToGlobalMappingDestroy(&ltogOriginal);
1739:   ISLocalToGlobalMappingDestroy(&ltogMigration);
1740:   PetscLogEventEnd(DMPLEX_Migrate, dm, 0, 0, 0);
1741:   return 0;
1742: }
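
/*
  Editorial sketch: DMPlexMigrate() expects the target DM to exist already, so a typical call site first
  creates an empty DMPlex on the same communicator. "sfMigration" is an assumed, previously built
  migration SF.

    DM dmTarget;

    PetscCall(DMPlexCreate(PetscObjectComm((PetscObject)dm), &dmTarget));
    PetscCall(DMPlexMigrate(dm, sfMigration, dmTarget));
    // the point SF of dmTarget still has to be built, e.g. with DMPlexCreatePointSF() above
*/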

1744: /*@C
1745:   DMPlexDistribute - Distributes the mesh and any associated sections.

1747:   Collective on dm

1749:   Input Parameters:
1750: + dm  - The original DMPlex object
1751: - overlap - The overlap of partitions, 0 is the default

1753:   Output Parameters:
1754: + sf - The PetscSF used for point distribution, or NULL if not needed
1755: - dmParallel - The distributed DMPlex object

1757:   Note: If the mesh was not distributed, the output dmParallel will be NULL.

1759:   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1760:   representation on the mesh.

1762:   Level: intermediate

1764: .seealso: `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexGetOverlap()`
1765: @*/
1766: PetscErrorCode DMPlexDistribute(DM dm, PetscInt overlap, PetscSF *sf, DM *dmParallel)
1767: {
1768:   MPI_Comm         comm;
1769:   PetscPartitioner partitioner;
1770:   IS               cellPart;
1771:   PetscSection     cellPartSection;
1772:   DM               dmCoord;
1773:   DMLabel          lblPartition, lblMigration;
1774:   PetscSF          sfMigration, sfStratified, sfPoint;
1775:   PetscBool        flg, balance;
1776:   PetscMPIInt      rank, size;


1783:   if (sf) *sf = NULL;
1784:   *dmParallel = NULL;
1785:   PetscObjectGetComm((PetscObject)dm, &comm);
1786:   MPI_Comm_rank(comm, &rank);
1787:   MPI_Comm_size(comm, &size);
1788:   if (size == 1) return 0;

1790:   PetscLogEventBegin(DMPLEX_Distribute, dm, 0, 0, 0);
1791:   /* Create cell partition */
1792:   PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0);
1793:   PetscSectionCreate(comm, &cellPartSection);
1794:   DMPlexGetPartitioner(dm, &partitioner);
1795:   PetscPartitionerDMPlexPartition(partitioner, dm, NULL, cellPartSection, &cellPart);
1796:   PetscLogEventBegin(DMPLEX_PartSelf, dm, 0, 0, 0);
1797:   {
1798:     /* Convert partition to DMLabel */
1799:     IS              is;
1800:     PetscHSetI      ht;
1801:     const PetscInt *points;
1802:     PetscInt       *iranks;
1803:     PetscInt        pStart, pEnd, proc, npoints, poff = 0, nranks;

1805:     DMLabelCreate(PETSC_COMM_SELF, "Point Partition", &lblPartition);
1806:     /* Preallocate strata */
1807:     PetscHSetICreate(&ht);
1808:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1809:     for (proc = pStart; proc < pEnd; proc++) {
1810:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1811:       if (npoints) PetscHSetIAdd(ht, proc);
1812:     }
1813:     PetscHSetIGetSize(ht, &nranks);
1814:     PetscMalloc1(nranks, &iranks);
1815:     PetscHSetIGetElems(ht, &poff, iranks);
1816:     PetscHSetIDestroy(&ht);
1817:     DMLabelAddStrata(lblPartition, nranks, iranks);
1818:     PetscFree(iranks);
1819:     /* Inline DMPlexPartitionLabelClosure() */
1820:     ISGetIndices(cellPart, &points);
1821:     PetscSectionGetChart(cellPartSection, &pStart, &pEnd);
1822:     for (proc = pStart; proc < pEnd; proc++) {
1823:       PetscSectionGetDof(cellPartSection, proc, &npoints);
1824:       if (!npoints) continue;
1825:       PetscSectionGetOffset(cellPartSection, proc, &poff);
1826:       DMPlexClosurePoints_Private(dm, npoints, points + poff, &is);
1827:       DMLabelSetStratumIS(lblPartition, proc, is);
1828:       ISDestroy(&is);
1829:     }
1830:     ISRestoreIndices(cellPart, &points);
1831:   }
1832:   PetscLogEventEnd(DMPLEX_PartSelf, dm, 0, 0, 0);

1834:   DMLabelCreate(PETSC_COMM_SELF, "Point migration", &lblMigration);
1835:   DMPlexPartitionLabelInvert(dm, lblPartition, NULL, lblMigration);
1836:   DMPlexPartitionLabelCreateSF(dm, lblMigration, &sfMigration);
1837:   DMPlexStratifyMigrationSF(dm, sfMigration, &sfStratified);
1838:   PetscSFDestroy(&sfMigration);
1839:   sfMigration = sfStratified;
1840:   PetscSFSetUp(sfMigration);
1841:   PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0);
1842:   PetscOptionsHasName(((PetscObject)dm)->options, ((PetscObject)dm)->prefix, "-partition_view", &flg);
1843:   if (flg) {
1844:     DMLabelView(lblPartition, PETSC_VIEWER_STDOUT_(comm));
1845:     PetscSFView(sfMigration, PETSC_VIEWER_STDOUT_(comm));
1846:   }

1848:   /* Create non-overlapping parallel DM and migrate internal data */
1849:   DMPlexCreate(comm, dmParallel);
1850:   PetscObjectSetName((PetscObject)*dmParallel, "Parallel Mesh");
1851:   DMPlexMigrate(dm, sfMigration, *dmParallel);

1853:   /* Build the point SF without overlap */
1854:   DMPlexGetPartitionBalance(dm, &balance);
1855:   DMPlexSetPartitionBalance(*dmParallel, balance);
1856:   DMPlexCreatePointSF(*dmParallel, sfMigration, PETSC_TRUE, &sfPoint);
1857:   DMSetPointSF(*dmParallel, sfPoint);
1858:   DMGetCoordinateDM(*dmParallel, &dmCoord);
1859:   if (dmCoord) DMSetPointSF(dmCoord, sfPoint);
1860:   if (flg) PetscSFView(sfPoint, NULL);

1862:   if (overlap > 0) {
1863:     DM                 dmOverlap;
1864:     PetscInt           nroots, nleaves, noldleaves, l;
1865:     const PetscInt    *oldLeaves;
1866:     PetscSFNode       *newRemote, *permRemote;
1867:     const PetscSFNode *oldRemote;
1868:     PetscSF            sfOverlap, sfOverlapPoint;

1870:     /* Add the partition overlap to the distributed DM */
1871:     DMPlexDistributeOverlap(*dmParallel, overlap, &sfOverlap, &dmOverlap);
1872:     DMDestroy(dmParallel);
1873:     *dmParallel = dmOverlap;
1874:     if (flg) {
1875:       PetscPrintf(comm, "Overlap Migration SF:\n");
1876:       PetscSFView(sfOverlap, NULL);
1877:     }

1879:     /* Re-map the migration SF to establish the full migration pattern */
1880:     PetscSFGetGraph(sfMigration, &nroots, &noldleaves, &oldLeaves, &oldRemote);
1881:     PetscSFGetGraph(sfOverlap, NULL, &nleaves, NULL, NULL);
1882:     PetscMalloc1(nleaves, &newRemote);
1883:     /* oldRemote: original root point mapping to original leaf point
1884:        newRemote: original leaf point mapping to overlapped leaf point */
1885:     if (oldLeaves) {
1886:       /* After stratification, the migration remotes may not be in root (canonical) order, so we reorder using the leaf numbering */
1887:       PetscMalloc1(noldleaves, &permRemote);
1888:       for (l = 0; l < noldleaves; ++l) permRemote[oldLeaves[l]] = oldRemote[l];
1889:       oldRemote = permRemote;
1890:     }
1891:     PetscSFBcastBegin(sfOverlap, MPIU_2INT, oldRemote, newRemote, MPI_REPLACE);
1892:     PetscSFBcastEnd(sfOverlap, MPIU_2INT, oldRemote, newRemote, MPI_REPLACE);
1893:     if (oldLeaves) PetscFree(oldRemote);
1894:     PetscSFCreate(comm, &sfOverlapPoint);
1895:     PetscSFSetGraph(sfOverlapPoint, nroots, nleaves, NULL, PETSC_OWN_POINTER, newRemote, PETSC_OWN_POINTER);
1896:     PetscSFDestroy(&sfOverlap);
1897:     PetscSFDestroy(&sfMigration);
1898:     sfMigration = sfOverlapPoint;
1899:   }
1900:   /* Cleanup Partition */
1901:   DMLabelDestroy(&lblPartition);
1902:   DMLabelDestroy(&lblMigration);
1903:   PetscSectionDestroy(&cellPartSection);
1904:   ISDestroy(&cellPart);
1905:   /* Copy discretization */
1906:   DMCopyDisc(dm, *dmParallel);
1907:   /* Create sfNatural */
1908:   if (dm->useNatural) {
1909:     PetscSection section;

1911:     DMSetUseNatural(*dmParallel, PETSC_TRUE);
1912:     DMGetLocalSection(dm, &section);

1914:     /* First DM with useNatural = PETSC_TRUE is considered natural */
1915:     /* sfMigration and sfNatural are respectively the point and dofs SFs mapping to this natural DM */
1916:     /* Compose with a previous sfNatural if present */
1917:     if (dm->sfNatural) {
1918:       PetscSF natSF;

1920:       DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &natSF);
1921:       PetscSFCompose(dm->sfNatural, natSF, &(*dmParallel)->sfNatural);
1922:       PetscSFDestroy(&natSF);
1923:     } else {
1924:       DMPlexCreateGlobalToNaturalSF(*dmParallel, section, sfMigration, &(*dmParallel)->sfNatural);
1925:     }
1926:     /* Compose with a previous sfMigration if present */
1927:     if (dm->sfMigration) {
1928:       PetscSF naturalPointSF;

1930:       PetscSFCompose(dm->sfMigration, sfMigration, &naturalPointSF);
1931:       PetscSFDestroy(&sfMigration);
1932:       sfMigration = naturalPointSF;
1933:     }
1934:   }
1935:   DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *dmParallel);
1936:   /* Cleanup */
1937:   if (sf) {
1938:     *sf = sfMigration;
1939:   } else PetscSFDestroy(&sfMigration);
1940:   PetscSFDestroy(&sfPoint);
1941:   PetscLogEventEnd(DMPLEX_Distribute, dm, 0, 0, 0);
1942:   return 0;
1943: }
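
/*
  Editorial sketch: typical use of DMPlexDistribute() on a serially created mesh, where the distributed
  DM replaces the original. Variable names are illustrative.

    DM      dmDist = NULL;
    PetscSF sfDist = NULL;

    PetscCall(DMPlexDistribute(dm, 0, &sfDist, &dmDist));
    if (dmDist) { // NULL when running on a single rank
      PetscCall(DMDestroy(&dm));
      dm = dmDist;
    }
    PetscCall(PetscSFDestroy(&sfDist));
*/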

1945: /*@C
1946:   DMPlexDistributeOverlap - Add partition overlap to a distributed non-overlapping DM.

1948:   Collective on dm

1950:   Input Parameters:
1951: + dm  - The non-overlapping distributed DMPlex object
1952: - overlap - The overlap of partitions (the same on all ranks)

1954:   Output Parameters:
1955: + sf - The PetscSF used for point distribution
1956: - dmOverlap - The overlapping distributed DMPlex object, or NULL

1958:   Notes:
1959:   If the mesh was not distributed, dmOverlap is set to NULL.

1961:   The user can control the definition of adjacency for the mesh using DMSetAdjacency(). They should choose the combination appropriate for the function
1962:   representation on the mesh.

1964:   Options Database Keys:
1965: + -dm_plex_overlap_labels <name1,name2,...> - List of overlap label names
1966: . -dm_plex_overlap_values <int1,int2,...>   - List of overlap label values
1967: . -dm_plex_overlap_exclude_label <name>     - Label used to exclude points from overlap
1968: - -dm_plex_overlap_exclude_value <int>      - Label value used to exclude points from overlap

1970:   Level: advanced

1972: .seealso: `DMPlexCreate()`, `DMSetAdjacency()`, `DMPlexDistribute()`, `DMPlexCreateOverlapLabel()`, `DMPlexGetOverlap()`
1973: @*/
1974: PetscErrorCode DMPlexDistributeOverlap(DM dm, PetscInt overlap, PetscSF *sf, DM *dmOverlap)
1975: {
1976:   DM_Plex     *mesh = (DM_Plex *)dm->data;
1977:   MPI_Comm     comm;
1978:   PetscMPIInt  size, rank;
1979:   PetscSection rootSection, leafSection;
1980:   IS           rootrank, leafrank;
1981:   DM           dmCoord;
1982:   DMLabel      lblOverlap;
1983:   PetscSF      sfOverlap, sfStratified, sfPoint;


1990:   if (sf) *sf = NULL;
1991:   *dmOverlap = NULL;
1992:   PetscObjectGetComm((PetscObject)dm, &comm);
1993:   MPI_Comm_size(comm, &size);
1994:   MPI_Comm_rank(comm, &rank);
1995:   if (size == 1) return 0;
1996:   {
1997:     // We need to get options for the _already_ distributed mesh, so it must be done here
1998:     PetscInt    overlap;
1999:     const char *prefix;
2000:     char        oldPrefix[PETSC_MAX_PATH_LEN];

2002:     oldPrefix[0] = '\0';
2003:     PetscObjectGetOptionsPrefix((PetscObject)dm, &prefix);
2004:     PetscStrcpy(oldPrefix, prefix);
2005:     PetscObjectAppendOptionsPrefix((PetscObject)dm, "dist_");
2006:     DMPlexGetOverlap(dm, &overlap);
2007:     PetscObjectOptionsBegin((PetscObject)dm);
2008:     DMSetFromOptions_Overlap_Plex(dm, PetscOptionsObject, &overlap);
2009:     PetscOptionsEnd();
2010:     PetscObjectSetOptionsPrefix((PetscObject)dm, oldPrefix[0] == '\0' ? NULL : oldPrefix);
2011:   }
2012:   PetscLogEventBegin(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
2013:   /* Compute point overlap with neighbouring processes on the distributed DM */
2014:   PetscLogEventBegin(DMPLEX_Partition, dm, 0, 0, 0);
2015:   PetscSectionCreate(comm, &rootSection);
2016:   PetscSectionCreate(comm, &leafSection);
2017:   DMPlexDistributeOwnership(dm, rootSection, &rootrank, leafSection, &leafrank);
2018:   if (mesh->numOvLabels) DMPlexCreateOverlapLabelFromLabels(dm, mesh->numOvLabels, mesh->ovLabels, mesh->ovValues, mesh->numOvExLabels, mesh->ovExLabels, mesh->ovExValues, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
2019:   else DMPlexCreateOverlapLabel(dm, overlap, rootSection, rootrank, leafSection, leafrank, &lblOverlap);
2020:   /* Convert overlap label to stratified migration SF */
2021:   DMPlexPartitionLabelCreateSF(dm, lblOverlap, &sfOverlap);
2022:   DMPlexStratifyMigrationSF(dm, sfOverlap, &sfStratified);
2023:   PetscSFDestroy(&sfOverlap);
2024:   sfOverlap = sfStratified;
2025:   PetscObjectSetName((PetscObject)sfOverlap, "Overlap SF");
2026:   PetscSFSetFromOptions(sfOverlap);

2028:   PetscSectionDestroy(&rootSection);
2029:   PetscSectionDestroy(&leafSection);
2030:   ISDestroy(&rootrank);
2031:   ISDestroy(&leafrank);
2032:   PetscLogEventEnd(DMPLEX_Partition, dm, 0, 0, 0);

2034:   /* Build the overlapping DM */
2035:   DMPlexCreate(comm, dmOverlap);
2036:   PetscObjectSetName((PetscObject)*dmOverlap, "Parallel Mesh");
2037:   DMPlexMigrate(dm, sfOverlap, *dmOverlap);
2038:   /* Store the overlap in the new DM */
2039:   DMPlexSetOverlap_Plex(*dmOverlap, dm, overlap);
2040:   /* Build the new point SF */
2041:   DMPlexCreatePointSF(*dmOverlap, sfOverlap, PETSC_FALSE, &sfPoint);
2042:   DMSetPointSF(*dmOverlap, sfPoint);
2043:   DMGetCoordinateDM(*dmOverlap, &dmCoord);
2044:   if (dmCoord) DMSetPointSF(dmCoord, sfPoint);
2045:   DMGetCellCoordinateDM(*dmOverlap, &dmCoord);
2046:   if (dmCoord) DMSetPointSF(dmCoord, sfPoint);
2047:   PetscSFDestroy(&sfPoint);
2048:   /* Cleanup overlap partition */
2049:   DMLabelDestroy(&lblOverlap);
2050:   if (sf) *sf = sfOverlap;
2051:   else PetscSFDestroy(&sfOverlap);
2052:   PetscLogEventEnd(DMPLEX_DistributeOverlap, dm, 0, 0, 0);
2053:   return 0;
2054: }
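
/*
  Editorial sketch: adding one layer of cell overlap to an already distributed, non-overlapping mesh.
  "dm" is assumed to be the result of DMPlexDistribute() above.

    DM      dmOverlap = NULL;
    PetscSF sfOverlap = NULL;

    PetscCall(DMPlexDistributeOverlap(dm, 1, &sfOverlap, &dmOverlap));
    if (dmOverlap) {
      PetscCall(DMDestroy(&dm));
      dm = dmOverlap;
    }
    PetscCall(PetscSFDestroy(&sfOverlap));
*/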

2056: PetscErrorCode DMPlexGetOverlap_Plex(DM dm, PetscInt *overlap)
2057: {
2058:   DM_Plex *mesh = (DM_Plex *)dm->data;

2060:   *overlap = mesh->overlap;
2061:   return 0;
2062: }

2064: PetscErrorCode DMPlexSetOverlap_Plex(DM dm, DM dmSrc, PetscInt overlap)
2065: {
2066:   DM_Plex *mesh    = NULL;
2067:   DM_Plex *meshSrc = NULL;

2070:   mesh          = (DM_Plex *)dm->data;
2071:   mesh->overlap = overlap;
2072:   if (dmSrc) {
2074:     meshSrc = (DM_Plex *)dmSrc->data;
2075:     mesh->overlap += meshSrc->overlap;
2076:   }
2077:   return 0;
2078: }

2080: /*@
2081:   DMPlexGetOverlap - Get the width of the cell overlap

2083:   Not collective

2085:   Input Parameter:
2086: . dm   - The DM

2088:   Output Parameter:
2089: . overlap - the width of the cell overlap

2091:   Level: intermediate

2093: .seealso: `DMPlexSetOverlap()`, `DMPlexDistribute()`
2094: @*/
2095: PetscErrorCode DMPlexGetOverlap(DM dm, PetscInt *overlap)
2096: {
2099:   PetscUseMethod(dm, "DMPlexGetOverlap_C", (DM, PetscInt *), (dm, overlap));
2100:   return 0;
2101: }

2103: /*@
2104:   DMPlexSetOverlap - Set the width of the cell overlap

2106:   Logically collective

2108:   Input Parameters:
2109: + dm      - The DM
2110: . dmSrc   - The DM that produced this one, or NULL
2111: - overlap - the width of the cell overlap

2113:   Note:
2114:   The overlap from dmSrc is added to dm

2116:   Level: intermediate

2118: .seealso: `DMPlexGetOverlap()`, `DMPlexDistribute()`
2119: @*/
2120: PetscErrorCode DMPlexSetOverlap(DM dm, DM dmSrc, PetscInt overlap)
2121: {
2124:   PetscTryMethod(dm, "DMPlexSetOverlap_C", (DM, DM, PetscInt), (dm, dmSrc, overlap));
2125:   return 0;
2126: }
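
/*
  Editorial sketch: recording the overlap width on a derived mesh. Passing the source DM adds its stored
  overlap, as noted above; passing NULL just sets the width. "dmNew" and "dmOld" are illustrative.

    PetscInt ov;

    PetscCall(DMPlexSetOverlap(dmNew, dmOld, 1)); // dmNew's overlap becomes 1 + overlap of dmOld
    PetscCall(DMPlexGetOverlap(dmNew, &ov));
*/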

2128: PetscErrorCode DMPlexDistributeSetDefault_Plex(DM dm, PetscBool dist)
2129: {
2130:   DM_Plex *mesh = (DM_Plex *)dm->data;

2132:   mesh->distDefault = dist;
2133:   return 0;
2134: }

2136: /*@
2137:   DMPlexDistributeSetDefault - Set flag indicating whether the DM should be distributed by default

2139:   Logically collective

2141:   Input Parameters:
2142: + dm   - The DM
2143: - dist - Flag for distribution

2145:   Level: intermediate

2147: .seealso: `DMPlexDistributeGetDefault()`, `DMPlexDistribute()`
2148: @*/
2149: PetscErrorCode DMPlexDistributeSetDefault(DM dm, PetscBool dist)
2150: {
2153:   PetscTryMethod(dm, "DMPlexDistributeSetDefault_C", (DM, PetscBool), (dm, dist));
2154:   return 0;
2155: }

2157: PetscErrorCode DMPlexDistributeGetDefault_Plex(DM dm, PetscBool *dist)
2158: {
2159:   DM_Plex *mesh = (DM_Plex *)dm->data;

2161:   *dist = mesh->distDefault;
2162:   return 0;
2163: }

2165: /*@
2166:   DMPlexDistributeGetDefault - Get flag indicating whether the DM should be distributed by default

2168:   Not collective

2170:   Input Parameter:
2171: . dm   - The DM

2173:   Output Parameter:
2174: . dist - Flag for distribution

2176:   Level: intermediate

2178: .seealso: `DMPlexDistributeSetDefault()`, `DMPlexDistribute()`
2179: @*/
2180: PetscErrorCode DMPlexDistributeGetDefault(DM dm, PetscBool *dist)
2181: {
2184:   PetscUseMethod(dm, "DMPlexDistributeGetDefault_C", (DM, PetscBool *), (dm, dist));
2185:   return 0;
2186: }
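
/*
  Editorial sketch: disabling automatic distribution (e.g. when the caller wants to distribute the mesh
  manually later) and querying the flag. "dm" is illustrative.

    PetscBool dist;

    PetscCall(DMPlexDistributeSetDefault(dm, PETSC_FALSE));
    PetscCall(DMPlexDistributeGetDefault(dm, &dist)); // dist == PETSC_FALSE
*/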

2188: /*@C
2189:   DMPlexGetGatherDM - Get a copy of the DMPlex that gathers all points on the
2190:   root process of the original's communicator.

2192:   Collective on dm

2194:   Input Parameter:
2195: . dm - the original DMPlex object

2197:   Output Parameters:
2198: + sf - the PetscSF used for point distribution (optional)
2199: - gatherMesh - the gathered DM object, or NULL

2201:   Level: intermediate

2203: .seealso: `DMPlexDistribute()`, `DMPlexGetRedundantDM()`
2204: @*/
2205: PetscErrorCode DMPlexGetGatherDM(DM dm, PetscSF *sf, DM *gatherMesh)
2206: {
2207:   MPI_Comm         comm;
2208:   PetscMPIInt      size;
2209:   PetscPartitioner oldPart, gatherPart;

2213:   *gatherMesh = NULL;
2214:   if (sf) *sf = NULL;
2215:   comm = PetscObjectComm((PetscObject)dm);
2216:   MPI_Comm_size(comm, &size);
2217:   if (size == 1) return 0;
2218:   DMPlexGetPartitioner(dm, &oldPart);
2219:   PetscObjectReference((PetscObject)oldPart);
2220:   PetscPartitionerCreate(comm, &gatherPart);
2221:   PetscPartitionerSetType(gatherPart, PETSCPARTITIONERGATHER);
2222:   DMPlexSetPartitioner(dm, gatherPart);
2223:   DMPlexDistribute(dm, 0, sf, gatherMesh);

2225:   DMPlexSetPartitioner(dm, oldPart);
2226:   PetscPartitionerDestroy(&gatherPart);
2227:   PetscPartitionerDestroy(&oldPart);
2228:   return 0;
2229: }
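
/*
  Editorial sketch: gathering a distributed mesh onto the root rank, e.g. for serial post-processing.
  Note that gatherMesh stays NULL on a single rank.

    DM      gatherMesh = NULL;
    PetscSF gatherSF   = NULL;

    PetscCall(DMPlexGetGatherDM(dm, &gatherSF, &gatherMesh));
    if (gatherMesh) {
      // ... rank 0 now holds every point of the original mesh ...
      PetscCall(DMDestroy(&gatherMesh));
    }
    PetscCall(PetscSFDestroy(&gatherSF));
*/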

2231: /*@C
2232:   DMPlexGetRedundantDM - Get a copy of the DMPlex that is completely copied on each process.

2234:   Collective on dm

2236:   Input Parameter:
2237: . dm - the original DMPlex object

2239:   Output Parameters:
2240: + sf - the PetscSF used for point distribution (optional)
2241: - redundantMesh - the redundant DM object, or NULL

2243:   Level: intermediate

2245: .seealso: `DMPlexDistribute()`, `DMPlexGetGatherDM()`
2246: @*/
2247: PetscErrorCode DMPlexGetRedundantDM(DM dm, PetscSF *sf, DM *redundantMesh)
2248: {
2249:   MPI_Comm     comm;
2250:   PetscMPIInt  size, rank;
2251:   PetscInt     pStart, pEnd, p;
2252:   PetscInt     numPoints = -1;
2253:   PetscSF      migrationSF, sfPoint, gatherSF;
2254:   DM           gatherDM, dmCoord;
2255:   PetscSFNode *points;

2259:   *redundantMesh = NULL;
2260:   comm           = PetscObjectComm((PetscObject)dm);
2261:   MPI_Comm_size(comm, &size);
2262:   if (size == 1) {
2263:     PetscObjectReference((PetscObject)dm);
2264:     *redundantMesh = dm;
2265:     if (sf) *sf = NULL;
2266:     return 0;
2267:   }
2268:   DMPlexGetGatherDM(dm, &gatherSF, &gatherDM);
2269:   if (!gatherDM) return 0;
2270:   MPI_Comm_rank(comm, &rank);
2271:   DMPlexGetChart(gatherDM, &pStart, &pEnd);
2272:   numPoints = pEnd - pStart;
2273:   MPI_Bcast(&numPoints, 1, MPIU_INT, 0, comm);
2274:   PetscMalloc1(numPoints, &points);
2275:   PetscSFCreate(comm, &migrationSF);
2276:   for (p = 0; p < numPoints; p++) {
2277:     points[p].index = p;
2278:     points[p].rank  = 0;
2279:   }
2280:   PetscSFSetGraph(migrationSF, pEnd - pStart, numPoints, NULL, PETSC_OWN_POINTER, points, PETSC_OWN_POINTER);
2281:   DMPlexCreate(comm, redundantMesh);
2282:   PetscObjectSetName((PetscObject)*redundantMesh, "Redundant Mesh");
2283:   DMPlexMigrate(gatherDM, migrationSF, *redundantMesh);
2284:   /* This is to express that all points are in the overlap */
2285:   DMPlexSetOverlap_Plex(*redundantMesh, NULL, PETSC_MAX_INT);
2286:   DMPlexCreatePointSF(*redundantMesh, migrationSF, PETSC_FALSE, &sfPoint);
2287:   DMSetPointSF(*redundantMesh, sfPoint);
2288:   DMGetCoordinateDM(*redundantMesh, &dmCoord);
2289:   if (dmCoord) DMSetPointSF(dmCoord, sfPoint);
2290:   PetscSFDestroy(&sfPoint);
2291:   if (sf) {
2292:     PetscSF tsf;

2294:     PetscSFCompose(gatherSF, migrationSF, &tsf);
2295:     DMPlexStratifyMigrationSF(dm, tsf, sf);
2296:     PetscSFDestroy(&tsf);
2297:   }
2298:   PetscSFDestroy(&migrationSF);
2299:   PetscSFDestroy(&gatherSF);
2300:   DMDestroy(&gatherDM);
2301:   DMCopyDisc(dm, *redundantMesh);
2302:   DMPlexCopy_Internal(dm, PETSC_TRUE, PETSC_FALSE, *redundantMesh);
2303:   return 0;
2304: }
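
/*
  Editorial sketch: obtaining a mesh that every rank holds in full. Unlike DMPlexGetGatherDM(), on a
  single rank this returns a reference to the original DM rather than NULL; the SF argument is optional.

    DM redundant = NULL;

    PetscCall(DMPlexGetRedundantDM(dm, NULL, &redundant));
    if (redundant) {
      // ... every rank now sees the complete mesh ...
      PetscCall(DMDestroy(&redundant));
    }
*/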

2306: /*@
2307:   DMPlexIsDistributed - Find out whether this DM is distributed, i.e., whether more than one rank owns some points.

2309:   Collective

2311:   Input Parameter:
2312: . dm      - The DM object

2314:   Output Parameter:
2315: . distributed - Flag whether the DM is distributed

2317:   Level: intermediate

2319:   Notes:
2320:   This currently checks whether at least two ranks own any DAG points.
2321:   This requires MPI_Allreduce() with a single integer.
2322:   The result is not cached, so every call to this routine repeats this global communication.

2324: .seealso: `DMPlexDistribute()`, `DMPlexGetOverlap()`, `DMPlexIsInterpolated()`
2325: @*/
2326: PetscErrorCode DMPlexIsDistributed(DM dm, PetscBool *distributed)
2327: {
2328:   PetscInt    pStart, pEnd, count;
2329:   MPI_Comm    comm;
2330:   PetscMPIInt size;

2334:   PetscObjectGetComm((PetscObject)dm, &comm);
2335:   MPI_Comm_size(comm, &size);
2336:   if (size == 1) {
2337:     *distributed = PETSC_FALSE;
2338:     return 0;
2339:   }
2340:   DMPlexGetChart(dm, &pStart, &pEnd);
2341:   count = (pEnd - pStart) > 0 ? 1 : 0;
2342:   MPI_Allreduce(MPI_IN_PLACE, &count, 1, MPIU_INT, MPI_SUM, comm);
2343:   *distributed = count > 1 ? PETSC_TRUE : PETSC_FALSE;
2344:   return 0;
2345: }
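
/*
  Editorial sketch: distributing a mesh only when it is not already distributed; remember that the query
  itself is collective.

    PetscBool distributed;

    PetscCall(DMPlexIsDistributed(dm, &distributed));
    if (!distributed) {
      DM dmDist = NULL;

      PetscCall(DMPlexDistribute(dm, 0, NULL, &dmDist));
      if (dmDist) {
        PetscCall(DMDestroy(&dm));
        dm = dmDist;
      }
    }
*/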

2347: /*@C
2348:   DMPlexDistributionSetName - Set the name of the specific parallel distribution

2350:   Input Parameters:
2351: + dm   - The DM
2352: - name - The name of the specific parallel distribution

2354:   Note:
2355:   If the distribution name is set when saving, DMPlexTopologyView() saves the plex's
2356:   parallel distribution (i.e., partition, ownership, and local ordering of points) under
2357:   this name. Conversely, if the distribution name is set when loading, DMPlexTopologyLoad()
2358:   loads the parallel distribution stored in the file under this name.

2360:   Level: developer

2362: .seealso: `DMPlexDistributionGetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2363: @*/
2364: PetscErrorCode DMPlexDistributionSetName(DM dm, const char name[])
2365: {
2366:   DM_Plex *mesh = (DM_Plex *)dm->data;

2370:   PetscFree(mesh->distributionName);
2371:   PetscStrallocpy(name, &mesh->distributionName);
2372:   return 0;
2373: }
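
/*
  Editorial sketch: naming the parallel distribution before saving the topology, so that the same
  distribution can later be recovered by DMPlexTopologyLoad() under that name. The distribution name,
  file name, and HDF5 viewer are assumptions for illustration (requires a PETSc build with HDF5).

    PetscViewer viewer;

    PetscCall(DMPlexDistributionSetName(dm, "my-distribution"));
    PetscCall(PetscViewerHDF5Open(PetscObjectComm((PetscObject)dm), "mesh.h5", FILE_MODE_WRITE, &viewer));
    PetscCall(DMPlexTopologyView(dm, viewer));
    PetscCall(PetscViewerDestroy(&viewer));
*/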

2375: /*@C
2376:   DMPlexDistributionGetName - Retrieve the name of the specific parallel distribution

2378:   Input Parameter:
2379: . dm - The DM

2381:   Output Parameter:
2382: . name - The name of the specific parallel distribution

2384:   Note:
2385:   If the distribution name is set when saving, DMPlexTopologyView() saves the plex's
2386:   parallel distribution (i.e., partition, ownership, and local ordering of points) under
2387:   this name. Conversely, if the distribution name is set when loading, DMPlexTopologyLoad()
2388:   loads the parallel distribution stored in the file under this name.

2390:   Level: developer

2392: .seealso: `DMPlexDistributionSetName()`, `DMPlexTopologyView()`, `DMPlexTopologyLoad()`
2393: @*/
2394: PetscErrorCode DMPlexDistributionGetName(DM dm, const char *name[])
2395: {
2396:   DM_Plex *mesh = (DM_Plex *)dm->data;

2400:   *name = mesh->distributionName;
2401:   return 0;
2402: }