Actual source code: ex29.c

static char help[] = "Test scalable partitioning on distributed meshes\n\n";

#include <petscdmplex.h>

enum {
  STAGE_LOAD,
  STAGE_DISTRIBUTE,
  STAGE_REFINE,
  STAGE_OVERLAP
};
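/* Indices into the AppCtx stages[] array below; with -log_view, each mesh phase is timed as a separate stage */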

typedef struct {
  PetscLogEvent createMeshEvent;
  PetscLogStage stages[4];
  /* Domain and mesh definition */
  PetscInt overlap; /* The cell overlap to use during partitioning */
} AppCtx;

PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options)
{
  PetscFunctionBeginUser;
  options->overlap = 0;

  PetscOptionsBegin(comm, "", "Meshing Problem Options", "DMPLEX");
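  /* The trailing 0 passed to PetscOptionsBoundedInt() is a lower bound, so -overlap must be a nonnegative integer */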
  PetscCall(PetscOptionsBoundedInt("-overlap", "The cell overlap for partitioning", "ex29.c", options->overlap, &options->overlap, NULL, 0));
  PetscOptionsEnd();

  PetscCall(PetscLogEventRegister("CreateMesh", DM_CLASSID, &options->createMeshEvent));
  PetscCall(PetscLogStageRegister("MeshLoad", &options->stages[STAGE_LOAD]));
  PetscCall(PetscLogStageRegister("MeshDistribute", &options->stages[STAGE_DISTRIBUTE]));
  PetscCall(PetscLogStageRegister("MeshRefine", &options->stages[STAGE_REFINE]));
  PetscCall(PetscLogStageRegister("MeshOverlap", &options->stages[STAGE_OVERLAP]));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm)
{
  PetscMPIInt rank, size;

  PetscFunctionBeginUser;
  PetscCall(PetscLogEventBegin(user->createMeshEvent, 0, 0, 0, 0));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(PetscLogStagePush(user->stages[STAGE_LOAD]));
  PetscCall(DMCreate(comm, dm));
  PetscCall(DMSetType(*dm, DMPLEX));
  PetscCall(DMSetFromOptions(*dm));
  PetscCall(PetscLogStagePop());
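  /* DMPlexDistribute() creates a new, redistributed DM, or sets pdm to NULL when nothing is
     distributed (e.g. on a single rank); on success the original DM is destroyed and replaced */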
  {
    DM               pdm = NULL;
    PetscPartitioner part;

    PetscCall(DMPlexGetPartitioner(*dm, &part));
    PetscCall(PetscPartitionerSetFromOptions(part));
    /* Distribute mesh over processes */
    PetscCall(PetscLogStagePush(user->stages[STAGE_DISTRIBUTE]));
    PetscCall(DMPlexDistribute(*dm, 0, NULL, &pdm));
    if (pdm) {
      PetscCall(DMDestroy(dm));
      *dm = pdm;
    }
    PetscCall(PetscLogStagePop());
  }
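  /* Temporarily switching the options prefix to "post_" makes the DMSetFromOptions() call below
     consume only options such as -post_dm_refine, so refinement happens after distribution */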
  PetscCall(PetscLogStagePush(user->stages[STAGE_REFINE]));
  PetscCall(PetscObjectSetOptionsPrefix((PetscObject)*dm, "post_"));
  PetscCall(DMSetFromOptions(*dm));
  PetscCall(PetscObjectSetOptionsPrefix((PetscObject)*dm, ""));
  PetscCall(PetscLogStagePop());
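  /* When -overlap is positive, also exercise DMPlexDistributeOverlap(), which returns a new DM
     carrying one extra layer of ghost cells, or NULL when no overlap was added */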
  if (user->overlap) {
    DM odm = NULL;
    /* Add the level-1 overlap to refined mesh */
    PetscCall(PetscLogStagePush(user->stages[STAGE_OVERLAP]));
    PetscCall(DMPlexDistributeOverlap(*dm, 1, NULL, &odm));
    if (odm) {
      PetscCall(DMView(odm, PETSC_VIEWER_STDOUT_WORLD));
      PetscCall(DMDestroy(dm));
      *dm = odm;
    }
    PetscCall(PetscLogStagePop());
  }
  PetscCall(DMViewFromOptions(*dm, NULL, "-dm_view"));
  PetscCall(PetscLogEventEnd(user->createMeshEvent, 0, 0, 0, 0));
  PetscFunctionReturn(PETSC_SUCCESS);
}
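/* CreateMesh() distributes the mesh and then refines it, so the second DMPlexDistribute() call in
   main() repartitions an already-distributed, refined mesh, adding user.overlap layers of ghost
   cells; partitioning in parallel like this is the scalable partitioning named in the help string */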

int main(int argc, char **argv)
{
  DM               dm, pdm;
  AppCtx           user; /* user-defined work context */
  PetscPartitioner part;

  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, NULL, help));
  PetscCall(ProcessOptions(PETSC_COMM_WORLD, &user));
  PetscCall(CreateMesh(PETSC_COMM_WORLD, &user, &dm));
  PetscCall(DMPlexGetPartitioner(dm, &part));
  PetscCall(PetscPartitionerSetFromOptions(part));
  PetscCall(DMPlexDistribute(dm, user.overlap, NULL, &pdm));
  if (pdm) PetscCall(DMViewFromOptions(pdm, NULL, "-pdm_view"));
  PetscCall(DMDestroy(&dm));
  PetscCall(DMDestroy(&pdm));
  PetscCall(PetscFinalize());
  return 0;
}
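/* The block below is read by the PETSc test harness: each "test:" entry runs the example with the
   listed command-line arguments (on nsize MPI ranks, default 1) and compares its output against a
   stored reference file named after the suffix */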

/*TEST

  test:
    suffix: 0
    requires: ctetgen
    args: -dm_plex_dim 3 -post_dm_refine 2 -petscpartitioner_type simple -dm_view
  test:
    suffix: 1
    args: -dm_plex_dim 3 -dm_plex_simplex 0 -post_dm_refine 2 -petscpartitioner_type simple -dm_view
  test:
    suffix: quad_0
    nsize: 2
    args: -dm_plex_dim 3 -dm_plex_simplex 0 -post_dm_refine 2 -petscpartitioner_type simple -dm_view -pdm_view

TEST*/