Actual source code: overlapsplit.c

/*
 * Increase the overlap of a 'big' subdomain across several processor cores
 *
 * Author: Fande Kong <fdkong.jd@gmail.com>
 */

#include <petscsf.h>
#include <petsc/private/matimpl.h>

/*
 * Increase the overlap of a sub-matrix across a sub-communicator;
 * the sub-matrix could be a graph or a numerical matrix.
 */
PetscErrorCode MatIncreaseOverlapSplit_Single(Mat mat, IS *is, PetscInt ov)
{
  PetscInt        i, nindx, *indices_sc, *indices_ov, localsize, *localsizes_sc, localsize_tmp;
  PetscInt       *indices_ov_rd, nroots, nleaves, *localoffsets, *indices_recv, *sources_sc, *sources_sc_rd;
  const PetscInt *indices;
  PetscMPIInt     srank, ssize, issamecomm, k, grank;
  IS              is_sc, allis_sc, partitioning;
  MPI_Comm        gcomm, dcomm, scomm;
  PetscSF         sf;
  PetscSFNode    *remote;
  Mat            *smat;
  MatPartitioning part;

  /* get a sub-communicator before calling the individual MatIncreaseOverlap(),
   * since the sub-communicator may be changed
   */
  PetscObjectGetComm((PetscObject)(*is), &dcomm);
  /* make a copy before the original one is deleted */
  PetscCommDuplicate(dcomm, &scomm, NULL);
  /* get the global communicator, on which mat should be a global matrix */
  PetscObjectGetComm((PetscObject)mat, &gcomm);
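  /* first grow the index set by ov layers using the matrix type's own MatIncreaseOverlap()
   * implementation on the global communicator; the rest of this routine only redistributes
   * the result over the sub-communicator
   */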
  PetscUseTypeMethod(mat, increaseoverlap, 1, is, ov);
  MPI_Comm_compare(gcomm, scomm, &issamecomm);
  /* if the sub-communicator is the same as the global communicator,
   * the user does not want to use a sub-communicator
   */
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    PetscCommDestroy(&scomm);
    return 0;
  }
  /* if the sub-communicator is PETSC_COMM_SELF,
   * the user also does not care about the sub-communicator
   */
  MPI_Comm_compare(scomm, PETSC_COMM_SELF, &issamecomm);
  if (issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) {
    PetscCommDestroy(&scomm);
    return 0;
  }
  MPI_Comm_rank(scomm, &srank);
  MPI_Comm_size(scomm, &ssize);
  MPI_Comm_rank(gcomm, &grank);
  /* create a new IS based on the sub-communicator,
   * since the old IS is often based on PETSC_COMM_SELF
   */
  ISGetLocalSize(*is, &nindx);
  PetscMalloc1(nindx, &indices_sc);
  ISGetIndices(*is, &indices);
  PetscArraycpy(indices_sc, indices, nindx);
  ISRestoreIndices(*is, &indices);
  /* we do not need the original IS any more */
  ISDestroy(is);
  /* create an index set based on the sub-communicator */
  ISCreateGeneral(scomm, nindx, indices_sc, PETSC_OWN_POINTER, &is_sc);
  /* gather all indices within the sub-communicator */
  ISAllGather(is_sc, &allis_sc);
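  /* allis_sc is the same on every rank of scomm: the concatenation of all local index sets;
   * only the root makes use of it below
   */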
  ISDestroy(&is_sc);
  /* gather local sizes */
  PetscMalloc1(ssize, &localsizes_sc);
  /* get individual local sizes for all index sets */
  MPI_Gather(&nindx, 1, MPIU_INT, localsizes_sc, 1, MPIU_INT, 0, scomm);
  /* only the root does these computations */
  if (!srank) {
    /* get the local size of the big index set */
    ISGetLocalSize(allis_sc, &localsize);
    PetscCalloc2(localsize, &indices_ov, localsize, &sources_sc);
    PetscCalloc2(localsize, &indices_ov_rd, localsize, &sources_sc_rd);
    ISGetIndices(allis_sc, &indices);
    PetscArraycpy(indices_ov, indices, localsize);
    ISRestoreIndices(allis_sc, &indices);
    ISDestroy(&allis_sc);
    /* assign corresponding sources */
    localsize_tmp = 0;
    for (k = 0; k < ssize; k++) {
      for (i = 0; i < localsizes_sc[k]; i++) sources_sc[localsize_tmp++] = k;
    }
    /* record where the indices come from */
    PetscSortIntWithArray(localsize, indices_ov, sources_sc);
    /* count local sizes for the reduced indices */
    PetscArrayzero(localsizes_sc, ssize);
    /* initialize the first entry */
    if (localsize) {
      indices_ov_rd[0] = indices_ov[0];
      sources_sc_rd[0] = sources_sc[0];
      localsizes_sc[sources_sc[0]]++;
    }
    localsize_tmp = 1;
    /* remove duplicate integers */
    for (i = 1; i < localsize; i++) {
      if (indices_ov[i] != indices_ov[i - 1]) {
        indices_ov_rd[localsize_tmp]   = indices_ov[i];
        sources_sc_rd[localsize_tmp++] = sources_sc[i];
        localsizes_sc[sources_sc[i]]++;
      }
    }
    PetscFree2(indices_ov, sources_sc);
    PetscCalloc1(ssize + 1, &localoffsets);
    for (k = 0; k < ssize; k++) localoffsets[k + 1] = localoffsets[k] + localsizes_sc[k];
    nleaves = localoffsets[ssize];
    PetscArrayzero(localoffsets, ssize + 1);
    nroots = localsizes_sc[srank];
    PetscMalloc1(nleaves, &remote);
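    /* build the communication graph: leaf i (on the root) is the i-th deduplicated index and
     * is rooted on the rank that contributed it; localoffsets, zeroed above, now serves as a
     * running cursor giving the slot of that index in the destination rank's indices_recv
     */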
    for (i = 0; i < nleaves; i++) {
      remote[i].rank  = sources_sc_rd[i];
      remote[i].index = localoffsets[sources_sc_rd[i]]++;
    }
    PetscFree(localoffsets);
  } else {
    ISDestroy(&allis_sc);
    /* allocate a zero-length array to avoid using an uninitialized pointer */
    PetscCalloc1(0, &remote);
    nleaves       = 0;
    indices_ov_rd = NULL;
    sources_sc_rd = NULL;
  }
  /* scatter the sizes to everybody */
  MPI_Scatter(localsizes_sc, 1, MPIU_INT, &nroots, 1, MPIU_INT, 0, scomm);
  PetscFree(localsizes_sc);
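  /* every rank now knows how many deduplicated indices were credited to it (nroots);
   * those indices arrive in indices_recv through the star forest below
   */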
  PetscCalloc1(nroots, &indices_recv);
  /* send the data back to everybody */
  PetscSFCreate(scomm, &sf);
  PetscSFSetType(sf, PETSCSFBASIC);
  PetscSFSetFromOptions(sf);
  PetscSFSetGraph(sf, nroots, nleaves, NULL, PETSC_OWN_POINTER, remote, PETSC_OWN_POINTER);
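  /* the reduce pushes each deduplicated index from the root's leaves into the slot reserved
   * for it on the contributing rank; MPI_REPLACE is sufficient since every root slot receives
   * exactly one value
   */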
  PetscSFReduceBegin(sf, MPIU_INT, indices_ov_rd, indices_recv, MPI_REPLACE);
  PetscSFReduceEnd(sf, MPIU_INT, indices_ov_rd, indices_recv, MPI_REPLACE);
  PetscSFDestroy(&sf);
  PetscFree2(indices_ov_rd, sources_sc_rd);
  ISCreateGeneral(scomm, nroots, indices_recv, PETSC_OWN_POINTER, &is_sc);
  MatCreateSubMatricesMPI(mat, 1, &is_sc, &is_sc, MAT_INITIAL_MATRIX, &smat);
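  /* smat[0] is the parallel sub-matrix of mat on scomm, with rows and columns given by is_sc;
   * it only serves as the adjacency graph for the repartitioning below
   */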
  ISDestroy(&allis_sc);
  /* create a partitioner to repartition the sub-matrix */
  MatPartitioningCreate(scomm, &part);
  MatPartitioningSetAdjacency(part, smat[0]);
#if defined(PETSC_HAVE_PARMETIS)
  /* if a ParMETIS installation exists, we try to use ParMETIS
   * because its repartitioning routine possibly works better
   */
  MatPartitioningSetType(part, MATPARTITIONINGPARMETIS);
  /* try to use the repartition function instead of the partition function */
  MatPartitioningParmetisSetRepartition(part);
#else
  /* we at least provide a default partitioner to rebalance the computation */
  MatPartitioningSetType(part, MATPARTITIONINGAVERAGE);
#endif
  /* the user can pick any partitioner via an option */
  MatPartitioningSetFromOptions(part);
  MatPartitioningApply(part, &partitioning);
  MatPartitioningDestroy(&part);
  MatDestroy(&(smat[0]));
  PetscFree(smat);
  /* get the local rows, including the overlap */
  ISBuildTwoSided(partitioning, is_sc, is);
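  /* the new *is holds, on each rank of scomm, the global rows of the overlapped subdomain
   * assigned to it by the repartitioning
   */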
  ISDestroy(&is_sc);
  ISDestroy(&partitioning);
  PetscCommDestroy(&scomm);
  return 0;
}
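
Below is a minimal sketch of how this routine might be driven from user code. It is not part of overlapsplit.c: the helper name GrowSharedSubdomain, the choice of a parallel AIJ matrix A, the pairing of consecutive ranks into a sub-communicator, and the use of the locally owned rows as the initial subdomain are all illustrative assumptions. The sketch goes through the public MatIncreaseOverlapSplit(), which forwards each index set to MatIncreaseOverlapSplit_Single().

#include <petscmat.h>

/* Hypothetical driver: grow a subdomain shared by a small group of ranks by 'ov' layers.
 * The rank pairing and the initial row selection are assumptions made for this sketch. */
static PetscErrorCode GrowSharedSubdomain(Mat A, PetscInt ov, IS *rows)
{
  MPI_Comm    subcomm;
  PetscMPIInt grank;
  PetscInt    rstart, rend;

  PetscFunctionBegin;
  /* pair consecutive ranks so that each 'big' subdomain is shared by two cores */
  PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &grank));
  PetscCallMPI(MPI_Comm_split(PETSC_COMM_WORLD, grank / 2, grank, &subcomm));
  /* start from the locally owned rows of A, but let the index set live on the sub-communicator */
  PetscCall(MatGetOwnershipRange(A, &rstart, &rend));
  PetscCall(ISCreateStride(subcomm, rend - rstart, rstart, 1, rows));
  /* grow the subdomain by 'ov' layers; MatIncreaseOverlapSplit() forwards the index set to
     MatIncreaseOverlapSplit_Single(), which also repartitions it across subcomm */
  PetscCall(MatIncreaseOverlapSplit(A, 1, rows, ov));
  PetscCallMPI(MPI_Comm_free(&subcomm));
  PetscFunctionReturn(PETSC_SUCCESS);
}

The caller owns the index set returned in rows and destroys it with ISDestroy() once the overlapped subdomain is no longer needed.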