Actual source code: mpiu.c


#include <petscsys.h>
#include <petsc/private/petscimpl.h>
/*
    Note that tag of 0 is ok because comm is a private communicator
  generated below just for these routines.
*/

PETSC_INTERN PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm comm, int ng)
{
  PetscMPIInt rank, size, tag = 0;
  MPI_Status  status;

  MPI_Comm_size(comm, &size);
  if (size == 1) return 0;
  MPI_Comm_rank(comm, &rank);
  if (rank) MPI_Recv(NULL, 0, MPI_INT, rank - 1, tag, comm, &status);
  /* Send to the next process in the group unless we are the last process */
  if ((rank % ng) < ng - 1 && rank != size - 1) MPI_Send(NULL, 0, MPI_INT, rank + 1, tag, comm);
  return 0;
}

PETSC_INTERN PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm comm, int ng)
{
  PetscMPIInt rank, size, tag = 0;
  MPI_Status  status;

  MPI_Comm_rank(comm, &rank);
  MPI_Comm_size(comm, &size);
  if (size == 1) return 0;

  /* Send to the first process in the next group */
  if ((rank % ng) == ng - 1 || rank == size - 1) MPI_Send(NULL, 0, MPI_INT, (rank + 1) % size, tag, comm);
  if (rank == 0) MPI_Recv(NULL, 0, MPI_INT, size - 1, tag, comm, &status);
  return 0;
}
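
/*
   Worked example of the handshake above. With size = 5 and ng = 2:

     Begin: rank 0 has no receive and, since 0 % 2 < 1, immediately sends to rank 1,
            so ranks {0,1} execute first; ranks 2, 3 and 4 block in MPI_Recv.
     End:   rank 1 (last in its group) sends to rank 2; rank 2's Begin receive then
            completes and it forwards to rank 3, so {2,3} run next; rank 3's End
            releases rank 4; finally rank 4 (== size - 1) sends to rank 0, whose
            End receives it, closing the cycle.
*/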

/* ---------------------------------------------------------------------*/
/*
    Petsc_Seq_keyval is the keyval for an MPI attribute, attached to the given
  communicator, that stores the private communicator used by the sequential phase
  code below.
*/
PetscMPIInt Petsc_Seq_keyval = MPI_KEYVAL_INVALID;

/*@
   PetscSequentialPhaseBegin - Begins a sequential section of code.

   Collective

   Input Parameters:
+  comm - Communicator to sequentialize.
-  ng   - Number of processes in a group.  This many processes are allowed to execute
   at the same time (usually 1)

   Level: intermediate

   Notes:
   `PetscSequentialPhaseBegin()` and `PetscSequentialPhaseEnd()` provide a
   way to force a section of code to be executed by the processes in
   rank order.  Typically, this is done with
.vb
      PetscSequentialPhaseBegin(comm, 1);
      <code to be executed sequentially>
      PetscSequentialPhaseEnd(comm, 1);
.ve

   To ensure that output from different MPI ranks is properly ordered, use `PetscSynchronizedPrintf()` rather than these routines.

.seealso: `PetscSequentialPhaseEnd()`, `PetscSynchronizedPrintf()`
@*/
PetscErrorCode PetscSequentialPhaseBegin(MPI_Comm comm, int ng)
{
  PetscMPIInt size;
  MPI_Comm    local_comm, *addr_local_comm;

  PetscSysInitializePackage();
  MPI_Comm_size(comm, &size);
  if (size == 1) return 0;

  /* Get the private communicator for the sequential operations */
  if (Petsc_Seq_keyval == MPI_KEYVAL_INVALID) MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &Petsc_Seq_keyval, NULL);

  MPI_Comm_dup(comm, &local_comm);
  PetscMalloc1(1, &addr_local_comm);

  *addr_local_comm = local_comm;

  MPI_Comm_set_attr(comm, Petsc_Seq_keyval, (void *)addr_local_comm);
  PetscSequentialPhaseBegin_Private(local_comm, ng);
  return 0;
}
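
/*
   Illustrative sketch of the MPI attribute round trip used above: Begin caches a
   pointer on the user's communicator under Petsc_Seq_keyval, and End later retrieves
   it with MPI_Comm_get_attr().  The function and variable names below are hypothetical.
*/
#if 0
static int demo_attribute_round_trip(MPI_Comm comm)
{
  int   keyval, flag, value = 42;
  void *attr;

  MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &keyval, NULL);
  MPI_Comm_set_attr(comm, keyval, (void *)&value); /* cache a pointer on comm */
  MPI_Comm_get_attr(comm, keyval, &attr, &flag);   /* flag is 1 since the attribute was set */
  if (flag) printf("stored value: %d\n", *(int *)attr);
  MPI_Comm_delete_attr(comm, keyval);
  MPI_Comm_free_keyval(&keyval);
  return 0;
}
#endif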

/*@
   PetscSequentialPhaseEnd - Ends a sequential section of code.

   Collective

   Input Parameters:
+  comm - Communicator to sequentialize.
-  ng   - Number of processes in a group.  This many processes are allowed to execute
   at the same time (usually 1)

   Level: intermediate

   Note:
   See `PetscSequentialPhaseBegin()` for more details.

.seealso: `PetscSequentialPhaseBegin()`
@*/
PetscErrorCode PetscSequentialPhaseEnd(MPI_Comm comm, int ng)
{
  PetscMPIInt size, flag;
  MPI_Comm    local_comm, *addr_local_comm;

  MPI_Comm_size(comm, &size);
  if (size == 1) return 0;

  MPI_Comm_get_attr(comm, Petsc_Seq_keyval, (void **)&addr_local_comm, &flag);
  PetscCheck(flag, PETSC_COMM_SELF, PETSC_ERR_ARG_INCOMP, "Wrong MPI communicator; must pass in one used with PetscSequentialPhaseBegin()");
  local_comm = *addr_local_comm;

  PetscSequentialPhaseEnd_Private(local_comm, ng);

  PetscFree(addr_local_comm);
  MPI_Comm_free(&local_comm);
  MPI_Comm_delete_attr(comm, Petsc_Seq_keyval);
  return 0;
}
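
/*
   Hypothetical usage sketch: each rank appends a line to a shared file in rank order.
   The function and file names are illustrative; for ordered printing to stdout prefer
   PetscSynchronizedPrintf() as noted in the manual page above.
*/
#if 0
static PetscErrorCode demo_write_in_rank_order(MPI_Comm comm)
{
  PetscMPIInt rank;
  FILE       *fp;

  MPI_Comm_rank(comm, &rank);
  PetscSequentialPhaseBegin(comm, 1); /* at most one rank at a time past this point */
  fp = fopen("ranklog.txt", "a");
  if (fp) {
    fprintf(fp, "rank %d finished its local setup\n", (int)rank);
    fclose(fp);
  }
  PetscSequentialPhaseEnd(comm, 1); /* pass the token to the next rank */
  return 0;
}
#endif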

/*@C
  PetscGlobalMinMaxInt - Get the global min/max from local min/max input

  Collective

  Input Parameters:
+ comm      - The MPI communicator
- minMaxVal - An array with the local min and max

  Output Parameter:
. minMaxValGlobal - An array with the global min and max

  Level: beginner

.seealso: `PetscSplitOwnership()`, `PetscGlobalMinMaxReal()`
@*/
PetscErrorCode PetscGlobalMinMaxInt(MPI_Comm comm, const PetscInt minMaxVal[2], PetscInt minMaxValGlobal[2])
{
  PetscInt sendbuf[3], recvbuf[3];

  sendbuf[0] = -minMaxVal[0]; /* Note that -PETSC_MIN_INT = PETSC_MIN_INT */
  sendbuf[1] = minMaxVal[1];
  sendbuf[2] = (minMaxVal[0] == PETSC_MIN_INT) ? 1 : 0; /* Is minMaxVal[0] equal to PETSC_MIN_INT? */
  MPI_Allreduce(sendbuf, recvbuf, 3, MPIU_INT, MPI_MAX, comm);
  minMaxValGlobal[0] = recvbuf[2] ? PETSC_MIN_INT : -recvbuf[0];
  minMaxValGlobal[1] = recvbuf[1];
  return 0;
}
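
/*
   The routine above folds the minimum into the same MPI_MAX reduction by using
   min(x) = -max(-x); the third slot flags the one value, PETSC_MIN_INT, whose
   negation overflows.  A hypothetical caller reducing a local range:
*/
#if 0
static PetscErrorCode demo_global_range(MPI_Comm comm, PetscInt nlocal, const PetscInt vals[])
{
  PetscInt localMinMax[2] = {PETSC_MAX_INT, PETSC_MIN_INT}, globalMinMax[2];

  for (PetscInt i = 0; i < nlocal; ++i) {
    if (vals[i] < localMinMax[0]) localMinMax[0] = vals[i];
    if (vals[i] > localMinMax[1]) localMinMax[1] = vals[i];
  }
  PetscGlobalMinMaxInt(comm, localMinMax, globalMinMax);
  /* globalMinMax[0] and globalMinMax[1] now hold the min and max over all ranks */
  return 0;
}
#endif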

/*@C
  PetscGlobalMinMaxReal - Get the global min/max from local min/max input

  Collective

  Input Parameters:
+ comm      - The MPI communicator
- minMaxVal - An array with the local min and max

  Output Parameter:
. minMaxValGlobal - An array with the global min and max

  Level: beginner

.seealso: `PetscSplitOwnership()`, `PetscGlobalMinMaxInt()`
@*/
PetscErrorCode PetscGlobalMinMaxReal(MPI_Comm comm, const PetscReal minMaxVal[2], PetscReal minMaxValGlobal[2])
{
  PetscReal sendbuf[2];

  sendbuf[0] = -minMaxVal[0];
  sendbuf[1] = minMaxVal[1];
  MPIU_Allreduce(sendbuf, minMaxValGlobal, 2, MPIU_REAL, MPIU_MAX, comm);
  minMaxValGlobal[0] = -minMaxValGlobal[0];
  return 0;
}