Actual source code: ex8.c

static char help[] = "Test VecScatterCreateToZero, VecScatterCreateToAll\n\n";

#include <petscvec.h>
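
/* Added helper (hypothetical, not in the original test): verify that the local
   part of v holds scale*i at each global index i it owns. It is silent on
   success, so the harness's output diffs are unaffected. Exact floating-point
   comparison is fine here because all values are small integers. */
static PetscErrorCode CheckVec(Vec v, PetscScalar scale)
{
  const PetscScalar *a;
  PetscInt           i, lo, hi;

  PetscFunctionBeginUser;
  PetscCall(VecGetOwnershipRange(v, &lo, &hi));
  PetscCall(VecGetArrayRead(v, &a));
  for (i = lo; i < hi; i++) PetscCheck(a[i - lo] == scale * (PetscScalar)i, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unexpected value at global index %" PetscInt_FMT, i);
  PetscCall(VecRestoreArrayRead(v, &a));
  PetscFunctionReturn(PETSC_SUCCESS);
}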
int main(int argc, char **argv)
{
  PetscInt    i, N = 10, n = PETSC_DECIDE, low, high, onlylocal = -1;
  PetscMPIInt size, rank;
  Vec         x, y;
  VecScatter  vscat;

  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, (char *)0, help));
  PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size));
  PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
  PetscCall(PetscOptionsGetInt(NULL, NULL, "-n", &N, NULL));

  /* Trigger the special case in VecScatterCreateToAll that handles the one-to-all pattern */
  PetscCall(PetscOptionsGetInt(NULL, NULL, "-onlylocal", &onlylocal, NULL));
  if (onlylocal >= 0 && onlylocal < size) n = (rank == onlylocal ? N : 0);

  PetscCall(VecCreate(PETSC_COMM_WORLD, &x));
  PetscCall(VecSetFromOptions(x));
  PetscCall(VecSetSizes(x, n, N));
  PetscCall(VecGetOwnershipRange(x, &low, &high));
  PetscCall(PetscObjectSetName((PetscObject)x, "x"));
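
  /* Added check (sketch, not in the original test): with -onlylocal r, rank r
     should own all N entries and every other rank none. */
  if (onlylocal >= 0 && onlylocal < size) PetscCheck(high - low == (rank == onlylocal ? N : 0), PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unexpected local size %" PetscInt_FMT, high - low);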

  /*-------------------------------------*/
  /*       VecScatterCreateToZero        */
  /*-------------------------------------*/

  /* MPI vec x = [0, 1, 2, ..., N-1] */
  for (i = low; i < high; i++) PetscCall(VecSetValue(x, i, (PetscScalar)i, INSERT_VALUES));
  PetscCall(VecAssemblyBegin(x));
  PetscCall(VecAssemblyEnd(x));

  PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nTesting VecScatterCreateToZero\n"));
  PetscCall(VecScatterCreateToZero(x, &vscat, &y));
  PetscCall(PetscObjectSetName((PetscObject)y, "y"));
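
  /* Added check (not in the original test): VecScatterCreateToZero() creates a
     sequential y that has length N on rank 0 and length 0 on all other ranks. */
  {
    PetscInt ny;
    PetscCall(VecGetLocalSize(y, &ny));
    PetscCheck(ny == (rank == 0 ? N : 0), PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unexpected local size %" PetscInt_FMT " of y", ny);
  }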

  /* Test PetscSFBcastAndOp with op = MPI_REPLACE, which does y = x on rank 0 */
  PetscCall(VecScatterBegin(vscat, x, y, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(VecScatterEnd(vscat, x, y, INSERT_VALUES, SCATTER_FORWARD));
  if (rank == 0) PetscCall(VecView(y, PETSC_VIEWER_STDOUT_SELF));
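
  /* Added check (sketch): y should now be [0, 1, ..., N-1] on rank 0 */
  PetscCall(CheckVec(y, 1.0));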

  /* Test PetscSFBcastAndOp with op = MPI_SUM, which does y += x */
  PetscCall(VecScatterBegin(vscat, x, y, ADD_VALUES, SCATTER_FORWARD));
  PetscCall(VecScatterEnd(vscat, x, y, ADD_VALUES, SCATTER_FORWARD));
  if (rank == 0) PetscCall(VecView(y, PETSC_VIEWER_STDOUT_SELF));
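
  /* Added check (sketch): after the second forward scatter, y holds 2*i on rank 0 */
  PetscCall(CheckVec(y, 2.0));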

  /* Test PetscSFReduce with op = MPI_REPLACE, which does x = y */
  PetscCall(VecScatterBegin(vscat, y, x, INSERT_VALUES, SCATTER_REVERSE));
  PetscCall(VecScatterEnd(vscat, y, x, INSERT_VALUES, SCATTER_REVERSE));
  PetscCall(VecView(x, PETSC_VIEWER_STDOUT_WORLD));
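
  /* Added check (sketch): the reverse insert copied y back, so x holds 2*i everywhere */
  PetscCall(CheckVec(x, 2.0));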

  /* Test PetscSFReduce with op = MPI_SUM, which does x += y on x's local part on rank 0 */
  PetscCall(VecScatterBegin(vscat, y, x, ADD_VALUES, SCATTER_REVERSE_LOCAL));
  PetscCall(VecScatterEnd(vscat, y, x, ADD_VALUES, SCATTER_REVERSE_LOCAL));
  PetscCall(VecView(x, PETSC_VIEWER_STDOUT_WORLD));
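
  /* Added check (sketch): only rank 0's local part was doubled again (4*i); other ranks still hold 2*i */
  PetscCall(CheckVec(x, rank == 0 ? 4.0 : 2.0));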

  PetscCall(VecDestroy(&y));
  PetscCall(VecScatterDestroy(&vscat));

  /*-------------------------------------*/
  /*       VecScatterCreateToAll         */
  /*-------------------------------------*/
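  /* Reset x = [0, 1, ..., N-1]; the previous section left it modified */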
  for (i = low; i < high; i++) PetscCall(VecSetValue(x, i, (PetscScalar)i, INSERT_VALUES));
  PetscCall(VecAssemblyBegin(x));
  PetscCall(VecAssemblyEnd(x));

  PetscCall(PetscPrintf(PETSC_COMM_WORLD, "\nTesting VecScatterCreateToAll\n"));

  PetscCall(VecScatterCreateToAll(x, &vscat, &y));
  PetscCall(PetscObjectSetName((PetscObject)y, "y"));

  /* Test PetscSFBcastAndOp with op = MPI_REPLACE, which does y = x on all ranks */
  PetscCall(VecScatterBegin(vscat, x, y, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(VecScatterEnd(vscat, x, y, INSERT_VALUES, SCATTER_FORWARD));
  if (rank == 0) PetscCall(VecView(y, PETSC_VIEWER_STDOUT_SELF));
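
  /* Added check (sketch): unlike the ToZero case, every rank now has the full copy y = [0, ..., N-1] */
  PetscCall(CheckVec(y, 1.0));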

  /* Test PetscSFBcastAndOp with op = MPI_SUM, which does y += x */
  PetscCall(VecScatterBegin(vscat, x, y, ADD_VALUES, SCATTER_FORWARD));
  PetscCall(VecScatterEnd(vscat, x, y, ADD_VALUES, SCATTER_FORWARD));
  if (rank == 0) PetscCall(VecView(y, PETSC_VIEWER_STDOUT_SELF));
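
  /* Added check (sketch): y accumulated x once more, so each rank's copy holds 2*i */
  PetscCall(CheckVec(y, 2.0));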

  /* Test PetscSFReduce with op = MPI_REPLACE, which does x = y */
  PetscCall(VecScatterBegin(vscat, y, x, INSERT_VALUES, SCATTER_REVERSE));
  PetscCall(VecScatterEnd(vscat, y, x, INSERT_VALUES, SCATTER_REVERSE));
  PetscCall(VecView(x, PETSC_VIEWER_STDOUT_WORLD));
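
  /* Added check (sketch): identical values were inserted from all ranks, so x holds 2*i */
  PetscCall(CheckVec(x, 2.0));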

  /* Test PetscSFReduce with op = MPI_SUM, which does x += size*y */
  PetscCall(VecScatterBegin(vscat, y, x, ADD_VALUES, SCATTER_REVERSE));
  PetscCall(VecScatterEnd(vscat, y, x, ADD_VALUES, SCATTER_REVERSE));
  PetscCall(VecView(x, PETSC_VIEWER_STDOUT_WORLD));
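
  /* Added check (sketch): every rank contributed its copy of y, so x holds 2*i + size*2*i */
  PetscCall(CheckVec(x, 2.0 * (1 + size)));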
  PetscCall(VecDestroy(&x));
  PetscCall(VecDestroy(&y));
  PetscCall(VecScatterDestroy(&vscat));

  PetscCall(PetscFinalize());
  return 0;
}
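
/* Added note (not part of the original file): outside the PETSc test harness,
   the example can be run by hand, e.g.
       mpiexec -n 3 ./ex8 -n 10 -onlylocal 1
   which exercises the one-to-all Allgatherv pattern covered by the third
   testset below. */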

/*TEST

   testset:
      # N=10 is divisible by nsize, to trigger Allgather/Gather in SF
      nsize: 2
      # Exact numbers really matter here
      diff_args: -j
      filter: grep -v "type"
      output_file: output/ex8_1.out

      test:
        suffix: 1_standard

      test:
        suffix: 1_cuda
        # sf_backend cuda is not needed if compiling only with cuda
        args: -vec_type cuda -sf_backend cuda
        requires: cuda

      test:
        suffix: 1_hip
        args: -vec_type hip -sf_backend hip
        requires: hip

      test:
        suffix: 1_cuda_aware_mpi
        # sf_backend cuda is not needed if compiling only with cuda
        args: -vec_type cuda -sf_backend cuda
        requires: cuda defined(PETSC_HAVE_MPI_GPU_AWARE)

   testset:
      # N=10 is not divisible by nsize, to trigger Allgatherv/Gatherv in SF
      nsize: 3
      # Exact numbers really matter here
      diff_args: -j
      filter: grep -v "type" | grep -v "Process "
      output_file: output/ex8_2.out

      test:
        suffix: 2_standard

      test:
        suffix: 2_cuda
        # sf_backend cuda is not needed if compiling only with cuda
        args: -vec_type cuda -sf_backend cuda
        requires: cuda

      test:
        suffix: 2_hip
        # sf_backend hip is not needed if compiling only with hip
        args: -vec_type hip -sf_backend hip
        requires: hip

      test:
        suffix: 2_cuda_aware_mpi
        args: -vec_type cuda
        requires: cuda defined(PETSC_HAVE_MPI_GPU_AWARE)

   testset:
      # trigger one-to-all pattern in Allgatherv
      nsize: 3
      diff_args: -j
      filter: grep -v "type" | grep -v "Process "
      output_file: output/ex8_3.out
      args: -onlylocal 1

      test:
        suffix: 2_standard_onetoall

      test:
        suffix: 2_cuda_onetoall
        # sf_backend cuda is not needed if compiling only with cuda
        args: -vec_type cuda -sf_backend cuda
        requires: cuda

      test:
        suffix: 2_hip_onetoall
        # sf_backend hip is not needed if compiling only with hip
        args: -vec_type hip -sf_backend hip
        requires: hip

      test:
        suffix: 2_cuda_aware_mpi_onetoall
        args: -vec_type cuda
        requires: cuda defined(PETSC_HAVE_MPI_GPU_AWARE)

TEST*/