Actual source code: dasub.c
/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc/private/dmdaimpl.h>
/*@
  DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate of the mesh point closest to an x,y,z point in the coordinates of the `DMDA`

  Collective on da

  Input Parameters:
+ da - the distributed array
. x  - the first physical coordinate
. y  - the second physical coordinate
- z  - the third physical coordinate

  Output Parameters:
+ II - the first logical coordinate (-1 on processes that do not contain that point)
. JJ - the second logical coordinate (-1 on processes that do not contain that point)
. KK - the third logical coordinate (-1 on processes that do not contain that point)
. X  - (optional) the first coordinate of the located grid point
. Y  - (optional) the second coordinate of the located grid point
- Z  - (optional) the third coordinate of the located grid point

  Level: advanced

  Note:
  All processors that share the `DMDA` must call this with the same coordinate value

.seealso: `DM`, `DMDA`
@*/
PetscErrorCode DMDAGetLogicalCoordinate(DM da, PetscScalar x, PetscScalar y, PetscScalar z, PetscInt *II, PetscInt *JJ, PetscInt *KK, PetscScalar *X, PetscScalar *Y, PetscScalar *Z)
{
  Vec          coors;
  DM           dacoors;
  DMDACoor2d **c;
  PetscInt     i, j, xs, xm, ys, ym;
  PetscReal    d, D = PETSC_MAX_REAL, Dv;
  PetscMPIInt  rank, root;

  /* Only the 2d case is handled below; z is not read and KK, Z are not written */
  *II = -1;
  *JJ = -1;

  DMGetCoordinateDM(da, &dacoors);
  DMDAGetCorners(dacoors, &xs, &ys, NULL, &xm, &ym, NULL);
  DMGetCoordinates(da, &coors);
  DMDAVecGetArrayRead(dacoors, coors, &c);
  /* Find the locally owned grid point closest to (x,y) */
  for (j = ys; j < ys + ym; j++) {
    for (i = xs; i < xs + xm; i++) {
      d = PetscSqrtReal(PetscRealPart((c[j][i].x - x) * (c[j][i].x - x) + (c[j][i].y - y) * (c[j][i].y - y)));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  /* Determine the globally closest point; processes that do not own it reset their indices */
  MPIU_Allreduce(&D, &Dv, 1, MPIU_REAL, MPIU_MIN, PetscObjectComm((PetscObject)da));
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    MPI_Comm_rank(PetscObjectComm((PetscObject)da), &rank);
    rank++;
  }
  /* The owning process contributes rank+1, all others 0; recover its rank and broadcast the coordinates */
  MPIU_Allreduce(&rank, &root, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)da));
  root--;
  MPI_Bcast(X, 1, MPIU_SCALAR, root, PetscObjectComm((PetscObject)da));
  MPI_Bcast(Y, 1, MPIU_SCALAR, root, PetscObjectComm((PetscObject)da));
  DMDAVecRestoreArrayRead(dacoors, coors, &c);
  return 0;
}
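/*
  Usage sketch (illustrative only, not part of this file): locate the grid point of a
  2d DMDA nearest to the physical point (0.5, 0.5).  The DMDA "da" is assumed to have
  been created and given coordinates elsewhere; error checking is omitted to match the
  listing above.

    PetscInt    i, j, k;
    PetscScalar X, Y, Z;

    DMDAGetLogicalCoordinate(da, 0.5, 0.5, 0.0, &i, &j, &k, &X, &Y, &Z);
    if (i >= 0) PetscPrintf(PETSC_COMM_SELF, "nearest point (%" PetscInt_FMT ",%" PetscInt_FMT ") at (%g,%g)\n", i, j, (double)PetscRealPart(X), (double)PetscRealPart(Y));
*/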
/*@
  DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a `DMDA` vector

  Collective on da

  Input Parameters:
+ da  - the distributed array
. dir - Cartesian direction, either `DM_X`, `DM_Y`, or `DM_Z`
- gp  - global grid point number in this direction

  Output Parameters:
+ newvec  - the new vector that can hold the values (size zero on all processes except process 0)
- scatter - the `VecScatter` that will map from the original vector to the slice

  Level: advanced

  Note:
  All processors that share the `DMDA` must call this with the same gp value

.seealso: `DM`, `DMDA`, `DMDirection`, `Vec`, `VecScatter`
@*/
PetscErrorCode DMDAGetRay(DM da, DMDirection dir, PetscInt gp, Vec *newvec, VecScatter *scatter)
{
  PetscMPIInt rank;
  DM_DA      *dd = (DM_DA *)da->data;
  IS          is;
  AO          ao;
  Vec         vec;
  PetscInt   *indices, i, j;

  MPI_Comm_rank(PetscObjectComm((PetscObject)da), &rank);
  DMDAGetAO(da, &ao);
  if (rank == 0) {
    /* Process 0 builds the list of global indices that make up the requested ray */
    if (da->dim == 1) {
      if (dir == DM_X) {
        PetscMalloc1(dd->w, &indices);
        indices[0] = dd->w * gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i - 1] + 1;
        AOApplicationToPetsc(ao, dd->w, indices);
        VecCreate(PETSC_COMM_SELF, newvec);
        VecSetBlockSize(*newvec, dd->w);
        VecSetSizes(*newvec, dd->w, PETSC_DETERMINE);
        VecSetType(*newvec, VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is);
      } else {
        SETERRQ(PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
      }
    } else {
      /* 2d case: DM_Y selects the grid row j = gp, DM_X selects the column i = gp */
      if (dir == DM_Y) {
        PetscMalloc1(dd->w * dd->M, &indices);
        indices[0] = gp * dd->M * dd->w;
        for (i = 1; i < dd->M * dd->w; i++) indices[i] = indices[i - 1] + 1;

        AOApplicationToPetsc(ao, dd->M * dd->w, indices);
        VecCreate(PETSC_COMM_SELF, newvec);
        VecSetBlockSize(*newvec, dd->w);
        VecSetSizes(*newvec, dd->M * dd->w, PETSC_DETERMINE);
        VecSetType(*newvec, VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF, dd->w * dd->M, indices, PETSC_OWN_POINTER, &is);
      } else if (dir == DM_X) {
        PetscMalloc1(dd->w * dd->N, &indices);
        indices[0] = dd->w * gp;
        for (j = 1; j < dd->w; j++) indices[j] = indices[j - 1] + 1;
        for (i = 1; i < dd->N; i++) {
          indices[i * dd->w] = indices[i * dd->w - 1] + dd->w * dd->M - dd->w + 1;
          for (j = 1; j < dd->w; j++) indices[i * dd->w + j] = indices[i * dd->w + j - 1] + 1;
        }
        AOApplicationToPetsc(ao, dd->w * dd->N, indices);
        VecCreate(PETSC_COMM_SELF, newvec);
        VecSetBlockSize(*newvec, dd->w);
        VecSetSizes(*newvec, dd->N * dd->w, PETSC_DETERMINE);
        VecSetType(*newvec, VECSEQ);
        ISCreateGeneral(PETSC_COMM_SELF, dd->w * dd->N, indices, PETSC_OWN_POINTER, &is);
      } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
    }
  } else {
    /* All other processes contribute an empty vector and index set */
    VecCreateSeq(PETSC_COMM_SELF, 0, newvec);
    ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is);
  }
  DMGetGlobalVector(da, &vec);
  VecScatterCreate(vec, is, *newvec, NULL, scatter);
  DMRestoreGlobalVector(da, &vec);
  ISDestroy(&is);
  return 0;
}
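/*
  Usage sketch (illustrative only, not part of this file): gather grid row j = 3 of a
  2d DMDA onto process 0.  The DMDA "da" and a global vector "g" obtained from it are
  assumed to exist already; error checking is omitted to match the listing above.

    Vec        ray;
    VecScatter scat;

    DMDAGetRay(da, DM_Y, 3, &ray, &scat);
    VecScatterBegin(scat, g, ray, INSERT_VALUES, SCATTER_FORWARD);
    VecScatterEnd(scat, g, ray, INSERT_VALUES, SCATTER_FORWARD);
    ...                                     use the values in ray on process 0
    VecScatterDestroy(&scat);
    VecDestroy(&ray);
*/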
/*@C
  DMDAGetProcessorSubset - Returns a communicator consisting only of the
  processors in a `DMDA` that own a particular global x, y, or z grid point
  (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

  Collective on da

  Input Parameters:
+ da  - the distributed array
. dir - Cartesian direction, either `DM_X`, `DM_Y`, or `DM_Z`
- gp  - global grid point number in this direction

  Output Parameter:
. comm - new communicator

  Level: advanced

  Notes:
  All processors that share the `DMDA` must call this with the same gp value

  After use, comm should be freed with `MPI_Comm_free()`

  This routine is particularly useful to compute boundary conditions
  or other application-specific calculations that require manipulating
  sets of data throughout a logical plane of grid points.

  Fortran Note:
  Not supported from Fortran

.seealso: `DM`, `DMDA`, `DMDirection`
@*/
PetscErrorCode DMDAGetProcessorSubset(DM da, DMDirection dir, PetscInt gp, MPI_Comm *comm)
{
  MPI_Group   group, subgroup;
  PetscInt    i, ict, flag, *owners, xs, xm, ys, ym, zs, zm;
  PetscMPIInt size, *ranks = NULL;
  DM_DA      *dd = (DM_DA *)da->data;

  flag = 0;
  DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);
  MPI_Comm_size(PetscObjectComm((PetscObject)da), &size);
  /* Flag this process if it owns grid point gp in the requested direction */
  if (dir == DM_Z) {
    if (gp >= zs && gp < zs + zm) flag = 1;
  } else if (dir == DM_Y) {
    if (gp >= ys && gp < ys + ym) flag = 1;
  } else if (dir == DM_X) {
    if (gp >= xs && gp < xs + xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "Invalid direction");

  /* Gather the flags and collect the ranks of the owning processes */
  PetscMalloc2(size, &owners, size, &ranks);
  MPI_Allgather(&flag, 1, MPIU_INT, owners, 1, MPIU_INT, PetscObjectComm((PetscObject)da));
  ict = 0;
  PetscInfo(da, "DMDAGetProcessorSubset: dim=%" PetscInt_FMT ", direction=%d, procs: ", da->dim, (int)dir);
  for (i = 0; i < size; i++) {
    if (owners[i]) {
      ranks[ict] = i;
      ict++;
      PetscInfo(da, "%" PetscInt_FMT " ", i);
    }
  }
  PetscInfo(da, "\n");
  MPI_Comm_group(PetscObjectComm((PetscObject)da), &group);
  MPI_Group_incl(group, ict, ranks, &subgroup);
  MPI_Comm_create(PetscObjectComm((PetscObject)da), subgroup, comm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(owners, ranks);
  return 0;
}
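/*
  Usage sketch (illustrative only, not part of this file): obtain the communicator of
  the processes that own the plane z = 0 of a 3d DMDA "da", e.g. to apply a boundary
  condition collectively on that plane.  On processes that do not own the plane,
  MPI_Comm_create() returns MPI_COMM_NULL.

    MPI_Comm bccomm;

    DMDAGetProcessorSubset(da, DM_Z, 0, &bccomm);
    if (bccomm != MPI_COMM_NULL) {
      ...                                   collective work on the z = 0 plane
      MPI_Comm_free(&bccomm);
    }
*/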
/*@C
  DMDAGetProcessorSubsets - Returns communicators consisting only of the
  processors in a `DMDA` adjacent in a particular dimension,
  corresponding to a logical plane in a 3D grid or a line in a 2D grid.

  Collective on da

  Input Parameters:
+ da  - the distributed array
- dir - Cartesian direction, either `DM_X`, `DM_Y`, or `DM_Z`

  Output Parameter:
. subcomm - new communicator

  Level: advanced

  Notes:
  This routine is useful for distributing one-dimensional data in a tensor product grid.

  After use, subcomm should be freed with `MPI_Comm_free()`

  Fortran Note:
  Not supported from Fortran

.seealso: `DM`, `DMDA`, `DMDirection`
@*/
PetscErrorCode DMDAGetProcessorSubsets(DM da, DMDirection dir, MPI_Comm *subcomm)
{
  MPI_Comm     comm;
  MPI_Group    group, subgroup;
  PetscInt     subgroupSize = 0;
  PetscInt    *firstPoints;
  PetscMPIInt  size, *subgroupRanks = NULL;
  PetscInt     xs, xm, ys, ym, zs, zm, firstPoint, p;

  PetscObjectGetComm((PetscObject)da, &comm);
  DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);
  MPI_Comm_size(comm, &size);
  /* The first locally owned index in the requested direction identifies this process's subset */
  if (dir == DM_Z) {
    firstPoint = zs;
  } else if (dir == DM_Y) {
    firstPoint = ys;
  } else if (dir == DM_X) {
    firstPoint = xs;
  } else SETERRQ(comm, PETSC_ERR_ARG_OUTOFRANGE, "Invalid direction");

  /* Group together the ranks that share the same first point */
  PetscMalloc2(size, &firstPoints, size, &subgroupRanks);
  MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);
  PetscInfo(da, "DMDAGetProcessorSubsets: dim=%" PetscInt_FMT ", direction=%d, procs: ", da->dim, (int)dir);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      PetscInfo(da, "%" PetscInt_FMT " ", p);
    }
  }
  PetscInfo(da, "\n");
  MPI_Comm_group(comm, &group);
  MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);
  MPI_Comm_create(comm, subgroup, subcomm);
  MPI_Group_free(&subgroup);
  MPI_Group_free(&group);
  PetscFree2(firstPoints, subgroupRanks);
  return 0;
}
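/*
  Usage sketch (illustrative only, not part of this file): on every process, build the
  communicator of the processes whose local subdomain starts at the same x index, e.g.
  to reduce one-dimensional data within each subset of a tensor product grid.  The DMDA
  "da" is assumed to exist already.

    MPI_Comm xcomm;

    DMDAGetProcessorSubsets(da, DM_X, &xcomm);
    ...                                     collective work within each subset
    MPI_Comm_free(&xcomm);
*/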