Actual source code: ex30.c

  1: static char help[] = "Grid-based Landau collision operator with a PIC interface and OpenMP setup (one species per grid).\n";

  3: /*
  4:    Supports 2.5V with axisymmetric coordinates
  5:      - (r,z) coordinates
  6:      - Domain and species data are input by the Landau operator
  7:      - A "radius" for each grid, normalized with the electron thermal velocity
  8:      - Domain: (0,radius) x (-radius,radius); the first coordinate x[0] is the perpendicular velocity and a 2*pi*x[0] factor is added for the axisymmetric volume element
  9:    Supports full 3V

 11:  */
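
/*
   Reading guide for the PIC workflow in this example:
     1) particlesToGrid(): deposit particle weights with the particle mass matrix M_p (rho = M_p^T w)
     2) a "ptof_" mass-matrix solve turns that deposition into finite element coefficients
     3) the batched Landau operator advances the grid solution with TSSolve()
     4) gridToParticles(): a "ftop_" pseudo-inverse solve maps the grid solution back to particle weights
 */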

 13: #include "petscdmplex.h"
 14: #include "petscds.h"
 15: #include "petscdmswarm.h"
 16: #include "petscksp.h"
 17: #include <petsc/private/petscimpl.h>
 18: #if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
 19:   #include <omp.h>
 20: #endif
 21: #include <petsclandau.h>
 22: #include <petscdmcomposite.h>

 24: typedef struct {
 25:   Mat MpTrans;
 26:   Mat Mp;
 27:   Vec ff;
 28:   Vec uu;
 29: } MatShellCtx;
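
/* The two MATSHELL callbacks below apply the normal-equations operator MtM = Mp^T Mp
   matrix-free, using the stored Mp, its explicit transpose MpTrans, and the work vector ff. */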

 31: PetscErrorCode MatMultMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy)
 32: {
 33:   MatShellCtx *matshellctx;

 36:   MatShellGetContext(MtM, &matshellctx);
 38:   MatMult(matshellctx->Mp, xx, matshellctx->ff);
 39:   MatMult(matshellctx->MpTrans, matshellctx->ff, yy);
 40:   return 0;
 41: }

 43: PetscErrorCode MatMultAddMtM_SeqAIJ(Mat MtM, Vec xx, Vec yy, Vec zz)
 44: {
 45:   MatShellCtx *matshellctx;

 48:   MatShellGetContext(MtM, &matshellctx);
 50:   MatMult(matshellctx->Mp, xx, matshellctx->ff);
 51:   MatMultAdd(matshellctx->MpTrans, matshellctx->ff, yy, zz);
 52:   return 0;
 53: }
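
/* createSwarm(): create a DMSWARM_PIC particle swarm bound to the cell DM, with a single scalar
   field "w_q" for the particle weights; coordinates live in the built-in "DMSwarmPIC_coor" field. */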

 55: PetscErrorCode createSwarm(const DM dm, PetscInt dim, DM *sw)
 56: {
 57:   PetscInt Nc = 1;

 60:   DMCreate(PETSC_COMM_SELF, sw);
 61:   DMSetType(*sw, DMSWARM);
 62:   DMSetDimension(*sw, dim);
 63:   DMSwarmSetType(*sw, DMSWARM_PIC);
 64:   DMSwarmSetCellDM(*sw, dm);
 65:   DMSwarmRegisterPetscDatatypeField(*sw, "w_q", Nc, PETSC_SCALAR);
 66:   DMSwarmFinalizeFieldRegister(*sw);
 67:   DMSetFromOptions(*sw);
 68:   return 0;
 69: }
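
/* gridToParticles(): map a grid (FE) solution back to particle weights by (pseudo-)inverting the
   particle mass matrix M_p with the "ftop_" KSP. Two paths are used below:
     - KSPLSQR: a least-squares solve with M_p itself (optionally preconditioned with a square
       particle mass matrix via block Jacobi), or
     - normal equations: a MATSHELL applying Mp^T Mp, preconditioned by its diagonal D. */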

 71: PetscErrorCode gridToParticles(const DM dm, DM sw, Vec rhs, Vec work, Mat M_p, Mat Mass)
 72: {
 73:   PetscBool    is_lsqr;
 74:   KSP          ksp;
 75:   Mat          PM_p = NULL, MtM, D;
 76:   Vec          ff;
 77:   PetscInt     N, M, nzl;
 78:   MatShellCtx *matshellctx;

 81:   MatMult(Mass, rhs, work);
 82:   VecCopy(work, rhs);
 83:   // pseudo-inverse
 84:   KSPCreate(PETSC_COMM_SELF, &ksp);
 85:   KSPSetOptionsPrefix(ksp, "ftop_");
 86:   KSPSetFromOptions(ksp);
 87:   PetscObjectTypeCompare((PetscObject)ksp, KSPLSQR, &is_lsqr);
 88:   if (!is_lsqr) {
 89:     MatGetLocalSize(M_p, &M, &N);
 90:     if (N > M) {
 91:       PC pc;
 92:       PetscInfo(ksp, " M (%" PetscInt_FMT ") < N (%" PetscInt_FMT ") -- revert to lsqr\n", M, N);
 93:       is_lsqr = PETSC_TRUE;
 94:       KSPSetType(ksp, KSPLSQR);
 95:       KSPGetPC(ksp, &pc);
 96:       PCSetType(pc, PCNONE); // could put in better solver -ftop_pc_type bjacobi -ftop_sub_pc_type lu -ftop_sub_pc_factor_shift_type nonzero
 97:     } else {
 98:       PetscNew(&matshellctx);
 99:       MatCreateShell(PetscObjectComm((PetscObject)dm), N, N, PETSC_DECIDE, PETSC_DECIDE, matshellctx, &MtM);
100:       MatTranspose(M_p, MAT_INITIAL_MATRIX, &matshellctx->MpTrans);
101:       matshellctx->Mp = M_p;
102:       MatShellSetOperation(MtM, MATOP_MULT, (void (*)(void))MatMultMtM_SeqAIJ);
103:       MatShellSetOperation(MtM, MATOP_MULT_ADD, (void (*)(void))MatMultAddMtM_SeqAIJ);
104:       MatCreateVecs(M_p, &matshellctx->uu, &matshellctx->ff);
105:       MatCreateSeqAIJ(PETSC_COMM_SELF, N, N, 1, NULL, &D);
106:       MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_Mp_mat_view");
107:       for (int i = 0; i < N; i++) {
108:         const PetscScalar *vals;
109:         const PetscInt    *cols;
110:         PetscScalar        dot = 0;
111:         MatGetRow(matshellctx->MpTrans, i, &nzl, &cols, &vals);
112:         for (int ii = 0; ii < nzl; ii++) dot += PetscSqr(vals[ii]);
113:         MatRestoreRow(matshellctx->MpTrans, i, &nzl, &cols, &vals);
114:         MatSetValue(D, i, i, dot, INSERT_VALUES);
115:       }
116:       MatAssemblyBegin(D, MAT_FINAL_ASSEMBLY);
117:       MatAssemblyEnd(D, MAT_FINAL_ASSEMBLY);
118:       PetscInfo(M_p, "createMtMKSP Have %" PetscInt_FMT " eqs, nzl = %" PetscInt_FMT "\n", N, nzl);
119:       KSPSetOperators(ksp, MtM, D);
120:       MatViewFromOptions(D, NULL, "-ftop2_D_mat_view");
121:       MatViewFromOptions(M_p, NULL, "-ftop2_Mp_mat_view");
122:       MatViewFromOptions(matshellctx->MpTrans, NULL, "-ftop2_MpTranspose_mat_view");
123:     }
124:   }
125:   if (is_lsqr) {
126:     PC        pc;
127:     PetscBool is_bjac;
128:     KSPGetPC(ksp, &pc);
129:     PetscObjectTypeCompare((PetscObject)pc, PCBJACOBI, &is_bjac);
130:     if (is_bjac) {
131:       DMSwarmCreateMassMatrixSquare(sw, dm, &PM_p);
132:       KSPSetOperators(ksp, M_p, PM_p);
133:     } else {
134:       KSPSetOperators(ksp, M_p, M_p);
135:     }
136:   }
137:   DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff); // this grabs access
138:   if (!is_lsqr) {
139:     KSPSolve(ksp, rhs, matshellctx->uu);
140:     MatMult(M_p, matshellctx->uu, ff);
141:     MatDestroy(&matshellctx->MpTrans);
142:     VecDestroy(&matshellctx->ff);
143:     VecDestroy(&matshellctx->uu);
144:     MatDestroy(&D);
145:     MatDestroy(&MtM);
146:     PetscFree(matshellctx);
147:   } else {
148:     KSPSolveTranspose(ksp, rhs, ff);
149:   }
150:   KSPDestroy(&ksp);
151:   /* Visualize particle field */
152:   VecViewFromOptions(ff, NULL, "-weights_view");
153:   MatDestroy(&PM_p);
154:   DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff);

156:   return 0;
157: }
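
/* particlesToGrid(): load raw particle coordinates and weights into the swarm, migrate particles to
   their owning cells, and deposit rho = M_p^T w, the weak-form representation of the particle
   distribution on the grid. The particle mass matrix M_p is returned for later use in gridToParticles(). */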

159: PetscErrorCode particlesToGrid(const DM dm, DM sw, const PetscInt Np, const PetscInt a_tid, const PetscInt dim, const PetscReal xx[], const PetscReal yy[], const PetscReal zz[], const PetscReal a_wp[], Vec rho, Mat *Mp_out)
160: {
161:   PetscBool     removePoints = PETSC_TRUE;
162:   PetscReal    *wq, *coords;
163:   PetscDataType dtype;
164:   Mat           M_p;
165:   Vec           ff;
166:   PetscInt      bs, p, zero = 0;

169:   DMSwarmSetLocalSizes(sw, Np, zero);
170:   DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wq);
171:   DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords);
172:   for (p = 0; p < Np; p++) {
173:     coords[p * dim + 0] = xx[p];
174:     coords[p * dim + 1] = yy[p];
175:     wq[p]               = a_wp[p];
176:     if (dim == 3) coords[p * dim + 2] = zz[p];
177:   }
178:   DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords);
179:   DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wq);
180:   DMSwarmMigrate(sw, removePoints);
181:   PetscObjectSetName((PetscObject)sw, "Particle Grid");

183:   /* This gives M f = \int_\Omega \phi f, which looks like a rhs for a PDE */
184:   DMCreateMassMatrix(sw, dm, &M_p);

186:   PetscObjectSetName((PetscObject)rho, "rho");
187:   DMSwarmCreateGlobalVectorFromField(sw, "w_q", &ff);
188:   PetscObjectSetName((PetscObject)ff, "weights");
189:   MatMultTranspose(M_p, ff, rho);
190:   DMSwarmDestroyGlobalVectorFromField(sw, "w_q", &ff);

192:   // output
193:   *Mp_out = M_p;

195:   return 0;
196: }
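
/* Pointwise Maxwellian used for the fake particle weights:
     u = n * (pi * theta)^(-3/2) * exp(-|v|^2 / theta), with theta = 2 kT/m in normalized units */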
197: static void maxwellian(PetscInt dim, const PetscReal x[], PetscReal kt_m, PetscReal n, PetscScalar *u)
198: {
199:   PetscInt  i;
200:   PetscReal v2 = 0, theta = 2.0 * kt_m; /* theta = 2kT/mc^2 */

202:   /* compute the exponents, v^2 */
203:   for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
204:   /* evaluate the Maxwellian */
205:   u[0] = n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
206: }

208: #define MAX_NUM_THRDS 12
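/* MAX_NUM_THRDS bounds the per-thread work arrays (t_ksp, t_fhat) used in go(). go() is the driver:
   it creates per-grid FE mass matrices, makes fake particles for each spatial vertex, projects them
   onto the velocity grids, advances the batched Landau operator with TSSolve(), and maps the result
   back to the particles, accumulating moment diagnostics for one target vertex and grid. */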
209: PetscErrorCode go(TS ts, Vec X, const PetscInt NUserV, const PetscInt a_Np, const PetscInt dim, const PetscInt b_target, const PetscInt g_target)
210: {
211:   DM             pack, *globSwarmArray, grid_dm[LANDAU_MAX_GRIDS];
212:   Mat           *globMpArray, g_Mass[LANDAU_MAX_GRIDS];
213:   KSP            t_ksp[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
214:   Vec            t_fhat[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
215:   PetscInt       nDMs, glb_b_id, nTargetP = 0;
216:   PetscErrorCode ierr = 0;
217: #if defined(PETSC_HAVE_OPENMP) && defined(PETSC_HAVE_THREADSAFETY)
218:   PetscInt numthreads = PetscNumOMPThreads;
219: #else
220:   PetscInt numthreads = 1;
221: #endif
222:   LandauCtx *ctx;
223:   Vec       *globXArray;
224:   PetscReal  moments_0[3], moments_1[3], dt_init;

229:   TSGetDM(ts, &pack);
230:   DMGetApplicationContext(pack, &ctx);
232:   DMCompositeGetNumberDM(pack, &nDMs);
233:   PetscInfo(pack, "Have %" PetscInt_FMT " total grids, with %" PetscInt_FMT " Landau local batched and %" PetscInt_FMT " global items (vertices)\n", ctx->num_grids, ctx->batch_sz, NUserV);
234:   PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray);
235:   PetscMalloc(sizeof(*globMpArray) * nDMs, &globMpArray);
236:   PetscMalloc(sizeof(*globSwarmArray) * nDMs, &globSwarmArray);
237:   DMViewFromOptions(ctx->plex[g_target], NULL, "-ex30_dm_view");
238:   // create mass matrices
239:   DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray); // just to duplicate
240:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {               // create per-grid FE mass matrices and per-thread projection solvers
241:     Vec          subX = globXArray[LAND_PACK_IDX(0, grid)];
242:     DM           dm   = ctx->plex[grid];
243:     PetscSection s;
244:     grid_dm[grid] = dm;
245:     DMCreateMassMatrix(dm, dm, &g_Mass[grid]);
246:     //
247:     DMGetLocalSection(dm, &s);
248:     DMPlexCreateClosureIndex(dm, s);
249:     for (int tid = 0; tid < numthreads; tid++) {
250:       VecDuplicate(subX, &t_fhat[grid][tid]);
251:       KSPCreate(PETSC_COMM_SELF, &t_ksp[grid][tid]);
252:       KSPSetOptionsPrefix(t_ksp[grid][tid], "ptof_");
253:       KSPSetOperators(t_ksp[grid][tid], g_Mass[grid], g_Mass[grid]);
254:       KSPSetFromOptions(t_ksp[grid][tid]);
255:     }
256:   }
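  /* One work vector and one "ptof_" (particle-to-field) mass-matrix KSP are kept per (grid, thread)
     pair so that the OpenMP threads below can project particle data independently. */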
257:   DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray);
258:   // create particle raw data. could use OMP with a thread safe malloc, but this is just the fake user
259:   for (int i = 0; i < 3; i++) moments_0[i] = moments_1[i] = 0;
260:   TSGetTimeStep(ts, &dt_init); // we could have an adaptive time stepper
261:   for (PetscInt global_batch_id = 0; global_batch_id < NUserV; global_batch_id += ctx->batch_sz) {
262:     ierr = TSSetTime(ts, 0);
263:     CHKERRQ(ierr);
264:     ierr = TSSetStepNumber(ts, 0);
265:     CHKERRQ(ierr);
266:     ierr = TSSetTimeStep(ts, dt_init);
267:     CHKERRQ(ierr);
268:     VecZeroEntries(X);
269:     DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray);
270:     if (b_target >= global_batch_id && b_target < global_batch_id + ctx->batch_sz) PetscObjectSetName((PetscObject)globXArray[LAND_PACK_IDX(b_target % ctx->batch_sz, g_target)], "rho");
271:     // create fake particles
272:     for (PetscInt b_id_0 = 0; b_id_0 < ctx->batch_sz; b_id_0 += numthreads) {
273:       PetscReal *xx_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *yy_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *zz_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS], *wp_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
274:       PetscInt   Np_t[LANDAU_MAX_GRIDS][MAX_NUM_THRDS];
275:       // make particles
276:       for (int tid = 0; tid < numthreads; tid++) {
277:         const PetscInt b_id = b_id_0 + tid;
278:         if ((glb_b_id = global_batch_id + b_id) < NUserV) {        // the ragged edge of the last batch
279:           PetscInt Npp0 = a_Np + (glb_b_id % a_Np), NN;            // fake user: number of particles per dimension, with some load imbalance added (difference < 2x)
280:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particles for all grids
281:             const PetscReal kT_m = ctx->k * ctx->thermal_temps[ctx->species_offset[grid]] / ctx->masses[ctx->species_offset[grid]] / (ctx->v_0 * ctx->v_0); /* theta = 2kT/mc^2 per species -- TODO */
283:             PetscReal lo[3] = {-ctx->radius[grid], -ctx->radius[grid], -ctx->radius[grid]}, hi[3] = {ctx->radius[grid], ctx->radius[grid], ctx->radius[grid]}, hp[3], vole; // would be nice to get box from DM
284:             PetscInt  Npi = Npp0, Npj = 2 * Npp0, Npk = 1;
285:             if (dim == 2) lo[0] = 0; // Landau coordinate (r,z)
286:             else Npi = Npj = Npk = Npp0;
287:             // User: use glb_b_id to index into your data
288:             NN = Npi * Npj * Npk; // make a regular grid of Npi x Npj x Npk particles
289:             if (glb_b_id == b_target) {
290:               nTargetP = NN;
291:               PetscInfo(pack, "Target %" PetscInt_FMT " with %" PetscInt_FMT " particles\n", glb_b_id, NN);
292:             }
293:             Np_t[grid][tid] = NN;
294:             PetscMalloc4(NN, &xx_t[grid][tid], NN, &yy_t[grid][tid], NN, &wp_t[grid][tid], dim == 2 ? 1 : NN, &zz_t[grid][tid]);
295:             hp[0] = (hi[0] - lo[0]) / Npi;
296:             hp[1] = (hi[1] - lo[1]) / Npj;
297:             hp[2] = (hi[2] - lo[2]) / Npk;
298:             if (dim == 2) hp[2] = 1;
299:             PetscInfo(pack, " lo = %14.7e, hi = %14.7e; hp = %14.7e, %14.7e; kT_m = %g; \n", (double)lo[1], (double)hi[1], (double)hp[0], (double)hp[1], (double)kT_m); // temp
300:             vole = hp[0] * hp[1] * hp[2] * ctx->n[grid];                                                                                                                           // fix for multi-species
301:             PetscInfo(pack, "Vertex %" PetscInt_FMT ", grid %" PetscInt_FMT " with %" PetscInt_FMT " particles (diagnostic target = %" PetscInt_FMT ")\n", glb_b_id, grid, NN, b_target);
302:             for (int pj = 0, pp = 0; pj < Npj; pj++) {
303:               for (int pk = 0; pk < Npk; pk++) {
304:                 for (int pi = 0; pi < Npi; pi++, pp++) {
305:                   xx_t[grid][tid][pp] = lo[0] + hp[0] / 2.0 + pi * hp[0];
306:                   yy_t[grid][tid][pp] = lo[1] + hp[1] / 2.0 + pj * hp[1];
307:                   if (dim == 3) zz_t[grid][tid][pp] = lo[2] + hp[2] / 2.0 + pk * hp[2];
308:                   {
309:                     PetscReal x[] = {xx_t[grid][tid][pp], yy_t[grid][tid][pp], dim == 2 ? 0 : zz_t[grid][tid][pp]};
310:                     maxwellian(dim, x, kT_m, vole, &wp_t[grid][tid][pp]);
311:                     //PetscInfo(pack,"%" PetscInt_FMT ") x = %14.7e, %14.7e, %14.7e, n = %14.7e, w = %14.7e\n", pp, x[0], x[1], dim==2 ? 0 : x[2], ctx->n[grid], wp_t[grid][tid][pp]); // temp
312:                     if (glb_b_id == b_target) {
313:                       PetscReal v2 = 0, fact = dim == 2 ? 2.0 * PETSC_PI * x[0] : 1;
314:                       for (int i = 0; i < dim; ++i) v2 += PetscSqr(x[i]);
315:                       moments_0[0] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
316:                       moments_0[1] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * x[1]; // z-momentum
317:                       moments_0[2] += fact * wp_t[grid][tid][pp] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * v2;
318:                     }
319:                   }
320:                 }
321:               }
322:             }
323:           } // grid
324:         }   // active
325:       }     // fake threads
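      /* Each active thread now owns a regular Npi x Npj x Npk block of particles with Maxwellian
         weights; moments_0[] holds the initial density, z-momentum, and energy of the diagnostic
         target vertex. */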
326:       /* Create particle swarm */
327:       PetscPragmaOMP(parallel for)
328:       for (int tid = 0; tid < numthreads; tid++)
329:       {
330:         const PetscInt b_id = b_id_0 + tid;
331:         if ((glb_b_id = global_batch_id + b_id) < NUserV) { // the ragged edge of the last batch
332:           //PetscInfo(pack,"Create swarms for 'glob' index %" PetscInt_FMT " create swarm\n",glb_b_id);
333:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particles for all grids
334:             PetscErrorCode ierr_t;
335:             PetscSection   section;
336:             PetscInt       Nf;
337:             DM             dm = grid_dm[grid];
338:             ierr_t            = DMGetLocalSection(dm, &section);
339:             ierr_t            = PetscSectionGetNumFields(section, &Nf);
340:             if (Nf != 1) ierr_t = 9999;
341:             else {
342:               ierr_t = DMViewFromOptions(dm, NULL, "-dm_view");
343:               ierr_t = PetscInfo(pack, "call createSwarm [%" PetscInt_FMT ".%" PetscInt_FMT "] local batch index %" PetscInt_FMT "\n", b_id, grid, LAND_PACK_IDX(b_id, grid));
344:               ierr_t = createSwarm(dm, dim, &globSwarmArray[LAND_PACK_IDX(b_id, grid)]);
345:             }
346:             if (ierr_t) ierr = ierr_t;
347:           }
348:         } // active
349:       }
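      /* Error checking macros are avoided inside the OpenMP regions: each thread records its status
         in ierr_t and the shared ierr is checked after the region. */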
351:       CHKERRQ(ierr);
352:       // p --> g: make globMpArray & set X
353:       PetscPragmaOMP(parallel for)
354:       for (int tid = 0; tid < numthreads; tid++)
355:       {
356:         const PetscInt b_id = b_id_0 + tid;
357:         if ((glb_b_id = global_batch_id + b_id) < NUserV) {
358:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particles for all grids
359:             PetscErrorCode ierr_t;
360:             DM             dm   = grid_dm[grid];
361:             DM             sw   = globSwarmArray[LAND_PACK_IDX(b_id, grid)];
362:             Vec            subX = globXArray[LAND_PACK_IDX(b_id, grid)], work = t_fhat[grid][tid];
363:             PetscInfo(pack, "particlesToGrid %" PetscInt_FMT ".%" PetscInt_FMT ") for local batch index %" PetscInt_FMT "\n", global_batch_id, grid, LAND_PACK_IDX(b_id, grid));
364:             ierr_t = particlesToGrid(dm, sw, Np_t[grid][tid], tid, dim, xx_t[grid][tid], yy_t[grid][tid], zz_t[grid][tid], wp_t[grid][tid], subX, &globMpArray[LAND_PACK_IDX(b_id, grid)]);
365:             if (ierr_t) ierr = ierr_t;
366:             // u = M^{-1} f_w: mass-matrix solve projects the deposited weights onto FE coefficients
367:             ierr_t = VecCopy(subX, work);
368:             ierr_t = KSPSolve(t_ksp[grid][tid], work, subX);
369:             if (ierr_t) ierr = ierr_t;
370:           }
371:         }
372:       }
373:       CHKERRQ(ierr);
374:       /* Cleanup */
375:       for (int tid = 0; tid < numthreads; tid++) {
376:         const PetscInt b_id = b_id_0 + tid;
377:         if ((glb_b_id = global_batch_id + b_id) < NUserV) {
378:           PetscInfo(pack, "Free for global batch %" PetscInt_FMT " of %" PetscInt_FMT "\n", glb_b_id + 1, NUserV);
379:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particles for all grids
380:             PetscFree4(xx_t[grid][tid], yy_t[grid][tid], wp_t[grid][tid], zz_t[grid][tid]);
381:           }
382:         } // active
383:       }
384:     } // Landau
385:     if (b_target >= global_batch_id && b_target < global_batch_id + ctx->batch_sz) VecViewFromOptions(globXArray[LAND_PACK_IDX(b_target % ctx->batch_sz, g_target)], NULL, "-ex30_vec_view");
386:     DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray);
387:     DMPlexLandauPrintNorms(X, 0);
388:     // advance
389:     TSSetSolution(ts, X);
390:     PetscInfo(pack, "Advance vertex %" PetscInt_FMT " to %" PetscInt_FMT " (with padding)\n", global_batch_id, global_batch_id + ctx->batch_sz);
391:     TSSolve(ts, X);
392:     DMPlexLandauPrintNorms(X, 1);
393:     DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray);
394:     // map back to particles
395:     for (PetscInt b_id_0 = 0; b_id_0 < ctx->batch_sz; b_id_0 += numthreads) {
396:       PetscInfo(pack, "g2p: global batch %" PetscInt_FMT " of %" PetscInt_FMT ", Landau batch %" PetscInt_FMT " of %" PetscInt_FMT ": map back to particles\n", global_batch_id + 1, NUserV, b_id_0 + 1, ctx->batch_sz);
397:       PetscPragmaOMP(parallel for)
398:       for (int tid = 0; tid < numthreads; tid++)
399:       {
400:         const PetscInt b_id = b_id_0 + tid;
401:         if ((glb_b_id = global_batch_id + b_id) < NUserV) {
402:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // add same particles for all grids
403:             PetscErrorCode ierr_t;
404:             PetscInfo(pack, "gridToParticles: global batch %" PetscInt_FMT ", local batch b=%" PetscInt_FMT ", grid g=%" PetscInt_FMT ", index(b,g) %" PetscInt_FMT "\n", global_batch_id, b_id, grid, LAND_PACK_IDX(b_id, grid));
405:             ierr_t = gridToParticles(grid_dm[grid], globSwarmArray[LAND_PACK_IDX(b_id, grid)], globXArray[LAND_PACK_IDX(b_id, grid)], t_fhat[grid][tid], globMpArray[LAND_PACK_IDX(b_id, grid)], g_Mass[grid]);
406:             if (ierr_t) ierr = ierr_t;
407:           }
408:         }
409:       }
410:       CHKERRQ(ierr);
411:       /* Cleanup, and get data */
412:       PetscInfo(pack, "Cleanup batches %" PetscInt_FMT " to %" PetscInt_FMT "\n", b_id_0, b_id_0 + numthreads);
413:       for (int tid = 0; tid < numthreads; tid++) {
414:         const PetscInt b_id = b_id_0 + tid;
415:         if ((glb_b_id = global_batch_id + b_id) < NUserV) {
416:           for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
417:             PetscDataType dtype;
418:             PetscReal    *wp, *coords;
419:             DM            sw = globSwarmArray[LAND_PACK_IDX(b_id, grid)];
420:             PetscInt      npoints, bs = 1;
421:             DMSwarmGetField(sw, "w_q", &bs, &dtype, (void **)&wp); // take data out here
422:             if (glb_b_id == b_target) {
423:               DMSwarmGetField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords);
424:               DMSwarmGetLocalSize(sw, &npoints);
425:               for (int p = 0; p < npoints; p++) {
426:                 PetscReal v2 = 0, fact = dim == 2 ? 2.0 * PETSC_PI * coords[p * dim + 0] : 1;
427:                 for (int i = 0; i < dim; ++i) v2 += PetscSqr(coords[p * dim + i]);
428:                 moments_1[0] += fact * wp[p] * ctx->n_0 * ctx->masses[ctx->species_offset[grid]];
429:                 moments_1[1] += fact * wp[p] * ctx->n_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * coords[p * dim + 1]; // z-momentum
430:                 moments_1[2] += fact * wp[p] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ctx->species_offset[grid]] * v2;
431:               }
432:               DMSwarmRestoreField(sw, "DMSwarmPIC_coor", &bs, &dtype, (void **)&coords);
433:             }
434:             DMSwarmRestoreField(sw, "w_q", &bs, &dtype, (void **)&wp);
435:             DMDestroy(&globSwarmArray[LAND_PACK_IDX(b_id, grid)]);
436:             MatDestroy(&globMpArray[LAND_PACK_IDX(b_id, grid)]);
437:           }
438:         }
439:       }
440:     } // thread batch
441:     DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray);
442:   } // user batch
443:   /* Cleanup */
444:   PetscFree(globXArray);
445:   PetscFree(globSwarmArray);
446:   PetscFree(globMpArray);
447:   // clean up mass matrices
448:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
449:     MatDestroy(&g_Mass[grid]);
450:     for (int tid = 0; tid < numthreads; tid++) {
451:       VecDestroy(&t_fhat[grid][tid]);
452:       KSPDestroy(&t_ksp[grid][tid]);
453:     }
454:   }
455:   PetscInfo(X, "Total number density: %20.12e (%20.12e); z-momentum = %20.12e (%20.12e); energy = %20.12e (%20.12e); error = %e (log10 of error = %" PetscInt_FMT "), %" PetscInt_FMT " particles, using %" PetscInt_FMT " threads\n", (double)moments_1[0], (double)moments_0[0], (double)moments_1[1], (double)moments_0[1], (double)moments_1[2], (double)moments_0[2], (double)((moments_1[2] - moments_0[2]) / moments_0[2]), (PetscInt)PetscLog10Real(PetscAbsReal((moments_1[2] - moments_0[2]) / moments_0[2])), nTargetP, numthreads);
456:   return 0;
457: }
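
/* main(): build the batched Landau velocity-space DM, Jacobian, and TS, then run go() twice --
   once as a warm-up and once inside the "ex30 hot solve" log stage for timing. */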

459: int main(int argc, char **argv)
460: {
461:   DM         pack;
462:   Vec        X;
463:   PetscInt   dim = 2, nvert = 1, Np = 10, btarget = 0, gtarget = 0;
464:   TS         ts;
465:   Mat        J;
466:   LandauCtx *ctx;
467: #if defined(PETSC_USE_LOG)
468:   PetscLogStage stage;
469: #endif

472:   PetscInitialize(&argc, &argv, NULL, help);
473:   // process args
474:   PetscOptionsBegin(PETSC_COMM_SELF, "", "Collision Options", "DMPLEX");
475:   PetscOptionsInt("-number_spatial_vertices", "Number of user spatial vertices to be batched for Landau", "ex30.c", nvert, &nvert, NULL);
476:   PetscOptionsInt("-dim", "Velocity space dimension", "ex30.c", dim, &dim, NULL);
477:   PetscOptionsInt("-number_particles_per_dimension", "Number of particles per grid, with slight modification per spatial vertex, in each dimension of base Cartesian grid", "ex30.c", Np, &Np, NULL);
478:   PetscOptionsInt("-view_vertex_target", "Batch to view with diagnostics", "ex30.c", btarget, &btarget, NULL);
480:   PetscOptionsInt("-view_grid_target", "Grid to view with diagnostics", "ex30.c", gtarget, &gtarget, NULL);
481:   PetscOptionsEnd();
482:   /* Create a mesh */
483:   DMPlexLandauCreateVelocitySpace(PETSC_COMM_SELF, dim, "", &X, &J, &pack);
484:   DMSetUp(pack);
485:   DMSetOutputSequenceNumber(pack, 0, 0.0);
486:   DMGetApplicationContext(pack, &ctx);
489:   /* Create timestepping solver context */
490:   TSCreate(PETSC_COMM_SELF, &ts);
491:   TSSetDM(ts, pack);
492:   TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL);
493:   TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL);
494:   TSSetExactFinalTime(ts, TS_EXACTFINALTIME_STEPOVER);
495:   TSSetFromOptions(ts);
496:   PetscObjectSetName((PetscObject)X, "X");
497:   // do particle advance, warmup
498:   go(ts, X, nvert, Np, dim, btarget, gtarget);
499:   MatZeroEntries(J); // need to zero out so as to not reuse it in Landau's logic
500:   // hot
501:   PetscLogStageRegister("ex30 hot solve", &stage);
502:   PetscLogStagePush(stage);
503:   go(ts, X, nvert, Np, dim, btarget, gtarget);
504:   PetscLogStagePop();
505:   /* clean up */
506:   DMPlexLandauDestroyVelocitySpace(&pack);
507:   TSDestroy(&ts);
508:   VecDestroy(&X);
509:   PetscFinalize();
510:   return 0;
511: }
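
/* Example command line (a sketch based on the 2D test below; it assumes a PETSc build with p4est,
   real scalars, and the 2D Landau configuration -- see the TEST block for the complete option sets):
     ./ex30 -dim 2 -petscspace_degree 3 -dm_landau_type p4est -dm_landau_num_species_grid 1,1,1 \
            -number_particles_per_dimension 10 -dm_landau_batch_size 2 -number_spatial_vertices 3 \
            -ftop_ksp_type lsqr -ftop_pc_type bjacobi -ptof_ksp_type cg -ptof_pc_type jacobi \
            -ts_type beuler -ts_dt 0.01 -ts_max_steps 1
 */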

513: /*TEST

515:   build:
516:     requires: !complex p4est

518:   testset:
519:     requires: double defined(PETSC_USE_DMLANDAU_2D)
520:     output_file: output/ex30_0.out
521:     args: -dim 2 -petscspace_degree 3 -dm_landau_type p4est -dm_landau_num_species_grid 1,1,1 -dm_landau_amr_levels_max 0,0,0 \
522:           -dm_landau_amr_post_refine 1 -number_particles_per_dimension 10 -dm_plex_hash_location \
523:           -dm_landau_batch_size 2 -number_spatial_vertices 3 -dm_landau_batch_view_idx 1 -view_vertex_target 2 -view_grid_target 1 \
524:           -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
525:           -ftop_ksp_converged_reason -ftop_ksp_rtol 1e-10 -ftop_ksp_type lsqr -ftop_pc_type bjacobi -ftop_sub_pc_factor_shift_type nonzero -ftop_sub_pc_type lu \
526:           -ksp_type preonly -pc_type lu \
527:           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_converged_reason -ptof_ksp_rtol 1e-12\
528:           -snes_converged_reason -snes_monitor -snes_rtol 1e-14 -snes_stol 1e-14\
529:           -ts_dt 0.01 -ts_rtol 1e-1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -info :vec

531:     test:
532:       suffix: cpu
533:       args: -dm_landau_device_type cpu
534:     test:
535:       suffix: kokkos
536:       requires: kokkos_kernels
537:       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos
538:     test:
539:       suffix: cuda
540:       requires: cuda
541:       args: -dm_landau_device_type cuda -dm_mat_type aijcusparse -dm_vec_type cuda

543:   testset:
544:     requires: double !defined(PETSC_USE_DMLANDAU_2D)
545:     output_file: output/ex30_3d.out
546:     args: -dim 3 -petscspace_degree 2 -dm_landau_type p8est -dm_landau_num_species_grid 1,1,1 -dm_landau_amr_levels_max 0,0,0 \
547:           -dm_landau_amr_post_refine 0 -number_particles_per_dimension 5 -dm_plex_hash_location \
548:           -dm_landau_batch_size 1 -number_spatial_vertices 1 -dm_landau_batch_view_idx 0 -view_vertex_target 0 -view_grid_target 0 \
549:           -dm_landau_n 1.000018,1,1e-6 -dm_landau_thermal_temps 2,1,1 -dm_landau_ion_masses 2,180 -dm_landau_ion_charges 1,18 \
550:           -ftop_ksp_converged_reason -ftop_ksp_rtol 1e-12 -ftop_ksp_type cg -ftop_pc_type jacobi \
551:           -ksp_type preonly -pc_type lu \
552:           -ptof_ksp_type cg -ptof_pc_type jacobi -ptof_ksp_converged_reason -ptof_ksp_rtol 1e-12\
553:           -snes_converged_reason -snes_monitor -snes_rtol 1e-12 -snes_stol 1e-12\
554:           -ts_dt 0.1 -ts_exact_final_time stepover -ts_max_snes_failures -1 -ts_max_steps 1 -ts_monitor -ts_type beuler -info :vec

556:     test:
557:       suffix: cpu_3d
558:       args: -dm_landau_device_type cpu
559:     test:
560:       suffix: kokkos_3d
561:       requires: kokkos_kernels
562:       args: -dm_landau_device_type kokkos -dm_mat_type aijkokkos -dm_vec_type kokkos
563:     test:
564:       suffix: cuda_3d
565:       requires: cuda
566:       args: -dm_landau_device_type cuda -dm_mat_type aijcusparse -dm_vec_type cuda

568: TEST*/