Actual source code: plexland.c

  1: #include <../src/mat/impls/aij/seq/aij.h>
  2: #include <petsc/private/dmpleximpl.h>
  3: #include <petsclandau.h>
  4: #include <petscts.h>
  5: #include <petscdmforest.h>
  6: #include <petscdmcomposite.h>

  8: /* Landau collision operator */

 10: /* relativistic terms */
 11: #if defined(PETSC_USE_REAL_SINGLE)
 12:   #define SPEED_OF_LIGHT 2.99792458e8F
 13:   #define C_0(v0)        (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */
 14: #else
 15:   #define SPEED_OF_LIGHT 2.99792458e8
 16:   #define C_0(v0)        (SPEED_OF_LIGHT / v0) /* needed for relativistic tensor on all architectures */
 17: #endif

 19: #define PETSC_THREAD_SYNC
 20: #include "land_tensors.h"

 22: #if defined(PETSC_HAVE_OPENMP)
 23:   #include <omp.h>
 24: #endif

 26: static PetscErrorCode LandauGPUMapsDestroy(void *ptr)
 27: {
 28:   P4estVertexMaps *maps = (P4estVertexMaps *)ptr;
 29:   // free device data
 30:   if (maps[0].deviceType != LANDAU_CPU) {
 31: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
 32:     if (maps[0].deviceType == LANDAU_KOKKOS) {
 33:       LandauKokkosDestroyMatMaps(maps, maps[0].numgrids); // a Kokkos build handles the device maps (its backend may itself be CUDA)
 34:     }
 35: #elif defined(PETSC_HAVE_CUDA)
 36:     if (maps[0].deviceType == LANDAU_CUDA) {
 37:       LandauCUDADestroyMatMaps(maps, maps[0].numgrids);
 38:     } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps->deviceType %d ?????", maps->deviceType);
 39: #endif
 40:   }
 41:   // free host data
 42:   for (PetscInt grid = 0; grid < maps[0].numgrids; grid++) {
 43:     PetscFree(maps[grid].c_maps);
 44:     PetscFree(maps[grid].gIdx);
 45:   }
 46:   PetscFree(maps);

 48:   return 0;
 49: }
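
/* A minimal, hedged sketch (not part of plexland.c): how a destroy callback such as
   LandauGPUMapsDestroy() is typically attached to the Jacobian through a PetscContainer,
   so that the PetscObjectQuery(..., "assembly_maps", ...) in LandauFormJacobian_Internal()
   below can recover the maps. The helper name and arguments are illustrative. */
static PetscErrorCode ExampleAttachAssemblyMaps(Mat J, P4estVertexMaps *maps)
{
  PetscContainer container;

  PetscContainerCreate(PetscObjectComm((PetscObject)J), &container);
  PetscContainerSetPointer(container, (void *)maps);
  PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy); /* maps are freed when the matrix is destroyed */
  PetscObjectCompose((PetscObject)J, "assembly_maps", (PetscObject)container);
  PetscContainerDestroy(&container); /* the matrix now holds the only reference */
  return 0;
}
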
 50: static PetscErrorCode energy_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
 51: {
 52:   PetscReal v2 = 0;
 53:   /* compute |v|^2 */
 54:   for (int i = 0; i < dim; ++i) v2 += x[i] * x[i];
 55:   /* kinetic energy, v^2 / 2 */
 56:   u[0] = v2 / 2;
 57:   return 0;
 58: }

 60: /* needs double */
 61: static PetscErrorCode gamma_m1_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
 62: {
 63:   PetscReal *c2_0_arr = ((PetscReal *)actx);
 64:   double     u2 = 0, c02 = (double)*c2_0_arr, xx;

 66:   /* compute |u|^2 */
 67:   for (int i = 0; i < dim; ++i) u2 += x[i] * x[i];
 68:   /* compute gamma - 1 in a well-conditioned form; only derivatives of this are used */
 69:   xx = u2 / c02;
 70: #if defined(PETSC_USE_DEBUG)
 71:   u[0] = PetscSqrtReal(1. + xx);
 72: #else
 73:   u[0] = xx / (PetscSqrtReal(1. + xx) + 1.) - 1.; // better conditioned than sqrt(1+xx)-1; the extra -1 is a constant shift and only derivatives are used
 74: #endif
 75:   return 0;
 76: }
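
/* A hedged illustration (not part of plexland.c) of why gamma_m1_f() above uses
   xx / (sqrt(1 + xx) + 1) instead of the naive sqrt(1 + xx) - 1: for a slow particle
   xx = (u/c)^2 is tiny, the naive form cancels to zero in double precision, while the
   rewritten, algebraically identical form keeps full relative accuracy (~ xx/2). */
#include <math.h>
#include <stdio.h>

int main(void)
{
  const double xx     = 1e-16;                     /* illustrative (u/c)^2 */
  const double naive  = sqrt(1. + xx) - 1.;        /* catastrophic cancellation: prints 0 */
  const double stable = xx / (sqrt(1. + xx) + 1.); /* well conditioned: prints ~5e-17 */

  printf("naive = %g  stable = %g\n", naive, stable);
  return 0;
}
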

 78: /*
 79:  LandauFormJacobian_Internal - Evaluates Jacobian matrix.

 81:  Input Parameters:
 82:  .  a_X - input vector (state)
 83:  .  actx - optional user-defined context
 84:  .  dim - dimension

 86:  Output Parameters:
 87:  .  JacP - Jacobian matrix, filled but not created here
 88:  */
 89: static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
 90: {
 91:   LandauCtx         *ctx = (LandauCtx *)a_ctx;
 92:   PetscInt           numCells[LANDAU_MAX_GRIDS], Nq, Nb;
 93:   PetscQuadrature    quad;
 94:   PetscReal          Eq_m[LANDAU_MAX_SPECIES]; // could be static data w/o quench (ex2)
 95:   PetscScalar       *cellClosure = NULL;
 96:   const PetscScalar *xdata       = NULL;
 97:   PetscDS            prob;
 98:   PetscContainer     container;
 99:   P4estVertexMaps   *maps;
100:   Mat                subJ[LANDAU_MAX_GRIDS * LANDAU_MAX_BATCH_SZ];

105:   /* check for matrix container for GPU assembly. Support CPU assembly for debugging */
107:   PetscLogEventBegin(ctx->events[10], 0, 0, 0, 0);
108:   DMGetDS(ctx->plex[0], &prob); // same DS for all grids
109:   PetscObjectQuery((PetscObject)JacP, "assembly_maps", (PetscObject *)&container);
110:   if (container) {
112:     PetscContainerGetPointer(container, (void **)&maps);
114:     for (PetscInt i = 0; i < ctx->num_grids * ctx->batch_sz; i++) subJ[i] = NULL;
115:   } else {
117:     for (PetscInt tid = 0; tid < ctx->batch_sz; tid++) {
118:       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) DMCreateMatrix(ctx->plex[grid], &subJ[LAND_PACK_IDX(tid, grid)]);
119:     }
120:     maps = NULL;
121:   }
122:   // get dynamic data (Eq is odd, for quench and Spitzer test) for CPU assembly and raw data for Jacobian GPU assembly. Get host numCells[], Nq (yuck)
123:   PetscFEGetQuadrature(ctx->fe[0], &quad);
124:   PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL);
125:   Nb = Nq;
127:   // get metadata for collecting dynamic data
128:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
129:     PetscInt cStart, cEnd;
131:     DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
132:     numCells[grid] = cEnd - cStart; // grids can have different topology
133:   }
134:   PetscLogEventEnd(ctx->events[10], 0, 0, 0, 0);
135:   if (shift == 0) { /* create dynamic point data: f_alpha for closure of each cell (cellClosure[nbatch,ngrids,ncells[g],f[Nb,ns[g]]]) or xdata */
136:     DM pack;
137:     VecGetDM(a_X, &pack);
139:     PetscLogEventBegin(ctx->events[1], 0, 0, 0, 0);
140:     for (PetscInt fieldA = 0; fieldA < ctx->num_species; fieldA++) {
141:       Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
142:       if (dim == 2) Eq_m[fieldA] *= 2 * PETSC_PI;                                                  /* add the 2pi term that is not in Landau */
143:     }
144:     if (!ctx->gpu_assembly) {
145:       Vec         *locXArray, *globXArray;
146:       PetscScalar *cellClosure_it;
147:       PetscInt     cellClosure_sz = 0, nDMs, Nf[LANDAU_MAX_GRIDS];
148:       PetscSection section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
149:       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
150:         DMGetLocalSection(ctx->plex[grid], &section[grid]);
151:         DMGetGlobalSection(ctx->plex[grid], &globsection[grid]);
152:         PetscSectionGetNumFields(section[grid], &Nf[grid]);
153:       }
154:       /* count cellClosure size */
155:       DMCompositeGetNumberDM(pack, &nDMs);
156:       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) cellClosure_sz += Nb * Nf[grid] * numCells[grid];
157:       PetscMalloc1(cellClosure_sz * ctx->batch_sz, &cellClosure);
158:       cellClosure_it = cellClosure;
159:       PetscMalloc(sizeof(*locXArray) * nDMs, &locXArray);
160:       PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray);
161:       DMCompositeGetLocalAccessArray(pack, a_X, nDMs, NULL, locXArray);
162:       DMCompositeGetAccessArray(pack, a_X, nDMs, NULL, globXArray);
163:       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP (once)
164:         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
165:           Vec      locX = locXArray[LAND_PACK_IDX(b_id, grid)], globX = globXArray[LAND_PACK_IDX(b_id, grid)], locX2;
166:           PetscInt cStart, cEnd, ei;
167:           VecDuplicate(locX, &locX2);
168:           DMGlobalToLocalBegin(ctx->plex[grid], globX, INSERT_VALUES, locX2);
169:           DMGlobalToLocalEnd(ctx->plex[grid], globX, INSERT_VALUES, locX2);
170:           DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
171:           for (ei = cStart; ei < cEnd; ++ei) {
172:             PetscScalar *coef = NULL;
173:             DMPlexVecGetClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef);
174:             PetscMemcpy(cellClosure_it, coef, Nb * Nf[grid] * sizeof(*cellClosure_it)); /* change if LandauIPReal != PetscScalar */
175:             DMPlexVecRestoreClosure(ctx->plex[grid], section[grid], locX2, ei, NULL, &coef);
176:             cellClosure_it += Nb * Nf[grid];
177:           }
178:           VecDestroy(&locX2);
179:         }
180:       }
181:       PetscCheck(cellClosure_it - cellClosure == cellClosure_sz * ctx->batch_sz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "iteration wrong %" PetscInt_FMT " != cellClosure_sz = %" PetscInt_FMT, (PetscInt)(cellClosure_it - cellClosure),
182:                  cellClosure_sz * ctx->batch_sz);
183:       DMCompositeRestoreLocalAccessArray(pack, a_X, nDMs, NULL, locXArray);
184:       DMCompositeRestoreAccessArray(pack, a_X, nDMs, NULL, globXArray);
185:       PetscFree(locXArray);
186:       PetscFree(globXArray);
187:       xdata = NULL;
188:     } else {
189:       PetscMemType mtype;
190:       if (ctx->jacobian_field_major_order) { // get data in batch ordering
191:         VecScatterBegin(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD);
192:         VecScatterEnd(ctx->plex_batch, a_X, ctx->work_vec, INSERT_VALUES, SCATTER_FORWARD);
193:         VecGetArrayReadAndMemType(ctx->work_vec, &xdata, &mtype);
194:       } else {
195:         VecGetArrayReadAndMemType(a_X, &xdata, &mtype);
196:       }
198:       cellClosure = NULL;
199:     }
200:     PetscLogEventEnd(ctx->events[1], 0, 0, 0, 0);
201:   } else xdata = cellClosure = NULL;

203:   /* do it */
204:   if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
205:     if (ctx->deviceType == LANDAU_CUDA) {
206: #if defined(PETSC_HAVE_CUDA)
207:       LandauCUDAJacobian(ctx->plex, Nq, ctx->batch_sz, ctx->num_grids, numCells, Eq_m, cellClosure, xdata, &ctx->SData_d, shift, ctx->events, ctx->mat_offset, ctx->species_offset, subJ, JacP);
208: #else
209:       SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "cuda");
210: #endif
211:     } else if (ctx->deviceType == LANDAU_KOKKOS) {
212: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
213:       LandauKokkosJacobian(ctx->plex, Nq, ctx->batch_sz, ctx->num_grids, numCells, Eq_m, cellClosure, xdata, &ctx->SData_d, shift, ctx->events, ctx->mat_offset, ctx->species_offset, subJ, JacP);
214: #else
215:       SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "kokkos");
216: #endif
217:     }
218:   } else {               /* CPU version */
219:     PetscTabulation *Tf; // used for CPU and print info. Same on all grids and all species
220:     PetscInt         ip_offset[LANDAU_MAX_GRIDS + 1], ipf_offset[LANDAU_MAX_GRIDS + 1], elem_offset[LANDAU_MAX_GRIDS + 1], IPf_sz_glb, IPf_sz_tot, num_grids = ctx->num_grids, Nf[LANDAU_MAX_GRIDS];
221:     PetscReal       *ff, *dudx, *dudy, *dudz, *invJ_a = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w;
222:     PetscReal        Eq_m[LANDAU_MAX_SPECIES], invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
223:     PetscSection     section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
224:     PetscScalar     *coo_vals = NULL;
225:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
226:       DMGetLocalSection(ctx->plex[grid], &section[grid]);
227:       DMGetGlobalSection(ctx->plex[grid], &globsection[grid]);
228:       PetscSectionGetNumFields(section[grid], &Nf[grid]);
229:     }
230:     /* count IPf size, etc */
231:     PetscDSGetTabulation(prob, &Tf); // Bf, &Df same for all grids
232:     const PetscReal *const BB = Tf[0]->T[0], *const DD = Tf[0]->T[1];
233:     ip_offset[0] = ipf_offset[0] = elem_offset[0] = 0;
234:     for (PetscInt grid = 0; grid < num_grids; grid++) {
235:       PetscInt nfloc        = ctx->species_offset[grid + 1] - ctx->species_offset[grid];
236:       elem_offset[grid + 1] = elem_offset[grid] + numCells[grid];
237:       ip_offset[grid + 1]   = ip_offset[grid] + numCells[grid] * Nq;
238:       ipf_offset[grid + 1]  = ipf_offset[grid] + Nq * nfloc * numCells[grid];
239:     }
240:     IPf_sz_glb = ipf_offset[num_grids];
241:     IPf_sz_tot = IPf_sz_glb * ctx->batch_sz;
242:     // prep COO
243:     if (ctx->coo_assembly) {
244:       PetscMalloc1(ctx->SData_d.coo_size, &coo_vals); // allocate every time?
245:       PetscInfo(ctx->plex[0], "COO Allocate %" PetscInt_FMT " values\n", (PetscInt)ctx->SData_d.coo_size);
246:     }
247:     if (shift == 0.0) { /* compute dynamic data f and df and init data for Jacobian */
248: #if defined(PETSC_HAVE_THREADSAFETY)
249:       double starttime, endtime;
250:       starttime = MPI_Wtime();
251: #endif
252:       PetscLogEventBegin(ctx->events[8], 0, 0, 0, 0);
253:       for (PetscInt fieldA = 0; fieldA < ctx->num_species; fieldA++) {
254:         invMass[fieldA] = ctx->m_0 / ctx->masses[fieldA];
255:         Eq_m[fieldA]    = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
256:         if (dim == 2) Eq_m[fieldA] *= 2 * PETSC_PI;                                                     /* add the 2pi term that is not in Landau */
257:         nu_alpha[fieldA] = PetscSqr(ctx->charges[fieldA] / ctx->m_0) * ctx->m_0 / ctx->masses[fieldA];
258:         nu_beta[fieldA]  = PetscSqr(ctx->charges[fieldA] / ctx->epsilon0) * ctx->lnLam / (8 * PETSC_PI) * ctx->t_0 * ctx->n_0 / PetscPowReal(ctx->v_0, 3);
259:       }
260:       PetscMalloc4(IPf_sz_tot, &ff, IPf_sz_tot, &dudx, IPf_sz_tot, &dudy, dim == 3 ? IPf_sz_tot : 0, &dudz);
261:       // F df/dx
262:       for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) {                        // for each element
263:         const PetscInt b_Nelem = elem_offset[num_grids], b_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem; // b_id == OMP thd_id in batch
264:         // find my grid:
265:         PetscInt grid = 0;
266:         while (b_elem_idx >= elem_offset[grid + 1]) grid++; // yuck search for grid
267:         {
268:           const PetscInt loc_nip = numCells[grid] * Nq, loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = b_elem_idx - elem_offset[grid];
269:           const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); //b_id*b_N + ctx->mat_offset[grid];
270:           PetscScalar   *coef, coef_buff[LANDAU_MAX_SPECIES * LANDAU_MAX_NQ];
271:           PetscReal     *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim]; // invJ is static data on batch 0
272:           PetscInt       b, f, q;
273:           if (cellClosure) {
274:             coef = &cellClosure[b_id * IPf_sz_glb + ipf_offset[grid] + loc_elem * Nb * loc_Nf]; // this is const
275:           } else {
276:             coef = coef_buff;
277:             for (f = 0; f < loc_Nf; ++f) {
278:               LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][f][0];
279:               for (b = 0; b < Nb; ++b) {
280:                 PetscInt idx = Idxs[b];
281:                 if (idx >= 0) {
282:                   coef[f * Nb + b] = xdata[idx + moffset];
283:                 } else {
284:                   idx              = -idx - 1;
285:                   coef[f * Nb + b] = 0;
286:                   for (q = 0; q < maps[grid].num_face; q++) {
287:                     PetscInt    id    = maps[grid].c_maps[idx][q].gid;
288:                     PetscScalar scale = maps[grid].c_maps[idx][q].scale;
289:                     coef[f * Nb + b] += scale * xdata[id + moffset];
290:                   }
291:                 }
292:               }
293:             }
294:           }
295:           /* get f and df */
296:           for (PetscInt qi = 0; qi < Nq; qi++) {
297:             const PetscReal *invJ = &invJe[qi * dim * dim];
298:             const PetscReal *Bq   = &BB[qi * Nb];
299:             const PetscReal *Dq   = &DD[qi * Nb * dim];
300:             PetscReal        u_x[LANDAU_DIM];
301:             /* get f & df */
302:             for (f = 0; f < loc_Nf; ++f) {
303:               const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid] + f * loc_nip + loc_elem * Nq + qi;
304:               PetscInt       b, e;
305:               PetscReal      refSpaceDer[LANDAU_DIM];
306:               ff[idx] = 0.0;
307:               for (int d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
308:               for (b = 0; b < Nb; ++b) {
309:                 const PetscInt cidx = b;
310:                 ff[idx] += Bq[cidx] * PetscRealPart(coef[f * Nb + cidx]);
311:                 for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx * dim + d] * PetscRealPart(coef[f * Nb + cidx]);
312:               }
313:               for (int d = 0; d < LANDAU_DIM; ++d) {
314:                 for (e = 0, u_x[d] = 0.0; e < LANDAU_DIM; ++e) u_x[d] += invJ[e * dim + d] * refSpaceDer[e];
315:               }
316:               dudx[idx] = u_x[0];
317:               dudy[idx] = u_x[1];
318: #if LANDAU_DIM == 3
319:               dudz[idx] = u_x[2];
320: #endif
321:             }
322:           } // q
323:         }   // element scope
324:       }     // batch * element loop
325:       PetscLogEventEnd(ctx->events[8], 0, 0, 0, 0);
326: #if defined(PETSC_HAVE_THREADSAFETY)
327:       endtime = MPI_Wtime();
328:       if (ctx->stage) ctx->times[LANDAU_F_DF] += (endtime - starttime);
329: #endif
330:     } // Jacobian setup
331:     // assemble Jacobian (or mass)
332:     for (PetscInt tid = 0; tid < ctx->batch_sz * elem_offset[num_grids]; tid++) { // for each element
333:       const PetscInt b_Nelem      = elem_offset[num_grids];
334:       const PetscInt glb_elem_idx = tid % b_Nelem, b_id = tid / b_Nelem;
335:       PetscInt       grid = 0;
336: #if defined(PETSC_HAVE_THREADSAFETY)
337:       double starttime, endtime;
338:       starttime = MPI_Wtime();
339: #endif
340:       while (glb_elem_idx >= elem_offset[grid + 1]) grid++;
341:       {
342:         const PetscInt   loc_Nf = ctx->species_offset[grid + 1] - ctx->species_offset[grid], loc_elem = glb_elem_idx - elem_offset[grid];
343:         const PetscInt   moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset), totDim = loc_Nf * Nq, elemMatSize = totDim * totDim;
344:         PetscScalar     *elemMat;
345:         const PetscReal *invJe = &invJ_a[(ip_offset[grid] + loc_elem * Nq) * dim * dim];
346:         PetscMalloc1(elemMatSize, &elemMat);
347:         PetscMemzero(elemMat, elemMatSize * sizeof(*elemMat));
348:         if (shift == 0.0) { // Jacobian
349:           PetscLogEventBegin(ctx->events[4], 0, 0, 0, 0);
350:         } else { // mass
351:           PetscLogEventBegin(ctx->events[16], 0, 0, 0, 0);
352:         }
353:         for (PetscInt qj = 0; qj < Nq; ++qj) {
354:           const PetscInt jpidx_glb = ip_offset[grid] + qj + loc_elem * Nq;
355:           PetscReal      g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM]; // could make a LANDAU_MAX_SPECIES_GRID ~ number of ions - 1
356:           PetscInt       d, d2, dp, d3, IPf_idx;
357:           if (shift == 0.0) { // Jacobian
358:             const PetscReal *const invJj = &invJe[qj * dim * dim];
359:             PetscReal              gg2[LANDAU_MAX_SPECIES][LANDAU_DIM], gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
360:             const PetscReal        vj[3] = {xx[jpidx_glb], yy[jpidx_glb], zz ? zz[jpidx_glb] : 0}, wj = ww[jpidx_glb];
361:             // create g2 & g3
362:             for (d = 0; d < LANDAU_DIM; d++) { // clear accumulation data D & K
363:               gg2_temp[d] = 0;
364:               for (d2 = 0; d2 < LANDAU_DIM; d2++) gg3_temp[d][d2] = 0;
365:             }
366:             /* inner beta reduction */
367:             IPf_idx = 0;
368:             for (PetscInt grid_r = 0, f_off = 0, ipidx = 0; grid_r < ctx->num_grids; grid_r++, f_off = ctx->species_offset[grid_r]) { // IPf_idx += nip_loc_r*Nfloc_r
369:               PetscInt nip_loc_r = numCells[grid_r] * Nq, Nfloc_r = Nf[grid_r];
370:               for (PetscInt ei_r = 0, loc_fdf_idx = 0; ei_r < numCells[grid_r]; ++ei_r) {
371:                 for (PetscInt qi = 0; qi < Nq; qi++, ipidx++, loc_fdf_idx++) {
372:                   const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
373:                   PetscReal       temp1[3] = {0, 0, 0}, temp2 = 0;
374: #if LANDAU_DIM == 2
375:                   PetscReal Ud[2][2], Uk[2][2], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
376:                   LandauTensor2D(vj, x, y, Ud, Uk, mask);
377: #else
378:                   PetscReal U[3][3], z = zz[ipidx], mask = (PetscAbs(vj[0] - x) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[1] - y) < 100 * PETSC_SQRT_MACHINE_EPSILON && PetscAbs(vj[2] - z) < 100 * PETSC_SQRT_MACHINE_EPSILON) ? 0. : 1.;
379:                   if (ctx->use_relativistic_corrections) {
380:                     LandauTensor3DRelativistic(vj, x, y, z, U, mask, C_0(ctx->v_0));
381:                   } else {
382:                     LandauTensor3D(vj, x, y, z, U, mask);
383:                   }
384: #endif
385:                   for (int f = 0; f < Nfloc_r; ++f) {
386:                     const PetscInt idx = b_id * IPf_sz_glb + ipf_offset[grid_r] + f * nip_loc_r + ei_r * Nq + qi; // IPf_idx + f*nip_loc_r + loc_fdf_idx;
387:                     temp1[0] += dudx[idx] * nu_beta[f + f_off] * invMass[f + f_off];
388:                     temp1[1] += dudy[idx] * nu_beta[f + f_off] * invMass[f + f_off];
389: #if LANDAU_DIM == 3
390:                     temp1[2] += dudz[idx] * nu_beta[f + f_off] * invMass[f + f_off];
391: #endif
392:                     temp2 += ff[idx] * nu_beta[f + f_off];
393:                   }
394:                   temp1[0] *= wi;
395:                   temp1[1] *= wi;
396: #if LANDAU_DIM == 3
397:                   temp1[2] *= wi;
398: #endif
399:                   temp2 *= wi;
400: #if LANDAU_DIM == 2
401:                   for (d2 = 0; d2 < 2; d2++) {
402:                     for (d3 = 0; d3 < 2; ++d3) {
403:                       /* K = U * grad(f): g2=e: i,A */
404:                       gg2_temp[d2] += Uk[d2][d3] * temp1[d3];
405:                       /* D = -U * (I \kron (fx)): g3=f: i,j,A */
406:                       gg3_temp[d2][d3] += Ud[d2][d3] * temp2;
407:                     }
408:                   }
409: #else
410:                   for (d2 = 0; d2 < 3; ++d2) {
411:                     for (d3 = 0; d3 < 3; ++d3) {
412:                       /* K = U * grad(f): g2 = e: i,A */
413:                       gg2_temp[d2] += U[d2][d3] * temp1[d3];
414:                       /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
415:                       gg3_temp[d2][d3] += U[d2][d3] * temp2;
416:                     }
417:                   }
418: #endif
419:                 } // qi
420:               }   // ei_r
421:               IPf_idx += nip_loc_r * Nfloc_r;
422:             } /* grid_r - IPs */
424:             // add alpha and put in gg2/3
425:             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) {
426:               for (d2 = 0; d2 < LANDAU_DIM; d2++) {
427:                 gg2[fieldA][d2] = gg2_temp[d2] * nu_alpha[fieldA + f_off];
428:                 for (d3 = 0; d3 < LANDAU_DIM; d3++) gg3[fieldA][d2][d3] = -gg3_temp[d2][d3] * nu_alpha[fieldA + f_off] * invMass[fieldA + f_off];
429:               }
430:             }
431:             /* add electric field term once per IP */
432:             for (PetscInt fieldA = 0, f_off = ctx->species_offset[grid]; fieldA < loc_Nf; ++fieldA) gg2[fieldA][LANDAU_DIM - 1] += Eq_m[fieldA + f_off];
433:             /* Jacobian transform - g2, g3 */
434:             for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
435:               for (d = 0; d < dim; ++d) {
436:                 g2[fieldA][d] = 0.0;
437:                 for (d2 = 0; d2 < dim; ++d2) {
438:                   g2[fieldA][d] += invJj[d * dim + d2] * gg2[fieldA][d2];
439:                   g3[fieldA][d][d2] = 0.0;
440:                   for (d3 = 0; d3 < dim; ++d3) {
441:                     for (dp = 0; dp < dim; ++dp) g3[fieldA][d][d2] += invJj[d * dim + d3] * gg3[fieldA][d3][dp] * invJj[d2 * dim + dp];
442:                   }
443:                   g3[fieldA][d][d2] *= wj;
444:                 }
445:                 g2[fieldA][d] *= wj;
446:               }
447:             }
448:           } else { // mass
449:             PetscReal wj = ww[jpidx_glb];
450:             /* Jacobian transform - g0 */
451:             for (PetscInt fieldA = 0; fieldA < loc_Nf; ++fieldA) {
452:               if (dim == 2) {
453:                 g0[fieldA] = wj * shift * 2. * PETSC_PI; // move this to below and remove g0
454:               } else {
455:                 g0[fieldA] = wj * shift; // move this to below and remove g0
456:               }
457:             }
458:           }
459:           /* FE matrix construction */
460:           {
461:             PetscInt         fieldA, d, f, d2, g;
462:             const PetscReal *BJq = &BB[qj * Nb], *DIq = &DD[qj * Nb * dim];
463:             /* assemble - on the diagonal (I,I) */
464:             for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
465:               for (f = 0; f < Nb; f++) {
466:                 const PetscInt i = fieldA * Nb + f; /* Element matrix row */
467:                 for (g = 0; g < Nb; ++g) {
468:                   const PetscInt j    = fieldA * Nb + g; /* Element matrix column */
469:                   const PetscInt fOff = i * totDim + j;
470:                   if (shift == 0.0) {
471:                     for (d = 0; d < dim; ++d) {
472:                       elemMat[fOff] += DIq[f * dim + d] * g2[fieldA][d] * BJq[g];
473:                       for (d2 = 0; d2 < dim; ++d2) elemMat[fOff] += DIq[f * dim + d] * g3[fieldA][d][d2] * DIq[g * dim + d2];
474:                     }
475:                   } else { // mass
476:                     elemMat[fOff] += BJq[f] * g0[fieldA] * BJq[g];
477:                   }
478:                 }
479:               }
480:             }
481:           }
482:         }                   /* qj loop */
483:         if (shift == 0.0) { // Jacobian
484:           PetscLogEventEnd(ctx->events[4], 0, 0, 0, 0);
485:         } else {
486:           PetscLogEventEnd(ctx->events[16], 0, 0, 0, 0);
487:         }
488: #if defined(PETSC_HAVE_THREADSAFETY)
489:         endtime = MPI_Wtime();
490:         if (ctx->stage) ctx->times[LANDAU_KERNEL] += (endtime - starttime);
491: #endif
492:         /* assemble matrix */
493:         if (!container) {
494:           PetscInt cStart;
495:           PetscLogEventBegin(ctx->events[6], 0, 0, 0, 0);
496:           DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, NULL);
497:           DMPlexMatSetClosure(ctx->plex[grid], section[grid], globsection[grid], subJ[LAND_PACK_IDX(b_id, grid)], loc_elem + cStart, elemMat, ADD_VALUES);
498:           PetscLogEventEnd(ctx->events[6], 0, 0, 0, 0);
499:         } else { // GPU-like assembly, for debugging
500:           PetscInt    fieldA, q, f, g, d, nr, nc, rows0[LANDAU_MAX_Q_FACE] = {0}, cols0[LANDAU_MAX_Q_FACE] = {0}, rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE];
501:           PetscScalar vals[LANDAU_MAX_Q_FACE * LANDAU_MAX_Q_FACE] = {0}, row_scale[LANDAU_MAX_Q_FACE] = {0}, col_scale[LANDAU_MAX_Q_FACE] = {0};
502:           LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ + 1] = (LandauIdx(*)[LANDAU_MAX_NQ + 1]) ctx->SData_d.coo_elem_point_offsets;
503:           /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
504:           for (fieldA = 0; fieldA < loc_Nf; fieldA++) {
505:             LandauIdx *const Idxs = &maps[grid].gIdx[loc_elem][fieldA][0];
506:             for (f = 0; f < Nb; f++) {
507:               PetscInt idx = Idxs[f];
508:               if (idx >= 0) {
509:                 nr           = 1;
510:                 rows0[0]     = idx;
511:                 row_scale[0] = 1.;
512:               } else {
513:                 idx = -idx - 1;
514:                 for (q = 0, nr = 0; q < maps[grid].num_face; q++, nr++) {
515:                   if (maps[grid].c_maps[idx][q].gid < 0) break;
516:                   rows0[q]     = maps[grid].c_maps[idx][q].gid;
517:                   row_scale[q] = maps[grid].c_maps[idx][q].scale;
518:                 }
519:               }
520:               for (g = 0; g < Nb; ++g) {
521:                 idx = Idxs[g];
522:                 if (idx >= 0) {
523:                   nc           = 1;
524:                   cols0[0]     = idx;
525:                   col_scale[0] = 1.;
526:                 } else {
527:                   idx = -idx - 1;
528:                   // nc is counted in the loop below (at most maps[grid].num_face)
529:                   for (q = 0, nc = 0; q < maps[grid].num_face; q++, nc++) {
530:                     if (maps[grid].c_maps[idx][q].gid < 0) break;
531:                     cols0[q]     = maps[grid].c_maps[idx][q].gid;
532:                     col_scale[q] = maps[grid].c_maps[idx][q].scale;
533:                   }
534:                 }
535:                 const PetscInt    i   = fieldA * Nb + f; /* Element matrix row */
536:                 const PetscInt    j   = fieldA * Nb + g; /* Element matrix column */
537:                 const PetscScalar Aij = elemMat[i * totDim + j];
538:                 if (coo_vals) { // mirror (i,j) in CreateStaticGPUData
539:                   const int fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb;
540:                   const int idx0 = b_id * coo_elem_offsets[elem_offset[num_grids]] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
541:                   for (int q = 0, idx2 = idx0; q < nr; q++) {
542:                     for (int d = 0; d < nc; d++, idx2++) coo_vals[idx2] = row_scale[q] * col_scale[d] * Aij;
543:                   }
544:                 } else {
545:                   for (q = 0; q < nr; q++) rows[q] = rows0[q] + moffset;
546:                   for (d = 0; d < nc; d++) cols[d] = cols0[d] + moffset;
547:                   for (q = 0; q < nr; q++) {
548:                     for (d = 0; d < nc; d++) vals[q * nc + d] = row_scale[q] * col_scale[d] * Aij;
549:                   }
550:                   MatSetValues(JacP, nr, rows, nc, cols, vals, ADD_VALUES);
551:                 }
552:               }
553:             }
554:           }
555:         }
556:         if (loc_elem == -1) {
557:           PetscPrintf(ctx->comm, "CPU Element matrix\n");
558:           for (int d = 0; d < totDim; ++d) {
559:             for (int f = 0; f < totDim; ++f) PetscPrintf(ctx->comm, " %12.5e", (double)PetscRealPart(elemMat[d * totDim + f]));
560:             PetscPrintf(ctx->comm, "\n");
561:           }
562:           exit(12);
563:         }
564:         PetscFree(elemMat);
565:       }                 /* grid */
566:     }                   /* outer element & batch loop */
567:     if (shift == 0.0) { // Jacobian: free the dynamic f/df data allocated above
568:       PetscFree4(ff, dudx, dudy, dudz);
569:     }
570:     if (!container) {                                         // plain CPU assembly: merge the per-grid sub-matrices into the global JacP
571:       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // OpenMP
572:         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
573:           const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset); // b_id*b_N + ctx->mat_offset[grid];
574:           PetscInt           nloc, nzl, colbuf[1024], row;
575:           const PetscInt    *cols;
576:           const PetscScalar *vals;
577:           Mat                B = subJ[LAND_PACK_IDX(b_id, grid)];
578:           MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY);
579:           MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY);
580:           MatGetSize(B, &nloc, NULL);
581:           for (int i = 0; i < nloc; i++) {
582:             MatGetRow(B, i, &nzl, &cols, &vals);
584:             for (int j = 0; j < nzl; j++) colbuf[j] = moffset + cols[j];
585:             row = moffset + i;
586:             MatSetValues(JacP, 1, &row, nzl, colbuf, vals, ADD_VALUES);
587:             MatRestoreRow(B, i, &nzl, &cols, &vals);
588:           }
589:           MatDestroy(&B);
590:         }
591:       }
592:     }
593:     if (coo_vals) {
594:       MatSetValuesCOO(JacP, coo_vals, ADD_VALUES);
595:       PetscFree(coo_vals);
596:     }
597:   } /* CPU version */
598:   MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY);
599:   MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY);
600:   /* clean up */
601:   if (cellClosure) PetscFree(cellClosure);
602:   if (xdata) VecRestoreArrayReadAndMemType(ctx->jacobian_field_major_order ? ctx->work_vec : a_X, &xdata); // restore the array from the vector it came from
603:   return 0;
604: }
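
/* A hedged sketch (not part of plexland.c) of the MatSetValuesCOO() pattern used by the
   coo_vals branch above, reduced to a tiny 2x2 sequential matrix. The coo_i/coo_j arrays
   play the role of the SData_d.coo_* metadata; all names and sizes here are illustrative. */
static PetscErrorCode ExampleCOOAssembly(void)
{
  Mat         A;
  PetscInt    coo_i[4] = {0, 0, 1, 1}, coo_j[4] = {0, 1, 0, 1};
  PetscScalar coo_v[4] = {1., 2., 3., 4.};

  MatCreate(PETSC_COMM_SELF, &A);
  MatSetSizes(A, 2, 2, 2, 2);
  MatSetType(A, MATSEQAIJ);
  MatSetPreallocationCOO(A, 4, coo_i, coo_j); /* set the nonzero pattern once */
  MatSetValuesCOO(A, coo_v, ADD_VALUES);      /* fast, values-only insertion, as in the kernel above */
  MatDestroy(&A);
  return 0;
}
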

606: #if defined(LANDAU_ADD_BCS)
607: static void zero_bc(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar uexact[])
608: {
609:   uexact[0] = 0;
610: }
611: #endif

613: #define MATVEC2(__a, __x, __p) \
614:   { \
615:     int i, j; \
616:     for (i = 0; i < 2; i++) { \
617:       __p[i] = 0; \
618:       for (j = 0; j < 2; j++) __p[i] += __a[i][j] * __x[j]; \
619:     } \
620:   }
621: static void CircleInflate(PetscReal r1, PetscReal r2, PetscReal r0, PetscInt num_sections, PetscReal x, PetscReal y, PetscReal *outX, PetscReal *outY)
622: {
623:   PetscReal rr = PetscSqrtReal(x * x + y * y), outfact, efact;
624:   if (rr < r1 + PETSC_SQRT_MACHINE_EPSILON) {
625:     *outX = x;
626:     *outY = y;
627:   } else {
628:     const PetscReal xy[2] = {x, y}, sinphi = y / rr, cosphi = x / rr;
629:     PetscReal       cth, sth, xyprime[2], Rth[2][2], rotcos, newrr;
630:     if (num_sections == 2) {
631:       rotcos  = 0.70710678118654;
632:       outfact = 1.5;
633:       efact   = 2.5;
634:       /* rotate normalized vector into [-pi/4,pi/4) */
635:       if (sinphi >= 0.) { /* top cell, rotate by -pi/4 */
636:         cth = 0.707106781186548;
637:         sth = -0.707106781186548;
638:       } else { /* bottom cell, rotate by +pi/4 */
639:         cth = 0.707106781186548;
640:         sth = .707106781186548;
641:       }
642:     } else if (num_sections == 3) {
643:       rotcos  = 0.86602540378443;
644:       outfact = 1.5;
645:       efact   = 2.5;
646:       /* rotate normalized vector into [-pi/6,pi/6) */
647:       if (sinphi >= 0.5) { /* top cell, -pi/3 */
648:         cth = 0.5;
649:         sth = -0.866025403784439;
650:       } else if (sinphi >= -.5) { /* mid cell 0 */
651:         cth = 1.;
652:         sth = .0;
653:       } else { /* bottom cell +pi/3 */
654:         cth = 0.5;
655:         sth = 0.866025403784439;
656:       }
657:     } else if (num_sections == 4) {
658:       rotcos  = 0.9238795325112;
659:       outfact = 1.5;
660:       efact   = 3;
661:       /* rotate normalized vector into [-pi/8,pi/8) */
662:       if (sinphi >= 0.707106781186548) { /* top cell, -3pi/8 */
663:         cth = 0.38268343236509;
664:         sth = -0.923879532511287;
665:       } else if (sinphi >= 0.) { /* mid top cell -pi/8 */
666:         cth = 0.923879532511287;
667:         sth = -.38268343236509;
668:       } else if (sinphi >= -0.707106781186548) { /* mid bottom cell + pi/8 */
669:         cth = 0.923879532511287;
670:         sth = 0.38268343236509;
671:       } else { /* bottom cell + 3pi/8 */
672:         cth = 0.38268343236509;
673:         sth = .923879532511287;
674:       }
675:     } else {
676:       cth    = 0.;
677:       sth    = 0.;
678:       rotcos = 0;
679:       efact = outfact = 0; /* outfact was left uninitialized in this fallback */
680:     }
681:     Rth[0][0] = cth;
682:     Rth[0][1] = -sth;
683:     Rth[1][0] = sth;
684:     Rth[1][1] = cth;
685:     MATVEC2(Rth, xy, xyprime);
686:     if (num_sections == 2) {
687:       newrr = xyprime[0] / rotcos;
688:     } else {
689:       PetscReal newcosphi = xyprime[0] / rr, rin = r1, rout = rr - rin;
690:       PetscReal routmax = r0 * rotcos / newcosphi - rin, nroutmax = r0 - rin, routfrac = rout / routmax;
691:       newrr = rin + routfrac * nroutmax;
692:     }
693:     *outX = cosphi * newrr;
694:     *outY = sinphi * newrr;
695:     /* grade */
696:     PetscReal fact, tt, rs, re, rr = PetscSqrtReal(PetscSqr(*outX) + PetscSqr(*outY));
697:     if (rr > r2) {
698:       rs   = r2;
699:       re   = r0;
700:       fact = outfact;
701:     } /* outer zone */
702:     else {
703:       rs   = r1;
704:       re   = r2;
705:       fact = efact;
706:     } /* electron zone */
707:     tt = (rs + PetscPowReal((rr - rs) / (re - rs), fact) * (re - rs)) / rr;
708:     *outX *= tt;
709:     *outY *= tt;
710:   }
711: }

713: static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx)
714: {
715:   LandauCtx *ctx = (LandauCtx *)a_ctx;
716:   PetscReal  r = abc[0], z = abc[1];
717:   if (ctx->inflate) {
718:     PetscReal absR, absZ;
719:     absR = PetscAbs(r);
720:     absZ = PetscAbs(z);
721:     CircleInflate(ctx->i_radius[0], ctx->e_radius, ctx->radius[0], ctx->num_sections, absR, absZ, &absR, &absZ); // wrong: how do I know what grid I am on?
722:     r = (r > 0) ? absR : -absR;
723:     z = (z > 0) ? absZ : -absZ;
724:   }
725:   xyz[0] = r;
726:   xyz[1] = z;
727:   if (dim == 3) xyz[2] = abc[2];

729:   return 0;
730: }

732: /* create DMComposite of meshes for each species group */
733: static PetscErrorCode LandauDMCreateVMeshes(MPI_Comm comm_self, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM pack)
734: {
735:   { /* p4est, quads */
736:     /* Create plex mesh of Landau domain */
737:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
738:       PetscReal radius = ctx->radius[grid];
739:       if (!ctx->sphere) {
740:         PetscReal      lo[] = {-radius, -radius, -radius}, hi[] = {radius, radius, radius};
741:         DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
742:         if (dim == 2) lo[0] = 0;
743:         DMPlexCreateBoxMesh(comm_self, dim, PETSC_FALSE, ctx->cells0, lo, hi, periodicity, PETSC_TRUE, &ctx->plex[grid]); // todo: make composite and create dm[grid] here
744:         DMLocalizeCoordinates(ctx->plex[grid]);                                                                           /* needed for periodic */
745:         if (dim == 3) PetscObjectSetName((PetscObject)ctx->plex[grid], "cube");
746:         else PetscObjectSetName((PetscObject)ctx->plex[grid], "half-plane");
747:       } else if (dim == 2) { // TODO: the sphere layout is not right; it should use a single inner radius
748:         PetscInt   numCells, cells[16][4], i, j;
749:         PetscInt   numVerts;
750:         PetscReal  inner_radius1 = ctx->i_radius[grid], inner_radius2 = ctx->e_radius;
751:         PetscReal *flatCoords = NULL;
752:         PetscInt  *flatCells  = NULL, *pcell;
753:         if (ctx->num_sections == 2) {
754: #if 1
755:           numCells        = 5;
756:           numVerts        = 10;
757:           int cells2[][4] = {
758:             {0, 1, 4, 3},
759:             {1, 2, 5, 4},
760:             {3, 4, 7, 6},
761:             {4, 5, 8, 7},
762:             {6, 7, 8, 9}
763:           };
764:           for (i = 0; i < numCells; i++)
765:             for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
766:           PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
767:           {
768:             PetscReal(*coords)[2] = (PetscReal(*)[2])flatCoords;
769:             for (j = 0; j < numVerts - 1; j++) {
770:               PetscReal z, r, theta = -PETSC_PI / 2 + (j % 3) * PETSC_PI / 2;
771:               PetscReal rad = (j >= 6) ? inner_radius1 : (j >= 3) ? inner_radius2 : ctx->radius[grid];
772:               z             = rad * PetscSinReal(theta);
773:               coords[j][1]  = z;
774:               r             = rad * PetscCosReal(theta);
775:               coords[j][0]  = r;
776:             }
777:             coords[numVerts - 1][0] = coords[numVerts - 1][1] = 0;
778:           }
779: #else
780:           numCells = 4;
781:           numVerts = 8;
782:           static int cells2[][4] = {
783:             {0, 1, 2, 3},
784:             {4, 5, 1, 0},
785:             {5, 6, 2, 1},
786:             {6, 7, 3, 2}
787:           };
788:           for (i = 0; i < numCells; i++)
789:             for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
790:           PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
791:           {
792:             PetscReal(*coords)[2] = (PetscReal(*)[2])flatCoords;
793:             PetscInt j;
794:             for (j = 0; j < 8; j++) {
795:               PetscReal z, r;
796:               PetscReal theta = -PETSC_PI / 2 + (j % 4) * PETSC_PI / 3.;
797:               PetscReal rad = ctx->radius[grid] * ((j < 4) ? 0.5 : 1.0);
798:               z = rad * PetscSinReal(theta);
799:               coords[j][1] = z;
800:               r = rad * PetscCosReal(theta);
801:               coords[j][0] = r;
802:             }
803:           }
804: #endif
805:         } else if (ctx->num_sections == 3) {
806:           numCells        = 7;
807:           numVerts        = 12;
808:           int cells2[][4] = {
809:             {0, 1, 5,  4 },
810:             {1, 2, 6,  5 },
811:             {2, 3, 7,  6 },
812:             {4, 5, 9,  8 },
813:             {5, 6, 10, 9 },
814:             {6, 7, 11, 10},
815:             {8, 9, 10, 11}
816:           };
817:           for (i = 0; i < numCells; i++)
818:             for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
819:           PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
820:           {
821:             PetscReal(*coords)[2] = (PetscReal(*)[2])flatCoords;
822:             for (j = 0; j < numVerts; j++) {
823:               PetscReal z, r, theta = -PETSC_PI / 2 + (j % 4) * PETSC_PI / 3;
824:               PetscReal rad = (j >= 8) ? inner_radius1 : (j >= 4) ? inner_radius2 : ctx->radius[grid];
825:               z             = rad * PetscSinReal(theta);
826:               coords[j][1]  = z;
827:               r             = rad * PetscCosReal(theta);
828:               coords[j][0]  = r;
829:             }
830:           }
831:         } else if (ctx->num_sections == 4) {
832:           numCells        = 10;
833:           numVerts        = 16;
834:           int cells2[][4] = {
835:             {0,  1,  6,  5 },
836:             {1,  2,  7,  6 },
837:             {2,  3,  8,  7 },
838:             {3,  4,  9,  8 },
839:             {5,  6,  11, 10},
840:             {6,  7,  12, 11},
841:             {7,  8,  13, 12},
842:             {8,  9,  14, 13},
843:             {10, 11, 12, 15},
844:             {12, 13, 14, 15}
845:           };
846:           for (i = 0; i < numCells; i++)
847:             for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
848:           PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
849:           {
850:             PetscReal(*coords)[2] = (PetscReal(*)[2])flatCoords;
851:             for (j = 0; j < numVerts - 1; j++) {
852:               PetscReal z, r, theta = -PETSC_PI / 2 + (j % 5) * PETSC_PI / 4;
853:               PetscReal rad = (j >= 10) ? inner_radius1 : (j >= 5) ? inner_radius2 : ctx->radius[grid];
854:               z             = rad * PetscSinReal(theta);
855:               coords[j][1]  = z;
856:               r             = rad * PetscCosReal(theta);
857:               coords[j][0]  = r;
858:             }
859:             coords[numVerts - 1][0] = coords[numVerts - 1][1] = 0;
860:           }
861:         } else {
862:           numCells = 0;
863:           numVerts = 0;
864:         }
865:         for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) {
866:           pcell[0] = cells[j][0];
867:           pcell[1] = cells[j][1];
868:           pcell[2] = cells[j][2];
869:           pcell[3] = cells[j][3];
870:         }
871:         DMPlexCreateFromCellListPetsc(comm_self, 2, numCells, numVerts, 4, ctx->interpolate, flatCells, 2, flatCoords, &ctx->plex[grid]);
872:         PetscFree2(flatCoords, flatCells);
873:         PetscObjectSetName((PetscObject)ctx->plex[grid], "semi-circle");
874:       } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space meshes do not support the cubed sphere");

876:       DMSetFromOptions(ctx->plex[grid]);
877:     } // grid loop
878:     PetscObjectSetOptionsPrefix((PetscObject)pack, prefix);

880:     { /* convert to p4est (or whatever), wait for discretization to create pack */
881:       char      convType[256];
882:       PetscBool flg;

884:       PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");
885:       PetscOptionsFList("-dm_landau_type", "Convert DMPlex to another format (p4est)", "plexland.c", DMList, DMPLEX, convType, 256, &flg);
886:       PetscOptionsEnd();
887:       if (flg) {
888:         ctx->use_p4est = PETSC_TRUE; /* flag for Forest */
889:         for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
890:           DM dmforest;
891:           DMConvert(ctx->plex[grid], convType, &dmforest);
892:           if (dmforest) {
893:             PetscBool isForest;
894:             PetscObjectSetOptionsPrefix((PetscObject)dmforest, prefix);
895:             DMIsForest(dmforest, &isForest);
896:             if (isForest) {
897:               if (ctx->sphere && ctx->inflate) DMForestSetBaseCoordinateMapping(dmforest, GeometryDMLandau, ctx);
898:               DMDestroy(&ctx->plex[grid]);
899:               ctx->plex[grid] = dmforest; // Forest for adaptivity
900:             } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Converted to non Forest?");
901:           } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Convert failed?");
902:         }
903:       } else ctx->use_p4est = PETSC_FALSE; /* flag for Forest */
904:     }
905:   } /* non-file */
906:   DMSetDimension(pack, dim);
907:   PetscObjectSetName((PetscObject)pack, "Mesh");
908:   DMSetApplicationContext(pack, ctx);

910:   return 0;
911: }

913: static PetscErrorCode SetupDS(DM pack, PetscInt dim, PetscInt grid, LandauCtx *ctx)
914: {
915:   PetscInt     ii, i0;
916:   char         buf[256];
917:   PetscSection section;

919:   for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
920:     if (ii == 0) PetscSNPrintf(buf, sizeof(buf), "e");
921:     else PetscSNPrintf(buf, sizeof(buf), "i%" PetscInt_FMT, ii);
922:     /* Setup Discretization - FEM */
923:     PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii]);
924:     PetscObjectSetName((PetscObject)ctx->fe[ii], buf);
925:     DMSetField(ctx->plex[grid], i0, NULL, (PetscObject)ctx->fe[ii]);
926:   }
927:   DMCreateDS(ctx->plex[grid]);
928:   DMGetSection(ctx->plex[grid], &section);
929:   for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
930:     if (ii == 0) PetscSNPrintf(buf, sizeof(buf), "se");
931:     else PetscSNPrintf(buf, sizeof(buf), "si%" PetscInt_FMT, ii);
932:     PetscSectionSetComponentName(section, i0, 0, buf);
933:   }
934:   return 0;
935: }

937: /* Define a Maxwellian function for testing out the operator. */

939: /* Using cartesian velocity space coordinates, the particle */
940: /* density, [1/m^3], is defined according to */

942: /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */

944: /* Using some constant, c, we normalize the velocity vector into a */
945: /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */

947: /* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */

 949: /* Defining $\theta=2T/mc^2$, we thus find that the probability density */
 950: /* of finding the particle in a box dx^3 around x is */

 952: /* $$ f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] $$ */

954: typedef struct {
955:   PetscReal v_0;
956:   PetscReal kT_m;
957:   PetscReal n;
958:   PetscReal shift;
959: } MaxwellianCtx;

961: static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
962: {
963:   MaxwellianCtx *mctx = (MaxwellianCtx *)actx;
964:   PetscInt       i;
965:   PetscReal      v2 = 0, theta = 2 * mctx->kT_m / (mctx->v_0 * mctx->v_0); /* theta = 2kT/mc^2 */
966:   /* compute the exponents, v^2 */
967:   for (i = 0; i < dim; ++i) v2 += x[i] * x[i];
968:   /* evaluate the Maxwellian */
969:   u[0] = mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
970:   if (mctx->shift != 0.) {
971:     v2 = 0;
972:     for (i = 0; i < dim - 1; ++i) v2 += x[i] * x[i];
973:     v2 += (x[dim - 1] - mctx->shift) * (x[dim - 1] - mctx->shift);
974:     /* evaluate the shifted Maxwellian */
975:     u[0] += mctx->n * PetscPowReal(PETSC_PI * theta, -1.5) * (PetscExpReal(-v2 / theta));
976:   }
977:   return 0;
978: }

980: /*@
981:  DMPlexLandauAddMaxwellians - Add a Maxwellian distribution to a state

983:  Collective

985:  Input Parameters:
 986:  +   dm - The mesh (local)
 987:  .   time - Current time
 988:  .   temps - Temperatures of each species (global)
 989:  .   ns - Number density of each species (global)
 990:  .   grid - Index of the current grid, used only as an offset into temps and ns
 991:  .   b_id - Batch index
 992:  .   n_batch - Number of batches
 993:  -   actx - Landau context

995:  Output Parameter:
996:  .   X  - The state (local to this grid)

998:  Level: beginner

1000:  .keywords: mesh
1001:  .seealso: `DMPlexLandauCreateVelocitySpace()`
1002:  @*/
1003: PetscErrorCode DMPlexLandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx)
1004: {
1005:   LandauCtx *ctx = (LandauCtx *)actx;
1006:   PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *);
1007:   PetscInt       dim;
1008:   MaxwellianCtx *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];

1010:   DMGetDimension(dm, &dim);
1011:   if (!ctx) DMGetApplicationContext(dm, &ctx);
1012:   for (PetscInt ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
1013:     mctxs[i0]      = &data[i0];
1014:     data[i0].v_0   = ctx->v_0;                                            // v_0 same for all grids
1015:     data[i0].kT_m  = ctx->k * temps[ii] / ctx->masses[ii];                /* kT/m */
1016:     data[i0].n     = ns[ii] * (1 + 0.1 * (double)b_id / (double)n_batch); // ramp density up 10% to mimic an application; n[0] is used for the Connor-Hastie test
1017:     initu[i0]      = maxwellian;
1018:     data[i0].shift = 0;
1019:   }
1020:   data[0].shift = ctx->electronShift;
1021:   /* need to make ADD_ALL_VALUES work - TODO */
1022:   DMProjectFunction(dm, time, initu, (void **)mctxs, INSERT_ALL_VALUES, X);
1023:   return 0;
1024: }
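
/* A hedged usage sketch (not part of plexland.c): add a Maxwellian state on grid 0 of a
   single batch. The temps/dens arrays are placeholders sized for one species; the in-tree
   usage is LandauSetInitialCondition() below. */
static PetscErrorCode ExampleMaxwellianState(DM dm, Vec X, LandauCtx *ctx)
{
  PetscReal temps[1] = {1.0}, dens[1] = {1.0}; /* illustrative temperature and density */

  VecZeroEntries(X);
  DMPlexLandauAddMaxwellians(dm, X, 0.0, temps, dens, /* grid */ 0, /* b_id */ 0, /* n_batch */ 1, ctx);
  return 0;
}
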

1026: /*
1027:  LandauSetInitialCondition - Adds Maxwellians using the context

1029:  Collective

1031:  Input Parameters:
1032:  +   dm - The mesh
1033:  .   grid - Index of the current grid, used only as an offset into temps and ns
1034:  .   b_id - Batch index
1035:  .   n_batch - Number of batches
1036:  -   actx - Landau context with T and n

1038:  Output Parameter:
1039:  .   X  - The state

1041:  Level: beginner

1043:  .keywords: mesh
1044:  .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauAddMaxwellians()`
1045:  */
1046: static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, PetscInt grid, PetscInt b_id, PetscInt n_batch, void *actx)
1047: {
1048:   LandauCtx *ctx = (LandauCtx *)actx;
1049:   if (!ctx) DMGetApplicationContext(dm, &ctx);
1050:   VecZeroEntries(X);
1051:   DMPlexLandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, grid, b_id, n_batch, ctx);
1052:   return 0;
1053: }

1055: // adapt a level once. Forest in/out
1056: static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscInt type, PetscInt grid, LandauCtx *ctx, DM *newForest)
1057: {
1058:   DM              forest, plex, adaptedDM = NULL;
1059:   PetscDS         prob;
1060:   PetscBool       isForest;
1061:   PetscQuadrature quad;
1062:   PetscInt        Nq, *Nb, cStart, cEnd, c, dim, qj, k;
1063:   DMLabel         adaptLabel = NULL;

1065:   forest = ctx->plex[grid];
1066:   DMCreateDS(forest);
1067:   DMGetDS(forest, &prob);
1068:   DMGetDimension(forest, &dim);
1069:   DMIsForest(forest, &isForest);
1071:   DMConvert(forest, DMPLEX, &plex);
1072:   DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd);
1073:   DMLabelCreate(PETSC_COMM_SELF, "adapt", &adaptLabel);
1074:   PetscFEGetQuadrature(fem, &quad);
1075:   PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL);
1077:   PetscDSGetDimensions(prob, &Nb);
1078:   if (type == 4) {
1079:     for (c = cStart; c < cEnd; c++) DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);
1080:     PetscInfo(sol, "Phase:%s: Uniform refinement\n", "adaptToleranceFEM");
1081:   } else if (type == 2) {
1082:     PetscInt  rCellIdx[8], eCellIdx[64], iCellIdx[64], eMaxIdx = -1, iMaxIdx = -1, nr = 0, nrmax = (dim == 3) ? 8 : 2;
1083:     PetscReal minRad = PETSC_INFINITY, r, eMinRad = PETSC_INFINITY, iMinRad = PETSC_INFINITY;
1084:     for (c = 0; c < 64; c++) eCellIdx[c] = iCellIdx[c] = -1;
1085:     for (c = cStart; c < cEnd; c++) {
1086:       PetscReal tt, v0[LANDAU_MAX_NQ * 3], detJ[LANDAU_MAX_NQ];
1087:       DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ);
1088:       for (qj = 0; qj < Nq; ++qj) {
1089:         tt = PetscSqr(v0[dim * qj + 0]) + PetscSqr(v0[dim * qj + 1]) + PetscSqr(((dim == 3) ? v0[dim * qj + 2] : 0));
1090:         r  = PetscSqrtReal(tt);
1091:         if (r < minRad - PETSC_SQRT_MACHINE_EPSILON * 10.) {
1092:           minRad         = r;
1093:           nr             = 0;
1094:           rCellIdx[nr++] = c;
1095:           PetscInfo(sol, "\t\tPhase: adaptToleranceFEM Found first inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT "\n", (double)r, c, qj + 1, Nq);
1096:         } else if ((r - minRad) < PETSC_SQRT_MACHINE_EPSILON * 100. && nr < nrmax) {
1097:           for (k = 0; k < nr; k++)
1098:             if (c == rCellIdx[k]) break;
1099:           if (k == nr) {
1100:             rCellIdx[nr++] = c;
1101:             PetscInfo(sol, "\t\t\tPhase: adaptToleranceFEM Found another inner r=%e, cell %" PetscInt_FMT ", qp %" PetscInt_FMT "/%" PetscInt_FMT ", d=%e\n", (double)r, c, qj + 1, Nq, (double)(r - minRad));
1102:           }
1103:         }
1104:         if (ctx->sphere) {
1105:           if ((tt = r - ctx->e_radius) > 0) {
1106:             PetscInfo(sol, "\t\t\t %" PetscInt_FMT " cell r=%g\n", c, (double)tt);
1107:             if (tt < eMinRad - PETSC_SQRT_MACHINE_EPSILON * 100.) {
1108:               eMinRad             = tt;
1109:               eMaxIdx             = 0;
1110:               eCellIdx[eMaxIdx++] = c;
1111:             } else if (eMaxIdx > 0 && (tt - eMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != eCellIdx[eMaxIdx - 1]) {
1112:               eCellIdx[eMaxIdx++] = c;
1113:             }
1114:           }
1115:           if ((tt = r - ctx->i_radius[grid]) > 0) {
1116:             if (tt < iMinRad - 1.e-5) {
1117:               iMinRad             = tt;
1118:               iMaxIdx             = 0;
1119:               iCellIdx[iMaxIdx++] = c;
1120:             } else if (iMaxIdx > 0 && (tt - iMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != iCellIdx[iMaxIdx - 1]) {
1121:               iCellIdx[iMaxIdx++] = c;
1122:             }
1123:           }
1124:         }
1125:       }
1126:     }
1127:     for (k = 0; k < nr; k++) DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE);
1128:     if (ctx->sphere) {
1129:       for (c = 0; c < eMaxIdx; c++) {
1130:         DMLabelSetValue(adaptLabel, eCellIdx[c], DM_ADAPT_REFINE);
1131:         PetscInfo(sol, "\t\tPhase:%s: refine sphere e cell %" PetscInt_FMT " r=%g\n", "adaptToleranceFEM", eCellIdx[c], (double)eMinRad);
1132:       }
1133:       for (c = 0; c < iMaxIdx; c++) {
1134:         DMLabelSetValue(adaptLabel, iCellIdx[c], DM_ADAPT_REFINE);
1135:         PetscInfo(sol, "\t\tPhase:%s: refine sphere i cell %" PetscInt_FMT " r=%g\n", "adaptToleranceFEM", iCellIdx[c], (double)iMinRad);
1136:       }
1137:     }
1138:     PetscInfo(sol, "Phase:%s: Adaptive refine origin cells %" PetscInt_FMT ",%" PetscInt_FMT " r=%g\n", "adaptToleranceFEM", rCellIdx[0], rCellIdx[1], (double)minRad);
1139:   } else if (type == 0 || type == 1 || type == 3) { /* refine along r=0 axis */
1140:     PetscScalar *coef = NULL;
1141:     Vec          coords;
1142:     PetscInt     csize, Nv, d, nz;
1143:     DM           cdm;
1144:     PetscSection cs;
1145:     DMGetCoordinatesLocal(forest, &coords);
1146:     DMGetCoordinateDM(forest, &cdm);
1147:     DMGetLocalSection(cdm, &cs);
1148:     for (c = cStart; c < cEnd; c++) {
1149:       PetscInt doit = 0, outside = 0;
1150:       DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef);
1151:       Nv = csize / dim;
1152:       for (nz = d = 0; d < Nv; d++) {
1153:         PetscReal z = PetscRealPart(coef[d * dim + (dim - 1)]), x = PetscSqr(PetscRealPart(coef[d * dim + 0])) + ((dim == 3) ? PetscSqr(PetscRealPart(coef[d * dim + 1])) : 0);
1154:         x = PetscSqrtReal(x);
1155:         if (x < PETSC_MACHINE_EPSILON * 10. && PetscAbs(z) < PETSC_MACHINE_EPSILON * 10.) doit = 1;                              /* refine origin */
1156:         else if (type == 0 && (z < -PETSC_MACHINE_EPSILON * 10. || z > ctx->re_radius + PETSC_MACHINE_EPSILON * 10.)) outside++; /* first pass don't refine bottom */
1157:         else if (type == 1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) outside++;                                  /* don't refine outside electron refine radius */
1158:         else if (type == 3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) outside++;                                  /* don't refine outside ion refine radius */
1159:         if (x < PETSC_MACHINE_EPSILON * 10.) nz++;
1160:       }
1161:       DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef);
1162:       if (doit || (outside < Nv && nz)) DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);
1163:     }
1164:     PetscInfo(sol, "Phase:%s: RE refinement\n", "adaptToleranceFEM");
1165:   }
1166:   DMDestroy(&plex);
1167:   DMAdaptLabel(forest, adaptLabel, &adaptedDM);
1168:   DMLabelDestroy(&adaptLabel);
1169:   *newForest = adaptedDM;
1170:   if (adaptedDM) {
1171:     if (isForest) {
1172:       DMForestSetAdaptivityForest(adaptedDM, NULL); // clear the reference to the forest it was adapted from
1173:     } else exit(33);                                 // should not happen: adaptation is only run on forests
1174:     DMConvert(adaptedDM, DMPLEX, &plex);
1175:     DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd);
1176:     PetscInfo(sol, "\tPhase: adaptToleranceFEM: %" PetscInt_FMT " cells, %" PetscInt_FMT " total quadrature points\n", cEnd - cStart, Nq * (cEnd - cStart));
1177:     DMDestroy(&plex);
1178:   } else *newForest = NULL;
1179:   return 0;
1180: }

1182: // forest goes in (ctx->plex[grid]), plex comes out
1183: static PetscErrorCode adapt(PetscInt grid, LandauCtx *ctx, Vec *uu)
1184: {
1185:   PetscInt adaptIter;

1187:   PetscInt type, limits[5] = {(grid == 0) ? ctx->numRERefine : 0, (grid == 0) ? ctx->nZRefine1 : 0, ctx->numAMRRefine[grid], (grid == 0) ? ctx->nZRefine2 : 0, ctx->postAMRRefine[grid]};
1188:   for (type = 0; type < 5; type++) {
1189:     for (adaptIter = 0; adaptIter < limits[type]; adaptIter++) {
1190:       DM newForest = NULL;
1191:       adaptToleranceFEM(ctx->fe[0], *uu, type, grid, ctx, &newForest);
1192:       if (newForest) {
1193:         DMDestroy(&ctx->plex[grid]);
1194:         VecDestroy(uu);
1195:         DMCreateGlobalVector(newForest, uu);
1196:         PetscObjectSetName((PetscObject)*uu, "uAMR");
1197:         LandauSetInitialCondition(newForest, *uu, grid, 0, 1, ctx);
1198:         ctx->plex[grid] = newForest;
1199:       } else {
1200:         exit(4); // can happen with no AMR and post refinement
1201:       }
1202:     }
1203:   }
1204:   return 0;
1205: }
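/* Note on the five adaptation passes driven by limits[] above (a summary of adaptToleranceFEM and the
   option help strings below, not additional logic):
     type 0 - numRERefine   (-dm_landau_amr_re_levels):   refine along v_perp=0, z>0 for runaway electrons (grid 0 only)
     type 1 - nZRefine1     (-dm_landau_amr_z_refine1):   refine along the v_perp=0 axis (grid 0 only)
     type 2 - numAMRRefine  (-dm_landau_amr_levels_max):  AMR around the origin (all grids)
     type 3 - nZRefine2     (-dm_landau_amr_z_refine2):   second v_perp=0 axis refinement (grid 0 only)
     type 4 - postAMRRefine (-dm_landau_amr_post_refine): uniform refinement after AMR (all grids) */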

1207: static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
1208: {
1209:   PetscBool flg, sph_flg;
1210:   PetscInt  ii, nt, nm, nc, num_species_grid[LANDAU_MAX_GRIDS];
1211:   PetscReal v0_grid[LANDAU_MAX_GRIDS];
1212:   DM        dummy;

1214:   DMCreate(ctx->comm, &dummy);
1215:   /* get options - initialize context */
1216:   ctx->verbose = 1; // should be 0 to comply with PETSc's silent-by-default convention
1217: #if defined(PETSC_HAVE_THREADSAFETY)
1218:   ctx->batch_sz = PetscNumOMPThreads;
1219: #else
1220:   ctx->batch_sz = 1;
1221: #endif
1222:   ctx->batch_view_idx = 0;
1223:   ctx->interpolate    = PETSC_TRUE;
1224:   ctx->gpu_assembly   = PETSC_TRUE;
1225:   ctx->norm_state     = 0;
1226:   ctx->electronShift  = 0;
1227:   ctx->M              = NULL;
1228:   ctx->J              = NULL;
1229:   /* geometry and grids */
1230:   ctx->sphere       = PETSC_FALSE;
1231:   ctx->inflate      = PETSC_FALSE;
1232:   ctx->use_p4est    = PETSC_FALSE;
1233:   ctx->num_sections = 3; /* 2, 3 or 4 */
1234:   for (PetscInt grid = 0; grid < LANDAU_MAX_GRIDS; grid++) {
1235:     ctx->radius[grid]             = 5.; /* thermal radius (velocity) */
1236:     ctx->numAMRRefine[grid]       = 5;
1237:     ctx->postAMRRefine[grid]      = 0;
1238:     ctx->species_offset[grid + 1] = 1; // one species default
1239:     num_species_grid[grid]        = 0;
1240:     ctx->plex[grid]               = NULL; /* cache as expensive to Convert */
1241:   }
1242:   ctx->species_offset[0] = 0;
1243:   ctx->re_radius         = 0.;
1244:   ctx->vperp0_radius1    = 0;
1245:   ctx->vperp0_radius2    = 0;
1246:   ctx->nZRefine1         = 0;
1247:   ctx->nZRefine2         = 0;
1248:   ctx->numRERefine       = 0;
1249:   num_species_grid[0]    = 1; // one species default
1250:   /* species - [0] electrons, [1] one ion species, e.g., deuterium, [2] heavy impurity ion, ... */
1251:   ctx->charges[0]       = -1;                       /* electron charge in units of e (scaled to MKS below) */
1252:   ctx->masses[0]        = 1 / 1835.469965278441013; /* temporary value in proton-mass units (reset to kg below) */
1253:   ctx->n[0]             = 1;
1254:   ctx->v_0              = 1; /* thermal velocity, we could start with a scale != 1 */
1255:   ctx->thermal_temps[0] = 1;
1256:   /* constants, etc. */
1257:   ctx->epsilon0 = 8.8542e-12;     /* permittivity of free space (MKS) F/m */
1258:   ctx->k        = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */
1259:   ctx->lnLam    = 10;             /* Coulomb logarithm: ratio of large- to small-angle collision cross sections */
1260:   ctx->n_0      = 1.e20;          /* typical plasma n, but could set it to 1 */
1261:   ctx->Ez       = 0;
1262:   for (PetscInt grid = 0; grid < LANDAU_NUM_TIMERS; grid++) ctx->times[grid] = 0;
1263:   for (PetscInt ii = 0; ii < LANDAU_DIM; ii++) ctx->cells0[ii] = 2;
1264:   if (LANDAU_DIM == 2) ctx->cells0[0] = 1;
1265:   ctx->use_matrix_mass                = PETSC_FALSE;
1266:   ctx->use_relativistic_corrections   = PETSC_FALSE;
1267:   ctx->use_energy_tensor_trick        = PETSC_FALSE; /* Use Eero's trick for energy conservation v --> grad(v^2/2) */
1268:   ctx->SData_d.w                      = NULL;
1269:   ctx->SData_d.x                      = NULL;
1270:   ctx->SData_d.y                      = NULL;
1271:   ctx->SData_d.z                      = NULL;
1272:   ctx->SData_d.invJ                   = NULL;
1273:   ctx->jacobian_field_major_order     = PETSC_FALSE;
1274:   ctx->SData_d.coo_elem_offsets       = NULL;
1275:   ctx->SData_d.coo_elem_point_offsets = NULL;
1276:   ctx->coo_assembly                   = PETSC_FALSE;
1277:   ctx->SData_d.coo_elem_fullNb        = NULL;
1278:   ctx->SData_d.coo_size               = 0;
1279:   PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none");
1280:   {
1281:     char opstring[256];
1282: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1283:     ctx->deviceType = LANDAU_KOKKOS;
1284:     PetscStrcpy(opstring, "kokkos");
1285: #elif defined(PETSC_HAVE_CUDA)
1286:     ctx->deviceType = LANDAU_CUDA;
1287:     PetscStrcpy(opstring, "cuda");
1288: #else
1289:     ctx->deviceType = LANDAU_CPU;
1290:     PetscStrcpy(opstring, "cpu");
1291: #endif
1292:     PetscOptionsString("-dm_landau_device_type", "Use kernels on 'cpu', 'cuda', or 'kokkos'", "plexland.c", opstring, opstring, sizeof(opstring), NULL);
1293:     PetscStrcmp("cpu", opstring, &flg);
1294:     if (flg) {
1295:       ctx->deviceType = LANDAU_CPU;
1296:     } else {
1297:       PetscStrcmp("cuda", opstring, &flg);
1298:       if (flg) {
1299:         ctx->deviceType = LANDAU_CUDA;
1300:       } else {
1301:         PetscStrcmp("kokkos", opstring, &flg);
1302:         if (flg) ctx->deviceType = LANDAU_KOKKOS;
1303:         else SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_device_type %s", opstring);
1304:       }
1305:     }
1306:   }
1307:   PetscOptionsReal("-dm_landau_electron_shift", "Shift in thermal velocity of electrons", "none", ctx->electronShift, &ctx->electronShift, NULL);
1308:   PetscOptionsInt("-dm_landau_verbose", "Level of verbosity output", "plexland.c", ctx->verbose, &ctx->verbose, NULL);
1309:   PetscOptionsInt("-dm_landau_batch_size", "Number of 'vertices' to batch", "ex2.c", ctx->batch_sz, &ctx->batch_sz, NULL);
1311:   PetscOptionsInt("-dm_landau_batch_view_idx", "Index of batch for diagnostics like plotting", "ex2.c", ctx->batch_view_idx, &ctx->batch_view_idx, NULL);
1313:   PetscOptionsReal("-dm_landau_Ez", "Initial parallel electric field in units of the Connor-Hastie critical field", "plexland.c", ctx->Ez, &ctx->Ez, NULL);
1314:   PetscOptionsReal("-dm_landau_n_0", "Normalization constant for number density", "plexland.c", ctx->n_0, &ctx->n_0, NULL);
1315:   PetscOptionsReal("-dm_landau_ln_lambda", "Cross section parameter", "plexland.c", ctx->lnLam, &ctx->lnLam, NULL);
1316:   PetscOptionsBool("-dm_landau_use_mataxpy_mass", "Use fast but slightly fragile MatAXPY() to add the mass term", "plexland.c", ctx->use_matrix_mass, &ctx->use_matrix_mass, NULL);
1317:   PetscOptionsBool("-dm_landau_use_relativistic_corrections", "Use relativistic corrections", "plexland.c", ctx->use_relativistic_corrections, &ctx->use_relativistic_corrections, NULL);
1318:   PetscCall(PetscOptionsBool("-dm_landau_use_energy_tensor_trick", "Use Eero's trick of using grad(v^2/2) instead of v as args to Landau tensor to conserve energy with relativistic corrections and Q1 elements", "plexland.c", ctx->use_energy_tensor_trick,
1319:                              &ctx->use_energy_tensor_trick, NULL));

1321:   /* get num species with temperature, set defaults */
1322:   for (ii = 1; ii < LANDAU_MAX_SPECIES; ii++) {
1323:     ctx->thermal_temps[ii] = 1;
1324:     ctx->charges[ii]       = 1;
1325:     ctx->masses[ii]        = 1;
1326:     ctx->n[ii]             = 1;
1327:   }
1328:   nt = LANDAU_MAX_SPECIES;
1329:   PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set; its length sets the number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg);
1330:   if (flg) {
1331:     PetscInfo(dummy, "num_species set to number of thermal temps provided (%" PetscInt_FMT ")\n", nt);
1332:     ctx->num_species = nt;
1333:   } else SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-dm_landau_thermal_temps t1,t2,... must be provided to set the number of species");
1334:   for (ii = 0; ii < ctx->num_species; ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */
1335:   nm = LANDAU_MAX_SPECIES - 1;
1336:   PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg);
1338:   nm = LANDAU_MAX_SPECIES;
1339:   PetscOptionsRealArray("-dm_landau_n", "Number density of each species = n_s * n_0", "plexland.c", ctx->n, &nm, &flg);
1341:   for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */
1342:   ctx->masses[0] = 9.10938356e-31;                                           /* electron mass kg (should be about right already) */
1343:   ctx->m_0       = ctx->masses[0];                                           /* arbitrary reference mass, electrons */
1344:   nc             = LANDAU_MAX_SPECIES - 1;
1345:   PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg);
1347:   for (ii = 0; ii < LANDAU_MAX_SPECIES; ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */
1348:   /* geometry and grids */
1349:   nt = LANDAU_MAX_GRIDS;
1350:   PetscOptionsIntArray("-dm_landau_num_species_grid", "Number of species on each grid: [ 1, ....] or [S, 0 ....] for single grid", "plexland.c", num_species_grid, &nt, &flg);
1351:   if (flg) {
1352:     ctx->num_grids = nt;
1353:     for (ii = nt = 0; ii < ctx->num_grids; ii++) nt += num_species_grid[ii];
1355:                ctx->num_grids, LANDAU_MAX_GRIDS);
1356:   } else {
1357:     ctx->num_grids      = 1; // go back to a single grid run
1358:     num_species_grid[0] = ctx->num_species;
1359:   }
1360:   for (ctx->species_offset[0] = ii = 0; ii < ctx->num_grids; ii++) ctx->species_offset[ii + 1] = ctx->species_offset[ii] + num_species_grid[ii];
1362:              ctx->num_species);
1363:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1364:     int iii       = ctx->species_offset[grid];                                          // normalize with first (arbitrary) species on grid
1365:     v0_grid[grid] = PetscSqrtReal(ctx->k * ctx->thermal_temps[iii] / ctx->masses[iii]); /* arbitrary units for non-dimensionalization: mean velocity in 1D of first species on grid */
1366:   }
1367:   ii = 0;
1368:   PetscOptionsInt("-dm_landau_v0_grid", "Index of grid to use for setting v_0 (electrons are default). Not recommended to change", "plexland.c", ii, &ii, NULL);
1369:   ctx->v_0 = v0_grid[ii];                                                                                                                       /* arbitrary units for non dimensionalization: global mean velocity in 1D of electrons */
1370:   ctx->t_0 = 8 * PETSC_PI * PetscSqr(ctx->epsilon0 * ctx->m_0 / PetscSqr(ctx->charges[0])) / ctx->lnLam / ctx->n_0 * PetscPowReal(ctx->v_0, 3); /* note, this t_0 makes nu[0,0]=1 */
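  /* Written out, the collision-time normalization above is
       t_0 = 8*pi * (epsilon0 * m_0 / e^2)^2 * v_0^3 / (lnLambda * n_0),
     with e = |charges[0]|; as noted, this choice makes nu[0,0] = 1 in code units. */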
1371:   /* domain */
1372:   nt = LANDAU_MAX_GRIDS;
1373:   PetscOptionsRealArray("-dm_landau_domain_radius", "Phase space size in units of thermal velocity of grid", "plexland.c", ctx->radius, &nt, &flg);
1375:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1376:     if (flg && ctx->radius[grid] <= 0) { /* negative is ratio of c */
1377:       if (ctx->radius[grid] == 0) ctx->radius[grid] = 0.75;
1378:       else ctx->radius[grid] = -ctx->radius[grid];
1379:       ctx->radius[grid] = ctx->radius[grid] * SPEED_OF_LIGHT / ctx->v_0; // use any species on grid to normalize (v_0 same for all on grid)
1380:       PetscInfo(dummy, "Change domain radius to %g for grid %" PetscInt_FMT "\n", (double)ctx->radius[grid], grid);
1381:     }
1382:     ctx->radius[grid] *= v0_grid[grid] / ctx->v_0; // scale domain by thermal radius relative to v_0
1383:   }
1384:   /* amr parameters */
1385:   nt = LANDAU_DIM;
1386:   PetscOptionsIntArray("-dm_landau_num_cells", "Number of cells in each dimension of base grid", "plexland.c", ctx->cells0, &nt, &flg);
1387:   nt = LANDAU_MAX_GRIDS;
1388:   PetscOptionsIntArray("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin, after (RE) refinements along z", "plexland.c", ctx->numAMRRefine, &nt, &flg);
1390:   nt = LANDAU_MAX_GRIDS;
1391:   PetscOptionsIntArray("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &nt, &flg);
1392:   for (ii = 1; ii < ctx->num_grids; ii++) ctx->postAMRRefine[ii] = ctx->postAMRRefine[0]; // all grids the same now
1393:   PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, &flg);
1394:   PetscOptionsInt("-dm_landau_amr_z_refine1", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, &flg);
1395:   PetscOptionsInt("-dm_landau_amr_z_refine2", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, &flg);
1396:   PetscOptionsReal("-dm_landau_re_radius", "velocity range to refine on positive (z>0) r=0 axis for runaways", "plexland.c", ctx->re_radius, &ctx->re_radius, &flg);
1397:   PetscOptionsReal("-dm_landau_z_radius1", "velocity range to refine r=0 axis (for electrons)", "plexland.c", ctx->vperp0_radius1, &ctx->vperp0_radius1, &flg);
1398:   PetscOptionsReal("-dm_landau_z_radius2", "velocity range to refine r=0 axis (for ions) after origin AMR", "plexland.c", ctx->vperp0_radius2, &ctx->vperp0_radius2, &flg);
1399:   /* spherical domain (not used) */
1400:   PetscOptionsInt("-dm_landau_num_sections", "Number of tangential sections in (2D) grid: 2, 3, or 4", "plexland.c", ctx->num_sections, &ctx->num_sections, NULL);
1401:   PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, &sph_flg);
1402:   PetscOptionsBool("-dm_landau_inflate", "With sphere, inflate for curved edges", "plexland.c", ctx->inflate, &ctx->inflate, &flg);
1403:   PetscOptionsReal("-dm_landau_e_radius", "Electron thermal velocity, used for circular meshes", "plexland.c", ctx->e_radius, &ctx->e_radius, &flg);
1404:   if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* you gave me an e radius but did not set sphere, user error really */
1405:   if (!flg) ctx->e_radius = 1.5 * PetscSqrtReal(8 * ctx->k * ctx->thermal_temps[0] / ctx->masses[0] / PETSC_PI) / ctx->v_0;
1406:   nt = LANDAU_MAX_GRIDS;
1407:   PetscOptionsRealArray("-dm_landau_i_radius", "Ion thermal velocity, used for circular meshes", "plexland.c", ctx->i_radius, &nt, &flg);
1408:   if (flg && !sph_flg) ctx->sphere = PETSC_TRUE;
1409:   if (!flg) {
1410:     ctx->i_radius[0] = 1.5 * PetscSqrtReal(8 * ctx->k * ctx->thermal_temps[1] / ctx->masses[1] / PETSC_PI) / ctx->v_0; // need to correct for ion grid domain
1411:   }
1414:   /* processing options */
1415:   PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL);
1416:   if (ctx->deviceType == LANDAU_CPU || ctx->deviceType == LANDAU_KOKKOS) { // COO assembly is only offered for the CPU and Kokkos back-ends
1417:     PetscOptionsBool("-dm_landau_coo_assembly", "Assemble Jacobian with Kokkos on 'device'", "plexland.c", ctx->coo_assembly, &ctx->coo_assembly, NULL);
1419:   }
1420:   PetscOptionsBool("-dm_landau_jacobian_field_major_order", "Reorder Jacobian for GPU assembly with field major, or block diagonal, ordering (DEPRECATED)", "plexland.c", ctx->jacobian_field_major_order, &ctx->jacobian_field_major_order, NULL);
1423:   PetscOptionsEnd();

1425:   for (ii = ctx->num_species; ii < LANDAU_MAX_SPECIES; ii++) ctx->masses[ii] = ctx->thermal_temps[ii] = ctx->charges[ii] = 0;
1426:   if (ctx->verbose > 0) {
1427:     PetscPrintf(ctx->comm, "masses:        e=%10.3e; ions in proton mass units:   %10.3e %10.3e ...\n", (double)ctx->masses[0], (double)(ctx->masses[1] / 1.6720e-27), (double)(ctx->num_species > 2 ? ctx->masses[2] / 1.6720e-27 : 0));
1428:     PetscPrintf(ctx->comm, "charges:       e=%10.3e; charges in elementary units: %10.3e %10.3e\n", (double)ctx->charges[0], (double)(-ctx->charges[1] / ctx->charges[0]), (double)(ctx->num_species > 2 ? -ctx->charges[2] / ctx->charges[0] : 0));
1429:     PetscPrintf(ctx->comm, "n:             e: %10.3e                           i: %10.3e %10.3e\n", (double)ctx->n[0], (double)ctx->n[1], (double)(ctx->num_species > 2 ? ctx->n[2] : 0));
1430:     PetscCall(PetscPrintf(ctx->comm, "thermal T (K): e=%10.3e i=%10.3e %10.3e. v_0=%10.3e (%10.3ec) n_0=%10.3e t_0=%10.3e, %s, %s, %" PetscInt_FMT " batched\n", (double)ctx->thermal_temps[0], (double)ctx->thermal_temps[1],
1431:                           (double)((ctx->num_species > 2) ? ctx->thermal_temps[2] : 0), (double)ctx->v_0, (double)(ctx->v_0 / SPEED_OF_LIGHT), (double)ctx->n_0, (double)ctx->t_0, ctx->use_relativistic_corrections ? "relativistic" : "classical", ctx->use_energy_tensor_trick ? "Use trick" : "Intuitive",
1432:                           ctx->batch_sz));
1433:     PetscPrintf(ctx->comm, "Domain radius (AMR levels) grid %d: %10.3e (%" PetscInt_FMT ") ", 0, (double)ctx->radius[0], ctx->numAMRRefine[0]);
1434:     for (ii = 1; ii < ctx->num_grids; ii++) PetscPrintf(ctx->comm, ", %" PetscInt_FMT ": %10.3e (%" PetscInt_FMT ") ", ii, (double)ctx->radius[ii], ctx->numAMRRefine[ii]);
1435:     PetscPrintf(ctx->comm, "\n");
1436:     if (ctx->jacobian_field_major_order) {
1437:       PetscPrintf(ctx->comm, "Using field major order for GPU Jacobian\n");
1438:     } else {
1439:       PetscPrintf(ctx->comm, "Using default Plex order for all matrices\n");
1440:     }
1441:   }
1442:   DMDestroy(&dummy);
1443:   {
1444:     PetscMPIInt rank;
1445:     MPI_Comm_rank(ctx->comm, &rank);
1446:     ctx->stage = 0;
1447:     PetscLogEventRegister("Landau Create", DM_CLASSID, &ctx->events[13]);   /* 13 */
1448:     PetscLogEventRegister(" GPU ass. setup", DM_CLASSID, &ctx->events[2]);  /* 2 */
1449:     PetscLogEventRegister(" Build matrix", DM_CLASSID, &ctx->events[12]);   /* 12 */
1450:     PetscLogEventRegister(" Assembly maps", DM_CLASSID, &ctx->events[15]);  /* 15 */
1451:     PetscLogEventRegister("Landau Mass mat", DM_CLASSID, &ctx->events[14]); /* 14 */
1452:     PetscLogEventRegister("Landau Operator", DM_CLASSID, &ctx->events[11]); /* 11 */
1453:     PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0]);  /* 0 */
1454:     PetscLogEventRegister("Landau Mass", DM_CLASSID, &ctx->events[9]);      /* 9 */
1455:     PetscLogEventRegister(" Preamble", DM_CLASSID, &ctx->events[10]);       /* 10 */
1456:     PetscLogEventRegister(" static IP Data", DM_CLASSID, &ctx->events[7]);  /* 7 */
1457:     PetscLogEventRegister(" dynamic IP-Jac", DM_CLASSID, &ctx->events[1]);  /* 1 */
1458:     PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3]);     /* 3 */
1459:     PetscLogEventRegister(" Jac-f-df (GPU)", DM_CLASSID, &ctx->events[8]);  /* 8 */
1460:     PetscLogEventRegister(" J Kernel (GPU)", DM_CLASSID, &ctx->events[4]);  /* 4 */
1461:     PetscLogEventRegister(" M Kernel (GPU)", DM_CLASSID, &ctx->events[16]); /* 16 */
1462:     PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5]);     /* 5 */
1463:     PetscLogEventRegister(" CPU assemble", DM_CLASSID, &ctx->events[6]);    /* 6 */

1465:     if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */
1466:       PetscOptionsClearValue(NULL, "-snes_converged_reason");
1467:       PetscOptionsClearValue(NULL, "-ksp_converged_reason");
1468:       PetscOptionsClearValue(NULL, "-snes_monitor");
1469:       PetscOptionsClearValue(NULL, "-ksp_monitor");
1470:       PetscOptionsClearValue(NULL, "-ts_monitor");
1471:       PetscOptionsClearValue(NULL, "-ts_view");
1472:       PetscOptionsClearValue(NULL, "-ts_adapt_monitor");
1473:       PetscOptionsClearValue(NULL, "-dm_landau_amr_dm_view");
1474:       PetscOptionsClearValue(NULL, "-dm_landau_amr_vec_view");
1475:       PetscOptionsClearValue(NULL, "-dm_landau_mass_dm_view");
1476:       PetscOptionsClearValue(NULL, "-dm_landau_mass_view");
1477:       PetscOptionsClearValue(NULL, "-dm_landau_jacobian_view");
1478:       PetscOptionsClearValue(NULL, "-dm_landau_mat_view");
1479:       PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_converged_reason");
1480:       PetscOptionsClearValue(NULL, "-pc_bjkokkos_ksp_monitor");
1481:       PetscOptionsClearValue(NULL, "-");
1482:       PetscOptionsClearValue(NULL, "-info");
1483:     }
1484:   }
1485:   return 0;
1486: }
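/* Illustrative only (values are not taken from this file): a two-grid electron/deuterium setup could use
   options along the lines of
     -dm_landau_thermal_temps 1,1 -dm_landau_ion_masses 2 -dm_landau_ion_charges 1
     -dm_landau_num_species_grid 1,1 -dm_landau_amr_levels_max 2,2 -dm_landau_device_type cpu
   where -dm_landau_thermal_temps is mandatory and its length fixes the number of species. */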

1488: static PetscErrorCode CreateStaticGPUData(PetscInt dim, IS grid_batch_is_inv[], LandauCtx *ctx)
1489: {
1490:   PetscSection     section[LANDAU_MAX_GRIDS], globsection[LANDAU_MAX_GRIDS];
1491:   PetscQuadrature  quad;
1492:   const PetscReal *quadWeights;
1493:   PetscInt         numCells[LANDAU_MAX_GRIDS], Nq, Nf[LANDAU_MAX_GRIDS], ncellsTot = 0, MAP_BF_SIZE = 64 * LANDAU_DIM * LANDAU_DIM * LANDAU_MAX_Q_FACE * LANDAU_MAX_SPECIES;
1494:   PetscTabulation *Tf;
1495:   PetscDS          prob;

1497:   DMGetDS(ctx->plex[0], &prob);    // same DS for all grids
1498:   PetscDSGetTabulation(prob, &Tf); // Bf, &Df same for all grids
1499:   /* DS, Tab and quad is same on all grids */
1501:   PetscFEGetQuadrature(ctx->fe[0], &quad);
1502:   PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights);
1504:   /* setup each grid */
1505:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1506:     PetscInt cStart, cEnd;
1508:     DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
1509:     numCells[grid] = cEnd - cStart; // grids can have different topology
1510:     DMGetLocalSection(ctx->plex[grid], &section[grid]);
1511:     DMGetGlobalSection(ctx->plex[grid], &globsection[grid]);
1512:     PetscSectionGetNumFields(section[grid], &Nf[grid]);
1513:     ncellsTot += numCells[grid];
1514:   }
1515:   /* create GPU assembly data */
1516:   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1517:     PetscContainer container;
1518:     PetscScalar   *elemMatrix, *elMat;
1519:     pointInterpolationP4est(*pointMaps)[LANDAU_MAX_Q_FACE];
1520:     P4estVertexMaps *maps;
1521:     const PetscInt  *plex_batch = NULL, Nb = Nq, elMatSz = Nq * Nq * ctx->num_species * ctx->num_species; // tensor elements;
1522:     LandauIdx       *coo_elem_offsets = NULL, *coo_elem_fullNb = NULL, (*coo_elem_point_offsets)[LANDAU_MAX_NQ + 1] = NULL;
1523:     /* create GPU assembly data */
1524:     PetscInfo(ctx->plex[0], "Make GPU maps %d\n", 1);
1525:     PetscLogEventBegin(ctx->events[2], 0, 0, 0, 0);
1526:     PetscMalloc(sizeof(*maps) * ctx->num_grids, &maps);
1527:     PetscMalloc(sizeof(*pointMaps) * MAP_BF_SIZE, &pointMaps);
1528:     PetscMalloc(sizeof(*elemMatrix) * elMatSz, &elemMatrix);

1530:     if (ctx->coo_assembly) {                                                                                                      // setup COO assembly -- put COO metadata directly in ctx->SData_d
1531:       PetscMalloc3(ncellsTot + 1, &coo_elem_offsets, ncellsTot, &coo_elem_fullNb, ncellsTot, &coo_elem_point_offsets); // array of integer pointers
1532:       coo_elem_offsets[0] = 0;                                                                                                    // finish later
1533:       PetscInfo(ctx->plex[0], "COO initialization, %" PetscInt_FMT " cells\n", ncellsTot);
1534:       ctx->SData_d.coo_n_cellsTot         = ncellsTot;
1535:       ctx->SData_d.coo_elem_offsets       = (void *)coo_elem_offsets;
1536:       ctx->SData_d.coo_elem_fullNb        = (void *)coo_elem_fullNb;
1537:       ctx->SData_d.coo_elem_point_offsets = (void *)coo_elem_point_offsets;
1538:     } else {
1539:       ctx->SData_d.coo_elem_offsets = ctx->SData_d.coo_elem_fullNb = NULL;
1540:       ctx->SData_d.coo_elem_point_offsets                          = NULL;
1541:       ctx->SData_d.coo_n_cellsTot                                  = 0;
1542:     }

1544:     ctx->SData_d.coo_max_fullnb = 0;
1545:     for (PetscInt grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1546:       PetscInt cStart, cEnd, Nfloc = Nf[grid], totDim = Nfloc * Nq;
1547:       if (grid_batch_is_inv[grid]) ISGetIndices(grid_batch_is_inv[grid], &plex_batch);
1548:       DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
1549:       // make maps
1550:       maps[grid].d_self       = NULL;
1551:       maps[grid].num_elements = numCells[grid];
1552:       maps[grid].num_face     = (PetscInt)(pow(Nq, 1. / ((double)dim)) + .001);                 // Q
1553:       maps[grid].num_face     = (PetscInt)(pow(maps[grid].num_face, (double)(dim - 1)) + .001); // Q^2
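      // For example, assuming tensor-product quadrature: Nq=9 in 2D gives Q=3 and num_face=3;
      // Nq=27 in 3D gives Q=3 and num_face=9 points per constrained face.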
1554:       maps[grid].num_reduced  = 0;
1555:       maps[grid].deviceType   = ctx->deviceType;
1556:       maps[grid].numgrids     = ctx->num_grids;
1557:       // count reduced and get
1558:       PetscMalloc(maps[grid].num_elements * sizeof(*maps[grid].gIdx), &maps[grid].gIdx);
1559:       for (int ej = cStart, eidx = 0; ej < cEnd; ++ej, ++eidx, glb_elem_idx++) {
1560:         if (coo_elem_offsets) coo_elem_offsets[glb_elem_idx + 1] = coo_elem_offsets[glb_elem_idx]; // start with last one, then add
1561:         for (int fieldA = 0; fieldA < Nf[grid]; fieldA++) {
1562:           int fullNb = 0;
1563:           for (int q = 0; q < Nb; ++q) {
1564:             PetscInt     numindices, *indices;
1565:             PetscScalar *valuesOrig = elMat = elemMatrix;
1566:             PetscArrayzero(elMat, totDim * totDim);
1567:             elMat[(fieldA * Nb + q) * totDim + fieldA * Nb + q] = 1;
1568:             DMPlexGetClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **)&elMat);
1569:             for (PetscInt f = 0; f < numindices; ++f) { // look for a non-zero on the diagonal
1570:               if (PetscAbs(PetscRealPart(elMat[f * numindices + f])) > PETSC_MACHINE_EPSILON) {
1571:                 // found it
1572:                 if (PetscAbs(PetscRealPart(elMat[f * numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) { // normal vertex 1.0
1573:                   if (plex_batch) {
1574:                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)plex_batch[indices[f]];
1575:                   } else {
1576:                     maps[grid].gIdx[eidx][fieldA][q] = (LandauIdx)indices[f];
1577:                   }
1578:                   fullNb++;
1579:                 } else { //found a constraint
1580:                   int            jj                = 0;
1581:                   PetscReal      sum               = 0;
1582:                   const PetscInt ff                = f;
1583:                   maps[grid].gIdx[eidx][fieldA][q] = -maps[grid].num_reduced - 1; // store (-)index: id = -(idx+1): idx = -id - 1
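                  // e.g., reduced-map index 0 is stored as -1 and index 3 as -4; a stored id of -4 decodes to idx = -(-4) - 1 = 3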

1585:                   do {                                                                                              // constraints are continuous in Plex - exploit that here
1586:                     int ii;                                                                                         // get 'scale'
1587:                     for (ii = 0, pointMaps[maps[grid].num_reduced][jj].scale = 0; ii < maps[grid].num_face; ii++) { // sum row of outer product to recover vector value
1588:                       if (ff + ii < numindices) {                                                                   // 3D has Q and Q^2 interps so might run off end. We could test that elMat[f*numindices + ff + ii] > 0, and break if not
1589:                         pointMaps[maps[grid].num_reduced][jj].scale += PetscRealPart(elMat[f * numindices + ff + ii]);
1590:                       }
1591:                     }
1592:                     sum += pointMaps[maps[grid].num_reduced][jj].scale; // diagnostic
1593:                     // get 'gid'
1594:                     if (pointMaps[maps[grid].num_reduced][jj].scale == 0) pointMaps[maps[grid].num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps
1595:                     else {
1596:                       if (plex_batch) {
1597:                         pointMaps[maps[grid].num_reduced][jj].gid = plex_batch[indices[f]];
1598:                       } else {
1599:                         pointMaps[maps[grid].num_reduced][jj].gid = indices[f];
1600:                       }
1601:                       fullNb++;
1602:                     }
1603:                   } while (++jj < maps[grid].num_face && ++f < numindices); // jj is incremented if we hit the end
1604:                   while (jj < maps[grid].num_face) {
1605:                     pointMaps[maps[grid].num_reduced][jj].scale = 0;
1606:                     pointMaps[maps[grid].num_reduced][jj].gid   = -1;
1607:                     jj++;
1608:                   }
1609:                   if (PetscAbs(sum - 1.0) > 10 * PETSC_MACHINE_EPSILON) { // debug
1610:                     int       d, f;
1611:                     PetscReal tmp = 0;
1612:                     PetscPrintf(PETSC_COMM_SELF, "\t\t%d.%d.%d) ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%d)\n", eidx, q, fieldA, (double)sum, LANDAU_MAX_Q_FACE, maps[grid].num_face);
1613:                     for (d = 0, tmp = 0; d < numindices; ++d) {
1614:                       if (tmp != 0 && PetscAbs(tmp - 1.0) > 10 * PETSC_MACHINE_EPSILON) PetscPrintf(PETSC_COMM_WORLD, "%3d) %3" PetscInt_FMT ": ", d, indices[d]);
1615:                       for (f = 0; f < numindices; ++f) tmp += PetscRealPart(elMat[d * numindices + f]);
1616:                       if (tmp != 0) PetscPrintf(ctx->comm, " | %22.16e\n", (double)tmp);
1617:                     }
1618:                   }
1619:                   maps[grid].num_reduced++;
1621:                 }
1622:                 break;
1623:               }
1624:             }
1625:             // cleanup
1626:             DMPlexRestoreClosureIndices(ctx->plex[grid], section[grid], globsection[grid], ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **)&elMat);
1627:             if (elMat != valuesOrig) DMRestoreWorkArray(ctx->plex[grid], numindices * numindices, MPIU_SCALAR, &elMat);
1628:           }
1629:           if (ctx->coo_assembly) {                                 // setup COO assembly
1630:             coo_elem_offsets[glb_elem_idx + 1] += fullNb * fullNb; // one species block, adds a block for each species, on this element in this grid
1631:             if (fieldA == 0) {                                     // cache full Nb for this element, on this grid per species
1632:               coo_elem_fullNb[glb_elem_idx] = fullNb;
1633:               if (fullNb > ctx->SData_d.coo_max_fullnb) ctx->SData_d.coo_max_fullnb = fullNb;
1634:             }
1635:           }
1636:         } // field
1637:       }   // cell
1638:       // allocate and copy point data maps[grid].gIdx[eidx][field][q]
1639:       PetscMalloc(maps[grid].num_reduced * sizeof(*maps[grid].c_maps), &maps[grid].c_maps);
1640:       for (int ej = 0; ej < maps[grid].num_reduced; ++ej) {
1641:         for (int q = 0; q < maps[grid].num_face; ++q) {
1642:           maps[grid].c_maps[ej][q].scale = pointMaps[ej][q].scale;
1643:           maps[grid].c_maps[ej][q].gid   = pointMaps[ej][q].gid;
1644:         }
1645:       }
1646: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1647:       if (ctx->deviceType == LANDAU_KOKKOS) {
1648:         LandauKokkosCreateMatMaps(maps, pointMaps, Nf, Nq, grid); // implies Kokkos does
1649:       }                                                                      // else could be CUDA
1650: #endif
1651: #if defined(PETSC_HAVE_CUDA)
1652:       if (ctx->deviceType == LANDAU_CUDA) LandauCUDACreateMatMaps(maps, pointMaps, Nf, Nq, grid);
1653: #endif
1654:       if (plex_batch) {
1655:         ISRestoreIndices(grid_batch_is_inv[grid], &plex_batch);
1656:         ISDestroy(&grid_batch_is_inv[grid]); // we are done with this
1657:       }
1658:     } /* grids */
1659:     // finish COO
1660:     if (ctx->coo_assembly) { // setup COO assembly
1661:       PetscInt *oor, *ooc;
1662:       ctx->SData_d.coo_size = coo_elem_offsets[ncellsTot] * ctx->batch_sz;
1663:       PetscMalloc2(ctx->SData_d.coo_size, &oor, ctx->SData_d.coo_size, &ooc);
1664:       for (int i = 0; i < ctx->SData_d.coo_size; i++) oor[i] = ooc[i] = -1;
1665:       // get
1666:       for (int grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1667:         for (int ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) {
1668:           const int              fullNb           = coo_elem_fullNb[glb_elem_idx];
1669:           const LandauIdx *const Idxs             = &maps[grid].gIdx[ej][0][0]; // just use field-0 maps; they should all be the same, but this is just for COO storage
1670:           coo_elem_point_offsets[glb_elem_idx][0] = 0;
1671:           for (int f = 0, cnt2 = 0; f < Nb; f++) {
1672:             int idx                                     = Idxs[f];
1673:             coo_elem_point_offsets[glb_elem_idx][f + 1] = coo_elem_point_offsets[glb_elem_idx][f]; // start at last
1674:             if (idx >= 0) {
1675:               cnt2++;
1676:               coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc
1677:             } else {
1678:               idx = -idx - 1;
1679:               for (int q = 0; q < maps[grid].num_face; q++) {
1680:                 if (maps[grid].c_maps[idx][q].gid < 0) break;
1681:                 cnt2++;
1682:                 coo_elem_point_offsets[glb_elem_idx][f + 1]++; // inc
1683:               }
1684:             }
1686:           }
1688:         }
1689:       }
1690:       // set
1691:       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
1692:         for (int grid = 0, glb_elem_idx = 0; grid < ctx->num_grids; grid++) {
1693:           const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
1694:           for (int ej = 0; ej < numCells[grid]; ++ej, glb_elem_idx++) {
1695:             const int fullNb = coo_elem_fullNb[glb_elem_idx], fullNb2 = fullNb * fullNb;
1696:             // set (i,j)
1697:             for (int fieldA = 0; fieldA < Nf[grid]; fieldA++) {
1698:               const LandauIdx *const Idxs = &maps[grid].gIdx[ej][fieldA][0];
1699:               int                    rows[LANDAU_MAX_Q_FACE], cols[LANDAU_MAX_Q_FACE];
1700:               for (int f = 0; f < Nb; ++f) {
1701:                 const int nr = coo_elem_point_offsets[glb_elem_idx][f + 1] - coo_elem_point_offsets[glb_elem_idx][f];
1702:                 if (nr == 1) rows[0] = Idxs[f];
1703:                 else {
1704:                   const int idx = -Idxs[f] - 1;
1705:                   for (int q = 0; q < nr; q++) rows[q] = maps[grid].c_maps[idx][q].gid;
1706:                 }
1707:                 for (int g = 0; g < Nb; ++g) {
1708:                   const int nc = coo_elem_point_offsets[glb_elem_idx][g + 1] - coo_elem_point_offsets[glb_elem_idx][g];
1709:                   if (nc == 1) cols[0] = Idxs[g];
1710:                   else {
1711:                     const int idx = -Idxs[g] - 1;
1712:                     for (int q = 0; q < nc; q++) cols[q] = maps[grid].c_maps[idx][q].gid;
1713:                   }
1714:                   const int idx0 = b_id * coo_elem_offsets[ncellsTot] + coo_elem_offsets[glb_elem_idx] + fieldA * fullNb2 + fullNb * coo_elem_point_offsets[glb_elem_idx][f] + nr * coo_elem_point_offsets[glb_elem_idx][g];
1715:                   for (int q = 0, idx = idx0; q < nr; q++) {
1716:                     for (int d = 0; d < nc; d++, idx++) {
1717:                       oor[idx] = rows[q] + moffset;
1718:                       ooc[idx] = cols[d] + moffset;
1719:                     }
1720:                   }
1721:                 }
1722:               }
1723:             }
1724:           } // cell
1725:         }   // grid
1726:       }     // batch
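      /* Layout of the (row,col) arrays just filled, matching idx0 above (point_offsets is shorthand for
         coo_elem_point_offsets[glb_elem_idx]):
           idx = b_id*coo_elem_offsets[ncellsTot]              (start of this batch member)
               + coo_elem_offsets[glb_elem_idx]                (start of this element's blocks)
               + fieldA*fullNb*fullNb                          (one fullNb x fullNb block per field)
               + fullNb*point_offsets[f] + nr*point_offsets[g] (nr x nc patch for the point pair (f,g)) */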
1727:       MatSetPreallocationCOO(ctx->J, ctx->SData_d.coo_size, oor, ooc);
1728:       PetscFree2(oor, ooc);
1729:     }
1730:     PetscFree(pointMaps);
1731:     PetscFree(elemMatrix);
1732:     PetscContainerCreate(PETSC_COMM_SELF, &container);
1733:     PetscContainerSetPointer(container, (void *)maps);
1734:     PetscContainerSetUserDestroy(container, LandauGPUMapsDestroy);
1735:     PetscObjectCompose((PetscObject)ctx->J, "assembly_maps", (PetscObject)container);
1736:     PetscContainerDestroy(&container);
1737:     PetscLogEventEnd(ctx->events[2], 0, 0, 0, 0);
1738:   } // end GPU assembly
1739:   { /* create static point data, Jacobian called first, only one vertex copy */
1740:     PetscReal     *invJe, *ww, *xx, *yy, *zz = NULL, *invJ_a;
1741:     PetscInt       outer_ipidx, outer_ej, grid, nip_glb = 0;
1742:     PetscFE        fe;
1743:     const PetscInt Nb = Nq;
1744:     PetscLogEventBegin(ctx->events[7], 0, 0, 0, 0);
1745:     PetscInfo(ctx->plex[0], "Initialize static data\n");
1746:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) nip_glb += Nq * numCells[grid];
1747:     /* collect f data, first time is for Jacobian, but make mass now */
1748:     if (ctx->verbose > 0) {
1749:       PetscInt ncells = 0, N;
1750:       MatGetSize(ctx->J, &N, NULL);
1751:       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) ncells += numCells[grid];
1752:       PetscCall(PetscPrintf(ctx->comm, "%d) %s %" PetscInt_FMT " IPs, %" PetscInt_FMT " cells total, Nb=%" PetscInt_FMT ", Nq=%" PetscInt_FMT ", dim=%" PetscInt_FMT ", Tab: Nb=%" PetscInt_FMT " Nf=%" PetscInt_FMT " Np=%" PetscInt_FMT " cdim=%" PetscInt_FMT " N=%" PetscInt_FMT "\n", 0, "FormLandau", nip_glb, ncells, Nb, Nq, dim, Nb,
1753:                             ctx->num_species, Nb, dim, N));
1754:     }
1755:     PetscMalloc4(nip_glb, &ww, nip_glb, &xx, nip_glb, &yy, nip_glb * dim * dim, &invJ_a);
1756:     if (dim == 3) PetscMalloc1(nip_glb, &zz);
1757:     if (ctx->use_energy_tensor_trick) {
1758:       PetscFECreateDefault(PETSC_COMM_SELF, dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &fe);
1759:       PetscObjectSetName((PetscObject)fe, "energy");
1760:     }
1761:     /* init each grids static data - no batch */
1762:     for (grid = 0, outer_ipidx = 0, outer_ej = 0; grid < ctx->num_grids; grid++) { // OpenMP (once)
1763:       Vec          v2_2 = NULL;                                                    // projected function: v^2/2 for non-relativistic, gamma... for relativistic
1764:       PetscSection e_section;
1765:       DM           dmEnergy;
1766:       PetscInt     cStart, cEnd, ej;

1768:       DMPlexGetHeightStratum(ctx->plex[grid], 0, &cStart, &cEnd);
1769:       // prep energy trick, get v^2 / 2 vector
1770:       if (ctx->use_energy_tensor_trick) {
1771:         PetscErrorCode (*energyf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {ctx->use_relativistic_corrections ? gamma_m1_f : energy_f};
1772:         Vec        glob_v2;
1773:         PetscReal *c2_0[1], data[1] = {PetscSqr(C_0(ctx->v_0))};

1775:         DMClone(ctx->plex[grid], &dmEnergy);
1776:         PetscObjectSetName((PetscObject)dmEnergy, "energy");
1777:         DMSetField(dmEnergy, 0, NULL, (PetscObject)fe);
1778:         DMCreateDS(dmEnergy);
1779:         DMGetSection(dmEnergy, &e_section);
1780:         DMGetGlobalVector(dmEnergy, &glob_v2);
1781:         PetscObjectSetName((PetscObject)glob_v2, "trick");
1782:         c2_0[0] = &data[0];
1783:         DMProjectFunction(dmEnergy, 0., energyf, (void **)c2_0, INSERT_ALL_VALUES, glob_v2);
1784:         DMGetLocalVector(dmEnergy, &v2_2);
1785:         VecZeroEntries(v2_2); /* zero BCs so don't set */
1786:         DMGlobalToLocalBegin(dmEnergy, glob_v2, INSERT_VALUES, v2_2);
1787:         DMGlobalToLocalEnd(dmEnergy, glob_v2, INSERT_VALUES, v2_2);
1788:         DMViewFromOptions(dmEnergy, NULL, "-energy_dm_view");
1789:         VecViewFromOptions(glob_v2, NULL, "-energy_vec_view");
1790:         DMRestoreGlobalVector(dmEnergy, &glob_v2);
1791:       }
1792:       /* append part of the IP data for each grid */
1793:       for (ej = 0; ej < numCells[grid]; ++ej, ++outer_ej) {
1794:         PetscScalar *coefs = NULL;
1795:         PetscReal    vj[LANDAU_MAX_NQ * LANDAU_DIM], detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ * LANDAU_DIM * LANDAU_DIM], c0 = C_0(ctx->v_0), c02 = PetscSqr(c0);
1796:         invJe = invJ_a + outer_ej * Nq * dim * dim;
1797:         DMPlexComputeCellGeometryFEM(ctx->plex[grid], ej + cStart, quad, vj, Jdummy, invJe, detJj);
1798:         if (ctx->use_energy_tensor_trick) DMPlexVecGetClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs);
1799:         /* create static point data */
1800:         for (PetscInt qj = 0; qj < Nq; qj++, outer_ipidx++) {
1801:           const PetscInt   gidx = outer_ipidx;
1802:           const PetscReal *invJ = &invJe[qj * dim * dim];
1803:           ww[gidx]              = detJj[qj] * quadWeights[qj];
1804:           if (dim == 2) ww[gidx] *= vj[qj * dim + 0]; /* cylindrical coordinate, w/o 2pi */
1805:           // get xx, yy, zz
1806:           if (ctx->use_energy_tensor_trick) {
1807:             double                 refSpaceDer[3], eGradPhi[3];
1808:             const PetscReal *const DD = Tf[0]->T[1];
1809:             const PetscReal       *Dq = &DD[qj * Nb * dim];
1810:             for (int d = 0; d < 3; ++d) refSpaceDer[d] = eGradPhi[d] = 0.0;
1811:             for (int b = 0; b < Nb; ++b) {
1812:               for (int d = 0; d < dim; ++d) refSpaceDer[d] += Dq[b * dim + d] * PetscRealPart(coefs[b]);
1813:             }
1814:             xx[gidx] = 1e10;
1815:             if (ctx->use_relativistic_corrections) {
1816:               double dg2_c2 = 0;
1817:               //for (int d = 0; d < dim; ++d) refSpaceDer[d] *= c02;
1818:               for (int d = 0; d < dim; ++d) dg2_c2 += PetscSqr(refSpaceDer[d]);
1819:               dg2_c2 *= (double)c02;
1820:               if (dg2_c2 >= .999) {
1821:                 xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1822:                 yy[gidx] = vj[qj * dim + 1];
1823:                 if (dim == 3) zz[gidx] = vj[qj * dim + 2];
1824:                 PetscPrintf(ctx->comm, "Error: %12.5e %" PetscInt_FMT ".%" PetscInt_FMT ") dg2/c02 = %12.5e x= %12.5e %12.5e %12.5e\n", (double)PetscSqrtReal(xx[gidx] * xx[gidx] + yy[gidx] * yy[gidx] + zz[gidx] * zz[gidx]), ej, qj, dg2_c2, (double)xx[gidx], (double)yy[gidx], (double)zz[gidx]);
1825:               } else {
1826:                 PetscReal fact = c02 / PetscSqrtReal(1. - dg2_c2);
1827:                 for (int d = 0; d < dim; ++d) refSpaceDer[d] *= fact;
1828:                 // could test with other point u' that (grad - grad') * U (refSpaceDer, refSpaceDer') == 0
1829:               }
1830:             }
1831:             if (xx[gidx] == 1e10) {
1832:               for (int d = 0; d < dim; ++d) {
1833:                 for (int e = 0; e < dim; ++e) eGradPhi[d] += invJ[e * dim + d] * refSpaceDer[e];
1834:               }
1835:               xx[gidx] = eGradPhi[0];
1836:               yy[gidx] = eGradPhi[1];
1837:               if (dim == 3) zz[gidx] = eGradPhi[2];
1838:             }
1839:           } else {
1840:             xx[gidx] = vj[qj * dim + 0]; /* coordinate */
1841:             yy[gidx] = vj[qj * dim + 1];
1842:             if (dim == 3) zz[gidx] = vj[qj * dim + 2];
1843:           }
1844:         } /* q */
1845:         if (ctx->use_energy_tensor_trick) DMPlexVecRestoreClosure(dmEnergy, e_section, v2_2, ej + cStart, NULL, &coefs);
1846:       } /* ej */
1847:       if (ctx->use_energy_tensor_trick) {
1848:         DMRestoreLocalVector(dmEnergy, &v2_2);
1849:         DMDestroy(&dmEnergy);
1850:       }
1851:     } /* grid */
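    /* With the energy-tensor trick the cached xx/yy/zz are not quadrature-point coordinates but the
       components of grad(v^2/2) (or the gradient of the gamma-based energy when relativistic corrections
       are enabled), taken from the projected "energy" field above; otherwise they are the coordinates vj. */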
1852:     if (ctx->use_energy_tensor_trick) PetscFEDestroy(&fe);
1853:     /* cache static data */
1854:     if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
1855: #if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_KOKKOS_KERNELS)
1856:       PetscReal invMass[LANDAU_MAX_SPECIES], nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
1857:       for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1858:         for (PetscInt ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++) {
1859:           invMass[ii]  = ctx->m_0 / ctx->masses[ii];
1860:           nu_alpha[ii] = PetscSqr(ctx->charges[ii] / ctx->m_0) * ctx->m_0 / ctx->masses[ii];
1861:           nu_beta[ii]  = PetscSqr(ctx->charges[ii] / ctx->epsilon0) * ctx->lnLam / (8 * PETSC_PI) * ctx->t_0 * ctx->n_0 / PetscPowReal(ctx->v_0, 3);
1862:         }
1863:       }
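      /* In code units the per-species coefficients above are
           invMass[i]  = m_0 / m_i
           nu_alpha[i] = (q_i / m_0)^2 * (m_0 / m_i)
           nu_beta[i]  = (q_i / epsilon0)^2 * lnLambda / (8*pi) * t_0 * n_0 / v_0^3 */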
1864:       if (ctx->deviceType == LANDAU_CUDA) {
1865:   #if defined(PETSC_HAVE_CUDA)
1866:         LandauCUDAStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, nu_alpha, nu_beta, invMass, invJ_a, xx, yy, zz, ww, &ctx->SData_d);
1867:   #else
1868:         SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type cuda not built");
1869:   #endif
1870:       } else if (ctx->deviceType == LANDAU_KOKKOS) {
1871:   #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1872:         LandauKokkosStaticDataSet(ctx->plex[0], Nq, ctx->batch_sz, ctx->num_grids, numCells, ctx->species_offset, ctx->mat_offset, nu_alpha, nu_beta, invMass, invJ_a, xx, yy, zz, ww, &ctx->SData_d);
1873:   #else
1874:         SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type kokkos not built");
1875:   #endif
1876:       }
1877: #endif
1878:       /* free */
1879:       PetscFree4(ww, xx, yy, invJ_a);
1880:       if (dim == 3) PetscFree(zz);
1881:     } else { /* CPU version, just copy in, only use part */
1882:       ctx->SData_d.w    = (void *)ww;
1883:       ctx->SData_d.x    = (void *)xx;
1884:       ctx->SData_d.y    = (void *)yy;
1885:       ctx->SData_d.z    = (void *)zz;
1886:       ctx->SData_d.invJ = (void *)invJ_a;
1887:     }
1888:     PetscLogEventEnd(ctx->events[7], 0, 0, 0, 0);
1889:   } // initialize
1890:   return 0;
1891: }

1893: /* < v, u > */
1894: static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1895: {
1896:   g0[0] = 1.;
1897: }

1899: /* < v, u > */
1900: static void g0_fake(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1901: {
1902:   static double ttt = 1e-12;
1903:   g0[0]             = ttt++;
1904: }

1906: /* < v, u > */
1907: static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1908: {
1909:   g0[0] = 2. * PETSC_PI * x[0];
1910: }
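/* g0_r weights the integrand by 2*pi*r, the cylindrical volume element of the axisymmetric 2D (r,z)
   coordinates used here; note the static quadrature weights ww above carry the r factor but not the 2*pi. */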

1912: static PetscErrorCode MatrixNfDestroy(void *ptr)
1913: {
1914:   PetscInt *nf = (PetscInt *)ptr;
1915:   PetscFree(nf);
1916:   return 0;
1917: }

1919: /*
1920:  LandauCreateJacobianMatrix - creates ctx->J without real data. Hard to keep sparse.
1921:   - Like DMPlexLandauCreateMassMatrix. Should remove one and combine
1922:   - has old support for field major ordering
1923:  */
1924: static PetscErrorCode LandauCreateJacobianMatrix(MPI_Comm comm, Vec X, IS grid_batch_is_inv[LANDAU_MAX_GRIDS], LandauCtx *ctx)
1925: {
1926:   PetscInt *idxs = NULL;
1927:   Mat       subM[LANDAU_MAX_GRIDS];

1929:   if (!ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1930:     return 0;
1931:   }
1932:   // get the RCM for this grid to separate out species into blocks -- create 'idxs' & 'ctx->batch_is' -- not used
1933:   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) PetscMalloc1(ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, &idxs);
1934:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1935:     const PetscInt *values, n = ctx->mat_offset[grid + 1] - ctx->mat_offset[grid];
1936:     Mat             gMat;
1937:     DM              massDM;
1938:     PetscDS         prob;
1939:     Vec             tvec;
1940:     // get "mass" matrix for reordering
1941:     DMClone(ctx->plex[grid], &massDM);
1942:     DMCopyFields(ctx->plex[grid], massDM);
1943:     DMCreateDS(massDM);
1944:     DMGetDS(massDM, &prob);
1945:     for (int ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) PetscDSSetJacobian(prob, ix, ix, g0_fake, NULL, NULL, NULL);
1946:     PetscOptionsInsertString(NULL, "-dm_preallocate_only"); // this trick is needed both to sparsify the matrix and to avoid a runtime error
1947:     DMCreateMatrix(massDM, &gMat);
1948:     PetscOptionsInsertString(NULL, "-dm_preallocate_only false");
1949:     MatSetOption(gMat, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
1950:     MatSetOption(gMat, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE);
1951:     DMCreateLocalVector(ctx->plex[grid], &tvec);
1952:     DMPlexSNESComputeJacobianFEM(massDM, tvec, gMat, gMat, ctx);
1953:     MatViewFromOptions(gMat, NULL, "-dm_landau_reorder_mat_view");
1954:     DMDestroy(&massDM);
1955:     VecDestroy(&tvec);
1956:     subM[grid] = gMat;
1957:     if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
1958:       MatOrderingType rtype = MATORDERINGRCM;
1959:       IS              isrow, isicol;
1960:       MatGetOrdering(gMat, rtype, &isrow, &isicol);
1961:       ISInvertPermutation(isrow, PETSC_DECIDE, &grid_batch_is_inv[grid]);
1962:       ISGetIndices(isrow, &values);
1963:       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
1964: #if !defined(LANDAU_SPECIES_MAJOR)
1965:         PetscInt N = ctx->mat_offset[ctx->num_grids], n0 = ctx->mat_offset[grid] + b_id * N;
1966:         for (int ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0;
1967: #else
1968:         PetscInt n0 = ctx->mat_offset[grid] * ctx->batch_sz + b_id * n;
1969:         for (int ii = 0; ii < n; ++ii) idxs[n0 + ii] = values[ii] + n0;
1970: #endif
1971:       }
1972:       ISRestoreIndices(isrow, &values);
1973:       ISDestroy(&isrow);
1974:       ISDestroy(&isicol);
1975:     }
1976:   }
1977:   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) ISCreateGeneral(comm, ctx->mat_offset[ctx->num_grids] * ctx->batch_sz, idxs, PETSC_OWN_POINTER, &ctx->batch_is);
1978:   // get a block matrix
1979:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
1980:     Mat      B = subM[grid];
1981:     PetscInt nloc, nzl, *colbuf, row, COL_BF_SIZE = 1024;
1982:     PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf);
1983:     MatGetSize(B, &nloc, NULL);
1984:     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
1985:       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
1986:       const PetscInt    *cols;
1987:       const PetscScalar *vals;
1988:       for (int i = 0; i < nloc; i++) {
1989:         MatGetRow(B, i, &nzl, NULL, NULL);
1990:         if (nzl > COL_BF_SIZE) {
1991:           PetscFree(colbuf);
1992:           PetscInfo(ctx->plex[grid], "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size %" PetscInt_FMT ") \n", COL_BF_SIZE, nzl, nzl);
1993:           COL_BF_SIZE = nzl;
1994:           PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf);
1995:         }
1996:         MatGetRow(B, i, &nzl, &cols, &vals);
1997:         for (int j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset;
1998:         row = i + moffset;
1999:         MatSetValues(ctx->J, 1, &row, nzl, colbuf, vals, INSERT_VALUES);
2000:         MatRestoreRow(B, i, &nzl, &cols, &vals);
2001:       }
2002:     }
2003:     PetscFree(colbuf);
2004:   }
2005:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) MatDestroy(&subM[grid]);
2006:   MatAssemblyBegin(ctx->J, MAT_FINAL_ASSEMBLY);
2007:   MatAssemblyEnd(ctx->J, MAT_FINAL_ASSEMBLY);

2009:   // debug
2010:   MatViewFromOptions(ctx->J, NULL, "-dm_landau_mat_view");
2011:   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2012:     Mat mat_block_order;
2013:     MatCreateSubMatrix(ctx->J, ctx->batch_is, ctx->batch_is, MAT_INITIAL_MATRIX, &mat_block_order); // use MatPermute
2014:     MatViewFromOptions(mat_block_order, NULL, "-dm_landau_mat_view");
2015:     MatDestroy(&mat_block_order);
2016:     VecScatterCreate(X, ctx->batch_is, X, NULL, &ctx->plex_batch);
2017:     VecDuplicate(X, &ctx->work_vec);
2018:   }

2020:   return 0;
2021: }
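/* The assembled ctx->J is block structured over (batch, grid): the per-grid pattern matrix subM[grid] is
   inserted batch_sz times at the row/column offset LAND_MOFFSET(b_id, grid, batch_sz, num_grids, mat_offset),
   so every (batch, grid) block shares the same nonzero pattern. */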

2023: PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat);
2024: /*@C
2025:  DMPlexLandauCreateVelocitySpace - Create a DMPlex velocity space mesh

2027:  Collective

2029:  Input Parameters:
2030:  +   comm  - The MPI communicator
2031:  .   dim - velocity space dimension (2 for axisymmetric, 3 for full 3X + 3V solver)
2032:  -   prefix - prefix for options (not tested)

2034:  Output Parameters:
2035:  +   pack  - The DM object representing the mesh
2036:  .   X - A vector (user destroys)
2037:  -   J - Optional matrix (object destroys)

2039:  Level: beginner

2041:  .keywords: mesh
2042:  .seealso: `DMPlexCreate()`, `DMPlexLandauDestroyVelocitySpace()`
2043:  @*/
2044: PetscErrorCode DMPlexLandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *pack)
2045: {
2046:   LandauCtx *ctx;
2047:   Vec        Xsub[LANDAU_MAX_GRIDS];
2048:   IS         grid_batch_is_inv[LANDAU_MAX_GRIDS];

2052:   PetscNew(&ctx);
2053:   ctx->comm = comm; /* used for diagnostics and global errors */
2054:   /* process options */
2055:   ProcessOptions(ctx, prefix);
2056:   if (dim == 2) ctx->use_relativistic_corrections = PETSC_FALSE;
2057:   /* Create Mesh */
2058:   DMCompositeCreate(PETSC_COMM_SELF, pack);
2059:   PetscLogEventBegin(ctx->events[13], 0, 0, 0, 0);
2060:   PetscLogEventBegin(ctx->events[15], 0, 0, 0, 0);
2061:   LandauDMCreateVMeshes(PETSC_COMM_SELF, dim, prefix, ctx, *pack); // creates grids (Forest of AMR)
2062:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2063:     /* create FEM */
2064:     SetupDS(ctx->plex[grid], dim, grid, ctx);
2065:     /* set initial state */
2066:     DMCreateGlobalVector(ctx->plex[grid], &Xsub[grid]);
2067:     PetscObjectSetName((PetscObject)Xsub[grid], "u_orig");
2068:     /* initial static refinement, no solve */
2069:     LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, 0, 1, ctx);
2070:     /* forest refinement - forest goes in (if forest), plex comes out */
2071:     if (ctx->use_p4est) {
2072:       DM plex;
2073:       adapt(grid, ctx, &Xsub[grid]);                                      // forest goes in, plex comes out
2074:       DMViewFromOptions(ctx->plex[grid], NULL, "-dm_landau_amr_dm_view"); // need to differentiate - todo
2075:       VecViewFromOptions(Xsub[grid], NULL, "-dm_landau_amr_vec_view");
2076:       // convert to plex, all done with this level
2077:       DMConvert(ctx->plex[grid], DMPLEX, &plex);
2078:       DMDestroy(&ctx->plex[grid]);
2079:       ctx->plex[grid] = plex;
2080:     }
2081: #if !defined(LANDAU_SPECIES_MAJOR)
2082:     DMCompositeAddDM(*pack, ctx->plex[grid]);
2083: #else
2084:     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
2085:       DMCompositeAddDM(*pack, ctx->plex[grid]);
2086:     }
2087: #endif
2088:     DMSetApplicationContext(ctx->plex[grid], ctx);
2089:   }
2090: #if !defined(LANDAU_SPECIES_MAJOR)
2091:   // stack the remaining batched DMs (b_id = 0 was added in the grid loop above)
2092:   for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) {
2093:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) DMCompositeAddDM(*pack, ctx->plex[grid]);
2094:   }
2095: #endif
2096:   // create ctx->mat_offset
2097:   ctx->mat_offset[0] = 0;
2098:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2099:     PetscInt n;
2100:     VecGetLocalSize(Xsub[grid], &n);
2101:     ctx->mat_offset[grid + 1] = ctx->mat_offset[grid] + n;
2102:   }
2103:   // create the Jacobian matrix for the packed DM
2104:   DMSetApplicationContext(*pack, ctx);
2105:   PetscOptionsInsertString(NULL, "-dm_preallocate_only");
2106:   DMCreateMatrix(*pack, &ctx->J);
2107:   PetscOptionsInsertString(NULL, "-dm_preallocate_only false");
2108:   MatSetOption(ctx->J, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
2109:   MatSetOption(ctx->J, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE);
2110:   PetscObjectSetName((PetscObject)ctx->J, "Jac");
2111:   // construct initial conditions in X
2112:   DMCreateGlobalVector(*pack, X);
2113:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2114:     PetscInt n;
2115:     VecGetLocalSize(Xsub[grid], &n);
2116:     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2117:       PetscScalar const *values;
2118:       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2119:       LandauSetInitialCondition(ctx->plex[grid], Xsub[grid], grid, b_id, ctx->batch_sz, ctx);
2120:       VecGetArrayRead(Xsub[grid], &values); // Drop whole grid in Plex ordering
2121:       for (int i = 0, idx = moffset; i < n; i++, idx++) VecSetValue(*X, idx, values[i], INSERT_VALUES);
2122:       VecRestoreArrayRead(Xsub[grid], &values);
2123:     }
2124:   }
2125:   // cleanup
2126:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) VecDestroy(&Xsub[grid]);
2127:   /* check for correct matrix type */
2128:   if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
2129:     PetscBool flg;
2130:     if (ctx->deviceType == LANDAU_CUDA) {
2131:       PetscObjectTypeCompareAny((PetscObject)ctx->J, &flg, MATSEQAIJCUSPARSE, MATMPIAIJCUSPARSE, MATAIJCUSPARSE, "");
2133:     } else if (ctx->deviceType == LANDAU_KOKKOS) {
2134:       PetscObjectTypeCompareAny((PetscObject)ctx->J, &flg, MATSEQAIJKOKKOS, MATMPIAIJKOKKOS, MATAIJKOKKOS, "");
2135: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2137: #else
2139: #endif
2140:     }
2141:   }
2142:   PetscLogEventEnd(ctx->events[15], 0, 0, 0, 0);

2144:   // create field major ordering
2145:   ctx->work_vec   = NULL;
2146:   ctx->plex_batch = NULL;
2147:   ctx->batch_is   = NULL;
2148:   for (int i = 0; i < LANDAU_MAX_GRIDS; i++) grid_batch_is_inv[i] = NULL;
2149:   PetscLogEventBegin(ctx->events[12], 0, 0, 0, 0);
2150:   LandauCreateJacobianMatrix(comm, *X, grid_batch_is_inv, ctx);
2151:   PetscLogEventEnd(ctx->events[12], 0, 0, 0, 0);

2153:   // create AMR GPU assembly maps and static GPU data
2154:   CreateStaticGPUData(dim, grid_batch_is_inv, ctx);

2156:   PetscLogEventEnd(ctx->events[13], 0, 0, 0, 0);

2158:   // create mass matrix
2159:   DMPlexLandauCreateMassMatrix(*pack, NULL);

2161:   if (J) *J = ctx->J;

2163:   if (ctx->gpu_assembly && ctx->jacobian_field_major_order) {
2164:     PetscContainer container;
2165:     // cache ctx for KSP with batch/field major Jacobian ordering -ksp_type gmres/etc -dm_landau_jacobian_field_major_order
2166:     PetscContainerCreate(PETSC_COMM_SELF, &container);
2167:     PetscContainerSetPointer(container, (void *)ctx);
2168:     PetscObjectCompose((PetscObject)ctx->J, "LandauCtx", (PetscObject)container);
2169:     PetscContainerDestroy(&container);
2170:     // batch solvers need this map to convert between orderings
2171:     PetscContainerCreate(PETSC_COMM_SELF, &container);
2172:     PetscContainerSetPointer(container, (void *)ctx->plex_batch);
2173:     PetscObjectCompose((PetscObject)ctx->J, "plex_batch_is", (PetscObject)container);
2174:     PetscContainerDestroy(&container);
2175:   }
2176:   // for batch solvers
2177:   {
2178:     PetscContainer container;
2179:     PetscInt      *pNf;
2180:     PetscContainerCreate(PETSC_COMM_SELF, &container);
2181:     PetscMalloc1(sizeof(*pNf), &pNf);
2182:     *pNf = ctx->batch_sz;
2183:     PetscContainerSetPointer(container, (void *)pNf);
2184:     PetscContainerSetUserDestroy(container, MatrixNfDestroy);
2185:     PetscObjectCompose((PetscObject)ctx->J, "batch size", (PetscObject)container);
2186:     PetscContainerDestroy(&container);
2187:   }

2189:   return 0;
2190: }
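
/* Illustrative usage sketch (not from the PETSc sources): a minimal driver wiring the velocity space
   created above into an implicit TS with the IFunction/IJacobian defined later in this file. The
   function name ex_RunLandau and the choice dim = 2 are assumptions for the example only. */
static PetscErrorCode ex_RunLandau(MPI_Comm comm)
{
  DM  pack;
  Vec X;
  Mat J;
  TS  ts;

  /* mesh, solution vector, and Jacobian; species/grids come from the options database */
  DMPlexLandauCreateVelocitySpace(comm, 2, "", &X, &J, &pack);
  TSCreate(comm, &ts);
  TSSetDM(ts, pack); /* the Landau context travels with the DM */
  TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL);
  TSSetIJacobian(ts, J, J, DMPlexLandauIJacobian, NULL);
  TSSetFromOptions(ts);
  TSSolve(ts, X);
  DMPlexLandauPrintNorms(X, 0);
  /* X is destroyed by the user; J and the grids are destroyed with the velocity space */
  TSDestroy(&ts);
  VecDestroy(&X);
  DMPlexLandauDestroyVelocitySpace(&pack);
  return 0;
}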

2192: /*@
2193:  DMPlexLandauAccess - Access the distribution function with a user-provided callback

2195:  Collective

2197:  Input Parameters:
2198:  +   pack - the DMComposite
2199:  .   func - callback function that fills the per-species vector
2200:  -   user_ctx - user context passed to func

2202:  Input/Output Parameter:
2203:  .   X - the vector to add the data to

2205:  Level: advanced

2207:  .keywords: mesh
2208:  .seealso: `DMPlexLandauCreateVelocitySpace()`
2209:  @*/
2210: PetscErrorCode DMPlexLandauAccess(DM pack, Vec X, PetscErrorCode (*func)(DM, Vec, PetscInt, PetscInt, PetscInt, void *), void *user_ctx)
2211: {
2212:   LandauCtx *ctx;
2213:   DMGetApplicationContext(pack, &ctx); // uses ctx->num_grids; ctx->plex[grid]; ctx->batch_sz; ctx->mat_offset
2214:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2215:     PetscInt dim, n;
2216:     DMGetDimension(pack, &dim);
2217:     for (PetscInt sp = ctx->species_offset[grid], i0 = 0; sp < ctx->species_offset[grid + 1]; sp++, i0++) {
2218:       Vec      vec;
2219:       PetscInt vf[1] = {i0};
2220:       IS       vis;
2221:       DM       vdm;
2222:       DMCreateSubDM(ctx->plex[grid], 1, vf, &vis, &vdm);
2223:       DMSetApplicationContext(vdm, ctx); // the user might want this
2224:       DMCreateGlobalVector(vdm, &vec);
2225:       VecGetSize(vec, &n);
2226:       for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2227:         const PetscInt moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2228:         VecZeroEntries(vec);
2229:         /* the user callback fills 'vec' (on sub-DM 'vdm') for local species 'i0' of this grid and batch */
2230:         func(vdm, vec, i0, grid, b_id, user_ctx);
2231:         /* add to global */
2232:         PetscScalar const *values;
2233:         const PetscInt    *offsets;
2234:         VecGetArrayRead(vec, &values);
2235:         ISGetIndices(vis, &offsets);
2236:         for (int i = 0; i < n; i++) { VecSetValue(X, moffset + offsets[i], values[i], ADD_VALUES); }
2237:         VecRestoreArrayRead(vec, &values);
2238:         ISRestoreIndices(vis, &offsets);
2239:       } // batch
2240:       VecDestroy(&vec);
2241:       ISDestroy(&vis);
2242:       DMDestroy(&vdm);
2243:     }
2244:   } // grid
2245:   return 0;
2246: }
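
/* Illustrative sketch (the names ex_perturbation/ex_AddPerturbation and the temperature parameter are
   assumptions): a callback usable with DMPlexLandauAccess() that adds a small Maxwellian-like bump to
   one species. Because 'vdm' is a single-field sub-DM, DMProjectFunction() can fill 'vec' directly. */
static PetscErrorCode ex_perturbation(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx)
{
  PetscReal v2 = 0, theta = *(PetscReal *)actx; /* normalized temperature (example parameter) */

  for (PetscInt i = 0; i < dim; i++) v2 += x[i] * x[i];
  u[0] = 1.e-2 * PetscExpReal(-v2 / (2. * theta));
  return 0;
}

static PetscErrorCode ex_AddPerturbation(DM vdm, Vec vec, PetscInt i0, PetscInt grid, PetscInt b_id, void *user_ctx)
{
  PetscErrorCode (*funcs[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar *, void *) = {ex_perturbation};
  void *ctxs[1] = {user_ctx};

  /* fill the per-species vector; DMPlexLandauAccess() accumulates it into the packed global vector */
  DMProjectFunction(vdm, 0.0, funcs, ctxs, INSERT_ALL_VALUES, vec);
  return 0;
}
/* usage (sketch): PetscReal theta = 0.1; DMPlexLandauAccess(pack, X, ex_AddPerturbation, &theta); */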

2248: /*@
2249:  DMPlexLandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh

2251:  Collective

2253:  Input/Output Parameter:
2254:  .   dm - the DM to destroy

2256:  Level: beginner

2258:  .keywords: mesh
2259:  .seealso: `DMPlexLandauCreateVelocitySpace()`
2260:  @*/
2261: PetscErrorCode DMPlexLandauDestroyVelocitySpace(DM *dm)
2262: {
2263:   LandauCtx *ctx;
2264:   DMGetApplicationContext(*dm, &ctx);
2265:   MatDestroy(&ctx->M);
2266:   MatDestroy(&ctx->J);
2267:   for (PetscInt ii = 0; ii < ctx->num_species; ii++) PetscFEDestroy(&ctx->fe[ii]);
2268:   ISDestroy(&ctx->batch_is);
2269:   VecDestroy(&ctx->work_vec);
2270:   VecScatterDestroy(&ctx->plex_batch);
2271:   if (ctx->deviceType == LANDAU_CUDA) {
2272: #if defined(PETSC_HAVE_CUDA)
2273:     LandauCUDAStaticDataClear(&ctx->SData_d);
2274: #else
2275:     SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "cuda");
2276: #endif
2277:   } else if (ctx->deviceType == LANDAU_KOKKOS) {
2278: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
2279:     LandauKokkosStaticDataClear(&ctx->SData_d);
2280: #else
2281:     SETERRQ(ctx->comm, PETSC_ERR_ARG_WRONG, "-landau_device_type %s not built", "kokkos");
2282: #endif
2283:   } else {
2284:     if (ctx->SData_d.x) { /* in a CPU run */
2285:       PetscReal *invJ = (PetscReal *)ctx->SData_d.invJ, *xx = (PetscReal *)ctx->SData_d.x, *yy = (PetscReal *)ctx->SData_d.y, *zz = (PetscReal *)ctx->SData_d.z, *ww = (PetscReal *)ctx->SData_d.w;
2286:       LandauIdx *coo_elem_offsets = (LandauIdx *)ctx->SData_d.coo_elem_offsets, *coo_elem_fullNb = (LandauIdx *)ctx->SData_d.coo_elem_fullNb, (*coo_elem_point_offsets)[LANDAU_MAX_NQ + 1] = (LandauIdx(*)[LANDAU_MAX_NQ + 1]) ctx->SData_d.coo_elem_point_offsets;
2287:       PetscFree4(ww, xx, yy, invJ);
2288:       if (zz) PetscFree(zz);
2289:       if (coo_elem_offsets) {
2290:         PetscFree3(coo_elem_offsets, coo_elem_fullNb, coo_elem_point_offsets); // could be NULL
2291:       }
2292:     }
2293:   }

2295:   if (ctx->times[LANDAU_MATRIX_TOTAL] > 0) { // OMP timings
2296:     PetscPrintf(ctx->comm, "TSStep               N  1.0 %10.3e\n", ctx->times[LANDAU_EX2_TSSOLVE]);
2297:     PetscPrintf(ctx->comm, "2:           Solve:  %10.3e with %" PetscInt_FMT " threads\n", ctx->times[LANDAU_EX2_TSSOLVE] - ctx->times[LANDAU_MATRIX_TOTAL], ctx->batch_sz);
2298:     PetscPrintf(ctx->comm, "3:          Landau:  %10.3e\n", ctx->times[LANDAU_MATRIX_TOTAL]);
2299:     PetscPrintf(ctx->comm, "Landau Jacobian       %" PetscInt_FMT " 1.0 %10.3e\n", (PetscInt)ctx->times[LANDAU_JACOBIAN_COUNT], ctx->times[LANDAU_JACOBIAN]);
2300:     PetscPrintf(ctx->comm, "Landau Operator       N 1.0  %10.3e\n", ctx->times[LANDAU_OPERATOR]);
2301:     PetscPrintf(ctx->comm, "Landau Mass           N 1.0  %10.3e\n", ctx->times[LANDAU_MASS]);
2302:     PetscPrintf(ctx->comm, " Jac-f-df (GPU)       N 1.0  %10.3e\n", ctx->times[LANDAU_F_DF]);
2303:     PetscPrintf(ctx->comm, " Kernel (GPU)         N 1.0  %10.3e\n", ctx->times[LANDAU_KERNEL]);
2304:     PetscPrintf(ctx->comm, "MatLUFactorNum        X 1.0 %10.3e\n", ctx->times[KSP_FACTOR]);
2305:     PetscPrintf(ctx->comm, "MatSolve              X 1.0 %10.3e\n", ctx->times[KSP_SOLVE]);
2306:   }
2307:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) DMDestroy(&ctx->plex[grid]);
2308:   PetscFree(ctx);
2309:   DMDestroy(dm);
2310:   return 0;
2311: }

2313: /* density moment: integrand u[ii] */
2314: static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2315: {
2316:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2317:   f0[0]       = u[ii];
2318: }

2320: /* momentum moment (direction jj): integrand x[jj]*u[ii] */
2321: static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2322: {
2323:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]);
2324:   f0[0] = x[jj] * u[ii]; /* momentum in direction jj */
2325: }

2327: static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2328: {
2329:   PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]);
2330:   double   tmp1 = 0.;
2331:   for (i = 0; i < dim; ++i) tmp1 += x[i] * x[i];
2332:   f0[0] = tmp1 * u[ii];
2333: }

2335: static PetscErrorCode gamma_n_f(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf, PetscScalar *u, void *actx)
2336: {
2337:   const PetscReal *c2_0_arr = ((PetscReal *)actx);
2338:   const PetscReal  c02      = c2_0_arr[0];

2340:   for (int s = 0; s < Nf; s++) {
2341:     PetscReal tmp1 = 0.;
2342:     for (int i = 0; i < dim; ++i) tmp1 += x[i] * x[i];
2343: #if defined(PETSC_USE_DEBUG)
2344:     u[s] = PetscSqrtReal(1. + tmp1 / c02); // gamma = sqrt(1 + v^2/c^2)
2345: #else
2346:     {
2347:       PetscReal xx = tmp1 / c02;
2348:       u[s] = xx / (PetscSqrtReal(1. + xx) + 1.); // gamma - 1 = xx/(sqrt(1+xx)+1); avoids cancellation for small xx
2349:     }
2350: #endif
2351:   }
2352:   return 0;
2353: }
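
/* The f0_s_r* integrands below are the 2D axisymmetric (cylindrical coordinate) counterparts of the
   moments above: with x[0] = r and x[1] = z the velocity-space volume element is 2*pi*r dr dz, hence
   the explicit 2*pi*x[0] weight. */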

2355: /* density moment in cylindrical coordinates: integrand 2*pi*r*u[ii] */
2356: static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2357: {
2358:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2359:   f0[0]       = 2. * PETSC_PI * x[0] * u[ii];
2360: }

2362: /* z-momentum moment in cylindrical coordinates: integrand 2*pi*r*z*u[ii] */
2363: static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2364: {
2365:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2366:   f0[0]       = 2. * PETSC_PI * x[0] * x[1] * u[ii];
2367: }

2369: static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux, const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[], const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[], PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
2370: {
2371:   PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
2372:   f0[0]       = 2. * PETSC_PI * x[0] * (x[0] * x[0] + x[1] * x[1]) * u[ii];
2373: }

2375: /*@
2376:  DMPlexLandauPrintNorms - Collect the density, momentum, and energy moments of the state and print them

2378:  Collective

2380:  Input Parameters:
2381:  +   X  - the state
2382:  -   stepi - current step to print

2384:  Level: beginner

2386:  .keywords: mesh
2387:  .seealso: `DMPlexLandauCreateVelocitySpace()`
2388:  @*/
2389: PetscErrorCode DMPlexLandauPrintNorms(Vec X, PetscInt stepi)
2390: {
2391:   LandauCtx  *ctx;
2392:   PetscDS     prob;
2393:   DM          pack;
2394:   PetscInt    cStart, cEnd, dim, ii, i0, nDMs;
2395:   PetscScalar xmomentumtot = 0, ymomentumtot = 0, zmomentumtot = 0, energytot = 0, densitytot = 0, tt[LANDAU_MAX_SPECIES];
2396:   PetscScalar xmomentum[LANDAU_MAX_SPECIES], ymomentum[LANDAU_MAX_SPECIES], zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];
2397:   Vec        *globXArray;

2399:   VecGetDM(X, &pack);
2401:   DMGetDimension(pack, &dim);
2403:   DMGetApplicationContext(pack, &ctx);
2405:   /* print momentum and energy */
2406:   DMCompositeGetNumberDM(pack, &nDMs);
2408:   PetscMalloc(sizeof(*globXArray) * nDMs, &globXArray);
2409:   DMCompositeGetAccessArray(pack, X, nDMs, NULL, globXArray);
2410:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2411:     Vec Xloc = globXArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)];
2412:     DMGetDS(ctx->plex[grid], &prob);
2413:     for (ii = ctx->species_offset[grid], i0 = 0; ii < ctx->species_offset[grid + 1]; ii++, i0++) {
2414:       PetscScalar user[2] = {(PetscScalar)i0, (PetscScalar)ctx->charges[ii]};
2415:       PetscDSSetConstants(prob, 2, user);
2416:       if (dim == 2) { /* axisymmetric velocity space (cylindrical coordinates) */
2417:         PetscDSSetObjective(prob, 0, &f0_s_rden);
2418:         DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx);
2419:         density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii];
2420:         PetscDSSetObjective(prob, 0, &f0_s_rmom);
2421:         DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx);
2422:         zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2423:         PetscDSSetObjective(prob, 0, &f0_s_rv2);
2424:         DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx);
2425:         energy[ii] = tt[0] * 0.5 * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii];
2426:         zmomentumtot += zmomentum[ii];
2427:         energytot += energy[ii];
2428:         densitytot += density[ii];
2429:         PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species-%" PetscInt_FMT ": charge density= %20.13e z-momentum= %20.13e energy= %20.13e", stepi, ii, (double)PetscRealPart(density[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii]));
2430:       } else { /* full 3V velocity space */
2431:         PetscDSSetObjective(prob, 0, &f0_s_den);
2432:         DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx);
2433:         density[ii] = tt[0] * ctx->n_0 * ctx->charges[ii];
2434:         PetscDSSetObjective(prob, 0, &f0_s_mom);
2435:         user[1] = 0;
2436:         PetscDSSetConstants(prob, 2, user);
2437:         DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx);
2438:         xmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2439:         user[1]       = 1;
2440:         PetscDSSetConstants(prob, 2, user);
2441:         DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx);
2442:         ymomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2443:         user[1]       = 2;
2444:         PetscDSSetConstants(prob, 2, user);
2445:         DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx);
2446:         zmomentum[ii] = tt[0] * ctx->n_0 * ctx->v_0 * ctx->masses[ii];
2447:         if (ctx->use_relativistic_corrections) {
2448:           /* gamma * M * f */
2449:           if (ii == 0 && grid == 0) { // do all at once
2450:             Vec Mf, globGamma, *globMfArray, *globGammaArray;
2451:             PetscErrorCode (*gammaf[1])(PetscInt, PetscReal, const PetscReal[], PetscInt, PetscScalar[], void *) = {gamma_n_f};
2452:             PetscReal *c2_0[1], data[1];

2454:             VecDuplicate(X, &globGamma);
2455:             VecDuplicate(X, &Mf);
2456:             PetscMalloc(sizeof(*globMfArray) * nDMs, &globMfArray);
2457:             PetscMalloc(sizeof(*globMfArray) * nDMs, &globGammaArray);
2458:             /* M * f */
2459:             MatMult(ctx->M, X, Mf);
2460:             /* gamma */
2461:             DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);
2462:             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // nested grid loop for nicer printing; needs fixing for batching
2463:               Vec v1  = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)];
2464:               data[0] = PetscSqr(C_0(ctx->v_0));
2465:               c2_0[0] = &data[0];
2466:               DMProjectFunction(ctx->plex[grid], 0., gammaf, (void **)c2_0, INSERT_ALL_VALUES, v1);
2467:             }
2468:             DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);
2469:             /* gamma * Mf */
2470:             DMCompositeGetAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);
2471:             DMCompositeGetAccessArray(pack, Mf, nDMs, NULL, globMfArray);
2472:             for (PetscInt grid = 0; grid < ctx->num_grids; grid++) { // nested grid loop for nicer printing
2473:               PetscInt Nf    = ctx->species_offset[grid + 1] - ctx->species_offset[grid], N, bs;
2474:               Vec      Mfsub = globMfArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], Gsub = globGammaArray[LAND_PACK_IDX(ctx->batch_view_idx, grid)], v1, v2;
2475:               // get each component
2476:               VecGetSize(Mfsub, &N);
2477:               VecCreate(ctx->comm, &v1);
2478:               VecSetSizes(v1, PETSC_DECIDE, N / Nf);
2479:               VecCreate(ctx->comm, &v2);
2480:               VecSetSizes(v2, PETSC_DECIDE, N / Nf);
2481:               VecSetFromOptions(v1); // ???
2482:               VecSetFromOptions(v2);
2483:               // get each component
2484:               VecGetBlockSize(Gsub, &bs);
2486:               VecGetBlockSize(Mfsub, &bs);
2488:               for (int i = 0, ix = ctx->species_offset[grid]; i < Nf; i++, ix++) {
2489:                 PetscScalar val;
2490:                 VecStrideGather(Gsub, i, v1, INSERT_VALUES); // this is not right -- TODO
2491:                 VecStrideGather(Mfsub, i, v2, INSERT_VALUES);
2492:                 VecDot(v1, v2, &val);
2493:                 energy[ix] = PetscRealPart(val) * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ix];
2494:               }
2495:               VecDestroy(&v1);
2496:               VecDestroy(&v2);
2497:             } /* grids */
2498:             DMCompositeRestoreAccessArray(pack, globGamma, nDMs, NULL, globGammaArray);
2499:             DMCompositeRestoreAccessArray(pack, Mf, nDMs, NULL, globMfArray);
2500:             PetscFree(globGammaArray);
2501:             PetscFree(globMfArray);
2502:             VecDestroy(&globGamma);
2503:             VecDestroy(&Mf);
2504:           }
2505:         } else {
2506:           PetscDSSetObjective(prob, 0, &f0_s_v2);
2507:           DMPlexComputeIntegralFEM(ctx->plex[grid], Xloc, tt, ctx);
2508:           energy[ii] = 0.5 * tt[0] * ctx->n_0 * ctx->v_0 * ctx->v_0 * ctx->masses[ii];
2509:         }
2510:         PetscPrintf(ctx->comm, "%3" PetscInt_FMT ") species %" PetscInt_FMT ": density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e", stepi, ii, (double)PetscRealPart(density[ii]), (double)PetscRealPart(xmomentum[ii]), (double)PetscRealPart(ymomentum[ii]), (double)PetscRealPart(zmomentum[ii]), (double)PetscRealPart(energy[ii]));
2511:         xmomentumtot += xmomentum[ii];
2512:         ymomentumtot += ymomentum[ii];
2513:         zmomentumtot += zmomentum[ii];
2514:         energytot += energy[ii];
2515:         densitytot += density[ii];
2516:       }
2517:       if (ctx->num_species > 1) PetscPrintf(ctx->comm, "\n");
2518:     }
2519:   }
2520:   DMCompositeRestoreAccessArray(pack, X, nDMs, NULL, globXArray);
2521:   PetscFree(globXArray);
2522:   /* totals */
2523:   DMPlexGetHeightStratum(ctx->plex[0], 0, &cStart, &cEnd);
2524:   if (ctx->num_species > 1) {
2525:     if (dim == 2) {
2526:       PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells on electron grid)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot),
2527:                             (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart));
2528:     } else {
2529:       PetscCall(PetscPrintf(ctx->comm, "\t%3" PetscInt_FMT ") Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %" PetscInt_FMT " cells)", stepi, (double)PetscRealPart(densitytot), (double)PetscRealPart(xmomentumtot), (double)PetscRealPart(ymomentumtot), (double)PetscRealPart(zmomentumtot), (double)PetscRealPart(energytot),
2530:                             (double)(ctx->masses[1] / ctx->masses[0]), cEnd - cStart));
2531:     }
2532:   } else PetscPrintf(ctx->comm, " -- %" PetscInt_FMT " cells", cEnd - cStart);
2533:   PetscPrintf(ctx->comm, "\n");
2534:   return 0;
2535: }
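
/* Illustrative sketch (ex_MonitorNorms is a hypothetical name): DMPlexLandauPrintNorms() is typically
   called from a TS monitor so the moments are reported every accepted step, e.g. registered with
   TSMonitorSet(ts, ex_MonitorNorms, NULL, NULL). */
static PetscErrorCode ex_MonitorNorms(TS ts, PetscInt stepi, PetscReal time, Vec X, void *actx)
{
  DMPlexLandauPrintNorms(X, stepi);
  return 0;
}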

2537: /*@
2538:  DMPlexLandauCreateMassMatrix - Create the mass matrix for the Landau operator in Plex ordering (not the field-major ordering of the Jacobian) and store it in the Landau context (ctx->M)

2541:  Collective

2543:  Input/Output Parameter:
2544: . pack     - the DM object; the matrix is stored in the Landau context attached to it

2546:  Output Parameter:
2547: . Amat - The mass matrix (optional)

2549:  Level: beginner

2551:  .keywords: mesh
2552:  .seealso: `DMPlexLandauCreateVelocitySpace()`
2553:  @*/
2554: PetscErrorCode DMPlexLandauCreateMassMatrix(DM pack, Mat *Amat)
2555: {
2556:   DM         mass_pack, massDM[LANDAU_MAX_GRIDS];
2557:   PetscDS    prob;
2558:   PetscInt   ii, dim, N1 = 1, N2;
2559:   LandauCtx *ctx;
2560:   Mat        packM, subM[LANDAU_MAX_GRIDS];

2564:   DMGetApplicationContext(pack, &ctx);
2566:   PetscLogEventBegin(ctx->events[14], 0, 0, 0, 0);
2567:   DMGetDimension(pack, &dim);
2568:   DMCompositeCreate(PetscObjectComm((PetscObject)pack), &mass_pack);
2569:   /* create pack mass matrix */
2570:   for (PetscInt grid = 0, ix = 0; grid < ctx->num_grids; grid++) {
2571:     DMClone(ctx->plex[grid], &massDM[grid]);
2572:     DMCopyFields(ctx->plex[grid], massDM[grid]);
2573:     DMCreateDS(massDM[grid]);
2574:     DMGetDS(massDM[grid], &prob);
2575:     for (ix = 0, ii = ctx->species_offset[grid]; ii < ctx->species_offset[grid + 1]; ii++, ix++) {
2576:       if (dim == 3) PetscDSSetJacobian(prob, ix, ix, g0_1, NULL, NULL, NULL);
2577:       else PetscDSSetJacobian(prob, ix, ix, g0_r, NULL, NULL, NULL);
2578:     }
2579: #if !defined(LANDAU_SPECIES_MAJOR)
2580:     DMCompositeAddDM(mass_pack, massDM[grid]);
2581: #else
2582:     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) { // add batch size DMs for this species grid
2583:       DMCompositeAddDM(mass_pack, massDM[grid]);
2584:     }
2585: #endif
2586:     DMCreateMatrix(massDM[grid], &subM[grid]);
2587:   }
2588: #if !defined(LANDAU_SPECIES_MAJOR)
2589:   // stack the batched DMs
2590:   for (PetscInt b_id = 1; b_id < ctx->batch_sz; b_id++) {
2591:     for (PetscInt grid = 0; grid < ctx->num_grids; grid++) DMCompositeAddDM(mass_pack, massDM[grid]);
2592:   }
2593: #endif
2594:   PetscOptionsInsertString(NULL, "-dm_preallocate_only");
2595:   DMCreateMatrix(mass_pack, &packM);
2596:   PetscOptionsInsertString(NULL, "-dm_preallocate_only false");
2597:   MatSetOption(packM, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
2598:   MatSetOption(packM, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE);
2599:   DMDestroy(&mass_pack);
2600:   /* make mass matrix for each block */
2601:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2602:     Vec locX;
2603:     DM  plex = massDM[grid];
2604:     DMGetLocalVector(plex, &locX);
2605:     /* Mass matrix is independent of the input, so no need to fill locX */
2606:     DMPlexSNESComputeJacobianFEM(plex, locX, subM[grid], subM[grid], ctx);
2607:     DMRestoreLocalVector(plex, &locX);
2608:     DMDestroy(&massDM[grid]);
2609:   }
2610:   MatGetSize(ctx->J, &N1, NULL);
2611:   MatGetSize(packM, &N2, NULL);
2613:   /* assemble block diagonals */
2614:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) {
2615:     Mat      B = subM[grid];
2616:     PetscInt nloc, nzl, *colbuf, COL_BF_SIZE = 1024, row;
2617:     PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf);
2618:     MatGetSize(B, &nloc, NULL);
2619:     for (PetscInt b_id = 0; b_id < ctx->batch_sz; b_id++) {
2620:       const PetscInt     moffset = LAND_MOFFSET(b_id, grid, ctx->batch_sz, ctx->num_grids, ctx->mat_offset);
2621:       const PetscInt    *cols;
2622:       const PetscScalar *vals;
2623:       for (int i = 0; i < nloc; i++) {
2624:         MatGetRow(B, i, &nzl, NULL, NULL);
2625:         if (nzl > COL_BF_SIZE) {
2626:           PetscFree(colbuf);
2627:           PetscInfo(pack, "Realloc buffer %" PetscInt_FMT " to %" PetscInt_FMT " (row size)\n", COL_BF_SIZE, nzl);
2628:           COL_BF_SIZE = nzl;
2629:           PetscMalloc(sizeof(*colbuf) * COL_BF_SIZE, &colbuf);
2630:         }
2631:         MatGetRow(B, i, &nzl, &cols, &vals);
2632:         for (int j = 0; j < nzl; j++) colbuf[j] = cols[j] + moffset;
2633:         row = i + moffset;
2634:         MatSetValues(packM, 1, &row, nzl, colbuf, vals, INSERT_VALUES);
2635:         MatRestoreRow(B, i, &nzl, &cols, &vals);
2636:       }
2637:     }
2638:     PetscFree(colbuf);
2639:   }
2640:   // cleanup
2641:   for (PetscInt grid = 0; grid < ctx->num_grids; grid++) MatDestroy(&subM[grid]);
2642:   MatAssemblyBegin(packM, MAT_FINAL_ASSEMBLY);
2643:   MatAssemblyEnd(packM, MAT_FINAL_ASSEMBLY);
2644:   PetscObjectSetName((PetscObject)packM, "mass");
2645:   MatViewFromOptions(packM, NULL, "-dm_landau_mass_view");
2646:   ctx->M = packM;
2647:   if (Amat) *Amat = packM;
2648:   PetscLogEventEnd(ctx->events[14], 0, 0, 0, 0);
2649:   return 0;
2650: }
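
/* Illustrative sketch (ex_MassInnerProduct is a hypothetical name): with the mass matrix stored in the
   Landau context, discrete L2 inner products of distribution functions in the packed Plex ordering are
   just <f, g>_M = f^T M g (real case). */
static PetscErrorCode ex_MassInnerProduct(DM pack, Vec f, Vec g, PetscScalar *ip)
{
  LandauCtx *ctx;
  Vec        Mg;

  DMGetApplicationContext(pack, &ctx); /* ctx->M was filled by DMPlexLandauCreateMassMatrix() */
  VecDuplicate(g, &Mg);
  MatMult(ctx->M, g, Mg);
  VecDot(Mg, f, ip);
  VecDestroy(&Mg);
  return 0;
}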

2652: /*@
2653:  DMPlexLandauIFunction - TS residual calculation; note that, despite the name, this also assembles the collision Jacobian (without the mass term)

2655:  Collective

2657:  Input Parameters:
2658: +   ts  - The time stepping context
2659: .   time_dummy - current time (not used)
2660: .   X - Current state
2661: .   X_t - Time derivative of current state
2662: -   actx - Landau context

2664:  Output Parameter:
2665: .   F  - The residual

2667:  Level: beginner

2669:  .keywords: mesh
2670:  .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIJacobian()`
2671:  @*/
2672: PetscErrorCode DMPlexLandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx)
2673: {
2674:   LandauCtx *ctx = (LandauCtx *)actx;
2675:   PetscInt   dim;
2676:   DM         pack;
2677: #if defined(PETSC_HAVE_THREADSAFETY)
2678:   double starttime, endtime;
2679: #endif
2680:   PetscObjectState state;

2682:   TSGetDM(ts, &pack);
2683:   DMGetApplicationContext(pack, &ctx);
2685:   if (ctx->stage) PetscLogStagePush(ctx->stage);
2686:   PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0);
2687:   PetscLogEventBegin(ctx->events[0], 0, 0, 0, 0);
2688: #if defined(PETSC_HAVE_THREADSAFETY)
2689:   starttime = MPI_Wtime();
2690: #endif
2691:   DMGetDimension(pack, &dim);
2692:   PetscObjectStateGet((PetscObject)ctx->J, &state);
2693:   if (state != ctx->norm_state) {
2694:     PetscInfo(ts, "Create Landau Jacobian t=%g J.state %" PetscInt64_FMT " --> %" PetscInt64_FMT "\n", (double)time_dummy, ctx->norm_state, state);
2695:     MatZeroEntries(ctx->J);
2696:     LandauFormJacobian_Internal(X, ctx->J, dim, 0.0, (void *)ctx);
2697:     MatViewFromOptions(ctx->J, NULL, "-dm_landau_jacobian_view");
2698:     PetscObjectStateGet((PetscObject)ctx->J, &state);
2699:     ctx->norm_state = state;
2700:   } else {
2701:     PetscInfo(ts, "WARNING Skip forming Jacobian, has not changed %" PetscInt64_FMT "\n", state);
2702:   }
2703:   /* mat vec for op */
2704:   MatMult(ctx->J, X, F); /* C*f */
2705:   /* add time term */
2706:   if (X_t) MatMultAdd(ctx->M, X_t, F, F);
2707: #if defined(PETSC_HAVE_THREADSAFETY)
2708:   if (ctx->stage) {
2709:     endtime = MPI_Wtime();
2710:     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2711:     ctx->times[LANDAU_JACOBIAN] += (endtime - starttime);
2712:     ctx->times[LANDAU_JACOBIAN_COUNT] += 1;
2713:   }
2714: #endif
2715:   PetscLogEventEnd(ctx->events[0], 0, 0, 0, 0);
2716:   PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0);
2717:   if (ctx->stage) {
2718:     PetscLogStagePop();
2719: #if defined(PETSC_HAVE_THREADSAFETY)
2720:     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2721: #endif
2722:   }
2723:   return 0;
2724: }
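
/* usage note (sketch): this is normally registered, after TSSetDM(ts, pack), with
     TSSetIFunction(ts, NULL, DMPlexLandauIFunction, NULL);
   so the Landau context is recovered from the DM attached to the TS. */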

2726: /*@
2727:  DMPlexLandauIJacobian - TS Jacobian construction; note that, despite the name, this adds the shifted mass term to the already assembled collision Jacobian

2729:  Collective

2731:  Input Parameters:
2732: +   ts  - The time stepping context
2733: .   time_dummy - current time (not used)
2734: .   X - Current state
2735: .   U_tdummy - Time derivative of current state (not used)
2736: .   shift - shift for du/dt term
2737: -   actx - Landau context

2739:  Output Parameters:
2740: +   Amat  - Jacobian
2741: -   Pmat  - same as Amat

2743:  Level: beginner

2745:  .keywords: mesh
2746:  .seealso: `DMPlexLandauCreateVelocitySpace()`, `DMPlexLandauIFunction()`
2747:  @*/
2748: PetscErrorCode DMPlexLandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx)
2749: {
2750:   LandauCtx *ctx = NULL;
2751:   PetscInt   dim;
2752:   DM         pack;
2753: #if defined(PETSC_HAVE_THREADSAFETY)
2754:   double starttime, endtime;
2755: #endif
2756:   PetscObjectState state;

2758:   TSGetDM(ts, &pack);
2759:   DMGetApplicationContext(pack, &ctx);
2762:   DMGetDimension(pack, &dim);
2763:   /* get collision Jacobian into A */
2764:   if (ctx->stage) PetscLogStagePush(ctx->stage);
2765:   PetscLogEventBegin(ctx->events[11], 0, 0, 0, 0);
2766:   PetscLogEventBegin(ctx->events[9], 0, 0, 0, 0);
2767: #if defined(PETSC_HAVE_THREADSAFETY)
2768:   starttime = MPI_Wtime();
2769: #endif
2770:   PetscInfo(ts, "Adding mass to Jacobian t=%g, shift=%g\n", (double)time_dummy, (double)shift);
2772:   PetscObjectStateGet((PetscObject)ctx->J, &state);
2774:   if (!ctx->use_matrix_mass) {
2775:     LandauFormJacobian_Internal(X, ctx->J, dim, shift, (void *)ctx);
2776:   } else { /* add mass */
2777:     MatAXPY(Pmat, shift, ctx->M, SAME_NONZERO_PATTERN);
2778:   }
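  /* Note: for an implicit TS the requested matrix is dF/dU + shift*dF/dU_t. With the residual
     F(U,U_t) = M*U_t + J*U assembled in DMPlexLandauIFunction(), either branch above yields the
     collision Jacobian plus shift times the mass matrix. */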
2779: #if defined(PETSC_HAVE_THREADSAFETY)
2780:   if (ctx->stage) {
2781:     endtime = MPI_Wtime();
2782:     ctx->times[LANDAU_OPERATOR] += (endtime - starttime);
2783:     ctx->times[LANDAU_MASS] += (endtime - starttime);
2784:   }
2785: #endif
2786:   PetscLogEventEnd(ctx->events[9], 0, 0, 0, 0);
2787:   PetscLogEventEnd(ctx->events[11], 0, 0, 0, 0);
2788:   if (ctx->stage) {
2789:     PetscLogStagePop();
2790: #if defined(PETSC_HAVE_THREADSAFETY)
2791:     ctx->times[LANDAU_MATRIX_TOTAL] += (endtime - starttime);
2792: #endif
2793:   }
2794:   return 0;
2795: }