/* cusparsematimpl.h */
#ifndef PETSC_CUSPARSEMATIMPL_H
#define PETSC_CUSPARSEMATIMPL_H

#include <petscpkg_version.h>
#include <petsc/private/cudavecimpl.h>
#include <petscaijdevice.h>

#include <cusparse_v2.h>

#include <algorithm>
#include <vector>

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/system/system_error.h>

#define PetscCallThrust(body) \
  do { \
    try { \
      body; \
    } catch (thrust::system_error & e) { \
      SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "Error in Thrust %s", e.what()); \
    } \
  } while (0)
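
/* Example (an illustrative sketch, not part of the original header): PetscCallThrust()
   converts a thrust::system_error thrown by a Thrust call into a PETSc error.
   The device vector "idx" and size "n" below are hypothetical:

     thrust::device_vector<int> idx(n);
     PetscCallThrust(thrust::sequence(idx.begin(), idx.end(), 0));
*/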
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
const cuComplex PETSC_CUSPARSE_ONE = {1.0f, 0.0f};
const cuComplex PETSC_CUSPARSE_ZERO = {0.0f, 0.0f};
#define cusparseXcsrilu02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseCcsrilu02_bufferSize(a, b, c, d, (cuComplex *)e, f, g, h, i)
#define cusparseXcsrilu02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseCcsrilu02_analysis(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsrilu02(a, b, c, d, e, f, g, h, i, j) cusparseCcsrilu02(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsric02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseCcsric02_bufferSize(a, b, c, d, (cuComplex *)e, f, g, h, i)
#define cusparseXcsric02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseCcsric02_analysis(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#define cusparseXcsric02(a, b, c, d, e, f, g, h, i, j) cusparseCcsric02(a, b, c, d, (cuComplex *)e, f, g, h, i, j)
#elif defined(PETSC_USE_REAL_DOUBLE)
const cuDoubleComplex PETSC_CUSPARSE_ONE = {1.0, 0.0};
const cuDoubleComplex PETSC_CUSPARSE_ZERO = {0.0, 0.0};
#define cusparseXcsrilu02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseZcsrilu02_bufferSize(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i)
#define cusparseXcsrilu02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseZcsrilu02_analysis(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsrilu02(a, b, c, d, e, f, g, h, i, j) cusparseZcsrilu02(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsric02_bufferSize(a, b, c, d, e, f, g, h, i) cusparseZcsric02_bufferSize(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i)
#define cusparseXcsric02_analysis(a, b, c, d, e, f, g, h, i, j) cusparseZcsric02_analysis(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#define cusparseXcsric02(a, b, c, d, e, f, g, h, i, j) cusparseZcsric02(a, b, c, d, (cuDoubleComplex *)e, f, g, h, i, j)
#endif
#else
const PetscScalar PETSC_CUSPARSE_ONE = 1.0;
const PetscScalar PETSC_CUSPARSE_ZERO = 0.0;
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrilu02_bufferSize cusparseScsrilu02_bufferSize
#define cusparseXcsrilu02_analysis cusparseScsrilu02_analysis
#define cusparseXcsrilu02 cusparseScsrilu02
#define cusparseXcsric02_bufferSize cusparseScsric02_bufferSize
#define cusparseXcsric02_analysis cusparseScsric02_analysis
#define cusparseXcsric02 cusparseScsric02
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrilu02_bufferSize cusparseDcsrilu02_bufferSize
#define cusparseXcsrilu02_analysis cusparseDcsrilu02_analysis
#define cusparseXcsrilu02 cusparseDcsrilu02
#define cusparseXcsric02_bufferSize cusparseDcsric02_bufferSize
#define cusparseXcsric02_analysis cusparseDcsric02_analysis
#define cusparseXcsric02 cusparseDcsric02
#endif
#endif
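
/* Illustrative sketch (hypothetical variable names, error checking and buffer cleanup
   omitted): with the X-prefixed wrappers above, an ILU(0) factorization follows the
   usual cuSPARSE three-phase pattern regardless of precision:

     int bufferSize;
     cusparseXcsrilu02_bufferSize(handle, m, nnz, descr, a, i, j, info, &bufferSize);
     cudaMalloc(&buffer, bufferSize);
     cusparseXcsrilu02_analysis(handle, m, nnz, descr, a, i, j, info, policy, buffer);
     cusparseXcsrilu02(handle, m, nnz, descr, a, i, j, info, policy, buffer);
*/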
#if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0)
#define csrsvInfo_t csrsv2Info_t
#define cusparseCreateCsrsvInfo cusparseCreateCsrsv2Info
#define cusparseDestroyCsrsvInfo cusparseDestroyCsrsv2Info
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_buffsize(a, b, c, d, e, f, g, h, i, j) cusparseCcsrsv2_bufferSize(a, b, c, d, e, (cuComplex *)(f), g, h, i, j)
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseCcsrsv2_analysis(a, b, c, d, e, (const cuComplex *)(f), g, h, i, j, k)
#define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k, l, m, n) cusparseCcsrsv2_solve(a, b, c, d, (const cuComplex *)(e), f, (const cuComplex *)(g), h, i, j, (const cuComplex *)(k), (cuComplex *)(l), m, n)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_buffsize(a, b, c, d, e, f, g, h, i, j) cusparseZcsrsv2_bufferSize(a, b, c, d, e, (cuDoubleComplex *)(f), g, h, i, j)
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseZcsrsv2_analysis(a, b, c, d, e, (const cuDoubleComplex *)(f), g, h, i, j, k)
#define cusparseXcsrsv_solve(a, b, c, d, e, f, g, h, i, j, k, l, m, n) cusparseZcsrsv2_solve(a, b, c, d, (const cuDoubleComplex *)(e), f, (const cuDoubleComplex *)(g), h, i, j, (const cuDoubleComplex *)(k), (cuDoubleComplex *)(l), m, n)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_buffsize cusparseScsrsv2_bufferSize
#define cusparseXcsrsv_analysis cusparseScsrsv2_analysis
#define cusparseXcsrsv_solve cusparseScsrsv2_solve
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_buffsize cusparseDcsrsv2_bufferSize
#define cusparseXcsrsv_analysis cusparseDcsrsv2_analysis
#define cusparseXcsrsv_solve cusparseDcsrsv2_solve
#endif
#endif
#else /* PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) */
#define csrsvInfo_t cusparseSolveAnalysisInfo_t
#define cusparseCreateCsrsvInfo cusparseCreateSolveAnalysisInfo
#define cusparseDestroyCsrsvInfo cusparseDestroySolveAnalysisInfo
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_solve(a, b, c, d_IGNORED, e, f, g, h, i, j, k, l, m_IGNORED, n_IGNORED) cusparseCcsrsv_solve((a), (b), (c), (cuComplex *)(e), (f), (cuComplex *)(g), (h), (i), (j), (cuComplex *)(k), (cuComplex *)(l))
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j_IGNORED, k_IGNORED) cusparseCcsrsv_analysis((a), (b), (c), (d), (e), (cuComplex *)(f), (g), (h), (i))
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_solve(a, b, c, d_IGNORED, e, f, g, h, i, j, k, l, m_IGNORED, n_IGNORED) \
  cusparseZcsrsv_solve((a), (b), (c), (cuDoubleComplex *)(e), (f), (cuDoubleComplex *)(g), (h), (i), (j), (cuDoubleComplex *)(k), (cuDoubleComplex *)(l))
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j_IGNORED, k_IGNORED) cusparseZcsrsv_analysis((a), (b), (c), (d), (e), (cuDoubleComplex *)(f), (g), (h), (i))
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparseXcsrsv_solve cusparseScsrsv_solve
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseScsrsv_analysis(a, b, c, d, e, f, g, h, i)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparseXcsrsv_solve cusparseDcsrsv_solve
#define cusparseXcsrsv_analysis(a, b, c, d, e, f, g, h, i, j, k) cusparseDcsrsv_analysis(a, b, c, d, e, f, g, h, i)
#endif
#endif
#endif /* PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) */
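
/* Illustrative sketch (hypothetical names, error checking omitted): a triangular
   solve L y = x through the version-agnostic wrappers above (CUDA >= 9 path shown;
   cusparseXcsrsv_buffsize() does not exist on the pre-9.0 path):

     csrsvInfo_t info;
     int bufferSize;
     cusparseCreateCsrsvInfo(&info);
     cusparseXcsrsv_buffsize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, nnz, descr, a, i, j, info, &bufferSize);
     cudaMalloc(&buffer, bufferSize);
     cusparseXcsrsv_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, nnz, descr, a, i, j, info, policy, buffer);
     cusparseXcsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, nnz, &alpha, descr, a, i, j, info, x, y, policy, buffer);
*/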
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
#define cusparse_csr2csc cusparseCsr2cscEx2
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype CUDA_C_32F
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseCcsrgeam2(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s, t)
#define cusparse_csr_spgeam_bufferSize(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseCcsrgeam2_bufferSizeExt(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s, t)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_C_64F
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseZcsrgeam2(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s, t)
#define cusparse_csr_spgeam_bufferSize(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) \
  cusparseZcsrgeam2_bufferSizeExt(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s, t)
#endif
#else /* not complex */
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_scalartype CUDA_R_32F
#define cusparse_csr_spgeam cusparseScsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseScsrgeam2_bufferSizeExt
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_scalartype CUDA_R_64F
#define cusparse_csr_spgeam cusparseDcsrgeam2
#define cusparse_csr_spgeam_bufferSize cusparseDcsrgeam2_bufferSizeExt
#endif
#endif
#else /* PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) */
#if defined(PETSC_USE_COMPLEX)
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv(a, b, c, d, e, f, g, h, i, j, k, l, m) cusparseCcsrmv((a), (b), (c), (d), (e), (cuComplex *)(f), (g), (cuComplex *)(h), (i), (j), (cuComplex *)(k), (cuComplex *)(l), (cuComplex *)(m))
#define cusparse_csr_spmm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) cusparseCcsrmm((a), (b), (c), (d), (e), (f), (cuComplex *)(g), (h), (cuComplex *)(i), (j), (k), (cuComplex *)(l), (m), (cuComplex *)(n), (cuComplex *)(o), (p))
#define cusparse_csr2csc(a, b, c, d, e, f, g, h, i, j, k, l) cusparseCcsr2csc((a), (b), (c), (d), (cuComplex *)(e), (f), (g), (cuComplex *)(h), (i), (j), (k), (l))
#define cusparse_hyb_spmv(a, b, c, d, e, f, g, h) cusparseChybmv((a), (b), (cuComplex *)(c), (d), (e), (cuComplex *)(f), (cuComplex *)(g), (cuComplex *)(h))
#define cusparse_csr2hyb(a, b, c, d, e, f, g, h, i, j) cusparseCcsr2hyb((a), (b), (c), (d), (cuComplex *)(e), (f), (g), (h), (i), (j))
#define cusparse_hyb2csr(a, b, c, d, e, f) cusparseChyb2csr((a), (b), (c), (cuComplex *)(d), (e), (f))
#define cusparse_csr_spgemm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseCcsrgemm(a, b, c, d, e, f, g, h, (cuComplex *)i, j, k, l, m, (cuComplex *)n, o, p, q, (cuComplex *)r, s, t)
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s) cusparseCcsrgeam(a, b, c, (cuComplex *)d, e, f, (cuComplex *)g, h, i, (cuComplex *)j, k, l, (cuComplex *)m, n, o, p, (cuComplex *)q, r, s)
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv(a, b, c, d, e, f, g, h, i, j, k, l, m) cusparseZcsrmv((a), (b), (c), (d), (e), (cuDoubleComplex *)(f), (g), (cuDoubleComplex *)(h), (i), (j), (cuDoubleComplex *)(k), (cuDoubleComplex *)(l), (cuDoubleComplex *)(m))
#define cusparse_csr_spmm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p) \
  cusparseZcsrmm((a), (b), (c), (d), (e), (f), (cuDoubleComplex *)(g), (h), (cuDoubleComplex *)(i), (j), (k), (cuDoubleComplex *)(l), (m), (cuDoubleComplex *)(n), (cuDoubleComplex *)(o), (p))
#define cusparse_csr2csc(a, b, c, d, e, f, g, h, i, j, k, l) cusparseZcsr2csc((a), (b), (c), (d), (cuDoubleComplex *)(e), (f), (g), (cuDoubleComplex *)(h), (i), (j), (k), (l))
#define cusparse_hyb_spmv(a, b, c, d, e, f, g, h) cusparseZhybmv((a), (b), (cuDoubleComplex *)(c), (d), (e), (cuDoubleComplex *)(f), (cuDoubleComplex *)(g), (cuDoubleComplex *)(h))
#define cusparse_csr2hyb(a, b, c, d, e, f, g, h, i, j) cusparseZcsr2hyb((a), (b), (c), (d), (cuDoubleComplex *)(e), (f), (g), (h), (i), (j))
#define cusparse_hyb2csr(a, b, c, d, e, f) cusparseZhyb2csr((a), (b), (c), (cuDoubleComplex *)(d), (e), (f))
#define cusparse_csr_spgemm(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t) cusparseZcsrgemm(a, b, c, d, e, f, g, h, (cuDoubleComplex *)i, j, k, l, m, (cuDoubleComplex *)n, o, p, q, (cuDoubleComplex *)r, s, t)
#define cusparse_csr_spgeam(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s) \
  cusparseZcsrgeam(a, b, c, (cuDoubleComplex *)d, e, f, (cuDoubleComplex *)g, h, i, (cuDoubleComplex *)j, k, l, (cuDoubleComplex *)m, n, o, p, (cuDoubleComplex *)q, r, s)
#endif
#else
#if defined(PETSC_USE_REAL_SINGLE)
#define cusparse_csr_spmv cusparseScsrmv
#define cusparse_csr_spmm cusparseScsrmm
#define cusparse_csr2csc cusparseScsr2csc
#define cusparse_hyb_spmv cusparseShybmv
#define cusparse_csr2hyb cusparseScsr2hyb
#define cusparse_hyb2csr cusparseShyb2csr
#define cusparse_csr_spgemm cusparseScsrgemm
#define cusparse_csr_spgeam cusparseScsrgeam
#elif defined(PETSC_USE_REAL_DOUBLE)
#define cusparse_csr_spmv cusparseDcsrmv
#define cusparse_csr_spmm cusparseDcsrmm
#define cusparse_csr2csc cusparseDcsr2csc
#define cusparse_hyb_spmv cusparseDhybmv
#define cusparse_csr2hyb cusparseDcsr2hyb
#define cusparse_hyb2csr cusparseDhyb2csr
#define cusparse_csr_spgemm cusparseDcsrgemm
#define cusparse_csr_spgeam cusparseDcsrgeam
#endif
#endif
#endif /* PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) */
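
/* Illustrative sketch (hypothetical names, error checking omitted): C = alpha*A + beta*B
   through the spgeam wrappers (CUDA >= 11 path shown); a full implementation would call
   cusparseXcsrgeam2Nnz() in between to determine C's sparsity pattern and allocate C:

     size_t bufferSize;
     cusparse_csr_spgeam_bufferSize(handle, m, n, &alpha, descrA, nnzA, Aa, Ai, Aj, &beta, descrB, nnzB, Ba, Bi, Bj, descrC, Ca, Ci, Cj, &bufferSize);
     cudaMalloc(&buffer, bufferSize);
     cusparse_csr_spgeam(handle, m, n, &alpha, descrA, nnzA, Aa, Ai, Aj, &beta, descrB, nnzB, Ba, Bi, Bj, descrC, Ca, Ci, Cj, buffer);
*/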
#define THRUSTINTARRAY32 thrust::device_vector<int>
#define THRUSTINTARRAY thrust::device_vector<PetscInt>
#define THRUSTARRAY thrust::device_vector<PetscScalar>

/* A CSR matrix structure */
struct CsrMatrix {
  PetscInt num_rows;
  PetscInt num_cols;
  PetscInt num_entries;
  THRUSTINTARRAY32 *row_offsets;
  THRUSTINTARRAY32 *column_indices;
  THRUSTARRAY *values;
};
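
/* Illustrative sketch (hypothetical, error handling omitted): allocating a CsrMatrix
   for an m x n matrix with nnz entries; the Thrust device vectors live in GPU memory:

     CsrMatrix *csr = new CsrMatrix;
     csr->num_rows = m;
     csr->num_cols = n;
     csr->num_entries = nnz;
     csr->row_offsets = new THRUSTINTARRAY32(m + 1);
     csr->column_indices = new THRUSTINTARRAY32(nnz);
     csr->values = new THRUSTARRAY(nnz);
*/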
/* This is a struct holding the relevant data needed for a MatSolve */
struct Mat_SeqAIJCUSPARSETriFactorStruct {
  /* Data needed for the triangular solve */
  cusparseMatDescr_t descr;
  cusparseOperation_t solveOp;
  CsrMatrix *csrMat;
  csrsvInfo_t solveInfo;
  cusparseSolvePolicy_t solvePolicy; /* whether level information is generated and used */
  int solveBufferSize;
  void *solveBuffer;
  size_t csr2cscBufferSize; /* to transpose the triangular factor (only used for CUDA >= 11.0) */
  void *csr2cscBuffer;
  PetscScalar *AA_h; /* managed host buffer for moving values to the GPU */
};
/* This is a larger struct holding all the triangular factors for a solve, a transpose solve, and any indices used in a reordering */
struct Mat_SeqAIJCUSPARSETriFactors {
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtr; /* pointer for the lower triangular (factored matrix) on the GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtr; /* pointer for the upper triangular (factored matrix) on the GPU */
  Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorPtrTranspose; /* pointer for the lower triangular (factored matrix) on the GPU for the transpose (useful for BiCG) */
  Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorPtrTranspose; /* pointer for the upper triangular (factored matrix) on the GPU for the transpose (useful for BiCG) */
  THRUSTINTARRAY *rpermIndices; /* indices used for any reordering */
  THRUSTINTARRAY *cpermIndices; /* indices used for any reordering */
  THRUSTARRAY *workVector;
  cusparseHandle_t handle; /* a handle to the cusparse library */
  PetscInt nnz; /* number of nonzeros ... needed for accurate logging between ICC and ILU */
  PetscScalar *a_band_d; /* GPU data for the banded CSR LU factorization matrix, diag(L)=1 */
  int *i_band_d; /* this could be optimized away */
  cudaDeviceProp dev_prop;
  PetscBool init_dev_prop;

  /* csrilu0/csric0 appeared in cuSPARSE 8.0, but we use them along with cusparseSpSV,
     which first appeared in cuSPARSE 11.5 (CUDA 11.3).
  */
  PetscBool factorizeOnDevice; /* Do the factorization on the device or not */
#if CUSPARSE_VERSION >= 11500
  PetscScalar *csrVal;
  int *csrRowPtr, *csrColIdx; /* a,i,j of M. Using int since some cusparse APIs only support 32-bit indices */

  /* Mixed mat descriptor types? Yes, different cusparse APIs use different types */
  cusparseMatDescr_t matDescr_M;
  cusparseSpMatDescr_t spMatDescr_L, spMatDescr_U;
  cusparseSpSVDescr_t spsvDescr_L, spsvDescr_Lt, spsvDescr_U, spsvDescr_Ut;

  cusparseDnVecDescr_t dnVecDescr_X, dnVecDescr_Y;
  PetscScalar *X, *Y; /* data arrays of the dense vectors X and Y */

  /* Mixed size types? Yes, CUDA 11.7.0 declared cusparseDcsrilu02_bufferSizeExt(), which returns size_t, but did not implement it! */
  int factBufferSize_M; /* M ~= LU or LLt */
  size_t spsvBufferSize_L, spsvBufferSize_Lt, spsvBufferSize_U, spsvBufferSize_Ut;
  /* cusparse needs various buffers for the factorization and the solves with L, U, Lt, or Ut.
     To save memory, we share the factorization buffer with one of spsvBuffer_L/U.
  */
  void *factBuffer_M, *spsvBuffer_L, *spsvBuffer_U, *spsvBuffer_Lt, *spsvBuffer_Ut;

  csrilu02Info_t ilu0Info_M;
  csric02Info_t ic0Info_M;
  int structural_zero, numerical_zero;
  cusparseSolvePolicy_t policy_M;

  /* In MatSolveTranspose() for ILU0, we use these two flags to do an on-demand solve */
  PetscBool createdTransposeSpSVDescr; /* Have we created the SpSV descriptors for Lt, Ut? */
  PetscBool updatedTransposeSpSVAnalysis; /* Have we updated the SpSV analysis with the latest L, U values? */

  PetscLogDouble numericFactFlops; /* Estimated FLOPs in the ILU0/ICC0 numeric factorization */
#endif
};
struct Mat_CusparseSpMV {
  PetscBool initialized; /* Don't rely on spmvBuffer != NULL to test if the struct is initialized, */
  size_t spmvBufferSize; /* since I'm not sure whether spmvBuffer can be NULL even after cusparseSpMV_bufferSize() */
  void *spmvBuffer;
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) /* these are present from CUDA 10.1, but PETSc code makes use of them from CUDA 11 on */
  cusparseDnVecDescr_t vecXDescr, vecYDescr; /* descriptors for the dense vectors in y=op(A)x */
#endif
};
/* This is a struct holding the relevant data needed for a MatMult */
struct Mat_SeqAIJCUSPARSEMultStruct {
  void *mat; /* opaque pointer to a matrix. This could be either a cusparseHybMat_t or a CsrMatrix */
  cusparseMatDescr_t descr; /* Data needed to describe the matrix for a multiply */
  THRUSTINTARRAY *cprowIndices; /* compressed row indices used in the parallel SpMV */
  PetscScalar *alpha_one; /* pointer to a device "scalar" storing the alpha parameter in the SpMV */
  PetscScalar *beta_zero; /* pointer to a device "scalar" storing the beta parameter in the SpMV as zero */
  PetscScalar *beta_one; /* pointer to a device "scalar" storing the beta parameter in the SpMV as one */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  cusparseSpMatDescr_t matDescr; /* descriptor for the matrix, used by SpMV and SpMM */
  Mat_CusparseSpMV cuSpMV[3]; /* different Mat_CusparseSpMV structs for non-transpose, transpose, conj-transpose */
  Mat_SeqAIJCUSPARSEMultStruct() : matDescr(NULL)
  {
    for (int i = 0; i < 3; i++) cuSpMV[i].initialized = PETSC_FALSE;
  }
#endif
};
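
/* Illustrative sketch (hypothetical names, error checking omitted): how the members above
   combine in a generic-API SpMV y = A*x (CUDA >= 11). Since alpha_one/beta_zero are device
   pointers, this assumes the handle's pointer mode has been set to CUSPARSE_POINTER_MODE_DEVICE:

     Mat_CusparseSpMV *s = &mult->cuSpMV[0]; // slot 0: non-transpose
     cusparseCreateDnVec(&s->vecXDescr, n, xarray, cusparse_scalartype);
     cusparseCreateDnVec(&s->vecYDescr, m, yarray, cusparse_scalartype);
     cusparseSpMV_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mult->alpha_one, mult->matDescr, s->vecXDescr, mult->beta_zero, s->vecYDescr, cusparse_scalartype, alg, &s->spmvBufferSize);
     cudaMalloc(&s->spmvBuffer, s->spmvBufferSize);
     cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mult->alpha_one, mult->matDescr, s->vecXDescr, mult->beta_zero, s->vecYDescr, cusparse_scalartype, alg, s->spmvBuffer);
     s->initialized = PETSC_TRUE;
*/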
/* This is a larger struct holding all the matrices for a SpMV and SpMV Transpose */
struct Mat_SeqAIJCUSPARSE {
  Mat_SeqAIJCUSPARSEMultStruct *mat; /* pointer to the matrix on the GPU */
  Mat_SeqAIJCUSPARSEMultStruct *matTranspose; /* pointer to the transposed matrix on the GPU (useful for BiCG) */
  THRUSTARRAY *workVector; /* pointer to a work vector into which we can copy the relevant entries of a vector we want to multiply */
  THRUSTINTARRAY32 *rowoffsets_gpu; /* row offsets on the GPU in non-compressed-row format; used to convert CSR to CSC */
  PetscInt nrows; /* number of rows of the matrix seen by the GPU */
  MatCUSPARSEStorageFormat format; /* the storage format for the matrix on the device */
  PetscBool use_cpu_solve; /* Use the AIJ_Seq (I)LU solve */
  cudaStream_t stream; /* a stream for the parallel SpMV ... this is not owned and should not be deleted */
  cusparseHandle_t handle; /* a handle to the cusparse library ... this may not be owned (if we're working in parallel, i.e. multiple GPUs) */
  PetscObjectState nonzerostate; /* track the nonzero state to possibly recreate the GPU matrix */
#if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0)
  size_t csr2cscBufferSize; /* stuff used to compute the matTranspose above */
  void *csr2cscBuffer; /* This is used as a C struct and is calloc'ed by PetscNew() */
  cusparseCsr2CscAlg_t csr2cscAlg; /* algorithms can be selected from command line options */
  cusparseSpMVAlg_t spmvAlg;
  cusparseSpMMAlg_t spmmAlg;
#endif
  THRUSTINTARRAY *csr2csc_i;
  PetscSplitCSRDataStructure deviceMat; /* Matrix on the device for, e.g., assembly */

  /* Stuff for basic COO support */
  THRUSTINTARRAY *cooPerm; /* permutation array that sorts the input COO entries by row and column */
  THRUSTINTARRAY *cooPerm_a; /* array indicating that the i-th nonzero (after sorting) is the j-th unique nonzero */

  /* Stuff for extended COO support */
  PetscBool use_extended_coo; /* Use the extended COO format */
  PetscCount *jmap_d; /* perm[disp+jmap[i]..disp+jmap[i+1]) gives the indices of the entries in v[] associated with the i-th nonzero of the matrix */
  PetscCount *perm_d;

  Mat_SeqAIJCUSPARSE() : use_extended_coo(PETSC_FALSE), jmap_d(NULL), perm_d(NULL) { }
};
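
/* Illustrative sketch (hypothetical, not the actual kernel): how jmap_d/perm_d are meant
   to be used when summing the values of a COO input array v[] into the i-th nonzero a[i]
   of the matrix; duplicate COO entries that map to the same nonzero are accumulated:

     for (PetscCount k = jmap_d[i]; k < jmap_d[i + 1]; k++) a[i] += v[perm_d[k]];
*/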
typedef struct Mat_SeqAIJCUSPARSETriFactors *Mat_SeqAIJCUSPARSETriFactors_p;

PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat);
PETSC_INTERN PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic(Mat, PetscCount, PetscInt[], PetscInt[]);
PETSC_INTERN PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE_Basic(Mat, const PetscScalar[], InsertMode);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat, Mat, MatReuse, Mat *);
PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p *);
static inline bool isCudaMem(const void *data)
{
  cudaError_t cerr;
  struct cudaPointerAttributes attr;
  enum cudaMemoryType mtype;

  cerr = cudaPointerGetAttributes(&attr, data); /* Do not check the error code, since before CUDA 11.0 passing a host pointer returns cudaErrorInvalidValue */
  cudaGetLastError();                           /* Reset the last error */
#if (CUDART_VERSION < 10000)
  mtype = attr.memoryType;
#else
  mtype = attr.type;
#endif
  return (cerr == cudaSuccess && mtype == cudaMemoryTypeDevice);
}
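
/* Illustrative sketch (hypothetical): isCudaMem() can be used, e.g., to pick the proper
   cudaMemcpy direction for a pointer of unknown origin:

     cudaMemcpyKind kind = isCudaMem(ptr) ? cudaMemcpyDeviceToDevice : cudaMemcpyHostToDevice;
     cudaMemcpy(dest_d, ptr, bytes, kind);
*/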
#endif // PETSC_CUSPARSEMATIMPL_H