Actual source code: dgefa4.c
/*
    Inverts a 4 by 4 matrix using Gaussian elimination with partial pivoting.

    Used by the sparse factorization routines in
      src/mat/impls/baij/seq

    This is a combination of the LINPACK routines
    dgefa() and dgedi() specialized for a size of 4.
*/
#include <petscsys.h>
PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4(MatScalar *a, PetscReal shift, PetscBool allowzeropivot, PetscBool *zeropivotdetected)
{
  PetscInt   i__2, i__3, kp1, j, k, l, ll, i, ipvt[4], kb, k3;
  PetscInt   k4, j3;
  MatScalar *aa, *ax, *ay, work[16], stmp;
  MatReal    tmp, max;

  if (zeropivotdetected) *zeropivotdetected = PETSC_FALSE;
  /* scale the user-provided shift by the (absolute) size of the diagonal */
  shift = .25 * shift * (1.e-12 + PetscAbsScalar(a[0]) + PetscAbsScalar(a[5]) + PetscAbsScalar(a[10]) + PetscAbsScalar(a[15]));

  /* Parameter adjustments */
  a -= 5;

  for (k = 1; k <= 3; ++k) {
    kp1 = k + 1;
    k3  = 4 * k;
    k4  = k3 + k;

    /* find l = pivot index */
    i__2 = 5 - k;
    aa   = &a[k4];
    max  = PetscAbsScalar(aa[0]);
    l    = 1;
    for (ll = 1; ll < i__2; ll++) {
      tmp = PetscAbsScalar(aa[ll]);
      if (tmp > max) {
        max = tmp;
        l   = ll + 1;
      }
    }
    l += k - 1;
    ipvt[k - 1] = l;

    if (a[l + k3] == 0.0) {
      if (shift == 0.0) {
        if (allowzeropivot) {
          PetscInfo(NULL, "Zero pivot, row %" PetscInt_FMT "\n", k - 1);
          if (zeropivotdetected) *zeropivotdetected = PETSC_TRUE;
        } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_MAT_LU_ZRPVT, "Zero pivot, row %" PetscInt_FMT, k - 1);
      } else {
        /* SHIFT is applied to SINGLE diagonal entry; does this make any sense? */
        a[l + k3] = shift;
      }
    }

    /* interchange if necessary */
    if (l != k) {
      stmp      = a[l + k3];
      a[l + k3] = a[k4];
      a[k4]     = stmp;
    }

    /* compute multipliers */
    stmp = -1. / a[k4];
    i__2 = 4 - k;
    aa   = &a[1 + k4];
    for (ll = 0; ll < i__2; ll++) aa[ll] *= stmp;

    /* row elimination with column indexing */
    ax = &a[k4 + 1];
    for (j = kp1; j <= 4; ++j) {
      j3   = 4 * j;
      stmp = a[l + j3];
      if (l != k) {
        a[l + j3] = a[k + j3];
        a[k + j3] = stmp;
      }

      i__3 = 4 - k;
      ay   = &a[1 + k + j3];
      for (ll = 0; ll < i__3; ll++) ay[ll] += stmp * ax[ll];
    }
  }
  ipvt[3] = 4;
  if (a[20] == 0.0) {
    if (PetscLikely(allowzeropivot)) {
      PetscInfo(NULL, "Zero pivot, row 3\n");
      if (zeropivotdetected) *zeropivotdetected = PETSC_TRUE;
    } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_MAT_LU_ZRPVT, "Zero pivot, row 3");
  }

  /* Now form the inverse */
  /* compute inverse(u) */
  for (k = 1; k <= 4; ++k) {
    k3    = 4 * k;
    k4    = k3 + k;
    a[k4] = 1.0 / a[k4];
    stmp  = -a[k4];
    i__2  = k - 1;
    aa    = &a[k3 + 1];
    for (ll = 0; ll < i__2; ll++) aa[ll] *= stmp;
    kp1 = k + 1;
    if (4 < kp1) continue;
    ax = aa;
    for (j = kp1; j <= 4; ++j) {
      j3        = 4 * j;
      stmp      = a[k + j3];
      a[k + j3] = 0.0;
      ay        = &a[j3 + 1];
      for (ll = 0; ll < k; ll++) ay[ll] += stmp * ax[ll];
    }
  }
  /* form inverse(u)*inverse(l) */
  for (kb = 1; kb <= 3; ++kb) {
    k   = 4 - kb;
    k3  = 4 * k;
    kp1 = k + 1;
    aa  = a + k3;
    for (i = kp1; i <= 4; ++i) {
      work[i - 1] = aa[i];
      aa[i]       = 0.0;
    }
    for (j = kp1; j <= 4; ++j) {
      stmp = work[j - 1];
      ax   = &a[4 * j + 1];
      ay   = &a[k3 + 1];
      ay[0] += stmp * ax[0];
      ay[1] += stmp * ax[1];
      ay[2] += stmp * ax[2];
      ay[3] += stmp * ax[3];
    }
    l = ipvt[k - 1];
    if (l != k) {
      ax    = &a[k3 + 1];
      ay    = &a[4 * l + 1];
      stmp  = ax[0];
      ax[0] = ay[0];
      ay[0] = stmp;
      stmp  = ax[1];
      ax[1] = ay[1];
      ay[1] = stmp;
      stmp  = ax[2];
      ax[2] = ay[2];
      ay[2] = stmp;
      stmp  = ax[3];
      ax[3] = ay[3];
      ay[3] = stmp;
    }
  }
  return 0;
}
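/*
   A minimal usage sketch (illustrative only, not part of the library source):
   it shows one way the kernel above might be driven on a single dense 4x4
   block.  The 16 entries are stored contiguously and the inverse overwrites
   them in the same layout.  The helper name ExampleInvert4x4Block is
   hypothetical; PetscCall() is assumed to be available (PETSc 3.17 or later).
*/
static PetscErrorCode ExampleInvert4x4Block(void)
{
  /* diag(2, 4, 8, 16); being diagonal, row- and column-major layouts coincide */
  MatScalar block[16] = {2.0, 0.0, 0.0, 0.0,
                         0.0, 4.0, 0.0, 0.0,
                         0.0, 0.0, 8.0, 0.0,
                         0.0, 0.0, 0.0, 16.0};
  PetscBool zeropivot;

  /* shift = 0.0 and allowzeropivot = PETSC_FALSE ask for a hard error on a zero pivot */
  PetscCall(PetscKernel_A_gets_inverse_A_4(block, 0.0, PETSC_FALSE, &zeropivot));
  /* block now holds diag(0.5, 0.25, 0.125, 0.0625) and zeropivot is PETSC_FALSE */
  return 0;
}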
#if defined(PETSC_HAVE_SSE)
#include PETSC_HAVE_SSE

PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4_SSE(float *a)
{
  /*
     This routine is converted from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
     Order Number: 245043-001
     March 1999
     https://www.intel.com/content/www/us/en/homepage.html

     Inverse of a 4x4 matrix via Cramer's Rule:
     bool Invert4x4(SMLXMatrix &);
  */
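  /*
     Sketch of what the macro sequence below computes (as described in the
     Intel application note cited above): form the 16 cofactors of the matrix,
     accumulate det(A) by dotting one set of matrix entries with their
     cofactors, approximate 1/det(A) with rcpss refined by a single
     Newton-Raphson step, and scale the adjugate, so that the result
     A^{-1} = adj(A) / det(A) overwrites a[] in place.
  */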
  SSE_SCOPE_BEGIN;
  SSE_INLINE_BEGIN_1(a)

  /* ----------------------------------------------- */

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_4, XMM0)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_8, XMM5)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_12, XMM5)

  SSE_COPY_PS(XMM3, XMM0)
  SSE_SHUFFLE(XMM3, XMM5, 0x88)

  SSE_SHUFFLE(XMM5, XMM0, 0xDD)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_2, XMM0)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_6, XMM0)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_10, XMM6)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_14, XMM6)

  SSE_COPY_PS(XMM4, XMM0)
  SSE_SHUFFLE(XMM4, XMM6, 0x88)

  SSE_SHUFFLE(XMM6, XMM0, 0xDD)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7, XMM4)
  SSE_MULT_PS(XMM7, XMM6)

  SSE_SHUFFLE(XMM7, XMM7, 0xB1)

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM7)

  SSE_COPY_PS(XMM2, XMM3)
  SSE_MULT_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  SSE_COPY_PS(XMM1, XMM5)
  SSE_MULT_PS(XMM1, XMM7)
  SSE_SUB_PS(XMM1, XMM0)

  SSE_MULT_PS(XMM7, XMM3)
  SSE_SUB_PS(XMM7, XMM2)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_4, XMM7)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM4)

  SSE_SHUFFLE(XMM0, XMM0, 0xB1)

  SSE_COPY_PS(XMM2, XMM6)
  SSE_MULT_PS(XMM2, XMM0)
  SSE_ADD_PS(XMM2, XMM1)

  SSE_COPY_PS(XMM7, XMM3)
  SSE_MULT_PS(XMM7, XMM0)

  SSE_SHUFFLE(XMM0, XMM0, 0x4E)

  SSE_COPY_PS(XMM1, XMM6)
  SSE_MULT_PS(XMM1, XMM0)
  SSE_SUB_PS(XMM2, XMM1)

  SSE_MULT_PS(XMM0, XMM3)
  SSE_SUB_PS(XMM0, XMM7)

  SSE_SHUFFLE(XMM0, XMM0, 0x4E)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_12, XMM0)
  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7, XMM5)
  SSE_SHUFFLE(XMM7, XMM5, 0x4E)
  SSE_MULT_PS(XMM7, XMM6)

  SSE_SHUFFLE(XMM7, XMM7, 0xB1)

  SSE_SHUFFLE(XMM4, XMM4, 0x4E)

  SSE_COPY_PS(XMM0, XMM4)
  SSE_MULT_PS(XMM0, XMM7)
  SSE_ADD_PS(XMM0, XMM2)

  SSE_COPY_PS(XMM2, XMM3)
  SSE_MULT_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  SSE_COPY_PS(XMM1, XMM4)
  SSE_MULT_PS(XMM1, XMM7)
  SSE_SUB_PS(XMM0, XMM1)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_0, XMM0)

  SSE_MULT_PS(XMM7, XMM3)
  SSE_SUB_PS(XMM7, XMM2)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1, XMM3)
  SSE_MULT_PS(XMM1, XMM5)

  SSE_SHUFFLE(XMM1, XMM1, 0xB1)

  SSE_COPY_PS(XMM0, XMM6)
  SSE_MULT_PS(XMM0, XMM1)
  SSE_ADD_PS(XMM0, XMM7)

  SSE_COPY_PS(XMM2, XMM4)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_SUB_PS_M(XMM2, SSE_ARG_1, FLOAT_12)

  SSE_SHUFFLE(XMM1, XMM1, 0x4E)

  SSE_COPY_PS(XMM7, XMM6)
  SSE_MULT_PS(XMM7, XMM1)
  SSE_SUB_PS(XMM7, XMM0)

  SSE_MULT_PS(XMM1, XMM4)
  SSE_SUB_PS(XMM2, XMM1)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_12, XMM2)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1, XMM3)
  SSE_MULT_PS(XMM1, XMM6)

  SSE_SHUFFLE(XMM1, XMM1, 0xB1)

  SSE_COPY_PS(XMM2, XMM4)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_LOAD_PS(SSE_ARG_1, FLOAT_4, XMM0)
  SSE_SUB_PS(XMM0, XMM2)

  SSE_COPY_PS(XMM2, XMM5)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_ADD_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM1, XMM1, 0x4E)

  SSE_COPY_PS(XMM7, XMM4)
  SSE_MULT_PS(XMM7, XMM1)
  SSE_ADD_PS(XMM7, XMM0)

  SSE_MULT_PS(XMM1, XMM5)
  SSE_SUB_PS(XMM2, XMM1)
  /* ----------------------------------------------- */

  SSE_MULT_PS(XMM4, XMM3)

  SSE_SHUFFLE(XMM4, XMM4, 0xB1)

  SSE_COPY_PS(XMM1, XMM6)
  SSE_MULT_PS(XMM1, XMM4)
  SSE_ADD_PS(XMM1, XMM7)

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM4)
  SSE_LOAD_PS(SSE_ARG_1, FLOAT_12, XMM7)
  SSE_SUB_PS(XMM7, XMM0)

  SSE_SHUFFLE(XMM4, XMM4, 0x4E)

  SSE_MULT_PS(XMM6, XMM4)
  SSE_SUB_PS(XMM1, XMM6)

  SSE_MULT_PS(XMM5, XMM4)
  SSE_ADD_PS(XMM5, XMM7)
  /* ----------------------------------------------- */

  /* det(A): multiply the first block of cofactors by the matching matrix
     entries, then sum the four products with two shuffle/add steps */
  SSE_LOAD_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_MULT_PS(XMM3, XMM0)

  SSE_COPY_PS(XMM4, XMM3)
  SSE_SHUFFLE(XMM4, XMM3, 0x4E)
  SSE_ADD_PS(XMM4, XMM3)

  SSE_COPY_PS(XMM6, XMM4)
  SSE_SHUFFLE(XMM6, XMM4, 0xB1)
  SSE_ADD_SS(XMM6, XMM4)

  /* 1/det(A): rcpss approximation refined with one Newton-Raphson step,
     x1 = x0 * (2 - det * x0), then broadcast to all four lanes */
  SSE_COPY_PS(XMM3, XMM6)
  SSE_RECIP_SS(XMM3, XMM6)
  SSE_COPY_SS(XMM4, XMM3)
  SSE_ADD_SS(XMM4, XMM3)
  SSE_MULT_SS(XMM3, XMM3)
  SSE_MULT_SS(XMM6, XMM3)
  SSE_SUB_SS(XMM4, XMM6)

  SSE_SHUFFLE(XMM4, XMM4, 0x00)

  /* scale the adjugate rows by 1/det(A) and store the inverse back into a[] */
  SSE_MULT_PS(XMM0, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_2, XMM0)

  SSE_MULT_PS(XMM1, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_4, XMM1)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_6, XMM1)

  SSE_MULT_PS(XMM2, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_8, XMM2)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_10, XMM2)

  SSE_MULT_PS(XMM4, XMM5)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_12, XMM4)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_14, XMM4)

  /* ----------------------------------------------- */

  SSE_INLINE_END_1;
  SSE_SCOPE_END;
  return 0;
}
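/*
   A minimal usage sketch for the SSE variant (illustrative only): it inverts a
   single-precision 4x4 block in place.  The helper name
   ExampleInvert4x4BlockSSE is hypothetical; a 16-byte aligned buffer is
   assumed since the kernel uses packed SSE loads and stores, and the
   reciprocal of the determinant is approximate, so the result is accurate
   only to single precision.
*/
static PetscErrorCode ExampleInvert4x4BlockSSE(void)
{
  /* diag(2, 4, 8, 16); assumed adequately aligned for the SSE macros */
  float block[16] = {2.f, 0.f, 0.f, 0.f,
                     0.f, 4.f, 0.f, 0.f,
                     0.f, 0.f, 8.f, 0.f,
                     0.f, 0.f, 0.f, 16.f};

  PetscCall(PetscKernel_A_gets_inverse_A_4_SSE(block));
  /* block now holds (approximately) diag(0.5f, 0.25f, 0.125f, 0.0625f) */
  return 0;
}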
#endif