Actual source code: ex204.c
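This example builds AIJ matrices, converts them to the ViennaCL format (both into a new matrix and in place, sequentially and in parallel), and checks that the ViennaCL matrix-vector product agrees with the CPU result.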
static char help[] = "Test ViennaCL Matrix Conversions";

#include <petscmat.h>

int main(int argc, char **args)
{
  PetscMPIInt rank, size;

  PetscInitialize(&argc, &args, (char *)0, help);
  MPI_Comm_rank(PETSC_COMM_WORLD, &rank);
  MPI_Comm_size(PETSC_COMM_WORLD, &size);

  /* Construct a sequential ViennaCL matrix and vector */
  if (rank == 0) {
    Mat               A_vcl;
    Vec               v_vcl, r_vcl;
    PetscInt          n = 17, m = 31, nz = 5, i, cnt, j;
    const PetscScalar val = 1.0;
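
    /* Preallocate nz nonzeros per row; the NULL argument means the same
       count is used for every row (see MatCreateSeqAIJ) */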
    MatCreateSeqAIJViennaCL(PETSC_COMM_SELF, m, n, nz, NULL, &A_vcl);

    /* Add nz arbitrary entries per row in arbitrary columns */
    for (i = 0; i < m; ++i) {
      for (cnt = 0; cnt < nz; ++cnt) {
        j = (19 * cnt + (7 * i + 3)) % n;
        MatSetValue(A_vcl, i, j, (PetscScalar)(0.3 * i + j), INSERT_VALUES);
      }
    }
    MatAssemblyBegin(A_vcl, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(A_vcl, MAT_FINAL_ASSEMBLY);

    VecCreateSeqViennaCL(PETSC_COMM_SELF, n, &v_vcl);
    VecCreateSeqViennaCL(PETSC_COMM_SELF, m, &r_vcl);
    VecSet(v_vcl, val);
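
    /* This product runs through the ViennaCL backend (CUDA, OpenCL, or
       OpenMP, depending on how PETSc and ViennaCL were configured) */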
    MatMult(A_vcl, v_vcl, r_vcl);

    VecDestroy(&v_vcl);
    VecDestroy(&r_vcl);
    MatDestroy(&A_vcl);
  }

  /* Create a sequential AIJ matrix on rank 0, convert it to a new ViennaCL matrix, and apply it to a ViennaCL vector */
  if (rank == 0) {
    Mat               A, A_vcl;
    Vec               v, r, v_vcl, r_vcl, d_vcl;
    PetscInt          n = 17, m = 31, nz = 5, i, cnt, j;
    const PetscScalar val = 1.0;
    PetscReal         dnorm;
    const PetscReal   tol = 1e-5;

    MatCreateSeqAIJ(PETSC_COMM_SELF, m, n, nz, NULL, &A);

    /* Add nz arbitrary entries per row in arbitrary columns */
    for (i = 0; i < m; ++i) {
      for (cnt = 0; cnt < nz; ++cnt) {
        j = (19 * cnt + (7 * i + 3)) % n;
        MatSetValue(A, i, j, (PetscScalar)(0.3 * i + j), INSERT_VALUES);
      }
    }
    MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);

    PetscObjectSetName((PetscObject)A, "Sequential CPU Matrix");

    VecCreateSeq(PETSC_COMM_SELF, n, &v);
    VecCreateSeq(PETSC_COMM_SELF, m, &r);
    PetscObjectSetName((PetscObject)r, "CPU result vector");
    VecSet(v, val);
    MatMult(A, v, r);
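
    /* MAT_INITIAL_MATRIX creates a new matrix of the target type;
       A itself is left unchanged */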
    MatConvert(A, MATSEQAIJVIENNACL, MAT_INITIAL_MATRIX, &A_vcl);
    PetscObjectSetName((PetscObject)A_vcl, "New ViennaCL Matrix");

    VecCreateSeqViennaCL(PETSC_COMM_SELF, n, &v_vcl);
    VecCreateSeqViennaCL(PETSC_COMM_SELF, m, &r_vcl);
    PetscObjectSetName((PetscObject)r_vcl, "ViennaCL result vector");
    VecSet(v_vcl, val);
    MatMult(A_vcl, v_vcl, r_vcl);

    /* Compare the CPU result r with the ViennaCL result r_vcl */
    VecDuplicate(r_vcl, &d_vcl);
    VecCopy(r, d_vcl);
    VecAXPY(d_vcl, -1.0, r_vcl); /* d_vcl = r - r_vcl; subtracting r_vcl from itself would trivially give zero */
    VecNorm(d_vcl, NORM_INFINITY, &dnorm);
    PetscCheck(dnorm <= tol, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Sequential CPU and ViennaCL results incompatible with inf norm greater than tolerance of %g", (double)tol);

    VecDestroy(&v);
    VecDestroy(&r);
    VecDestroy(&v_vcl);
    VecDestroy(&r_vcl);
    VecDestroy(&d_vcl);
    MatDestroy(&A);
    MatDestroy(&A_vcl);
  }

  /* Create a sequential AIJ matrix on rank 0, convert it in place to a ViennaCL matrix, and apply it to a ViennaCL vector */
  if (rank == 0) {
    Mat               A;
    Vec               v, r, v_vcl, r_vcl, d_vcl;
    PetscInt          n = 17, m = 31, nz = 5, i, cnt, j;
    const PetscScalar val = 1.0;
    PetscReal         dnorm;
    const PetscReal   tol = 1e-5;

    MatCreateSeqAIJ(PETSC_COMM_SELF, m, n, nz, NULL, &A);

    /* Add nz arbitrary entries per row in arbitrary columns */
    for (i = 0; i < m; ++i) {
      for (cnt = 0; cnt < nz; ++cnt) {
        j = (19 * cnt + (7 * i + 3)) % n;
        MatSetValue(A, i, j, (PetscScalar)(0.3 * i + j), INSERT_VALUES);
      }
    }
    MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);

    PetscObjectSetName((PetscObject)A, "Sequential CPU Matrix");

    VecCreateSeq(PETSC_COMM_SELF, n, &v);
    VecCreateSeq(PETSC_COMM_SELF, m, &r);
    PetscObjectSetName((PetscObject)r, "CPU result vector");
    VecSet(v, val);
    MatMult(A, v, r);
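
    /* MAT_INPLACE_MATRIX converts A itself: after the call, A has type
       MATSEQAIJVIENNACL and no second matrix is created */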
    MatConvert(A, MATSEQAIJVIENNACL, MAT_INPLACE_MATRIX, &A);
    PetscObjectSetName((PetscObject)A, "Converted ViennaCL Matrix");

    VecCreateSeqViennaCL(PETSC_COMM_SELF, n, &v_vcl);
    VecCreateSeqViennaCL(PETSC_COMM_SELF, m, &r_vcl);
    PetscObjectSetName((PetscObject)r_vcl, "ViennaCL result vector");
    VecSet(v_vcl, val);
    MatMult(A, v_vcl, r_vcl);

    /* Compare the CPU result r with the ViennaCL result r_vcl */
    VecDuplicate(r_vcl, &d_vcl);
    VecCopy(r, d_vcl);
    VecAXPY(d_vcl, -1.0, r_vcl);
    VecNorm(d_vcl, NORM_INFINITY, &dnorm);
    PetscCheck(dnorm <= tol, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Sequential CPU and in-place ViennaCL results incompatible with inf norm greater than tolerance of %g", (double)tol);

    VecDestroy(&v);
    VecDestroy(&r);
    VecDestroy(&v_vcl);
    VecDestroy(&r_vcl);
    VecDestroy(&d_vcl);
    MatDestroy(&A);
  }

  /* Create a parallel AIJ matrix, convert it to a new ViennaCL matrix, and apply it to a ViennaCL vector */
  if (size > 1) {
    Mat               A, A_vcl;
    Vec               v, r, v_vcl, r_vcl, d_vcl;
    PetscInt          N = 17, M = 31, nz = 5, i, cnt, j, rlow, rhigh;
    const PetscScalar val = 1.0;
    PetscReal         dnorm;
    const PetscReal   tol = 1e-5;

    MatCreateAIJ(PETSC_COMM_WORLD, PETSC_DETERMINE, PETSC_DETERMINE, M, N, nz, NULL, nz, NULL, &A);

    /* Add nz arbitrary entries per row in arbitrary columns */
    MatGetOwnershipRange(A, &rlow, &rhigh);
    for (i = rlow; i < rhigh; ++i) {
      for (cnt = 0; cnt < nz; ++cnt) {
        j = (19 * cnt + (7 * i + 3)) % N;
        MatSetValue(A, i, j, (PetscScalar)(0.3 * i + j), INSERT_VALUES);
      }
    }
    MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);

    PetscObjectSetName((PetscObject)A, "MPI CPU Matrix");
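
    /* MatCreateVecs gives vectors with layouts compatible with A:
       v spans the columns (global length N), r the rows (global length M) */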
    MatCreateVecs(A, &v, &r);
    PetscObjectSetName((PetscObject)r, "MPI CPU result vector");
    VecSet(v, val);
    MatMult(A, v, r);
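
    /* The MPI variant stores each rank's local diagonal and off-diagonal
       blocks as sequential ViennaCL matrices */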
    MatConvert(A, MATMPIAIJVIENNACL, MAT_INITIAL_MATRIX, &A_vcl);
    PetscObjectSetName((PetscObject)A_vcl, "New MPI ViennaCL Matrix");

    MatCreateVecs(A_vcl, &v_vcl, &r_vcl);
    PetscObjectSetName((PetscObject)r_vcl, "ViennaCL result vector");
    VecSet(v_vcl, val);
    MatMult(A_vcl, v_vcl, r_vcl);

    /* Compare the CPU result r with the ViennaCL result r_vcl */
    VecDuplicate(r_vcl, &d_vcl);
    VecCopy(r, d_vcl);
    VecAXPY(d_vcl, -1.0, r_vcl);
    VecNorm(d_vcl, NORM_INFINITY, &dnorm);
    PetscCheck(dnorm <= tol, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "MPI CPU and ViennaCL results incompatible with inf norm greater than tolerance of %g", (double)tol);

    VecDestroy(&v);
    VecDestroy(&r);
    VecDestroy(&v_vcl);
    VecDestroy(&r_vcl);
    VecDestroy(&d_vcl);
    MatDestroy(&A);
    MatDestroy(&A_vcl);
  }

  /* Create a parallel AIJ matrix, convert it in place to a ViennaCL matrix, and apply it to a ViennaCL vector */
  if (size > 1) {
    Mat               A;
    Vec               v, r, v_vcl, r_vcl, d_vcl;
    PetscInt          N = 17, M = 31, nz = 5, i, cnt, j, rlow, rhigh;
    const PetscScalar val = 1.0;
    PetscReal         dnorm;
    const PetscReal   tol = 1e-5;

    MatCreateAIJ(PETSC_COMM_WORLD, PETSC_DETERMINE, PETSC_DETERMINE, M, N, nz, NULL, nz, NULL, &A);

    /* Add nz arbitrary entries per row in arbitrary columns */
    MatGetOwnershipRange(A, &rlow, &rhigh);
    for (i = rlow; i < rhigh; ++i) {
      for (cnt = 0; cnt < nz; ++cnt) {
        j = (19 * cnt + (7 * i + 3)) % N;
        MatSetValue(A, i, j, (PetscScalar)(0.3 * i + j), INSERT_VALUES);
      }
    }
    MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);

    PetscObjectSetName((PetscObject)A, "MPI CPU Matrix");

    MatCreateVecs(A, &v, &r);
    PetscObjectSetName((PetscObject)r, "MPI CPU result vector");
    VecSet(v, val);
    MatMult(A, v, r);
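
    /* As above, but in place: A itself becomes a MATMPIAIJVIENNACL matrix */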
    MatConvert(A, MATMPIAIJVIENNACL, MAT_INPLACE_MATRIX, &A);
    PetscObjectSetName((PetscObject)A, "Converted MPI ViennaCL Matrix");

    MatCreateVecs(A, &v_vcl, &r_vcl);
    PetscObjectSetName((PetscObject)r_vcl, "ViennaCL result vector");
    VecSet(v_vcl, val);
    MatMult(A, v_vcl, r_vcl);

    /* Compare the CPU result r with the ViennaCL result r_vcl */
    VecDuplicate(r_vcl, &d_vcl);
    VecCopy(r, d_vcl);
    VecAXPY(d_vcl, -1.0, r_vcl);
    VecNorm(d_vcl, NORM_INFINITY, &dnorm);
    PetscCheck(dnorm <= tol, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "MPI CPU and in-place ViennaCL results incompatible with inf norm greater than tolerance of %g", (double)tol);

    VecDestroy(&v);
    VecDestroy(&r);
    VecDestroy(&v_vcl);
    VecDestroy(&r_vcl);
    VecDestroy(&d_vcl);
    MatDestroy(&A);
  }

  PetscFinalize();
  return 0;
}

/*TEST

   build:
     requires: viennacl

   test:
     output_file: output/ex204.out

   test:
     suffix: 2
     nsize: 2
     output_file: output/ex204.out

TEST*/