Actual source code: ex106.c


static char help[] = "Test repeated LU factorizations. Used for checking memory leak\n\
  -m <size> : problem size\n\
  -mat_nonsym : use nonsymmetric matrix (default is symmetric)\n\n";

#include <petscmat.h>
int main(int argc, char **args)
{
  Mat           C, F;    /* matrix and its LU factor */
  Vec           x, u, b; /* approx solution, RHS, exact solution */
  PetscReal     norm;    /* norm of solution error */
  PetscScalar   v, none = -1.0;
  PetscInt      I, J, ldim, low, high, iglobal, Istart, Iend;
  PetscInt      i, j, m = 3, n = 2;
  PetscMPIInt   size, rank;
  PetscBool     mat_nonsymmetric;
  PetscInt      its_max;
  MatFactorInfo factinfo;
  IS            perm, iperm;

  PetscInitialize(&argc, &args, (char *)0, help);
  PetscOptionsGetInt(NULL, NULL, "-m", &m, NULL);
  MPI_Comm_rank(PETSC_COMM_WORLD, &rank);
  MPI_Comm_size(PETSC_COMM_WORLD, &size);
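  /* The global problem is an m x n grid; its width n scales with the number of ranks. */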
  n = 2 * size;

  /*
     Set flag if we are doing a nonsymmetric problem; the default is symmetric.
  */
  PetscOptionsHasName(NULL, NULL, "-mat_nonsym", &mat_nonsymmetric);

  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime. Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.
  */
  MatCreate(PETSC_COMM_WORLD, &C);
  MatSetSizes(C, PETSC_DECIDE, PETSC_DECIDE, m * n, m * n);
  MatSetFromOptions(C);
  MatSetUp(C); /* complete setup so values can be inserted */
  MatGetOwnershipRange(C, &Istart, &Iend);

  /*
     Set matrix entries in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.
  */
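  /* Five-point stencil: couple each grid point to its north/south/east/west
     neighbors with -1 and to itself with 4. */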
  for (I = Istart; I < Iend; I++) {
    v = -1.0;
    i = I / n;
    j = I - i * n;
    if (i > 0) {
      J = I - n;
      MatSetValues(C, 1, &I, 1, &J, &v, ADD_VALUES);
    }
    if (i < m - 1) {
      J = I + n;
      MatSetValues(C, 1, &I, 1, &J, &v, ADD_VALUES);
    }
    if (j > 0) {
      J = I - 1;
      MatSetValues(C, 1, &I, 1, &J, &v, ADD_VALUES);
    }
    if (j < n - 1) {
      J = I + 1;
      MatSetValues(C, 1, &I, 1, &J, &v, ADD_VALUES);
    }
    v = 4.0;
    MatSetValues(C, 1, &I, 1, &I, &v, ADD_VALUES);
  }

  /*
     Make the matrix nonsymmetric if desired
  */
  if (mat_nonsymmetric) {
    for (I = Istart; I < Iend; I++) {
      v = -1.5;
      i = I / n;
      if (i > 1) {
        J = I - n - 1;
        MatSetValues(C, 1, &I, 1, &J, &v, ADD_VALUES);
      }
    }
  } else {
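    /* Hint to PETSc that the matrix is symmetric and will remain so,
       allowing solvers to exploit the symmetry. */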
    MatSetOption(C, MAT_SYMMETRIC, PETSC_TRUE);
    MatSetOption(C, MAT_SYMMETRY_ETERNAL, PETSC_TRUE);
  }

  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  MatAssemblyBegin(C, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(C, MAT_FINAL_ASSEMBLY);

  /*
     Create parallel vectors.
      - When using VecSetSizes(), we specify only the vector's global
        dimension; the parallel partitioning is determined at runtime.
      - Note: We form 1 vector from scratch and then duplicate as needed.
  */
  VecCreate(PETSC_COMM_WORLD, &u);
  VecSetSizes(u, PETSC_DECIDE, m * n);
  VecSetFromOptions(u);
  VecDuplicate(u, &b);
  VecDuplicate(b, &x);

  /*
     Currently, all parallel PETSc vectors are partitioned by
     contiguous chunks across the processors.  Determine which
     range of entries is locally owned.
  */
  VecGetOwnershipRange(x, &low, &high);

  /*
    Set elements within the exact solution vector in parallel.
     - Each processor needs to insert only elements that it owns
       locally (but any non-local entries will be sent to the
       appropriate processor during vector assembly).
     - Always specify global locations of vector entries.
  */
  VecGetLocalSize(x, &ldim);
  for (i = 0; i < ldim; i++) {
    iglobal = i + low;
    v       = (PetscScalar)(i + 100 * rank);
    VecSetValues(u, 1, &iglobal, &v, INSERT_VALUES);
  }

  /*
     Assemble vector, using the 2-step process:
       VecAssemblyBegin(), VecAssemblyEnd()
     Computations can be done while messages are in transition,
     by placing code between these two statements.
  */
  VecAssemblyBegin(u);
  VecAssemblyEnd(u);

  /* Compute right-hand-side vector */
  MatMult(C, u, b);

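  /*
     Repeatedly create, factor, solve with, and destroy an LU factorization
     of C. Cycling its_max times makes any memory leaked per factorization
     visible to a memory checker.
  */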
  MatGetOrdering(C, MATORDERINGNATURAL, &perm, &iperm);
  MatFactorInfoInitialize(&factinfo); /* set default factorization options before use */
  its_max = 2000;
  for (i = 0; i < its_max; i++) {
    MatGetFactor(C, MATSOLVERPETSC, MAT_FACTOR_LU, &F);
    MatLUFactorSymbolic(F, C, perm, iperm, &factinfo);
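    /* One numeric factorization per symbolic pass; raise the inner bound
       below to repeat the numeric phase alone. */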
    for (j = 0; j < 1; j++) MatLUFactorNumeric(F, C, &factinfo);
    MatSolve(F, b, x);
    MatDestroy(&F);
  }
  ISDestroy(&perm);
  ISDestroy(&iperm);

  /* Check the error */
  VecAXPY(x, none, u);
  VecNorm(x, NORM_2, &norm);
  PetscPrintf(PETSC_COMM_WORLD, "Norm of error %g\n", (double)norm);

  /* Free work space. */
  VecDestroy(&u);
  VecDestroy(&x);
  VecDestroy(&b);
  MatDestroy(&C);
  PetscFinalize();
  return 0;
}
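
/*
   Example usage (a sketch; assumes the built executable is named ex106):

       ./ex106 -m 5 -mat_nonsym

   The built-in PETSc LU factorization requested here (MATSOLVERPETSC)
   operates on sequential matrices, so the test is typically run on a single
   rank. To hunt for the leaks this test targets, run under a memory checker
   such as "valgrind --leak-check=full", or pass the PETSc option
   -malloc_dump to report any unfreed PETSc memory at PetscFinalize().
*/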