Actual source code: mpb_aij.c

#include <../src/mat/impls/aij/mpi/mpiaij.h>

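/*
   MatGetMultiProcBlock_MPIAIJ - builds, on the ranks of subComm, the block of
   mat that those ranks own: each rank's sequential diagonal block (aij->A) is
   shared with subMat by reference, while the off-diagonal block is rebuilt
   keeping only the columns whose owning rank is a member of subComm.
*/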
PetscErrorCode MatGetMultiProcBlock_MPIAIJ(Mat mat, MPI_Comm subComm, MatReuse scall, Mat *subMat)
{
  Mat_MPIAIJ  *aij  = (Mat_MPIAIJ *)mat->data;
  Mat_SeqAIJ  *aijB = (Mat_SeqAIJ *)aij->B->data;
  PetscMPIInt  subCommSize, subCommRank;
  PetscMPIInt *commRankMap, subRank, rank, commRank;
  PetscInt    *garrayCMap, col, i, j, *nnz, newRow, newCol;

  MPI_Comm_size(subComm, &subCommSize);
  MPI_Comm_rank(subComm, &subCommRank);
  MPI_Comm_rank(PetscObjectComm((PetscObject)mat), &commRank);

  /* create subMat object with the relevant layout */
  if (scall == MAT_INITIAL_MATRIX) {
    MatCreate(subComm, subMat);
    MatSetType(*subMat, MATMPIAIJ);
    MatSetSizes(*subMat, mat->rmap->n, mat->cmap->n, PETSC_DECIDE, PETSC_DECIDE);
    MatSetBlockSizesFromMats(*subMat, mat, mat);

    /* need to set up rmap and cmap before preallocation */
    PetscLayoutSetUp((*subMat)->rmap);
    PetscLayoutSetUp((*subMat)->cmap);
  }

  /* create a map of comm_rank from subComm to comm - should commRankMap and garrayCMap be kept for reuse? */
  PetscMalloc1(subCommSize, &commRankMap);
  MPI_Allgather(&commRank, 1, MPI_INT, commRankMap, 1, MPI_INT, subComm);
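  /* commRankMap[subRank] now holds the parent-communicator rank of the
     process that has rank subRank in subComm */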

  /* Traverse garray and identify column indices [of offdiag mat] that
     should be discarded. For the ones not discarded, store the newCol+1
     value in garrayCMap */
  PetscCalloc1(aij->B->cmap->n, &garrayCMap);
  for (i = 0; i < aij->B->cmap->n; i++) {
    col = aij->garray[i];
    for (subRank = 0; subRank < subCommSize; subRank++) {
      rank = commRankMap[subRank];
      if ((col >= mat->cmap->range[rank]) && (col < mat->cmap->range[rank + 1])) {
        garrayCMap[i] = (*subMat)->cmap->range[subRank] + col - mat->cmap->range[rank] + 1;
        break;
      }
    }
  }
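  /* garrayCMap[i] == 0 now marks an off-diagonal column whose owning rank is
     not in subComm (its entries are dropped below); a nonzero value is the
     column's new global index in subMat shifted by +1, so that 0 can double
     as the "discard" sentinel */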

  if (scall == MAT_INITIAL_MATRIX) {
    /* Compute preallocation for the offdiag mat */
    PetscCalloc1(aij->B->rmap->n, &nnz);
    for (i = 0; i < aij->B->rmap->n; i++) {
      for (j = aijB->i[i]; j < aijB->i[i + 1]; j++) {
        if (garrayCMap[aijB->j[j]]) nnz[i]++;
      }
    }
    MatMPIAIJSetPreallocation(*subMat, 0, NULL, 0, nnz);

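    /* each rank owns the same local rows and columns in subMat as in mat, so
       the sequential diagonal block is unchanged; it is shared by reference
       below instead of copied, which is why d_nnz above is left at 0 */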
    /* reuse diag block with the new submat */
    MatDestroy(&((Mat_MPIAIJ *)((*subMat)->data))->A);
    ((Mat_MPIAIJ *)((*subMat)->data))->A = aij->A;
    PetscObjectReference((PetscObject)aij->A);
  } else if (((Mat_MPIAIJ *)(*subMat)->data)->A != aij->A) {
    PetscObject obj = (PetscObject)((Mat_MPIAIJ *)((*subMat)->data))->A;
    PetscObjectReference(obj);
    ((Mat_MPIAIJ *)((*subMat)->data))->A = aij->A;
    PetscObjectReference((PetscObject)aij->A);
  }

  /* Traverse aij->B and insert values into subMat */
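  /* a reused subMat is already assembled; mark it unassembled so the
     insertions below are accepted, and record was_assembled so the final
     assembly treats this as a reassembly */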
  if ((*subMat)->assembled) {
    (*subMat)->was_assembled = PETSC_TRUE;
    (*subMat)->assembled     = PETSC_FALSE;
  }
  for (i = 0; i < aij->B->rmap->n; i++) {
    newRow = (*subMat)->rmap->range[subCommRank] + i;
    for (j = aijB->i[i]; j < aijB->i[i + 1]; j++) {
      newCol = garrayCMap[aijB->j[j]];
      if (newCol) {
        newCol--; /* undo the +1 sentinel shift */
        MatSetValues_MPIAIJ(*subMat, 1, &newRow, 1, &newCol, (aijB->a + j), INSERT_VALUES);
      }
    }
  }
  MatAssemblyBegin(*subMat, MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(*subMat, MAT_FINAL_ASSEMBLY);

  /* deallocate temporary data */
  PetscFree(commRankMap);
  PetscFree(garrayCMap);
  if (scall == MAT_INITIAL_MATRIX) PetscFree(nnz);
  return 0;
}
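
For context, a minimal usage sketch follows; it is not part of mpb_aij.c. User code reaches the routine above through the public MatGetMultiProcBlock() interface, which dispatches to MatGetMultiProcBlock_MPIAIJ() for MPIAIJ matrices. The even/odd MPI_Comm_split() grouping and the small diagonal test matrix are illustrative assumptions, and the PetscCall()/PetscCallMPI() error-checking macros assume a recent PETSc.

#include <petscmat.h>

int main(int argc, char **argv)
{
  Mat         A, subA;
  MPI_Comm    subComm;
  PetscMPIInt rank;
  PetscInt    rstart, rend;

  PetscCall(PetscInitialize(&argc, &argv, NULL, NULL));
  PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));

  /* assemble a small distributed diagonal matrix with 2 local rows per rank */
  PetscCall(MatCreateAIJ(PETSC_COMM_WORLD, 2, 2, PETSC_DECIDE, PETSC_DECIDE, 1, NULL, 0, NULL, &A));
  PetscCall(MatGetOwnershipRange(A, &rstart, &rend));
  for (PetscInt i = rstart; i < rend; i++) PetscCall(MatSetValue(A, i, i, 1.0, INSERT_VALUES));
  PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));

  /* group even and odd world ranks into separate sub-communicators (illustrative) */
  PetscCallMPI(MPI_Comm_split(PETSC_COMM_WORLD, rank % 2, rank, &subComm));

  /* extract the block of A owned by the ranks of subComm */
  PetscCall(MatGetMultiProcBlock(A, subComm, MAT_INITIAL_MATRIX, &subA));
  /* after changing A's values, pass MAT_REUSE_MATRIX to refresh subA in place */

  PetscCall(MatDestroy(&subA));
  PetscCallMPI(MPI_Comm_free(&subComm));
  PetscCall(MatDestroy(&A));
  PetscCall(PetscFinalize());
  return 0;
}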