Actual source code: ex16f90.F90
!
!
!  Tests MatDenseGetArray()
!

program main
#include <petsc/finclude/petscmat.h>
  use petscmat
  implicit none

  Mat A
  PetscErrorCode ierr
  PetscInt i,j,m,n,iar(1),jar(1)
  PetscInt one
  PetscScalar v(1)
  PetscScalar, pointer :: array(:,:)
  PetscMPIInt rank
  integer :: ashape(2)
  character(len=80) :: string

  PetscCallA(PetscInitialize(ierr))
  m = 3
  n = 2
  one = 1
!
!  Create a parallel dense matrix shared by all processors
!
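!  PETSC_DECIDE lets PETSc choose the local row and column sizes, and
!  PETSC_NULL_SCALAR asks PETSc to allocate the matrix storage itself
!  rather than wrapping a user-provided array.
!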
  PetscCallA(MatCreateDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,m,n,PETSC_NULL_SCALAR,A,ierr))

!
!  Set values into the matrix. All processors set all values.
!
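!  With INSERT_VALUES, every rank inserting the same entry is harmless:
!  the redundant, identical values simply overwrite one another.
!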
  do 10, i=0,m-1
    iar(1) = i
    do 20, j=0,n-1
      jar(1) = j
      v(1) = 9.0/real(i+j+1)
      PetscCallA(MatSetValues(A,one,iar,one,jar,v,INSERT_VALUES,ierr))
 20 continue
 10 continue
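
!
!  Matrices must be assembled with MatAssemblyBegin()/MatAssemblyEnd()
!  after all calls to MatSetValues(), so that cached values are
!  communicated and stored.
!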
  PetscCallA(MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY,ierr))
  PetscCallA(MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY,ierr))

!
!  Print the matrix to the screen
!
  PetscCallA(MatView(A,PETSC_VIEWER_STDOUT_WORLD,ierr))

!
!  Print the local matrix shape to the screen for each rank
!
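!  MatDenseGetArrayF90() returns a Fortran pointer to the rank-local
!  storage, dimensioned (locally owned rows) x (columns); the pointer
!  is valid only until MatDenseRestoreArrayF90() is called.
!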
  PetscCallA(MatDenseGetArrayF90(A,array,ierr))
  PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr))
  ashape = shape(array)
  write(string, '("[", i0, "]", " shape (", i0, ",", i0, ")", a1)') rank, ashape(1), ashape(2), new_line('a')
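!  PetscSynchronizedPrintf() queues the output from each rank and
!  PetscSynchronizedFlush() then prints it in rank order.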
  PetscCallA(PetscSynchronizedPrintf(PETSC_COMM_WORLD, string, ierr))
  PetscCallA(PetscSynchronizedFlush(PETSC_COMM_WORLD,PETSC_STDOUT,ierr))
  PetscCallA(MatDenseRestoreArrayF90(A,array,ierr))
!
!  Free the space used by the matrix
!
  PetscCallA(MatDestroy(A,ierr))
  PetscCallA(PetscFinalize(ierr))
end
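
! The block below drives the PETSc test harness: nsize runs the example
! on two MPI ranks, and the sort filters make the output comparison
! independent of rank ordering.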
!/*TEST
!
!   test:
!      nsize: 2
!      filter: sort -b
!      filter_output: sort -b
!
!TEST*/