Actual source code: ex2f.F90

!
!
!  Description: Builds a parallel vector with 1 component on the first
!               processor, 2 on the second, etc.  Then each processor adds
!               one to every element except the last 'rank' entries.
!
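!  For illustration: on 2 MPI ranks the local sizes are 1 and 2, so the
!  global vector has 3 entries.  VecSet() first sets every entry to 1;
!  rank 0 then adds 1 to entries 0, 1, 2 and rank 1 adds 1 to entries
!  0, 1, giving the assembled vector (3, 3, 2).
!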
! -----------------------------------------------------------------------

      program main
#include <petsc/finclude/petscvec.h>
      use petscvec
      implicit none

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                 Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      Vec     x
      PetscInt N,i,ione
      PetscErrorCode ierr
      PetscMPIInt rank
      PetscScalar  one

      PetscCallA(PetscInitialize(ierr))
      one   = 1.0
      PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr))

!  Create a parallel vector.
!   - In this case, we specify the size of the local portion on
!     each processor, and PETSc computes the global size.  Alternatively,
!     if we pass the global size and use PETSC_DECIDE for the
!     local size, PETSc will choose a reasonable partition, trying
!     to put nearly an equal number of elements on each processor
!     (a commented illustration follows the call below).

      N = rank + 1
      PetscCallA(VecCreateMPI(PETSC_COMM_WORLD,N,PETSC_DECIDE,x,ierr))
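!  (For illustration only, not executed: the global-size form mentioned
!   above, with N holding the desired global size instead, would read
!      PetscCallA(VecCreateMPI(PETSC_COMM_WORLD,PETSC_DECIDE,N,x,ierr))
!   and PETSc would then choose the local sizes.)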
!     Query the resulting global size and initialize every entry to one.
      PetscCallA(VecGetSize(x,N,ierr))
      PetscCallA(VecSet(x,one,ierr))

!  Set the vector elements.
!   - Note that VecSetValues() uses 0-based row and column numbers
!     in Fortran as well as in C.
!   - Always specify global locations of vector entries.
!   - Each processor can contribute any vector entries,
!     regardless of which processor "owns" them; any nonlocal
!     contributions will be transferred to the appropriate processor
!     during the assembly process.
!   - In this example, the flag ADD_VALUES indicates that all
!     contributions will be added together.

      ione = 1
      do 100 i=0,N-rank-1
         PetscCallA(VecSetValues(x,ione,i,one,ADD_VALUES,ierr))
 100  continue
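!  (For illustration only, not executed: passing INSERT_VALUES instead of
!   ADD_VALUES would overwrite each entry rather than sum the
!   contributions, e.g.
!      PetscCallA(VecSetValues(x,ione,i,one,INSERT_VALUES,ierr))
!   Note that INSERT_VALUES and ADD_VALUES cannot be mixed without an
!   intervening assembly.)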

!  Assemble the vector, using the 2-step process:
!    VecAssemblyBegin(), VecAssemblyEnd()
!  Computations can be done while messages are in transit
!  by placing code between these two statements.

      PetscCallA(VecAssemblyBegin(x,ierr))
      PetscCallA(VecAssemblyEnd(x,ierr))
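!  (For illustration only: purely local work that does not touch x, such
!   as filling an unrelated application array, could be placed between
!   VecAssemblyBegin() and VecAssemblyEnd() to overlap computation with
!   the communication of the off-processor contributions.)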

!     Test VecGetValues() with scalar (non-array) arguments; with a count
!     of zero, no entries are actually read.
      if (rank .eq. 0) then
        ione = 0
        PetscCallA(VecGetValues(x,ione,i,one,ierr))
      endif
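!     (For illustration only, not executed: fetching one entry that
!      rank 0 owns, say global index 0, would look like
!         ione = 1
!         i    = 0
!         PetscCallA(VecGetValues(x,ione,i,one,ierr))
!      Note that VecGetValues() can only access entries owned by the
!      calling process.)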

!  View the vector; then destroy it.

      PetscCallA(VecView(x,PETSC_VIEWER_STDOUT_WORLD,ierr))
      PetscCallA(VecDestroy(x,ierr))

      PetscCallA(PetscFinalize(ierr))
      end

!/*TEST
!
!     test:
!       nsize: 2
!       filter: grep -v " MPI process"
!
!TEST*/