Actual source code: ex88f.F90
!
!   Creates a tridiagonal sparse matrix explicitly in Fortran and solves a linear system with it
!
!   The matrix is provided as COO (row, column, value) triples in a way that supports supplying
!   new nonzero values while keeping the same nonzero structure
!
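!   For the default n = 3 (so nz = 5) the triples describe the matrix
!
!      ( 1   0   0 )
!      (-1   2  -1 )
!      ( 0   0   1 )
!
!   that is, identity rows at both ends and the -1 2 -1 stencil in the interior rows
!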
program main
#include <petsc/finclude/petscksp.h>
  use petscksp
  implicit none

  PetscInt i,n,nz
  PetscBool flg
  PetscErrorCode ierr
  PetscScalar,ALLOCATABLE :: a(:)
  PetscScalar,pointer :: b(:)

  PetscInt,ALLOCATABLE :: rows(:)
  PetscInt,ALLOCATABLE :: cols(:)

  Mat J
  Vec rhs,solution
  KSP ksp

  PetscCallA(PetscInitialize(ierr))

  n = 3
  PetscCallA(PetscOptionsGetInt(PETSC_NULL_OPTIONS,PETSC_NULL_CHARACTER,'-n',n,flg,ierr))
  nz = 3*n - 4
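  ! the two end rows hold one nonzero each and each of the n-2 interior rows holds three,
  ! hence nz = 2 + 3*(n-2) = 3*n - 4 COO entries
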
  PetscCallA(VecCreateSeq(PETSC_COMM_SELF,n,rhs,ierr))
  PetscCallA(VecCreateSeq(PETSC_COMM_SELF,n,solution,ierr))
  ALLOCATE (rows(nz),cols(nz),a(nz))

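  ! fill the right-hand side with ones, working directly on a Fortran pointer to the Vec's array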
  PetscCallA(VecGetArrayF90(rhs,b,ierr))
  do i=1,n
    b(i) = 1.0
  enddo
  PetscCallA(VecRestoreArrayF90(rhs,b,ierr))

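  ! build the COO triples with zero-based indices: entry 1 is the (0,0) diagonal of the first row,
  ! each interior row i contributes the three consecutive entries 2+3*(i-2) .. 2+3*(i-2)+2
  ! (subdiagonal, diagonal, superdiagonal), and entry nz is the (n-1,n-1) diagonal of the last row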
  rows(1) = 0; cols(1) = 0
  a(1) = 1.0
  do i=2,n-1
    rows(2+3*(i-2)) = i-1; cols(2+3*(i-2)) = i-2
    a(2+3*(i-2)) = -1.0
    rows(2+3*(i-2)+1) = i-1; cols(2+3*(i-2)+1) = i-1
    a(2+3*(i-2)+1) = 2.0
    rows(2+3*(i-2)+2) = i-1; cols(2+3*(i-2)+2) = i
    a(2+3*(i-2)+2) = -1.0
  enddo
  rows(nz) = n-1; cols(nz) = n-1
  a(nz) = 1.0

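  ! MatSetPreallocationCOO() fixes the nonzero pattern from the (rows,cols) pairs;
  ! MatSetValuesCOO() then supplies the numerical values in the same order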
  PetscCallA(MatCreate(PETSC_COMM_SELF,J,ierr))
  PetscCallA(MatSetSizes(J,n,n,n,n,ierr))
  PetscCallA(MatSetType(J,MATSEQAIJ,ierr))
  PetscCallA(MatSetPreallocationCOO(J,nz,rows,cols,ierr))
  PetscCallA(MatSetValuesCOO(J,a,INSERT_VALUES,ierr))

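  ! create the linear solver; KSPSetFromOptions() picks up any -ksp_ and -pc_ options given on
  ! the command line, and KSPSetErrorIfNotConverged() turns a failed solve into an error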
  PetscCallA(KSPCreate(PETSC_COMM_SELF,ksp,ierr))
  PetscCallA(KSPSetErrorIfNotConverged(ksp,PETSC_TRUE,ierr))
  PetscCallA(KSPSetFromOptions(ksp,ierr))
  PetscCallA(KSPSetOperators(ksp,J,J,ierr))

  PetscCallA(KSPSolve(ksp,rhs,solution,ierr))

  ! Keep the same size and nonzero structure of the matrix but change its numerical entries
  do i=2,n-1
    a(2+3*(i-2)+1) = 4.0
  enddo
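  ! only the values array a changes; the (rows,cols) pattern registered with
  ! MatSetPreallocationCOO() is reused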
  PetscCallA(MatSetValuesCOO(J,a,INSERT_VALUES,ierr))

  PetscCallA(KSPSolve(ksp,rhs,solution,ierr))

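  ! free the PETSc objects and the work arrays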
  PetscCallA(KSPDestroy(ksp,ierr))
  PetscCallA(VecDestroy(rhs,ierr))
  PetscCallA(VecDestroy(solution,ierr))
  PetscCallA(MatDestroy(J,ierr))

  DEALLOCATE (rows, cols, a)

  PetscCallA(PetscFinalize(ierr))
end

!/*TEST
!
!   test:
!     requires: defined(PETSC_USE_SINGLE_LIBRARY)
!     nsize: 3
!     filter: sed 's?ATOL?RTOL?g' | grep -v HERMITIAN | grep -v "shared memory" | grep -v "Mat_0"
!     # use the MPI Linear Solver Server
!     args: -n 20 -mpi_linear_solver_server -mpi_linear_solver_server_view -mpi_linear_solver_server_use_shared_memory false
!     # controls for the use of PCMPI on a particular system
!     args: -mpi_linear_solver_server_minimum_count_per_rank 5 -mpi_linear_solver_server_ksp_view
!     # the usual options for the linear solver (in this case using the server)
!     args: -ksp_monitor -ksp_converged_reason -ksp_view
!
!   test:
!     suffix: 2
!     requires: defined(PETSC_USE_SINGLE_LIBRARY)
!     nsize: 3
!     filter: sed 's?ATOL?RTOL?g' | grep -v HERMITIAN | grep -v "shared memory" | grep -v "Mat_0"
!     # use the MPI Linear Solver Server
!     args: -n 20 -mpi_linear_solver_server -mpi_linear_solver_server_view -mpi_linear_solver_server_use_shared_memory false
!     # controls for the use of PCMPI on a particular system
!     args: -mpi_linear_solver_server_ksp_view
!     # the usual options for the linear solver (in this case using the server)
!     args: -ksp_monitor -ksp_converged_reason -ksp_view
!
!TEST*/