# ssor.jl

export ssor

using SparseArrays: SparseMatrixCSC
using LinearAlgebra: diag, norm
  2. """
  3. x,flag,err,iter,resvec = ssor(A::SparseMatrixCSC, b::Vector; omega::Real=2/3, tol::Real=1e-2, maxIter::Int=1,out::Int=0)
  4. Symmetric successive over-relaxation applied to the linear system A*x = b.
  5. !! Note that upper/lower triangular matrix are never build to save memory !!
  6. Input:
  7. A - sparse matrix CSC
  8. b - right hand side vector
  9. omega - relaxation parameter (omega=1.0: Gauss Seidel)
  10. tol - error tolerance (default=1e-2)
  11. maxIter - maximum number of iterations (default=1)
  12. out - flag for output (-1 : no output, 0 : only errors, 1 : final status, 2: error at each iteration)
  13. Output:
  14. x - solution
  15. flag - exit flag ( 0 : desired tolerance achieved,
  16. -1 : maxIter reached without converging)
  17. err - error, i.e., norm(A*x-b)/norm(b)
  18. iter - number of iterations
  19. resvec - error at each iteration
  20. """
function ssor(A::SparseMatrixCSC, b::Vector; tol::Real=1e-2, maxIter::Int=1, omega::Real=1.0, out::Int=0)
    n      = length(b)
    OmInvD = omega ./ diag(A)      # omega times the inverse of the diagonal D
    x      = zeros(eltype(A), n)
    r      = copy(b)               # residual r = b - A*x, with x starting at zero
    rnorm0 = norm(b)
    resvec = zeros(maxIter)
    res    = 0.0
    flag   = -1
    iter   = 0

    for outer iter = 1:maxIter
        # forward sweep: relax the unknowns column by column
        for j = 1:n
            dx = r[j] * OmInvD[j]
            # subtract dx times column j of A so r stays consistent with the new x[j]
            for gidx = A.colptr[j] : A.colptr[j+1]-1
                r[A.rowval[gidx]] -= A.nzval[gidx] * dx
            end
            x[j] += dx
        end
        # backward sweep: same relaxation in reverse order
        for j = n:-1:1
            dx = r[j] * OmInvD[j]
            for gidx = A.colptr[j] : A.colptr[j+1]-1
                r[A.rowval[gidx]] -= A.nzval[gidx] * dx
            end
            x[j] += dx
        end

        res          = norm(A*x - b) / rnorm0
        resvec[iter] = res
        if out == 2
            println("Iteration $iter, relative residual = $res")
        end
        if res < tol
            flag = 0
            break
        end
    end

    if flag == -1 && out >= 0
        println("ssor did not reach tol=$tol within maxIter=$maxIter iterations (relative residual = $res)")
    elseif flag == 0 && out >= 1
        println("ssor converged in $iter iterations to relative residual = $res")
    end

    return x, flag, res, iter, resvec[1:iter]
end
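
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): builds an illustrative
# symmetric positive definite test system and applies ssor to it. The problem
# size, shift value, and tolerances below are assumptions chosen only to make
# the example self-contained.
using SparseArrays, LinearAlgebra

n = 100
# 1D Laplacian with a small diagonal shift: strictly diagonally dominant, SPD
A = spdiagm(-1 => -ones(n-1), 0 => 2.1*ones(n), 1 => -ones(n-1))
b = randn(n)

x, flag, err, iter, resvec = ssor(A, b; omega=1.0, tol=1e-8, maxIter=200, out=1)
# flag == 0 means the relative residual norm(A*x - b)/norm(b) dropped below tol
println(norm(A*x - b) / norm(b))
# ---------------------------------------------------------------------------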