In [1]:
include("lecture20-linsys_sor.jl")
include("lecture20-linsys_jacobi.jl")
using LinearAlgebra
using Printf
using SparseArrays
using Plots
using LaTeXStrings
pyplot(legend=false)

"""
    numgridS(n)

Number the interior points of an `n × n` square grid in column-major
order, leaving the boundary ring set to zero — a minimal replacement for
MATLAB's `numgrid('S', n)`. Returns an `n × n` `Int` matrix whose interior
entries run `1:(n-2)^2`.
"""
function numgridS(n)
  A = zeros(Int, n, n)
  for c = 2:n-1, r = 2:n-1
    # interior point (r, c) gets its sequential column-major index:
    # r-1 counts down the current interior column, (c-2)*(n-2) skips
    # the interior columns to its left
    A[r, c] = (r - 1) + (c - 2) * (n - 2)
  end
  return A
end

#fdlaplacian from (http://nbviewer.jupyter.org/url/homepages.warwick.ac.uk/staff/C.Ortner/julia/Laplacian.ipynb)
# with modification
"""
    fdlaplacian(G)

Build the 5-point finite-difference Laplacian restricted to the nonzero
(numbered interior) points of the grid matrix `G`, as produced by
`numgridS`. Returns a sparse matrix of size `k × k`, where `k` is the
number of nonzeros in `G`.

Modernized from the Julia 0.6 original (`find`, tuple-form `spdiagm`,
`speye` were removed in Julia 1.0).
"""
function fdlaplacian(G)
  # read info about G
  M, N = size(G)
  # linear indices of the numbered grid points, in column-major order
  # (matches the ordering produced by numgridS)
  nonz = findall(!iszero, vec(G))
  # 1D second-difference matrix of size m: tridiagonal [-1, 2, -1]
  fdl1(m) = spdiagm(-1 => -ones(m-1), 0 => 2*ones(m), 1 => -ones(m-1))
  # 2D Laplacian on the full M×N grid via Kronecker products with identities
  A = kron(sparse(1.0I, M, M), fdl1(N)) + kron(fdl1(M), sparse(1.0I, N, N))
  # return the restriction to the coloured grid points
  return A[nonz, nonz]
end
In [2]:
## Does Gauss-Seidel always converge faster than Jacobi?
A = fdlaplacian(numgridS(22))
n = size(A,1)
b = ones(n);
x1,hist1 = linsys_jacobi(A,b)
x2,hist2 = linsys_sor(A,b,1,1e-8,1000)
 iter= 
In [3]:
# Rebuild the 400×400 system matrix and confirm its dimensions.
A = fdlaplacian(numgridS(22))
@show size(A)
size(A) = (400, 400)
In [4]:
##
x1,hist1 = linsys_jacobi(A,b)
hist1_toplot = hist1[find(hist1)]
h = plot(hist1_toplot,yscale=:log10,linewidth=2,color=:red)
xlabel!(h,"Iteration")
ylabel!(h,"Residual norm upper-bound")
omegas = [0.1,0.5,0.9,1,1.2,1.5,1.8,1.9,1.95]
for i = 1:length(omegas)
  x,hist = linsys_sor(A,b,omegas[i])
  hist_toplot = hist[find(hist)]
  plot!(h,hist_toplot,yscale=:log10,color=:black)
  om = omegas[i]
  annotate!(h,[(length(hist),hist[end],
  text(join([L"\omega =",@sprintf("%.2f",omegas[i])]),12,:black,:left))])
  sleep(2)
  display(h)
end
 iter=       1  diff= 2.5e-01  res= 8.6e-01  time=    0.0
 iter=       2  diff= 2.5e-01  res= 8.4e-01  time=    0.0
 iter=       3  diff= 2.5e-01  res= 8.3e-01  time=    0.0
 iter=       4  diff= 2.5e-01  res= 8.2e-01  time=    0.0
 iter=       5  diff= 2.5e-01  res= 8.0e-01  time=    0.0
 iter=       6  diff= 2.5e-01  res= 7.9e-01  time=    0.0
 iter=       7  diff= 2.5e-01  res= 7.8e-01  time=    0.0
 iter=       8  diff= 2.5e-01  res= 7.7e-01  time=    0.0
 iter=       9  diff= 2.5e-01  res= 7.6e-01  time=    0.0
 iter=      10  diff= 2.5e-01  res= 7.5e-01  time=    0.0
 iter=      20  diff= 2.5e-01  res= 6.5e-01  time=    0.0
 iter=      30  diff= 2.4e-01  res= 5.8e-01  time=    0.0
 iter=      40  diff= 2.3e-01  res= 5.2e-01  time=    0.0
 iter=      50  diff= 2.1e-01  res= 4.6e-01  time=    0.0
 iter=      60  diff= 1.9e-01  res= 4.1e-01  time=    0.0
 iter=      70  diff= 1.7e-01  res= 3.7e-01  time=    0.0
 iter=      80  diff= 1.6e-01  res= 3.3e-01  time=    0.0
 iter=      90  diff= 1.4e-01  res= 2.9e-01  time=    0.0
 iter=     100  diff= 1.3e-01  res= 2.6e-01  time=    0.0
 iter=     200  diff= 4.1e-02  res= 8.6e-02  time=    0.0
 iter=     300  diff= 1.3e-02  res= 2.8e-02  time=    0.0
 iter=     400  diff= 4.3e-03  res= 9.1e-03  time=    0.0
 iter=     500  diff= 1.4e-03  res= 2.9e-03  time=    0.0
 iter=     600  diff= 4.6e-04  res= 9.6e-04  time=    0.0
 iter=     700  diff= 1.5e-04  res= 3.1e-04  time=    0.0
 iter=     800  diff= 4.9e-05  res= 1.0e-04  time=    0.0
 iter=     900  diff= 1.6e-05  res= 3.3e-05  time=    0.0
 iter=    1000  diff= 5.1e-06  res= 1.1e-05  time=    0.0
 iter= 
In [5]:
A = full(fdlaplacian(numgridS(4)))
Out[5]:
4×4 Array{Float64,2}:
  4.0  -1.0  -1.0   0.0
 -1.0   4.0   0.0  -1.0
 -1.0   0.0   4.0  -1.0
  0.0  -1.0  -1.0   4.0
In [6]:
@show A
A = [4.0 -1.0 -1.0 0.0; -1.0 4.0 0.0 -1.0; -1.0 0.0 4.0 -1.0; 0.0 -1.0 -1.0 4.0]
Out[6]:
4×4 Array{Float64,2}:
  4.0  -1.0  -1.0   0.0
 -1.0   4.0   0.0  -1.0
 -1.0   0.0   4.0  -1.0
  0.0  -1.0  -1.0   4.0
In [7]: