In [1]:
## Lecture 13
using LinearAlgebra
""" Compute a rotation matrix such that
  G = Givens(v, i, j, sigma=1)
  returns an orthogonal matrix G
  where
  (G*v)[j] = 0
  and
  (G*v)[i] has sign sigma.
  Also, the vector is modified so that we return G*v
"""
function Givens!(v::AbstractVector, i::Int, j::Int, sigma::Int=1)
  @assert i != j "the indices must be disjoint"
  v1 = v[i]
  v2 = v[j]
  d = sigma*sqrt(v1^2 + v2^2)
  c = v1/d
  s = v2/d
  G = Matrix(1.0I, length(v), length(v))
  G[i,i] = c
  G[j,j] = c
  G[i,j] = s
  G[j,i] = -s
  v[i] = d
  v[j] = 0
  return G, v
end
v = randn(10)
G, Gv = Givens!(copy(v), 9, 10)
@show norm(G*v - Gv)
norm(G * v - Gv) = 0.0
Out[1]:
0.0
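As an optional cross-check (not part of the lecture code), the LinearAlgebra standard library has a built-in givens routine that constructs the same rotation, though its sign convention for the surviving entry may differ from ours. A minimal sketch reusing the v and Gv from above:

## Optional cross-check against LinearAlgebra.givens (sign conventions may differ).
Gb, r = givens(v, 9, 10)           # built-in: (Gb*v)[9] = r, (Gb*v)[10] = 0
w = Gb*v
@show abs(w[10])                   # entry 10 is zeroed, up to roundoff
@show abs(abs(w[9]) - abs(Gv[9]))  # entry 9 agrees up to sign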
In [2]:
##
""" Compute a QR factorization via a sequence of Givens
rotations. This is a pedagogical routine. It can be
greatly improved!
"""
function givens_qr!(A)
  m,n = size(A)
  Qt = Matrix(1.0I, m, m)
  for j=1:n
    v = @view A[:,j]          # column j; entries below the diagonal get zeroed
    Z = @view A[:,j+1:end]    # trailing columns, updated by the same rotations
    for i=m:-1:(1+j)
      # zero entry i by rotating it into entry i-1
      G = Givens!(v, i-1, i)[1]
      Qt = G*Qt               # accumulate the product of rotations, Q' = G_k...G_1
      Z .= G*Z
    end
  end
  return copy(Qt'), A
end
A = randn(10,4)
Q, R = givens_qr!(copy(A))
Out[2]:
([-0.2729484024675731 0.3046297375908751 … 0.0 0.0; 0.10014714338665962 -0.03060058022064607 … 0.0 0.0; … ; 0.17663773582375306 0.7133292981669226 … -0.3568560358041533 -0.1846858811784016; -0.04142046623944052 -0.2712796623235631 … -0.05074926778342423 0.13303813087298258], [2.1203565824424198 -1.5816214192268245 0.4764407470496841 0.44714495546503974; 0.0 1.3209718630325118 -0.3522812152903284 0.7142379017861858; … ; 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0])
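The docstring above notes that this routine can be greatly improved. One standard improvement is to apply each rotation only to the two rows it touches rather than forming dense m-by-m products; a sketch of that idea follows (givens_qr_fast! is a name chosen here for illustration, not from the lecture):

""" Same algorithm, but each 2x2 rotation is applied only to
rows i-1 and i, avoiding the dense m-by-m matrix products. """
function givens_qr_fast!(A)
  m,n = size(A)
  Q = Matrix(1.0I, m, m)          # accumulate Q directly
  for j=1:n
    for i=m:-1:(1+j)
      v1, v2 = A[i-1,j], A[i,j]
      d = sqrt(v1^2 + v2^2)
      d == 0 && continue          # nothing to rotate
      c, s = v1/d, v2/d
      for k=j:n                   # update rows i-1 and i of A
        a1, a2 = A[i-1,k], A[i,k]
        A[i-1,k] =  c*a1 + s*a2
        A[i,k]   = -s*a1 + c*a2
      end
      for k=1:m                   # accumulate Q <- Q*G' in columns i-1 and i
        q1, q2 = Q[k,i-1], Q[k,i]
        Q[k,i-1] =  c*q1 + s*q2
        Q[k,i]   = -s*q1 + c*q2
      end
    end
  end
  return Q, A
end
Q2, R2 = givens_qr_fast!(copy(A))
@show norm(Q2*R2 - A)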
In [3]:
## Givens least squares
function least_squares_givens(A,b)
  m,n = size(A)
  Q,R = givens_qr!(copy(A))
  bt = Q'*b                  # transform the right-hand side
  x = R[1:n,1:n]\bt[1:n]     # back-substitute with the upper-triangular block
  return x
end
A = randn(10,4)
b = randn(10)
xtrue = A\b
norm(xtrue - least_squares_givens(A,b))
Out[3]:
5.708584620135474e-16
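For comparison (not in the lecture code), the built-in Householder QR factorization gives the same least-squares solution; a quick check with the A, b, and xtrue above:

## Optional comparison with the built-in QR factorization.
F = qr(A)                  # Householder QR from LinearAlgebra
x_qr = F \ b               # least-squares solve via the factorization
@show norm(x_qr - xtrue)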
In [4]:
##