Julia 0.7 update #205

Merged · 11 commits · Jul 11, 2018
12 changes: 6 additions & 6 deletions .travis.yml
@@ -1,14 +1,14 @@
 language: julia
 os:
-  - linux
+  - linux
 julia:
-  - 0.6
-  - nightly
+  - 0.7
+  - nightly
 matrix:
   allow_failures:
     - julia: nightly
 notifications:
-  email: false
+  email: false
 after_success:
-  - julia -e 'Pkg.add("Coverage"); cd(Pkg.dir("IterativeSolvers")); using Coverage; Coveralls.submit(process_folder());'
-  - julia -e 'Pkg.add("Documenter"); cd(Pkg.dir("IterativeSolvers")); include(joinpath("docs", "make.jl"))'
+  - julia -e 'Pkg.add("Coverage"); cd(Pkg.dir("IterativeSolvers")); using Coverage; Coveralls.submit(process_folder());'
+  - julia -e 'Pkg.add("Documenter"); cd(Pkg.dir("IterativeSolvers")); include(joinpath("docs", "make.jl"))'
4 changes: 2 additions & 2 deletions REQUIRE
@@ -1,3 +1,3 @@
-julia 0.6
+julia 0.7
 
-RecipesBase
+RecipesBase 0.3.1
6 changes: 3 additions & 3 deletions benchmark/advection_diffusion.jl
@@ -19,7 +19,7 @@ function advection_dominated(;N = 50, β = 1000.0)
     Δ = laplace_matrix(Float64, N, 3) ./ -h^2
 
     # And the dx bit.
-    ∂x_1d = spdiagm((fill(-β / 2h, N - 1), fill(β / 2h, N - 1)), (-1, 1))
+    ∂x_1d = spdiagm(-1 => fill(-β / 2h, N - 1), 1 => fill(β / 2h, N - 1))
     ∂x = kron(speye(N^2), ∂x_1d)
 
     # Final matrix and rhs.
@@ -41,6 +41,6 @@ function laplace_matrix(::Type{T}, n, dims) where T
 end
 
 second_order_central_diff(::Type{T}, dim) where {T} = convert(
-    SparseMatrixCSC{T, Int},
+    SparseMatrixCSC{T, Int},
     SymTridiagonal(fill(2 * one(T), dim), fill(-one(T), dim - 1))
-)
+)
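
For reference (not part of the PR): the `spdiagm` change above reflects the 0.7 API, which takes `offset => values` pairs instead of a tuple of vectors plus a tuple of offsets. A minimal sketch with made-up data:

```julia
# Hypothetical example, not from the PR: build a bidiagonal difference
# matrix with the 0.7 pair-based spdiagm signature.
using SparseArrays

n = 4
# Julia 0.6 style (removed in 0.7):
#   spdiagm((fill(-1.0, n - 1), fill(1.0, n - 1)), (-1, 1))
D = spdiagm(-1 => fill(-1.0, n - 1), 1 => fill(1.0, n - 1))
```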
14 changes: 7 additions & 7 deletions benchmark/benchmark-hessenberg.jl
@@ -16,24 +16,24 @@ function backslash_versus_givens(; n = 1_000, ms = 10 : 10 : 100)
         println(m)
 
         H = zeros(m + 1, m)
 
         # Create an orthonormal basis for the Krylov subspace
         V = rand(n, m + 1)
         V[:, 1] /= norm(V[:, 1])
 
         for i = 1 : m
             # Expand
             V[:, i + 1] = A * V[:, i]
 
             # Orthogonalize
             H[1 : i, i] = view(V, :, 1 : i)' * V[:, i + 1]
             V[:, i + 1] -= view(V, :, 1 : i) * H[1 : i, i]
 
             # Re-orthogonalize
             update = view(V, :, 1 : i)' * V[:, i + 1]
             H[1 : i, i] += update
             V[:, i + 1] -= view(V, :, 1 : i) * update
 
             # Normalize
             H[i + 1, i] = norm(V[:, i + 1])
             V[:, i + 1] /= H[i + 1, i]
@@ -43,11 +43,11 @@ function backslash_versus_givens(; n = 1_000, ms = 10 : 10 : 100)
         rhs = [i == 1 ? 1.0 : 0.0 for i = 1 : size(H, 1)]
 
         # Run the benchmark
-        results["givens_qr"][m] = @benchmark A_ldiv_B!(IterativeSolvers.Hessenberg(myH), myRhs) setup = (myH = copy($H); myRhs = copy($rhs))
+        results["givens_qr"][m] = @benchmark ldiv!(IterativeSolvers.Hessenberg(myH), myRhs) setup = (myH = copy($H); myRhs = copy($rhs))
         results["backslash"][m] = @benchmark $H \ $rhs
     end
 
     return results
 end
 
-end
+end
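
For reference (not part of the PR): `A_ldiv_B!` is spelled `ldiv!` on 0.7 and lives in `LinearAlgebra`, as does `lu!` (formerly `lufact!`). A minimal sketch on a hypothetical dense system:

```julia
# Hypothetical example, not from the PR: solve F \ b in place with the
# 0.7 names (ldiv! replaces A_ldiv_B!, lu! replaces lufact!).
using LinearAlgebra

F = lu!(rand(4, 4) + 4I)   # in-place LU of a diagonally dominant matrix
b = rand(4)
ldiv!(F, b)                # overwrites b with the solution
```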
6 changes: 3 additions & 3 deletions benchmark/benchmark-linear-systems.jl
@@ -1,6 +1,6 @@
 module LinearSystemsBench
 
-import Base.A_ldiv_B!, Base.\
+import Base.ldiv!, Base.\
 
 using BenchmarkTools
 using IterativeSolvers
@@ -12,7 +12,7 @@ struct DiagonalPreconditioner{T}
     diag::Vector{T}
 end
 
-function A_ldiv_B!(y::AbstractVector{T}, A::DiagonalPreconditioner{T}, b::AbstractVector{T}) where T
+function ldiv!(y::AbstractVector{T}, A::DiagonalPreconditioner{T}, b::AbstractVector{T}) where T
     for i = 1 : length(b)
         @inbounds y[i] = A.diag[i] \ b[i]
     end
@@ -34,7 +34,7 @@ function cg(; n = 1_000_000, tol = 1e-6, maxiter::Int = 200)
println("Symmetric positive definite matrix of size ", n)
println("Eigenvalues in interval [0.01, 4.01]")
println("Tolerance = ", tol, "; max #iterations = ", maxiter)

# Dry run
initial = rand(n)
IterativeSolvers.cg!(copy(initial), A, b, Pl = P, maxiter = maxiter, tol = tol, log = false)
Expand Down
4 changes: 2 additions & 2 deletions benchmark/benchmark-svd-florida.jl
@@ -78,10 +78,10 @@ function runbenchmark(filename, benchmarkfilename)
     #Choose the same normalized unit vector to start with
     q = randn(n)
     eltype(A) <: Complex && (q += im*randn(n))
-    scale!(q, inv(norm(q)))
+    rmul!(q, inv(norm(q)))
     qm = randn(m)
     eltype(A) <: Complex && (q += im*randn(m))
-    scale!(qm, inv(norm(qm)))
+    rmul!(qm, inv(norm(qm)))
 
     #Number of singular values to request
     nv = min(m, n, 10)
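
For reference (not part of the PR): `scale!(q, α)` from 0.6 becomes `rmul!(q, α)` in `LinearAlgebra` on 0.7. A minimal sketch:

```julia
# Hypothetical example, not from the PR: normalize a vector in place
# with the 0.7 name rmul! (formerly scale!).
using LinearAlgebra

q = randn(5)
rmul!(q, inv(norm(q)))
@assert norm(q) ≈ 1
```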
2 changes: 1 addition & 1 deletion docs/src/getting_started.md
@@ -25,7 +25,7 @@ Rather than constructing an explicit matrix `A` of the type `Matrix` or `SparseM
 For matrix-free types of `A` the following interface is expected to be defined:
 
 - `A*v` computes the matrix-vector product on a `v::AbstractVector`;
-- `A_mul_B!(y, A, v)` computes the matrix-vector product on a `v::AbstractVector` in-place;
+- `mul!(y, A, v)` computes the matrix-vector product on a `v::AbstractVector` in-place;
 - `eltype(A)` returns the element type implicit in the equivalent matrix representation of `A`;
 - `size(A, d)` returns the nominal dimensions along the `d`th axis in the equivalent matrix representation of `A`.
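
For reference (not part of the PR): a minimal sketch of a matrix-free type satisfying the interface above on 0.7. The `ScaledIdentity` operator and all its details are illustrative assumptions, not taken from IterativeSolvers.jl:

```julia
# Hypothetical example, not from the PR: a matrix-free operator
# implementing A*v, mul!, eltype and size as documented above.
using LinearAlgebra

struct ScaledIdentity
    n::Int
    σ::Float64
end

Base.eltype(::ScaledIdentity) = Float64
Base.size(A::ScaledIdentity, d::Int) = A.n   # square operator

function LinearAlgebra.mul!(y::AbstractVector, A::ScaledIdentity, v::AbstractVector)
    y .= A.σ .* v   # apply σI without ever forming a matrix
    return y
end

Base.:*(A::ScaledIdentity, v::AbstractVector) = mul!(similar(v), A, v)
```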
2 changes: 1 addition & 1 deletion docs/src/iterators.md
@@ -43,7 +43,7 @@ end
 @show norm(b1 - A * x) / norm(b1)
 
 # Copy the next right-hand side into the iterable
-copy!(my_iterable.b, b2)
+copyto!(my_iterable.b, b2)
 
 for item in my_iterable
     println("Iteration for rhs 2")
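
For reference (not part of the PR): `Base.copy!` on arrays was renamed `copyto!` in 0.7; the in-place semantics are unchanged. A minimal sketch:

```julia
# Hypothetical example, not from the PR: copy one array into another
# in place using the 0.7 name copyto! (formerly copy!).
buf = zeros(3)
copyto!(buf, [1.0, 2.0, 3.0])   # buf now holds [1.0, 2.0, 3.0]
```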
10 changes: 5 additions & 5 deletions docs/src/preconditioning.md
@@ -2,13 +2,13 @@

Many iterative solvers have the option to provide left and right preconditioners (`Pl` and `Pr` resp.) in order to speed up convergence or prevent stagnation. They transform a problem $Ax = b$ into a better conditioned system $(P_l^{-1}AP_r^{-1})y = P_l^{-1}b$, where $x = P_r^{-1}y$.

These preconditioners should support the operations
These preconditioners should support the operations

- `A_ldiv_B!(y, P, x)` computes `P \ x` in-place of `y`;
- `A_ldiv_B!(P, x)` computes `P \ x` in-place of `x`;
- `ldiv!(y, P, x)` computes `P \ x` in-place of `y`;
- `ldiv!(P, x)` computes `P \ x` in-place of `x`;
- and `P \ x`.

If no preconditioners are passed to the solver, the method will default to
If no preconditioners are passed to the solver, the method will default to

```julia
Pl = Pr = IterativeSolvers.Identity()
@@ -18,4 +18,4 @@ Pl = Pr = IterativeSolvers.Identity()
 IterativeSolvers.jl itself does not provide any other preconditioners besides `Identity()`, but recommends the following external packages:
 
 - [ILU.jl](https://github.com/haampie/ILU.jl) for incomplete LU decompositions (using drop tolerance);
-- [IncompleteSelectedInversion.jl](https://github.com/ettersi/IncompleteSelectedInversion.jl) for incomplete LDLt decompositions.
+- [IncompleteSelectedInversion.jl](https://github.com/ettersi/IncompleteSelectedInversion.jl) for incomplete LDLt decompositions.
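
For reference (not part of the PR): a minimal sketch of a custom preconditioner supporting the three operations above under their 0.7 names; the `JacobiPrec` type is an illustrative assumption:

```julia
# Hypothetical example, not from the PR: a Jacobi (diagonal)
# preconditioner implementing ldiv!(y, P, x), ldiv!(P, x) and P \ x.
using LinearAlgebra

struct JacobiPrec{T}
    diag::Vector{T}
end

LinearAlgebra.ldiv!(y::AbstractVector, P::JacobiPrec, x::AbstractVector) = (y .= x ./ P.diag; y)
LinearAlgebra.ldiv!(P::JacobiPrec, x::AbstractVector) = (x ./= P.diag; x)
Base.:\(P::JacobiPrec, x::AbstractVector) = x ./ P.diag
```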
58 changes: 30 additions & 28 deletions src/bicgstabl.jl
@@ -1,6 +1,6 @@
 export bicgstabl, bicgstabl!, bicgstabl_iterator, bicgstabl_iterator!, BiCGStabIterable
 
-import Base: start, next, done
+using Printf
+import Base: iterate
 
 mutable struct BiCGStabIterable{precT, matT, solT, vecT <: AbstractVector, smallMatT <: AbstractMatrix, realT <: Real, scalarT <: Number}
     A::matT
@@ -36,23 +36,23 @@ function bicgstabl_iterator!(x, A, b, l::Int = 2;

     # Large vectors.
     r_shadow = rand(T, n)
-    rs = Matrix{T}(n, l + 1)
+    rs = Matrix{T}(undef, n, l + 1)
     us = zeros(T, n, l + 1)
 
     residual = view(rs, :, 1)
 
     # Compute the initial residual rs[:, 1] = b - A * x
     # Avoid computing A * 0.
     if initial_zero
-        copy!(residual, b)
+        copyto!(residual, b)
     else
-        A_mul_B!(residual, A, x)
+        mul!(residual, A, x)
         residual .= b .- residual
         mv_products += 1
     end
 
     # Apply the left preconditioner
-    A_ldiv_B!(Pl, residual)
+    ldiv!(Pl, residual)
 
     γ = zeros(T, l)
     ω = σ = one(T)
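
For reference (not part of the PR): the `rs` allocation changes because 0.7 requires the `undef` sentinel when constructing uninitialized arrays. A minimal sketch:

```julia
# Hypothetical example, not from the PR: allocate an uninitialized
# matrix with the 0.7 undef sentinel.
# Julia 0.6 style (removed):  Matrix{Float64}(3, 4)
rs = Matrix{Float64}(undef, 3, 4)
```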
@@ -76,35 +76,37 @@ end
 @inline start(::BiCGStabIterable) = 0
 @inline done(it::BiCGStabIterable, iteration::Int) = it.mv_products ≥ it.max_mv_products || converged(it)
 
-function next(it::BiCGStabIterable, iteration::Int)
+function iterate(it::BiCGStabIterable, iteration::Int=start(it))
+    if done(it, iteration) return nothing end
+
     T = eltype(it.x)
     L = 2 : it.l + 1
 
     it.σ = -it.ω * it.σ
 
     ## BiCG part
     for j = 1 : it.l
         ρ = dot(it.r_shadow, view(it.rs, :, j))
         β = ρ / it.σ
 
         # us[:, 1 : j] .= rs[:, 1 : j] - β * us[:, 1 : j]
         it.us[:, 1 : j] .= it.rs[:, 1 : j] .- β .* it.us[:, 1 : j]
 
         # us[:, j + 1] = Pl \ (A * us[:, j])
         next_u = view(it.us, :, j + 1)
-        A_mul_B!(next_u, it.A, view(it.us, :, j))
-        A_ldiv_B!(it.Pl, next_u)
+        mul!(next_u, it.A, view(it.us, :, j))
+        ldiv!(it.Pl, next_u)
 
         it.σ = dot(it.r_shadow, next_u)
         α = ρ / it.σ
 
         it.rs[:, 1 : j] .-= α .* it.us[:, 2 : j + 1]
 
         # rs[:, j + 1] = Pl \ (A * rs[:, j])
         next_r = view(it.rs, :, j + 1)
-        A_mul_B!(next_r, it.A , view(it.rs, :, j))
-        A_ldiv_B!(it.Pl, next_r)
+        mul!(next_r, it.A , view(it.rs, :, j))
+        ldiv!(it.Pl, next_r)
 
         # x = x + α * us[:, 1]
         it.x .+= α .* view(it.us, :, 1)
     end
@@ -113,13 +115,13 @@ function next(it::BiCGStabIterable, iteration::Int)
     it.mv_products += 2 * it.l
 
     ## MR part
 
     # M = rs' * rs
-    Ac_mul_B!(it.M, it.rs, it.rs)
+    mul!(it.M, adjoint(it.rs), it.rs)
 
-    # γ = M[L, L] \ M[L, 1]
-    F = lufact!(view(it.M, L, L))
-    A_ldiv_B!(it.γ, F, view(it.M, L, 1))
+    # γ = M[L, L] \ M[L, 1]
+    F = lu!(view(it.M, L, L))
+    ldiv!(it.γ, F, view(it.M, L, 1))
 
     # This could even be BLAS 3 when combined.
     BLAS.gemv!('N', -one(T), view(it.us, :, L), it.γ, one(T), view(it.us, :, 1))
@@ -155,9 +157,9 @@ bicgstabl(A, b, l = 2; kwargs...) = bicgstabl!(zerox(A, b), A, b, l; initial_zer
 - `max_mv_products::Int = size(A, 2)`: maximum number of matrix vector products.
   For BiCGStab(l) this is a less dubious term than "number of iterations";
 - `Pl = Identity()`: left preconditioner of the method;
-- `tol::Real = sqrt(eps(real(eltype(b))))`: tolerance for stopping condition `|r_k| / |r_0| ≤ tol`.
-  Note that (1) the true residual norm is never computed during the iterations,
-  only an approximation; and (2) if a preconditioner is given, the stopping condition is based on the
+- `tol::Real = sqrt(eps(real(eltype(b))))`: tolerance for stopping condition `|r_k| / |r_0| ≤ tol`.
+  Note that (1) the true residual norm is never computed during the iterations,
+  only an approximation; and (2) if a preconditioner is given, the stopping condition is based on the
   *preconditioned residual*.

# Return values
@@ -184,10 +186,10 @@ function bicgstabl!(x, A, b, l = 2;

     # This doesn't yet make sense: the number of iters is smaller.
     log && reserve!(history, :resnorm, max_mv_products)
 
     # Actually perform CG
     iterable = bicgstabl_iterator!(x, A, b, l; Pl = Pl, tol = tol, max_mv_products = max_mv_products, kwargs...)
 
     if log
         history.mvps = iterable.mv_products
     end
@@ -200,10 +202,10 @@ end
         end
         verbose && @printf("%3d\t%1.2e\n", iteration, iterable.residual)
     end
 
     verbose && println()
     log && setconv(history, converged(iterable))
     log && shrink!(history)
 
     log ? (iterable.x, history) : iterable.x
 end
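
For reference (not part of the PR): the change from `start`/`next`/`done` to `iterate` follows the 0.7 iteration protocol, where a single `iterate(itr, state)` returning either `nothing` or a `(value, state)` tuple replaces all three functions. A minimal sketch of the migration pattern used above, with a made-up iterator:

```julia
# Hypothetical example, not from the PR: migrating an iterator from the
# 0.6 protocol to the 0.7 protocol.
struct CountTo
    n::Int
end

# Julia 0.6 style (removed in 0.7):
#   Base.start(c::CountTo) = 1
#   Base.done(c::CountTo, i) = i > c.n
#   Base.next(c::CountTo, i) = (i, i + 1)

# Julia 0.7 style, folding the start state into a default argument,
# as the PR does for BiCGStabIterable:
function Base.iterate(c::CountTo, i::Int = 1)
    i > c.n && return nothing   # was done()
    return (i, i + 1)           # was next()
end

@assert collect(CountTo(3)) == [1, 2, 3]
```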