From 0b45919f206058afbdf07f2ce2edfbfc30f158fc Mon Sep 17 00:00:00 2001 From: Jutho Haegeman Date: Thu, 14 Nov 2024 22:20:22 +0100 Subject: [PATCH 1/8] use MinimalVec from VectorInterface --- test/testsetup.jl | 73 ++++------------------------------------------- 1 file changed, 5 insertions(+), 68 deletions(-) diff --git a/test/testsetup.jl b/test/testsetup.jl index 33ff177..43deaec 100644 --- a/test/testsetup.jl +++ b/test/testsetup.jl @@ -30,83 +30,20 @@ function buildrealmap(A, B) return x -> A * x + B * conj(x) end -# Minimal vector type -# ------------------- -""" - MinimalVec{T<:Number,IP} - -Minimal interface for a vector. Can support either in-place assignments or not, depending on -`IP=true` or `IP=false`. -""" -struct MinimalVec{IP,V<:AbstractVector} - vec::V - function MinimalVec{IP}(vec::V) where {IP,V} - return new{IP,V}(vec) - end -end -const InplaceVec{V} = MinimalVec{true,V} -const OutplaceVec{V} = MinimalVec{false,V} - -isinplace(::Type{MinimalVec{IP,V}}) where {V,IP} = IP -isinplace(v::MinimalVec) = isinplace(typeof(v)) - -VI.scalartype(::Type{<:MinimalVec{IP,V}}) where {IP,V} = scalartype(V) - -function VI.zerovector(v::MinimalVec, S::Type{<:Number}) - return MinimalVec{isinplace(v)}(zerovector(v.vec, S)) -end -function VI.zerovector!(v::InplaceVec{V}) where {V} - zerovector!(v.vec) - return v -end -VI.zerovector!!(v::MinimalVec) = isinplace(v) ? zerovector!(v) : zerovector(v) - -function VI.scale(v::MinimalVec, α::Number) - return MinimalVec{isinplace(v)}(scale(v.vec, α)) -end -function VI.scale!(v::InplaceVec{V}, α::Number) where {V} - scale!(v.vec, α) - return v -end -function VI.scale!!(v::MinimalVec, α::Number) - return isinplace(v) ? scale!(v, α) : scale(v, α) -end -function VI.scale!(w::InplaceVec{V}, v::InplaceVec{W}, α::Number) where {V,W} - scale!(w.vec, v.vec, α) - return w -end -function VI.scale!!(w::MinimalVec, v::MinimalVec, α::Number) - isinplace(w) && return scale!(w, v, α) - return MinimalVec{false}(scale!!(copy(w.vec), v.vec, α)) -end - -function VI.add(y::MinimalVec, x::MinimalVec, α::Number, β::Number) - return MinimalVec{isinplace(y)}(add(y.vec, x.vec, α, β)) -end -function VI.add!(y::InplaceVec{W}, x::InplaceVec{V}, α::Number, β::Number) where {W,V} - add!(y.vec, x.vec, α, β) - return y -end -function VI.add!!(y::MinimalVec, x::MinimalVec, α::Number, β::Number) - return isinplace(y) ? add!(y, x, α, β) : add(y, x, α, β) -end - -VI.inner(x::MinimalVec, y::MinimalVec) = inner(x.vec, y.vec) -VI.norm(x::MinimalVec) = LinearAlgebra.norm(x.vec) - # Wrappers # -------- +using VectorInterface: MinimalSVec, MinimalMVec, MinimalVec # dispatch on val is necessary for type stability function wrapvec(v, ::Val{mode}) where {mode} return mode === :vector ? v : - mode === :inplace ? MinimalVec{true}(v) : - mode === :outplace ? MinimalVec{false}(v) : - mode === :mixed ? MinimalVec{false}(v) : + mode === :inplace ? MinimalMVec(v) : + mode === :outplace ? MinimalSVec(v) : + mode === :mixed ? MinimalSVec(v) : throw(ArgumentError("invalid mode ($mode)")) end function wrapvec2(v, ::Val{mode}) where {mode} - return mode === :mixed ? MinimalVec{true}(v) : wrapvec(v, mode) + return mode === :mixed ? 
MinimalMVec(v) : wrapvec(v, mode) end unwrapvec(v::MinimalVec) = v.vec From bd61266ba504d4b24678a9e7e140c7112c55ff8a Mon Sep 17 00:00:00 2001 From: Jutho Haegeman Date: Thu, 14 Nov 2024 22:23:23 +0100 Subject: [PATCH 2/8] change verbosity behaviour --- ext/KrylovKitChainRulesCoreExt/eigsolve.jl | 18 +++++++++--------- ext/KrylovKitChainRulesCoreExt/linsolve.jl | 2 +- ext/KrylovKitChainRulesCoreExt/svdsolve.jl | 12 ++++++------ test/ad/degenerateeigsolve.jl | 4 ++-- test/ad/eigsolve.jl | 14 ++++++++++---- test/ad/svdsolve.jl | 14 ++++++++++---- 6 files changed, 38 insertions(+), 26 deletions(-) diff --git a/ext/KrylovKitChainRulesCoreExt/eigsolve.jl b/ext/KrylovKitChainRulesCoreExt/eigsolve.jl index ab1dcab..17c7e17 100644 --- a/ext/KrylovKitChainRulesCoreExt/eigsolve.jl +++ b/ext/KrylovKitChainRulesCoreExt/eigsolve.jl @@ -135,7 +135,7 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, b = (zerovector(v), convert(T, Δλ)) else vdΔv = inner(v, Δv) - if alg_rrule.verbosity >= 0 + if alg_primal.verbosity >= 1 gauge = abs(imag(vdΔv)) gauge > alg_primal.tol && @warn "`eigsolve` cotangent for eigenvector $i is sensitive to gauge choice: (|gauge| = $gauge)" @@ -152,9 +152,9 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, return (y1, y2) end end - if info.converged >= i && reverse_info.converged == 0 && alg_rrule.verbosity >= 0 + if info.converged >= i && reverse_info.converged == 0 && alg_primal.verbosity >= 1 @warn "`eigsolve` cotangent linear problem ($i) did not converge, whereas the primal eigenvalue problem did: normres = $(reverse_info.normres)" - elseif abs(w[2]) > alg_rrule.tol && alg_rrule.verbosity >= 0 + elseif abs(w[2]) > alg_rrule.tol && alg_primal.verbosity >= 1 @warn "`eigsolve` cotangent linear problem ($i) returns unexpected result: error = $(w[2])" end ws[i] = w[1] @@ -185,7 +185,7 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, # components along subspace spanned by current eigenvectors tol = alg_primal.tol - if alg_rrule.verbosity >= 0 + if alg_primal.verbosity >= 1 mask = abs.(transpose(vals) .- vals) .< tol gaugepart = VdΔV[mask] - Diagonal(real(diag(VdΔV)))[mask] Δgauge = norm(gaugepart, Inf) @@ -263,7 +263,7 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, return (w′, conj.(vals) .* x) end end - if info.converged >= n && reverse_info.converged < n && alg_rrule.verbosity >= 0 + if info.converged >= n && reverse_info.converged < n && alg_primal.verbosity >= 1 @warn "`eigsolve` cotangent problem did not converge, whereas the primal eigenvalue problem did" end # cleanup and construct final result by renormalising the eigenvectors and explicitly @@ -276,7 +276,7 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, w, x = Ws[ic] factor = 1 / x[i] x[i] = zero(x[i]) - if alg_rrule.verbosity >= 0 + if alg_primal.verbosity >= 1 error = max(norm(x, Inf), abs(rvals[ic] - conj(vals[i]))) error > 10 * tol && @warn "`eigsolve` cotangent linear problem ($i) returns unexpected result: error = $error" @@ -308,7 +308,7 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, # components along subspace spanned by current eigenvectors tol = alg_primal.tol aVdΔV = rmul!(VdΔV - VdΔV', 1 / 2) - if alg_rrule.verbosity >= 0 + if alg_primal.verbosity >= 1 mask = abs.(transpose(vals) .- vals) .< tol gaugepart = view(aVdΔV, mask) gauge = norm(gaugepart, Inf) @@ -366,7 +366,7 @@ function 
compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, return (w′, vals .* x) end end - if info.converged >= n && reverse_info.converged < n && alg_rrule.verbosity >= 0 + if info.converged >= n && reverse_info.converged < n && alg_primal.verbosity >= 1 @warn "`eigsolve` cotangent problem did not converge, whereas the primal eigenvalue problem did" end @@ -380,7 +380,7 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, factor = 1 / x[ic] x[ic] = zero(x[ic]) error = max(norm(x, Inf), abs(rvals[i] - conj(vals[ic]))) - if error > 5 * tol && alg_rrule.verbosity >= 0 + if error > 10 * tol && alg_primal.verbosity >= 1 @warn "`eigsolve` cotangent linear problem ($ic) returns unexpected result: error = $error" end ws[ic] = VectorInterface.add!!(zs[ic], Q(w), -factor) diff --git a/ext/KrylovKitChainRulesCoreExt/linsolve.jl b/ext/KrylovKitChainRulesCoreExt/linsolve.jl index 378348a..0db21ca 100644 --- a/ext/KrylovKitChainRulesCoreExt/linsolve.jl +++ b/ext/KrylovKitChainRulesCoreExt/linsolve.jl @@ -34,7 +34,7 @@ function make_linsolve_pullback(fᴴ, b, a₀, a₁, alg_rrule, construct∂f, x a₁))) ∂b, reverse_info = linsolve(fᴴ, x̄, x̄₀, alg_rrule, conj(a₀), conj(a₁)) - if info.converged > 0 && reverse_info.converged == 0 && alg_rrule.verbosity >= 0 + if info.converged > 0 && reverse_info.converged == 0 && alg_primal.verbosity >= 1 @warn "`linsolve` cotangent problem did not converge, whereas the primal linear problem did: normres = $(reverse_info.normres)" end x∂b = inner(x, ∂b) diff --git a/ext/KrylovKitChainRulesCoreExt/svdsolve.jl b/ext/KrylovKitChainRulesCoreExt/svdsolve.jl index c412adb..a1db890 100644 --- a/ext/KrylovKitChainRulesCoreExt/svdsolve.jl +++ b/ext/KrylovKitChainRulesCoreExt/svdsolve.jl @@ -112,7 +112,7 @@ function compute_svdsolve_pullback_data(Δvals, Δlvecs, Δrvecs, vals, lvecs, r udΔu = inner(u, Δu) vdΔv = inner(v, Δv) if (udΔu isa Complex) || (vdΔv isa Complex) - if alg_rrule.verbosity >= 0 + if alg_primal.verbosity >= 1 gauge = abs(imag(udΔu + vdΔv)) gauge > alg_primal.tol && @warn "`svdsolve` cotangents for singular vectors $i are sensitive to gauge choice: (|gauge| = $gauge)" @@ -131,7 +131,7 @@ function compute_svdsolve_pullback_data(Δvals, Δlvecs, Δrvecs, vals, lvecs, r return (x′, y′) end end - if info.converged >= i && reverse_info.converged == 0 && alg_rrule.verbosity >= 0 + if info.converged >= i && reverse_info.converged == 0 && alg_primal.verbosity >= 0 @warn "`svdsolve` cotangent linear problem ($i) did not converge, whereas the primal eigenvalue problem did: normres = $(reverse_info.normres)" end x = VectorInterface.add!!(x, u, Δs / 2) @@ -162,7 +162,7 @@ function compute_svdsolve_pullback_data(Δvals, Δlvecs, Δrvecs, vals, lvecs, r aVdΔV = rmul!(VdΔV - VdΔV', 1 / 2) tol = alg_primal.tol - if alg_rrule.verbosity >= 0 + if alg_primal.verbosity >= 1 mask = abs.(vals' .- vals) .< tol gaugepart = view(aUdΔU, mask) + view(aVdΔV, mask) gauge = norm(gaugepart, Inf) @@ -227,7 +227,7 @@ function compute_svdsolve_pullback_data(Δvals, Δlvecs, Δrvecs, vals, lvecs, r return (x′, y′, vals .* z) end end - if info.converged >= n && reverse_info.converged < n && alg_rrule.verbosity >= 0 + if info.converged >= n && reverse_info.converged < n && alg_primal.verbosity >= 1 @warn "`svdsolve` cotangent problem did not converge, whereas the primal singular value problem did" end @@ -236,13 +236,13 @@ function compute_svdsolve_pullback_data(Δvals, Δlvecs, Δrvecs, vals, lvecs, r for i in 1:n x, y, z = Ws[i] _, ic = findmax(abs, z) - if ic != i + if ic != i 
&& alg_primal.verbosity >= 1 @warn "`svdsolve` cotangent linear problem ($ic) returns unexpected result" end factor = 1 / z[ic] z[ic] = zero(z[ic]) error = max(norm(z, Inf), abs(rvals[i] - vals[ic])) - if error > 5 * tol && alg_rrule.verbosity >= 0 + if error > 10 * tol && alg_primal.verbosity >= 1 @warn "`svdsolve` cotangent linear problem ($ic) returns unexpected result: error = $error vs tol = $tol" end xs[ic] = VectorInterface.add!!(xs[ic], x, -factor) diff --git a/test/ad/degenerateeigsolve.jl b/test/ad/degenerateeigsolve.jl index af68c7c..8be7185 100644 --- a/test/ad/degenerateeigsolve.jl +++ b/test/ad/degenerateeigsolve.jl @@ -94,8 +94,8 @@ end tol = 2 * N^2 * eps(real(T)) alg = Arnoldi(; tol=tol, krylovdim=2n) - alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n, verbosity=-1) - alg_rrule2 = GMRES(; tol=tol, krylovdim=2n, verbosity=-1) + alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n) + alg_rrule2 = GMRES(; tol=tol, krylovdim=2n) mat_example1, mat_example_fun1, mat_example_fd, Avec, Bvec, Cvec, xvec, vals, vecs = build_mat_example(A, B, C, diff --git a/test/ad/eigsolve.jl b/test/ad/eigsolve.jl index e27e192..fbe6948 100644 --- a/test/ad/eigsolve.jl +++ b/test/ad/eigsolve.jl @@ -205,8 +205,8 @@ end condA = cond(A) tol = n * condA * (T <: Real ? eps(T) : 4 * eps(real(T))) alg = Arnoldi(; tol=tol, krylovdim=n) - alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n, verbosity=-1) - alg_rrule2 = GMRES(; tol=tol, krylovdim=n + 1, verbosity=-1) + alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n) + alg_rrule2 = GMRES(; tol=tol, krylovdim=n + 1) config = Zygote.ZygoteRuleConfig() @testset for which in whichlist for alg_rrule in (alg_rrule1, alg_rrule2) @@ -269,11 +269,13 @@ end if T <: Complex @testset "test warnings and info" begin - alg_rrule = Arnoldi(; tol=tol, krylovdim=n, verbosity=-1) + alg = Arnoldi(; tol=tol, krylovdim=n, verbosity=0) + alg_rrule = Arnoldi(; tol=tol, krylovdim=n, verbosity=0) (vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) @test_logs pb((ZeroTangent(), im .* vecs[1:2] .+ vecs[2:-1:1], NoTangent())) + alg = Arnoldi(; tol=tol, krylovdim=n, verbosity=1) alg_rrule = Arnoldi(; tol=tol, krylovdim=n, verbosity=0) (vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) @@ -282,6 +284,7 @@ end pbs = @test_logs pb((ZeroTangent(), vecs[1:2], NoTangent())) @test norm(unthunk(pbs[1]), Inf) < condA * sqrt(eps(real(T))) + alg = Arnoldi(; tol=tol, krylovdim=n, verbosity=1) alg_rrule = Arnoldi(; tol=tol, krylovdim=n, verbosity=1) (vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) @@ -290,11 +293,13 @@ end pbs = @test_logs (:info,) pb((ZeroTangent(), vecs[1:2], NoTangent())) @test norm(unthunk(pbs[1]), Inf) < condA * sqrt(eps(real(T))) - alg_rrule = GMRES(; tol=tol, krylovdim=n, verbosity=-1) + alg = Arnoldi(; tol=tol, krylovdim=n, verbosity=0) + alg_rrule = GMRES(; tol=tol, krylovdim=n, verbosity=0) (vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) @test_logs pb((ZeroTangent(), im .* vecs[1:2] .+ vecs[2:-1:1], NoTangent())) + alg = Arnoldi(; tol=tol, krylovdim=n, verbosity=1) alg_rrule = GMRES(; tol=tol, krylovdim=n, verbosity=0) (vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) @@ -305,6 +310,7 @@ end pbs = @test_logs pb((ZeroTangent(), vecs[1:2], NoTangent())) @test norm(unthunk(pbs[1]), Inf) < condA * 
sqrt(eps(real(T))) + alg = Arnoldi(; tol=tol, krylovdim=n, verbosity=1) alg_rrule = GMRES(; tol=tol, krylovdim=n, verbosity=1) (vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) diff --git a/test/ad/svdsolve.jl b/test/ad/svdsolve.jl index 08d6366..fd025ce 100644 --- a/test/ad/svdsolve.jl +++ b/test/ad/svdsolve.jl @@ -154,8 +154,8 @@ end howmany = 3 tol = 3 * n * condA * (T <: Real ? eps(T) : 4 * eps(real(T))) alg = GKL(; krylovdim=2n, tol=tol) - alg_rrule1 = Arnoldi(; tol=tol, krylovdim=4n, verbosity=-1) - alg_rrule2 = GMRES(; tol=tol, krylovdim=3n, verbosity=-1) + alg_rrule1 = Arnoldi(; tol=tol, krylovdim=4n) + alg_rrule2 = GMRES(; tol=tol, krylovdim=3n) config = Zygote.ZygoteRuleConfig() for alg_rrule in (alg_rrule1, alg_rrule2) # unfortunately, rrule does not seem type stable for function arguments, because the @@ -219,13 +219,15 @@ end end if T <: Complex @testset "test warnings and info" begin - alg_rrule = Arnoldi(; tol=tol, krylovdim=4n, verbosity=-1) + alg = GKL(; krylovdim=2n, tol=tol, verbosity=0) + alg_rrule = Arnoldi(; tol=tol, krylovdim=4n, verbosity=0) (vals, lvecs, rvecs, info), pb = ChainRulesCore.rrule(config, svdsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) @test_logs pb((ZeroTangent(), im .* lvecs[1:2] .+ lvecs[2:-1:1], ZeroTangent(), NoTangent())) + alg = GKL(; krylovdim=2n, tol=tol, verbosity=1) alg_rrule = Arnoldi(; tol=tol, krylovdim=4n, verbosity=0) (vals, lvecs, rvecs, info), pb = ChainRulesCore.rrule(config, svdsolve, A, x, howmany, :LR, alg; @@ -249,6 +251,7 @@ end (1 - im) .* rvecs[1:2] + rvecs[2:-1:1], NoTangent())) + alg = GKL(; krylovdim=2n, tol=tol, verbosity=1) alg_rrule = Arnoldi(; tol=tol, krylovdim=4n, verbosity=1) (vals, lvecs, rvecs, info), pb = ChainRulesCore.rrule(config, svdsolve, A, x, howmany, :LR, alg; @@ -272,13 +275,15 @@ end (1 - im) .* rvecs[1:2] + rvecs[2:-1:1], NoTangent())) - alg_rrule = GMRES(; tol=tol, krylovdim=3n, verbosity=-1) + alg = GKL(; krylovdim=2n, tol=tol, verbosity=0) + alg_rrule = GMRES(; tol=tol, krylovdim=3n, verbosity=0) (vals, lvecs, rvecs, info), pb = ChainRulesCore.rrule(config, svdsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) @test_logs pb((ZeroTangent(), im .* lvecs[1:2] .+ lvecs[2:-1:1], ZeroTangent(), NoTangent())) + alg = GKL(; krylovdim=2n, tol=tol, verbosity=1) alg_rrule = GMRES(; tol=tol, krylovdim=3n, verbosity=0) (vals, lvecs, rvecs, info), pb = ChainRulesCore.rrule(config, svdsolve, A, x, howmany, :LR, alg; @@ -305,6 +310,7 @@ end (1 - im) .* rvecs[1:2] + rvecs[2:-1:1], NoTangent())) + alg = GKL(; krylovdim=2n, tol=tol, verbosity=1) alg_rrule = GMRES(; tol=tol, krylovdim=3n, verbosity=1) (vals, lvecs, rvecs, info), pb = ChainRulesCore.rrule(config, svdsolve, A, x, howmany, :LR, alg; From 7f9d324af9a6df1f7b148442975c51ffbd092cef Mon Sep 17 00:00:00 2001 From: Jutho Date: Sat, 11 Jan 2025 00:32:14 +0100 Subject: [PATCH 3/8] change verbosity level structure --- Project.toml | 4 +- src/KrylovKit.jl | 42 ++++++--- src/eigsolve/arnoldi.jl | 135 ++++++++++++++--------------- src/eigsolve/eigsolve.jl | 8 +- src/eigsolve/geneigsolve.jl | 7 +- src/eigsolve/golubye.jl | 44 +++++----- src/eigsolve/lanczos.jl | 63 +++++++------- src/eigsolve/svdsolve.jl | 59 ++++++------- src/factorizations/arnoldi.jl | 14 +-- src/factorizations/gkl.jl | 17 ++-- src/factorizations/lanczos.jl | 14 +-- src/linsolve/bicgstab.jl | 80 ++++++++--------- src/linsolve/cg.jl | 51 ++++++----- src/linsolve/gmres.jl | 61 ++++++------- src/linsolve/linsolve.jl | 7 +- 
src/lssolve/lsmr.jl | 48 +++++------ src/lssolve/lssolve.jl | 7 +- src/matrixfun/expintegrator.jl | 151 +++++++++++++++++--------------- src/matrixfun/exponentiate.jl | 16 ++-- test/eigsolve.jl | 61 ++++++++++--- test/expintegrator.jl | 12 +-- test/factorize.jl | 128 +++++++++++++++++++++++++-- test/geneigsolve.jl | 40 ++++++++- test/gklfactorize.jl | 89 ------------------- test/linsolve.jl | 153 +++++++++++++++++++++++++++++---- test/lssolve.jl | 23 ++++- test/runtests.jl | 58 ++++++++----- test/schursolve.jl | 18 ++++ test/svdsolve.jl | 26 +++++- test/testsetup.jl | 2 +- 30 files changed, 876 insertions(+), 562 deletions(-) diff --git a/Project.toml b/Project.toml index 5e37fc0..a033e2c 100644 --- a/Project.toml +++ b/Project.toml @@ -22,6 +22,7 @@ ChainRulesCore = "1" ChainRulesTestUtils = "1" FiniteDifferences = "0.12" LinearAlgebra = "1" +Logging = "1" PackageExtensionCompat = "1" Printf = "1" Random = "1" @@ -36,9 +37,10 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" ChainRulesTestUtils = "cdddcdb0-9152-4a09-a978-84456f9df70a" FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000" +Logging = "56ddb016-857b-54e1-b83d-db4d58db5568" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" TestExtras = "5ed8adda-3752-4e41-b88a-e8b09835ee3a" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [targets] -test = ["Test", "Aqua", "TestExtras", "ChainRulesTestUtils", "ChainRulesCore", "FiniteDifferences", "Zygote"] +test = ["Test", "Aqua", "Logging", "TestExtras", "ChainRulesTestUtils", "ChainRulesCore", "FiniteDifferences", "Zygote"] diff --git a/src/KrylovKit.jl b/src/KrylovKit.jl index 3265687..b856a8f 100644 --- a/src/KrylovKit.jl +++ b/src/KrylovKit.jl @@ -102,9 +102,6 @@ function Base.iterate(r::SplitRange, i=1) end Base.length(r::SplitRange) = r.outerlength -# Algorithm types -include("algorithms.jl") - # Structures to store a list of basis vectors """ abstract type Basis{T} end @@ -122,14 +119,6 @@ See [`OrthonormalBasis`](@ref) for a specific implementation. """ abstract type Basis{T} end -include("orthonormal.jl") - -# Dense linear algebra structures and functions used in the algorithms below -include("dense/givens.jl") -include("dense/linalg.jl") -include("dense/packedhessenberg.jl") -include("dense/reflector.jl") - # Simple coordinate basis vector, i.e. 
a vector of all zeros and a single one on position `k`: """ SimpleBasisVector(m, k) @@ -164,6 +153,23 @@ end # apply operators include("apply.jl") +# Verbosity levels +const WARN_LEVEL = 1 +const STARTSTOP_LEVEL = 2 +const EACHITERATION_LEVEL = 3 + +# Algorithm types +include("algorithms.jl") + +# OrthonormalBasis, orthogonalization and orthonormalization methods +include("orthonormal.jl") + +# Dense linear algebra structures and functions used in the algorithms below +include("dense/givens.jl") +include("dense/linalg.jl") +include("dense/packedhessenberg.jl") +include("dense/reflector.jl") + # Krylov and related factorizations and their iterators include("factorizations/krylov.jl") include("factorizations/lanczos.jl") @@ -217,7 +223,19 @@ function Base.show(io::IO, info::ConvergenceInfo) " iterations and ", info.numops, " applications of the linear map;") - return println(io, "norms of residuals are given by $((info.normres...,)).") + return print(io, "norms of residuals are given by ", normres2string(info.normres), ".") +end + +# Convert residual norms into strings for info and warning printing +normres2string(β::Number) = @sprintf("%.2e", β) +function normres2string(β) + s = "(" + for i in 1:length(β) + s *= normres2string(β[i]) + i < length(β) && (s *= ", ") + end + s *= ")" + return s end # vectors with modified inner product diff --git a/src/eigsolve/arnoldi.jl b/src/eigsolve/arnoldi.jl index 0417c3e..d8484f1 100644 --- a/src/eigsolve/arnoldi.jl +++ b/src/eigsolve/arnoldi.jl @@ -107,32 +107,36 @@ restarts where a part of the current Krylov subspace is kept. """ function schursolve(A, x₀, howmany::Int, which::Selector, alg::Arnoldi) T, U, fact, converged, numiter, numops = _schursolve(A, x₀, howmany, which, alg) + howmany′ = howmany if eltype(T) <: Real && howmany < length(fact) && T[howmany + 1, howmany] != 0 - howmany += 1 + howmany′ += 1 + elseif size(T, 1) < howmany + howmany′ = size(T, 1) end if converged > howmany - howmany = converged + howmany′ = converged end - TT = view(T, 1:howmany, 1:howmany) + TT = view(T, 1:howmany′, 1:howmany′) values = schur2eigvals(TT) + vectors = let B = basis(fact) - [B * u for u in cols(U, 1:howmany)] + [B * u for u in cols(U, 1:howmany′)] end residuals = let r = residual(fact) - [scale(r, last(u)) for u in cols(U, 1:howmany)] + [scale(r, last(u)) for u in cols(U, 1:howmany′)] end - normresiduals = [normres(fact) * abs(last(u)) for u in cols(U, 1:howmany)] - - if alg.verbosity > 0 - if converged < howmany - @warn """Arnoldi schursolve finished without convergence after $numiter iterations: - * $converged eigenvalues converged - * norm of residuals = $((normresiduals...,))""" - else - @info """Arnoldi schursolve finished after $numiter iterations: - * $converged eigenvalues converged - * norm of residuals = $((normresiduals...,))""" - end + normresiduals = [normres(fact) * abs(last(u)) for u in cols(U, 1:howmany′)] + + if (converged < howmany) && alg.verbosity >= WARN_LEVEL + @warn """Arnoldi schursolve stopped without convergence after $numiter iterations: + * $converged eigenvalues converged + * norm of residuals = $(normres2string(normresiduals)) + * number of operations = $numops""" + elseif alg.verbosity >= STARTSTOP_LEVEL + @info """Arnoldi schursolve finished after $numiter iterations: + * $converged eigenvalues converged + * norm of residuals = $(normres2string(normresiduals)) + * number of operations = $numops""" end return TT, vectors, @@ -142,18 +146,20 @@ end function eigsolve(A, x₀, howmany::Int, which::Selector, alg::Arnoldi; 
alg_rrule=alg) T, U, fact, converged, numiter, numops = _schursolve(A, x₀, howmany, which, alg) + howmany′ = howmany if eltype(T) <: Real && howmany < length(fact) && T[howmany + 1, howmany] != 0 - howmany += 1 + howmany′ += 1 + elseif size(T, 1) < howmany + howmany′ = size(T, 1) end if converged > howmany - howmany = converged + howmany′ = converged end - d = min(howmany, size(T, 2)) - TT = view(T, 1:d, 1:d) + TT = view(T, 1:howmany′, 1:howmany′) values = schur2eigvals(TT) # Compute eigenvectors - V = view(U, :, 1:d) * schur2eigvecs(TT) + V = view(U, :, 1:howmany′) * schur2eigvecs(TT) vectors = let B = basis(fact) [B * v for v in cols(V)] end @@ -162,18 +168,16 @@ function eigsolve(A, x₀, howmany::Int, which::Selector, alg::Arnoldi; alg_rrul end normresiduals = [normres(fact) * abs(last(v)) for v in cols(V)] - if alg.verbosity > 0 - if converged < howmany - @warn """Arnoldi eigsolve finished without convergence after $numiter iterations: - * $converged eigenvalues converged - * norm of residuals = $((normresiduals...,)) - * number of operations = $numops""" - else - @info """Arnoldi eigsolve finished after $numiter iterations: - * $converged eigenvalues converged - * norm of residuals = $((normresiduals...,)) - * number of operations = $numops""" - end + if (converged < howmany) && alg.verbosity >= WARN_LEVEL + @warn """Arnoldi eigsolve stopped without convergence after $numiter iterations: + * $converged eigenvalues converged + * norm of residuals = $(normres2string(normresiduals)) + * number of operations = $numops""" + elseif alg.verbosity >= STARTSTOP_LEVEL + @info """Arnoldi eigsolve finished after $numiter iterations: + * $converged eigenvalues converged + * norm of residuals = $(normres2string(normresiduals)) + * number of operations = $numops""" end return values, vectors, @@ -298,12 +302,12 @@ function realeigsolve(A, x₀, howmany::Int, which::Selector, alg::Arnoldi; alg_ end i < howmany && throw(ArgumentError("only the first $i eigenvalues are real, which is less then the requested `howmany = $howmany`")) - howmany = max(howmany, min(i, converged)) - TT = view(T, 1:howmany, 1:howmany) + howmany′ = max(howmany, min(i, converged)) + TT = view(T, 1:howmany′, 1:howmany′) values = diag(TT) # Compute eigenvectors - V = view(U, :, 1:howmany) * schur2realeigvecs(TT) + V = view(U, :, 1:howmany′) * schur2realeigvecs(TT) vectors = let B = basis(fact) [(B * v)[] for v in cols(V)] end @@ -312,18 +316,16 @@ function realeigsolve(A, x₀, howmany::Int, which::Selector, alg::Arnoldi; alg_ end normresiduals = [normres(fact) * abs(last(v)) for v in cols(V)] - if alg.verbosity > 0 - if converged < howmany - @warn """Arnoldi realeigsolve finished without convergence after $numiter iterations: - * $converged eigenvalues converged - * norm of residuals = $((normresiduals...,)) - * number of operations = $numops""" - else - @info """Arnoldi realeigsolve finished after $numiter iterations: - * $converged eigenvalues converged - * norm of residuals = $((normresiduals...,)) - * number of operations = $numops""" - end + if (converged < howmany) && alg.verbosity >= WARN_LEVEL + @warn """Arnoldi realeigsolve stopped without convergence after $numiter iterations: + * $converged eigenvalues converged + * norm of residuals = $(normres2string(normresiduals)) + * number of operations = $numops""" + elseif alg.verbosity >= STARTSTOP_LEVEL + @info """Arnoldi realeigsolve finished after $numiter iterations: + * $converged eigenvalues converged + * norm of residuals = $(normres2string(normresiduals)) + * number of 
operations = $numops""" end return values, vectors, @@ -340,7 +342,7 @@ function _schursolve(A, x₀, howmany::Int, which::Selector, alg::Arnoldi) numiter = 1 # initialize arnoldi factorization iter = ArnoldiIterator(A, x₀, alg.orth) - fact = initialize(iter; verbosity=alg.verbosity - 2) + fact = initialize(iter; verbosity=alg.verbosity - EACHITERATION_LEVEL) numops = 1 sizehint!(fact, krylovdim) β = normres(fact) @@ -358,10 +360,11 @@ function _schursolve(A, x₀, howmany::Int, which::Selector, alg::Arnoldi) β = normres(fact) K = length(fact) - if β <= tol - if K < howmany - @warn "Invariant subspace of dimension $K (up to requested tolerance `tol = $tol`), which is smaller than the number of requested eigenvalues (i.e. `howmany == $howmany`); setting `howmany = $K`." - howmany = K + if β <= tol && K < howmany + if alg.verbosity >= WARN_LEVEL + msg = "Invariant subspace of dimension $K (up to requested tolerance `tol = $tol`), " + msg *= "which is smaller than the number of requested eigenvalues (i.e. `howmany == $howmany`)." + @warn msg end end if K == krylovdim || β <= tol || (alg.eager && K >= howmany) # process @@ -387,33 +390,23 @@ function _schursolve(A, x₀, howmany::Int, which::Selector, alg::Arnoldi) converged -= 1 end - if converged >= howmany + if converged >= howmany || β <= tol break - elseif alg.verbosity > 1 - msg = "Arnoldi schursolve in iter $numiter, krylovdim = $K: " - msg *= "$converged values converged, normres = (" - msg *= @sprintf("%.2e", abs(f[1])) - for i in 2:howmany - msg *= ", " - msg *= @sprintf("%.2e", abs(f[i])) - end - msg *= ")" - @info msg + elseif alg.verbosity >= EACHITERATION_LEVEL + @info "Arnoldi schursolve in iteration $numiter, step = $K: $converged values converged, normres = $(normres2string(abs.(f[1:howmany])))" end end if K < krylovdim # expand - fact = expand!(iter, fact; verbosity=alg.verbosity - 2) + fact = expand!(iter, fact; verbosity=alg.verbosity - EACHITERATION_LEVEL) numops += 1 else # shrink numiter == maxiter && break # Determine how many to keep keep = div(3 * krylovdim + 2 * converged, 5) # strictly smaller than krylovdim since converged < howmany <= krylovdim, at least equal to converged - if eltype(H) <: Real && H[keep + 1, keep] != 0 # we are in the middle of a 2x2 block - keep += 1 # conservative choice - keep >= krylovdim && - error("krylov dimension $(krylovdim) too small to compute $howmany eigenvalues") + if eltype(H) <: Real && H[keep + 1, keep] != 0 # we are in the middle of a 2x2 block; this cannot happen if keep == converged, so we can decrease keep + keep -= 1 # conservative choice end # Restore Arnoldi form in the first keep columns @@ -437,7 +430,7 @@ function _schursolve(A, x₀, howmany::Int, which::Selector, alg::Arnoldi) B[keep + 1] = scale!!(r, 1 / normres(fact)) # Shrink Arnoldi factorization - fact = shrink!(fact, keep) + fact = shrink!(fact, keep; verbosity=alg.verbosity - EACHITERATION_LEVEL) numiter += 1 end end diff --git a/src/eigsolve/eigsolve.jl b/src/eigsolve/eigsolve.jl index d687f8e..e3de25c 100644 --- a/src/eigsolve/eigsolve.jl +++ b/src/eigsolve/eigsolve.jl @@ -98,8 +98,12 @@ The return value is always of the form `vals, vecs, info = eigsolve(...)` with Keyword arguments and their default values are given by: - - `verbosity::Int = 0`: verbosity level, i.e. 0 (no messages), 1 (single message - at the end), 2 (information after every iteration), 3 (information per Krylov step) + - `verbosity::Int = 0`: verbosity level, i.e. 
+ - 0 (suppress all messages) + - 1 (only warnings) + - 2 (one message with convergence info at the end) + - 3 (progress info after every iteration) + - 4+ (all of the above and additional information about the Lanczos or Arnoldi iteration) - `tol::Real`: the requested accuracy (corresponding to the 2-norm of the residual for Schur vectors, not the eigenvectors). If you work in e.g. single precision (`Float32`), you should definitely change the default value. diff --git a/src/eigsolve/geneigsolve.jl b/src/eigsolve/geneigsolve.jl index 10a8cb1..f27cb64 100644 --- a/src/eigsolve/geneigsolve.jl +++ b/src/eigsolve/geneigsolve.jl @@ -88,8 +88,11 @@ The return value is always of the form `vals, vecs, info = geneigsolve(...)` wit Keyword arguments and their default values are given by: - - `verbosity::Int = 0`: verbosity level, i.e. 0 (no messages), 1 (single message - at the end), 2 (information after every iteration), 3 (information per Krylov step) + - `verbosity::Int = 0`: verbosity level, i.e. + - 0 (suppress all messages) + - 1 (only warnings) + - 2 (one message with convergence info at the end) + - 3 (progress info after every iteration) - `tol::Real`: the requested accuracy, relative to the 2-norm of the corresponding eigenvectors, i.e. convergence is achieved if `norm((A - λB)x) < tol * norm(x)`. Because eigenvectors are now normalised such that `dot(x, B*x) = 1`, `norm(x)` is not diff --git a/src/eigsolve/golubye.jl b/src/eigsolve/golubye.jl index f6bc0ca..6534080 100644 --- a/src/eigsolve/golubye.jl +++ b/src/eigsolve/golubye.jl @@ -47,7 +47,12 @@ function geneigsolve(f, x₀, howmany::Int, which::Selector, alg::GolubYe) while true β = norm(r) if β <= tol && K < howmany - @warn "Invariant subspace of dimension $K (up to requested tolerance `tol = $tol`), which is smaller than the number of requested eigenvalues (i.e. `howmany == $howmany`); setting `howmany = $K`." + if alg.verbosity >= WARN_LEVEL + msg = "Invariant subspace of dimension $K (up to requested tolerance `tol = $tol`), " + msg *= "which is smaller than the number of requested eigenvalues (i.e. `howmany == $howmany`);" + msg *= "setting `howmany = $K`." 
+ @warn msg + end howmany = K end if K == krylovdim - converged || β <= tol # process @@ -136,15 +141,8 @@ function geneigsolve(f, x₀, howmany::Int, which::Selector, alg::GolubYe) push!(residuals, r) push!(normresiduals, β) end - elseif alg.verbosity > 1 - msg = "Golub-Ye geneigsolve in iter $numiter: " - msg *= "$converged values converged, normres = (" - for i in 1:converged - msg *= @sprintf("%.2e", normresiduals[i]) - msg *= ", " - end - msg *= @sprintf("%.2e", β) * ")" - @info msg + elseif alg.verbosity >= EACHITERATION_LEVEL + @info "Golub-Ye geneigsolve in iter $numiter: $converged values converged, normres = $(normres2string(normresiduals))" end end @@ -162,8 +160,8 @@ function geneigsolve(f, x₀, howmany::Int, which::Selector, alg::GolubYe) HHA[K, K] = checkhermitian(α, n) push!(BV, bv) - if alg.verbosity > 2 - @info "Golub-Ye iteration $numiter, step $K: normres = $β" + if alg.verbosity >= EACHITERATION_LEVEL + @info "Golub-Ye iteration $numiter, step $K: normres = $(normres2string(β))" end else # restart numiter == maxiter && break @@ -185,18 +183,16 @@ function geneigsolve(f, x₀, howmany::Int, which::Selector, alg::GolubYe) numiter += 1 end end - if alg.verbosity > 0 - if converged < howmany - @warn """Golub-Ye geneigsolve finished without convergence after $numiter iterations: - * $converged eigenvalues converged - * norm of residuals = $((normresiduals...,)) - * number of operations = $numops""" - else - @info """Golub-Ye geneigsolve finished after $numiter iterations: - * $converged eigenvalues converged - * norm of residuals = $((normresiduals...,)) - * number of operations = $numops""" - end + if (converged < howmany) && alg.verbosity >= WARN_LEVEL + @warn """Golub-Ye geneigsolve stopped without convergence after $numiter iterations: + * $converged eigenvalues converged + * norm of residuals = $(normres2string(normresiduals)) + * number of operations = $numops""" + elseif alg.verbosity >= STARTSTOP_LEVEL + @info """Golub-Ye geneigsolve finished after $numiter iterations: + * $converged eigenvalues converged + * norm of residuals = $(normres2string(normresiduals)) + * number of operations = $numops""" end return values, diff --git a/src/eigsolve/lanczos.jl b/src/eigsolve/lanczos.jl index 5d64a87..3077b30 100644 --- a/src/eigsolve/lanczos.jl +++ b/src/eigsolve/lanczos.jl @@ -13,7 +13,7 @@ function eigsolve(A, x₀, howmany::Int, which::Selector, alg::Lanczos; # Initialize Lanczos factorization iter = LanczosIterator(A, x₀, alg.orth) - fact = initialize(iter; verbosity=alg.verbosity - 2) + fact = initialize(iter; verbosity=alg.verbosity - EACHITERATION_LEVEL) numops = 1 numiter = 1 sizehint!(fact, krylovdim) @@ -31,10 +31,11 @@ function eigsolve(A, x₀, howmany::Int, which::Selector, alg::Lanczos; K = length(fact) # diagonalize Krylov factorization - if β <= tol - if K < howmany - @warn "Invariant subspace of dimension $K (up to requested tolerance `tol = $tol`), which is smaller than the number of requested eigenvalues (i.e. `howmany == $howmany`); setting `howmany = $K`." - howmany = K + if β <= tol && K < howmany + if alg.verbosity >= WARN_LEVEL + msg = "Invariant subspace of dimension $K (up to requested tolerance `tol = $tol`), " + msg *= "which is smaller than the number of requested eigenvalues (i.e. `howmany == $howmany`)." 
+ @warn msg end end if K == krylovdim || β <= tol || (alg.eager && K >= howmany) @@ -62,23 +63,16 @@ function eigsolve(A, x₀, howmany::Int, which::Selector, alg::Lanczos; end end - if converged >= howmany + if converged >= howmany || β <= tol break - elseif alg.verbosity > 1 - msg = "Lanczos eigsolve in iter $numiter, krylovdim = $K: " - msg *= "$converged values converged, normres = (" - msg *= @sprintf("%.2e", abs(f[1])) - for i in 2:howmany - msg *= ", " - msg *= @sprintf("%.2e", abs(f[i])) - end - msg *= ")" + elseif alg.verbosity >= EACHITERATION_LEVEL + @info "Lanczos eigsolve in iteration $numiter, step = $K: $converged values converged, normres = $(normres2string(abs.(f[1:howmany])))" @info msg end end - if K < krylovdim# expand Krylov factorization - fact = expand!(iter, fact; verbosity=alg.verbosity - 2) + if K < krylovdim # expand Krylov factorization + fact = expand!(iter, fact; verbosity=alg.verbosity - EACHITERATION_LEVEL) numops += 1 else ## shrink and restart if numiter == maxiter @@ -114,18 +108,21 @@ function eigsolve(A, x₀, howmany::Int, which::Selector, alg::Lanczos; B[keep + 1] = scale!!(r, 1 / β) # Shrink Lanczos factorization - fact = shrink!(fact, keep) + fact = shrink!(fact, keep; verbosity=alg.verbosity - EACHITERATION_LEVEL) numiter += 1 end end + howmany′ = howmany if converged > howmany - howmany = converged + howmany′ = converged + elseif length(D) < howmany + howmany′ = length(D) end - values = D[1:howmany] + values = D[1:howmany′] # Compute eigenvectors - V = view(U, :, 1:howmany) + V = view(U, :, 1:howmany′) # Compute convergence information vectors = let B = basis(fact) @@ -135,21 +132,19 @@ function eigsolve(A, x₀, howmany::Int, which::Selector, alg::Lanczos; [scale(r, last(v)) for v in cols(V)] end normresiduals = let f = f - map(i -> abs(f[i]), 1:howmany) + map(i -> abs(f[i]), 1:howmany′) end - if alg.verbosity > 0 - if converged < howmany - @warn """Lanczos eigsolve finished without convergence after $numiter iterations: - * $converged eigenvalues converged - * norm of residuals = $((normresiduals...,)) - * number of operations = $numops""" - else - @info """Lanczos eigsolve finished after $numiter iterations: - * $converged eigenvalues converged - * norm of residuals = $((normresiduals...,)) - * number of operations = $numops""" - end + if (converged < howmany) && alg.verbosity >= WARN_LEVEL + @warn """Lanczos eigsolve stopped without convergence after $numiter iterations: + * $converged eigenvalues converged + * norm of residuals = $(normres2string(normresiduals)) + * number of operations = $numops""" + elseif alg.verbosity >= STARTSTOP_LEVEL + @info """Lanczos eigsolve finished after $numiter iterations: + * $converged eigenvalues converged + * norm of residuals = $(normres2string(normresiduals)) + * number of operations = $numops""" end return values, diff --git a/src/eigsolve/svdsolve.jl b/src/eigsolve/svdsolve.jl index 2e8d5e3..7e91c0f 100644 --- a/src/eigsolve/svdsolve.jl +++ b/src/eigsolve/svdsolve.jl @@ -81,8 +81,12 @@ The return value is always of the form `vals, lvecs, rvecs, info = svdsolve(...) Keyword arguments and their default values are given by: - - `verbosity::Int = 0`: verbosity level, i.e. 
0 (no messages), 1 (single message - at the end), 2 (information after every iteration), 3 (information per Krylov step) + - `verbosity::Int = 0`: verbosity level + - 0 (suppress all messages) + - 1 (only warnings) + - 2 (one message with convergence info at the end) + - 3 (progress info after every iteration) + - 4+ (all of the above and additional information about the GKL iteration) - `krylovdim`: the maximum dimension of the Krylov subspace that will be constructed. Note that the dimension of the vector space is not known or checked, e.g. `x₀` should not necessarily support the `Base.length` function. If you know the actual problem @@ -153,7 +157,7 @@ function svdsolve(A, x₀, howmany::Int, which::Symbol, alg::GKL; numiter = 1 # initialize GKL factorization iter = GKLIterator(A, x₀, alg.orth) - fact = initialize(iter; verbosity=alg.verbosity - 2) + fact = initialize(iter; verbosity=alg.verbosity - EACHITERATION_LEVEL) numops = 2 sizehint!(fact, krylovdim) β = normres(fact) @@ -171,10 +175,11 @@ function svdsolve(A, x₀, howmany::Int, which::Symbol, alg::GKL; β = normres(fact) K = length(fact) - if β < tol - if K < howmany - @warn "Invariant subspace of dimension $K (up to requested tolerance `tol = $tol`), which is smaller than the number of requested singular values (i.e. `howmany == $howmany`); setting `howmany = $K`." - howmany = K + if β <= tol && K < howmany + if alg.verbosity >= WARN_LEVEL + msg = "Invariant subspace of dimension $K (up to requested tolerance `tol = $tol`), " + msg *= "which is smaller than the number of requested eigenvalues (i.e. `howmany == $howmany`)." + @warn msg end end if K == krylovdim || β <= tol || (alg.eager && K >= howmany) @@ -201,23 +206,15 @@ function svdsolve(A, x₀, howmany::Int, which::Symbol, alg::GKL; converged += 1 end - if converged >= howmany + if converged >= howmany || β <= tol break - elseif alg.verbosity > 1 - msg = "GKL svdsolve in iter $numiter, krylovdim $krylovdim: " - msg *= "$converged values converged, normres = (" - msg *= @sprintf("%.2e", abs(f[1])) - for i in 2:howmany - msg *= ", " - msg *= @sprintf("%.2e", abs(f[i])) - end - msg *= ")" - @info msg + elseif alg.verbosity >= EACHITERATION_LEVEL + @info "GKL svdsolve in iteration $numiter, step $K: $converged values converged, normres = $(normres2string(abs.(f[1:howmany])))" end end if K < krylovdim # expand - fact = expand!(iter, fact; verbosity=alg.verbosity - 2) + fact = expand!(iter, fact; verbosity=alg.verbosity - EACHITERATION_LEVEL) numops += 2 else ## shrink and restart if numiter == maxiter @@ -270,7 +267,7 @@ function svdsolve(A, x₀, howmany::Int, which::Symbol, alg::GKL; fact.βs[j] = H[j + 1, j] end # Shrink GKL factorization - fact = shrink!(fact, keep) + fact = shrink!(fact, keep; verbosity=alg.verbosity - EACHITERATION_LEVEL) numiter += 1 end end @@ -296,18 +293,16 @@ function svdsolve(A, x₀, howmany::Int, which::Symbol, alg::GKL; normresiduals = let f = f map(i -> abs(f[i]), 1:howmany) end - if alg.verbosity > 0 - if converged < howmany - @warn """GKL svdsolve finished without convergence after $numiter iterations: - * $converged singular values converged - * norm of residuals = $((normresiduals...,)) - * number of operations = $numops""" - else - @info """GKL svdsolve finished after $numiter iterations: - * $converged singular values converged - * norm of residuals = $((normresiduals...,)) - * number of operations = $numops""" - end + if (converged < howmany) && alg.verbosity >= WARN_LEVEL + @warn """GKL svdsolve finished without convergence after $numiter 
iterations: + * $converged singular values converged + * norm of residuals = $(normres2string(normresiduals)) + * number of operations = $numops""" + elseif alg.verbosity >= STARTSTOP_LEVEL + @info """GKL svdsolve finished after $numiter iterations: + * $converged singular values converged + * norm of residuals = $(normres2string(normresiduals)) + * number of operations = $numops""" end return values, diff --git a/src/factorizations/arnoldi.jl b/src/factorizations/arnoldi.jl index ad7c743..f1ef0e0 100644 --- a/src/factorizations/arnoldi.jl +++ b/src/factorizations/arnoldi.jl @@ -170,7 +170,7 @@ function initialize(iter::ArnoldiIterator; verbosity::Int=0) V = OrthonormalBasis([v]) H = T[α, β] if verbosity > 0 - @info "Arnoldi iteration step 1: normres = $β" + @info "Arnoldi initiation at dimension 1: subspace normres = $(normres2string(β))" end return state = ArnoldiFactorization(1, V, H, r) end @@ -190,7 +190,7 @@ function initialize!(iter::ArnoldiIterator, state::ArnoldiFactorization; verbosi push!(H, α, β) state.r = r if verbosity > 0 - @info "Arnoldi iteration step 1: normres = $β" + @info "Arnoldi initiation at dimension 1: subspace normres = $(normres2string(β))" end return state end @@ -208,11 +208,11 @@ function expand!(iter::ArnoldiIterator, state::ArnoldiFactorization; verbosity:: H[m + k + 1] = β state.r = r if verbosity > 0 - @info "Arnoldi iteration step $k: normres = $β" + @info "Arnoldi expansion to dimension $k: subspace normres = $(normres2string(β))" end return state end -function shrink!(state::ArnoldiFactorization, k) +function shrink!(state::ArnoldiFactorization, k; verbosity::Int=0) length(state) <= k && return state V = state.V H = state.H @@ -222,7 +222,11 @@ function shrink!(state::ArnoldiFactorization, k) r = pop!(V) resize!(H, (k * k + 3 * k) >> 1) state.k = k - state.r = scale!!(r, normres(state)) + β = normres(state) + if verbosity > 0 + @info "Arnoldi reduction to dimension $k: subspace normres = $(normres2string(β))" + end + state.r = scale!!(r, β) return state end diff --git a/src/factorizations/gkl.jl b/src/factorizations/gkl.jl index 688b19f..94be459 100644 --- a/src/factorizations/gkl.jl +++ b/src/factorizations/gkl.jl @@ -212,9 +212,8 @@ function initialize(iter::GKLIterator; verbosity::Int=0) αs = S[α] βs = S[β] if verbosity > 0 - @info "GKL iteration step 1: normres = $β" + @info "GKL initiation at dimension 1: subspace normres = $(normres2string(β))" end - return GKLFactorization(1, U, V, αs, βs, r) end function initialize!(iter::GKLIterator, state::GKLFactorization; verbosity::Int=0) @@ -240,9 +239,8 @@ function initialize!(iter::GKLIterator, state::GKLFactorization; verbosity::Int= push!(βs, β) state.r = r if verbosity > 0 - @info "GKL iteration step 1: normres = $β" + @info "GKL initiation at dimension 1: subspace normres = $(normres2string(β))" end - return state end function expand!(iter::GKLIterator, state::GKLFactorization; verbosity::Int=0) @@ -262,12 +260,11 @@ function expand!(iter::GKLIterator, state::GKLFactorization; verbosity::Int=0) state.k += 1 state.r = r if verbosity > 0 - @info "GKL iteration step $(state.k): normres = $β" + @info "GKL expension to dimension $(state.k): subspace normres = $(normres2string(β))" end - return state end -function shrink!(state::GKLFactorization, k) +function shrink!(state::GKLFactorization, k; verbosity::Int=0) length(state) == length(state.V) || error("we cannot shrink GKLFactorization without keeping vectors") length(state) <= k && return state @@ -282,7 +279,11 @@ function 
shrink!(state::GKLFactorization, k) resize!(state.αs, k) resize!(state.βs, k) state.k = k - state.r = scale!!(r, normres(state)) + β = normres(state) + if verbosity > 0 + @info "GKL reduction to dimension $k: subspace normres = $(normres2string(β))" + end + state.r = scale!!(r, β) return state end diff --git a/src/factorizations/lanczos.jl b/src/factorizations/lanczos.jl index 4890ac1..9effd03 100644 --- a/src/factorizations/lanczos.jl +++ b/src/factorizations/lanczos.jl @@ -213,7 +213,7 @@ function initialize(iter::LanczosIterator; verbosity::Int=0) αs = [real(α)] βs = [β] if verbosity > 0 - @info "Lanczos iteration step 1: normres = $β" + @info "Lanczos initiation at dimension 1: subspace normres = $(normres2string(β))" end return LanczosFactorization(1, V, αs, βs, r) end @@ -237,7 +237,7 @@ function initialize!(iter::LanczosIterator, state::LanczosFactorization; verbosi push!(βs, β) state.r = r if verbosity > 0 - @info "Lanczos iteration step 1: normres = $β" + @info "Lanczos initiation at dimension 1: subspace normres = $(normres2string(β))" end return state end @@ -257,11 +257,11 @@ function expand!(iter::LanczosIterator, state::LanczosFactorization; verbosity:: state.k += 1 state.r = r if verbosity > 0 - @info "Lanczos iteration step $(state.k): normres = $β" + @info "Lanczos expansion to dimension $(state.k): subspace normres = $(normres2string(β))" end return state end -function shrink!(state::LanczosFactorization, k) +function shrink!(state::LanczosFactorization, k; verbosity::Int=0) length(state) == length(state.V) || error("we cannot shrink LanczosFactorization without keeping Lanczos vectors") length(state) <= k && return state @@ -273,7 +273,11 @@ function shrink!(state::LanczosFactorization, k) resize!(state.αs, k) resize!(state.βs, k) state.k = k - state.r = scale!!(r, normres(state)) + β = normres(state) + if verbosity > 0 + @info "Lanczos reduction to dimension $k: subspace normres = $(normres2string(β))" + end + state.r = scale!!(r, β) return state end diff --git a/src/linsolve/bicgstab.jl b/src/linsolve/bicgstab.jl index 9ab9a77..da46981 100644 --- a/src/linsolve/bicgstab.jl +++ b/src/linsolve/bicgstab.jl @@ -20,12 +20,14 @@ function linsolve(operator, b, x₀, alg::BiCGStab, a₀::Number=0, a₁::Number # Check for early return if normr < tol - if alg.verbosity > 0 + if alg.verbosity >= STARTSTOP_LEVEL @info """BiCGStab linsolve converged without any iterations: - * norm of residual = $normr - * number of operations = 1""" + * norm of residual = $(normres2string(normr)) + * number of operations = $numops""" end return (x, ConvergenceInfo(1, r, normr, numiter, numops)) + elseif alg.verbosity >= STARTSTOP_LEVEL + @info "BiCGStab linsolve starts with norm of residual = $(normres2string(normr))" end # First iteration @@ -35,9 +37,11 @@ function linsolve(operator, b, x₀, alg::BiCGStab, a₀::Number=0, a₁::Number # Method fails if ρ is zero. 
if ρ ≈ 0.0 - @warn """BiCGStab linsolve errored after $numiter iterations: - * norm of residual = $normr - * number of operations = $numops""" + if alg.verbosity >= WARN_LEVEL + @warn """BiCGStab linsolve errored after $numiter iterations: + * norm of residual = $(normres2string(normr)) + * number of operations = $numops""" + end return (x, ConvergenceInfo(0, r, normr, numiter, numops)) end @@ -66,10 +70,10 @@ function linsolve(operator, b, x₀, alg::BiCGStab, a₀::Number=0, a₁::Number normr_act = norm(s) if normr_act < tol - if alg.verbosity > 0 + if alg.verbosity >= STARTSTOP_LEVEL @info """BiCGStab linsolve converged at iteration $(numiter-1/2): - * norm of residual = $normr_act - * number of operations = $numops""" + * norm of residual = $(normres2string(normr_act)) + * number of operations = $numops""" end return (xhalf, ConvergenceInfo(1, s, normr_act, numiter, numops)) end @@ -97,23 +101,19 @@ function linsolve(operator, b, x₀, alg::BiCGStab, a₀::Number=0, a₁::Number normr_act = norm(r) if normr_act < tol - if alg.verbosity > 0 + if alg.verbosity >= STARTSTOP_LEVEL @info """BiCGStab linsolve converged at iteration $(numiter): - * norm of residual = $normr_act - * number of operations = $numops""" + * norm of residual = $(normres2string(normr_act)) + * number of operations = $numops""" end return (x, ConvergenceInfo(1, r, normr_act, numiter, numops)) end end + if alg.verbosity >= EACHITERATION_LEVEL + @info "BiCGStab linsolve in iteration $numiter: normres = $(normres2string(normr))" + end - while numiter < maxiter - if alg.verbosity > 0 - msg = "BiCGStab linsolve in iter $numiter: " - msg *= "normres = " - msg *= @sprintf("%12e", normr) - @info msg - end - + while true numiter += 1 ρold = ρ ρ = inner(r_shadow, r) @@ -136,13 +136,6 @@ function linsolve(operator, b, x₀, alg::BiCGStab, a₀::Number=0, a₁::Number normr = norm(s) - if alg.verbosity > 0 - msg = "BiCGStab linsolve in iter $(numiter-1/2): " - msg *= "normres = " - msg *= @sprintf("%12e", normr) - @info msg - end - # Check for return at half step. if normr < tol # Compute non-approximate residual. @@ -152,14 +145,17 @@ function linsolve(operator, b, x₀, alg::BiCGStab, a₀::Number=0, a₁::Number normr_act = norm(s) if normr_act < tol - if alg.verbosity > 0 + if alg.verbosity >= STARTSTOP_LEVEL @info """BiCGStab linsolve converged at iteration $(numiter-1/2): - * norm of residual = $normr_act - * number of operations = $numops""" + * norm of residual = $(normres2string(normr_act)) + * number of operations = $numops""" end return (xhalf, ConvergenceInfo(1, s, normr_act, numiter, numops)) end end + if alg.verbosity >= EACHITERATION_LEVEL + @info "BiCGStab linsolve in iteration $(numiter-1/2): normres = $(normres2string(normr))" + end ## GMRES part of the algorithm. 
t = apply(operator, s, α₀, α₁) @@ -183,20 +179,24 @@ function linsolve(operator, b, x₀, alg::BiCGStab, a₀::Number=0, a₁::Number normr_act = norm(r) if normr_act < tol - if alg.verbosity > 0 + if alg.verbosity >= STARTSTOP_LEVEL @info """BiCGStab linsolve converged at iteration $(numiter): - * norm of residual = $normr_act - * number of operations = $numops""" + * norm of residual = $(normres2string(normr_act)) + * number of operations = $numops""" end return (x, ConvergenceInfo(1, r, normr_act, numiter, numops)) end end + if numiter >= maxiter + if alg.verbosity >= WARN_LEVEL + @warn """BiCGStab linsolve stopped without converging after $numiter iterations: + * norm of residual = $(normres2string(normr)) + * number of operations = $numops""" + end + return (x, ConvergenceInfo(0, r, normr, numiter, numops)) + end + if alg.verbosity >= EACHITERATION_LEVEL + @info "BiCGStab linsolve in iteration $numiter: normres = $(normres2string(normr))" + end end - - if alg.verbosity > 0 - @warn """BiCGStab linsolve finished without converging after $numiter iterations: - * norm of residual = $normr - * number of operations = $numops""" - end - return (x, ConvergenceInfo(0, r, normr, numiter, numops)) end diff --git a/src/linsolve/cg.jl b/src/linsolve/cg.jl index 7b10a1b..0b5eeac 100644 --- a/src/linsolve/cg.jl +++ b/src/linsolve/cg.jl @@ -20,12 +20,14 @@ function linsolve(operator, b, x₀, alg::CG, a₀::Real=0, a₁::Real=1; alg_rr # Check for early return if normr < tol - if alg.verbosity > 0 + if alg.verbosity >= STARTSTOP_LEVEL @info """CG linsolve converged without any iterations: - * norm of residual = $normr - * number of operations = 1""" + * norm of residual = $(normres2string(normr)) + * number of operations = 1""" end return (x, ConvergenceInfo(1, r, normr, numiter, numops)) + elseif alg.verbosity >= STARTSTOP_LEVEL + @info "CG linsolve starts with norm of residual = $(normres2string(normr))" end # First iteration @@ -41,25 +43,23 @@ function linsolve(operator, b, x₀, alg::CG, a₀::Real=0, a₁::Real=1; alg_rr β = ρ / ρold numops += 1 numiter += 1 + if normr < tol - if alg.verbosity > 0 + if alg.verbosity >= STARTSTOP_LEVEL @info """CG linsolve converged at iteration $numiter: - * norm of residual = $normr - * number of operations = $numops""" + * norm of residual = $(normres2string(normr)) + * number of operations = $numops""" end return (x, ConvergenceInfo(1, r, normr, numiter, numops)) end - if alg.verbosity > 1 - msg = "CG linsolve in iter $numiter: " - msg *= "normres = " - msg *= @sprintf("%.12e", normr) - @info msg + if alg.verbosity >= EACHITERATION_LEVEL + @info "CG linsolve in iteration $numiter: normres = $(normres2string(normr))" end # Check for early return normr < tol && return (x, ConvergenceInfo(1, r, normr, numiter, numops)) - while numiter < maxiter + while true p = add!!(p, r, 1, β) q = apply(operator, p, α₀, α₁) α = ρ / inner(p, q) @@ -80,24 +80,23 @@ function linsolve(operator, b, x₀, alg::CG, a₀::Real=0, a₁::Real=1; alg_rr numops += 1 numiter += 1 if normr < tol - if alg.verbosity > 0 + if alg.verbosity >= STARTSTOP_LEVEL @info """CG linsolve converged at iteration $numiter: - * norm of residual = $normr - * number of operations = $numops""" + * norm of residual = $(normres2string(normr)) + * number of operations = $numops""" end return (x, ConvergenceInfo(1, r, normr, numiter, numops)) end - if alg.verbosity > 1 - msg = "CG linsolve in iter $numiter: " - msg *= "normres = " - msg *= @sprintf("%.12e", normr) - @info msg + if numiter >= maxiter + if alg.verbosity >= WARN_LEVEL + 
@warn """CG linsolve stopped without converging after $numiter iterations: + * norm of residual = $(normres2string(normr)) + * number of operations = $numops""" + end + return (x, ConvergenceInfo(0, r, normr, numiter, numops)) + end + if alg.verbosity >= EACHITERATION_LEVEL + @info "CG linsolve in iteration $numiter: normres = $(normres2string(normr))" end end - if alg.verbosity > 0 - @warn """CG linsolve finished without converging after $numiter iterations: - * norm of residual = $normr - * number of operations = $numops""" - end - return (x, ConvergenceInfo(0, r, normr, numiter, numops)) end diff --git a/src/linsolve/gmres.jl b/src/linsolve/gmres.jl index 480d28c..1b38c68 100644 --- a/src/linsolve/gmres.jl +++ b/src/linsolve/gmres.jl @@ -19,12 +19,14 @@ function linsolve(operator, b, x₀, alg::GMRES, a₀::Number=0, a₁::Number=1; # Check for early return if β < tol - if alg.verbosity > 0 + if alg.verbosity >= STARTSTOP_LEVEL @info """GMRES linsolve converged without any iterations: - * norm of residual = $β - * number of operations = 1""" + * norm of residual = $(normres2string(β)) + * number of operations = 1""" end return (x, ConvergenceInfo(1, r, β, 0, 1)) + elseif alg.verbosity >= STARTSTOP_LEVEL + @info "GMRES linsolve starts with norm of residual = $(normres2string(β))" end # Initialize data structures @@ -35,11 +37,11 @@ function linsolve(operator, b, x₀, alg::GMRES, a₀::Number=0, a₁::Number=1; numops = 1 # operator has been applied once to determine T and r iter = ArnoldiIterator(operator, r, alg.orth) - fact = initialize(iter; verbosity=alg.verbosity - 2) + fact = initialize(iter) sizehint!(fact, alg.krylovdim) numops += 1 # start applies operator once - while numiter < maxiter # restart loop + while true # restart loop numiter += 1 y[1] = β k = 1 @@ -49,15 +51,12 @@ function linsolve(operator, b, x₀, alg::GMRES, a₀::Number=0, a₁::Number=1; y[2] = zero(T) lmul!(gs[1], y) β = convert(S, abs(y[2])) - if alg.verbosity > 2 - msg = "GMRES linsolve in iter $numiter; step $k: " - msg *= "normres = " - msg *= @sprintf("%.12e", β) - @info msg - end while (β > tol && length(fact) < krylovdim) # inner arnoldi loop - fact = expand!(iter, fact; verbosity=alg.verbosity - 2) + if alg.verbosity >= EACHITERATION_LEVEL + @info "GMRES linsolve in iteration $numiter; step $k: normres = $(normres2string(β))" + end + fact = expand!(iter, fact) numops += 1 # expand! 
applies the operator once k = length(fact) H = rayleighquotient(fact) @@ -83,18 +82,6 @@ function linsolve(operator, b, x₀, alg::GMRES, a₀::Number=0, a₁::Number=1; # New error β = convert(S, abs(y[k + 1])) - if alg.verbosity > 2 - msg = "GMRES linsolve in iter $numiter; step $k: " - msg *= "normres = " - msg *= @sprintf("%.12e", β) - @info msg - end - end - if alg.verbosity > 1 - msg = "GMRES linsolve in iter $numiter; finished at step $k: " - msg *= "normres = " - msg *= @sprintf("%.12e", β) - @info msg end # Solve upper triangular system @@ -123,24 +110,28 @@ function linsolve(operator, b, x₀, alg::GMRES, a₀::Number=0, a₁::Number=1; numops += 1 β = norm(r) if β < tol - if alg.verbosity > 0 + if alg.verbosity >= STARTSTOP_LEVEL @info """GMRES linsolve converged at iteration $numiter, step $k: - * norm of residual = $β - * number of operations = $numops""" + * norm of residual = $(normres2string(β)) + * number of operations = $numops""" end return (x, ConvergenceInfo(1, r, β, numiter, numops)) end end + if numiter >= maxiter + if alg.verbosity >= WARN_LEVEL + @warn """GMRES linsolve stopped without converging after $numiter iterations: + * norm of residual = $(normres2string(β)) + * number of operations = $numops""" + end + return (x, ConvergenceInfo(0, r, β, numiter, numops)) + end + if alg.verbosity >= EACHITERATION_LEVEL + @info "GMRES linsolve in iteration $numiter; step $k: normres = $(normres2string(β))" + end # Restart Arnoldi factorization with new r iter = ArnoldiIterator(operator, r, alg.orth) - fact = initialize!(iter, fact; verbosity=alg.verbosity - 2) - end - - if alg.verbosity > 0 - @warn """GMRES linsolve finished without converging after $numiter iterations: - * norm of residual = $β - * number of operations = $numops""" + fact = initialize!(iter, fact) end - return (x, ConvergenceInfo(0, r, β, numiter, numops)) end diff --git a/src/linsolve/linsolve.jl b/src/linsolve/linsolve.jl index 0e54cb1..763e307 100644 --- a/src/linsolve/linsolve.jl +++ b/src/linsolve/linsolve.jl @@ -43,8 +43,11 @@ The return value is always of the form `x, info = linsolve(...)` with Keyword arguments are given by: - - `verbosity::Int = 0`: verbosity level, i.e. 0 (no messages), 1 (single message - at the end), 2 (information after every iteration), 3 (information per Krylov step) + - `verbosity::Int = 0`: verbosity level, i.e. + - 0 (suppress all messages) + - 1 (only warnings) + - 2 (information at the beginning and end) + - 3 (progress info after every iteration) - `atol::Real`: the requested accuracy, i.e. absolute tolerance, on the norm of the residual. 
- `rtol::Real`: the requested accuracy on the norm of the residual, relative to the norm diff --git a/src/lssolve/lsmr.jl b/src/lssolve/lsmr.jl index 0a2341d..865d12b 100644 --- a/src/lssolve/lsmr.jl +++ b/src/lssolve/lsmr.jl @@ -40,15 +40,17 @@ function lssolve(operator, b, alg::LSMR, λ_::Real=0) λ::S = convert(S, λ_) # Check for early return - if abs(ζ̄) < tol - if alg.verbosity > 0 + if absζ̄ < tol + if alg.verbosity > STARTSTOP_LEVEL @info """LSMR lssolve converged without any iterations: - * ‖b - A * x ‖ = $β - * ‖[b - A * x; λ * x] ‖ = $β - * ‖ Aᴴ(b - A x) - λ^2 x ‖ = $absζ̄ - * number of operations = $numops""" + * ‖b - A * x ‖ = $(normres2string(β)) + * ‖[b - A * x; λ * x] ‖ = $(normres2string(β)) + * ‖ Aᴴ(b - A x) - λ^2 x ‖ = $(normres2string(absζ̄)) + * number of operations = $numops""" end - return (x, ConvergenceInfo(1, r, abs(ζ̄), numiter, numops)) + return (x, ConvergenceInfo(1, r, absζ̄, numiter, numops)) + elseif alg.verbosity >= STARTSTOP_LEVEL + @info "LSMR lssolve starts with convergence measure ‖ Aᴴ(b - A x) - λ^2 x ‖ = $(normres2string(absζ̄))" end while true @@ -101,31 +103,27 @@ function lssolve(operator, b, alg::LSMR, λ_::Real=0) absζ̄ = abs(ζ̄) if absζ̄ <= tol - if alg.verbosity > 0 + if alg.verbosity >= STARTSTOP_LEVEL @info """LSMR lssolve converged at iteration $numiter: - * ‖ b - A x ‖ = $(norm(r)) - * ‖ x ‖ = $(norm(x)) - * ‖ Aᴴ(b - A x) - λ^2 x ‖ = $absζ̄ - * number of operations = $numops""" + * ‖ b - A x ‖ = $(normres2string(norm(r))) + * ‖ x ‖ = $(normres2string(norm(x))) + * ‖ Aᴴ(b - A x) - λ^2 x ‖ = $(normres2string(absζ̄)) + * number of operations = $numops""" end return (x, ConvergenceInfo(1, r, absζ̄, numiter, numops)) - elseif numiter >= maxiter - if alg.verbosity > 0 - normr = norm(r) - normx = norm(x) + end + if numiter >= maxiter + if alg.verbosity >= WARN_LEVEL @warn """LSMR lssolve finished without converging after $numiter iterations: - * ‖ b - A x ‖ = $(norm(r)) - * ‖ x ‖ = $(norm(x)) - * ‖ Aᴴ(b - A x) - λ^2 x ‖ = $absζ̄ - * number of operations = $numops""" + * ‖ b - A x ‖ = $(normres2string(norm(r))) + * ‖ x ‖ = $(normres2string(norm(x))) + * ‖ Aᴴ(b - A x) - λ^2 x ‖ = $(normres2string(absζ̄)) + * number of operations = $numops""" end return (x, ConvergenceInfo(0, r, absζ̄, numiter, numops)) end - if alg.verbosity > 1 - msg = "LSMR lssolve in iter $numiter: " - msg *= "convergence measure ‖ Aᴴ(b - A x) - λ^2 x ‖ = " - msg *= @sprintf("%.12e", absζ̄) - @info msg + if alg.verbosity >= EACHITERATION_LEVEL + @info "LSMR lssolve in iter $numiter: convergence measure ‖ Aᴴ(b - A x) - λ^2 x ‖ = $(normres2string(absζ̄))" end end end diff --git a/src/lssolve/lssolve.jl b/src/lssolve/lssolve.jl index 3392367..f81753c 100644 --- a/src/lssolve/lssolve.jl +++ b/src/lssolve/lssolve.jl @@ -71,8 +71,11 @@ The return value is always of the form `x, info = lssolve(...)` with Keyword arguments are given by: - - `verbosity::Int = 0`: verbosity level, i.e. 0 (no messages), 1 (single message - at the end), 2 (information after every iteration), 3 (information per Krylov step) + - `verbosity::Int = 0`: verbosity level, i.e. + - 0 (suppress all messages) + - 1 (only warnings) + - 2 (information at the beginning and end) + - 3 (progress info after every iteration) - `atol::Real`: the requested accuracy, i.e. absolute tolerance, on the norm of the residual. 
- `rtol::Real`: the requested accuracy on the norm of the residual, relative to the norm diff --git a/src/matrixfun/expintegrator.jl b/src/matrixfun/expintegrator.jl index 7c87c26..4800d10 100644 --- a/src/matrixfun/expintegrator.jl +++ b/src/matrixfun/expintegrator.jl @@ -17,12 +17,6 @@ linear map, i.e. a `AbstractMatrix` or just a general function or callable objec any eigenvalues with real part larger than zero, however, the solution to the ODE will diverge, i.e. the fixed point is not stable. -!!! warning - - The returned solution might be the solution of the ODE integrated up to a smaller time - ``t̃ = sign(t) * |t̃|`` with ``|t̃| < |t|``, when the required precision could not be - attained. Always check `info.converged > 0` or `info.residual == 0` (see below). - ### Arguments: The linear map `A` can be an `AbstractMatrix` (dense or sparse) or a general function or @@ -41,18 +35,16 @@ of any type and should be in the domain of `A`. The return value is always of the form `y, info = expintegrator(...)` with - `y`: the result of the computation, i.e. - ``y = ϕ₀(t̃*A)*u₀ + t̃*ϕ₁(t̃*A)*u₁ + t̃^2*ϕ₂(t̃*A)*u₂ + …`` - with ``t̃ = sign(t) * |t̃|`` with ``|t̃| <= |t|``, such that the accumulated error in - `y` per unit time is at most equal to the keyword argument `tol` + ``y = ϕ₀(t*A)*u₀ + t*ϕ₁(t*A)*u₁ + t^2*ϕ₂(t*A)*u₂ + …`` - `info`: an object of type [`ConvergenceInfo`], which has the following fields - + `info.converged::Int`: 0 or 1 if the solution `y` was evolved all the way up to the - requested time `t`. - + `info.residual`: there is no residual in the conventional sense, however, this - value equals the residual time `t - t̃`, i.e. it is zero if `info.converged == 1` + + `info.converged::Int`: 0 or 1 if the solution `y` at time `t` was found with an + error below the requested tolerance per unit time, i.e. if `info.normres <= tol * abs(t)` + + `info.residual::Nothing`: value `nothing`, there is no concept of a residual in + this case + `info.normres::Real`: a (rough) estimate of the total error accumulated in the - solution, should be smaller than `tol * |t̃|` + solution + `info.numops::Int`: number of times the linear map was applied, i.e. number of times `f` was called, or a vector was multiplied with `A` + `info.numiter::Int`: number of times the Krylov subspace was restarted (see below) @@ -61,8 +53,12 @@ The return value is always of the form `y, info = expintegrator(...)` with Keyword arguments and their default values are given by: - - `verbosity::Int = 0`: verbosity level, i.e. 0 (no messages), 1 (single message - at the end), 2 (information after every iteration), 3 (information per Krylov step) + - `verbosity::Int = 0`: verbosity level, i.e. + - 0 (suppress all messages) + - 1 (only warnings) + - 2 (one message with convergence info at the end) + - 3 (progress info after every iteration) + - 4+ (all of the above and additional information about the Lanczos or Arnoldi iteration) - `krylovdim = 30`: the maximum dimension of the Krylov subspace that will be constructed. Note that the dimension of the vector space is not known or checked, e.g. `x₀` should not necessarily support the `Base.length` function. 
If you know the actual problem @@ -116,7 +112,9 @@ function expintegrator(A, t::Number, u::Tuple, alg::Union{Lanczos,Arnoldi}) S = real(T) w₀ = scale(u₀, one(T)) - # krylovdim and related allocations + # maxiter, krylovdim and related allocations + maxiter = alg.maxiter + @assert maxiter >= 1 krylovdim = alg.krylovdim K = krylovdim HH = zeros(T, (krylovdim + p + 1, krylovdim + p + 1)) @@ -126,14 +124,23 @@ function expintegrator(A, t::Number, u::Tuple, alg::Union{Lanczos,Arnoldi}) totalerr = zero(η) sgn = sign(t) τ::S = abs(t) - τ₀::S = zero(τ) - Δτ::S = τ - τ₀ # don't try any clever initial guesses, rely on correction mechanism + if isfinite(τ) + Δτ = τ # don't try any clever initial guesses, rely on correction mechanism + Δτmin = τ / alg.maxiter + maxerr = τ * η + else + Δτ = oneunit(S) + Δτmin = zero(S) + maxerr = η + end + totaltimestring = @sprintf("%.2e", τ) # safety factors δ::S = 1.2 γ::S = 0.8 # initial vectors + τ₀ = zero(τ) w = Vector{typeof(w₀)}(undef, p + 1) w[1] = w₀ # reuse the result of apply computed earlier: @@ -151,11 +158,11 @@ function expintegrator(A, t::Number, u::Tuple, alg::Union{Lanczos,Arnoldi}) end v = zerovector(w₀) β = norm(w[p + 1]) - if β < alg.tol && p == 1 - if alg.verbosity > 0 - @info """expintegrate finished after 0 iterations, converged to fixed point up to error = $β""" + if β < η && p == 1 + if alg.verbosity >= STARTSTOP_LEVEL + @info "expintegrate finished after 0 iterations, converged to fixed point up to error = $(normres2string(β))" end - return w₀, ConvergenceInfo(1, zero(τ), β, 0, numops) + return w₀, ConvergenceInfo(1, nothing, β, 0, numops) end v = scale!!(v, w[p + 1], 1 / β) @@ -170,14 +177,17 @@ function expintegrator(A, t::Number, u::Tuple, alg::Union{Lanczos,Arnoldi}) sizehint!(fact, krylovdim) # start outer iteration loop - maxiter = alg.maxiter numiter = 1 while true K = length(fact) V = basis(fact) if K == krylovdim - Δτ = min(Δτ, τ - τ₀) + if numiter < maxiter + Δτ = min(Δτ, τ - τ₀) + else + Δτ = τ - τ₀ + end # Small matrix exponential and error estimation H = fill!(view(HH, 1:(K + p + 1), 1:(K + p + 1)), zero(T)) @@ -190,11 +200,11 @@ function expintegrator(A, t::Number, u::Tuple, alg::Union{Lanczos,Arnoldi}) ϵ = abs(Δτ^p * β * normres(fact) * expH[K, K + p + 1]) ω = ϵ / (Δτ * η) - q = K / 2 - while ω > one(ω) + q::S = K / 2 + while numiter < maxiter && ω > one(ω) && Δτ > Δτmin ϵ_prev = ϵ Δτ_prev = Δτ - Δτ *= (γ / ω)^(1 / (q + 1)) + Δτ = max(Δτ * (γ / ω)^(1 // (q + 1)), Δτmin) H = fill!(view(HH, 1:(K + p + 1), 1:(K + p + 1)), zero(T)) mul!(view(H, 1:K, 1:K), rayleighquotient(fact), sgn * Δτ) H[1, K + 1] = 1 @@ -208,6 +218,7 @@ function expintegrator(A, t::Number, u::Tuple, alg::Union{Lanczos,Arnoldi}) end # take time step + τ₀ = numiter < maxiter ? 
τ₀ + Δτ : τ # to avoid floating point errors totalerr += ϵ jfac = 1 for j in 1:(p - 1) @@ -218,19 +229,11 @@ function expintegrator(A, t::Number, u::Tuple, alg::Union{Lanczos,Arnoldi}) # add first correction w[p + 1] = add!!(w[p + 1], residual(fact), expH[K, K + p + 1]) w₀ = add!!(w₀, w[p + 1], β * (sgn * Δτ)^p) - τ₀ += Δτ # increase time step for next iteration: if ω < γ Δτ *= (γ / ω)^(1 / (q + 1)) end - - if alg.verbosity > 1 - msg = "expintegrate in iteration $numiter: " - msg *= "reached time " * @sprintf("%.2e", τ₀) - msg *= ", total error = " * @sprintf("%.4e", totalerr) - @info msg - end elseif normres(fact) <= ((τ - τ₀) * η) || alg.eager # Small matrix exponential and error estimation H = fill!(view(HH, 1:(K + p + 1), 1:(K + p + 1)), zero(T)) @@ -258,49 +261,57 @@ function expintegrator(A, t::Number, u::Tuple, alg::Union{Lanczos,Arnoldi}) end end if τ₀ >= τ - if alg.verbosity > 0 - @info """expintegrate finished after $numiter iterations: total error = $totalerr""" + if totalerr <= maxerr + if alg.verbosity >= STARTSTOP_LEVEL + @info """expintegrate finished after $numiter iterations: + * total error = $(normres2string(totalerr)) + * number of operations = $numops""" + end + return w₀, ConvergenceInfo(1, nothing, totalerr, numiter, numops) + else + if alg.verbosity >= WARN_LEVEL + @warn """expintegrate did not reach sufficiently small error after $numiter iterations: + * total error = $(normres2string(totalerr)) + * number of operations = $numops""" + end + return w₀, ConvergenceInfo(0, nothing, totalerr, numiter, numops) end - return w₀, ConvergenceInfo(1, zero(τ), totalerr, numiter, numops) end if K < krylovdim - fact = expand!(iter, fact; verbosity=alg.verbosity - 2) + fact = expand!(iter, fact; verbosity=alg.verbosity - EACHITERATION_LEVEL) numops += 1 else - if numiter == maxiter - if alg.verbosity > 0 - @warn """expintegrate finished without convergence after $numiter iterations: - total error = $totalerr, residual time = $(τ - τ₀)""" - end - return w₀, ConvergenceInfo(0, τ - τ₀, totalerr, numiter, numops) - else # reinitialize - for j in 1:p - w[j + 1] = apply(A, w[j]) - numops += 1 - lfac = 1 - for l in 0:(p - j) - w[j + 1] = add!!(w[j + 1], u[j + l + 1], (sgn * τ₀)^l / lfac) - lfac *= l + 1 - end + for j in 1:p + w[j + 1] = apply(A, w[j]) + numops += 1 + lfac = 1 + for l in 0:(p - j) + w[j + 1] = add!!(w[j + 1], u[j + l + 1], (sgn * τ₀)^l / lfac) + lfac *= l + 1 end - β = norm(w[p + 1]) - if β < alg.tol && p == 1 # w₀ is fixed point of ODE - if alg.verbosity > 0 - @info """expintegrate finished after $numiter iterations, converged to fixed point up to error = $β""" - end - return w₀, ConvergenceInfo(1, zero(τ), β, numiter, numops) + end + β = norm(w[p + 1]) + if β < η && p == 1 # w₀ is fixed point of ODE + if alg.verbosity >= STARTSTOP_LEVEL + @info "expintegrate finished after $numiter iterations, converged to fixed point up to error = $(normres2string(totalerr))" end - v = scale!!(v, w[p + 1], 1 / β) + return w₀, ConvergenceInfo(1, nothing, β, numiter, numops) + end + v = scale!!(v, w[p + 1], 1 / β) - if alg isa Lanczos - iter = LanczosIterator(A, w[p + 1], alg.orth) - else - iter = ArnoldiIterator(A, w[p + 1], alg.orth) - end - fact = initialize!(iter, fact; verbosity=alg.verbosity - 2) - numops += 1 - numiter += 1 + if alg.verbosity >= EACHITERATION_LEVEL + timestring = @sprintf("%.2e", τ₀) + @info "expintegrate in iteration $numiter: reached time $timestring of $totaltimestring, total error = $(normres2string(totalerr))" end + + if alg isa Lanczos + iter = 
LanczosIterator(A, w[p + 1], alg.orth) + else + iter = ArnoldiIterator(A, w[p + 1], alg.orth) + end + fact = initialize!(iter, fact; verbosity=alg.verbosity - EACHITERATION_LEVEL) + numops += 1 + numiter += 1 end end end diff --git a/src/matrixfun/exponentiate.jl b/src/matrixfun/exponentiate.jl index 8f4d63c..9de0d2b 100644 --- a/src/matrixfun/exponentiate.jl +++ b/src/matrixfun/exponentiate.jl @@ -27,12 +27,12 @@ The return value is always of the form `y, info = exponentiate(...)` with - `info`: an object of type [`ConvergenceInfo`], which has the following fields - + `info.converged::Int`: 0 or 1 if the solution `y` was approximated up to the - requested tolerance `tol`. + + `info.converged::Int`: 0 or 1 if the solution `y` at time `t` was found with an + error below the requested tolerance per unit time, i.e. if `info.normres <= tol * abs(t)` + `info.residual::Nothing`: value `nothing`, there is no concept of a residual in this case - + `info.normres::Real`: a (rough) estimate of the error between the approximate and - exact solution + + `info.normres::Real`: a (rough) estimate of the total error accumulated in the + solution + `info.numops::Int`: number of times the linear map was applied, i.e. number of times `f` was called, or a vector was multiplied with `A` + `info.numiter::Int`: number of times the Krylov subspace was restarted (see below) @@ -46,8 +46,12 @@ The return value is always of the form `y, info = exponentiate(...)` with Keyword arguments and their default values are given by: - - `verbosity::Int = 0`: verbosity level, i.e. 0 (no messages), 1 (single message - at the end), 2 (information after every iteration), 3 (information per Krylov step) + - `verbosity::Int = 0`: verbosity level, i.e. + - 0 (suppress all messages) + - 1 (only warnings) + - 2 (one message with convergence info at the end) + - 3 (progress info after every iteration) + - 4+ (all of the above and additional information about the Lanczos or Arnoldi iteration) - `krylovdim = 30`: the maximum dimension of the Krylov subspace that will be constructed. Note that the dimension of the vector space is not known or checked, e.g. `x₀` should not necessarily support the `Base.length` function. 
If you know the actual problem diff --git a/test/eigsolve.jl b/test/eigsolve.jl index 39493ab..e9c8342 100644 --- a/test/eigsolve.jl +++ b/test/eigsolve.jl @@ -12,7 +12,17 @@ wrapvec(v, Val(mode)), n1, :SR; krylovdim=n, maxiter=1, tol=tolerance(T), - verbosity=1) + verbosity=2) + @test_logs eigsolve(wrapop(A, Val(mode)), + wrapvec(v, Val(mode)), n1, :SR; + krylovdim=n, + maxiter=1, tol=tolerance(T), + verbosity=1) + @test_logs (:warn,) eigsolve(wrapop(A, Val(mode)), + wrapvec(v, Val(mode)), n1, :SR; + krylovdim=n1 + 1, + maxiter=1, tol=tolerance(T), + verbosity=1) @test KrylovKit.eigselector(wrapop(A, Val(mode)), scalartype(v); krylovdim=n, maxiter=1, tol=tolerance(T), ishermitian=true) isa Lanczos @@ -31,10 +41,11 @@ @test A * U1 ≈ U1 * Diagonal(D1) @test A * U2 ≈ U2 * Diagonal(D2) - @test_logs (:warn,) eigsolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n + 1, - :LM; - krylovdim=2n, - maxiter=1, tol=tolerance(T), verbosity=0) + @test_logs (:warn,) (:warn,) eigsolve(wrapop(A, Val(mode)), + wrapvec(v, Val(mode)), n + 1, + :LM; + krylovdim=2n, + maxiter=1, tol=tolerance(T), verbosity=1) end end end @@ -84,11 +95,32 @@ end A = rand(T, (n, n)) .- one(T) / 2 v = rand(T, (n,)) n1 = div(n, 2) - D1, V1, info1 = @test_logs (:info,) eigsolve(wrapop(A, Val(mode)), - wrapvec(v, Val(mode)), n1, :SR; - orth=orth, krylovdim=n, - maxiter=1, tol=tolerance(T), - verbosity=1) + alg = Arnoldi(; orth=orth, krylovdim=n, maxiter=1, tol=tolerance(T)) + D1, V1, info1 = @constinferred eigsolve(wrapop(A, Val(mode)), + wrapvec(v, Val(mode)), n1, :SR, alg) + @test_logs eigsolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n1, :SR; + orth=orth, krylovdim=n, maxiter=1, tol=tolerance(T), + verbosity=0) + @test_logs eigsolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n1, :SR; + orth=orth, krylovdim=n, maxiter=1, tol=tolerance(T), + verbosity=1) + @test_logs (:warn,) eigsolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n1, + :SR; + orth=orth, krylovdim=n1 + 2, maxiter=1, + tol=tolerance(T), + verbosity=1) + @test_logs (:info,) eigsolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n1, + :SR; + orth=orth, krylovdim=n, maxiter=1, + tol=tolerance(T), + verbosity=2) + @test_logs min_level = Warn eigsolve(wrapop(A, Val(mode)), + wrapvec(v, Val(mode)), + n1, :SR; + orth=orth, krylovdim=n, maxiter=1, + tol=tolerance(T), + verbosity=4) + @test KrylovKit.eigselector(wrapop(A, Val(mode)), eltype(v); orth=orth, krylovdim=n, maxiter=1, tol=tolerance(T)) isa Arnoldi @@ -124,10 +156,11 @@ end @test A * U2 ≈ U2 * Diagonal(D2) end - @test_logs (:warn,) eigsolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n + 1, - :LM; orth=orth, - krylovdim=2n, - maxiter=1, tol=tolerance(T), verbosity=0) + @test_logs (:warn,) (:warn,) eigsolve(wrapop(A, Val(mode)), + wrapvec(v, Val(mode)), n + 1, + :LM; orth=orth, + krylovdim=2n, + maxiter=1, tol=tolerance(T), verbosity=1) end end end diff --git a/test/expintegrator.jl b/test/expintegrator.jl index b4980d2..6bdf793 100644 --- a/test/expintegrator.jl +++ b/test/expintegrator.jl @@ -25,9 +25,9 @@ end alg = Lanczos(; orth=orth, krylovdim=n, maxiter=2, tol=tolerance(T), verbosity=2) for k in 1:n - w, = @test_logs (:info,) (:info,) exponentiate(wrapop(A, Val(mode)), 1, - wrapvec(view(V, :, k), - Val(mode)), alg) + w, = @test_logs (:info,) exponentiate(wrapop(A, Val(mode)), 1, + wrapvec(view(V, :, k), + Val(mode)), alg) W[:, k] = unwrapvec(w) end @test W ≈ exp(A) @@ -65,9 +65,9 @@ end alg = Arnoldi(; orth=orth, krylovdim=n, maxiter=2, tol=tolerance(T), verbosity=2) for k in 1:n - w, = @test_logs (:info,) 
(:info,) exponentiate(wrapop(A, Val(mode)), 1, - wrapvec(view(V, :, k), - Val(mode)), alg) + w, = @test_logs (:info,) exponentiate(wrapop(A, Val(mode)), 1, + wrapvec(view(V, :, k), + Val(mode)), alg) W[:, k] = unwrapvec(w) end @test W ≈ exp(A) diff --git a/test/factorize.jl b/test/factorize.jl index 7441bfe..e3f8140 100644 --- a/test/factorize.jl +++ b/test/factorize.jl @@ -10,9 +10,10 @@ v = rand(T, (n,)) A = (A + A') iter = LanczosIterator(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), orth) - verbosity = 3 - fact = @constinferred initialize(iter; verbosity=verbosity) - @constinferred expand!(iter, fact; verbosity=verbosity) + fact = @constinferred initialize(iter) + @constinferred expand!(iter, fact) + @test_logs initialize(iter; verbosity=0) + @test_logs (:info,) initialize(iter; verbosity=1) verbosity = 1 while length(fact) < n if verbosity == 1 @@ -22,16 +23,21 @@ end verbosity = 1 - verbosity # flipflop end - V = stack(unwrapvec, basis(fact)) H = rayleighquotient(fact) @test normres(fact) < 10 * n * eps(real(T)) @test V' * V ≈ I @test A * V ≈ V * H - @constinferred initialize!(iter, deepcopy(fact); verbosity=1) states = collect(Iterators.take(iter, n)) # collect tests size and eltype? @test rayleighquotient(last(states)) ≈ H + + @constinferred shrink!(fact, n - 1) + @test_logs (:info,) shrink!(fact, n - 2; verbosity=1) + @test_logs shrink!(fact, n - 3; verbosity=0) + @constinferred initialize!(iter, deepcopy(fact)) + @test_logs initialize!(iter, deepcopy(fact); verbosity=0) + @test_logs (:info,) initialize!(iter, deepcopy(fact); verbosity=1) end end end @@ -47,9 +53,10 @@ end A = rand(T, (n, n)) v = rand(T, (n,)) iter = ArnoldiIterator(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), orth) - verbosity = 3 - fact = @constinferred initialize(iter; verbosity=verbosity) - @constinferred expand!(iter, fact; verbosity=verbosity) + fact = @constinferred initialize(iter) + @constinferred expand!(iter, fact) + @test_logs initialize(iter; verbosity=0) + @test_logs (:info,) initialize(iter; verbosity=1) verbosity = 1 while length(fact) < n if verbosity == 1 @@ -67,9 +74,15 @@ end @test V' * V ≈ I @test A * V ≈ V * H - @constinferred initialize!(iter, deepcopy(fact); verbosity=1) states = collect(Iterators.take(iter, n)) # collect tests size and eltype? @test rayleighquotient(last(states)) ≈ H + + @constinferred shrink!(fact, n - 1) + @test_logs (:info,) shrink!(fact, n - 2; verbosity=1) + @test_logs shrink!(fact, n - 3; verbosity=0) + @constinferred initialize!(iter, deepcopy(fact)) + @test_logs initialize!(iter, deepcopy(fact); verbosity=0) + @test_logs (:info,) initialize!(iter, deepcopy(fact); verbosity=1) end end end @@ -161,3 +174,100 @@ end end end end + +# Test complete Golub-Kahan-Lanczos factorization +@testset "Complete Golub-Kahan-Lanczos factorization ($mode)" for mode in + (:vector, :inplace, + :outplace, :mixed) + scalartypes = mode === :vector ? (Float32, Float64, ComplexF32, ComplexF64) : + (ComplexF64,) + orths = mode === :vector ? 
(cgs2, mgs2, cgsr, mgsr) : (mgsr,) + @testset for T in scalartypes + @testset for orth in orths + A = rand(T, (n, n)) + v = A * rand(T, (n,)) # ensure v is in column space of A + iter = GKLIterator(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), orth) + fact = @constinferred initialize(iter) + @constinferred expand!(iter, fact) + @test_logs initialize(iter; verbosity=0) + @test_logs (:info,) initialize(iter; verbosity=1) + verbosity = 1 + while length(fact) < n + if verbosity == 1 + @test_logs (:info,) expand!(iter, fact; verbosity=verbosity) + else + @test_logs expand!(iter, fact; verbosity=verbosity) + end + verbosity = 1 - verbosity # flipflop + end + + U = stack(unwrapvec, basis(fact, :U)) + V = stack(unwrapvec, basis(fact, :V)) + B = rayleighquotient(fact) + @test normres(fact) < 10 * n * eps(real(T)) + @test U' * U ≈ I + @test V' * V ≈ I + @test A * V ≈ U * B + @test A' * U ≈ V * B' + + states = collect(Iterators.take(iter, n)) # collect tests size and eltype? + @test rayleighquotient(last(states)) ≈ B + + @constinferred shrink!(fact, n - 1) + @test_logs (:info,) shrink!(fact, n - 2; verbosity=1) + @test_logs shrink!(fact, n - 3; verbosity=0) + @constinferred initialize!(iter, deepcopy(fact)) + @test_logs initialize!(iter, deepcopy(fact); verbosity=0) + @test_logs (:info,) initialize!(iter, deepcopy(fact); verbosity=1) + end + end +end + +# Test incomplete Golub-Kahan-Lanczos factorization +@testset "Incomplete Golub-Kahan-Lanczos factorization ($mode)" for mode in + (:vector, :inplace, + :outplace, :mixed) + scalartypes = mode === :vector ? (Float32, Float64, ComplexF32, ComplexF64) : + (ComplexF64,) + orths = mode === :vector ? (cgs2, mgs2, cgsr, mgsr) : (mgsr,) + @testset for T in scalartypes + @testset for orth in orths + if T == Complex{Int} + A = rand(-100:100, (N, N)) + im * rand(-100:100, (N, N)) + v = rand(-100:100, (N,)) + else + A = rand(T, (N, N)) + v = rand(T, (N,)) + end + iter = @constinferred GKLIterator(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), + orth) + krylovdim = 3 * n + fact = @constinferred initialize(iter) + while normres(fact) > eps(float(real(T))) && length(fact) < krylovdim + @constinferred expand!(iter, fact) + Ũ, Ṽ, B, r̃, β, e = fact + U = stack(unwrapvec, Ũ) + V = stack(unwrapvec, Ṽ) + r = unwrapvec(r̃) + @test U' * U ≈ I + @test V' * V ≈ I + @test norm(r) ≈ β + @test A * V ≈ U * B + r * e' + @test A' * U ≈ V * B' + end + + fact = @constinferred shrink!(fact, div(n, 2)) + U = stack(unwrapvec, @constinferred basis(fact, :U)) + V = stack(unwrapvec, @constinferred basis(fact, :V)) + B = @constinferred rayleighquotient(fact) + r = unwrapvec(@constinferred residual(fact)) + β = @constinferred normres(fact) + e = @constinferred rayleighextension(fact) + @test U' * U ≈ I + @test V' * V ≈ I + @test norm(r) ≈ β + @test A * V ≈ U * B + r * e' + @test A' * U ≈ V * B' + end + end +end diff --git a/test/geneigsolve.jl b/test/geneigsolve.jl index d154b81..21fec18 100644 --- a/test/geneigsolve.jl +++ b/test/geneigsolve.jl @@ -19,7 +19,45 @@ n1, :SR; orth=orth, krylovdim=n, maxiter=1, tol=tolerance(T), ishermitian=true, isposdef=true, - verbosity=2) + verbosity=0) + + if info.converged < n1 + @test_logs geneigsolve((wrapop(A, Val(mode)), + wrapop(B, Val(mode))), + wrapvec(v, Val(mode)), + n1, :SR; orth=orth, krylovdim=n, + maxiter=1, tol=tolerance(T), + ishermitian=true, isposdef=true, + verbosity=0) + @test_logs geneigsolve((wrapop(A, Val(mode)), + wrapop(B, Val(mode))), + wrapvec(v, Val(mode)), + n1, :SR; orth=orth, krylovdim=n, + maxiter=1, tol=tolerance(T), + 
ishermitian=true, isposdef=true, + verbosity=1) + @test_logs (:warn,) geneigsolve((wrapop(A, Val(mode)), + wrapop(B, Val(mode))), + wrapvec(v, Val(mode)), + n1, :SR; orth=orth, krylovdim=n1 + 1, + maxiter=1, tol=tolerance(T), + ishermitian=true, isposdef=true, + verbosity=1) + @test_logs (:info,) geneigsolve((wrapop(A, Val(mode)), + wrapop(B, Val(mode))), + wrapvec(v, Val(mode)), + n1, :SR; orth=orth, krylovdim=n, + maxiter=1, tol=tolerance(T), + ishermitian=true, isposdef=true, + verbosity=2) + @test_logs min_level = Warn geneigsolve((wrapop(A, Val(mode)), + wrapop(B, Val(mode))), + wrapvec(v, Val(mode)), + n1, :SR; orth=orth, krylovdim=n, + maxiter=1, tol=tolerance(T), + ishermitian=true, isposdef=true, + verbosity=3) + end @test KrylovKit.geneigselector((wrapop(A, Val(mode)), wrapop(B, Val(mode))), scalartype(v); orth=orth, krylovdim=n, maxiter=1, tol=tolerance(T), ishermitian=true, diff --git a/test/gklfactorize.jl b/test/gklfactorize.jl index 859a5f7..e69de29 100644 --- a/test/gklfactorize.jl +++ b/test/gklfactorize.jl @@ -1,89 +0,0 @@ -# Test complete Golub-Kahan-Lanczos factorization -@testset "Complete Golub-Kahan-Lanczos factorization ($mode)" for mode in - (:vector, :inplace, - :outplace, :mixed) - scalartypes = mode === :vector ? (Float32, Float64, ComplexF32, ComplexF64) : - (ComplexF64,) - orths = mode === :vector ? (cgs2, mgs2, cgsr, mgsr) : (mgsr,) - @testset for T in scalartypes - @testset for orth in orths - A = rand(T, (n, n)) - v = A * rand(T, (n,)) # ensure v is in column space of A - iter = GKLIterator(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), orth) - verbosity = 3 - fact = @constinferred initialize(iter; verbosity=verbosity) - @constinferred expand!(iter, fact; verbosity=verbosity) - verbosity = 1 - while length(fact) < n - if verbosity == 1 - @test_logs (:info,) expand!(iter, fact; verbosity=verbosity) - else - @test_logs expand!(iter, fact; verbosity=verbosity) - end - verbosity = 1 - verbosity # flipflop - end - - U = stack(unwrapvec, basis(fact, :U)) - V = stack(unwrapvec, basis(fact, :V)) - B = rayleighquotient(fact) - @test normres(fact) < 10 * n * eps(real(T)) - @test U' * U ≈ I - @test V' * V ≈ I - @test A * V ≈ U * B - @test A' * U ≈ V * B' - - @constinferred initialize!(iter, deepcopy(fact); verbosity=1) - states = collect(Iterators.take(iter, n)) # collect tests size and eltype? - @test rayleighquotient(last(states)) ≈ B - end - end -end - -# Test incomplete Golub-Kahan-Lanczos factorization -@testset "Incomplete Golub-Kahan-Lanczos factorization ($mode)" for mode in - (:vector, :inplace, - :outplace, :mixed) - scalartypes = mode === :vector ? (Float32, Float64, ComplexF32, ComplexF64) : - (ComplexF64,) - orths = mode === :vector ? 
(cgs2, mgs2, cgsr, mgsr) : (mgsr,) - @testset for T in scalartypes - @testset for orth in orths - if T == Complex{Int} - A = rand(-100:100, (N, N)) + im * rand(-100:100, (N, N)) - v = rand(-100:100, (N,)) - else - A = rand(T, (N, N)) - v = rand(T, (N,)) - end - iter = @constinferred GKLIterator(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), - orth) - krylovdim = 3 * n - fact = @constinferred initialize(iter) - while normres(fact) > eps(float(real(T))) && length(fact) < krylovdim - @constinferred expand!(iter, fact) - Ũ, Ṽ, B, r̃, β, e = fact - U = stack(unwrapvec, Ũ) - V = stack(unwrapvec, Ṽ) - r = unwrapvec(r̃) - @test U' * U ≈ I - @test V' * V ≈ I - @test norm(r) ≈ β - @test A * V ≈ U * B + r * e' - @test A' * U ≈ V * B' - end - - fact = @constinferred shrink!(fact, div(n, 2)) - U = stack(unwrapvec, @constinferred basis(fact, :U)) - V = stack(unwrapvec, @constinferred basis(fact, :V)) - B = @constinferred rayleighquotient(fact) - r = unwrapvec(@constinferred residual(fact)) - β = @constinferred normres(fact) - e = @constinferred rayleighextension(fact) - @test U' * U ≈ I - @test V' * V ≈ I - @test norm(r) ≈ β - @test A * V ≈ U * B + r * e' - @test A' * U ≈ V * B' - end - end -end diff --git a/test/linsolve.jl b/test/linsolve.jl index d2a4211..50ef633 100644 --- a/test/linsolve.jl +++ b/test/linsolve.jl @@ -6,17 +6,38 @@ A = rand(T, (n, n)) A = sqrt(A * A') b = rand(T, n) - alg = CG(; maxiter=2n, tol=tolerance(T) * norm(b), verbosity=2) # because of loss of orthogonality, we choose maxiter = 2n + alg = CG(; maxiter=2n, tol=tolerance(T) * norm(b), verbosity=0) # because of loss of orthogonality, we choose maxiter = 2n x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); ishermitian=true, isposdef=true, maxiter=2n, krylovdim=1, rtol=tolerance(T), - verbosity=1) + verbosity=0) @test info.converged > 0 @test unwrapvec(b) ≈ A * unwrapvec(x) - x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x; - ishermitian=true, isposdef=true, maxiter=2n, - krylovdim=1, rtol=tolerance(T)) + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); + ishermitian=true, isposdef=true, maxiter=2n, + krylovdim=1, rtol=tolerance(T), + verbosity=0) + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); + ishermitian=true, isposdef=true, maxiter=2n, + krylovdim=1, rtol=tolerance(T), + verbosity=1) + @test_logs (:info,) (:info,) linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); + ishermitian=true, isposdef=true, maxiter=2n, + krylovdim=1, rtol=tolerance(T), + verbosity=2) + @test_logs min_level = Warn linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); + ishermitian=true, isposdef=true, maxiter=2n, + krylovdim=1, rtol=tolerance(T), + verbosity=3) + + x, info = linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg) @test info.numops == 1 + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg) + alg = CG(; maxiter=2n, tol=tolerance(T) * norm(b), verbosity=1) + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg) + alg = CG(; maxiter=2n, tol=tolerance(T) * norm(b), verbosity=2) + @test_logs (:info,) linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg) + alg = CG(; maxiter=2n, tol=tolerance(T) * norm(b), verbosity=0) A = rand(T, (n, n)) A = sqrt(A * A') @@ -37,10 +58,27 @@ end A = rand(T, (N, N)) A = sqrt(sqrt(A * A')) / N b = rand(T, N) - x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); + x₀ = rand(T, N) + x, info = @constinferred linsolve(wrapop(A, 
Val(mode)), wrapvec(b, Val(mode)), + wrapvec(x₀, Val(mode)); isposdef=true, maxiter=1, krylovdim=N, rtol=tolerance(T)) @test unwrapvec(b) ≈ A * unwrapvec(x) + unwrapvec(info.residual) + if info.converged == 0 + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + wrapvec(x₀, Val(mode)); + isposdef=true, maxiter=1, krylovdim=N, + rtol=tolerance(T), verbosity=0) + @test_logs (:warn,) linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + wrapvec(x₀, Val(mode)); + isposdef=true, maxiter=1, krylovdim=N, + rtol=tolerance(T), verbosity=1) + @test_logs (:info,) (:warn,) linsolve(wrapop(A, Val(mode)), + wrapvec(b, Val(mode)), + wrapvec(x₀, Val(mode)); + isposdef=true, maxiter=1, krylovdim=N, + rtol=tolerance(T), verbosity=2) + end α₀ = rand(real(T)) + 1 α₁ = rand(real(T)) @@ -59,18 +97,38 @@ end @testset for T in scalartypes A = rand(T, (n, n)) .- one(T) / 2 b = rand(T, n) - alg = GMRES(; krylovdim=n, maxiter=2, tol=tolerance(T) * norm(b), verbosity=2) + alg = GMRES(; krylovdim=n, maxiter=2, tol=tolerance(T) * norm(b), verbosity=0) x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); krylovdim=n, maxiter=2, - rtol=tolerance(T), verbosity=1) + rtol=tolerance(T), verbosity=0) @test info.converged == 1 @test unwrapvec(b) ≈ A * unwrapvec(x) + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); + krylovdim=n, maxiter=2, + rtol=tolerance(T), verbosity=0) + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); + krylovdim=n, maxiter=2, + rtol=tolerance(T), verbosity=1) + @test_logs (:info,) (:info,) linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); + krylovdim=n, maxiter=2, + rtol=tolerance(T), verbosity=2) + @test_logs min_level = Warn linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); + krylovdim=n, maxiter=2, + rtol=tolerance(T), verbosity=3) + + alg = GMRES(; krylovdim=n, maxiter=2, tol=tolerance(T) * norm(b), verbosity=0) x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg) @test info.numops == 1 + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg) + alg = GMRES(; krylovdim=n, maxiter=2, tol=tolerance(T) * norm(b), verbosity=1) + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg) + alg = GMRES(; krylovdim=n, maxiter=2, tol=tolerance(T) * norm(b), verbosity=2) + @test_logs (:info,) linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg) + alg = GMRES(; krylovdim=n, maxiter=2, tol=tolerance(T) * norm(b), verbosity=0) nreal = (T <: Real) ? 
n : 2n - algr = GMRES(; krylovdim=nreal, maxiter=2, tol=tolerance(T) * norm(b), verbosity=2) + algr = GMRES(; krylovdim=nreal, maxiter=2, tol=tolerance(T) * norm(b), verbosity=0) xr, infor = @constinferred reallinsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), zerovector(x), algr) @test infor.converged == 1 @@ -103,12 +161,30 @@ end A = rand(T, (N, N)) .- one(T) / 2 A = I - T(9 / 10) * A / maximum(abs, eigvals(A)) b = rand(T, N) - x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); + x₀ = rand(T, N) + x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + wrapvec(x₀, Val(mode)); krylovdim=3 * n, maxiter=50, rtol=tolerance(T)) @test unwrapvec(b) ≈ A * unwrapvec(x) + unwrapvec(info.residual) + if info.converged == 0 + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + wrapvec(x₀, Val(mode)); + krylovdim=3 * n, + maxiter=50, rtol=tolerance(T), verbosity=0) + @test_logs (:warn,) linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + wrapvec(x₀, Val(mode)); + krylovdim=3 * n, + maxiter=50, rtol=tolerance(T), verbosity=1) + @test_logs (:info,) (:warn,) linsolve(wrapop(A, Val(mode)), + wrapvec(b, Val(mode)), + wrapvec(x₀, Val(mode)); + krylovdim=3 * n, + maxiter=50, rtol=tolerance(T), + verbosity=2) + end - alg = GMRES(; krylovdim=3 * n, maxiter=50, tol=tolerance(T) * norm(b), verbosity=2) + alg = GMRES(; krylovdim=3 * n, maxiter=50, tol=tolerance(T) * norm(b), verbosity=0) xr, infor = @constinferred reallinsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), zerovector(x), alg) @test unwrapvec(b) ≈ A * unwrapvec(xr) + unwrapvec(infor.residual) @@ -133,32 +209,79 @@ end end end -# Test BICGStab -@testset "BiCGStab ($mode)" for mode in (:vector, :inplace, :outplace) +# Test BiCGStab +@testset "BiCGStab small problem ($mode)" for mode in (:vector, :inplace, :outplace) scalartypes = mode === :vector ? 
(Float32, Float64, ComplexF32, ComplexF64) : (ComplexF64,) @testset for T in scalartypes A = rand(T, (n, n)) .- one(T) / 2 A = I - T(9 / 10) * A / maximum(abs, eigvals(A)) b = rand(T, n) - alg = BiCGStab(; maxiter=4n, tol=tolerance(T) * norm(b), verbosity=2) + alg = BiCGStab(; maxiter=4n, tol=tolerance(T) * norm(b), verbosity=0) x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), wrapvec(zerovector(b), Val(mode)), alg) @test info.converged > 0 @test unwrapvec(b) ≈ A * unwrapvec(x) + alg = BiCGStab(; maxiter=4n, tol=tolerance(T) * norm(b), verbosity=0) + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + wrapvec(zerovector(b), Val(mode)), alg) + alg = BiCGStab(; maxiter=4n, tol=tolerance(T) * norm(b), verbosity=1) + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + wrapvec(zerovector(b), Val(mode)), alg) + alg = BiCGStab(; maxiter=4n, tol=tolerance(T) * norm(b), verbosity=2) + @test_logs (:info,) (:info,) linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + wrapvec(zerovector(b), Val(mode)), alg) + alg = BiCGStab(; maxiter=4n, tol=tolerance(T) * norm(b), verbosity=3) + @test_logs min_level = Warn linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + wrapvec(zerovector(b), Val(mode)), alg) + alg = BiCGStab(; maxiter=4n, tol=tolerance(T) * norm(b), verbosity=0) + x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg) @test info.numops == 1 + alg = BiCGStab(; maxiter=4n, tol=tolerance(T) * norm(b), verbosity=0) + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg) + alg = BiCGStab(; maxiter=4n, tol=tolerance(T) * norm(b), verbosity=1) + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg) + alg = BiCGStab(; maxiter=4n, tol=tolerance(T) * norm(b), verbosity=2) + @test_logs (:info,) linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg) + alg = BiCGStab(; maxiter=4n, tol=tolerance(T) * norm(b), verbosity=0) + α₀ = rand(real(T)) + 1 + α₁ = rand(real(T)) + x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + wrapvec(zerovector(b), Val(mode)), alg, α₀, α₁) + @test unwrapvec(b) ≈ (α₀ * I + α₁ * A) * unwrapvec(x) + @test info.converged > 0 + end +end + +@testset "BiCGStab large problem ($mode)" for mode in (:vector, :inplace, :outplace) + scalartypes = mode === :vector ? 
(Float32, Float64, ComplexF32, ComplexF64) : + (ComplexF64,) + @testset for T in scalartypes A = rand(T, (N, N)) .- one(T) / 2 b = rand(T, N) α₀ = maximum(abs, eigvals(A)) α₁ = -9 * rand(real(T)) / 10 - alg = BiCGStab(; maxiter=2, tol=tolerance(T) * norm(b), verbosity=1) + alg = BiCGStab(; maxiter=2, tol=tolerance(T) * norm(b), verbosity=0) x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), wrapvec(zerovector(b), Val(mode)), alg, α₀, α₁) @test unwrapvec(b) ≈ (α₀ * I + α₁ * A) * unwrapvec(x) + unwrapvec(info.residual) + if info.converged == 0 + @test_logs linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + wrapvec(zerovector(b), Val(mode)), alg, α₀, α₁) + alg = BiCGStab(; maxiter=2, tol=tolerance(T) * norm(b), verbosity=1) + @test_logs (:warn,) linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + wrapvec(zerovector(b), Val(mode)), alg, α₀, α₁) + alg = BiCGStab(; maxiter=2, tol=tolerance(T) * norm(b), verbosity=2) + @test_logs (:info,) (:warn,) linsolve(wrapop(A, Val(mode)), + wrapvec(b, Val(mode)), + wrapvec(zerovector(b), Val(mode)), alg, + α₀, α₁) + end + alg = BiCGStab(; maxiter=10 * N, tol=tolerance(T) * norm(b), verbosity=0) x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg, α₀, α₁) diff --git a/test/lssolve.jl b/test/lssolve.jl index 6b0f330..9988d6c 100644 --- a/test/lssolve.jl +++ b/test/lssolve.jl @@ -12,19 +12,36 @@ b = rand(T, 2 * n) tol = tol = 10 * n * eps(real(T)) - alg = LSMR(; maxiter=3, tol=tol, verbosity=2) - x, info = @constinferred lssolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), alg) + alg = LSMR(; maxiter=3, tol=tol, verbosity=0) + x, info = @constinferred lssolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); + maxiter=3) r = b - A * unwrapvec(x) @test unwrapvec(info.residual) ≈ r @test info.normres ≈ norm(A' * r) + @test info.converged == 0 + @test_logs lssolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); maxiter=3, + verbosity=0) + @test_logs (:warn,) lssolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); maxiter=3, + verbosity=1) + @test_logs (:info,) (:warn,) lssolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); + maxiter=3, verbosity=2) - alg = LSMR(; maxiter=2 * n, tol=tol, verbosity=1) + alg = LSMR(; maxiter=2 * n, tol=tol, verbosity=0) # maxiter = 2 * n because of loss of orthogonality for single precision x, info = @constinferred lssolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), alg) @test info.converged > 0 @test abs(inner(V[:, end], unwrapvec(x))) < alg.tol @test unwrapvec(x) ≈ V * Diagonal(invS) * U' * b + @test_logs lssolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), alg) + alg = LSMR(; maxiter=2 * n, tol=tol, verbosity=1) + @test_logs lssolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), alg) + alg = LSMR(; maxiter=2 * n, tol=tol, verbosity=2) + @test_logs (:info,) (:info,) lssolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + alg) + alg = LSMR(; maxiter=2 * n, tol=tol, verbosity=3) + @test_logs min_level = Warn lssolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), + alg) λ = rand(real(T)) alg = LSMR(; maxiter=2 * n, tol=tol, verbosity=0) diff --git a/test/runtests.jl b/test/runtests.jl index e424fad..40841b1 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,7 +1,7 @@ using Random Random.seed!(76543210) -using Test, TestExtras +using Test, TestExtras, Logging using LinearAlgebra using KrylovKit using VectorInterface @@ -25,31 +25,47 @@ const mgsr = ModifiedGramSchmidtIR(η₀) # Tests # ----- t = time() -include("factorize.jl") -include("gklfactorize.jl") - 
-include("linsolve.jl") -include("lssolve.jl") -include("eigsolve.jl") -include("schursolve.jl") -include("geneigsolve.jl") -include("svdsolve.jl") -include("expintegrator.jl") - -include("linalg.jl") -include("nestedtuple.jl") - -include("ad/linsolve.jl") -include("ad/eigsolve.jl") -include("ad/degenerateeigsolve.jl") -include("ad/svdsolve.jl") +@testset "Krylov factorisations" verbose = true begin + include("factorize.jl") +end +@testset "Linear problems with linsolve" verbose = true begin + include("linsolve.jl") +end +@testset "Least squares problems with lssolve" verbose = true begin + include("lssolve.jl") +end +@testset "Eigenvalue problems with eigsolve" verbose = true begin + include("eigsolve.jl") + include("schursolve.jl") + include("geneigsolve.jl") +end +@testset "Singular value problems with svdsolve" verbose = true begin + include("svdsolve.jl") +end +@testset "Exponentiate and exponential integrator" verbose = true begin + include("expintegrator.jl") +end +@testset "Linear Algebra Utilities" verbose = true begin + include("linalg.jl") +end +@testset "Nested Tuples" verbose = true begin + include("nestedtuple.jl") +end +@testset "Differentiation rules" verbose = true begin + include("ad/linsolve.jl") + include("ad/eigsolve.jl") + include("ad/degenerateeigsolve.jl") + include("ad/svdsolve.jl") +end t = time() - t -println("Tests finished in $t seconds") # Issues # ------ -include("issues.jl") +@testset "Known issues" verbose = true begin + include("issues.jl") +end +println("Tests finished in $t seconds") module AquaTests using KrylovKit diff --git a/test/schursolve.jl b/test/schursolve.jl index aebd418..a6efcc7 100644 --- a/test/schursolve.jl +++ b/test/schursolve.jl @@ -11,6 +11,24 @@ T1, V1, D1, info1 = @constinferred schursolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n1, :SR, alg) + @test_logs schursolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n1, :SR, + alg) + alg = Arnoldi(; orth=orth, krylovdim=n, maxiter=1, tol=tolerance(T), + verbosity=1) + @test_logs schursolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n1, :SR, + alg) + alg = Arnoldi(; orth=orth, krylovdim=n1 + 1, maxiter=1, tol=tolerance(T), + verbosity=1) + @test_logs (:warn,) schursolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n1, + :SR, + alg) + alg = Arnoldi(; orth=orth, krylovdim=n, maxiter=1, tol=tolerance(T), + verbosity=2) + @test_logs (:info,) schursolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n1, + :SR, + alg) + + alg = Arnoldi(; orth=orth, krylovdim=n, maxiter=1, tol=tolerance(T)) n2 = n - n1 T2, V2, D2, info2 = schursolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n2, :LR, alg) diff --git a/test/svdsolve.jl b/test/svdsolve.jl index bad5157..fdc7799 100644 --- a/test/svdsolve.jl +++ b/test/svdsolve.jl @@ -9,8 +9,32 @@ S, lvecs, rvecs, info = @constinferred svdsolve(wrapop(A, Val(mode)), wrapvec(A[:, 1], Val(mode)), n, :LR, alg) - @test S ≈ svdvals(A) + @test info.converged == n + + n1 = div(n, 2) + @test_logs svdsolve(wrapop(A, Val(mode)), wrapvec(A[:, 1], Val(mode)), n1, :LR, + alg) + alg = GKL(; orth=orth, krylovdim=2 * n, maxiter=1, tol=tolerance(T), + verbosity=1) + @test_logs svdsolve(wrapop(A, Val(mode)), wrapvec(A[:, 1], Val(mode)), n1, :LR, + alg) + alg = GKL(; orth=orth, krylovdim=n1 + 1, maxiter=1, tol=tolerance(T), + verbosity=1) + @test_logs (:warn,) svdsolve(wrapop(A, Val(mode)), wrapvec(A[:, 1], Val(mode)), + n1, :LR, + alg) + alg = GKL(; orth=orth, krylovdim=2 * n, maxiter=1, tol=tolerance(T), + verbosity=2) + @test_logs (:info,) svdsolve(wrapop(A, Val(mode)), 
wrapvec(A[:, 1], Val(mode)), + n1, :LR, + alg) + alg = GKL(; orth=orth, krylovdim=2 * n, maxiter=1, tol=tolerance(T), + verbosity=4) + @test_logs min_level = Warn svdsolve(wrapop(A, Val(mode)), + wrapvec(A[:, 1], Val(mode)), + n1, :LR, + alg) U = stack(unwrapvec, lvecs) V = stack(unwrapvec, rvecs) diff --git a/test/testsetup.jl b/test/testsetup.jl index 43deaec..a24a5c0 100644 --- a/test/testsetup.jl +++ b/test/testsetup.jl @@ -10,7 +10,7 @@ using LinearAlgebra: LinearAlgebra # Utility functions # ----------------- "function for determining the precision of a type" -tolerance(T::Type{<:Number}) = eps(real(T))^(2 / 3) +tolerance(T::Type{<:Number}) = eps(real(T))^(2 // 3) "function for comparing sets of eigenvalues" function ≊(list1::AbstractVector, list2::AbstractVector) From bf5ff016adf1fa39b1e0da3fa88dd20bac1428de Mon Sep 17 00:00:00 2001 From: Jutho Date: Sat, 11 Jan 2025 01:19:47 +0100 Subject: [PATCH 4/8] make default verbosity warn, disable ad tests until fixed --- src/algorithms.jl | 133 ++++++++++++++++++++++----------------- src/eigsolve/eigsolve.jl | 16 ++--- src/linsolve/linsolve.jl | 26 ++++---- src/lssolve/lssolve.jl | 8 +-- test/lssolve.jl | 3 +- test/nestedtuple.jl | 12 ++-- test/runtests.jl | 14 ++--- test/schursolve.jl | 3 +- 8 files changed, 118 insertions(+), 97 deletions(-) diff --git a/src/algorithms.jl b/src/algorithms.jl index ce1dfc0..a9d5655 100644 --- a/src/algorithms.jl +++ b/src/algorithms.jl @@ -84,8 +84,12 @@ abstract type KrylovAlgorithm end # General purpose; good for linear systems, eigensystems and matrix functions """ - Lanczos(; krylovdim=KrylovDefaults.krylovdim, maxiter=KrylovDefaults.maxiter, - tol=KrylovDefaults.tol, orth=KrylovDefaults.orth, eager=false, verbosity=0) + Lanczos(; krylovdim=KrylovDefaults.krylovdim, + maxiter=KrylovDefaults.maxiter, + tol=KrylovDefaults.tol, + orth=KrylovDefaults.orth, + eager=false, + verbosity=KrylovDefaults.verbosity[]) Represents the Lanczos algorithm for building the Krylov subspace; assumes the linear operator is real symmetric or complex Hermitian. 
Can be used in `eigsolve` and @@ -111,18 +115,22 @@ struct Lanczos{O<:Orthogonalizer,S<:Real} <: KrylovAlgorithm verbosity::Int end function Lanczos(; - krylovdim::Int=KrylovDefaults.krylovdim, - maxiter::Int=KrylovDefaults.maxiter, - tol::Real=KrylovDefaults.tol, + krylovdim::Int=KrylovDefaults.krylovdim[], + maxiter::Int=KrylovDefaults.maxiter[], + tol::Real=KrylovDefaults.tol[], orth::Orthogonalizer=KrylovDefaults.orth, eager::Bool=false, - verbosity::Int=0) + verbosity::Int=KrylovDefaults.verbosity[]) return Lanczos(orth, krylovdim, maxiter, tol, eager, verbosity) end """ - GKL(; krylovdim=KrylovDefaults.krylovdim, maxiter=KrylovDefaults.maxiter, - tol=KrylovDefaults.tol, orth=KrylovDefaults.orth, verbosity=0) + GKL(; krylovdim=KrylovDefaults.krylovdim, + maxiter=KrylovDefaults.maxiter, + tol=KrylovDefaults.tol, + orth=KrylovDefaults.orth, + eager=false, + verbosity=KrylovDefaults.verbosity[]) Represents the Golub-Kahan-Lanczos bidiagonalization algorithm for sequentially building a Krylov-like factorization of a general matrix or linear operator with a bidiagonal reduced @@ -143,18 +151,22 @@ struct GKL{O<:Orthogonalizer,S<:Real} <: KrylovAlgorithm verbosity::Int end function GKL(; - krylovdim::Int=KrylovDefaults.krylovdim, - maxiter::Int=KrylovDefaults.maxiter, - tol::Real=KrylovDefaults.tol, + krylovdim::Int=KrylovDefaults.krylovdim[], + maxiter::Int=KrylovDefaults.maxiter[], + tol::Real=KrylovDefaults.tol[], orth::Orthogonalizer=KrylovDefaults.orth, eager::Bool=false, - verbosity::Int=0) + verbosity::Int=KrylovDefaults.verbosity[]) return GKL(orth, krylovdim, maxiter, tol, eager, verbosity) end """ - Arnoldi(; krylovdim=KrylovDefaults.krylovdim, maxiter=KrylovDefaults.maxiter, - tol=KrylovDefaults.tol, orth=KrylovDefaults.orth, eager=false, verbosity=0) + Arnoldi(; krylovdim=KrylovDefaults.krylovdim, + maxiter=KrylovDefaults.maxiter, + tol=KrylovDefaults.tol, + orth=KrylovDefaults.orth, + eager=false, + verbosity=KrylovDefaults.verbosity[]) Represents the Arnoldi algorithm for building the Krylov subspace for a general matrix or linear operator. Can be used in `eigsolve` and `exponentiate`. The corresponding algorithms @@ -180,18 +192,22 @@ struct Arnoldi{O<:Orthogonalizer,S<:Real} <: KrylovAlgorithm verbosity::Int end function Arnoldi(; - krylovdim::Int=KrylovDefaults.krylovdim, - maxiter::Int=KrylovDefaults.maxiter, - tol::Real=KrylovDefaults.tol, + krylovdim::Int=KrylovDefaults.krylovdim[], + maxiter::Int=KrylovDefaults.maxiter[], + tol::Real=KrylovDefaults.tol[], orth::Orthogonalizer=KrylovDefaults.orth, eager::Bool=false, - verbosity::Int=0) + verbosity::Int=KrylovDefaults.verbosity[]) return Arnoldi(orth, krylovdim, maxiter, tol, eager, verbosity) end """ - GolubYe(; krylovdim=KrylovDefaults.krylovdim, maxiter=KrylovDefaults.maxiter, - tol=KrylovDefaults.tol, orth=KrylovDefaults.orth, verbosity=0) + GolubYe(; krylovdim=KrylovDefaults.krylovdim, + maxiter=KrylovDefaults.maxiter, + tol=KrylovDefaults.tol, + orth=KrylovDefaults.orth, + eager=false, + verbosity=KrylovDefaults.verbosity[]) Represents the Golub-Ye algorithm for solving hermitian (symmetric) generalized eigenvalue problems `A x = λ B x` with positive definite `B`, without the need for inverting `B`. 
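All of the constructors above now read their defaults by dereferencing the `Ref`-based fields of `KrylovDefaults` (see the `KrylovDefaults` hunk further below). A minimal sketch of what this enables; the concrete numbers are made up purely for illustration and are not part of the patch:

    using KrylovKit
    # Mutate the global defaults at runtime; every constructor whose keyword
    # defaults read `KrylovDefaults.tol[]` etc. picks up the new values from here on.
    KrylovKit.KrylovDefaults.tol[] = 1e-10
    KrylovKit.KrylovDefaults.maxiter[] = 200
    alg = Lanczos()    # constructed with tol = 1e-10 and maxiter = 200
    @assert alg.tol == 1e-10 && alg.maxiter == 200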
@@ -210,11 +226,11 @@ struct GolubYe{O<:Orthogonalizer,S<:Real} <: KrylovAlgorithm verbosity::Int end function GolubYe(; - krylovdim::Int=KrylovDefaults.krylovdim, - maxiter::Int=KrylovDefaults.maxiter, - tol::Real=KrylovDefaults.tol, + krylovdim::Int=KrylovDefaults.krylovdim[], + maxiter::Int=KrylovDefaults.maxiter[], + tol::Real=KrylovDefaults.tol[], orth::Orthogonalizer=KrylovDefaults.orth, - verbosity::Int=0) + verbosity::Int=KrylovDefaults.verbosity[]) return GolubYe(orth, krylovdim, maxiter, tol, verbosity) end @@ -222,7 +238,7 @@ end abstract type LinearSolver <: KrylovAlgorithm end """ - CG(; maxiter=KrylovDefaults.maxiter, tol=KrylovDefaults.tol, verbosity=0) + CG(; maxiter=KrylovDefaults.maxiter, tol=KrylovDefaults.tol, verbosity=KrylovDefaults.verbosity[]) Construct an instance of the conjugate gradient algorithm with specified parameters, which can be passed to `linsolve` in order to iteratively solve a linear system with a positive @@ -240,15 +256,18 @@ struct CG{S<:Real} <: LinearSolver verbosity::Int end function CG(; - maxiter::Integer=KrylovDefaults.maxiter, - tol::Real=KrylovDefaults.tol, - verbosity::Int=0) + maxiter::Integer=KrylovDefaults.maxiter[], + tol::Real=KrylovDefaults.tol[], + verbosity::Int=KrylovDefaults.verbosity[]) return CG(maxiter, tol, verbosity) end """ - GMRES(; krylovdim=KrylovDefaults.krylovdim, maxiter=KrylovDefaults.maxiter, - tol=KrylovDefaults.tol, orth::Orthogonalizer=KrylovDefaults.orth) + GMRES(; krylovdim=KrylovDefaults.krylovdim, + maxiter=KrylovDefaults.maxiter, + tol=KrylovDefaults.tol, + orth::Orthogonalizer=KrylovDefaults.orth, + verbosity=KrylovDefaults.verbosity[]) Construct an instance of the GMRES algorithm with specified parameters, which can be passed to `linsolve` in order to iteratively solve a linear system. The `GMRES` method will search @@ -273,17 +292,17 @@ struct GMRES{O<:Orthogonalizer,S<:Real} <: LinearSolver verbosity::Int end function GMRES(; - krylovdim::Integer=KrylovDefaults.krylovdim, - maxiter::Integer=KrylovDefaults.maxiter, - tol::Real=KrylovDefaults.tol, + krylovdim::Integer=KrylovDefaults.krylovdim[], + maxiter::Integer=KrylovDefaults.maxiter[], + tol::Real=KrylovDefaults.tol[], orth::Orthogonalizer=KrylovDefaults.orth, - verbosity::Int=0) + verbosity::Int=KrylovDefaults.verbosity[]) return GMRES(orth, maxiter, krylovdim, tol, verbosity) end # TODO """ - MINRES(; maxiter=KrylovDefaults.maxiter, tol=KrylovDefaults.tol) + MINRES(; maxiter=KrylovDefaults.maxiter, tol=KrylovDefaults.tol, verbosity=KrylovDefaults.verbosity[]) !!! warning "Not implemented yet" @@ -304,14 +323,14 @@ struct MINRES{S<:Real} <: LinearSolver verbosity::Int end function MINRES(; - maxiter::Integer=KrylovDefaults.maxiter, - tol::Real=KrylovDefaults.tol, - verbosity::Int=0) + maxiter::Integer=KrylovDefaults.maxiter[], + tol::Real=KrylovDefaults.tol[], + verbosity::Int=KrylovDefaults.verbosity[]) return MINRES(maxiter, tol, verbosity) end """ - BiCG(; maxiter=KrylovDefaults.maxiter, tol=KrylovDefaults.tol) + BiCG(; maxiter=KrylovDefaults.maxiter, tol=KrylovDefaults.tol, verbosity=KrylovDefaults.verbosity[]) !!! 
warning "Not implemented yet" @@ -331,14 +350,14 @@ struct BiCG{S<:Real} <: LinearSolver verbosity::Int end function BiCG(; - maxiter::Integer=KrylovDefaults.maxiter, - tol::Real=KrylovDefaults.tol, - verbosity::Int=0) + maxiter::Integer=KrylovDefaults.maxiter[], + tol::Real=KrylovDefaults.tol[], + verbosity::Int=KrylovDefaults.verbosity[]) return BiCG(maxiter, tol, verbosity) end """ - BiCGStab(; maxiter=KrylovDefaults.maxiter, tol=KrylovDefaults.tol) + BiCGStab(; maxiter=KrylovDefaults.maxiter, tol=KrylovDefaults.tol, verbosity=KrylovDefaults.verbosity[]) Construct an instance of the Biconjugate gradient algorithm with specified parameters, which can be passed to `linsolve` in order to iteratively solve a linear system general @@ -355,16 +374,16 @@ struct BiCGStab{S<:Real} <: LinearSolver verbosity::Int end function BiCGStab(; - maxiter::Integer=KrylovDefaults.maxiter, - tol::Real=KrylovDefaults.tol, - verbosity::Int=0) + maxiter::Integer=KrylovDefaults.maxiter[], + tol::Real=KrylovDefaults.tol[], + verbosity::Int=KrylovDefaults.verbosity[]) return BiCGStab(maxiter, tol, verbosity) end # Solving least squares problems abstract type LeastSquaresSolver <: KrylovAlgorithm end """ -LSMR(; maxiter=KrylovDefaults.maxiter, tol=KrylovDefaults.tol, verbosity=0) +LSMR(; maxiter=KrylovDefaults.maxiter, tol=KrylovDefaults.tol, verbosity=KrylovDefaults.verbosity[]) Represents the LSMR algorithm, which minimizes ``\\|Ax - b\\|^2 + \\|λx\\|^2`` in the Euclidean norm. If multiple solutions exists the minimum norm solution is returned. @@ -385,9 +404,9 @@ struct LSMR{S<:Real} <: LeastSquaresSolver verbosity::Int end function LSMR(; - maxiter::Integer=KrylovDefaults.maxiter, - tol::Real=KrylovDefaults.tol, - verbosity::Int=0) + maxiter::Integer=KrylovDefaults.maxiter[], + tol::Real=KrylovDefaults.tol[], + verbosity::Int=KrylovDefaults.verbosity[]) return LSMR(maxiter, tol, verbosity) end @@ -400,9 +419,10 @@ struct JacobiDavidson <: EigenSolver end """ module KrylovDefaults const orth = KrylovKit.ModifiedGramSchmidtIR() - const krylovdim = 30 - const maxiter = 100 - const tol = 1e-12 + const krylovdim = Ref(30) + const maxiter = Ref(100) + const tol = Ref(1e-12) + const verbosity = Ref(KrylovKit.WARN_LEVEL) end A module listing the default values for the typical parameters in Krylov based algorithms: @@ -424,7 +444,8 @@ A module listing the default values for the typical parameters in Krylov based a module KrylovDefaults using ..KrylovKit const orth = KrylovKit.ModifiedGramSchmidt2() # conservative choice -const krylovdim = 30 -const maxiter = 100 -const tol = 1e-12 +const krylovdim = Ref(30) +const maxiter = Ref(100) +const tol = Ref(1e-12) +const verbosity = Ref(KrylovKit.WARN_LEVEL) end diff --git a/src/eigsolve/eigsolve.jl b/src/eigsolve/eigsolve.jl index e3de25c..5d63ab9 100644 --- a/src/eigsolve/eigsolve.jl +++ b/src/eigsolve/eigsolve.jl @@ -227,12 +227,12 @@ function eigselector(f, T::Type; issymmetric::Bool=false, ishermitian::Bool=issymmetric && (T <: Real), - krylovdim::Int=KrylovDefaults.krylovdim, - maxiter::Int=KrylovDefaults.maxiter, - tol::Real=KrylovDefaults.tol, + krylovdim::Int=KrylovDefaults.krylovdim[], + maxiter::Int=KrylovDefaults.maxiter[], + tol::Real=KrylovDefaults.tol[], orth::Orthogonalizer=KrylovDefaults.orth, eager::Bool=false, - verbosity::Int=0, + verbosity::Int=KrylovDefaults.verbosity[], alg_rrule=nothing) if (T <: Real && issymmetric) || ishermitian return Lanczos(; krylovdim=krylovdim, @@ -254,12 +254,12 @@ function eigselector(A::AbstractMatrix, T::Type; 
issymmetric::Bool=T <: Real && LinearAlgebra.issymmetric(A), ishermitian::Bool=issymmetric || LinearAlgebra.ishermitian(A), - krylovdim::Int=KrylovDefaults.krylovdim, - maxiter::Int=KrylovDefaults.maxiter, - tol::Real=KrylovDefaults.tol, + krylovdim::Int=KrylovDefaults.krylovdim[], + maxiter::Int=KrylovDefaults.maxiter[], + tol::Real=KrylovDefaults.tol[], orth::Orthogonalizer=KrylovDefaults.orth, eager::Bool=false, - verbosity::Int=0, + verbosity::Int=KrylovDefaults.verbosity[], alg_rrule=nothing) if (T <: Real && issymmetric) || ishermitian return Lanczos(; krylovdim=krylovdim, diff --git a/src/linsolve/linsolve.jl b/src/linsolve/linsolve.jl index 763e307..41500de 100644 --- a/src/linsolve/linsolve.jl +++ b/src/linsolve/linsolve.jl @@ -65,9 +65,9 @@ Keyword arguments are given by: - `ishermitian::Bool`: if the linear map is hermitian - `isposdef::Bool`: if the linear map is positive definite -The default values are given by `atol = KrylovDefaults.tol`, `rtol = KrylovDefaults.tol`, -`tol = max(atol, rtol*norm(b))`, `krylovdim = KrylovDefaults.krylovdim`, -`maxiter = KrylovDefaults.maxiter`, `orth = KrylovDefaults.orth`; +The default values are given by `atol = KrylovDefaults.tol[]`, `rtol = KrylovDefaults.tol[]`, +`tol = max(atol, rtol*norm(b))`, `krylovdim = KrylovDefaults.krylovdim[]`, +`maxiter = KrylovDefaults.maxiter[]`, `orth = KrylovDefaults.orth`; see [`KrylovDefaults`](@ref) for details. The default value for the last three parameters depends on the method. If an @@ -128,13 +128,13 @@ function linselector(f, issymmetric::Bool=false, ishermitian::Bool=T <: Real && issymmetric, isposdef::Bool=false, - krylovdim::Int=KrylovDefaults.krylovdim, - maxiter::Int=KrylovDefaults.maxiter, - rtol::Real=KrylovDefaults.tol, - atol::Real=KrylovDefaults.tol, + krylovdim::Int=KrylovDefaults.krylovdim[], + maxiter::Int=KrylovDefaults.maxiter[], + rtol::Real=KrylovDefaults.tol[], + atol::Real=KrylovDefaults.tol[], tol::Real=max(atol, rtol * norm(b)), orth=KrylovDefaults.orth, - verbosity::Int=0) + verbosity::Int=KrylovDefaults.verbosity[]) if (T <: Real && issymmetric) || ishermitian if isposdef return CG(; maxiter=krylovdim * maxiter, tol=tol, verbosity=verbosity) @@ -155,13 +155,13 @@ function linselector(A::AbstractMatrix, issymmetric::Bool=T <: Real && LinearAlgebra.issymmetric(A), ishermitian::Bool=issymmetric || LinearAlgebra.ishermitian(A), isposdef::Bool=ishermitian ? LinearAlgebra.isposdef(A) : false, - krylovdim::Int=KrylovDefaults.krylovdim, - maxiter::Int=KrylovDefaults.maxiter, - rtol::Real=KrylovDefaults.tol, - atol::Real=KrylovDefaults.tol, + krylovdim::Int=KrylovDefaults.krylovdim[], + maxiter::Int=KrylovDefaults.maxiter[], + rtol::Real=KrylovDefaults.tol[], + atol::Real=KrylovDefaults.tol[], tol::Real=max(atol, rtol * norm(b)), orth=KrylovDefaults.orth, - verbosity::Int=0) + verbosity::Int=KrylovDefaults.verbosity[]) if (T <: Real && issymmetric) || ishermitian if isposdef return CG(; maxiter=krylovdim * maxiter, tol=tol, verbosity=verbosity) diff --git a/src/lssolve/lssolve.jl b/src/lssolve/lssolve.jl index f81753c..851dc22 100644 --- a/src/lssolve/lssolve.jl +++ b/src/lssolve/lssolve.jl @@ -99,11 +99,11 @@ Currently, only [`LSMR`](@ref) is available and thus selected. 
function lssolve end function lssolve(f, b, λ=0; - maxiter=KrylovDefaults.maxiter, - rtol::Real=KrylovDefaults.tol, - atol::Real=KrylovDefaults.tol, + maxiter=KrylovDefaults.maxiter[], + rtol::Real=KrylovDefaults.tol[], + atol::Real=KrylovDefaults.tol[], tol::Real=max(atol, rtol * norm(b)), - verbosity::Int=0) + verbosity::Int=KrylovDefaults.verbosity[]) alg = LSMR(; maxiter, tol, verbosity) return lssolve(f, b, alg, λ) end diff --git a/test/lssolve.jl b/test/lssolve.jl index 9988d6c..d40698d 100644 --- a/test/lssolve.jl +++ b/test/lssolve.jl @@ -12,9 +12,8 @@ b = rand(T, 2 * n) tol = tol = 10 * n * eps(real(T)) - alg = LSMR(; maxiter=3, tol=tol, verbosity=0) x, info = @constinferred lssolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); - maxiter=3) + maxiter=3, verbosity=0) r = b - A * unwrapvec(x) @test unwrapvec(info.residual) ≈ r @test info.normres ≈ norm(A' * r) diff --git a/test/nestedtuple.jl b/test/nestedtuple.jl index ec3973a..43d981b 100644 --- a/test/nestedtuple.jl +++ b/test/nestedtuple.jl @@ -1,16 +1,16 @@ # TODO: Remove RecursiveVec and simply use tuple when RecursiveVec is removed. -@testset "RecursiveVec - singular values full" begin +@testset "Nested tuples - singular values full" begin @testset for T in (Float32, Float64, ComplexF32, ComplexF64) @testset for orth in (cgs2, mgs2, cgsr, mgsr) A = rand(T, (n, n)) v = rand(T, (n,)) - v2 = RecursiveVec(v, zero(v)) + v2 = (v, zero(v)) alg = Lanczos(; orth=orth, krylovdim=2 * n, maxiter=1, tol=tolerance(T)) D, V, info = eigsolve(v2, n, :LR, alg) do x x1, x2 = x y1 = A * x2 y2 = A' * x1 - return RecursiveVec(y1, y2) + return (y1, y2) end @test info.converged >= n S = D[1:n] @@ -22,20 +22,20 @@ end end -@testset "RecursiveVec - singular values iteratively" begin +@testset "Nested tuples - singular values iteratively" begin @testset for T in (Float32, Float64, ComplexF32, ComplexF64) @testset for orth in (cgs2, mgs2, cgsr, mgsr) A = rand(T, (N, 2 * N)) v = rand(T, (N,)) w = rand(T, (2 * N,)) - v2 = RecursiveVec(v, w) + v2 = (v, w) alg = Lanczos(; orth=orth, krylovdim=n, maxiter=300, tol=tolerance(T)) n1 = div(n, 2) D, V, info = eigsolve(v2, n1, :LR, alg) do x x1, x2 = x y1 = A * x2 y2 = A' * x1 - return RecursiveVec(y1, y2) + return (y1, y2) end @test info.converged >= n1 S = D[1:n1] diff --git a/test/runtests.jl b/test/runtests.jl index 40841b1..34b7cac 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -48,16 +48,16 @@ end @testset "Linear Algebra Utilities" verbose = true begin include("linalg.jl") end -@testset "Nested Tuples" verbose = true begin +@testset "Singular value problems via eigsolve with nested tuples" verbose = true begin include("nestedtuple.jl") end -@testset "Differentiation rules" verbose = true begin - include("ad/linsolve.jl") - include("ad/eigsolve.jl") - include("ad/degenerateeigsolve.jl") - include("ad/svdsolve.jl") -end +# @testset "Differentiation rules" verbose = true begin +# include("ad/linsolve.jl") +# include("ad/eigsolve.jl") +# include("ad/degenerateeigsolve.jl") +# include("ad/svdsolve.jl") +# end t = time() - t # Issues diff --git a/test/schursolve.jl b/test/schursolve.jl index a6efcc7..57a8e6a 100644 --- a/test/schursolve.jl +++ b/test/schursolve.jl @@ -75,7 +75,8 @@ end @testset for orth in orths A = rand(T, (N, N)) .- one(T) / 2 v = rand(T, (N,)) - alg = Arnoldi(; orth=orth, krylovdim=3 * n, maxiter=10, tol=tolerance(T)) + alg = Arnoldi(; orth=orth, krylovdim=3 * n, maxiter=10, tol=tolerance(T), + verbosity=0) T1, V1, D1, info1 = @constinferred schursolve(wrapop(A, Val(mode)), wrapvec(v, 
Val(mode)), n, :SR, alg) From 942d740ea64983329b90062f88c80ca03275045e Mon Sep 17 00:00:00 2001 From: Jutho Date: Sat, 11 Jan 2025 02:03:55 +0100 Subject: [PATCH 5/8] update tests accordingly --- test/eigsolve.jl | 8 ++++---- test/geneigsolve.jl | 2 +- test/svdsolve.jl | 3 ++- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/test/eigsolve.jl b/test/eigsolve.jl index e9c8342..f2e45be 100644 --- a/test/eigsolve.jl +++ b/test/eigsolve.jl @@ -60,7 +60,7 @@ end A = (A + A') / 2 v = rand(T, (N,)) alg = Lanczos(; krylovdim=2 * n, maxiter=10, - tol=tolerance(T), eager=true) + tol=tolerance(T), eager=true, verbosity=0) D1, V1, info1 = @constinferred eigsolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n, :SR, alg) D2, V2, info2 = eigsolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n, :LR, @@ -174,7 +174,7 @@ end A = rand(T, (N, N)) .- one(T) / 2 v = rand(T, (N,)) alg = Arnoldi(; krylovdim=3 * n, maxiter=20, - tol=tolerance(T), eager=true) + tol=tolerance(T), eager=true, verbosity=0) D1, V1, info1 = @constinferred eigsolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n, :SR, alg) D2, V2, info2 = eigsolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n, :LR, @@ -241,7 +241,7 @@ end A = V * Diagonal(D) / V v = rand(T, (N,)) alg = Arnoldi(; krylovdim=3 * n, maxiter=20, - tol=tolerance(T), eager=true) + tol=tolerance(T), eager=true, verbosity=0) D1, V1, info1 = @constinferred realeigsolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n, :SR, alg) D2, V2, info2 = realeigsolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n, @@ -289,7 +289,7 @@ end f = buildrealmap(A, B) v = rand(complex(T), (N,)) alg = Arnoldi(; krylovdim=3 * n, maxiter=20, - tol=tolerance(T), eager=true) + tol=tolerance(T), eager=true, verbosity=0) D1, V1, info1 = @constinferred realeigsolve(f, v, n, :SR, alg) D2, V2, info2 = realeigsolve(f, v, n, :LR, alg) D3, V3, info3 = realeigsolve(f, v, n, :LM, alg) diff --git a/test/geneigsolve.jl b/test/geneigsolve.jl index 21fec18..5986764 100644 --- a/test/geneigsolve.jl +++ b/test/geneigsolve.jl @@ -93,7 +93,7 @@ end B = sqrt(B * B') v = rand(T, (N,)) alg = GolubYe(; orth=orth, krylovdim=3 * n, maxiter=100, - tol=cond(B) * tolerance(T)) + tol=cond(B) * tolerance(T), verbosity=0) D1, V1, info1 = @constinferred geneigsolve((wrapop(A, Val(mode)), wrapop(B, Val(mode))), wrapvec(v, Val(mode)), diff --git a/test/svdsolve.jl b/test/svdsolve.jl index fdc7799..2d44bfc 100644 --- a/test/svdsolve.jl +++ b/test/svdsolve.jl @@ -55,7 +55,8 @@ end A = rand(T, (2 * N, N)) v = rand(T, (2 * N,)) n₁ = div(n, 2) - alg = GKL(; orth=orth, krylovdim=n, maxiter=10, tol=tolerance(T), eager=true) + alg = GKL(; orth=orth, krylovdim=n, maxiter=10, tol=tolerance(T), eager=true, + verbosity=0) S, lvecs, rvecs, info = @constinferred svdsolve(wrapop(A, Val(mode)), wrapvec(v, Val(mode)), n₁, :LR, alg) From de3c8de2209d7e143af9259013ed0138cf5fdf17 Mon Sep 17 00:00:00 2001 From: Jutho Date: Sat, 11 Jan 2025 12:03:08 +0100 Subject: [PATCH 6/8] bump VectorInterface requirement, fix tests for Julia 1.6 --- Project.toml | 2 +- test/eigsolve.jl | 23 +++++++++++++++++------ test/geneigsolve.jl | 16 +++++++++------- test/linsolve.jl | 22 +++++++++++++--------- test/lssolve.jl | 5 +++-- test/svdsolve.jl | 8 ++++---- 6 files changed, 47 insertions(+), 29 deletions(-) diff --git a/Project.toml b/Project.toml index a033e2c..3e73113 100644 --- a/Project.toml +++ b/Project.toml @@ -28,7 +28,7 @@ Printf = "1" Random = "1" Test = "1" TestExtras = "0.2,0.3" -VectorInterface = "0.4,0.5" +VectorInterface = "0.5" Zygote = "0.6" julia 
= "1.6" diff --git a/test/eigsolve.jl b/test/eigsolve.jl index f2e45be..0a4ed90 100644 --- a/test/eigsolve.jl +++ b/test/eigsolve.jl @@ -23,6 +23,17 @@ krylovdim=n1 + 1, maxiter=1, tol=tolerance(T), verbosity=1) + @test_logs (:info,) eigsolve(wrapop(A, Val(mode)), + wrapvec(v, Val(mode)), n1, :SR; + krylovdim=n, + maxiter=1, tol=tolerance(T), + verbosity=2) + @test_logs min_level = Logging.Warn eigsolve(wrapop(A, Val(mode)), + wrapvec(v, Val(mode)), + n1, :SR; + krylovdim=n, + maxiter=1, tol=tolerance(T), + verbosity=4) @test KrylovKit.eigselector(wrapop(A, Val(mode)), scalartype(v); krylovdim=n, maxiter=1, tol=tolerance(T), ishermitian=true) isa Lanczos @@ -114,12 +125,12 @@ end orth=orth, krylovdim=n, maxiter=1, tol=tolerance(T), verbosity=2) - @test_logs min_level = Warn eigsolve(wrapop(A, Val(mode)), - wrapvec(v, Val(mode)), - n1, :SR; - orth=orth, krylovdim=n, maxiter=1, - tol=tolerance(T), - verbosity=4) + @test_logs min_level = Logging.Warn eigsolve(wrapop(A, Val(mode)), + wrapvec(v, Val(mode)), + n1, :SR; + orth=orth, krylovdim=n, maxiter=1, + tol=tolerance(T), + verbosity=4) @test KrylovKit.eigselector(wrapop(A, Val(mode)), eltype(v); orth=orth, krylovdim=n, maxiter=1, diff --git a/test/geneigsolve.jl b/test/geneigsolve.jl index 5986764..b5c6815 100644 --- a/test/geneigsolve.jl +++ b/test/geneigsolve.jl @@ -50,13 +50,15 @@ maxiter=1, tol=tolerance(T), ishermitian=true, isposdef=true, verbosity=2) - @test_logs min_level = Warn geneigsolve((wrapop(A, Val(mode)), - wrapop(B, Val(mode))), - wrapvec(v, Val(mode)), - n1, :SR; orth=orth, krylovdim=n, - maxiter=1, tol=tolerance(T), - ishermitian=true, isposdef=true, - verbosity=3) + @test_logs min_level = Logging.Warn geneigsolve((wrapop(A, Val(mode)), + wrapop(B, Val(mode))), + wrapvec(v, Val(mode)), + n1, :SR; orth=orth, + krylovdim=n, + maxiter=1, tol=tolerance(T), + ishermitian=true, + isposdef=true, + verbosity=3) end @test KrylovKit.geneigselector((wrapop(A, Val(mode)), wrapop(B, Val(mode))), scalartype(v); orth=orth, krylovdim=n, diff --git a/test/linsolve.jl b/test/linsolve.jl index 50ef633..d2f4e96 100644 --- a/test/linsolve.jl +++ b/test/linsolve.jl @@ -25,10 +25,12 @@ ishermitian=true, isposdef=true, maxiter=2n, krylovdim=1, rtol=tolerance(T), verbosity=2) - @test_logs min_level = Warn linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); - ishermitian=true, isposdef=true, maxiter=2n, - krylovdim=1, rtol=tolerance(T), - verbosity=3) + @test_logs min_level = Logging.Warn linsolve(wrapop(A, Val(mode)), + wrapvec(b, Val(mode)); + ishermitian=true, isposdef=true, + maxiter=2n, + krylovdim=1, rtol=tolerance(T), + verbosity=3) x, info = linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, alg) @test info.numops == 1 @@ -112,9 +114,10 @@ end @test_logs (:info,) (:info,) linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); krylovdim=n, maxiter=2, rtol=tolerance(T), verbosity=2) - @test_logs min_level = Warn linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)); - krylovdim=n, maxiter=2, - rtol=tolerance(T), verbosity=3) + @test_logs min_level = Logging.Warn linsolve(wrapop(A, Val(mode)), + wrapvec(b, Val(mode)); + krylovdim=n, maxiter=2, + rtol=tolerance(T), verbosity=3) alg = GMRES(; krylovdim=n, maxiter=2, tol=tolerance(T) * norm(b), verbosity=0) x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, @@ -232,8 +235,9 @@ end @test_logs (:info,) (:info,) linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), wrapvec(zerovector(b), Val(mode)), alg) alg = BiCGStab(; maxiter=4n, tol=tolerance(T) * norm(b), 
verbosity=3) - @test_logs min_level = Warn linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), - wrapvec(zerovector(b), Val(mode)), alg) + @test_logs min_level = Logging.Warn linsolve(wrapop(A, Val(mode)), + wrapvec(b, Val(mode)), + wrapvec(zerovector(b), Val(mode)), alg) alg = BiCGStab(; maxiter=4n, tol=tolerance(T) * norm(b), verbosity=0) x, info = @constinferred linsolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), x, diff --git a/test/lssolve.jl b/test/lssolve.jl index d40698d..efa2daf 100644 --- a/test/lssolve.jl +++ b/test/lssolve.jl @@ -39,8 +39,9 @@ @test_logs (:info,) (:info,) lssolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), alg) alg = LSMR(; maxiter=2 * n, tol=tol, verbosity=3) - @test_logs min_level = Warn lssolve(wrapop(A, Val(mode)), wrapvec(b, Val(mode)), - alg) + @test_logs min_level = Logging.Warn lssolve(wrapop(A, Val(mode)), + wrapvec(b, Val(mode)), + alg) λ = rand(real(T)) alg = LSMR(; maxiter=2 * n, tol=tol, verbosity=0) diff --git a/test/svdsolve.jl b/test/svdsolve.jl index 2d44bfc..d9e3b98 100644 --- a/test/svdsolve.jl +++ b/test/svdsolve.jl @@ -31,10 +31,10 @@ alg) alg = GKL(; orth=orth, krylovdim=2 * n, maxiter=1, tol=tolerance(T), verbosity=4) - @test_logs min_level = Warn svdsolve(wrapop(A, Val(mode)), - wrapvec(A[:, 1], Val(mode)), - n1, :LR, - alg) + @test_logs min_level = Logging.Warn svdsolve(wrapop(A, Val(mode)), + wrapvec(A[:, 1], Val(mode)), + n1, :LR, + alg) U = stack(unwrapvec, lvecs) V = stack(unwrapvec, rvecs) From 03d27b6a251976c4c4e34b348d7cc42858f7ab97 Mon Sep 17 00:00:00 2001 From: Jutho Date: Sun, 12 Jan 2025 23:58:22 +0100 Subject: [PATCH 7/8] update rrule verbosity and tests --- .../KrylovKitChainRulesCoreExt.jl | 1 + ext/KrylovKitChainRulesCoreExt/eigsolve.jl | 22 +++-- ext/KrylovKitChainRulesCoreExt/linsolve.jl | 3 +- ext/KrylovKitChainRulesCoreExt/svdsolve.jl | 14 +-- test/ad/degenerateeigsolve.jl | 25 +++--- test/ad/eigsolve.jl | 87 +++++++++---------- test/ad/linsolve.jl | 25 +++--- test/ad/svdsolve.jl | 67 +++++++------- test/runtests.jl | 16 ++-- 9 files changed, 136 insertions(+), 124 deletions(-) diff --git a/ext/KrylovKitChainRulesCoreExt/KrylovKitChainRulesCoreExt.jl b/ext/KrylovKitChainRulesCoreExt/KrylovKitChainRulesCoreExt.jl index bf31bf0..6867748 100644 --- a/ext/KrylovKitChainRulesCoreExt/KrylovKitChainRulesCoreExt.jl +++ b/ext/KrylovKitChainRulesCoreExt/KrylovKitChainRulesCoreExt.jl @@ -6,6 +6,7 @@ using LinearAlgebra using VectorInterface using KrylovKit: apply_normal, apply_adjoint +using KrylovKit: WARN_LEVEL, STARTSTOP_LEVEL, EACHITERATION_LEVEL include("utilities.jl") include("linsolve.jl") diff --git a/ext/KrylovKitChainRulesCoreExt/eigsolve.jl b/ext/KrylovKitChainRulesCoreExt/eigsolve.jl index 17c7e17..3716d07 100644 --- a/ext/KrylovKitChainRulesCoreExt/eigsolve.jl +++ b/ext/KrylovKitChainRulesCoreExt/eigsolve.jl @@ -135,7 +135,7 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, b = (zerovector(v), convert(T, Δλ)) else vdΔv = inner(v, Δv) - if alg_primal.verbosity >= 1 + if alg_rrule.verbosity >= WARN_LEVEL gauge = abs(imag(vdΔv)) gauge > alg_primal.tol && @warn "`eigsolve` cotangent for eigenvector $i is sensitive to gauge choice: (|gauge| = $gauge)" @@ -152,9 +152,11 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, return (y1, y2) end end - if info.converged >= i && reverse_info.converged == 0 && alg_primal.verbosity >= 1 + if info.converged >= i && reverse_info.converged == 0 && + alg_primal.verbosity >= WARN_LEVEL @warn "`eigsolve` cotangent linear 
problem ($i) did not converge, whereas the primal eigenvalue problem did: normres = $(reverse_info.normres)" - elseif abs(w[2]) > alg_rrule.tol && alg_primal.verbosity >= 1 + elseif abs(w[2]) > (alg_rrule.tol * norm(w[1])) && + alg_primal.verbosity >= WARN_LEVEL @warn "`eigsolve` cotangent linear problem ($i) returns unexpected result: error = $(w[2])" end ws[i] = w[1] @@ -185,7 +187,7 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, # components along subspace spanned by current eigenvectors tol = alg_primal.tol - if alg_primal.verbosity >= 1 + if alg_rrule.verbosity >= WARN_LEVEL mask = abs.(transpose(vals) .- vals) .< tol gaugepart = VdΔV[mask] - Diagonal(real(diag(VdΔV)))[mask] Δgauge = norm(gaugepart, Inf) @@ -263,7 +265,8 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, return (w′, conj.(vals) .* x) end end - if info.converged >= n && reverse_info.converged < n && alg_primal.verbosity >= 1 + if info.converged >= n && reverse_info.converged < n && + alg_primal.verbosity >= WARN_LEVEL @warn "`eigsolve` cotangent problem did not converge, whereas the primal eigenvalue problem did" end # cleanup and construct final result by renormalising the eigenvectors and explicitly @@ -276,7 +279,7 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, w, x = Ws[ic] factor = 1 / x[i] x[i] = zero(x[i]) - if alg_primal.verbosity >= 1 + if alg_primal.verbosity >= WARN_LEVEL error = max(norm(x, Inf), abs(rvals[ic] - conj(vals[i]))) error > 10 * tol && @warn "`eigsolve` cotangent linear problem ($i) returns unexpected result: error = $error" @@ -308,7 +311,7 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, # components along subspace spanned by current eigenvectors tol = alg_primal.tol aVdΔV = rmul!(VdΔV - VdΔV', 1 / 2) - if alg_primal.verbosity >= 1 + if alg_rrule.verbosity >= WARN_LEVEL mask = abs.(transpose(vals) .- vals) .< tol gaugepart = view(aVdΔV, mask) gauge = norm(gaugepart, Inf) @@ -366,7 +369,8 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, return (w′, vals .* x) end end - if info.converged >= n && reverse_info.converged < n && alg_primal.verbosity >= 1 + if info.converged >= n && reverse_info.converged < n && + alg_primal.verbosity >= WARN_LEVEL @warn "`eigsolve` cotangent problem did not converge, whereas the primal eigenvalue problem did" end @@ -380,7 +384,7 @@ function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, factor = 1 / x[ic] x[ic] = zero(x[ic]) error = max(norm(x, Inf), abs(rvals[i] - conj(vals[ic]))) - if error > 10 * tol && alg_primal.verbosity >= 1 + if error > 10 * tol && alg_primal.verbosity >= WARN_LEVEL @warn "`eigsolve` cotangent linear problem ($ic) returns unexpected result: error = $error" end ws[ic] = VectorInterface.add!!(zs[ic], Q(w), -factor) diff --git a/ext/KrylovKitChainRulesCoreExt/linsolve.jl b/ext/KrylovKitChainRulesCoreExt/linsolve.jl index 0db21ca..279b4c3 100644 --- a/ext/KrylovKitChainRulesCoreExt/linsolve.jl +++ b/ext/KrylovKitChainRulesCoreExt/linsolve.jl @@ -34,7 +34,8 @@ function make_linsolve_pullback(fᴴ, b, a₀, a₁, alg_rrule, construct∂f, x a₁))) ∂b, reverse_info = linsolve(fᴴ, x̄, x̄₀, alg_rrule, conj(a₀), conj(a₁)) - if info.converged > 0 && reverse_info.converged == 0 && alg_primal.verbosity >= 1 + if info.converged > 0 && reverse_info.converged == 0 && + alg_primal.verbosity >= WARN_LEVEL @warn "`linsolve` cotangent problem did not converge, whereas the primal 
linear problem did: normres = $(reverse_info.normres)" end x∂b = inner(x, ∂b) diff --git a/ext/KrylovKitChainRulesCoreExt/svdsolve.jl b/ext/KrylovKitChainRulesCoreExt/svdsolve.jl index a1db890..200575f 100644 --- a/ext/KrylovKitChainRulesCoreExt/svdsolve.jl +++ b/ext/KrylovKitChainRulesCoreExt/svdsolve.jl @@ -112,7 +112,7 @@ function compute_svdsolve_pullback_data(Δvals, Δlvecs, Δrvecs, vals, lvecs, r udΔu = inner(u, Δu) vdΔv = inner(v, Δv) if (udΔu isa Complex) || (vdΔv isa Complex) - if alg_primal.verbosity >= 1 + if alg_rrule.verbosity >= WARN_LEVEL gauge = abs(imag(udΔu + vdΔv)) gauge > alg_primal.tol && @warn "`svdsolve` cotangents for singular vectors $i are sensitive to gauge choice: (|gauge| = $gauge)" @@ -131,7 +131,8 @@ function compute_svdsolve_pullback_data(Δvals, Δlvecs, Δrvecs, vals, lvecs, r return (x′, y′) end end - if info.converged >= i && reverse_info.converged == 0 && alg_primal.verbosity >= 0 + if info.converged >= i && reverse_info.converged == 0 && + alg_primal.verbosity >= WARN_LEVEL @warn "`svdsolve` cotangent linear problem ($i) did not converge, whereas the primal eigenvalue problem did: normres = $(reverse_info.normres)" end x = VectorInterface.add!!(x, u, Δs / 2) @@ -162,7 +163,7 @@ function compute_svdsolve_pullback_data(Δvals, Δlvecs, Δrvecs, vals, lvecs, r aVdΔV = rmul!(VdΔV - VdΔV', 1 / 2) tol = alg_primal.tol - if alg_primal.verbosity >= 1 + if alg_rrule.verbosity >= WARN_LEVEL mask = abs.(vals' .- vals) .< tol gaugepart = view(aUdΔU, mask) + view(aVdΔV, mask) gauge = norm(gaugepart, Inf) @@ -227,7 +228,8 @@ function compute_svdsolve_pullback_data(Δvals, Δlvecs, Δrvecs, vals, lvecs, r return (x′, y′, vals .* z) end end - if info.converged >= n && reverse_info.converged < n && alg_primal.verbosity >= 1 + if info.converged >= n && reverse_info.converged < n && + alg_primal.verbosity >= WARN_LEVEL @warn "`svdsolve` cotangent problem did not converge, whereas the primal singular value problem did" end @@ -236,13 +238,13 @@ function compute_svdsolve_pullback_data(Δvals, Δlvecs, Δrvecs, vals, lvecs, r for i in 1:n x, y, z = Ws[i] _, ic = findmax(abs, z) - if ic != i && alg_primal.verbosity >= 1 + if ic != i && alg_primal.verbosity >= WARN_LEVEL @warn "`svdsolve` cotangent linear problem ($ic) returns unexpected result" end factor = 1 / z[ic] z[ic] = zero(z[ic]) error = max(norm(z, Inf), abs(rvals[i] - vals[ic])) - if error > 10 * tol && alg_primal.verbosity >= 1 + if error > 10 * tol && alg_primal.verbosity >= WARN_LEVEL @warn "`svdsolve` cotangent linear problem ($ic) returns unexpected result: error = $error vs tol = $tol" end xs[ic] = VectorInterface.add!!(xs[ic], x, -factor) diff --git a/test/ad/degenerateeigsolve.jl b/test/ad/degenerateeigsolve.jl index 8be7185..f3b9886 100644 --- a/test/ad/degenerateeigsolve.jl +++ b/test/ad/degenerateeigsolve.jl @@ -3,6 +3,7 @@ module DegenerateEigsolveAD using KrylovKit, LinearAlgebra using Random, Test, TestExtras using ChainRulesCore, ChainRulesTestUtils, Zygote, FiniteDifferences +using ..TestSetup Random.seed!(987654321) fdm = ChainRulesTestUtils._fdm @@ -92,22 +93,16 @@ end M = [zeros(T, n, 2n) A; B zeros(T, n, 2n); zeros(T, n, n) C zeros(T, n, n)] x = randn(T, N) - tol = 2 * N^2 * eps(real(T)) + tol = tolerance(T) #2 * N^2 * eps(real(T)) alg = Arnoldi(; tol=tol, krylovdim=2n) - alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n) - alg_rrule2 = GMRES(; tol=tol, krylovdim=2n) - mat_example1, mat_example_fun1, mat_example_fd, Avec, Bvec, Cvec, xvec, vals, vecs = build_mat_example(A, - B, - C, - x, - alg, - alg_rrule1) - 
mat_example2, mat_example_fun2, mat_example_fd, Avec, Bvec, Cvec, xvec, vals, vecs = build_mat_example(A, - B, - C, - x, - alg, - alg_rrule2) + alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n, verbosity=0) + alg_rrule2 = GMRES(; tol=tol, krylovdim=2n, verbosity=0) + #! format: off + mat_example1, mat_example_fun1, mat_example_fd, Avec, Bvec, Cvec, xvec, vals, vecs = + build_mat_example(A, B, C, x, alg, alg_rrule1) + mat_example2, mat_example_fun2, mat_example_fd, Avec, Bvec, Cvec, xvec, vals, vecs = + build_mat_example(A, B, C, x, alg, alg_rrule2) + #! format: on (JA, JB, JC, Jx) = FiniteDifferences.jacobian(fdm, mat_example_fd, Avec, Bvec, Cvec, xvec) (JA1, JB1, JC1, Jx1) = Zygote.jacobian(mat_example1, Avec, Bvec, Cvec, xvec) diff --git a/test/ad/eigsolve.jl b/test/ad/eigsolve.jl index fbe6948..749a1c7 100644 --- a/test/ad/eigsolve.jl +++ b/test/ad/eigsolve.jl @@ -2,6 +2,7 @@ module EigsolveAD using KrylovKit, LinearAlgebra using Random, Test, TestExtras using ChainRulesCore, ChainRulesTestUtils, Zygote, FiniteDifferences +using ..TestSetup Random.seed!(987654321) fdm = ChainRulesTestUtils._fdm @@ -203,10 +204,10 @@ end howmany = 3 condA = cond(A) - tol = n * condA * (T <: Real ? eps(T) : 4 * eps(real(T))) + tol = tolerance(T) # n * condA * (T <: Real ? eps(T) : 4 * eps(real(T))) alg = Arnoldi(; tol=tol, krylovdim=n) - alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n) - alg_rrule2 = GMRES(; tol=tol, krylovdim=n + 1) + alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n, verbosity=0) + alg_rrule2 = GMRES(; tol=tol, krylovdim=n + 1, verbosity=0) config = Zygote.ZygoteRuleConfig() @testset for which in whichlist for alg_rrule in (alg_rrule1, alg_rrule2) @@ -226,13 +227,10 @@ end end for alg_rrule in (alg_rrule1, alg_rrule2) - mat_example, mat_example_fun, mat_example_fd, Avec, xvec, vals, vecs, howmany = build_mat_example(A, - x, - howmany, - which, - alg, - alg_rrule) - + #! format: off + mat_example, mat_example_fun, mat_example_fd, Avec, xvec, vals, vecs, howmany = + build_mat_example(A, x, howmany, which, alg, alg_rrule) + #! 
format: on (JA, Jx) = FiniteDifferences.jacobian(fdm, mat_example_fd, Avec, xvec) (JA1, Jx1) = Zygote.jacobian(mat_example, Avec, xvec) (JA2, Jx2) = Zygote.jacobian(mat_example_fun, Avec, xvec) @@ -275,8 +273,8 @@ end :LR, alg; alg_rrule=alg_rrule) @test_logs pb((ZeroTangent(), im .* vecs[1:2] .+ vecs[2:-1:1], NoTangent())) - alg = Arnoldi(; tol=tol, krylovdim=n, verbosity=1) - alg_rrule = Arnoldi(; tol=tol, krylovdim=n, verbosity=0) + alg = Arnoldi(; tol=tol, krylovdim=n, verbosity=0) + alg_rrule = Arnoldi(; tol=tol, krylovdim=n, verbosity=1) (vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) @test_logs (:warn,) pb((ZeroTangent(), im .* vecs[1:2] .+ vecs[2:-1:1], @@ -285,7 +283,7 @@ end @test norm(unthunk(pbs[1]), Inf) < condA * sqrt(eps(real(T))) alg = Arnoldi(; tol=tol, krylovdim=n, verbosity=1) - alg_rrule = Arnoldi(; tol=tol, krylovdim=n, verbosity=1) + alg_rrule = Arnoldi(; tol=tol, krylovdim=n, verbosity=2) (vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) @test_logs (:warn,) (:info,) pb((ZeroTangent(), im .* vecs[1:2] .+ vecs[2:-1:1], @@ -299,8 +297,8 @@ end :LR, alg; alg_rrule=alg_rrule) @test_logs pb((ZeroTangent(), im .* vecs[1:2] .+ vecs[2:-1:1], NoTangent())) - alg = Arnoldi(; tol=tol, krylovdim=n, verbosity=1) - alg_rrule = GMRES(; tol=tol, krylovdim=n, verbosity=0) + alg = Arnoldi(; tol=tol, krylovdim=n, verbosity=0) + alg_rrule = GMRES(; tol=tol, krylovdim=n, verbosity=1) (vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) @test_logs (:warn,) (:warn,) pb((ZeroTangent(), @@ -311,13 +309,14 @@ end @test norm(unthunk(pbs[1]), Inf) < condA * sqrt(eps(real(T))) alg = Arnoldi(; tol=tol, krylovdim=n, verbosity=1) - alg_rrule = GMRES(; tol=tol, krylovdim=n, verbosity=1) + alg_rrule = GMRES(; tol=tol, krylovdim=n, verbosity=2) (vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) - @test_logs (:warn,) (:info,) (:warn,) (:info,) pb((ZeroTangent(), - im .* vecs[1:2] .+ - vecs[2:-1:1], - NoTangent())) + @test_logs (:warn,) (:info,) (:info,) (:warn,) (:info,) (:info,) pb((ZeroTangent(), + im .* + vecs[1:2] .+ + vecs[2:-1:1], + NoTangent())) pbs = @test_logs (:info,) (:info,) pb((ZeroTangent(), vecs[1:2], NoTangent())) @test norm(unthunk(pbs[1]), Inf) < condA * sqrt(eps(real(T))) end @@ -338,26 +337,24 @@ end d = 2 * (rand(T, N) .- one(T) / 2) howmany = 2 - tol = 2 * N^2 * eps(real(T)) + tol = tolerance(T) # 2 * N^2 * eps(real(T)) alg = Arnoldi(; tol=tol, krylovdim=2n) - alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n, verbosity=-1) - alg_rrule2 = GMRES(; tol=tol, krylovdim=2n, verbosity=-1) + alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n, verbosity=0) + alg_rrule2 = GMRES(; tol=tol, krylovdim=2n, verbosity=0) @testset for alg_rrule in (alg_rrule1, alg_rrule2) - fun_example, fun_example_fd, Avec, xvec, cvec, dvec, vals, vecs, howmany = build_fun_example(A, - x, - c, - d, - howmany, - which, - alg, - alg_rrule) + #! format: off + fun_example, fun_example_fd, Avec, xvec, cvec, dvec, vals, vecs, howmany = + build_fun_example(A, x, c, d, howmany, which, alg, alg_rrule) + #! 
format: on (JA, Jx, Jc, Jd) = FiniteDifferences.jacobian(fdm, fun_example_fd, Avec, xvec, cvec, dvec) (JA′, Jx′, Jc′, Jd′) = Zygote.jacobian(fun_example, Avec, xvec, cvec, dvec) - @test JA ≈ JA′ - @test Jc ≈ Jc′ - @test Jd ≈ Jd′ + + rtol = cond(A + c * d') * sqrt(eps(real(T))) + @test isapprox(JA, JA′; rtol=rtol) + @test isapprox(Jc, Jc′; rtol=rtol) + @test isapprox(Jd, Jd′; rtol=rtol) end end end @@ -372,24 +369,22 @@ end c = 2 * (rand(T, N) .- one(T) / 2) howmany = 2 - tol = 2 * N^2 * eps(real(T)) + tol = tolerance(T) alg = Lanczos(; tol=tol, krylovdim=2n) - alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n, verbosity=-1) - alg_rrule2 = GMRES(; tol=tol, krylovdim=2n, verbosity=-1) + alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n, verbosity=0) + alg_rrule2 = GMRES(; tol=tol, krylovdim=2n, verbosity=0) @testset for alg_rrule in (alg_rrule1, alg_rrule2) - fun_example, fun_example_fd, Avec, xvec, cvec, vals, vecs, howmany = build_hermitianfun_example(A, - x, - c, - howmany, - which, - alg, - alg_rrule) + #! format: off + fun_example, fun_example_fd, Avec, xvec, cvec, vals, vecs, howmany = + build_hermitianfun_example(A, x, c, howmany, which, alg, alg_rrule) + #! format: on (JA, Jx, Jc) = FiniteDifferences.jacobian(fdm, fun_example_fd, Avec, xvec, cvec) (JA′, Jx′, Jc′) = Zygote.jacobian(fun_example, Avec, xvec, cvec) - @test JA ≈ JA′ - @test Jc ≈ Jc′ + rtol = cond(A + c * c') * sqrt(eps(real(T))) + @test isapprox(JA, JA′; rtol=rtol) + @test isapprox(Jc, Jc′; rtol=rtol) end end end diff --git a/test/ad/linsolve.jl b/test/ad/linsolve.jl index b3220cd..13e1c90 100644 --- a/test/ad/linsolve.jl +++ b/test/ad/linsolve.jl @@ -2,6 +2,7 @@ module LinsolveAD using KrylovKit, LinearAlgebra using Random, Test, TestExtras using ChainRulesCore, ChainRulesTestUtils, Zygote, FiniteDifferences +using ..TestSetup fdm = ChainRulesTestUtils._fdm n = 10 @@ -75,7 +76,7 @@ end x = 2 * (rand(T, n) .- one(T) / 2) condA = cond(A) - tol = condA * (T <: Real ? eps(T) : 4 * eps(real(T))) + tol = tolerance(T) #condA * (T <: Real ? eps(T) : 4 * eps(real(T))) alg = GMRES(; tol=tol, krylovdim=n, maxiter=1) config = Zygote.ZygoteRuleConfig() @@ -105,25 +106,27 @@ end f = rand(T) # mix algorithms] - tol = N^2 * eps(real(T)) + tol = tolerance(T) # N^2 * eps(real(T)) alg1 = GMRES(; tol=tol, krylovdim=20) alg2 = BiCGStab(; tol=tol, maxiter=100) # BiCGStab seems to require slightly smaller tolerance for tests to work for (alg, alg_rrule) in ((alg1, alg2), (alg2, alg1)) - fun_example, Avec, bvec, cvec, dvec, evec, fvec = build_fun_example(A, b, c, d, e, - f, alg, - alg_rrule) + #! format: off + fun_example, Avec, bvec, cvec, dvec, evec, fvec = + build_fun_example(A, b, c, d, e, f, alg, alg_rrule) + #! format: on (JA, Jb, Jc, Jd, Je, Jf) = FiniteDifferences.jacobian(fdm, fun_example, Avec, bvec, cvec, dvec, evec, fvec) (JA′, Jb′, Jc′, Jd′, Je′, Jf′) = Zygote.jacobian(fun_example, Avec, bvec, cvec, dvec, evec, fvec) - @test JA ≈ JA′ - @test Jb ≈ Jb′ - @test Jc ≈ Jc′ - @test Jd ≈ Jd′ - @test Je ≈ Je′ - @test Jf ≈ Jf′ + rtol = cond(A + c * d') * sqrt(eps(real(T))) + @test isapprox(JA, JA′; rtol=rtol) + @test isapprox(Jb, Jb′; rtol=rtol) + @test isapprox(Jc, Jc′; rtol=rtol) + @test isapprox(Jd, Jd′; rtol=rtol) + @test isapprox(Je, Je′; rtol=rtol) + @test isapprox(Jf, Jf′; rtol=rtol) end end end diff --git a/test/ad/svdsolve.jl b/test/ad/svdsolve.jl index fd025ce..bf6ec38 100644 --- a/test/ad/svdsolve.jl +++ b/test/ad/svdsolve.jl @@ -154,8 +154,8 @@ end howmany = 3 tol = 3 * n * condA * (T <: Real ? 
eps(T) : 4 * eps(real(T))) alg = GKL(; krylovdim=2n, tol=tol) - alg_rrule1 = Arnoldi(; tol=tol, krylovdim=4n) - alg_rrule2 = GMRES(; tol=tol, krylovdim=3n) + alg_rrule1 = Arnoldi(; tol=tol, krylovdim=4n, verbosity=0) + alg_rrule2 = GMRES(; tol=tol, krylovdim=3n, verbosity=0) config = Zygote.ZygoteRuleConfig() for alg_rrule in (alg_rrule1, alg_rrule2) # unfortunately, rrule does not seem type stable for function arguments, because the @@ -228,7 +228,7 @@ end NoTangent())) alg = GKL(; krylovdim=2n, tol=tol, verbosity=1) - alg_rrule = Arnoldi(; tol=tol, krylovdim=4n, verbosity=0) + alg_rrule = Arnoldi(; tol=tol, krylovdim=4n, verbosity=1) (vals, lvecs, rvecs, info), pb = ChainRulesCore.rrule(config, svdsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) @@ -252,7 +252,7 @@ end NoTangent())) alg = GKL(; krylovdim=2n, tol=tol, verbosity=1) - alg_rrule = Arnoldi(; tol=tol, krylovdim=4n, verbosity=1) + alg_rrule = Arnoldi(; tol=tol, krylovdim=4n, verbosity=2) (vals, lvecs, rvecs, info), pb = ChainRulesCore.rrule(config, svdsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) @@ -284,7 +284,7 @@ end NoTangent())) alg = GKL(; krylovdim=2n, tol=tol, verbosity=1) - alg_rrule = GMRES(; tol=tol, krylovdim=3n, verbosity=0) + alg_rrule = GMRES(; tol=tol, krylovdim=3n, verbosity=1) (vals, lvecs, rvecs, info), pb = ChainRulesCore.rrule(config, svdsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) @@ -311,31 +311,41 @@ end NoTangent())) alg = GKL(; krylovdim=2n, tol=tol, verbosity=1) - alg_rrule = GMRES(; tol=tol, krylovdim=3n, verbosity=1) + alg_rrule = GMRES(; tol=tol, krylovdim=3n, verbosity=2) (vals, lvecs, rvecs, info), pb = ChainRulesCore.rrule(config, svdsolve, A, x, howmany, :LR, alg; alg_rrule=alg_rrule) - @test_logs (:warn,) (:info,) (:warn,) (:info,) pb((ZeroTangent(), - im .* lvecs[1:2] .+ - lvecs[2:-1:1], ZeroTangent(), - NoTangent())) - @test_logs (:warn,) (:info,) (:warn,) (:info,) pb((ZeroTangent(), lvecs[2:-1:1], - im .* rvecs[1:2] .+ - rvecs[2:-1:1], ZeroTangent(), + @test_logs (:warn,) (:info,) (:info,) (:warn,) (:info,) (:info,) pb((ZeroTangent(), + im .* + lvecs[1:2] .+ + lvecs[2:-1:1], + ZeroTangent(), + NoTangent())) + @test_logs (:warn,) (:info,) (:info,) (:warn,) (:info,) (:info,) pb((ZeroTangent(), + lvecs[2:-1:1], + im .* + rvecs[1:2] .+ + rvecs[2:-1:1], + ZeroTangent(), + NoTangent())) + @test_logs (:info,) (:info,) (:info,) (:info,) pb((ZeroTangent(), + lvecs[1:2] .+ lvecs[2:-1:1], + ZeroTangent(), NoTangent())) - @test_logs (:info,) (:info,) pb((ZeroTangent(), lvecs[1:2] .+ lvecs[2:-1:1], - ZeroTangent(), - NoTangent())) - @test_logs (:warn,) (:info,) (:warn,) (:info,) pb((ZeroTangent(), - im .* lvecs[1:2] .+ + @test_logs (:warn,) (:info,) (:info,) (:warn,) (:info,) (:info,) pb((ZeroTangent(), + im .* + lvecs[1:2] .+ + lvecs[2:-1:1], + +im .* + rvecs[1:2] + + rvecs[2:-1:1], + NoTangent())) + @test_logs (:info,) (:info,) (:info,) (:info,) pb((ZeroTangent(), + (1 + im) .* lvecs[1:2] .+ lvecs[2:-1:1], - +im .* rvecs[1:2] + + (1 - im) .* rvecs[1:2] + rvecs[2:-1:1], NoTangent())) - @test_logs (:info,) (:info,) pb((ZeroTangent(), - (1 + im) .* lvecs[1:2] .+ lvecs[2:-1:1], - (1 - im) .* rvecs[1:2] + rvecs[2:-1:1], - NoTangent())) end end end @@ -354,13 +364,10 @@ end alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n, verbosity=-1) alg_rrule2 = GMRES(; tol=tol, krylovdim=2n, verbosity=-1) for alg_rrule in (alg_rrule1, alg_rrule2) - fun_example_ad, fun_example_fd, Avec, xvec, cvec, dvec, vals, lvecs, rvecs = build_fun_example(A, - x, - c, - d, - howmany, - alg, - alg_rrule) + #! 
format: off
+            fun_example_ad, fun_example_fd, Avec, xvec, cvec, dvec, vals, lvecs, rvecs =
+                build_fun_example(A, x, c, d, howmany, alg, alg_rrule)
+            #! format: on
             (JA, Jx, Jc, Jd) = FiniteDifferences.jacobian(fdm, fun_example_fd, Avec,
                                                           xvec, cvec, dvec)
diff --git a/test/runtests.jl b/test/runtests.jl
index 34b7cac..3958786 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -52,12 +52,16 @@ end
     include("nestedtuple.jl")
 end
-# @testset "Differentiation rules" verbose = true begin
-#     include("ad/linsolve.jl")
-#     include("ad/eigsolve.jl")
-#     include("ad/degenerateeigsolve.jl")
-#     include("ad/svdsolve.jl")
-# end
+@testset "Linsolve differentiation rules" verbose = true begin
+    include("ad/linsolve.jl")
+end
+@testset "Eigsolve differentiation rules" verbose = true begin
+    include("ad/eigsolve.jl")
+    include("ad/degenerateeigsolve.jl")
+end
+@testset "Svdsolve differentiation rules" verbose = true begin
+    include("ad/svdsolve.jl")
+end

 t = time() - t

 # Issues

From 0cecee8c512fb65b3d83ed5f450722854b0f36fe Mon Sep 17 00:00:00 2001
From: Jutho
Date: Mon, 13 Jan 2025 00:18:51 +0100
Subject: [PATCH 8/8] (hopefully) final fixes in tests

---
 test/ad/linsolve.jl  | 2 +-
 test/gklfactorize.jl | 0
 2 files changed, 1 insertion(+), 1 deletion(-)
 delete mode 100644 test/gklfactorize.jl

diff --git a/test/ad/linsolve.jl b/test/ad/linsolve.jl
index 13e1c90..2cb35e7 100644
--- a/test/ad/linsolve.jl
+++ b/test/ad/linsolve.jl
@@ -120,7 +120,7 @@ end
                                                                       fvec)
             (JA′, Jb′, Jc′, Jd′, Je′, Jf′) = Zygote.jacobian(fun_example, Avec, bvec, cvec,
                                                              dvec, evec, fvec)
-            rtol = cond(A + c * d') * sqrt(eps(real(T)))
+            rtol = 2 * cond(A + c * d') * sqrt(eps(real(T)))
             @test isapprox(JA, JA′; rtol=rtol)
             @test isapprox(Jb, Jb′; rtol=rtol)
             @test isapprox(Jc, Jc′; rtol=rtol)
diff --git a/test/gklfactorize.jl b/test/gklfactorize.jl
deleted file mode 100644
index e69de29..0000000
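The `KrylovDefaults` diff above turns the global defaults into `Ref`s that the constructors read as `KrylovDefaults.krylovdim[]`, `KrylovDefaults.tol[]`, and so on, which makes the package-wide defaults adjustable at runtime. A minimal usage sketch (not part of the patch series; it assumes only the `Ref` fields and the `GMRES` keyword constructor shown in the diffs above):

    using KrylovKit

    # Adjust the global defaults by mutating the Refs introduced above.
    KrylovKit.KrylovDefaults.tol[] = 1e-10       # tighten the default tolerance
    KrylovKit.KrylovDefaults.verbosity[] = 0     # silence messages by default

    # Constructors called afterwards pick up the new values, since their keyword
    # defaults now read KrylovDefaults.tol[], KrylovDefaults.verbosity[], etc.
    alg = GMRES()   # krylovdim = 30, maxiter = 100, tol = 1e-10, verbosity = 0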
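The verbosity-related diffs replace the hard-coded `verbosity::Int=0` defaults and bare `>= 1` comparisons with named levels (`WARN_LEVEL`, `STARTSTOP_LEVEL`, `EACHITERATION_LEVEL`) that the ChainRules extension imports from KrylovKit. A rough sketch of configuring an algorithm with them; the meaning attached to each level here is an assumption inferred from the comparisons they replace and from the `@test_logs` patterns in the updated tests, not something the patches state explicitly:

    using KrylovKit

    quiet     = Arnoldi(; verbosity=0)                              # no messages
    warn      = Arnoldi(; verbosity=KrylovKit.WARN_LEVEL)           # warnings only (the new default)
    startstop = Arnoldi(; verbosity=KrylovKit.STARTSTOP_LEVEL)      # plus an :info message on convergence
    trace     = Arnoldi(; verbosity=KrylovKit.EACHITERATION_LEVEL)  # plus per-iteration :info output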