From 493e6817ebc83258d081e99075e711455b4ac2e8 Mon Sep 17 00:00:00 2001
From: chase
Date: Thu, 8 Jan 2015 17:53:42 -0500
Subject: [PATCH 1/3] LAPACK: This commit adds the ordschur functionality for
 generalized Schur decompositions by plugging into LAPACK's tgsen function.

---
 base/linalg/factorization.jl |   5 ++
 base/linalg/lapack.jl        | 141 ++++++++++++++++++++++++++++++++---
 test/linalg1.jl              |  95 ++++++++++++++---------
 3 files changed, 196 insertions(+), 45 deletions(-)

diff --git a/base/linalg/factorization.jl b/base/linalg/factorization.jl
index 9f04c3d8ea100..4ccc6644dd6a0 100644
--- a/base/linalg/factorization.jl
+++ b/base/linalg/factorization.jl
@@ -721,6 +721,11 @@ schurfact!{T<:BlasFloat}(A::StridedMatrix{T}, B::StridedMatrix{T}) = Generalized
 schurfact{T<:BlasFloat}(A::StridedMatrix{T},B::StridedMatrix{T}) = schurfact!(copy(A),copy(B))
 schurfact{TA,TB}(A::StridedMatrix{TA}, B::StridedMatrix{TB}) = (S = promote_type(Float32,typeof(one(TA)/norm(one(TA))),TB); schurfact!(S != TA ? convert(AbstractMatrix{S},A) : copy(A), S != TB ? convert(AbstractMatrix{S},B) : copy(B)))
 
+ordschur!{Ty<:BlasFloat}(S::StridedMatrix{Ty}, T::StridedMatrix{Ty}, Q::StridedMatrix{Ty}, Z::StridedMatrix{Ty}, select::Array{Int}) = GeneralizedSchur(LinAlg.LAPACK.tgsen!(select, S, T, Q, Z)...)
+ordschur{Ty<:BlasFloat}(S::StridedMatrix{Ty}, T::StridedMatrix{Ty}, Q::StridedMatrix{Ty}, Z::StridedMatrix{Ty}, select::Array{Int}) = ordschur!(copy(S), copy(T), copy(Q), copy(Z), select)
+ordschur!{Ty<:BlasFloat}(gschur::GeneralizedSchur{Ty}, select::Array{Int}) = (res=ordschur!(gschur.S, gschur.T, gschur.Q, gschur.Z, select); gschur[:alpha][:]=res[:alpha]; gschur[:beta][:]=res[:beta]; res)
+ordschur{Ty<:BlasFloat}(gschur::GeneralizedSchur{Ty}, select::Array{Int}) = ordschur(gschur.S, gschur.T, gschur.Q, gschur.Z, select)
+
 function getindex(F::GeneralizedSchur, d::Symbol)
     d == :S && return F.S
     d == :T && return F.T
diff --git a/base/linalg/lapack.jl b/base/linalg/lapack.jl
index 385280100db9e..0458e80a4c3b7 100644
--- a/base/linalg/lapack.jl
+++ b/base/linalg/lapack.jl
@@ -3540,9 +3540,9 @@ for (gees, gges, elty, relty) in
     end
 end
 # Reorder Schur forms
-for (trsen, elty) in
-    ((:dtrsen_,:Float64),
-     (:strsen_,:Float32))
+for (trsen, tgsen, elty) in
+    ((:dtrsen_, :dtgsen_, :Float64),
+     (:strsen_, :stgsen_, :Float32))
     @eval begin
         function trsen!(select::Array{Int}, T::StridedMatrix{$elty}, Q::StridedMatrix{$elty})
 # *     .. Scalar Arguments ..
@@ -3556,7 +3556,8 @@ for (trsen, elty) in
 #       DOUBLE PRECISION   Q( LDQ, * ), T( LDT, * ), WI( * ), WORK( * ), WR( * )
             chkstride1(T, Q)
             n = chksquare(T)
-            ld = max(1, n)
+            ldt = max(1, stride(T, 2))
+            ldq = max(1, stride(Q, 2))
             wr = similar(T, $elty, n)
             wi = similar(T, $elty, n)
             m = sum(select)
@@ -3572,10 +3573,10 @@ for (trsen, elty) in
                 (Ptr{BlasChar}, Ptr{BlasChar}, Ptr{BlasInt}, Ptr{BlasInt},
                 Ptr{$elty}, Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt},
                 Ptr{$elty}, Ptr{$elty}, Ptr{BlasInt}, Ptr{Void}, Ptr{Void},
-                Ptr{$elty}, Ptr {BlasInt}, Ptr{BlasInt}, Ptr{BlasInt},
+                Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt},
                 Ptr{BlasInt}),
                 &'N', &'V', select, &n,
-                T, &ld, Q, &ld,
+                T, &ldt, Q, &ldq,
                 wr, wi, &m, C_NULL, C_NULL,
                 work, &lwork, iwork, &liwork,
                 info)
@@ -3589,12 +3590,71 @@ for (trsen, elty) in
             end
             T, Q, all(wi .== 0) ? wr : complex(wr, wi)
         end
+        function tgsen!(select::Array{Int}, S::StridedMatrix{$elty}, T::StridedMatrix{$elty},
+                        Q::StridedMatrix{$elty}, Z::StridedMatrix{$elty})
+# *     .. Scalar Arguments ..
+# *     LOGICAL            WANTQ, WANTZ
+# *     INTEGER            IJOB, INFO, LDA, LDB, LDQ, LDZ, LIWORK, LWORK,
+# *    $                   M, N
+# *     DOUBLE PRECISION   PL, PR
+# *     ..
+# *     .. Array Arguments ..
+# *     LOGICAL            SELECT( * )
+# *     INTEGER            IWORK( * )
+# *     DOUBLE PRECISION   A( LDA, * ), ALPHAI( * ), ALPHAR( * ),
+# *    $                   B( LDB, * ), BETA( * ), DIF( * ), Q( LDQ, * ),
+# *    $                   WORK( * ), Z( LDZ, * )
+# *     ..
+            chkstride1(S, T, Q, Z)
+            n, nt, nq, nz = chksquare(S, T, Q, Z)
+            n==nt==nq==nz || throw(DimensionMismatch("matrices are not of same size"))
+            lds = max(1, stride(S, 2))
+            ldt = max(1, stride(T, 2))
+            ldq = max(1, stride(Q, 2))
+            ldz = max(1, stride(Z, 2))
+            m = sum(select)
+            alphai = similar(T, $elty, n)
+            alphar = similar(T, $elty, n)
+            beta = similar(T, $elty, n)
+            lwork = blas_int(-1)
+            work = Array($elty, 1)
+            liwork = blas_int(-1)
+            iwork = Array(BlasInt, 1)
+            info = Array(BlasInt, 1)
+            select = convert(Array{BlasInt}, select)
+
+            for i = 1:2
+                ccall(($(blasfunc(tgsen)), liblapack), Void,
+                    (Ptr{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt},
+                    Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ptr{$elty},
+                    Ptr{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty},
+                    Ptr{$elty}, Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt},
+                    Ptr{BlasInt}, Ptr{Void}, Ptr{Void}, Ptr{Void},
+                    Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt},
+                    Ptr{BlasInt}),
+                    &0, &1, &1, select,
+                    &n, S, &lds, T,
+                    &ldt, alphar, alphai, beta,
+                    Q, &ldq, Z, &ldz,
+                    &m, C_NULL, C_NULL, C_NULL,
+                    work, &lwork, iwork, &liwork,
+                    info)
+                @lapackerror
+                if i == 1 # only estimated optimal lwork, liwork
+                    lwork = blas_int(real(work[1]))
+                    work = Array($elty, lwork)
+                    liwork = blas_int(real(iwork[1]))
+                    iwork = Array(BlasInt, liwork)
+                end
+            end
+            S, T, complex(alphar, alphai), beta, Q, Z
+        end
     end
 end
-for (trsen, elty) in
-    ((:ztrsen_,:Complex128),
-     (:ctrsen_,:Complex64))
+for (trsen, tgsen, elty) in
+    ((:ztrsen_, :ztgsen_, :Complex128),
+     (:ctrsen_, :ctgsen_, :Complex64))
     @eval begin
         function trsen!(select::Array{Int}, T::StridedMatrix{$elty}, Q::StridedMatrix{$elty})
 # *     .. Scalar Arguments ..
@@ -3607,7 +3667,8 @@ for (trsen, elty) in
 #       COMPLEX            Q( LDQ, * ), T( LDT, * ), W( * ), WORK( * )
             chkstride1(T, Q)
             n = chksquare(T)
-            ld = max(1, n)
+            ldt = max(1, stride(T, 2))
+            ldq = max(1, stride(Q, 2))
             w = similar(T, $elty, n)
             m = sum(select)
             work = Array($elty, 1)
@@ -3623,7 +3684,7 @@ for (trsen, elty) in
                 Ptr{$elty}, Ptr {BlasInt}, Ptr{BlasInt}),
                 &'N', &'V', select, &n,
-                T, &ld, Q, &ld,
+                T, &ldt, Q, &ldq,
                 w, &m, C_NULL, C_NULL,
                 work, &lwork,
                 info)
@@ -3635,6 +3696,64 @@ for (trsen, elty) in
             end
             T, Q, w
         end
+        function tgsen!(select::Array{Int}, S::StridedMatrix{$elty}, T::StridedMatrix{$elty},
+                        Q::StridedMatrix{$elty}, Z::StridedMatrix{$elty})
+# *     .. Scalar Arguments ..
+# *     LOGICAL            WANTQ, WANTZ
+# *     INTEGER            IJOB, INFO, LDA, LDB, LDQ, LDZ, LIWORK, LWORK,
+# *    $                   M, N
+# *     DOUBLE PRECISION   PL, PR
+# *     ..
+# *     .. Array Arguments ..
+# *     LOGICAL            SELECT( * )
+# *     INTEGER            IWORK( * )
+# *     DOUBLE PRECISION   DIF( * )
+# *     COMPLEX*16         A( LDA, * ), ALPHA( * ), B( LDB, * ),
+# *    $                   BETA( * ), Q( LDQ, * ), WORK( * ), Z( LDZ, * )
+# *     ..
+            chkstride1(S, T, Q, Z)
+            n, nt, nq, nz = chksquare(S, T, Q, Z)
+            n==nt==nq==nz || throw(DimensionMismatch("matrices are not of same size"))
+            lds = max(1, stride(S, 2))
+            ldt = max(1, stride(T, 2))
+            ldq = max(1, stride(Q, 2))
+            ldz = max(1, stride(Z, 2))
+            m = sum(select)
+            alpha = similar(T, $elty, n)
+            beta = similar(T, $elty, n)
+            lwork = blas_int(-1)
+            work = Array($elty, 1)
+            liwork = blas_int(-1)
+            iwork = Array(BlasInt, 1)
+            info = Array(BlasInt, 1)
+            select = convert(Array{BlasInt}, select)
+
+            for i = 1:2
+                ccall(($(blasfunc(tgsen)), liblapack), Void,
+                    (Ptr{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt},
+                    Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ptr{$elty},
+                    Ptr{BlasInt}, Ptr{$elty}, Ptr{$elty},
+                    Ptr{$elty}, Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt},
+                    Ptr{BlasInt}, Ptr{Void}, Ptr{Void}, Ptr{Void},
+                    Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt},
+                    Ptr{BlasInt}),
+                    &0, &1, &1, select,
+                    &n, S, &lds, T,
+                    &ldt, alpha, beta,
+                    Q, &ldq, Z, &ldz,
+                    &m, C_NULL, C_NULL, C_NULL,
+                    work, &lwork, iwork, &liwork,
+                    info)
+                @lapackerror
+                if i == 1 # only estimated optimal lwork, liwork
+                    lwork = blas_int(real(work[1]))
+                    work = Array($elty, lwork)
+                    liwork = blas_int(real(iwork[1]))
+                    iwork = Array(BlasInt, liwork)
+                end
+            end
+            S, T, alpha, beta, Q, Z
+        end
     end
 end
diff --git a/test/linalg1.jl b/test/linalg1.jl
index 56eb4865909b9..e06f8a0bdb960 100644
--- a/test/linalg1.jl
+++ b/test/linalg1.jl
@@ -3,6 +3,11 @@ debug = false
 import Base.LinAlg: BlasComplex, BlasFloat, BlasReal, QRPivoted
 
 n = 10
+
+# Split n into 2 parts for tests needing two matrices
+n1 = div(n, 2)
+n2 = 2*n1
+
 srand(1234321)
 
 a = rand(n,n)
@@ -110,12 +115,12 @@ debug && println("(Automatic) Square LU decomposition")
     @test norm(a*(lua\b) - b, 1) < ε*κ*n*2 # Two because the right hand side has two columns
 
 debug && println("Thin LU")
-    lua = lufact(a[:,1:5])
-    @test_approx_eq lua[:L]*lua[:U] lua[:P]*a[:,1:5]
+    lua = lufact(a[:,1:n1])
+    @test_approx_eq lua[:L]*lua[:U] lua[:P]*a[:,1:n1]
 
 debug && println("Fat LU")
-    lua = lufact(a[1:5,:])
-    @test_approx_eq lua[:L]*lua[:U] lua[:P]*a[1:5,:]
+    lua = lufact(a[1:n1,:])
+    @test_approx_eq lua[:L]*lua[:U] lua[:P]*a[1:n1,:]
 
 debug && println("QR decomposition (without pivoting)")
     qra = qrfact(a, pivot=false)
@@ -126,23 +131,23 @@ debug && println("QR decomposition (without pivoting)")
     @test_approx_eq_eps a*(qra\b) b 3000ε
 
 debug && println("(Automatic) Fat (pivoted) QR decomposition") # Pivoting is only implemented for BlasFloats
-    qrpa = factorize(a[1:5,:])
+    qrpa = factorize(a[1:n1,:])
     q,r = qrpa[:Q], qrpa[:R]
     if isa(qrpa,QRPivoted) p = qrpa[:p] end # Reconsider if pivoted QR gets implemented in julia
-    @test_approx_eq q'*full(q, thin=false) eye(5)
-    @test_approx_eq q*full(q, thin=false)' eye(5)
-    @test_approx_eq q*r isa(qrpa,QRPivoted) ? a[1:5,p] : a[1:5,:]
-    @test_approx_eq isa(qrpa, QRPivoted) ? q*r[:,invperm(p)] : q*r a[1:5,:]
-    @test_approx_eq_eps a[1:5,:]*(qrpa\b[1:5]) b[1:5] 5000ε
+    @test_approx_eq q'*full(q, thin=false) eye(n1)
+    @test_approx_eq q*full(q, thin=false)' eye(n1)
+    @test_approx_eq q*r isa(qrpa,QRPivoted) ? a[1:n1,p] : a[1:n1,:]
+    @test_approx_eq isa(qrpa, QRPivoted) ? q*r[:,invperm(p)] : q*r a[1:n1,:]
+    @test_approx_eq_eps a[1:n1,:]*(qrpa\b[1:n1]) b[1:n1] 5000ε
 
 debug && println("(Automatic) Thin (pivoted) QR decomposition") # Pivoting is only implemented for BlasFloats
-    qrpa = factorize(a[:,1:5])
+    qrpa = factorize(a[:,1:n1])
     q,r = qrpa[:Q], qrpa[:R]
     if isa(qrpa, QRPivoted) p = qrpa[:p] end # Reconsider if pivoted QR gets implemented in julia
     @test_approx_eq q'*full(q, thin=false) eye(n)
     @test_approx_eq q*full(q, thin=false)' eye(n)
-    @test_approx_eq q*r isa(qrpa, QRPivoted) ? a[:,p] : a[:,1:5]
-    @test_approx_eq isa(qrpa, QRPivoted) ? q*r[:,invperm(p)] : q*r a[:,1:5]
+    @test_approx_eq q*r isa(qrpa, QRPivoted) ? a[:,p] : a[:,1:n1]
+    @test_approx_eq isa(qrpa, QRPivoted) ? q*r[:,invperm(p)] : q*r a[:,1:n1]
 
 debug && println("symmetric eigen-decomposition")
     if eltya != BigFloat && eltyb != BigFloat # Revisit when implemented in julia
@@ -164,19 +169,22 @@ debug && println("non-symmetric eigen decomposition")
 
 debug && println("symmetric generalized eigenproblem")
     if eltya != BigFloat && eltyb != BigFloat # Revisit when implemented in julia
-        a610 = a[:,6:10]
-        f = eigfact(asym[1:5,1:5], a610'a610)
-        @test_approx_eq asym[1:5,1:5]*f[:vectors] scale(a610'a610*f[:vectors], f[:values])
-        @test_approx_eq f[:values] eigvals(asym[1:5,1:5], a610'a610)
-        @test_approx_eq_eps prod(f[:values]) prod(eigvals(asym[1:5,1:5]/(a610'a610))) 200ε
+        asym_sg = asym[1:n1, 1:n1]
+        a_sg = a[:,n1+1:n2]
+        f = eigfact(asym_sg, a_sg'a_sg)
+        @test_approx_eq asym_sg*f[:vectors] scale(a_sg'a_sg*f[:vectors], f[:values])
+        @test_approx_eq f[:values] eigvals(asym_sg, a_sg'a_sg)
+        @test_approx_eq_eps prod(f[:values]) prod(eigvals(asym_sg/(a_sg'a_sg))) 200ε
     end
 
 debug && println("Non-symmetric generalized eigenproblem")
     if eltya != BigFloat && eltyb != BigFloat # Revisit when implemented in julia
-        f = eigfact(a[1:5,1:5], a[6:10,6:10])
-        @test_approx_eq a[1:5,1:5]*f[:vectors] scale(a[6:10,6:10]*f[:vectors], f[:values])
-        @test_approx_eq f[:values] eigvals(a[1:5,1:5], a[6:10,6:10])
-        @test_approx_eq_eps prod(f[:values]) prod(eigvals(a[1:5,1:5]/a[6:10,6:10])) 50000ε
+        a1_nsg = a[1:n1, 1:n1]
+        a2_nsg = a[n1+1:n2, n1+1:n2]
+        f = eigfact(a1_nsg, a2_nsg)
+        @test_approx_eq a1_nsg*f[:vectors] scale(a2_nsg*f[:vectors], f[:values])
+        @test_approx_eq f[:values] eigvals(a1_nsg, a2_nsg)
+        @test_approx_eq_eps prod(f[:values]) prod(eigvals(a1_nsg/a2_nsg)) 50000ε
     end
 
 debug && println("Schur")
@@ -202,13 +210,31 @@ debug && println("Reorder Schur")
 
 debug && println("Generalized Schur")
     if eltya != BigFloat && eltyb != BigFloat # Revisit when implemented in julia
-        f = schurfact(a[1:5,1:5], a[6:10,6:10])
-        @test_approx_eq f[:Q]*f[:S]*f[:Z]' a[1:5,1:5]
-        @test_approx_eq f[:Q]*f[:T]*f[:Z]' a[6:10,6:10]
+        a1_sf = a[1:n1, 1:n1]
+        a2_sf = a[n1+1:n2, n1+1:n2]
+        f = schurfact(a1_sf, a2_sf)
+        @test_approx_eq f[:Q]*f[:S]*f[:Z]' a1_sf
+        @test_approx_eq f[:Q]*f[:T]*f[:Z]' a2_sf
         @test istriu(f[:S]) || iseltype(a,Real)
         @test istriu(f[:T]) || iseltype(a,Real)
     end
 
+debug && println("Reorder Generalized Schur")
+    if eltya != BigFloat && eltyb != BigFloat # Revisit when implemented in julia
+        a1_sf = a[1:n1, 1:n1]
+        a2_sf = a[n1+1:n2, n1+1:n2]
+        NS = schurfact(a1_sf, a2_sf)
+        # Currently just testing by selecting the generalized eigenvalues with modulus < 1
+        select = int(real(NS[:values] .* conj(NS[:values])) .< 1)
+        m = sum(select)
+        S = ordschur(NS, select)
+        # Make sure that the new factorization still factors the matrix
+        @test_approx_eq S[:Q]*S[:S]*S[:Z]' a1_sf
+        @test_approx_eq S[:Q]*S[:T]*S[:Z]' a2_sf
+        # Make sure that we have sorted it correctly
+        @test_approx_eq NS[:values][find(select)] S[:values][1:m]
+    end
+
 debug && println("singular value decomposition")
     if eltya != BigFloat && eltyb != BigFloat # Revisit when implemented in julia
         usv = svdfact(a)
@@ -217,9 +243,10 @@ debug && println("singular value decomposition")
 
 debug && println("Generalized svd")
     if eltya != BigFloat && eltyb != BigFloat # Revisit when implemented in julia
-        gsvd = svdfact(a,a[1:5,:])
+        a_svd = a[1:n1, :]
+        gsvd = svdfact(a,a_svd)
         @test_approx_eq gsvd[:U]*gsvd[:D1]*gsvd[:R]*gsvd[:Q]' a
-        @test_approx_eq gsvd[:V]*gsvd[:D2]*gsvd[:R]*gsvd[:Q]' a[1:5,:]
+        @test_approx_eq gsvd[:V]*gsvd[:D2]*gsvd[:R]*gsvd[:Q]' a_svd
     end
 
 debug && println("Solve square general system of equations")
@@ -231,10 +258,10 @@ debug && println("Solve square general system of equations")
 
 debug && println("Test nullspace")
     if eltya != BigFloat && eltyb != BigFloat # Revisit when implemented in julia
-        a15null = nullspace(a[:,1:5]')
-        @test rank([a[:,1:5] a15null]) == 10
-        @test_approx_eq_eps norm(a[:,1:5]'a15null, Inf) zero(eltya) 300ε
-        @test_approx_eq_eps norm(a15null'a[:,1:5], Inf) zero(eltya) 400ε
+        a15null = nullspace(a[:,1:n1]')
+        @test rank([a[:,1:n1] a15null]) == 10
+        @test_approx_eq_eps norm(a[:,1:n1]'a15null, Inf) zero(eltya) 300ε
+        @test_approx_eq_eps norm(a15null'a[:,1:n1], Inf) zero(eltya) 400ε
         @test size(nullspace(b), 2) == 0
     end
 
@@ -244,9 +271,9 @@ debug && println("\ntype of a: ", eltya, "\n")
 
 debug && println("Test pinv")
     if eltya != BigFloat # Revisit when implemented in julia
-        pinva15 = pinv(a[:,1:5])
-        @test_approx_eq a[:,1:5]*pinva15*a[:,1:5] a[:,1:5]
-        @test_approx_eq pinva15*a[:,1:5]*pinva15 pinva15
+        pinva15 = pinv(a[:,1:n1])
+        @test_approx_eq a[:,1:n1]*pinva15*a[:,1:n1] a[:,1:n1]
+        @test_approx_eq pinva15*a[:,1:n1]*pinva15 pinva15
     end
 
 # if isreal(a)

From 6b466692c0fbc39cdf00147495efcc280136eff2 Mon Sep 17 00:00:00 2001
From: chase
Date: Mon, 12 Jan 2015 14:38:19 -0500
Subject: [PATCH 2/3] LAPACK: Fix stride arguments by changing them to
 stride(?, 2) for multiple LAPACK functions.

---
 base/linalg/lapack.jl | 29 ++++++++++++++++-------------
 test/linalg1.jl       |  1 -
 2 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/base/linalg/lapack.jl b/base/linalg/lapack.jl
index 0458e80a4c3b7..014bff044545b 100644
--- a/base/linalg/lapack.jl
+++ b/base/linalg/lapack.jl
@@ -1245,8 +1245,8 @@ for (geevx, ggev, elty) in
             chkstride1(A,B)
             n, m = chksquare(A,B)
             n==m || throw(DimensionMismatch("matrices must have same size"))
-            lda = max(1, n)
-            ldb = max(1, n)
+            lda = max(1, stride(A, 2))
+            ldb = max(1, stride(B, 2))
             alphar = similar(A, $elty, n)
             alphai = similar(A, $elty, n)
             beta = similar(A, $elty, n)
@@ -1351,7 +1351,8 @@ for (geevx, ggev, elty, relty) in
             chkstride1(A, B)
             n, m = chksquare(A, B)
             n==m || throw(DimensionMismatch("matrices must have same size"))
-            lda = ldb = max(1, n)
+            lda = max(1, stride(A, 2))
+            ldb = max(1, stride(B, 2))
             alpha = similar(A, $elty, n)
             beta = similar(A, $elty, n)
             ldvl = jobvl == 'V' ? n : 1
@@ -2920,7 +2921,8 @@ for (syev, syevr, sygvd, elty) in
             chkstride1(A, B)
             n, m = chksquare(A, B)
             n==m || throw(DimensionMismatch("Matrices must have same size"))
-            lda = ldb = max(1, n)
+            lda = max(1, stride(A, 2))
+            ldb = max(1, stride(B, 2))
             w = similar(A, $elty, n)
             work = Array($elty, 1)
             lwork = -one(BlasInt)
@@ -3071,7 +3073,8 @@ for (syev, syevr, sygvd, elty, relty) in
             chkstride1(A, B)
             n, m = chksquare(A, B)
             n==m || throw(DimensionMismatch("Matrices must have same size"))
-            lda = ldb = max(1, n)
+            lda = max(1, stride(A, 2))
+            ldb = max(1, stride(B, 2))
             w = similar(A, $relty, n)
             work = Array($elty, 1)
             lwork = -one(BlasInt)
@@ -3307,7 +3310,7 @@ for (gehrd, elty) in
                 Ptr{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{BlasInt},
                 Ptr{BlasInt}),
                 &n, &ilo, &ihi, A,
-                &max(1,n), tau, work, &lwork,
+                &max(1, stride(A, 2)), tau, work, &lwork,
                 info)
             @lapackerror
             if lwork < 0
@@ -3346,7 +3349,7 @@ for (orghr, elty) in
                 Ptr{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{BlasInt},
                 Ptr{BlasInt}),
                 &n, &ilo, &ihi, A,
-                &max(1,n), tau, work, &lwork,
+                &max(1, stride(A, 2)), tau, work, &lwork,
                 info)
             @lapackerror
             if lwork < 0
@@ -3389,7 +3392,7 @@ for (gees, gges, elty) in
                 Ptr{$elty}, Ptr{$elty}, Ptr{BlasInt}, Ptr{$elty},
                 Ptr{BlasInt}, Ptr{Void}, Ptr{BlasInt}),
                 &jobvs, &'N', C_NULL, &n,
-                A, &max(1, n), sdim, wr,
+                A, &max(1, stride(A, 2)), sdim, wr,
                 wi, vs, &ldvs, work,
                 &lwork, C_NULL, info)
             @lapackerror
@@ -3433,8 +3436,8 @@ for (gees, gges, elty) in
                 Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ptr{Void},
                 Ptr{BlasInt}),
                 &jobvsl, &jobvsr, &'N', C_NULL,
-                &n, A, &max(1,n), B,
-                &max(1,n), &sdim, alphar, alphai,
+                &n, A, &max(1,stride(A, 2)), B,
+                &max(1,stride(B, 2)), &sdim, alphar, alphai,
                 beta, vsl, &ldvsl, vsr,
                 &ldvsr, work, &lwork, C_NULL,
                 info)
@@ -3479,7 +3482,7 @@ for (gees, gges, elty, relty) in
                 Ptr{$elty}, Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt},
                 Ptr{$relty}, Ptr{Void}, Ptr{BlasInt}),
                 &jobvs, &sort, C_NULL, &n,
-                A, &max(1, n), &sdim, w,
+                A, &max(1, stride(A, 2)), &sdim, w,
                 vs, &ldvs, work, &lwork,
                 rwork, C_NULL, info)
             @lapackerror
@@ -3524,8 +3527,8 @@ for (gees, gges, elty, relty) in
                 Ptr{$elty}, Ptr{BlasInt}, Ptr{$relty}, Ptr{Void},
                 Ptr{BlasInt}),
                 &jobvsl, &jobvsr, &'N', C_NULL,
-                &n, A, &max(1,n), B,
-                &max(1,n), &sdim, alpha, beta,
+                &n, A, &max(1, stride(A, 2)), B,
+                &max(1, stride(B, 2)), &sdim, alpha, beta,
                 vsl, &ldvsl, vsr, &ldvsr,
                 work, &lwork, rwork, C_NULL,
                 info)
diff --git a/test/linalg1.jl b/test/linalg1.jl
index e06f8a0bdb960..5ae80a224f15a 100644
--- a/test/linalg1.jl
+++ b/test/linalg1.jl
@@ -264,7 +264,6 @@ debug && println("Test nullspace")
         @test_approx_eq_eps norm(a15null'a[:,1:n1], Inf) zero(eltya) 400ε
         @test size(nullspace(b), 2) == 0
     end
-
 end # for eltyb
 
 debug && println("\ntype of a: ", eltya, "\n")

From 02234008e155add64194d0884e474d5c4d0a340c Mon Sep 17 00:00:00 2001
From: chase
Date: Wed, 14 Jan 2015 11:39:25 -0500
Subject: [PATCH 3/3] DOCS: Added documentation for the generalized Schur
 version of ordschur.
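
A minimal usage sketch of the API this series adds (illustrative only; it
assumes a 0.3-era Julia where schurfact and ordschur live in Base.LinAlg —
the matrices and the selection criterion below are made up, mirroring
test/linalg1.jl):

    A = rand(5, 5); B = rand(5, 5)
    F = schurfact(A, B)       # GeneralizedSchur factorization of (A, B)
    # keep the generalized eigenvalues with modulus < 1, as in the tests
    select = int(real(F[:values] .* conj(F[:values])) .< 1)
    FS = ordschur(F, select)  # reordered copy; ordschur! reorders in place
    # FS still factors the pair:
    #   A = FS[:Q]*FS[:S]*FS[:Z]'  and  B = FS[:Q]*FS[:T]*FS[:Z]'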
---
 doc/stdlib/linalg.rst | 50 ++++++++++++++++++++++++++++---------------
 1 file changed, 33 insertions(+), 17 deletions(-)

diff --git a/doc/stdlib/linalg.rst b/doc/stdlib/linalg.rst
index 4f1b83831787a..29382bbe1d2bc 100644
--- a/doc/stdlib/linalg.rst
+++ b/doc/stdlib/linalg.rst
@@ -69,7 +69,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f
    ``F[:L]``   ``L`` (lower triangular) part of ``LU``  ✓   ✓
    ``F[:U]``   ``U`` (upper triangular) part of ``LU``  ✓   ✓
    ``F[:p]``   (right) permutation ``Vector``           ✓   ✓
-   ``F[:P]``   (right) permutation ``Matrix``           ✓   
+   ``F[:P]``   (right) permutation ``Matrix``           ✓
    ``F[:q]``   left permutation ``Vector``                  ✓
    ``F[:Rs]``  ``Vector`` of scaling factors                ✓
    ``F[:(:)]`` ``(L,U,p,q,Rs)`` components                  ✓
@@ -111,7 +111,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f
 
 .. function:: qr(A, [pivot=false,][thin=true]) -> Q, R, [p]
 
-   Compute the (pivoted) QR factorization of ``A`` such that either ``A = Q*R`` or ``A[:,p] = Q*R``. Also see ``qrfact``. The default is to compute a thin factorization. Note that ``R`` is not extended with zeros when the full ``Q`` is requested. 
+   Compute the (pivoted) QR factorization of ``A`` such that either ``A = Q*R`` or ``A[:,p] = Q*R``. Also see ``qrfact``. The default is to compute a thin factorization. Note that ``R`` is not extended with zeros when the full ``Q`` is requested.
 
 .. function:: qrfact(A,[pivot=false]) -> F
@@ -189,7 +189,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f
 
    Computes eigenvalues and eigenvectors of ``A``. See :func:`eigfact` for
    details on the ``balance`` keyword argument.
-   
+
    .. doctest::
 
       julia> eig([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0])
      ([1.0,3.0,18.0],
      3x3 Array{Float64,2}:
       1.0  0.0  0.0
       0.0  1.0  0.0
       0.0  0.0  1.0)
-   
+
    ``eig`` is a wrapper around :func:`eigfact`, extracting all parts of the
    factorization to a tuple; where possible, using :func:`eigfact` is
    recommended.
 
 .. function:: eig(A, B) -> D, V
 
    Computes generalized eigenvalues and vectors of ``A`` with respect to ``B``.
-   
+
    ``eig`` is a wrapper around :func:`eigfact`, extracting all parts of the
    factorization to a tuple; where possible, using :func:`eigfact` is
    recommended.
@@ -248,7 +248,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f
    factorization object ``F`` which contains the eigenvalues in ``F[:values]``
    and the eigenvectors in the columns of the matrix ``F[:vectors]``.
    (The ``k``th eigenvector can be obtained from the slice ``F[:vectors][:, k]``.)
-   
+
    The following functions are available for ``Eigen`` objects: ``inv``,
    ``det``.
@@ -300,7 +300,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f
 
 .. function:: ordschur(Q, T, select) -> Schur
 
    Reorders the Schur factorization of a real matrix ``A=Q*T*Q'`` according to the logical array ``select``, returning a Schur object ``F``.
The selected eigenvalues appear in the leading diagonal of ``F[:Schur]`` and the corresponding leading columns of ``F[:vectors]`` form an orthonormal basis of the corresponding right invariant subspace. A complex conjugate pair of eigenvalues must be either both included or excluded via ``select``.
 
 .. function:: ordschur!(Q, T, select) -> Schur
 
    Reorders the Schur factorization of a real matrix ``A=Q*T*Q'``, overwriting ``Q`` and ``T`` in the process. See :func:`ordschur`
 
 .. function:: schurfact(A, B) -> GeneralizedSchur
 
    Computes the Generalized Schur (or QZ) factorization of the matrices ``A`` and ``B``. The (quasi) triangular Schur factors can be obtained from the ``Schur`` object ``F`` with ``F[:S]`` and ``F[:T]``, the left unitary/orthogonal Schur vectors can be obtained with ``F[:left]`` or ``F[:Q]`` and the right unitary/orthogonal Schur vectors can be obtained with ``F[:right]`` or ``F[:Z]`` such that ``A=F[:left]*F[:S]*F[:right]'`` and ``B=F[:left]*F[:T]*F[:right]'``. The generalized eigenvalues of ``A`` and ``B`` can be obtained with ``F[:alpha]./F[:beta]``.
 
 .. function:: schur(A, B) -> GeneralizedSchur[:S], GeneralizedSchur[:T], GeneralizedSchur[:Q], GeneralizedSchur[:Z]
 
    See :func:`schurfact`
 
+.. function:: ordschur(S, T, Q, Z, select) -> GeneralizedSchur
+
+   Reorders the generalized Schur factorization of a matrix pair ``(A, B) = (Q*S*Z^{H}, Q*T*Z^{H})`` according to the logical array ``select`` and returns a GeneralizedSchur object ``GS``. The selected eigenvalues appear in the leading diagonal of both ``GS[:S]`` and ``GS[:T]``, and the left and right unitary/orthogonal Schur vectors are reordered such that ``(A, B) = GS[:Q]*(GS[:S], GS[:T])*GS[:Z]^{H}`` still holds and the generalized eigenvalues of ``A`` and ``B`` can still be obtained with ``GS[:alpha]./GS[:beta]``.
+
+.. function:: ordschur!(S, T, Q, Z, select) -> GeneralizedSchur
+
+   Reorders the generalized Schur factorization of a matrix pair, overwriting the matrices ``(S, T, Q, Z)`` in the process. See :func:`ordschur`.
+
+.. function:: ordschur(GS, select) -> GeneralizedSchur
+
+   Reorders the generalized Schur factorization stored in a ``GeneralizedSchur`` object. See :func:`ordschur`.
+
+.. function:: ordschur!(GS, select) -> GeneralizedSchur
+
+   Reorders the generalized Schur factorization stored in a ``GeneralizedSchur`` object, overwriting the object with the new factorization. See :func:`ordschur`.
+
 .. function:: svdfact(A, [thin=true]) -> SVD
 
    Compute the Singular Value Decomposition (SVD) of ``A`` and return an ``SVD`` object. ``U``, ``S``, ``V`` and ``Vt`` can be obtained from the factorization ``F`` with ``F[:U]``, ``F[:S]``, ``F[:V]`` and ``F[:Vt]``, such that ``A = U*diagm(S)*Vt``. If ``thin`` is ``true``, an economy mode decomposition is returned. The algorithm produces ``Vt`` and hence ``Vt`` is more efficient to extract than ``V``. The default is to produce a thin decomposition.
@@ -501,7 +517,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f
 
 .. function:: linreg(x, y) -> [a; b]
 
-   Linear Regression. Returns ``a`` and ``b`` such that ``a+b*x`` is the closest line to the given points ``(x,y)``. In other words, this function determines parameters ``[a, b]`` that minimize the squared error between ``y`` and ``a+b*x``. 
+   Linear Regression. Returns ``a`` and ``b`` such that ``a+b*x`` is the closest line to the given points ``(x,y)``. In other words, this function determines parameters ``[a, b]`` that minimize the squared error between ``y`` and ``a+b*x``.
 
    **Example**::
 
@@ -522,7 +538,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f
 
 .. function:: lyap(A, C)
 
-   Computes the solution ``X`` to the continuous Lyapunov equation ``AX + XA' + C = 0``, where no eigenvalue of ``A`` has a zero real part and no two eigenvalues are negative complex conjugates of each other. 
+   Computes the solution ``X`` to the continuous Lyapunov equation ``AX + XA' + C = 0``, where no eigenvalue of ``A`` has a zero real part and no two eigenvalues are negative complex conjugates of each other.
 
.. function:: sylvester(A, B, C)
@@ -594,8 +610,8 @@ Linear algebra functions in Julia are largely implemented by calling functions f
    * ``v0``: starting vector from which to start the iterations
 
    ``eigs`` returns the ``nev`` requested eigenvalues in ``d``, the corresponding Ritz vectors ``v`` (only if ``ritzvec=true``), the number of converged eigenvalues ``nconv``, the number of iterations ``niter`` and the number of matrix vector multiplications ``nmult``, as well as the final residual vector ``resid``.
-   
-   .. note:: The ``sigma`` and ``which`` keywords interact: the description of eigenvalues searched for by ``which`` do _not_ necessarily refer to the eigenvalues of ``A``, but rather the linear operator constructed by the specification of the iteration mode implied by ``sigma``. 
+
+   .. note:: The ``sigma`` and ``which`` keywords interact: the eigenvalues searched for by ``which`` do *not* necessarily refer to the eigenvalues of ``A``, but rather to those of the linear operator constructed by the specification of the iteration mode implied by ``sigma``.
 
    =============== ================================== ==================================
    ``sigma``       iteration mode                     ``which`` refers to eigenvalues of
   =============== ================================== ==================================
@@ -860,16 +876,16 @@ Usually a function has 4 methods defined, one each for ``Float64``,
 
 .. function:: trsv!(ul, tA, dA, A, b)
 
-   Overwrite ``b`` with the solution to ``A*x = b`` or one of the other two 
-   variants determined by ``tA`` (transpose A) and ``ul`` (triangle of ``A`` 
-   used). ``dA`` indicates if ``A`` is unit-triangular (the diagonal is assumed 
+   Overwrite ``b`` with the solution to ``A*x = b`` or one of the other two
+   variants determined by ``tA`` (transpose A) and ``ul`` (triangle of ``A``
+   used). ``dA`` indicates if ``A`` is unit-triangular (the diagonal is assumed
    to be all ones). Returns the updated ``b``.
 
 .. function:: trsv(ul, tA, dA, A, b)
 
-   Returns the solution to ``A*x = b`` or one of the other two variants 
-   determined by ``tA`` (transpose A) and ``ul`` (triangle of ``A`` is used.) 
-   ``dA`` indicates if ``A`` is unit-triangular (the diagonal is assumed to be 
+   Returns the solution to ``A*x = b`` or one of the other two variants
+   determined by ``tA`` (transpose A) and ``ul`` (triangle of ``A`` is used).
+   ``dA`` indicates if ``A`` is unit-triangular (the diagonal is assumed to be
    all ones).
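
For illustration, a minimal usage sketch consistent with the two ``trsv``
entries above (assumed, not part of this patch: a 0.3-era Julia where these
wrappers are reachable as ``Base.LinAlg.BLAS.trsv`` and ``trsv!``)::

    using Base.LinAlg.BLAS: trsv, trsv!

    A = triu(rand(4, 4)) + 4eye(4)  # well-conditioned upper-triangular matrix
    b = rand(4)
    x = trsv('U', 'N', 'N', A, b)   # solve A*x = b: Upper, No transpose, Non-unit diagonal
    trsv!('U', 'N', 'N', A, b)      # in-place variant: b is overwritten with x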