diff --git a/NEWS.md b/NEWS.md index ff31ac8120d01..31243e5ae7584 100644 --- a/NEWS.md +++ b/NEWS.md @@ -316,6 +316,8 @@ Library improvements * REPL Undo via Ctrl-/ and Ctrl-_ + * `diagm` now accepts several diagonal index/vector pairs ([#24047]). + * New function `equalto(x)`, which returns a function that compares its argument to `x` using `isequal` ([#23812]). @@ -467,10 +469,16 @@ Deprecated or removed * The tuple-of-types form of `cfunction`, `cfunction(f, returntype, (types...))`, has been deprecated in favor of the tuple-type form `cfunction(f, returntype, Tuple{types...})` ([#23066]). + * `diagm(v::AbstractVector, k::Integer=0)` has been deprecated in favor of + `diagm(k => v)` ([#24047]). + + * `diagm(x::Number)` has been deprecated in favor of `fill(x, 1, 1)` ([#24047]). + * `diagm(A::SparseMatrixCSC)` has been deprecated in favor of `spdiagm(sparsevec(A))` ([#23341]). - * `diagm(A::BitMatrix)` has been deprecated, use `diagm(vec(A))` instead ([#23373]). + * `diagm(A::BitMatrix)` has been deprecated, use `diagm(0 => vec(A))` or + `BitMatrix(Diagonal(vec(A)))` instead ([#23373], [#24047]). 
* `ℯ` (written as `\mscre` or `\euler`) is now the only (by default) exported name for Euler's number, and the type has changed from `Irrational{:e}` to diff --git a/base/deprecated.jl b/base/deprecated.jl index b809e159b1b23..a482ab49f32cd 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -1641,7 +1641,7 @@ import .LinAlg: diagm @deprecate diagm(A::SparseMatrixCSC) sparse(Diagonal(sparsevec(A))) # PR #23373 -@deprecate diagm(A::BitMatrix) diagm(vec(A)) +@deprecate diagm(A::BitMatrix) BitMatrix(Diagonal(vec(A))) # PR 23341 @eval GMP @deprecate gmp_version() version() false @@ -1870,6 +1870,19 @@ end nothing end +function diagm(v::BitVector) + depwarn(string("diagm(v::BitVector) is deprecated, use diagm(0 => v) or ", + "BitMatrix(Diagonal(v)) instead"), :diagm) + return BitMatrix(Diagonal(v)) +end +function diagm(v::AbstractVector) + depwarn(string("diagm(v::AbstractVector) is deprecated, use diagm(0 => v) or ", + "Matrix(Diagonal(v)) instead"), :diagm) + return Matrix(Diagonal(v)) +end +@deprecate diagm(v::AbstractVector, k::Integer) diagm(k => v) +@deprecate diagm(x::Number) fill(x, 1, 1) + # issue #20816 @deprecate strwidth textwidth @deprecate charwidth textwidth diff --git a/base/linalg/bitarray.jl b/base/linalg/bitarray.jl index 4c6981cb561b5..ec4c879758879 100644 --- a/base/linalg/bitarray.jl +++ b/base/linalg/bitarray.jl @@ -74,7 +74,7 @@ function tril(B::BitMatrix, k::Integer=0) A end -## diag and related +## diag function diag(B::BitMatrix) n = minimum(size(B)) @@ -85,15 +85,6 @@ function diag(B::BitMatrix) v end -function diagm(v::BitVector) - n = length(v) - a = falses(n, n) - for i=1:n - a[i,i] = v[i] - end - a -end - ## norm and rank svd(A::BitMatrix) = svd(float(A)) diff --git a/base/linalg/dense.jl b/base/linalg/dense.jl index 4178b16ed223b..12df45aff7beb 100644 --- a/base/linalg/dense.jl +++ b/base/linalg/dense.jl @@ -246,7 +246,8 @@ diagind(A::AbstractMatrix, k::Integer=0) = diagind(size(A,1), size(A,2), k) diag(M, k::Integer=0) The `k`th 
diagonal of a matrix, as a vector. -Use [`diagm`](@ref) to construct a diagonal matrix. + +See also: [`diagm`](@ref) # Examples ```jldoctest @@ -265,29 +266,50 @@ julia> diag(A,1) diag(A::AbstractMatrix, k::Integer=0) = A[diagind(A,k)] """ - diagm(v, k::Integer=0) + diagm(kv::Pair{<:Integer,<:AbstractVector}...) + +Construct a square matrix from `Pair`s of diagonals and vectors. +Vector `kv.second` will be placed on the `kv.first` diagonal. -Construct a matrix by placing `v` on the `k`th diagonal. This constructs a full matrix; if -you want a storage-efficient version with fast arithmetic, use [`Diagonal`](@ref) instead. +See also: [`spdiagm`](@ref), [`Diagonal`](@ref) # Examples ```jldoctest -julia> diagm([1,2,3],1) +julia> diagm(1 => [1,2,3]) 4×4 Array{Int64,2}: 0 1 0 0 0 0 2 0 0 0 0 3 0 0 0 0 + +julia> diagm(1 => [1,2,3], -1 => [4,5]) +4×4 Array{Int64,2}: + 0 1 0 0 + 4 0 2 0 + 0 5 0 3 + 0 0 0 0 ``` """ -function diagm(v::AbstractVector{T}, k::Integer=0) where T - n = length(v) + abs(k) - A = zeros(T,n,n) - A[diagind(A,k)] = v - A +function diagm(kv::Pair{<:Integer,<:AbstractVector}...) + A = diagm_container(kv...) + for p in kv + inds = diagind(A, p.first) + for (i, val) in enumerate(p.second) + A[inds[i]] += val + end + end + return A +end +function diagm_container(kv::Pair{<:Integer,<:AbstractVector}...) + T = promote_type(map(x -> eltype(x.second), kv)...) + n = mapreduce(x -> length(x.second) + abs(x.first), max, kv) + return zeros(T, n, n) +end +function diagm_container(kv::Pair{<:Integer,<:BitVector}...) 
+ n = mapreduce(x -> length(x.second) + abs(x.first), max, kv) + return falses(n, n) end -diagm(x::Number) = (X = Matrix{typeof(x)}(1,1); X[1,1] = x; X) function trace(A::Matrix{T}) where T n = checksquare(A) diff --git a/base/linalg/diagonal.jl b/base/linalg/diagonal.jl index 2804c0a5df0d2..b8f0b53ebd7de 100644 --- a/base/linalg/diagonal.jl +++ b/base/linalg/diagonal.jl @@ -52,7 +52,7 @@ Diagonal{T}(V::AbstractVector) where {T} = Diagonal{T}(convert(AbstractVector{T} convert(::Type{Diagonal{T}}, D::Diagonal{T}) where {T} = D convert(::Type{Diagonal{T}}, D::Diagonal) where {T} = Diagonal{T}(convert(AbstractVector{T}, D.diag)) convert(::Type{AbstractMatrix{T}}, D::Diagonal) where {T} = convert(Diagonal{T}, D) -convert(::Type{Matrix}, D::Diagonal) = diagm(D.diag) +convert(::Type{Matrix}, D::Diagonal) = diagm(0 => D.diag) convert(::Type{Array}, D::Diagonal) = convert(Matrix, D) full(D::Diagonal) = convert(Array, D) diff --git a/base/linalg/generic.jl b/base/linalg/generic.jl index e02abf1d5d840..3fd239416bb14 100644 --- a/base/linalg/generic.jl +++ b/base/linalg/generic.jl @@ -739,13 +739,13 @@ of the [`eltype`](@ref) of `A`. julia> rank(eye(3)) 3 -julia> rank(diagm([1, 0, 2])) 2 +julia> rank(diagm(0 => [1, 0, 2])) 2 -julia> rank(diagm([1, 0.001, 2]), 0.1) 2 +julia> rank(diagm(0 => [1, 0.001, 2]), 0.1) 2 -julia> rank(diagm([1, 0.001, 2]), 0.00001) 3 +julia> rank(diagm(0 => [1, 0.001, 2]), 0.00001) 3 ``` """ diff --git a/base/linalg/lapack.jl b/base/linalg/lapack.jl index 3cfc9b0427017..29dd090a41732 100644 --- a/base/linalg/lapack.jl +++ b/base/linalg/lapack.jl @@ -1161,17 +1161,17 @@ of `A`. `fact` may be `E`, in which case `A` will be equilibrated and copied to `AF`; `F`, in which case `AF` and `ipiv` from a previous `LU` factorization are inputs; or `N`, in which case `A` will be copied to `AF` and then factored.
If `fact = F`, `equed` may be `N`, meaning `A` has not been -equilibrated; `R`, meaning `A` was multiplied by `diagm(R)` from the left; -`C`, meaning `A` was multiplied by `diagm(C)` from the right; or `B`, meaning -`A` was multiplied by `diagm(R)` from the left and `diagm(C)` from the right. +equilibrated; `R`, meaning `A` was multiplied by `Diagonal(R)` from the left; +`C`, meaning `A` was multiplied by `Diagonal(C)` from the right; or `B`, meaning +`A` was multiplied by `Diagonal(R)` from the left and `Diagonal(C)` from the right. If `fact = F` and `equed = R` or `B` the elements of `R` must all be positive. If `fact = F` and `equed = C` or `B` the elements of `C` must all be positive. Returns the solution `X`; `equed`, which is an output if `fact` is not `N`, and describes the equilibration that was performed; `R`, the row equilibration diagonal; `C`, the column equilibration diagonal; `B`, which may be overwritten -with its equilibrated form `diagm(R)*B` (if `trans = N` and `equed = R,B`) or -`diagm(C)*B` (if `trans = T,C` and `equed = C,B`); `rcond`, the reciprocal +with its equilibrated form `Diagonal(R)*B` (if `trans = N` and `equed = R,B`) or +`Diagonal(C)*B` (if `trans = T,C` and `equed = C,B`); `rcond`, the reciprocal condition number of `A` after equilbrating; `ferr`, the forward error bound for each solution vector in `X`; `berr`, the forward error bound for each solution vector in `X`; and `work`, the reciprocal pivot growth factor. diff --git a/base/linalg/svd.jl b/base/linalg/svd.jl index f18faf9ded8b3..7ab282a4a6dee 100644 --- a/base/linalg/svd.jl +++ b/base/linalg/svd.jl @@ -32,7 +32,7 @@ end Compute the singular value decomposition (SVD) of `A` and return an `SVD` object. `U`, `S`, `V` and `Vt` can be obtained from the factorization `F` with `F[:U]`, -`F[:S]`, `F[:V]` and `F[:Vt]`, such that `A = U*diagm(S)*Vt`. +`F[:S]`, `F[:V]` and `F[:Vt]`, such that `A = U*Diagonal(S)*Vt`. 
The algorithm produces `Vt` and hence `Vt` is more efficient to extract than `V`. The singular values in `S` are sorted in descending order. @@ -52,7 +52,7 @@ julia> A = [1. 0. 0. 0. 2.; 0. 0. 3. 0. 0.; 0. 0. 0. 0. 0.; 0. 2. 0. 0. 0.] julia> F = svdfact(A) Base.LinAlg.SVD{Float64,Float64,Array{Float64,2}}([0.0 1.0 0.0 0.0; 1.0 0.0 0.0 0.0; 0.0 0.0 0.0 -1.0; 0.0 0.0 1.0 0.0], [3.0, 2.23607, 2.0, 0.0], [-0.0 0.0 … -0.0 0.0; 0.447214 0.0 … 0.0 0.894427; -0.0 1.0 … -0.0 0.0; 0.0 0.0 … 1.0 0.0]) -julia> F[:U] * diagm(F[:S]) * F[:Vt] +julia> F[:U] * Diagonal(F[:S]) * F[:Vt] 4×5 Array{Float64,2}: 1.0 0.0 0.0 0.0 2.0 0.0 0.0 3.0 0.0 0.0 @@ -71,7 +71,7 @@ svdfact(x::Integer; thin::Bool=true) = svdfact(float(x), thin=thin) svd(A; thin::Bool=true) -> U, S, V Computes the SVD of `A`, returning `U`, vector `S`, and `V` such that -`A == U*diagm(S)*V'`. The singular values in `S` are sorted in descending order. +`A == U*Diagonal(S)*V'`. The singular values in `S` are sorted in descending order. If `thin=true` (default), a thin SVD is returned. For a ``M \\times N`` matrix `A`, `U` is ``M \\times M`` for a full SVD (`thin=false`) and @@ -93,7 +93,7 @@ julia> A = [1. 0. 0. 0. 2.; 0. 0. 3. 0. 0.; 0. 0. 0. 0. 0.; 0. 2. 0. 0. 0.] 
julia> U, S, V = svd(A) ([0.0 1.0 0.0 0.0; 1.0 0.0 0.0 0.0; 0.0 0.0 0.0 -1.0; 0.0 0.0 1.0 0.0], [3.0, 2.23607, 2.0, 0.0], [-0.0 0.447214 -0.0 0.0; 0.0 0.0 1.0 0.0; … ; -0.0 0.0 -0.0 1.0; 0.0 0.894427 0.0 0.0]) -julia> U*diagm(S)*V' +julia> U*Diagonal(S)*V' 4×5 Array{Float64,2}: 1.0 0.0 0.0 0.0 2.0 0.0 0.0 3.0 0.0 0.0 @@ -266,17 +266,17 @@ function getindex(obj::GeneralizedSVD{T}, d::Symbol) where T elseif d == :D1 m = size(obj.U, 1) if m - obj.k - obj.l >= 0 - return [eye(T, obj.k) zeros(T, obj.k, obj.l); zeros(T, obj.l, obj.k) diagm(obj.a[obj.k + 1:obj.k + obj.l]); zeros(T, m - obj.k - obj.l, obj.k + obj.l)] + return [eye(T, obj.k) zeros(T, obj.k, obj.l); zeros(T, obj.l, obj.k) Diagonal(obj.a[obj.k + 1:obj.k + obj.l]); zeros(T, m - obj.k - obj.l, obj.k + obj.l)] else - return [eye(T, m, obj.k) [zeros(T, obj.k, m - obj.k); diagm(obj.a[obj.k + 1:m])] zeros(T, m, obj.k + obj.l - m)] + return [eye(T, m, obj.k) [zeros(T, obj.k, m - obj.k); Diagonal(obj.a[obj.k + 1:m])] zeros(T, m, obj.k + obj.l - m)] end elseif d == :D2 m = size(obj.U, 1) p = size(obj.V, 1) if m - obj.k - obj.l >= 0 - return [zeros(T, obj.l, obj.k) diagm(obj.b[obj.k + 1:obj.k + obj.l]); zeros(T, p - obj.l, obj.k + obj.l)] + return [zeros(T, obj.l, obj.k) Diagonal(obj.b[obj.k + 1:obj.k + obj.l]); zeros(T, p - obj.l, obj.k + obj.l)] else - return [zeros(T, p, obj.k) [diagm(obj.b[obj.k + 1:m]); zeros(T, obj.k + p - m, m - obj.k)] [zeros(T, m - obj.k, obj.k + obj.l - m); eye(T, obj.k + p - m, obj.k + obj.l - m)]] + return [zeros(T, p, obj.k) [Diagonal(obj.b[obj.k + 1:m]); zeros(T, obj.k + p - m, m - obj.k)] [zeros(T, m - obj.k, obj.k + obj.l - m); eye(T, obj.k + p - m, obj.k + obj.l - m)]] end elseif d == :R return obj.R diff --git a/base/linalg/uniformscaling.jl b/base/linalg/uniformscaling.jl index eecc66f1c4243..6dfd55183d96a 100644 --- a/base/linalg/uniformscaling.jl +++ b/base/linalg/uniformscaling.jl @@ -220,7 +220,7 @@ function isapprox(J::UniformScaling,A::AbstractMatrix; 
rtol::Real=rtoldefault(promote_leaf_eltypes(A),eltype(J),atol), nans::Bool=false, norm::Function=vecnorm) n = checksquare(A) - Jnorm = norm === vecnorm ? abs(J.λ)*sqrt(n) : (norm === Base.norm ? abs(J.λ) : norm(diagm(fill(J.λ, n)))) + Jnorm = norm === vecnorm ? abs(J.λ)*sqrt(n) : (norm === Base.norm ? abs(J.λ) : norm(Diagonal(fill(J.λ, n)))) return norm(A - J) <= max(atol, rtol*max(norm(A), Jnorm)) end isapprox(A::AbstractMatrix,J::UniformScaling;kwargs...) = isapprox(J,A;kwargs...) diff --git a/test/arrayops.jl b/test/arrayops.jl index 74d142a37d968..a0250d988cb22 100644 --- a/test/arrayops.jl +++ b/test/arrayops.jl @@ -232,7 +232,7 @@ end tmp[rng...] = A[rng...] @test tmp == cat(3,zeros(Int,2,3),[0 0 0; 0 47 52],zeros(Int,2,3),[0 0 0; 0 127 132]) - @test cat([1,2],1,2,3.,4.,5.) == diagm([1,2,3.,4.,5.]) + @test cat([1,2],1,2,3.,4.,5.) == diagm(0 => [1,2,3.,4.,5.]) blk = [1 2;3 4] tmp = cat([1,3],blk,blk) @test tmp[1:2,1:2,1] == blk @@ -2044,7 +2044,7 @@ end # module AutoRetType @testset "concatenations of dense matrices/vectors yield dense matrices/vectors" begin N = 4 densevec = ones(N) - densemat = diagm(ones(N)) + densemat = diagm(0 => ones(N)) # Test that concatenations of homogeneous pairs of either dense matrices or dense vectors # (i.e., Matrix-Matrix concatenations, and Vector-Vector concatenations) yield dense arrays for densearray in (densevec, densemat) diff --git a/test/bitarray.jl b/test/bitarray.jl index f83308fde0fc4..3263dd612e4dc 100644 --- a/test/bitarray.jl +++ b/test/bitarray.jl @@ -1406,7 +1406,11 @@ timesofar("cat") @check_bit_operation qr(b1) b1 = bitrand(v1) - @check_bit_operation diagm(b1) BitMatrix + @check_bit_operation diagm(0 => b1) BitMatrix + + b1 = bitrand(v1) + b2 = bitrand(v1) + @check_bit_operation diagm(-1 => b1, 1 => b2) BitMatrix b1 = bitrand(n1, n1) @check_bit_operation diag(b1) diff --git a/test/broadcast.jl b/test/broadcast.jl index e88f5a0403d1d..8990e8eefc3c7 100644 --- a/test/broadcast.jl +++ b/test/broadcast.jl @@ 
-135,7 +135,7 @@ for arr in (identity, as_sub) @test A == fill(7, 2, 2) A = arr(zeros(3,3)) broadcast_setindex!(A, 10:12, 1:3, 1:3) - @test A == diagm(10:12) + @test A == diagm(0 => 10:12) @test_throws BoundsError broadcast_setindex!(A, 7, [1,-1], [1 2]) for f in ((==), (<) , (!=), (<=)) diff --git a/test/linalg/arnoldi.jl b/test/linalg/arnoldi.jl index f3d55036b6c48..4c76198b9cbbe 100644 --- a/test/linalg/arnoldi.jl +++ b/test/linalg/arnoldi.jl @@ -186,7 +186,7 @@ end # Ensure singular values from svds are in # the correct order @testset "singular values ordered correctly" begin - B = sparse(diagm([1.0, 2.0, 34.0, 5.0, 6.0])) + B = sparse(Diagonal([1.0, 2.0, 34.0, 5.0, 6.0])) S3 = svds(B, ritzvec=false, nsv=2) @test S3[1][:S] ≈ [34.0, 6.0] S4 = svds(B, nsv=2) diff --git a/test/linalg/bidiag.jl b/test/linalg/bidiag.jl index 166babb7074f9..111dfc4f242de 100644 --- a/test/linalg/bidiag.jl +++ b/test/linalg/bidiag.jl @@ -97,12 +97,12 @@ srand(1) @testset "Constructor and basic properties" begin @test size(T, 1) == size(T, 2) == n @test size(T) == (n, n) - @test Array(T) == diagm(dv) + diagm(ev, uplo == :U ? 1 : -1) + @test Array(T) == diagm(0 => dv, (uplo == :U ? 1 : -1) => ev) @test Bidiagonal(Array(T), uplo) == T @test big.(T) == T - @test Array(abs.(T)) == abs.(diagm(dv)) + abs.(diagm(ev, uplo == :U ? 1 : -1)) - @test Array(real(T)) == real(diagm(dv)) + real(diagm(ev, uplo == :U ? 1 : -1)) - @test Array(imag(T)) == imag(diagm(dv)) + imag(diagm(ev, uplo == :U ? 1 : -1)) + @test Array(abs.(T)) == abs.(diagm(0 => dv, (uplo == :U ? 1 : -1) => ev)) + @test Array(real(T)) == real(diagm(0 => dv, (uplo == :U ? 1 : -1) => ev)) + @test Array(imag(T)) == imag(diagm(0 => dv, (uplo == :U ? 
1 : -1) => ev)) end @testset for func in (conj, transpose, adjoint) @@ -241,7 +241,7 @@ srand(1) Test.test_approx_eq_modphase(u1, u2) Test.test_approx_eq_modphase(v1, v2) end - @test 0 ≈ vecnorm(u2*diagm(d2)*v2'-Tfull) atol=n*max(n^2*eps(relty),vecnorm(u1*diagm(d1)*v1'-Tfull)) + @test 0 ≈ vecnorm(u2*Diagonal(d2)*v2'-Tfull) atol=n*max(n^2*eps(relty),vecnorm(u1*Diagonal(d1)*v1'-Tfull)) @inferred svdvals(T) @inferred svd(T) end @@ -264,7 +264,7 @@ srand(1) TriSym = SymTridiagonal(T.dv, T.ev) @test Array(TriSym*T) ≈ Array(TriSym)*Array(T) # test pass-through of A_mul_B! for AbstractTriangular*Bidiagonal - Tri = UpperTriangular(diagm(T.ev, 1)) + Tri = UpperTriangular(diagm(1 => T.ev)) @test Array(Tri*T) ≈ Array(Tri)*Array(T) end diff --git a/test/linalg/blas.jl b/test/linalg/blas.jl index 1c637a10aac0b..af92239f48bbc 100644 --- a/test/linalg/blas.jl +++ b/test/linalg/blas.jl @@ -227,7 +227,7 @@ srand(100) bH = zeros(elty,2,n) bH[1,2:n] = ev bH[2,:] = dv - fullH = diagm(dv) + diagm(conj(ev),-1) + diagm(ev,1) + fullH = diagm(0 => dv, -1 => conj(ev), 1 => ev) @test BLAS.hbmv('U',1,bH,x) ≈ fullH*x end end diff --git a/test/linalg/dense.jl b/test/linalg/dense.jl index 3413b0140feeb..45dc7a79977d9 100644 --- a/test/linalg/dense.jl +++ b/test/linalg/dense.jl @@ -112,35 +112,28 @@ bimg = randn(n,2)/2 end end # end for loop over arraytype - @testset "Numbers" begin - α = rand(eltya) - A = zeros(eltya,1,1) - A[1,1] = α - @test diagm(α) == A # Test behavior of `diagm` when passed a scalar - end - @testset "Factorize" begin d = rand(eltya,n) e = rand(eltya,n-1) e2 = rand(eltya,n-1) f = rand(eltya,n-2) - A = diagm(d) + A = diagm(0 => d) @test factorize(A) == Diagonal(d) - A += diagm(e,-1) + A += diagm(-1 => e) @test factorize(A) == Bidiagonal(d,e,:L) - A += diagm(f,-2) + A += diagm(-2 => f) @test factorize(A) == LowerTriangular(A) - A = diagm(d) + diagm(e,1) + A = diagm(0 => d, 1 => e) @test factorize(A) == Bidiagonal(d,e,:U) if eltya <: Real - A = diagm(d) + diagm(e,1) + 
diagm(e,-1) + A = diagm(0 => d, 1 => e, -1 => e) @test Matrix(factorize(A)) ≈ Matrix(factorize(SymTridiagonal(d,e))) - A = diagm(d) + diagm(e,1) + diagm(e,-1) + diagm(f,2) + diagm(f,-2) + A = diagm(0 => d, 1 => e, -1 => e, 2 => f, -2 => f) @test inv(factorize(A)) ≈ inv(factorize(Symmetric(A))) end - A = diagm(d) + diagm(e,1) + diagm(e2,-1) + A = diagm(0 => d, 1 => e, -1 => e2) @test Matrix(factorize(A)) ≈ Matrix(factorize(Tridiagonal(e2,d,e))) - A = diagm(d) + diagm(e,1) + diagm(f,2) + A = diagm(0 => d, 1 => e, 2 => f) @test factorize(A) == UpperTriangular(A) end end # for eltya diff --git a/test/linalg/diagonal.jl b/test/linalg/diagonal.jl index fd3d1bbfd9c1b..d2afb5280501f 100644 --- a/test/linalg/diagonal.jl +++ b/test/linalg/diagonal.jl @@ -17,7 +17,7 @@ srand(1) UU+=im*convert(Matrix{elty}, randn(n,n)) end D = Diagonal(dd) - DM = diagm(dd) + DM = Matrix(Diagonal(dd)) @testset "constructor" begin for x in (dd, GenericArray(dd)) @@ -121,7 +121,7 @@ srand(1) end d = convert(Vector{elty}, randn(n)) D2 = Diagonal(d) - DM2= diagm(d) + DM2= Matrix(Diagonal(d)) @testset "Binary operations" begin for op in (+, -, *) @test Array(op(D, D2)) ≈ op(DM, DM2) @@ -222,7 +222,7 @@ srand(1) #logdet if relty <: Real ld=convert(Vector{relty},rand(n)) - @test logdet(Diagonal(ld)) ≈ logdet(diagm(ld)) + @test logdet(Diagonal(ld)) ≈ logdet(Matrix(Diagonal(ld))) end @testset "similar" begin diff --git a/test/linalg/lapack.jl b/test/linalg/lapack.jl index 9f43a885d21b5..7596c050484f3 100644 --- a/test/linalg/lapack.jl +++ b/test/linalg/lapack.jl @@ -104,7 +104,7 @@ end C = rand(elty,6,6) D = copy(C) D = LAPACK.gbtrs!('N',2,1,6,AB,ipiv,D) - A = diagm(dl2,-2) + diagm(dl,-1) + diagm(d) + diagm(du,1) + A = diagm(-2 => dl2, -1 => dl, 0 => d, 1 => du) @test A\C ≈ D @test_throws DimensionMismatch LAPACK.gbtrs!('N',2,1,6,AB,ipiv,ones(elty,7,6)) @test_throws Base.LinAlg.LAPACKException LAPACK.gbtrf!(2,1,6,zeros(AB)) @@ -499,7 +499,7 @@ end @testset "posv and some errors for friends" begin 
@testset for elty in (Float32, Float64, Complex64, Complex128) A = rand(elty,10,10)/100 - A += real(diagm(10*real(rand(elty,10)))) + A += real(diagm(0 => 10*real(rand(elty,10)))) if elty <: Complex A = A + A' else diff --git a/test/linalg/rowvector.jl b/test/linalg/rowvector.jl index 9676f43b42ba7..780da554608b9 100644 --- a/test/linalg/rowvector.jl +++ b/test/linalg/rowvector.jl @@ -91,7 +91,7 @@ end end @testset "Left Division" begin - mat = diagm([1,2,3]) + mat = Matrix(Diagonal([1,2,3])) v = [2,3,4] rv = v.' @@ -101,7 +101,7 @@ end @testset "Multiplication" begin v = [1,2,3] rv = v.' - mat = diagm([1,2,3]) + mat = Matrix(Diagonal([1,2,3])) @test (rv*v) === 14 @test (rv*mat)::RowVector == [1 4 9] @@ -137,7 +137,7 @@ end z = [1+im,2,3] cz = z' - mat = diagm([1+im,2,3]) + mat = Matrix(Diagonal([1+im,2,3])) @test cz*z === 15 + 0im @@ -181,7 +181,7 @@ end end @testset "Right Division" begin - mat = diagm([1,2,3]) + mat = Matrix(Diagonal([1,2,3])) v = [2,3,4] rv = v.' @@ -197,7 +197,7 @@ end end @testset "Sparse ambiguity methods" begin - mat = sparse(diagm([1,2,3])) + mat = sparse(Diagonal([1,2,3])) v = [2,3,4] rv = v.' diff --git a/test/linalg/special.jl b/test/linalg/special.jl index 9e77c72537deb..bc1cfb0f34baf 100644 --- a/test/linalg/special.jl +++ b/test/linalg/special.jl @@ -140,8 +140,8 @@ end # Test concatenating pairwise combinations of special matrices with sparse matrices, # dense matrices, or dense vectors densevec = ones(N) - densemat = diagm(ones(N)) - spmat = sparse(Diagonal(ones(N))) + densemat = diagm(0 => densevec) + spmat = spdiagm(0 => densevec) for specialmat in specialmats # --> Tests applicable only to pairs of matrices for othermat in (spmat, densemat) diff --git a/test/linalg/symmetric.jl b/test/linalg/symmetric.jl index ddb94980582f4..21429d2ce003c 100644 --- a/test/linalg/symmetric.jl +++ b/test/linalg/symmetric.jl @@ -341,9 +341,9 @@ end #Issue #7647: test xsyevr, xheevr, xstevr drivers. 
@testset "Eigenvalues in interval for $(typeof(Mi7647))" for Mi7647 in - (Symmetric(diagm(1.0:3.0)), - Hermitian(diagm(1.0:3.0)), - Hermitian(diagm(complex(1.0:3.0))), + (Symmetric(diagm(0 => 1.0:3.0)), + Hermitian(diagm(0 => 1.0:3.0)), + Hermitian(diagm(0 => complex(1.0:3.0))), SymTridiagonal([1.0:3.0;], zeros(2))) @test eigmin(Mi7647) == eigvals(Mi7647, 0.5, 1.5)[1] == 1.0 @test eigmax(Mi7647) == eigvals(Mi7647, 2.5, 3.5)[1] == 3.0 diff --git a/test/linalg/triangular.jl b/test/linalg/triangular.jl index 79f9b2e2b37a8..e704b2fcd0bf5 100644 --- a/test/linalg/triangular.jl +++ b/test/linalg/triangular.jl @@ -110,7 +110,7 @@ for elty1 in (Float32, Float64, BigFloat, Complex64, Complex128, Complex{BigFloa @test tril(A1,1) == t1(tril(tril(Matrix(A1), 1))) @test_throws ArgumentError tril!(A1, -n - 2) @test_throws ArgumentError tril!(A1, n) - @test triu(A1,0) == t1(diagm(diag(A1))) + @test triu(A1,0) == t1(diagm(0 => diag(A1))) @test triu(A1,-1) == t1(tril(triu(A1.data,-1))) @test triu(A1,1) == LowerTriangular(zeros(A1.data)) @test_throws ArgumentError triu!(A1, -n) @@ -121,7 +121,7 @@ for elty1 in (Float32, Float64, BigFloat, Complex64, Complex128, Complex{BigFloa @test triu(A1,-1) == t1(triu(triu(Matrix(A1), -1))) @test_throws ArgumentError triu!(A1, -n) @test_throws ArgumentError triu!(A1, n + 2) - @test tril(A1,0) == t1(diagm(diag(A1))) + @test tril(A1,0) == t1(diagm(0 => diag(A1))) @test tril(A1,1) == t1(triu(tril(A1.data,1))) @test tril(A1,-1) == UpperTriangular(zeros(A1.data)) @test_throws ArgumentError tril!(A1, -n - 2) @@ -250,7 +250,7 @@ for elty1 in (Float32, Float64, BigFloat, Complex64, Complex128, Complex{BigFloa if !(elty1 in (BigFloat, Complex{BigFloat})) # Not handled yet vals, vecs = eig(A1) if (t1 == UpperTriangular || t1 == LowerTriangular) && elty1 != Int # Cannot really handle degenerate eigen space and Int matrices will probably have repeated eigenvalues. 
- @test vecs*diagm(vals)/vecs ≈ A1 atol=sqrt(eps(float(real(one(vals[1])))))*(norm(A1,Inf)*n)^2 + @test vecs*diagm(0 => vals)/vecs ≈ A1 atol=sqrt(eps(float(real(one(vals[1])))))*(norm(A1,Inf)*n)^2 end end @@ -476,10 +476,10 @@ for eltya in (Float32, Float64, Complex64, Complex128, BigFloat, Int) end # Issue 10742 and similar -@test istril(UpperTriangular(diagm([1,2,3,4]))) -@test istriu(LowerTriangular(diagm([1,2,3,4]))) -@test isdiag(UpperTriangular(diagm([1,2,3,4]))) -@test isdiag(LowerTriangular(diagm([1,2,3,4]))) +@test istril(UpperTriangular(diagm(0 => [1,2,3,4]))) +@test istriu(LowerTriangular(diagm(0 => [1,2,3,4]))) +@test isdiag(UpperTriangular(diagm(0 => [1,2,3,4]))) +@test isdiag(LowerTriangular(diagm(0 => [1,2,3,4]))) @test !isdiag(UpperTriangular(rand(4, 4))) @test !isdiag(LowerTriangular(rand(4, 4))) diff --git a/test/sparse/cholmod.jl b/test/sparse/cholmod.jl index 9e00297a13d78..48744801989be 100644 --- a/test/sparse/cholmod.jl +++ b/test/sparse/cholmod.jl @@ -643,9 +643,9 @@ Fnew = deserialize(b) # test \ for Factor and StridedVecOrMat let x = rand(5), - A = cholfact(sparse(diagm(x.\1))) + A = cholfact(sparse(Diagonal(x.\1))) @test A\view(ones(10),1:2:10) ≈ x - @test A\view(eye(5,5),:,:) ≈ diagm(x) + @test A\view(eye(5,5),:,:) ≈ Matrix(Diagonal(x)) end # Real factorization and complex rhs diff --git a/test/sparse/sparse.jl b/test/sparse/sparse.jl index 9cd700b3e68bf..f5e815a22b370 100644 --- a/test/sparse/sparse.jl +++ b/test/sparse/sparse.jl @@ -598,7 +598,7 @@ end a116 = reshape(1:(ni*nj), ni, nj) s116 = sparse(a116) - ad116 = diagm(diag(a116)) + ad116 = diagm(0 => diag(a116)) sd116 = sparse(ad116) for (aa116, ss116) in [(a116, s116), (ad116, sd116)] @@ -1413,11 +1413,11 @@ end @test spdiagm(0 => ones(2), 1 => ones(2)) == [1.0 1.0 0.0; 0.0 1.0 1.0; 0.0 0.0 0.0] for (x, y) in ((rand(5), rand(4)),(sparse(rand(5)), sparse(rand(4)))) - @test spdiagm(-1 => x)::SparseMatrixCSC == diagm(x, -1) - @test spdiagm( 0 => x)::SparseMatrixCSC == diagm(x, 0) == 
sparse(Diagonal(x)) - @test spdiagm(-1 => x)::SparseMatrixCSC == diagm(x, -1) - @test spdiagm(0 => x, -1 => y)::SparseMatrixCSC == diagm(x) + diagm(y, -1) - @test spdiagm(0 => x, 1 => y)::SparseMatrixCSC == diagm(x) + diagm(y, 1) + @test spdiagm(-1 => x)::SparseMatrixCSC == diagm(-1 => x) + @test spdiagm( 0 => x)::SparseMatrixCSC == diagm( 0 => x) == sparse(Diagonal(x)) + @test spdiagm(-1 => x)::SparseMatrixCSC == diagm(-1 => x) + @test spdiagm(0 => x, -1 => y)::SparseMatrixCSC == diagm(0 => x, -1 => y) + @test spdiagm(0 => x, 1 => y)::SparseMatrixCSC == diagm(0 => x, 1 => y) end # promotion @test spdiagm(0 => [1,2], 1 => [3.5], -1 => [4+5im]) == [1 3.5; 4+5im 2] @@ -1777,8 +1777,8 @@ end @testset "sparse and dense concatenations" begin N = 4 densevec = ones(N) - densemat = diagm(ones(N)) - spmat = sparse(Diagonal(ones(N))) + densemat = diagm(0 => densevec) + spmat = spdiagm(0 => densevec) # Test that concatenations of pairs of sparse matrices yield sparse arrays @test issparse(vcat(spmat, spmat)) @test issparse(hcat(spmat, spmat))