diff --git a/NEWS.md b/NEWS.md
index 67485da0b7ada..d6137569b63e4 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -217,6 +217,10 @@ This section lists changes that do not have deprecation warnings.
     longer the case; now bindings will only exist for packages brought into scope by
     typing `using Package` or `import Package` ([#17997]).
 
+  * `spdiagm` now always returns a square matrix.
+    To explicitly provide a resulting size `m × n`, use:
+    `I, J, V = SparseArrays.spdiagm_internal(args); sparse(I, J, V, m, n)` ([#23757]).
+
   * `slicedim(b::BitVector, 1, x)` now consistently returns the same thing that
     `b[x]` would, consistent with its documentation. Previously it would return a
     `BitArray{0}` for scalar `x` ([#20233]).
@@ -470,6 +474,13 @@ Deprecated or removed
   * `contains(eq, itr, item)` is deprecated in favor of `any` with a predicate
     ([#23716]).
 
+  * `spdiagm(x::AbstractVector)` has been deprecated in favor of `sparse(Diagonal(x))` ([#23757]).
+
+  * `spdiagm(x::AbstractVector, d::Integer)` has been deprecated in favor of `spdiagm(x => d)` ([#23757]).
+
+  * `spdiagm(x::Tuple{<:AbstractVector}, d::Tuple{<:Integer})` has been deprecated in favor of
+    `spdiagm(x[1] => d[1], x[2] => d[2], ...)` ([#23757]).
+
   * Constructors for `LibGit2.UserPasswordCredentials` and `LibGit2.SSHCredentials` which take a
     `prompt_if_incorrect` argument are deprecated. Instead, prompting behavior is controlled using
     the `allow_prompt` keyword in the `LibGit2.CredentialPayload` constructor ([#23690]).
diff --git a/base/deprecated.jl b/base/deprecated.jl
index bd758c9518491..e06d058cb6e34 100644
--- a/base/deprecated.jl
+++ b/base/deprecated.jl
@@ -1828,6 +1828,13 @@ end
 
 @deprecate contains(eq::Function, itr, x) any(y->eq(y,x), itr)
 
+# PR #23757
+import .SparseArrays.spdiagm
+@deprecate spdiagm(x::AbstractVector) sparse(Diagonal(x))
+@deprecate spdiagm(x::AbstractVector, d::Number) spdiagm(x => d)
+@deprecate spdiagm(x, d) spdiagm((x[i] => d[i] for i in 1:length(x))...)
+@deprecate spdiagm(x, d, m::Integer, n::Integer) spdiagm((x[i] => d[i] for i in 1:length(x))...)
+
 # PR #23690
 # `SSHCredentials` and `UserPasswordCredentials` constructors using `prompt_if_incorrect`
 # are deprecated in base/libgit2/types.jl.
diff --git a/base/linalg/arnoldi.jl b/base/linalg/arnoldi.jl
index ec0b8e0338190..1918cf626bcca 100644
--- a/base/linalg/arnoldi.jl
+++ b/base/linalg/arnoldi.jl
@@ -52,7 +52,7 @@ final residual vector `resid`.
 
 # Examples
 ```jldoctest
-julia> A = spdiagm(1:4);
+julia> A = Diagonal(1:4);
 
 julia> λ, ϕ = eigs(A, nev = 2);
 
@@ -145,7 +145,7 @@ final residual vector `resid`.
 
 # Examples
 ```jldoctest
-julia> A = speye(4, 4); B = spdiagm(1:4);
+julia> A = speye(4, 4); B = Diagonal(1:4);
 
 julia> λ, ϕ = eigs(A, B, nev = 2);
 
@@ -379,7 +379,7 @@ iterations derived from [`eigs`](@ref).
 
 # Examples
 ```jldoctest
-julia> A = spdiagm(1:4);
+julia> A = Diagonal(1:4);
 
 julia> s = svds(A, nsv = 2)[1];
 
diff --git a/base/sparse/sparsematrix.jl b/base/sparse/sparsematrix.jl
index a48e782e34db4..da11076250e5d 100644
--- a/base/sparse/sparsematrix.jl
+++ b/base/sparse/sparsematrix.jl
@@ -1117,7 +1117,7 @@ For expert drivers and additional information, see [`permute!`](@ref).
 
 # Examples
 ```jldoctest
-julia> A = spdiagm(0 => [1, 2, 3, 4], 1 => [5, 6, 7])
+julia> A = spdiagm([1, 2, 3, 4] => 0, [5, 6, 7] => 1)
 4×4 SparseMatrixCSC{Int64,Int64} with 7 stored entries:
   [1, 1]  =  1
   [1, 2]  =  5
@@ -1174,7 +1174,7 @@ and no space beyond that passed in.
 If `trim` is `true`, this method trims `A.ro
 # Examples
 ```jldoctest
-julia> A = spdiagm([1, 2, 3, 4])
+julia> A = sparse(Diagonal([1, 2, 3, 4]))
 4×4 SparseMatrixCSC{Int64,Int64} with 4 stored entries:
   [1, 1]  =  1
   [2, 2]  =  2
@@ -2953,7 +2953,7 @@ stored and otherwise do nothing. Derivative forms:
 
 # Examples
 ```jldoctest
-julia> A = spdiagm([1, 2, 3, 4])
+julia> A = sparse(Diagonal([1, 2, 3, 4]))
 4×4 SparseMatrixCSC{Int64,Int64} with 4 stored entries:
   [1, 1]  =  1
   [2, 2]  =  2
@@ -3299,23 +3299,23 @@ function istril(A::SparseMatrixCSC)
 end
 
-function spdiagm_internal(kv::Pair{<:Integer, <:AbstractVector}...)
+function spdiagm_internal(kv::Pair{<:AbstractVector, <:Integer}...)
     ncoeffs = 0
     for p in kv
-        ncoeffs += length(p.second)
+        ncoeffs += length(p.first)
     end
     I = Vector{Int}(ncoeffs)
     J = Vector{Int}(ncoeffs)
-    V = Vector{promote_type(map(x -> eltype(x.second), kv)...)}(ncoeffs)
+    V = Vector{promote_type(map(x -> eltype(x.first), kv)...)}(ncoeffs)
     i = 0
     for p in kv
-        numel = length(p.second)
-        if p.first < 0
-            row = -p.first
+        numel = length(p.first)
+        if p.second < 0
+            row = -p.second
             col = 0
-        elseif p.first > 0
+        elseif p.second > 0
             row = 0
-            col = p.first
+            col = p.second
         else
             row = 0
             col = 0
@@ -3323,21 +3323,21 @@ function spdiagm_internal(kv::Pair{<:Integer, <:AbstractVector}...)
         r = 1+i:numel+i
         I[r] = row+1:row+numel
         J[r] = col+1:col+numel
-        copy!(view(V, r), p.second)
+        copy!(view(V, r), p.first)
         i += numel
     end
     return I, J, V
 end
 
 """
-    spdiagm(kv::Pair{<:Integer, <:AbstractVector}...)
+    spdiagm(kv::Pair{<:AbstractVector, <:Integer}...)
 
-Construct a sparse diagonal matrix from the diagonal-number/vector pair `kv`,
-placing each vector `kv.second` on the `kv.first` diagonal.
+Construct a sparse diagonal matrix from `Pair`s of vectors and diagonals:
+each vector `kv.first` is placed on the `kv.second` diagonal.
 
 # Examples
 ```jldoctest
-julia> spdiagm(-1 => [1,2,3,4], 1 => [4,3,2,1])
+julia> spdiagm([1,2,3,4] => -1, [4,3,2,1] => 1)
 5×5 SparseMatrixCSC{Int64,Int64} with 8 stored entries:
   [2, 1]  =  1
   [1, 2]  =  4
@@ -3349,7 +3349,7 @@ julia> spdiagm(-1 => [1,2,3,4], 1 => [4,3,2,1])
   [4, 5]  =  1
 ```
 """
-function spdiagm(kv::Pair{<:Integer, <:AbstractVector}...)
+function spdiagm(kv::Pair{<:AbstractVector, <:Integer}...)
     I, J, V = spdiagm_internal(kv...)
     n = max(dimlub(I), dimlub(J))
     return sparse(I, J, V, n, n)
diff --git a/test/linalg/special.jl b/test/linalg/special.jl
index 1742a27a138e7..c65d0c3b2b970 100644
--- a/test/linalg/special.jl
+++ b/test/linalg/special.jl
@@ -141,7 +141,7 @@ end
     # dense matrices, or dense vectors
     densevec = ones(N)
     densemat = diagm(ones(N))
-    spmat = spdiagm(ones(N))
+    spmat = sparse(Diagonal(ones(N)))
     for specialmat in specialmats
         # --> Tests applicable only to pairs of matrices
         for othermat in (spmat, densemat)
diff --git a/test/linalg/uniformscaling.jl b/test/linalg/uniformscaling.jl
index 3cb8feb5b0148..5028be75a3ca7 100644
--- a/test/linalg/uniformscaling.jl
+++ b/test/linalg/uniformscaling.jl
@@ -16,8 +16,8 @@ srand(123)
     @test one(UniformScaling(rand(Complex128))) == one(UniformScaling{Complex128})
     @test eltype(one(UniformScaling(rand(Complex128)))) == Complex128
     @test -one(UniformScaling(2)) == UniformScaling(-1)
-    @test sparse(3I,4,5) == spdiagm(fill(3,4),0,4,5)
-    @test sparse(3I,5,4) == spdiagm(fill(3,4),0,5,4)
+    @test sparse(3I,4,5) == sparse(1:4, 1:4, 3, 4, 5)
+    @test sparse(3I,5,4) == sparse(1:4, 1:4, 3, 5, 4)
     @test norm(UniformScaling(1+im)) ≈ sqrt(2)
 end
 
diff --git a/test/perf/sparse/fem.jl b/test/perf/sparse/fem.jl
index 77fb47e7c6782..2289e0dddff05 100644
--- a/test/perf/sparse/fem.jl
+++ b/test/perf/sparse/fem.jl
@@ -5,7 +5,7 @@
 # assemble the finite-difference laplacian
 function fdlaplacian(N)
     # create a 1D laplacian and a sparse identity
-    fdl1 = spdiagm((ones(N-1),-2*ones(N),ones(N-1)), [-1,0,1])
+    fdl1 = spdiagm(ones(N-1) => -1, -2*ones(N) => 0, ones(N-1) => 1)
     # laplace operator on the full grid
     return kron(speye(N), fdl1) + kron(fdl1, speye(N))
 end
diff --git a/test/sparse/cholmod.jl b/test/sparse/cholmod.jl
index 89099316441c2..ce28cfc886d12 100644
--- a/test/sparse/cholmod.jl
+++ b/test/sparse/cholmod.jl
@@ -563,7 +563,7 @@ Asp = As[p,p]
 LDp = sparse(ldltfact(Asp, perm=[1,2,3])[:LD])
 # LDp = sparse(Fs[:LD])
 Lp, dp = Base.SparseArrays.CHOLMOD.getLd!(copy(LDp))
-Dp = spdiagm(dp)
+Dp = sparse(Diagonal(dp))
 @test Fs\b ≈ Af\b
 @test Fs[:UP]\(Fs[:PtLD]\b) ≈ Af\b
 @test Fs[:DUP]\(Fs[:PtL]\b) ≈ Af\b
diff --git a/test/sparse/sparse.jl b/test/sparse/sparse.jl
index b6f9cc29bf05b..176938f738efa 100644
--- a/test/sparse/sparse.jl
+++ b/test/sparse/sparse.jl
@@ -236,7 +236,7 @@ end
     b = sprandn(5, 5, 0.2)
     @test (maximum(abs.(a\b - Array(a)\Array(b))) < 1000*eps())
 
-    a = spdiagm(randn(5)) + im*spdiagm(randn(5))
+    a = sparse(Diagonal(randn(5) + im*randn(5)))
     b = randn(5,3)
     @test (maximum(abs.(a*b - Array(a)*b)) < 100*eps())
     @test (maximum(abs.(a'b - Array(a)'b)) < 100*eps())
@@ -483,12 +483,6 @@ end
     end
 end
 
-@testset "construction of diagonal SparseMatrixCSCs" begin
-    @test Array(spdiagm((ones(2), ones(2)), (0, -1), 3, 3)) ==
-        [1.0 0.0 0.0; 1.0 1.0 0.0; 0.0 1.0 0.0]
-    @test Array(spdiagm(ones(2), -1, 3, 3)) == diagm(ones(2), -1)
-end
-
 @testset "issue #4986, reinterpret" begin
     sfe22 = speye(Float64, 2)
     mfe22 = eye(Float64, 2)
@@ -1312,10 +1306,6 @@
     @test norm(Array(D) - Array(S)) == 0.0
 end
 
-@testset "spdiagm promotion" begin
-    @test spdiagm(([1,2],[3.5],[4+5im]), (0,1,-1), 2,2) == [1 3.5; 4+5im 2]
-end
-
 @testset "error conditions for reinterpret, reshape, and squeeze" begin
     local A = sprand(Bool, 5, 5, 0.2)
     @test_throws ArgumentError reinterpret(Complex128, A)
@@ -1431,10 +1421,18 @@
 end
 
 @testset "spdiagm" begin
-    v = sprand(10, 0.4)
-    @test spdiagm(v)::SparseMatrixCSC == diagm(Vector(v))
-    @test spdiagm(sparse(ones(5)))::SparseMatrixCSC == speye(5)
-    @test spdiagm(sparse(zeros(5)))::SparseMatrixCSC == spzeros(5,5)
+    @test spdiagm(ones(2) => 0, ones(2) => -1) == [1.0 0.0 0.0; 1.0 1.0 0.0; 0.0 1.0 0.0]
+    @test spdiagm(ones(2) => 0, ones(2) => 1) == [1.0 1.0 0.0; 0.0 1.0 1.0; 0.0 0.0 0.0]
+
+    for (x, y) in ((rand(5), rand(4)), (sparse(rand(5)), sparse(rand(4))))
+        @test spdiagm(x => -1)::SparseMatrixCSC == diagm(x, -1)
+        @test spdiagm(x => 0)::SparseMatrixCSC == diagm(x, 0) == sparse(Diagonal(x))
+        @test spdiagm(x => 1)::SparseMatrixCSC == diagm(x, 1)
+        @test spdiagm(x => 0, y => -1)::SparseMatrixCSC == diagm(x) + diagm(y, -1)
+        @test spdiagm(x => 0, y => 1)::SparseMatrixCSC == diagm(x) + diagm(y, 1)
+    end
+    # promotion
+    @test spdiagm([1,2] => 0, [3.5] => 1, [4+5im] => -1) == [1 3.5; 4+5im 2]
 end
 
 @testset "diag" begin
@@ -1709,16 +1707,16 @@ end
 
 @testset "factorization" begin
     local A
-    A = spdiagm(rand(5)) + sprandn(5, 5, 0.2) + im*sprandn(5, 5, 0.2)
+    A = sparse(Diagonal(rand(5))) + sprandn(5, 5, 0.2) + im*sprandn(5, 5, 0.2)
     A = A + A'
     @test !Base.USE_GPL_LIBS || abs(det(factorize(Hermitian(A)))) ≈ abs(det(factorize(Array(A))))
-    A = spdiagm(rand(5)) + sprandn(5, 5, 0.2) + im*sprandn(5, 5, 0.2)
+    A = sparse(Diagonal(rand(5))) + sprandn(5, 5, 0.2) + im*sprandn(5, 5, 0.2)
     A = A*A'
     @test !Base.USE_GPL_LIBS || abs(det(factorize(Hermitian(A)))) ≈ abs(det(factorize(Array(A))))
-    A = spdiagm(rand(5)) + sprandn(5, 5, 0.2)
+    A = sparse(Diagonal(rand(5))) + sprandn(5, 5, 0.2)
     A = A + A.'
     @test !Base.USE_GPL_LIBS || abs(det(factorize(Symmetric(A)))) ≈ abs(det(factorize(Array(A))))
-    A = spdiagm(rand(5)) + sprandn(5, 5, 0.2)
+    A = sparse(Diagonal(rand(5))) + sprandn(5, 5, 0.2)
     A = A*A.'
     @test !Base.USE_GPL_LIBS || abs(det(factorize(Symmetric(A)))) ≈ abs(det(factorize(Array(A))))
     @test factorize(triu(A)) == triu(A)
@@ -1788,7 +1786,7 @@ end
     N = 4
     densevec = ones(N)
     densemat = diagm(ones(N))
-    spmat = spdiagm(ones(N))
+    spmat = sparse(Diagonal(ones(N)))
     # Test that concatenations of pairs of sparse matrices yield sparse arrays
     @test issparse(vcat(spmat, spmat))
    @test issparse(hcat(spmat, spmat))
@@ -1829,7 +1827,8 @@
 # are called. (Issue #18705.) EDIT: #19239 unified broadcast over a single sparse matrix,
 # eliminating the former operation classes.
 @testset "issue #18705" begin
-    @test isa(sin.(spdiagm(1.0:5.0)), SparseMatrixCSC)
+    S = sparse(Diagonal(collect(1.0:5.0)))
+    @test isa(sin.(S), SparseMatrixCSC)
 end
 
 @testset "issue #19225" begin
@@ -1867,7 +1866,8 @@ end
 # Check that `broadcast` methods specialized for unary operations over
 # `SparseMatrixCSC`s determine a reasonable return type.
 @testset "issue #18974" begin
-    @test eltype(sin.(spdiagm(Int64(1):Int64(4)))) == Float64
+    S = sparse(Diagonal(collect(Int64(1):Int64(4))))
+    @test eltype(sin.(S)) == Float64
 end
 
 # Check calling of unary minus method specialized for SparseMatrixCSCs
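
For reference, a minimal usage sketch of the API as changed by this patch (not part of the diff itself). It assumes the `vector => diagonal` pair order introduced above and the `SparseArrays.spdiagm_internal` helper mentioned in the NEWS.md entry, so it will not run against releases that still use the positional `spdiagm` forms.

```julia
# Sketch only — assumes the Pair-based spdiagm(kv::Pair{<:AbstractVector, <:Integer}...)
# added by this patch, plus the Diagonal/sparse functions it builds on.

# Deprecated form                      New form
# spdiagm(x)                        -> sparse(Diagonal(x))
# spdiagm(x, d)                     -> spdiagm(x => d)
# spdiagm((x1, x2), (d1, d2))       -> spdiagm(x1 => d1, x2 => d2)

# Result is always square: a 4×4 tridiagonal matrix.
A = spdiagm(ones(3) => -1, fill(2.0, 4) => 0, ones(3) => 1)

# For an explicit m×n size, build the triplets directly, as suggested in NEWS.md.
I, J, V = SparseArrays.spdiagm_internal(ones(3) => -1, fill(2.0, 4) => 0, ones(3) => 1)
B = sparse(I, J, V, 4, 5)  # 4×5
```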