diff --git a/NEWS.md b/NEWS.md
index b1592f2d63e9c..5e8f35fd77543 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -511,6 +511,13 @@ This section lists changes that do not have deprecation warnings.
     This change makes `@schedule` redundant with `@async`, so `@schedule` has been
     deprecated ([#27164]).
 
+  * `norm(A::AbstractMatrix, p=2)` no longer computes the operator/matrix norm but the `norm` of `A`
+    as for other iterables, i.e. as if it were a vector. In particular, `norm(A::AbstractMatrix)` is the
+    Frobenius norm. To compute the operator/matrix norm, use the new function `opnorm` ([#27401]).
+
+  * `dot(u, v)` now acts recursively. Instead of `sum(u[i]' * v[i] for i in ...)`, it computes
+    `sum(dot(u[i], v[i]) for i in ...)`, just as `vecdot` did before ([#27401]).
+
 Library improvements
 --------------------
@@ -1274,6 +1281,8 @@ Deprecated or removed
   * The functions `eigs` and `svds` have been moved to the `Arpack.jl` package
     ([#27616]).
 
+  * `vecdot` and `vecnorm` are deprecated in favor of `dot` and `norm`, respectively ([#27401]).
+
 Command-line option changes
 ---------------------------
@@ -1610,3 +1619,4 @@ Command-line option changes
 [#27189]: https://github.com/JuliaLang/julia/issues/27189
 [#27212]: https://github.com/JuliaLang/julia/issues/27212
 [#27248]: https://github.com/JuliaLang/julia/issues/27248
+[#27401]: https://github.com/JuliaLang/julia/issues/27401
diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md
index 3b3d6c95d8a0b..6e7d122da83cc 100644
--- a/stdlib/LinearAlgebra/docs/src/index.md
+++ b/stdlib/LinearAlgebra/docs/src/index.md
@@ -299,7 +299,6 @@ Linear algebra functions in Julia are largely implemented by calling functions f
 Base.:*(::AbstractMatrix, ::AbstractMatrix)
 Base.:\(::AbstractMatrix, ::AbstractVecOrMat)
 LinearAlgebra.dot
-LinearAlgebra.vecdot
 LinearAlgebra.cross
 LinearAlgebra.factorize
 LinearAlgebra.Diagonal
@@ -358,7 +357,7 @@ LinearAlgebra.diag
 LinearAlgebra.diagm
 LinearAlgebra.rank
 LinearAlgebra.norm
-LinearAlgebra.vecnorm
+LinearAlgebra.opnorm
 LinearAlgebra.normalize!
 LinearAlgebra.normalize
 LinearAlgebra.cond
diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl
index 69fe8cb3139d8..d841f2fc8f2e5 100644
--- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl
+++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl
@@ -126,6 +126,7 @@ export
     qr!,
     lq,
     lq!,
+    opnorm,
     rank,
     rdiv!,
     schur,
@@ -143,8 +144,6 @@ export
     triu,
     tril!,
     triu!,
-    vecdot,
-    vecnorm,
 
 # Operators
     \,
diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl
index f670bf5e59b61..0f422b70d44cb 100644
--- a/stdlib/LinearAlgebra/src/dense.jl
+++ b/stdlib/LinearAlgebra/src/dense.jl
@@ -5,7 +5,7 @@
 ## BLAS cutoff threshold constants
 const SCAL_CUTOFF = 2048
-const DOT_CUTOFF = 128
+#TODO const DOT_CUTOFF = 128
 const ASUM_CUTOFF = 32
 const NRM2_CUTOFF = 32
@@ -137,11 +137,11 @@ function norm(x::StridedVector{T}, rx::Union{UnitRange{TI},AbstractRange{TI}}) w
     GC.@preserve x BLAS.nrm2(length(rx), pointer(x)+(first(rx)-1)*sizeof(T), step(rx))
 end
 
-vecnorm1(x::Union{Array{T},StridedVector{T}}) where {T<:BlasReal} =
-    length(x) < ASUM_CUTOFF ? generic_vecnorm1(x) : BLAS.asum(x)
+norm1(x::Union{Array{T},StridedVector{T}}) where {T<:BlasReal} =
+    length(x) < ASUM_CUTOFF ? generic_norm1(x) : BLAS.asum(x)
 
-vecnorm2(x::Union{Array{T},StridedVector{T}}) where {T<:BlasFloat} =
-    length(x) < NRM2_CUTOFF ? 
generic_vecnorm2(x) : BLAS.nrm2(x) +norm2(x::Union{Array{T},StridedVector{T}}) where {T<:BlasFloat} = + length(x) < NRM2_CUTOFF ? generic_norm2(x) : BLAS.nrm2(x) """ triu!(M, k::Integer) @@ -509,7 +509,7 @@ function exp!(A::StridedMatrix{T}) where T<:BlasFloat return copytri!(parent(exp(Hermitian(A))), 'U', true) end ilo, ihi, scale = LAPACK.gebal!('B', A) # modifies A - nA = norm(A, 1) + nA = opnorm(A, 1) Inn = Matrix{T}(I, n, n) ## For sufficiently small nA, use lower order Padé-Approximations if (nA <= 2.1) @@ -1370,8 +1370,8 @@ function cond(A::AbstractMatrix, p::Real=2) end throw(ArgumentError("p-norm must be 1, 2 or Inf, got $p")) end -_cond1Inf(A::StridedMatrix{<:BlasFloat}, p::Real) = _cond1Inf(lu(A), p, norm(A, p)) -_cond1Inf(A::AbstractMatrix, p::Real) = norm(A, p)*norm(inv(A), p) +_cond1Inf(A::StridedMatrix{<:BlasFloat}, p::Real) = _cond1Inf(lu(A), p, opnorm(A, p)) +_cond1Inf(A::AbstractMatrix, p::Real) = opnorm(A, p)*opnorm(inv(A), p) ## Lyapunov and Sylvester equation diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl index f79afa785b553..6842c21a895ca 100644 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ b/stdlib/LinearAlgebra/src/deprecated.jl @@ -6,6 +6,12 @@ using Base: @deprecate, depwarn @deprecate cond(F::LinearAlgebra.LU, p::Integer) cond(convert(AbstractArray, F), p) +# deprecate vecnorm in favor of norm +@deprecate vecnorm norm + +# deprecate vecdot in favor of dot +@deprecate vecdot dot + # PR #22188 export cholfact, cholfact! @deprecate cholfact!(A::StridedMatrix, uplo::Symbol, ::Type{Val{false}}) cholesky!(Hermitian(A, uplo), Val(false)) diff --git a/stdlib/LinearAlgebra/src/generic.jl b/stdlib/LinearAlgebra/src/generic.jl index 78bf499553ef0..bed2605cb75a2 100644 --- a/stdlib/LinearAlgebra/src/generic.jl +++ b/stdlib/LinearAlgebra/src/generic.jl @@ -247,10 +247,10 @@ tril!(M::AbstractMatrix) = tril!(M,0) diag(A::AbstractVector) = throw(ArgumentError("use diagm instead of diag to construct a diagonal matrix")) ########################################################################################### -# Inner products and norms +# Dot products and norms -# special cases of vecnorm; note that they don't need to handle isempty(x) -function generic_vecnormMinusInf(x) +# special cases of norm; note that they don't need to handle isempty(x) +function generic_normMinusInf(x) (v, s) = iterate(x)::Tuple minabs = norm(v) while true @@ -263,7 +263,7 @@ function generic_vecnormMinusInf(x) return float(minabs) end -function generic_vecnormInf(x) +function generic_normInf(x) (v, s) = iterate(x)::Tuple maxabs = norm(v) while true @@ -276,7 +276,7 @@ function generic_vecnormInf(x) return float(maxabs) end -function generic_vecnorm1(x) +function generic_norm1(x) (v, s) = iterate(x)::Tuple av = float(norm(v)) T = typeof(av) @@ -295,8 +295,8 @@ norm_sqr(x) = norm(x)^2 norm_sqr(x::Number) = abs2(x) norm_sqr(x::Union{T,Complex{T},Rational{T}}) where {T<:Integer} = abs2(float(x)) -function generic_vecnorm2(x) - maxabs = vecnormInf(x) +function generic_norm2(x) + maxabs = normInf(x) (maxabs == 0 || isinf(maxabs)) && return maxabs (v, s) = iterate(x)::Tuple T = typeof(maxabs) @@ -323,10 +323,10 @@ end # Compute L_p norm ‖x‖ₚ = sum(abs(x).^p)^(1/p) # (Not technically a "norm" for p < 1.) -function generic_vecnormp(x, p) +function generic_normp(x, p) (v, s) = iterate(x)::Tuple if p > 1 || p < -1 # might need to rescale to avoid overflow - maxabs = p > 1 ? vecnormInf(x) : vecnormMinusInf(x) + maxabs = p > 1 ? 
normInf(x) : normMinusInf(x)
         (maxabs == 0 || isinf(maxabs)) && return maxabs
         T = typeof(maxabs)
     else
@@ -354,85 +354,123 @@ function generic_normp(x, p)
     end
 end
 
-vecnormMinusInf(x) = generic_vecnormMinusInf(x)
-vecnormInf(x) = generic_vecnormInf(x)
-vecnorm1(x) = generic_vecnorm1(x)
-vecnorm2(x) = generic_vecnorm2(x)
-vecnormp(x, p) = generic_vecnormp(x, p)
+normMinusInf(x) = generic_normMinusInf(x)
+normInf(x) = generic_normInf(x)
+norm1(x) = generic_norm1(x)
+norm2(x) = generic_norm2(x)
+normp(x, p) = generic_normp(x, p)
+
 """
-    vecnorm(A, p::Real=2)
+    norm(A, p::Real=2)
 
 For any iterable container `A` (including arrays of any dimension) of numbers (or any
 element type for which `norm` is defined), compute the `p`-norm (defaulting to `p=2`) as if
 `A` were a vector of the corresponding length.
 
-The `p`-norm is defined as:
+The `p`-norm is defined as
 ```math
 \\|A\\|_p = \\left( \\sum_{i=1}^n | a_i | ^p \\right)^{1/p}
 ```
-with ``a_i`` the entries of ``A`` and ``n`` its length.
+with ``a_i`` the entries of ``A``, ``| a_i |`` the [`norm`](@ref) of ``a_i``, and
+``n`` the length of ``A``. Since the `p`-norm is computed using the [`norm`](@ref)s
+of the entries of `A`, the `p`-norm of a vector of vectors is in general not
+compatible with interpreting it as a block vector unless `p == 2`.
 
 `p` can assume any numeric value (even though not all values produce a
-mathematically valid vector norm). In particular, `vecnorm(A, Inf)` returns the largest value
-in `abs(A)`, whereas `vecnorm(A, -Inf)` returns the smallest. If `A` is a matrix and `p=2`,
+mathematically valid vector norm). In particular, `norm(A, Inf)` returns the largest value
+in `abs.(A)`, whereas `norm(A, -Inf)` returns the smallest. If `A` is a matrix and `p=2`,
 then this is equivalent to the Frobenius norm.
 
+The second argument `p` is not necessarily part of the interface for `norm`, i.e. a custom
+type may implement only `norm(A)` without a second argument.
+
+Use [`opnorm`](@ref) to compute the operator norm of a matrix.
+
 # Examples
 ```jldoctest
-julia> vecnorm([1 2 3; 4 5 6; 7 8 9])
+julia> v = [3, -2, 6]
+3-element Array{Int64,1}:
+  3
+ -2
+  6
+
+julia> norm(v)
+7.0
+
+julia> norm(v, 1)
+11.0
+
+julia> norm(v, Inf)
+6.0
+
+julia> norm([1 2 3; 4 5 6; 7 8 9])
+16.881943016134134
+
+julia> norm([1 2 3 4 5 6 7 8 9])
 16.881943016134134
 
-julia> vecnorm([1 2 3 4 5 6 7 8 9])
+julia> norm(1:9)
 16.881943016134134
+
+julia> norm(hcat(v,v), 1) == norm(vcat(v,v), 1) != norm([v,v], 1)
+true
+
+julia> norm(hcat(v,v), 2) == norm(vcat(v,v), 2) == norm([v,v], 2)
+true
+
+julia> norm(hcat(v,v), Inf) == norm(vcat(v,v), Inf) != norm([v,v], Inf)
+true
 ```
 """
-function vecnorm(itr, p::Real=2)
+function norm(itr, p::Real=2)
     isempty(itr) && return float(norm(zero(eltype(itr))))
     if p == 2
-        return vecnorm2(itr)
+        return norm2(itr)
     elseif p == 1
-        return vecnorm1(itr)
+        return norm1(itr)
     elseif p == Inf
-        return vecnormInf(itr)
+        return normInf(itr)
     elseif p == 0
         return typeof(float(norm(first(itr))))(count(!iszero, itr))
     elseif p == -Inf
-        return vecnormMinusInf(itr)
+        return normMinusInf(itr)
     else
-        vecnormp(itr,p)
+        normp(itr, p)
     end
 end
 
 """
-    vecnorm(x::Number, p::Real=2)
+    norm(x::Number, p::Real=2)
 
-For numbers, return ``\\left( |x|^p \\right) ^{1/p}``.
+For numbers, return ``\\left( |x|^p \\right)^{1/p}``. 
# Examples ```jldoctest -julia> vecnorm(2, 1) +julia> norm(2, 1) 2 -julia> vecnorm(-2, 1) +julia> norm(-2, 1) 2 -julia> vecnorm(2, 2) +julia> norm(2, 2) 2 -julia> vecnorm(-2, 2) +julia> norm(-2, 2) 2 -julia> vecnorm(2, Inf) +julia> norm(2, Inf) 2 -julia> vecnorm(-2, Inf) +julia> norm(-2, Inf) 2 ``` """ -@inline vecnorm(x::Number, p::Real=2) = p == 0 ? (x==0 ? zero(abs(x)) : oneunit(abs(x))) : abs(x) +@inline norm(x::Number, p::Real=2) = p == 0 ? (x==0 ? zero(abs(x)) : oneunit(abs(x))) : abs(x) + -function norm1(A::AbstractMatrix{T}) where T +# special cases of opnorm +function opnorm1(A::AbstractMatrix{T}) where T m, n = size(A) Tnorm = typeof(float(real(zero(T)))) Tsum = promote_type(Float64, Tnorm) @@ -448,13 +486,15 @@ function norm1(A::AbstractMatrix{T}) where T end return convert(Tnorm, nrm) end -function norm2(A::AbstractMatrix{T}) where T + +function opnorm2(A::AbstractMatrix{T}) where T m,n = size(A) - if m == 1 || n == 1 return vecnorm2(A) end + if m == 1 || n == 1 return norm2(A) end Tnorm = typeof(float(real(zero(T)))) (m == 0 || n == 0) ? zero(Tnorm) : convert(Tnorm, svdvals(A)[1]) end -function normInf(A::AbstractMatrix{T}) where T + +function opnormInf(A::AbstractMatrix{T}) where T m,n = size(A) Tnorm = typeof(float(real(zero(T)))) Tsum = promote_type(Float64, Tnorm) @@ -471,58 +511,25 @@ function normInf(A::AbstractMatrix{T}) where T return convert(Tnorm, nrm) end -""" - norm(A::AbstractArray, p::Real=2) - -Compute the `p`-norm of a vector or the operator norm of a matrix `A`, -defaulting to the 2-norm. - - norm(A::AbstractVector, p::Real=2) - -For vectors, this is equivalent to [`vecnorm`](@ref) and equal to: -```math -\\|A\\|_p = \\left( \\sum_{i=1}^n | a_i | ^p \\right)^{1/p} -``` -with ``a_i`` the entries of ``A`` and ``n`` its length. - -`p` can assume any numeric value (even though not all values produce a -mathematically valid vector norm). In particular, `norm(A, Inf)` returns the largest value -in `abs(A)`, whereas `norm(A, -Inf)` returns the smallest. - -# Examples -```jldoctest -julia> v = [3, -2, 6] -3-element Array{Int64,1}: - 3 - -2 - 6 - -julia> norm(v) -7.0 - -julia> norm(v, Inf) -6.0 -``` -""" -norm(x::AbstractVector, p::Real=2) = vecnorm(x, p) """ - norm(A::AbstractMatrix, p::Real=2) + opnorm(A::AbstractMatrix, p::Real=2) -For matrices, the matrix norm induced by the vector `p`-norm is used, where valid values of -`p` are `1`, `2`, or `Inf`. (Note that for sparse matrices, `p=2` is currently not -implemented.) Use [`vecnorm`](@ref) to compute the Frobenius norm. +Compute the operator norm (or matrix norm) induced by the vector `p`-norm, +where valid values of `p` are `1`, `2`, or `Inf`. (Note that for sparse matrices, +`p=2` is currently not implemented.) Use [`norm`](@ref) to compute the Frobenius +norm. -When `p=1`, the matrix norm is the maximum absolute column sum of `A`: +When `p=1`, the operator norm is the maximum absolute column sum of `A`: ```math \\|A\\|_1 = \\max_{1 ≤ j ≤ n} \\sum_{i=1}^m | a_{ij} | ``` with ``a_{ij}`` the entries of ``A``, and ``m`` and ``n`` its dimensions. -When `p=2`, the matrix norm is the spectral norm, equal to the largest +When `p=2`, the operator norm is the spectral norm, equal to the largest singular value of `A`. 
-When `p=Inf`, the matrix norm is the maximum absolute row sum of `A`:
+When `p=Inf`, the operator norm is the maximum absolute row sum of `A`:
 ```math
 \\|A\\|_\\infty = \\max_{1 ≤ i ≤ m} \\sum _{j=1}^n | a_{ij} |
 ```
 with ``a_{ij}`` the entries of ``A``, and ``m`` and ``n`` its dimensions.
 
 # Examples
 ```jldoctest
@@ -534,40 +541,44 @@ julia> A = [1 -2 -3; 2 3 -1]
  1  -2  -3
  2   3  -1
 
-julia> norm(A, Inf)
+julia> opnorm(A, Inf)
 6.0
+
+julia> opnorm(A, 1)
+5.0
 ```
 """
-function norm(A::AbstractMatrix, p::Real=2)
+function opnorm(A::AbstractMatrix, p::Real=2)
     if p == 2
-        return norm2(A)
+        return opnorm2(A)
     elseif p == 1
-        return norm1(A)
+        return opnorm1(A)
     elseif p == Inf
-        return normInf(A)
+        return opnormInf(A)
     else
         throw(ArgumentError("invalid p-norm p=$p. Valid: 1, 2, Inf"))
     end
 end
 
 """
-    norm(x::Number, p::Real=2)
+    opnorm(x::Number, p::Real=2)
 
 For numbers, return ``\\left( |x|^p \\right)^{1/p}``.
-This is equivalent to [`vecnorm`](@ref).
+This is equivalent to [`norm`](@ref).
 """
-@inline norm(x::Number, p::Real=2) = vecnorm(x, p)
+@inline opnorm(x::Number, p::Real=2) = norm(x, p)
 
 """
-    norm(A::Adjoint{<:Any,<:AbstracVector}, q::Real=2)
-    norm(A::Transpose{<:Any,<:AbstracVector}, q::Real=2)
+    opnorm(A::Adjoint{<:Any,<:AbstractVector}, q::Real=2)
+    opnorm(A::Transpose{<:Any,<:AbstractVector}, q::Real=2)
 
-For Adjoint/Transpose-wrapped vectors, return the ``q``-norm of `A`, which is
-equivalent to the p-norm with value `p = q/(q-1)`. They coincide at `p = q = 2`.
+For Adjoint/Transpose-wrapped vectors, return the operator ``q``-norm of `A`, which is
+equivalent to the `p`-norm with value `p = q/(q-1)`. They coincide at `p = q = 2`.
+Use [`norm`](@ref) to compute the `p`-norm of `A` as a vector.
 
 The difference in norm between a vector space and its dual arises to preserve
-the relationship between duality and the inner product, and the result is
-consistent with the p-norm of `1 × n` matrix.
+the relationship between duality and the dot product, and the result is
+consistent with the operator `p`-norm of a `1 × n` matrix.
 
 # Examples
 ```jldoctest
@@ -575,31 +586,42 @@ julia> v = [1; im];
 
 julia> vc = v';
 
-julia> norm(vc, 1)
+julia> opnorm(vc, 1)
 1.0
 
+julia> norm(vc, 1)
+2.0
+
 julia> norm(v, 1)
 2.0
 
+julia> opnorm(vc, 2)
+1.4142135623730951
+
 julia> norm(vc, 2)
 1.4142135623730951
 
 julia> norm(v, 2)
 1.4142135623730951
 
-julia> norm(vc, Inf)
+julia> opnorm(vc, Inf)
 2.0
 
+julia> norm(vc, Inf)
+1.0
+
 julia> norm(v, Inf)
 1.0
 ```
 """
-norm(v::TransposeAbsVec, q::Real) = q == Inf ? norm(v.parent, 1) : norm(v.parent, q/(q-1))
-norm(v::AdjointAbsVec, q::Real) = q == Inf ? norm(conj(v.parent), 1) : norm(conj(v.parent), q/(q-1))
-norm(v::AdjointAbsVec) = norm(conj(v.parent))
-norm(v::TransposeAbsVec) = norm(v.parent)
+opnorm(v::TransposeAbsVec, q::Real) = q == Inf ? norm(v.parent, 1) : norm(v.parent, q/(q-1))
+opnorm(v::AdjointAbsVec, q::Real) = q == Inf ? 
norm(conj(v.parent), 1) : norm(conj(v.parent), q/(q-1)) +opnorm(v::AdjointAbsVec) = norm(conj(v.parent)) +opnorm(v::TransposeAbsVec) = norm(v.parent) -function vecdot(x::AbstractArray, y::AbstractArray) +norm(v::Union{TransposeAbsVec,AdjointAbsVec}, p::Real) = norm(v.parent, p) + +function dot(x::AbstractArray, y::AbstractArray) lx = _length(x) if lx != _length(y) throw(DimensionMismatch("first array has length $(lx) which does not match the length of the second, $(_length(y)).")) @@ -609,32 +631,36 @@ function vecdot(x::AbstractArray, y::AbstractArray) end s = zero(dot(first(x), first(y))) for (Ix, Iy) in zip(eachindex(x), eachindex(y)) - @inbounds s += dot(x[Ix], y[Iy]) + @inbounds s += dot(x[Ix], y[Iy]) end s end """ - vecdot(x, y) + dot(x, y) + x ⋅ y For any iterable containers `x` and `y` (including arrays of any dimension) of numbers (or -any element type for which `dot` is defined), compute the Euclidean dot product (the sum of -`dot(x[i],y[i])`) as if they were vectors. +any element type for which `dot` is defined), compute the dot product (or inner product +or scalar product), i.e. the sum of `dot(x[i],y[i])`, as if they were vectors. + +`x ⋅ y` (where `⋅` can be typed by tab-completing `\\cdot` in the REPL) is a synonym for +`dot(x, y)`. # Examples ```jldoctest -julia> vecdot(1:5, 2:6) +julia> dot(1:5, 2:6) 70 julia> x = fill(2., (5,5)); julia> y = fill(3., (5,5)); -julia> vecdot(x, y) +julia> dot(x, y) 150.0 ``` """ -function vecdot(x, y) # arbitrary iterables +function dot(x, y) # arbitrary iterables ix = iterate(x) iy = iterate(y) if ix === nothing @@ -664,16 +690,15 @@ function vecdot(x, y) # arbitrary iterables return s end -vecdot(x::Number, y::Number) = conj(x) * y - -dot(x::Number, y::Number) = vecdot(x, y) +dot(x::Number, y::Number) = conj(x) * y """ dot(x, y) - ⋅(x,y) + x ⋅ y -Compute the dot product between two vectors. For complex vectors, the first vector is conjugated. -When the vectors have equal lengths, calling `dot` is semantically equivalent to `sum(vx'vy for (vx,vy) in zip(x, y))`. +Compute the dot product between two vectors. For complex vectors, the first +vector is conjugated. When the vectors have equal lengths, calling `dot` is +semantically equivalent to `sum(dot(vx,vy) for (vx,vy) in zip(x, y))`. # Examples ```jldoctest @@ -685,28 +710,25 @@ julia> dot([im; im], [1; 1]) ``` """ function dot(x::AbstractVector, y::AbstractVector) - if length(x) != length(y) - throw(DimensionMismatch("dot product arguments have lengths $(length(x)) and $(length(y))")) + if length(LinearIndices(x)) != length(LinearIndices(y)) + throw(DimensionMismatch("dot product arguments have unequal lengths $(length(LinearIndices(x))) and $(length(LinearIndices(y)))")) end ix = iterate(x) if ix === nothing # we only need to check the first vector, since equal lengths have been asserted - return zero(eltype(x))'zero(eltype(y)) + return dot(zero(eltype(x)), zero(eltype(y))) end iy = iterate(y) - s = ix[1]'iy[1] + s = dot(ix[1], iy[1]) ix, iy = iterate(x, ix[2]), iterate(y, iy[2]) while ix != nothing - s += ix[1]'iy[1] + s += dot(ix[1], iy[1]) ix = iterate(x, ix[2]) iy = iterate(y, iy[2]) end return s end -# Call optimized BLAS methods for vectors of numbers -dot(x::AbstractVector{<:Number}, y::AbstractVector{<:Number}) = vecdot(x, y) - ########################################################################################### @@ -877,7 +899,7 @@ cond(x::Number) = x == 0 ? 
Inf : 1.0
 cond(x::Number, p) = cond(x)
 
 #Skeel condition numbers
-condskeel(A::AbstractMatrix, p::Real=Inf) = norm(abs.(inv(A))*abs.(A), p)
+condskeel(A::AbstractMatrix, p::Real=Inf) = opnorm(abs.(inv(A))*abs.(A), p)
 
 """
     condskeel(M, [x, p::Real=Inf])
@@ -1326,7 +1348,7 @@ promote_leaf_eltypes(x::Union{AbstractArray,Tuple}) = mapreduce(promote_leaf_elt
 function isapprox(x::AbstractArray, y::AbstractArray;
     atol::Real=0,
     rtol::Real=Base.rtoldefault(promote_leaf_eltypes(x),promote_leaf_eltypes(y),atol),
-    nans::Bool=false, norm::Function=vecnorm)
+    nans::Bool=false, norm::Function=norm)
     d = norm(x - y)
     if isfinite(d)
         return d <= max(atol, rtol*max(norm(x), norm(y)))
@@ -1341,7 +1363,7 @@ end
 
 Normalize the vector `v` in-place so that its `p`-norm equals unity, i.e.
 `norm(v, p) == 1`.
-See also [`normalize`](@ref) and [`vecnorm`](@ref).
+See also [`normalize`](@ref) and [`norm`](@ref).
 """
 function normalize!(v::AbstractVector, p::Real=2)
     nrm = norm(v, p)
@@ -1370,7 +1392,7 @@ end
 
 Normalize the vector `v` so that its `p`-norm equals unity, i.e.
-`norm(v, p) == vecnorm(v, p) == 1`.
-See also [`normalize!`](@ref) and [`vecnorm`](@ref).
+`norm(v, p) == 1`.
+See also [`normalize!`](@ref) and [`norm`](@ref).
 
 # Examples
 ```jldoctest
diff --git a/stdlib/LinearAlgebra/src/matmul.jl b/stdlib/LinearAlgebra/src/matmul.jl
index 7b6e4a54437ea..8609ef2dc1ba3 100644
--- a/stdlib/LinearAlgebra/src/matmul.jl
+++ b/stdlib/LinearAlgebra/src/matmul.jl
@@ -4,10 +4,10 @@ matprod(x, y) = x*y + x*y
 
-# Dot products
+# dot products
 
-vecdot(x::Union{DenseArray{T},StridedVector{T}}, y::Union{DenseArray{T},StridedVector{T}}) where {T<:BlasReal} = BLAS.dot(x, y)
-vecdot(x::Union{DenseArray{T},StridedVector{T}}, y::Union{DenseArray{T},StridedVector{T}}) where {T<:BlasComplex} = BLAS.dotc(x, y)
+dot(x::Union{DenseArray{T},StridedVector{T}}, y::Union{DenseArray{T},StridedVector{T}}) where {T<:BlasReal} = BLAS.dot(x, y)
+dot(x::Union{DenseArray{T},StridedVector{T}}, y::Union{DenseArray{T},StridedVector{T}}) where {T<:BlasComplex} = BLAS.dotc(x, y)
 
 function dot(x::Vector{T}, rx::Union{UnitRange{TI},AbstractRange{TI}}, y::Vector{T}, ry::Union{UnitRange{TI},AbstractRange{TI}}) where {T<:BlasReal,TI<:Integer}
     if length(rx) != length(ry)
diff --git a/stdlib/LinearAlgebra/src/rowvector.jl b/stdlib/LinearAlgebra/src/rowvector.jl
index 1b63a35d70625..01ea9fe8c35fa 100644
--- a/stdlib/LinearAlgebra/src/rowvector.jl
+++ b/stdlib/LinearAlgebra/src/rowvector.jl
@@ -12,7 +12,7 @@ recursively). By convention, a vector can be multiplied by a matrix on its left
 (`A * v`) whereas a row vector can be multiplied by a matrix on its right (such that
 `RowVector(v) * A = RowVector(transpose(A) * v)`). It differs from a `1×n`-sized matrix
 by the facts that its transpose returns a vector and the
-inner product `RowVector(v1) * v2` returns a scalar, but will otherwise behave similarly.
+dot product `RowVector(v1) * v2` returns a scalar, but will otherwise behave similarly.
# Examples ```jldoctest diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index eb21fefd8e33d..029501cc49bd2 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -1952,7 +1952,7 @@ function powm!(A0::UpperTriangular{<:BlasFloat}, p::Real) ArgumentError("p must be a real number in (-1,1), got $p") end - normA0 = norm(A0, 1) + normA0 = opnorm(A0, 1) rmul!(A0, 1/normA0) theta = [1.53e-5, 2.25e-3, 1.92e-2, 6.08e-2, 1.25e-1, 2.03e-1, 2.84e-1] @@ -2050,8 +2050,8 @@ function log(A0::UpperTriangular{T}) where T<:BlasFloat end AmI = A - I - d2 = sqrt(norm(AmI^2, 1)) - d3 = cbrt(norm(AmI^3, 1)) + d2 = sqrt(opnorm(AmI^2, 1)) + d3 = cbrt(opnorm(AmI^3, 1)) alpha2 = max(d2, d3) foundm = false if alpha2 <= theta[2] @@ -2062,9 +2062,9 @@ function log(A0::UpperTriangular{T}) where T<:BlasFloat while !foundm more = false if s > s0 - d3 = cbrt(norm(AmI^3, 1)) + d3 = cbrt(opnorm(AmI^3, 1)) end - d4 = norm(AmI^4, 1)^(1/4) + d4 = opnorm(AmI^4, 1)^(1/4) alpha3 = max(d3, d4) if alpha3 <= theta[tmax] local j @@ -2083,7 +2083,7 @@ function log(A0::UpperTriangular{T}) where T<:BlasFloat end if !more - d5 = norm(AmI^5, 1)^(1/5) + d5 = opnorm(AmI^5, 1)^(1/5) alpha4 = max(d4, d5) eta = min(alpha3, alpha4) if eta <= theta[tmax] @@ -2250,8 +2250,8 @@ function invsquaring(A0::UpperTriangular, theta) end AmI = A - I - d2 = sqrt(norm(AmI^2, 1)) - d3 = cbrt(norm(AmI^3, 1)) + d2 = sqrt(opnorm(AmI^2, 1)) + d3 = cbrt(opnorm(AmI^3, 1)) alpha2 = max(d2, d3) foundm = false if alpha2 <= theta[2] @@ -2262,9 +2262,9 @@ function invsquaring(A0::UpperTriangular, theta) while !foundm more = false if s > s0 - d3 = cbrt(norm(AmI^3, 1)) + d3 = cbrt(opnorm(AmI^3, 1)) end - d4 = norm(AmI^4, 1)^(1/4) + d4 = opnorm(AmI^4, 1)^(1/4) alpha3 = max(d3, d4) if alpha3 <= theta[tmax] local j @@ -2287,7 +2287,7 @@ function invsquaring(A0::UpperTriangular, theta) end if !more - d5 = norm(AmI^5, 1)^(1/5) + d5 = opnorm(AmI^5, 1)^(1/5) alpha4 = max(d4, d5) eta = min(alpha3, alpha4) if eta <= theta[tmax] diff --git a/stdlib/LinearAlgebra/src/uniformscaling.jl b/stdlib/LinearAlgebra/src/uniformscaling.jl index d9861d373cdb4..67d2456a5c316 100644 --- a/stdlib/LinearAlgebra/src/uniformscaling.jl +++ b/stdlib/LinearAlgebra/src/uniformscaling.jl @@ -128,7 +128,7 @@ function (-)(J::UniformScaling, A::AbstractMatrix) end inv(J::UniformScaling) = UniformScaling(inv(J.λ)) -norm(J::UniformScaling, p::Real=2) = abs(J.λ) +opnorm(J::UniformScaling, p::Real=2) = opnorm(J.λ, p) function det(J::UniformScaling{T}) where T if isone(J.λ) @@ -194,11 +194,11 @@ end function isapprox(J::UniformScaling, A::AbstractMatrix; atol::Real = 0, rtol::Real = Base.rtoldefault(promote_leaf_eltypes(A), eltype(J), atol), - nans::Bool = false, norm::Function = vecnorm) + nans::Bool = false, norm::Function = norm) n = checksquare(A) - normJ = norm === LinearAlgebra.norm ? abs(J.λ) : - norm === vecnorm ? abs(J.λ) * sqrt(n) : - norm(Diagonal(fill(J.λ, n))) + normJ = norm === opnorm ? abs(J.λ) : + norm === LinearAlgebra.norm ? abs(J.λ) * sqrt(n) : + norm(Diagonal(fill(J.λ, n))) return norm(A - J) <= max(atol, rtol * max(norm(A), normJ)) end isapprox(A::AbstractMatrix, J::UniformScaling; kwargs...) = isapprox(J, A; kwargs...) 
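Not part of the patch itself: a minimal sketch of the `norm`/`opnorm` split introduced by the source hunks above, assuming a Julia build with this change applied. The matrix `A` below is an arbitrary illustrative value, not taken from the diff.

```julia
using LinearAlgebra

A = [1.0 2.0; 3.0 4.0]

# `norm` now always treats its argument as a flat collection of entries,
# so for a matrix the default is the Frobenius norm:
norm(A)                  # sqrt(1 + 4 + 9 + 16) ≈ 5.477
norm(A) == norm(vec(A))  # true by definition

# The induced operator norms move to the new `opnorm`:
opnorm(A, 1)             # maximum absolute column sum == 6.0
opnorm(A, Inf)           # maximum absolute row sum == 7.0
opnorm(A)                # spectral norm (largest singular value) ≈ 5.465
```

The `UniformScaling` hunk directly above follows the same split: `opnorm(J, p)` forwards to `opnorm(J.λ, p)`, and `isapprox` now distinguishes the `opnorm` and `norm` keyword functions when scaling `J` up to an `n × n` comparison.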
diff --git a/stdlib/LinearAlgebra/test/adjtrans.jl b/stdlib/LinearAlgebra/test/adjtrans.jl
index f52b2bbb248d0..ae140d5f7a8c8 100644
--- a/stdlib/LinearAlgebra/test/adjtrans.jl
+++ b/stdlib/LinearAlgebra/test/adjtrans.jl
@@ -413,19 +413,30 @@ end
     @test (Transpose(complexvec) / Transpose(complexmat))::Transpose ≈ rowcomplexvec / copy(Transpose(complexmat))
 end
 
-@testset "norm of Adjoint/Transpose-wrapped vectors" begin
+@testset "norm and opnorm of Adjoint/Transpose-wrapped vectors" begin
     # definitions are in base/linalg/generic.jl
     realvec, complexvec = [3, -4], [3im, -4im]
-    # one norm result should be maximum(abs.(realvec)) == 4
+    # one norm result should be sum(abs.(realvec)) == 7
-    # two norm result should be sqrt(sum(abs.(realvec))) == 5
+    # two norm result should be sqrt(sum(abs2.(realvec))) == 5
-    # inf norm result should be sum(abs.(realvec)) == 7
+    # inf norm result should be maximum(abs.(realvec)) == 4
     for v in (realvec, complexvec)
         @test norm(Adjoint(v)) ≈ 5
-        @test norm(Adjoint(v), 1) ≈ 4
-        @test norm(Adjoint(v), Inf) ≈ 7
+        @test norm(Adjoint(v), 1) ≈ 7
+        @test norm(Adjoint(v), Inf) ≈ 4
         @test norm(Transpose(v)) ≈ 5
-        @test norm(Transpose(v), 1) ≈ 4
-        @test norm(Transpose(v), Inf) ≈ 7
+        @test norm(Transpose(v), 1) ≈ 7
+        @test norm(Transpose(v), Inf) ≈ 4
+    end
+    # one opnorm result should be maximum(abs.(realvec)) == 4
+    # two opnorm result should be sqrt(sum(abs2.(realvec))) == 5
+    # inf opnorm result should be sum(abs.(realvec)) == 7
+    for v in (realvec, complexvec)
+        @test opnorm(Adjoint(v)) ≈ 5
+        @test opnorm(Adjoint(v), 1) ≈ 4
+        @test opnorm(Adjoint(v), Inf) ≈ 7
+        @test opnorm(Transpose(v)) ≈ 5
+        @test opnorm(Transpose(v), 1) ≈ 4
+        @test opnorm(Transpose(v), Inf) ≈ 7
     end
 end
diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl
index 4e4b2da53c301..b354d8b6fe2ae 100644
--- a/stdlib/LinearAlgebra/test/bidiag.jl
+++ b/stdlib/LinearAlgebra/test/bidiag.jl
@@ -251,7 +251,7 @@ srand(1)
         test_approx_eq_modphase(u1, u2)
         test_approx_eq_modphase(copy(v1), copy(v2))
     end
-    @test 0 ≈ vecnorm(u2*Diagonal(d2)*v2'-Tfull) atol=n*max(n^2*eps(relty),vecnorm(u1*Diagonal(d1)*v1'-Tfull))
+    @test 0 ≈ norm(u2*Diagonal(d2)*v2'-Tfull) atol=n*max(n^2*eps(relty),norm(u1*Diagonal(d1)*v1'-Tfull))
     @inferred svdvals(T)
     @inferred svd(T)
 end
diff --git a/stdlib/LinearAlgebra/test/dense.jl b/stdlib/LinearAlgebra/test/dense.jl
index be35e774ac8d9..ed192251a958a 100644
--- a/stdlib/LinearAlgebra/test/dense.jl
+++ b/stdlib/LinearAlgebra/test/dense.jl
@@ -236,18 +236,18 @@ end
         end
     end
 
-    @testset "Matrix (Operator)" begin
+    @testset "Matrix (Operator) opnorm" begin
         A = fill(elty(1),10,10)
         As = view(A,1:5,1:5)
-        @test norm(A, 1) ≈ 10
-        elty <: Union{BigFloat,Complex{BigFloat},BigInt} || @test norm(A, 2) ≈ 10
-        @test norm(A, Inf) ≈ 10
-        @test norm(As, 1) ≈ 5
-        elty <: Union{BigFloat,Complex{BigFloat},BigInt} || @test norm(As, 2) ≈ 5
-        @test norm(As, Inf) ≈ 5
+        @test opnorm(A, 1) ≈ 10
+        elty <: Union{BigFloat,Complex{BigFloat},BigInt} || @test opnorm(A, 2) ≈ 10
+        @test opnorm(A, Inf) ≈ 10
+        @test opnorm(As, 1) ≈ 5
+        elty <: Union{BigFloat,Complex{BigFloat},BigInt} || @test opnorm(As, 2) ≈ 5
+        @test opnorm(As, Inf) ≈ 5
     end
 
-    @testset "Absolute homogeneity, triangle inequality, & vecnorm" begin
+    @testset "Absolute homogeneity, triangle inequality, & norm" begin
         for i = 1:10
             Ainit = elty <: Integer ? convert(Matrix{elty}, rand(1:10, mmat, nmat)) :
                     elty <: Complex ? 
convert(Matrix{elty}, complex.(randn(mmat, nmat), randn(mmat, nmat))) : @@ -269,9 +269,9 @@ end elty <: Union{BigFloat,Complex{BigFloat},BigInt} || @test norm(A + B) <= norm(A) + norm(B) # two is default @test norm(A + B,Inf) <= norm(A,Inf) + norm(B,Inf) - # vecnorm: - for p = -2:3 - @test norm(reshape(A, length(A)), p) == vecnorm(A, p) + # norm + for p in (-Inf, Inf, (-2:3)...) + @test norm(A, p) == norm(vec(A), p) end end end @@ -577,7 +577,7 @@ end @test coth(acoth(coth(A))) ≈ coth(A) # Definition of principal values (Aprahamian & Higham, 2016, pp. 4-5) - abstol = sqrt(eps(real(elty))) * vecnorm(acosh(A)) + abstol = sqrt(eps(real(elty))) * norm(acosh(A)) @test all(z -> (0 < real(z) < π || abs(real(z)) < abstol && imag(z) >= 0 || abs(real(z) - π) < abstol && imag(z) <= 0), @@ -796,7 +796,7 @@ end r = (elty <: Complex ? adjoint : transpose)(rand(elty, 5)) cm = rand(elty, 5, 1) rm = rand(elty, 1, 5) - @testset "inner products" begin + @testset "dot products" begin test_div_pinv_consistency(r, c) test_div_pinv_consistency(rm, c) test_div_pinv_consistency(r, cm) diff --git a/stdlib/LinearAlgebra/test/generic.jl b/stdlib/LinearAlgebra/test/generic.jl index f56a95e0b9211..6675e025a7912 100644 --- a/stdlib/LinearAlgebra/test/generic.jl +++ b/stdlib/LinearAlgebra/test/generic.jl @@ -197,14 +197,14 @@ end @test norm([2.4e-322, 4.4e-323]) ≈ 2.47e-322 @test norm([2.4e-322, 4.4e-323], 3) ≈ 2.4e-322 -@test_throws ArgumentError norm(Matrix{Float64}(undef,5,5),5) +@test_throws ArgumentError opnorm(Matrix{Float64}(undef,5,5),5) -@testset "generic vecnorm for arrays of arrays" begin +@testset "generic norm for arrays of arrays" begin x = Vector{Int}[[1,2], [3,4]] @test @inferred(norm(x)) ≈ sqrt(30) @test norm(x, 0) == length(x) - @test norm(x, 1) ≈ sqrt(5) + 5 - @test norm(x, 3) ≈ cbrt(sqrt(125)+125) + @test norm(x, 1) ≈ 5+sqrt(5) + @test norm(x, 3) ≈ cbrt(5^3 +sqrt(5)^3) end @testset "LinearAlgebra.axp(b)y! for element type without commutative multiplication" begin diff --git a/stdlib/LinearAlgebra/test/givens.jl b/stdlib/LinearAlgebra/test/givens.jl index 25a11959812cc..c2fc783547917 100644 --- a/stdlib/LinearAlgebra/test/givens.jl +++ b/stdlib/LinearAlgebra/test/givens.jl @@ -38,7 +38,7 @@ using LinearAlgebra: rmul!, lmul! 
@test_throws DimensionMismatch lmul!(G, A) @test_throws DimensionMismatch rmul!(A, adjoint(G)) @test abs.(A) ≈ abs.(hessenberg(Ac).H) - @test norm(R*Matrix{elty}(I, 10, 10)) ≈ one(elty) + @test opnorm(R*Matrix{elty}(I, 10, 10)) ≈ one(elty) I10 = Matrix{elty}(I, 10, 10) G, _ = givens(one(elty),zero(elty),9,10) diff --git a/stdlib/LinearAlgebra/test/matmul.jl b/stdlib/LinearAlgebra/test/matmul.jl index 171265dfbd04c..db906303c89ac 100644 --- a/stdlib/LinearAlgebra/test/matmul.jl +++ b/stdlib/LinearAlgebra/test/matmul.jl @@ -223,28 +223,29 @@ end @test_throws DimensionMismatch dot(x, 1:2, y, 1:3) @test_throws BoundsError dot(x, 1:4, y, 1:4) @test_throws BoundsError dot(x, 1:3, y, 2:4) - @test dot(x, 1:2,y, 1:2) == convert(elty, 12.5) + @test dot(x, 1:2, y, 1:2) == convert(elty, 12.5) @test transpose(x)*y == convert(elty, 29.0) - @test_throws MethodError dot(rand(elty, 2, 2), randn(elty, 2, 2)) - X = convert(Vector{Matrix{elty}},[reshape(1:4, 2, 2), fill(1, 2, 2)]) - res = convert(Matrix{elty}, [7.0 13.0; 13.0 27.0]) - @test dot(X, X) == res + X = convert(Matrix{elty},[1.0 2.0; 3.0 4.0]) + Y = convert(Matrix{elty},[1.5 2.5; 3.5 4.5]) + @test dot(X, Y) == convert(elty, 35.0) + Z = convert(Vector{Matrix{elty}},[reshape(1:4, 2, 2), fill(1, 2, 2)]) + @test dot(Z, Z) == convert(elty, 34.0) end -vecdot_(x,y) = invoke(vecdot, Tuple{Any,Any}, x,y) -@testset "generic vecdot" begin +dot_(x,y) = invoke(dot, Tuple{Any,Any}, x,y) +@testset "generic dot" begin AA = [1+2im 3+4im; 5+6im 7+8im] BB = [2+7im 4+1im; 3+8im 6+5im] for A in (copy(AA), view(AA, 1:2, 1:2)), B in (copy(BB), view(BB, 1:2, 1:2)) - @test vecdot(A,B) == dot(vec(A),vec(B)) == vecdot_(A,B) == vecdot(float.(A),float.(B)) - @test vecdot(Int[], Int[]) == 0 == vecdot_(Int[], Int[]) - @test_throws MethodError vecdot(Any[], Any[]) - @test_throws MethodError vecdot_(Any[], Any[]) - for n1 = 0:2, n2 = 0:2, d in (vecdot, vecdot_) + @test dot(A,B) == dot(vec(A),vec(B)) == dot_(A,B) == dot(float.(A),float.(B)) + @test dot(Int[], Int[]) == 0 == dot_(Int[], Int[]) + @test_throws MethodError dot(Any[], Any[]) + @test_throws MethodError dot_(Any[], Any[]) + for n1 = 0:2, n2 = 0:2, d in (dot, dot_) if n1 != n2 @test_throws DimensionMismatch d(1:n1, 1:n2) else - @test d(1:n1, 1:n2) ≈ vecnorm(1:n1)^2 + @test d(1:n1, 1:n2) ≈ norm(1:n1)^2 end end end @@ -424,7 +425,7 @@ end @test transpose(Xv2)*Xv2 ≈ XtX @test (transpose(Xv3)*Xv3)[1] ≈ XtX @test Xv1'*Xv1 ≈ XcX - @test Xv2'*Xv2 ≈ XcX + @test Xv2'*Xv2 ≈ norm(Xv2)^2 @test (Xv3'*Xv3)[1] ≈ XcX @test (Xv1*transpose(Xv1))[1] ≈ XXt @test Xv2*transpose(Xv2) ≈ XXt diff --git a/stdlib/LinearAlgebra/test/pinv.jl b/stdlib/LinearAlgebra/test/pinv.jl index 42da2e1b7b791..489f7c0c8e421 100644 --- a/stdlib/LinearAlgebra/test/pinv.jl +++ b/stdlib/LinearAlgebra/test/pinv.jl @@ -89,14 +89,14 @@ end function test_pinv(a,m,n,tol1,tol2,tol3) apinv = @inferred pinv(a) - @test vecnorm(a*apinv*a-a)/vecnorm(a) ≈ 0 atol=tol1 + @test norm(a*apinv*a-a)/norm(a) ≈ 0 atol=tol1 x0 = randn(n); b = a*x0; x = apinv*b - @test vecnorm(a*x-b)/vecnorm(b) ≈ 0 atol=tol1 + @test norm(a*x-b)/norm(b) ≈ 0 atol=tol1 apinv = pinv(a,sqrt(eps(real(one(eltype(a)))))) - @test vecnorm(a*apinv*a-a)/vecnorm(a) ≈ 0 atol=tol2 + @test norm(a*apinv*a-a)/norm(a) ≈ 0 atol=tol2 x0 = randn(n); b = a*x0; x = apinv*b - @test vecnorm(a*x-b)/vecnorm(b) ≈ 0 atol=tol2 + @test norm(a*x-b)/norm(b) ≈ 0 atol=tol2 end @testset for eltya in (Float32, Float64, ComplexF32, ComplexF64) diff --git a/stdlib/LinearAlgebra/test/qr.jl b/stdlib/LinearAlgebra/test/qr.jl index 
2f9f0d763cbc3..91106676c52f6 100644 --- a/stdlib/LinearAlgebra/test/qr.jl +++ b/stdlib/LinearAlgebra/test/qr.jl @@ -174,7 +174,7 @@ end @testset "Issue 7304" begin A = [-√.5 -√.5; -√.5 √.5] Q = rectangularQ(qr(A).Q) - @test vecnorm(A-Q) < eps() + @test norm(A-Q) < eps() end @testset "qr on AbstractVector" begin diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index 46aaeaf3140c0..13e2aeba68e7a 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -251,7 +251,7 @@ for elty1 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFlo if !(elty1 in (BigFloat, Complex{BigFloat})) # Not handled yet vals, vecs = eigen(A1) if (t1 == UpperTriangular || t1 == LowerTriangular) && elty1 != Int # Cannot really handle degenerate eigen space and Int matrices will probably have repeated eigenvalues. - @test vecs*diagm(0 => vals)/vecs ≈ A1 atol=sqrt(eps(float(real(one(vals[1])))))*(norm(A1,Inf)*n)^2 + @test vecs*diagm(0 => vals)/vecs ≈ A1 atol=sqrt(eps(float(real(one(vals[1])))))*(opnorm(A1,Inf)*n)^2 end end diff --git a/stdlib/LinearAlgebra/test/uniformscaling.jl b/stdlib/LinearAlgebra/test/uniformscaling.jl index 03db4c90f6109..02d2ea36f16df 100644 --- a/stdlib/LinearAlgebra/test/uniformscaling.jl +++ b/stdlib/LinearAlgebra/test/uniformscaling.jl @@ -20,7 +20,7 @@ srand(123) @test -one(UniformScaling(2)) == UniformScaling(-1) @test sparse(3I,4,5) == sparse(1:4, 1:4, 3, 4, 5) @test sparse(3I,5,4) == sparse(1:4, 1:4, 3, 5, 4) - @test norm(UniformScaling(1+im)) ≈ sqrt(2) + @test opnorm(UniformScaling(1+im)) ≈ sqrt(2) end @testset "conjugation of UniformScaling" begin diff --git a/stdlib/SparseArrays/src/SparseArrays.jl b/stdlib/SparseArrays/src/SparseArrays.jl index 33330420d385a..af2aa64f6db3d 100644 --- a/stdlib/SparseArrays/src/SparseArrays.jl +++ b/stdlib/SparseArrays/src/SparseArrays.jl @@ -12,9 +12,9 @@ using Base.Sort: Forward using LinearAlgebra import Base: +, -, *, \, /, &, |, xor, == -import LinearAlgebra: mul!, ldiv!, rdiv!, chol, adjoint!, diag, dot, eigen, - issymmetric, istril, istriu, lu, tr, transpose!, tril!, triu!, vecdot, - vecnorm, cond, diagm, factorize, ishermitian, norm, lmul!, rmul!, tril, triu +import LinearAlgebra: mul!, ldiv!, rdiv!, chol, adjoint!, diag, eigen, dot, + issymmetric, istril, istriu, lu, tr, transpose!, tril!, triu!, + cond, diagm, factorize, ishermitian, norm, opnorm, lmul!, rmul!, tril, triu import Base: @get!, acos, acosd, acot, acotd, acsch, asech, asin, asind, asinh, atan, atand, atanh, broadcast!, conj!, cos, cosc, cosd, cosh, cospi, cot, diff --git a/stdlib/SparseArrays/src/linalg.jl b/stdlib/SparseArrays/src/linalg.jl index c70bc6ac12c17..3d6abf879f60f 100644 --- a/stdlib/SparseArrays/src/linalg.jl +++ b/stdlib/SparseArrays/src/linalg.jl @@ -203,11 +203,11 @@ function spmatmul(A::SparseMatrixCSC{Tv,Ti}, B::SparseMatrixCSC{Tv,Ti}; return C end -# Frobenius inner product: trace(A'B) -function vecdot(A::SparseMatrixCSC{T1,S1},B::SparseMatrixCSC{T2,S2}) where {T1,T2,S1,S2} +# Frobenius dot/inner product: trace(A'B) +function dot(A::SparseMatrixCSC{T1,S1},B::SparseMatrixCSC{T2,S2}) where {T1,T2,S1,S2} m, n = size(A) size(B) == (m,n) || throw(DimensionMismatch("matrices must have the same dimensions")) - r = vecdot(zero(T1), zero(T2)) + r = dot(zero(T1), zero(T2)) @inbounds for j = 1:n ia = A.colptr[j]; ia_nxt = A.colptr[j+1] ib = B.colptr[j]; ib_nxt = B.colptr[j+1] @@ -223,7 +223,7 @@ function vecdot(A::SparseMatrixCSC{T1,S1},B::SparseMatrixCSC{T2,S2}) where 
{T1,T ib < ib_nxt || break rb = B.rowval[ib] else # ra == rb - r += vecdot(A.nzval[ia], B.nzval[ib]) + r += dot(A.nzval[ia], B.nzval[ib]) ia += oneunit(S1); ib += oneunit(S2) ia < ia_nxt && ib < ib_nxt || break ra = A.rowval[ia]; rb = B.rowval[ib] @@ -560,15 +560,15 @@ end diff(a::SparseMatrixCSC; dims::Integer) = dims==1 ? sparse_diff1(a) : sparse_diff2(a) ## norm and rank -vecnorm(A::SparseMatrixCSC, p::Real=2) = vecnorm(view(A.nzval, 1:nnz(A)), p) +norm(A::SparseMatrixCSC, p::Real=2) = norm(view(A.nzval, 1:nnz(A)), p) -function norm(A::SparseMatrixCSC,p::Real=2) +function opnorm(A::SparseMatrixCSC, p::Real=2) m, n = size(A) if m == 0 || n == 0 || isempty(A) return float(real(zero(eltype(A)))) elseif m == 1 || n == 1 # TODO: compute more efficiently using A.nzval directly - return norm(Array(A), p) + return opnorm(Array(A), p) else Tnorm = typeof(float(real(zero(eltype(A))))) Tsum = promote_type(Float64,Tnorm) @@ -583,7 +583,7 @@ function norm(A::SparseMatrixCSC,p::Real=2) end return convert(Tnorm, nA) elseif p==2 - throw(ArgumentError("2-norm not yet implemented for sparse matrices. Try norm(Array(A)) or norm(A, p) where p=1 or Inf.")) + throw(ArgumentError("2-norm not yet implemented for sparse matrices. Try opnorm(Array(A)) or opnorm(A, p) where p=1 or Inf.")) elseif p==Inf rowSum = zeros(Tsum,m) for i=1:length(A.nzval) @@ -592,7 +592,7 @@ function norm(A::SparseMatrixCSC,p::Real=2) return convert(Tnorm, maximum(rowSum)) end end - throw(ArgumentError("invalid p-norm p=$p. Valid: 1, Inf")) + throw(ArgumentError("invalid operator p-norm p=$p. Valid: 1, Inf")) end # TODO rank @@ -600,12 +600,12 @@ end # cond function cond(A::SparseMatrixCSC, p::Real=2) if p == 1 - normAinv = normestinv(A) - normA = norm(A, 1) + normAinv = opnormestinv(A) + normA = opnorm(A, 1) return normA * normAinv elseif p == Inf - normAinv = normestinv(copy(A')) - normA = norm(A, Inf) + normAinv = opnormestinv(copy(A')) + normA = opnorm(A, Inf) return normA * normAinv elseif p == 2 throw(ArgumentError("2-norm condition number is not implemented for sparse matrices, try cond(Array(A), 2) instead")) @@ -614,7 +614,7 @@ function cond(A::SparseMatrixCSC, p::Real=2) end end -function normestinv(A::SparseMatrixCSC{T}, t::Integer = min(2,maximum(size(A)))) where T +function opnormestinv(A::SparseMatrixCSC{T}, t::Integer = min(2,maximum(size(A)))) where T maxiter = 5 # Check the input n = checksquare(A) diff --git a/stdlib/SparseArrays/src/sparsevector.jl b/stdlib/SparseArrays/src/sparsevector.jl index 313220b80b735..f30ea679b88d2 100644 --- a/stdlib/SparseArrays/src/sparsevector.jl +++ b/stdlib/SparseArrays/src/sparsevector.jl @@ -1320,7 +1320,7 @@ for f in [:sum, :maximum, :minimum], op in [:abs, :abs2] end end -vecnorm(x::SparseVectorUnion, p::Real=2) = vecnorm(nonzeros(x), p) +norm(x::SparseVectorUnion, p::Real=2) = norm(nonzeros(x), p) ### linalg.jl @@ -1391,9 +1391,9 @@ function dot(x::AbstractVector{Tx}, y::SparseVectorUnion{Ty}) where {Tx<:Number, length(y) == n || throw(DimensionMismatch()) nzind = nonzeroinds(y) nzval = nonzeros(y) - s = zero(Tx) * zero(Ty) + s = dot(zero(Tx), zero(Ty)) for i = 1:length(nzind) - s += conj(x[nzind[i]]) * nzval[i] + s += dot(x[nzind[i]], nzval[i]) end return s end @@ -1403,9 +1403,9 @@ function dot(x::SparseVectorUnion{Tx}, y::AbstractVector{Ty}) where {Tx<:Number, length(x) == n || throw(DimensionMismatch()) nzind = nonzeroinds(x) nzval = nonzeros(x) - s = zero(Tx) * zero(Ty) + s = dot(zero(Tx), zero(Ty)) @inbounds for i = 1:length(nzind) - s += conj(nzval[i]) * y[nzind[i]] + s += 
dot(nzval[i], y[nzind[i]]) end return s end diff --git a/stdlib/SparseArrays/test/sparse.jl b/stdlib/SparseArrays/test/sparse.jl index 9684279b5873c..6309efc11b8a0 100644 --- a/stdlib/SparseArrays/test/sparse.jl +++ b/stdlib/SparseArrays/test/sparse.jl @@ -351,13 +351,13 @@ end end end -@testset "sparse Frobenius inner product" begin +@testset "sparse Frobenius dot/inner product" begin for i = 1:5 A = sprand(ComplexF64,10,15,0.4) B = sprand(ComplexF64,10,15,0.5) - @test vecdot(A,B) ≈ vecdot(Matrix(A),Matrix(B)) + @test dot(A,B) ≈ dot(Matrix(A),Matrix(B)) end - @test_throws DimensionMismatch vecdot(sprand(5,5,0.2),sprand(5,6,0.2)) + @test_throws DimensionMismatch dot(sprand(5,5,0.2),sprand(5,6,0.2)) end sA = sprandn(3, 7, 0.5) @@ -1568,8 +1568,8 @@ end @test norm(A) == zero(eltype(A)) A = sparse([1.0]) @test norm(A) == 1.0 - @test_throws ArgumentError norm(sprand(5,5,0.2),3) - @test_throws ArgumentError norm(sprand(5,5,0.2),2) + @test_throws ArgumentError opnorm(sprand(5,5,0.2),3) + @test_throws ArgumentError opnorm(sprand(5,5,0.2),2) end @testset "ishermitian/issymmetric" begin @@ -1691,30 +1691,30 @@ end Ac = sprandn(10,10,.1) + im* sprandn(10,10,.1) Ar = sprandn(10,10,.1) Ai = ceil.(Int,Ar*100) - @test norm(Ac,1) ≈ norm(Array(Ac),1) - @test norm(Ac,Inf) ≈ norm(Array(Ac),Inf) - @test vecnorm(Ac) ≈ vecnorm(Array(Ac)) - @test norm(Ar,1) ≈ norm(Array(Ar),1) - @test norm(Ar,Inf) ≈ norm(Array(Ar),Inf) - @test vecnorm(Ar) ≈ vecnorm(Array(Ar)) - @test norm(Ai,1) ≈ norm(Array(Ai),1) - @test norm(Ai,Inf) ≈ norm(Array(Ai),Inf) - @test vecnorm(Ai) ≈ vecnorm(Array(Ai)) + @test opnorm(Ac,1) ≈ opnorm(Array(Ac),1) + @test opnorm(Ac,Inf) ≈ opnorm(Array(Ac),Inf) + @test norm(Ac) ≈ norm(Array(Ac)) + @test opnorm(Ar,1) ≈ opnorm(Array(Ar),1) + @test opnorm(Ar,Inf) ≈ opnorm(Array(Ar),Inf) + @test norm(Ar) ≈ norm(Array(Ar)) + @test opnorm(Ai,1) ≈ opnorm(Array(Ai),1) + @test opnorm(Ai,Inf) ≈ opnorm(Array(Ai),Inf) + @test norm(Ai) ≈ norm(Array(Ai)) Ai = trunc.(Int, Ar*100) - @test norm(Ai,1) ≈ norm(Array(Ai),1) - @test norm(Ai,Inf) ≈ norm(Array(Ai),Inf) - @test vecnorm(Ai) ≈ vecnorm(Array(Ai)) + @test opnorm(Ai,1) ≈ opnorm(Array(Ai),1) + @test opnorm(Ai,Inf) ≈ opnorm(Array(Ai),Inf) + @test norm(Ai) ≈ norm(Array(Ai)) Ai = round.(Int, Ar*100) - @test norm(Ai,1) ≈ norm(Array(Ai),1) - @test norm(Ai,Inf) ≈ norm(Array(Ai),Inf) - @test vecnorm(Ai) ≈ vecnorm(Array(Ai)) + @test opnorm(Ai,1) ≈ opnorm(Array(Ai),1) + @test opnorm(Ai,Inf) ≈ opnorm(Array(Ai),Inf) + @test norm(Ai) ≈ norm(Array(Ai)) # make certain entries in nzval beyond # the range specified in colptr do not - # impact vecnorm of a sparse matrix + # impact norm of a sparse matrix foo = sparse(1.0I, 4, 4) resize!(foo.nzval, 5) setindex!(foo.nzval, NaN, 5) - @test vecnorm(foo) == 2.0 + @test norm(foo) == 2.0 end @testset "sparse matrix cond" begin @@ -1724,10 +1724,10 @@ end @test cond(A, 1) == 1.0 # For a discussion of the tolerance, see #14778 if Base.USE_GPL_LIBS - @test 0.99 <= cond(Ar, 1) \ norm(Ar, 1) * norm(inv(Array(Ar)), 1) < 3 - @test 0.99 <= cond(Ac, 1) \ norm(Ac, 1) * norm(inv(Array(Ac)), 1) < 3 - @test 0.99 <= cond(Ar, Inf) \ norm(Ar, Inf) * norm(inv(Array(Ar)), Inf) < 3 - @test 0.99 <= cond(Ac, Inf) \ norm(Ac, Inf) * norm(inv(Array(Ac)), Inf) < 3 + @test 0.99 <= cond(Ar, 1) \ opnorm(Ar, 1) * opnorm(inv(Array(Ar)), 1) < 3 + @test 0.99 <= cond(Ac, 1) \ opnorm(Ac, 1) * opnorm(inv(Array(Ac)), 1) < 3 + @test 0.99 <= cond(Ar, Inf) \ opnorm(Ar, Inf) * opnorm(inv(Array(Ar)), Inf) < 3 + @test 0.99 <= cond(Ac, Inf) \ opnorm(Ac, Inf) * opnorm(inv(Array(Ac)), 
Inf) < 3 end @test_throws ArgumentError cond(A,2) @test_throws ArgumentError cond(A,3) @@ -1737,21 +1737,21 @@ end @test_throws DimensionMismatch cond(Arect, Inf) end -@testset "sparse matrix normestinv" begin +@testset "sparse matrix opnormestinv" begin srand(1234) Ac = sprandn(20,20,.5) + im* sprandn(20,20,.5) Aci = ceil.(Int64, 100*sprand(20,20,.5)) + im*ceil.(Int64, sprand(20,20,.5)) Ar = sprandn(20,20,.5) Ari = ceil.(Int64, 100*Ar) if Base.USE_GPL_LIBS - # NOTE: normestinv is probabilistic, so requires a fixed seed (set above in srand(1234)) - @test SparseArrays.normestinv(Ac,3) ≈ norm(inv(Array(Ac)),1) atol=1e-4 - @test SparseArrays.normestinv(Aci,3) ≈ norm(inv(Array(Aci)),1) atol=1e-4 - @test SparseArrays.normestinv(Ar) ≈ norm(inv(Array(Ar)),1) atol=1e-4 - @test_throws ArgumentError SparseArrays.normestinv(Ac,0) - @test_throws ArgumentError SparseArrays.normestinv(Ac,21) - end - @test_throws DimensionMismatch SparseArrays.normestinv(sprand(3,5,.9)) + # NOTE: opnormestinv is probabilistic, so requires a fixed seed (set above in srand(1234)) + @test SparseArrays.opnormestinv(Ac,3) ≈ opnorm(inv(Array(Ac)),1) atol=1e-4 + @test SparseArrays.opnormestinv(Aci,3) ≈ opnorm(inv(Array(Aci)),1) atol=1e-4 + @test SparseArrays.opnormestinv(Ar) ≈ opnorm(inv(Array(Ar)),1) atol=1e-4 + @test_throws ArgumentError SparseArrays.opnormestinv(Ac,0) + @test_throws ArgumentError SparseArrays.opnormestinv(Ac,21) + end + @test_throws DimensionMismatch SparseArrays.opnormestinv(sprand(3,5,.9)) end @testset "issue #13008" begin diff --git a/stdlib/SparseArrays/test/sparsevector.jl b/stdlib/SparseArrays/test/sparsevector.jl index cf37d09489b88..e2f047a3eb0ee 100644 --- a/stdlib/SparseArrays/test/sparsevector.jl +++ b/stdlib/SparseArrays/test/sparsevector.jl @@ -708,16 +708,16 @@ end ### Reduction -@testset "sum, vecnorm" begin +@testset "sum, norm" begin x = spv_x1 @test sum(x) == 4.0 @test sum(abs, x) == 5.5 @test sum(abs2, x) == 14.375 - @test vecnorm(x) == sqrt(14.375) - @test vecnorm(x, 1) == 5.5 - @test vecnorm(x, 2) == sqrt(14.375) - @test vecnorm(x, Inf) == 3.5 + @test norm(x) == sqrt(14.375) + @test norm(x, 1) == 5.5 + @test norm(x, 2) == sqrt(14.375) + @test norm(x, Inf) == 3.5 end @testset "maximum, minimum" begin diff --git a/stdlib/SuiteSparse/test/cholmod.jl b/stdlib/SuiteSparse/test/cholmod.jl index 57a250bd4e8aa..bf255e145b0ab 100644 --- a/stdlib/SuiteSparse/test/cholmod.jl +++ b/stdlib/SuiteSparse/test/cholmod.jl @@ -290,8 +290,8 @@ end @test_throws BoundsError ADense[6, 1] @test_throws BoundsError ADense[1, 6] @test copy(ADense) == ADense - @test CHOLMOD.norm_dense(ADense, 1) ≈ norm(A, 1) - @test CHOLMOD.norm_dense(ADense, 0) ≈ norm(A, Inf) + @test CHOLMOD.norm_dense(ADense, 1) ≈ opnorm(A, 1) + @test CHOLMOD.norm_dense(ADense, 0) ≈ opnorm(A, Inf) @test_throws ArgumentError CHOLMOD.norm_dense(ADense, 2) @test_throws ArgumentError CHOLMOD.norm_dense(ADense, 3) diff --git a/test/complex.jl b/test/complex.jl index 6dde71594ff53..67be8f2a5fa4d 100644 --- a/test/complex.jl +++ b/test/complex.jl @@ -929,7 +929,7 @@ end @inferred sin(x) @inferred cos(x) @inferred norm(x) - @inferred vecnorm(x) + @inferred opnorm(x) end end diff --git a/test/offsetarray.jl b/test/offsetarray.jl index 3a97c7feda760..2d5c73744e724 100644 --- a/test/offsetarray.jl +++ b/test/offsetarray.jl @@ -376,9 +376,9 @@ I = findall(!iszero, z) @test mean(A_3_3, dims=2) == median(A_3_3, dims=2) == OffsetArray(reshape([4,5,6],(3,1)), (A_3_3.offsets[1],0)) @test sum(OffsetArray(fill(1,3000), -1000)) == 3000 -@test vecnorm(v) ≈ 
vecnorm(parent(v)) -@test vecnorm(A) ≈ vecnorm(parent(A)) -@test vecdot(v, v) ≈ vecdot(v0, v0) +@test norm(v) ≈ norm(parent(v)) +@test norm(A) ≈ norm(parent(A)) +@test dot(v, v) ≈ dot(v0, v0) # Prior to its removal from Base, cumsum_kbn was used here. To achieve the same level of # accuracy in the tests, we need to use BigFloats with enlarged precision.
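Not part of the patch itself: a small usage sketch of the now-recursive `dot`, as described in the NEWS entry at the top of this diff. Assumes a Julia build with this change applied; the vectors `u` and `v` are illustrative values only.

```julia
using LinearAlgebra

u = [[1, 2], [3, 4]]
v = [[5, 6], [7, 8]]

# `dot` now recurses into the elements, i.e. it computes
# sum(dot(u[i], v[i]) for i in eachindex(u)), as `vecdot` did before:
dot(u, v)      # (1*5 + 2*6) + (3*7 + 4*8) == 70

# For plain numeric vectors nothing changes:
dot(1:4, 5:8)  # 5 + 12 + 21 + 32 == 70

# The old names keep working through the deprecation layer but warn:
# vecdot(u, v) -> dot(u, v); vecnorm(A) -> norm(A)
```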