diff --git a/src/genericrand.jl b/src/genericrand.jl
index 7ec10f12f..9f737a3be 100644
--- a/src/genericrand.jl
+++ b/src/genericrand.jl
@@ -26,6 +26,7 @@ rand(rng::AbstractRNG, s::Sampleable, dim1::Int, moredims::Int...) =
 
 # default fallback (redefined for univariate distributions)
 function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate})
+    Base.depwarn("Please implement `rand(rng::AbstractRNG, s::$(typeof(s)))`. The default fallback will be removed.", :rand)
     return @inbounds rand!(rng, s, Array{eltype(s)}(undef, size(s)))
 end
 
@@ -45,7 +46,8 @@ end
 
 # this is a workaround for sampleables that incorrectly base `eltype` on the parameters
 function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate,Continuous})
-    return @inbounds rand!(rng, sampler(s), Array{float(eltype(s))}(undef, size(s)))
+    Base.depwarn("Please implement `rand(rng::AbstractRNG, s::$(typeof(s)))`. The default fallback will be removed.", :rand)
+    return @inbounds rand!(rng, s, Array{float(eltype(s))}(undef, size(s)))
 end
 
 """
@@ -63,9 +65,6 @@ form as specified above. The rules are summarized as below:
 """
 function rand! end
 Base.@propagate_inbounds rand!(s::Sampleable, X::AbstractArray) = rand!(default_rng(), s, X)
-Base.@propagate_inbounds function rand!(rng::AbstractRNG, s::Sampleable, X::AbstractArray)
-    return _rand!(rng, s, X)
-end
 
 # default definitions for arraylike variates
 @inline function rand!(
@@ -73,6 +72,7 @@ end
     s::Sampleable{ArrayLikeVariate{N}},
     x::AbstractArray{<:Real,N},
 ) where {N}
+    Base.depwarn("Please implement `Random.rand!(rng::Random.AbstractRNG, s::$(typeof(s)), x::AbstractArray{<:Real,$N})`. The default fallback will be removed.", :rand!)
     @boundscheck begin
         size(x) == size(s) || throw(DimensionMismatch("inconsistent array dimensions"))
     end
@@ -93,7 +93,8 @@ end
         throw(DimensionMismatch("inconsistent array dimensions"))
     end
     # the function barrier fixes performance issues if `sampler(s)` is type unstable
-    return _rand!(rng, sampler(s), x)
+    _rand!(rng, sampler(s), x)
+    return x
 end
 
 function _rand!(
diff --git a/src/mixtures/mixturemodel.jl b/src/mixtures/mixturemodel.jl
index c3d3b1f91..38fddbb6c 100644
--- a/src/mixtures/mixturemodel.jl
+++ b/src/mixtures/mixturemodel.jl
@@ -470,16 +470,14 @@ end
 
 Base.length(s::MixtureSampler) = length(first(s.csamplers))
 
-rand(rng::AbstractRNG, s::MixtureSampler{Univariate}) =
+# mixture sampler
+rand(rng::AbstractRNG, s::MixtureSampler) =
     rand(rng, s.csamplers[rand(rng, s.psampler)])
-rand(rng::AbstractRNG, d::MixtureModel{Univariate}) =
-    rand(rng, component(d, rand(rng, d.prior)))
 
-# multivariate mixture sampler for a vector
-_rand!(rng::AbstractRNG, s::MixtureSampler{Multivariate}, x::AbstractVector{<:Real}) =
-    @inbounds rand!(rng, s.csamplers[rand(rng, s.psampler)], x)
 # if only a single sample is requested, no alias table is created
-_rand!(rng::AbstractRNG, d::MixtureModel{Multivariate}, x::AbstractVector{<:Real}) =
-    @inbounds rand!(rng, component(d, rand(rng, d.prior)), x)
+rand(rng::AbstractRNG, d::MixtureModel) = rand(rng, component(d, rand(rng, d.prior)))
+Base.@propagate_inbounds function rand!(rng::AbstractRNG, d::MixtureModel{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,N}) where {N}
+    return rand!(rng, component(d, rand(rng, d.prior)), x)
+end
 
 sampler(d::MixtureModel) = MixtureSampler(d)
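Note: for downstream packages that start hitting the new deprecation warnings, the migration would look roughly like the following sketch. The `SphereSampler` type is hypothetical and not part of the patch; the point is that custom sampleables implement `rand` and `Random.rand!` directly instead of relying on the `_rand!` fallback chain that this PR removes.

```julia
using Distributions, Random

# Hypothetical sampler drawing uniform points on the unit 2-sphere.
struct SphereSampler <: Sampleable{Multivariate,Continuous} end

Base.length(::SphereSampler) = 3

# New-style out-of-place draw: implement `rand` directly.
function Random.rand(rng::AbstractRNG, ::SphereSampler)
    x = randn(rng, 3)
    return x ./ sqrt(sum(abs2, x))
end

# New-style in-place draw: implement `Random.rand!` with an explicit
# `@boundscheck`, so trusted callers can elide the check via `@inbounds`.
@inline function Random.rand!(rng::AbstractRNG, s::SphereSampler, x::AbstractVector{<:Real})
    @boundscheck length(x) == length(s) || throw(DimensionMismatch("inconsistent array dimensions"))
    randn!(rng, x)
    x ./= sqrt(sum(abs2, x))
    return x
end
```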
diff --git a/src/mixtures/unigmm.jl b/src/mixtures/unigmm.jl
index f1d808ec1..af0daee7b 100644
--- a/src/mixtures/unigmm.jl
+++ b/src/mixtures/unigmm.jl
@@ -25,10 +25,12 @@ probs(d::UnivariateGMM) = probs(d.prior)
 
 mean(d::UnivariateGMM) = dot(d.means, probs(d))
 
-rand(d::UnivariateGMM) = (k = rand(d.prior); d.means[k] + randn() * d.stds[k])
-
-rand(rng::AbstractRNG, d::UnivariateGMM) =
-    (k = rand(rng, d.prior); d.means[k] + randn(rng) * d.stds[k])
+function rand(rng::AbstractRNG, d::UnivariateGMM)
+    k = rand(rng, d.prior)
+    μ = d.means[k]
+    σ = d.stds[k]
+    return muladd(randn(rng, float(Base.promote_typeof(μ, σ))), σ, μ)
+end
 
 params(d::UnivariateGMM) = (d.means, d.stds, d.prior)
 
@@ -38,6 +40,22 @@ struct UnivariateGMMSampler{VT1<:AbstractVector{<:Real},VT2<:AbstractVector{<:Real}}
     psampler::AliasTable
 end
 
-rand(rng::AbstractRNG, s::UnivariateGMMSampler) =
-    (k = rand(rng, s.psampler); s.means[k] + randn(rng) * s.stds[k])
+function rand(rng::AbstractRNG, s::UnivariateGMMSampler)
+    k = rand(rng, s.psampler)
+    μ = s.means[k]
+    σ = s.stds[k]
+    return muladd(randn(rng, float(Base.promote_typeof(μ, σ))), σ, μ)
+end
+function rand!(rng::AbstractRNG, s::UnivariateGMMSampler, x::AbstractArray{<:Real})
+    psampler = s.psampler
+    means = s.means
+    stds = s.stds
+    randn!(rng, x)
+    for i in eachindex(x)
+        k = rand(rng, psampler)
+        x[i] = muladd(x[i], stds[k], means[k])
+    end
+    return x
+end
+
 sampler(d::UnivariateGMM) = UnivariateGMMSampler(d.means, d.stds, sampler(d.prior))
diff --git a/src/multivariate/dirichletmultinomial.jl b/src/multivariate/dirichletmultinomial.jl
index b8eb51991..c2d60fecc 100644
--- a/src/multivariate/dirichletmultinomial.jl
+++ b/src/multivariate/dirichletmultinomial.jl
@@ -99,8 +99,10 @@ end
 # Sampling
 rand(rng::AbstractRNG, d::DirichletMultinomial) =
     multinom_rand(rng, ntrials(d), rand(rng, Dirichlet(d.α)))
-_rand!(rng::AbstractRNG, d::DirichletMultinomial, x::AbstractVector{<:Real}) =
+@inline function rand!(rng::AbstractRNG, d::DirichletMultinomial, x::AbstractVector{<:Real})
+    @boundscheck length(d) == length(x) || throw(DimensionMismatch("inconsistent array dimensions"))
     multinom_rand!(rng, ntrials(d), rand(rng, Dirichlet(d.α)), x)
+end
 
 # Fit Model
 # Using https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2945396/pdf/nihms205488.pdf
diff --git a/src/multivariate/multinomial.jl b/src/multivariate/multinomial.jl
index 76d4776e0..dc9126ee7 100644
--- a/src/multivariate/multinomial.jl
+++ b/src/multivariate/multinomial.jl
@@ -166,7 +166,7 @@ end
 # if only a single sample is requested, no alias table is created
 rand(rng::AbstractRNG, d::Multinomial) = multinom_rand(rng, ntrials(d), probs(d))
 
-_rand!(rng::AbstractRNG, d::Multinomial, x::AbstractVector{<:Real}) =
+rand!(rng::AbstractRNG, d::Multinomial, x::AbstractVector{<:Real}) =
     multinom_rand!(rng, ntrials(d), probs(d), x)
 
 sampler(d::Multinomial) = MultinomialSampler(ntrials(d), probs(d))
diff --git a/src/multivariate/mvlogitnormal.jl b/src/multivariate/mvlogitnormal.jl
index 26af12730..47989aadb 100644
--- a/src/multivariate/mvlogitnormal.jl
+++ b/src/multivariate/mvlogitnormal.jl
@@ -99,11 +99,15 @@ function rand(rng::AbstractRNG, d::MvLogitNormal, n::Int)
     return x
 end
 
-function _rand!(rng::AbstractRNG, d::MvLogitNormal, x::AbstractVecOrMat{<:Real})
-    y = @views _drop1(x)
-    rand!(rng, d.normal, y)
-    _softmax1!(x, y)
-    return x
+for N in (1, 2)
+    @eval begin
+        Base.@propagate_inbounds function rand!(rng::AbstractRNG, d::MvLogitNormal, x::AbstractArray{<:Real,$N})
+            y = @views _drop1(x)
+            rand!(rng, d.normal, y)
+            _softmax1!(x, y)
+            return x
+        end
+    end
 end
 
 # Fitting
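Note: the `muladd(randn(rng, float(Base.promote_typeof(μ, σ))), σ, μ)` pattern makes a single draw follow the component parameter types instead of hard-coding `Float64`. A quick illustrative sketch of the intended behavior, assuming the patched `rand` (not part of the test suite):

```julia
using Distributions, Random

rng = MersenneTwister(123)
d = UnivariateGMM([-1.0f0, 1.0f0], [0.5f0, 2.0f0], Categorical([0.3, 0.7]))

# randn(rng, Float32) is scaled and shifted with muladd, so the draw
# keeps the Float32 parameter type rather than promoting to Float64.
x = rand(rng, d)
x isa Float32  # expected to hold with the patched method
```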
diff --git a/src/multivariate/mvlognormal.jl b/src/multivariate/mvlognormal.jl
index 738a93dff..b95e5018c 100644
--- a/src/multivariate/mvlognormal.jl
+++ b/src/multivariate/mvlognormal.jl
@@ -241,10 +241,14 @@ function rand(rng::AbstractRNG, d::MvLogNormal, n::Int)
     return xs
 end
 
-function _rand!(rng::AbstractRNG, d::MvLogNormal, x::AbstractVecOrMat{<:Real})
-    _rand!(rng, d.normal, x)
-    map!(exp, x, x)
-    return x
+for N in (1, 2)
+    @eval begin
+        Base.@propagate_inbounds function rand!(rng::AbstractRNG, d::MvLogNormal, x::AbstractArray{<:Real,$N})
+            rand!(rng, d.normal, x)
+            map!(exp, x, x)
+            return x
+        end
+    end
 end
 
 _logpdf(d::MvLogNormal, x::AbstractVecOrMat{T}) where {T<:Real} = insupport(d, x) ? (_logpdf(d.normal, log.(x)) - sum(log.(x))) : -Inf
diff --git a/src/multivariate/mvnormal.jl b/src/multivariate/mvnormal.jl
index 6abdd2643..95c4ff6e6 100644
--- a/src/multivariate/mvnormal.jl
+++ b/src/multivariate/mvnormal.jl
@@ -282,16 +282,16 @@ function rand(rng::AbstractRNG, d::MvNormal, n::Int)
     return x
 end
 
-function _rand!(rng::AbstractRNG, d::MvNormal, x::VecOrMat)
+Base.@propagate_inbounds function rand!(rng::AbstractRNG, d::MvNormal, x::VecOrMat{<:Real})
     unwhiten!(d.Σ, randn!(rng, x))
     x .+= d.μ
     return x
 end
 
 # Workaround: randn! only works for Array, but not generally for AbstractArray
-function _rand!(rng::AbstractRNG, d::MvNormal, x::AbstractVector)
+Base.@propagate_inbounds function rand!(rng::AbstractRNG, d::MvNormal, x::AbstractVector{<:Real})
     for i in eachindex(x)
-        @inbounds x[i] = randn(rng, eltype(x))
+        x[i] = randn(rng, eltype(x))
     end
     unwhiten!(d.Σ, x)
     x .+= d.μ
diff --git a/src/multivariate/product.jl b/src/multivariate/product.jl
index 439c6ccb7..ab2df7ba1 100644
--- a/src/multivariate/product.jl
+++ b/src/multivariate/product.jl
@@ -32,8 +32,11 @@ end
 
 length(d::Product) = length(d.v)
 
-_rand!(rng::AbstractRNG, d::Product, x::AbstractVector{<:Real}) =
-    map!(Base.Fix1(rand, rng), x, d.v)
+rand(rng::AbstractRNG, d::Product) = map(Base.Fix1(rand, rng), d.v)
+Base.@propagate_inbounds function rand!(rng::AbstractRNG, d::Product, x::AbstractVector{<:Real})
+    return map!(Base.Fix1(rand, rng), x, d.v)
+end
+
 function _logpdf(d::Product, x::AbstractVector{<:Real})
     dists = d.v
     if isempty(dists)
diff --git a/src/multivariate/vonmisesfisher.jl b/src/multivariate/vonmisesfisher.jl
index e4fe981fe..6b7fa1f2c 100644
--- a/src/multivariate/vonmisesfisher.jl
+++ b/src/multivariate/vonmisesfisher.jl
@@ -77,11 +77,10 @@ _logpdf(d::VonMisesFisher, x::AbstractVector{T}) where {T<:Real} = d.logCκ + d.
 
 sampler(d::VonMisesFisher) = VonMisesFisherSampler(d.μ, d.κ)
 
-_rand!(rng::AbstractRNG, d::VonMisesFisher, x::AbstractVector) =
-    _rand!(rng, sampler(d), x)
-_rand!(rng::AbstractRNG, d::VonMisesFisher, x::AbstractMatrix) =
-    _rand!(rng, sampler(d), x)
-
+rand(rng::AbstractRNG, d::VonMisesFisher) = rand(rng, sampler(d))
+Base.@propagate_inbounds function rand!(rng::AbstractRNG, d::VonMisesFisher, x::AbstractVector)
+    return rand!(rng, sampler(d), x)
+end
 
 ### Estimation
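Note: a side remark on the `Base.@propagate_inbounds` annotations that replace the old `_rand!` layer. The dimension check now lives in an `@boundscheck` block inside `rand!`, so it runs by default but is elided for callers that already validated sizes. A toy illustration of the mechanism, with a hypothetical `fill_ones!` that is not from the patch:

```julia
# An @boundscheck block is removed when the method is inlined into a caller
# that wraps the call in @inbounds; Base.@propagate_inbounds forces that
# inlining and forwards the caller's @inbounds state into the callee.
Base.@propagate_inbounds function fill_ones!(x::AbstractVector{<:Real}, n::Int)
    @boundscheck length(x) == n || throw(DimensionMismatch("inconsistent array dimensions"))
    fill!(x, one(eltype(x)))
    return x
end

checked(x) = fill_ones!(x, 3)             # throws DimensionMismatch if length(x) != 3
unchecked(x) = @inbounds fill_ones!(x, 3) # the check is elided at this call site
```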
""" -struct ProductDistribution{N,M,D,S<:ValueSupport,T} <: Distribution{ArrayLikeVariate{N},S} +struct ProductDistribution{N,M,D,S<:ValueSupport} <: Distribution{ArrayLikeVariate{N},S} dists::D size::Dims{N} @@ -15,7 +15,7 @@ struct ProductDistribution{N,M,D,S<:ValueSupport,T} <: Distribution{ArrayLikeVar if isempty(dists) throw(ArgumentError("a product distribution must consist of at least one distribution")) end - return new{N,M,D,_product_valuesupport(dists),_product_eltype(dists)}( + return new{N,M,D,_product_valuesupport(dists)}( dists, _product_size(dists), ) @@ -32,15 +32,11 @@ end # default definitions (type stable e.g. for arrays with concrete `eltype`) _product_valuesupport(dists) = mapreduce(value_support ∘ typeof, promote_type, dists) -_product_eltype(dists) = mapreduce(eltype, promote_type, dists) # type-stable and faster implementations for tuples function _product_valuesupport(dists::NTuple{<:Any,Distribution}) return __product_promote_type(value_support, typeof(dists)) end -function _product_eltype(dists::NTuple{<:Any,Distribution}) - return __product_promote_type(eltype, typeof(dists)) -end __product_promote_type(f::F, ::Type{Tuple{D}}) where {F,D<:Distribution} = f(D) function __product_promote_type(f::F, ::Type{T}) where {F,T} @@ -67,7 +63,7 @@ const VectorOfUnivariateDistribution{D,S<:ValueSupport,T} = ProductDistribution{ const MatrixOfUnivariateDistribution{D,S<:ValueSupport,T} = ProductDistribution{2,0,D,S,T} const ArrayOfUnivariateDistribution{N,D,S<:ValueSupport,T} = ProductDistribution{N,0,D,S,T} -const FillArrayOfUnivariateDistribution{N,D<:Fill{<:Any,N},S<:ValueSupport,T} = ProductDistribution{N,0,D,S,T} +const FillArrayOfUnivariateDistribution{N,D<:FillArrays.AbstractFill{<:Any,N},S<:ValueSupport,T} = ProductDistribution{N,0,D,S,T} ## General definitions size(d::ProductDistribution) = d.size @@ -104,8 +100,11 @@ length(d::VectorOfUnivariateDistribution) = length(d.dists) ## For matrix distributions cov(d::ProductDistribution{2}, ::Val{false}) = reshape(cov(d), size(d)..., size(d)...) -# `_rand!` for arrays of univariate distributions -function _rand!( +# Arrays of univariate distributions +function rand(rng::AbstractRNG, d::ArrayOfUnivariateDistribution) + return map(Base.Fix1(rand, rng), d.dists) +end +function rand!( rng::AbstractRNG, d::ArrayOfUnivariateDistribution{N}, x::AbstractArray{<:Real,N}, @@ -129,8 +128,14 @@ function __logpdf(d::ArrayOfUnivariateDistribution, x::AbstractArray{<:Real,N}) return sum(Broadcast.instantiate(broadcasted)) end -# more efficient implementation of `_rand!` for `Fill` array of univariate distributions -function _rand!( +# more efficient sampling for `Fill` array of univariate distributions +function rand( + rng::AbstractRNG, + d::FillArrayOfUnivariateDistribution, +) + return @inbounds rand(rng, sampler(first(d.dists)), size(d)) +end +function rand!( rng::AbstractRNG, d::FillArrayOfUnivariateDistribution{N}, x::AbstractArray{<:Real,N}, @@ -152,13 +157,19 @@ function __logpdf( return @inbounds loglikelihood(first(d.dists), x) end -# `_rand! 
diff --git a/src/samplers/multinomial.jl b/src/samplers/multinomial.jl
index c09c92ace..bd339e726 100644
--- a/src/samplers/multinomial.jl
+++ b/src/samplers/multinomial.jl
@@ -53,10 +53,12 @@ function MultinomialSampler(n::Int, prob::Vector{<:Real})
 end
 
 function rand(rng::AbstractRNG, s::MultinomialSampler)
-    return _rand!(rng, s, Vector{Int}(undef, length(s.prob)))
+    x = Vector{Int}(undef, length(s.prob))
+    return rand!(rng, s, x)
 end
 
-function _rand!(rng::AbstractRNG, s::MultinomialSampler,
+@inline function rand!(rng::AbstractRNG, s::MultinomialSampler,
                 x::AbstractVector{<:Real})
+    @boundscheck length(s) == length(x) || throw(DimensionMismatch("inconsistent array dimensions"))
     n = s.n
     k = length(s)
     if n^2 > k
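Note: the comment "if only a single sample is requested, no alias table is created" is why `rand` and `rand!` split here: a one-off draw skips sampler setup entirely, while repeated draws should go through `sampler` so the setup cost is paid once. A sketch of the intended usage, assuming the patched `rand!` methods:

```julia
using Distributions, Random

rng = MersenneTwister(1)
d = Multinomial(100, fill(0.01, 100))

x = rand(rng, d)  # single draw: samples the multinomial directly

# Many draws: construct the sampler once so its setup cost
# (e.g. the alias table) is amortized across iterations.
spl = sampler(d)
X = Matrix{Int}(undef, length(d), 1000)
for col in eachcol(X)
    rand!(rng, spl, col)
end
```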
diff --git a/src/samplers/vonmisesfisher.jl b/src/samplers/vonmisesfisher.jl
index fd3eb2df0..a77b6628d 100644
--- a/src/samplers/vonmisesfisher.jl
+++ b/src/samplers/vonmisesfisher.jl
@@ -26,8 +26,15 @@ Base.length(s::VonMisesFisherSampler) = length(s.v)
     return x
 end
 
-
-function _rand!(rng::AbstractRNG, spl::VonMisesFisherSampler, x::AbstractVector)
+# Currently, the VonMisesFisherSampler is written for `Float64`
+# TODO: Generalize to other number types
+function rand(rng::AbstractRNG, spl::VonMisesFisherSampler)
+    x = Vector{Float64}(undef, length(spl))
+    @inbounds rand!(rng, spl, x)
+    return x
+end
+@inline function rand!(rng::AbstractRNG, spl::VonMisesFisherSampler, x::AbstractVector{<:Real})
+    @boundscheck length(spl) == length(x) || throw(DimensionMismatch("inconsistent array dimensions"))
     w = _vmf_genw(rng, spl)
     p = spl.p
     x[1] = w
diff --git a/src/univariate/locationscale.jl b/src/univariate/locationscale.jl
index 7e4eda771..de08a5d7b 100644
--- a/src/univariate/locationscale.jl
+++ b/src/univariate/locationscale.jl
@@ -51,8 +51,7 @@ end
 function AffineDistribution(μ::T, σ::T, ρ::UnivariateDistribution; check_args::Bool=true) where {T<:Real}
     @check_args AffineDistribution (σ, !iszero(σ))
-    _T = promote_type(eltype(ρ), T)
-    return AffineDistribution{_T}(_T(μ), _T(σ), ρ)
+    return AffineDistribution{T}(μ, σ, ρ)
 end
 
 function AffineDistribution(μ::Real, σ::Real, ρ::UnivariateDistribution; check_args::Bool=true)
diff --git a/test/censored.jl b/test/censored.jl
index 27f30cadf..d37137078 100644
--- a/test/censored.jl
+++ b/test/censored.jl
@@ -100,7 +100,7 @@ end
 
     d = Censored(Normal(0.0, 1.0), -1, 2)
     @test d isa Censored
-    @test eltype(d) === Float64
+    @test @test_deprecated(eltype(d)) === Float64
     @test params(d) === (params(Normal(0.0, 1.0))..., -1, 2)
     @test partype(d) === Float64
     @test @inferred extrema(d) == (-1, 2)
@@ -115,7 +115,7 @@ end
 
     d = Censored(Cauchy(0, 1), nothing, 2)
     @test d isa Censored
-    @test eltype(d) === Base.promote_type(eltype(Cauchy(0, 1)), Int)
+    @test @test_deprecated(eltype(d)) === Base.promote_type(@test_deprecated(eltype(Cauchy(0, 1))), Int)
     @test params(d) === (params(Cauchy(0, 1))..., nothing, 2)
     @test partype(d) === Float64
     @test extrema(d) == (-Inf, 2.0)
@@ -129,7 +129,7 @@ end
 
     d = Censored(Gamma(1, 2), 2, nothing)
     @test d isa Censored
-    @test eltype(d) === Base.promote_type(eltype(Gamma(1, 2)), Int)
+    @test @test_deprecated(eltype(d)) === Base.promote_type(@test_deprecated(eltype(Gamma(1, 2))), Int)
     @test params(d) === (params(Gamma(1, 2))..., 2, nothing)
     @test partype(d) === Float64
     @test extrema(d) == (2.0, Inf)
diff --git a/test/cholesky/lkjcholesky.jl b/test/cholesky/lkjcholesky.jl
index b9afb59be..0de4c7d59 100644
--- a/test/cholesky/lkjcholesky.jl
+++ b/test/cholesky/lkjcholesky.jl
@@ -17,9 +17,10 @@ using FiniteDifferences
         dmat = LKJ(p, d.η)
         marginal = Distributions._marginal(dmat)
         ndraws = length(xs)
-        zs = Array{eltype(d)}(undef, p, p, ndraws)
-        for k in 1:ndraws
-            zs[:, :, k] = Matrix(xs[k])
+        zs = if VERSION >= v"1.9"
+            stack(Matrix, xs)
+        else
+            reduce((x, y) -> cat(x, y; dims=3), map(Matrix, xs))
         end
 
     @testset "LKJCholesky marginal moments" begin
@@ -132,7 +133,7 @@ using FiniteDifferences
         @test partype(d) <: Float64
 
         m = mode(d)
-        @test m isa Cholesky{eltype(d)}
+        @test m isa Cholesky{Float64}
         @test Matrix(m) ≈ I
     end
     for (d, η) in ((2, 4), (2, 1), (3, 1)), T in (Float32, Float64)
diff --git a/test/matrixreshaped.jl b/test/matrixreshaped.jl
index 8f5de605f..439526c80 100644
--- a/test/matrixreshaped.jl
+++ b/test/matrixreshaped.jl
@@ -84,7 +84,7 @@ function test_matrixreshaped(rng, d1, sizes)
     end
     @testset "MatrixReshaped eltype" begin
         for d in d1s
-            @test eltype(d) === eltype(d1)
+            @test @test_deprecated(eltype(d)) === @test_deprecated(eltype(d1))
         end
     end
     @testset "MatrixReshaped logpdf" begin
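Note: on the test side, the mechanical change is wrapping every now-deprecated `eltype` call in `@test_deprecated`. That macro asserts a deprecation warning is emitted when tests run with `--depwarn=yes` (as `Pkg.test` does by default), passes the value through unchanged under `--depwarn=no`, and returns the expression's value either way, so it nests inside `@test`. A minimal sketch with a hypothetical `old_eltype` standing in for `eltype(::Distribution)`:

```julia
using Test

# Stand-in for a deprecated accessor:
old_eltype(d) = (Base.depwarn("`old_eltype` is deprecated.", :old_eltype); Float64)

# `@test_deprecated` returns the expression's value, so it composes with `===`:
@test @test_deprecated(old_eltype(1.0)) === Float64
```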
diff --git a/test/mixture.jl b/test/mixture.jl
index 0b25a2346..2d5f14a1b 100644
--- a/test/mixture.jl
+++ b/test/mixture.jl
@@ -7,15 +7,8 @@ using ForwardDiff: Dual
 
 function test_mixture(g::UnivariateMixture, n::Int, ns::Int,
                       rng::Union{AbstractRNG, Missing} = missing)
-    if g isa UnivariateGMM
-        T = eltype(g.means)
-    else
-        T = eltype(typeof(g))
-    end
-    X = zeros(T, n)
-    for i = 1:n
-        X[i] = rand(g)
-    end
+    X = rand(g, n)
+    @test eltype(X) === @test_deprecated(eltype(g))
 
     K = ncomponents(g)
     pr = @inferred(probs(g))
diff --git a/test/multivariate/dirichlet.jl b/test/multivariate/dirichlet.jl
index 99ee4893a..01f8be11d 100644
--- a/test/multivariate/dirichlet.jl
+++ b/test/multivariate/dirichlet.jl
@@ -18,7 +18,7 @@ rng = MersenneTwister(123)
     d = Dirichlet(3, T(2))
 
     @test length(d) == 3
-    @test eltype(d) === float(T)
+    @test @test_deprecated(eltype(d)) === float(T)
     @test d.alpha == [2, 2, 2]
     @test d.alpha0 == 6
 
@@ -53,7 +53,7 @@ rng = MersenneTwister(123)
     v = [2, 1, 3]
     d = Dirichlet(T.(v))
 
-    @test eltype(d) === float(T)
+    @test @test_deprecated(eltype(d)) === float(T)
     @test Dirichlet([2, 1, 3]).alpha == d.alpha
     @test length(d) == length(v)
 
@@ -91,7 +91,7 @@ rng = MersenneTwister(123)
     v = [2, 1, 3]
     d = Dirichlet(Float32.(v))
 
-    @test eltype(d) === Float32
+    @test @test_deprecated(eltype(d)) === Float32
 
     x = func[1](d)
     @test isa(x, Vector{Float32})
diff --git a/test/multivariate/jointorderstatistics.jl b/test/multivariate/jointorderstatistics.jl
index d5d65a752..118cf04e8 100644
--- a/test/multivariate/jointorderstatistics.jl
+++ b/test/multivariate/jointorderstatistics.jl
@@ -52,7 +52,7 @@ using Distributions, LinearAlgebra, Random, SpecialFunctions, Statistics, Test
             @test length(d) == length(r)
             @test params(d) == (params(dist)..., d.n, d.ranks)
             @test partype(d) === partype(dist)
-            @test eltype(d) === eltype(dist)
+            @test @test_deprecated(eltype(d)) === @test_deprecated(eltype(dist))
 
             length(r) == n && @test JointOrderStatistics(dist, n) == d
         end
diff --git a/test/multivariate/mvlogitnormal.jl b/test/multivariate/mvlogitnormal.jl
index cb53c9b37..6b6aea016 100644
--- a/test/multivariate/mvlogitnormal.jl
+++ b/test/multivariate/mvlogitnormal.jl
@@ -15,8 +15,8 @@ function test_mvlogitnormal(d::MvLogitNormal; nsamples::Int=10^6)
     @test length(d) == length(dnorm) + 1
     @test params(d) == params(dnorm)
     @test partype(d) == partype(dnorm)
-    @test eltype(d) == eltype(dnorm)
-    @test eltype(typeof(d)) == eltype(typeof(dnorm))
+    @test @test_deprecated(eltype(d)) === @test_deprecated(eltype(dnorm))
+    @test @test_deprecated(eltype(typeof(d))) === @test_deprecated(eltype(typeof(dnorm)))
     @test location(d) == mean(dnorm)
     @test minimum(d) == fill(0, length(d))
     @test maximum(d) == fill(1, length(d))
diff --git a/test/multivariate/product.jl b/test/multivariate/product.jl
index c452f96f7..060e7b9ce 100644
--- a/test/multivariate/product.jl
+++ b/test/multivariate/product.jl
@@ -20,7 +20,7 @@ using Distributions: Product
     @test d_product isa Product
     # Check that methods for `Product` are consistent.
     @test length(d_product) == length(ds)
-    @test eltype(d_product) === eltype(ds[1])
+    @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
     @test @inferred(logpdf(d_product, x)) ≈ sum(logpdf.(ds, x))
     @test mean(d_product) == mean.(ds)
     @test var(d_product) == var.(ds)
@@ -43,7 +43,7 @@ end
     @test d_product isa Product
     # Check that methods for `Product` are consistent.
     @test length(d_product) == length(ds)
-    @test eltype(d_product) === eltype(ds[1])
+    @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
     @test @inferred(logpdf(d_product, x)) ≈ sum(logpdf.(ds, x))
     @test mean(d_product) == mean.(ds)
     @test var(d_product) == var.(ds)
@@ -73,7 +73,7 @@ end
     @test d_product isa Product
     # Check that methods for `Product` are consistent.
    @test length(d_product) == length(ds)
-    @test eltype(d_product) === eltype(ds[1])
+    @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
     @test @inferred(logpdf(d_product, x)) ≈ sum(logpdf.(ds, x))
     @test mean(d_product) == mean.(ds)
     @test var(d_product) == var.(ds)
diff --git a/test/product.jl b/test/product.jl
index 16f12328d..05bbb2ddc 100644
--- a/test/product.jl
+++ b/test/product.jl
@@ -23,7 +23,7 @@ using LinearAlgebra
     # Check that methods for `ProductDistribution` are consistent.
     for (ds, d_product) in ((ds1, d_product1), (ds2, d_product2))
         @test length(d_product) == length(ds)
-        @test eltype(d_product) === eltype(ds[1])
+        @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
         @test mean(d_product) == mean.(ds)
         @test var(d_product) == var.(ds)
         @test cov(d_product) == Diagonal(var.(ds))
@@ -63,7 +63,7 @@ end
     # Check that methods for `VectorOfUnivariateDistribution` are consistent.
     for (ds, d_product) in ((ds1, d_product1), (ds1, d_product2), (ds3, d_product3))
         @test length(d_product) == length(ds)
-        @test eltype(d_product) === eltype(ds[1])
+        @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
         @test @inferred(mean(d_product)) == mean.(ds)
         @test @inferred(var(d_product)) == var.(ds)
         @test @inferred(cov(d_product)) == Diagonal(var.(ds))
@@ -113,7 +113,7 @@ end
     # Check that methods for `VectorOfUnivariateDistribution` are consistent.
     for (ds, d_product) in ((ds1, d_product1), (ds1, d_product3), (ds3, d_product2))
         @test length(d_product) == length(ds)
-        @test eltype(d_product) === eltype(ds[1])
+        @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
         @test @inferred(mean(d_product)) == mean.(ds)
         @test @inferred(var(d_product)) == var.(ds)
         @test @inferred(cov(d_product)) == Diagonal(var.(ds))
@@ -148,7 +148,7 @@ end
     ds_vec = vcat(ds...)
 
     @test length(d_product) == 3
-    @test eltype(d_product) === Float64
+    @test @test_deprecated(eltype(d_product)) === Float64
     @test @inferred(mean(d_product)) == mean.(ds_vec)
     @test @inferred(var(d_product)) == var.(ds_vec)
     @test @inferred(cov(d_product)) == Diagonal(var.(ds_vec))
@@ -187,7 +187,7 @@ end
     # Check that methods for `MatrixOfUnivariateDistribution` are consistent.
     for (ds, d_product) in ((ds1, d_product1), (ds2, d_product2))
         @test size(d_product) == size(ds)
-        @test eltype(d_product) === eltype(ds[1])
+        @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
         @test @inferred(mean(d_product)) == mean.(ds)
         @test @inferred(var(d_product)) == var.(ds)
         @test @inferred(cov(d_product)) == Diagonal(vec(var.(ds)))
@@ -225,7 +225,7 @@ end
     # Check that methods for `VectorOfMultivariateDistribution` are consistent.
     for (ds, d_product) in ((ds1, d_product1), (ds2, d_product2))
         @test size(d_product) == (length(ds[1]), size(ds)...)
-        @test eltype(d_product) === eltype(ds[1])
+        @test @test_deprecated(eltype(d_product)) === @test_deprecated(eltype(ds[1]))
         @test @inferred(mean(d_product)) == reshape(mapreduce(mean, (x, y) -> cat(x, y; dims=ndims(ds) + 1), ds), size(d_product))
         @test @inferred(var(d_product)) == reshape(mapreduce(var, (x, y) -> cat(x, y; dims=ndims(ds) + 1), ds), size(d_product))
         @test @inferred(cov(d_product)) == Diagonal(mapreduce(var, vcat, ds))
diff --git a/test/reshaped.jl b/test/reshaped.jl
index 54bf9be8d..30dcd408b 100644
--- a/test/reshaped.jl
+++ b/test/reshaped.jl
@@ -78,7 +78,7 @@
 
     # eltype
     for d in d1s
-        @test eltype(d) === eltype(d1)
+        @test @test_deprecated(eltype(d)) === @test_deprecated(eltype(d1))
     end
 
     # logpdf
diff --git a/test/testutils.jl b/test/testutils.jl
index 9af33584a..d78f9859f 100644
--- a/test/testutils.jl
+++ b/test/testutils.jl
@@ -360,9 +360,8 @@ end
 
 function get_evalsamples(d::DiscreteUnivariateDistribution, q::Float64)
     # samples for testing evaluation functions (even spacing)
-    T = eltype(typeof(d))
-    lv = (islowerbounded(d) ? minimum(d) : floor(T,quantile(d, q/2)))::T
-    hv = (isupperbounded(d) ? maximum(d) : ceil(T,cquantile(d, q/2)))::T
+    lv = islowerbounded(d) ? minimum(d) : quantile(d, q/2)
+    hv = isupperbounded(d) ? maximum(d) : cquantile(d, q/2)
     @assert lv <= hv
     return lv:hv
 end
diff --git a/test/types.jl b/test/types.jl
index 4eefd5d96..05e2689c0 100644
--- a/test/types.jl
+++ b/test/types.jl
@@ -34,8 +34,8 @@ using ForwardDiff: Dual
             Distributions.mvtdist(one(T), Matrix{T}(I, 2, 2)),
         )
         for dist in dists
-            @test eltype(typeof(dist)) === T
-            @test eltype(rand(dist)) === eltype(dist)
+            @test @test_deprecated(eltype(typeof(dist))) === T
+            @test eltype(rand(dist)) === @test_deprecated(eltype(dist))
         end
     end
 end
diff --git a/test/univariate/continuous.jl b/test/univariate/continuous.jl
index 3a1da94b9..a52e75b09 100644
--- a/test/univariate/continuous.jl
+++ b/test/univariate/continuous.jl
@@ -32,11 +32,11 @@ using ForwardDiff
     n64 = Normal(1., 0.1)
     nbig = Normal(big(pi), big(ℯ))
 
-    @test eltype(typeof(n32)) === Float32
+    @test @test_deprecated(eltype(typeof(n32))) === Float32
     @test eltype(rand(n32)) === Float32
     @test eltype(rand(n32, 4)) === Float32
 
-    @test eltype(typeof(n64)) === Float64
+    @test @test_deprecated(eltype(typeof(n64))) === Float64
     @test eltype(rand(n64)) === Float64
     @test eltype(rand(n64, 4)) === Float64
 end
diff --git a/test/univariate/continuous/gumbel.jl b/test/univariate/continuous/gumbel.jl
index 470680ee2..b474c5077 100644
--- a/test/univariate/continuous/gumbel.jl
+++ b/test/univariate/continuous/gumbel.jl
@@ -1,8 +1,8 @@
 @testset "Gumbel" begin
     @testset "eltype" begin
-        @test eltype(Gumbel()) === Float64
-        @test eltype(Gumbel(1f0)) === Float32
-        @test eltype(Gumbel{Int}(0, 1)) === Int
+        @test @test_deprecated(eltype(Gumbel())) === Float64
+        @test @test_deprecated(eltype(Gumbel(1f0))) === Float32
+        @test @test_deprecated(eltype(Gumbel{Int}(0, 1))) === Float64
     end
 
     @testset "rand" begin
diff --git a/test/univariate/continuous/johnsonsu.jl b/test/univariate/continuous/johnsonsu.jl
index 716f1b1df..a64a83392 100644
--- a/test/univariate/continuous/johnsonsu.jl
+++ b/test/univariate/continuous/johnsonsu.jl
@@ -6,7 +6,7 @@
         @test shape(d1) == 0.0
         @test scale(d1) == 10.0
         @test partype(d1) === Float64
-        @test eltype(d1) === Float64
+        @test @test_deprecated(eltype(d1)) === Float64
         @test rand(d1) isa Float64
 
         @test median(d1) == quantile(d1, 0.5)
@@ -24,7 +24,7 @@
         @test shape(d1) == 10.0f0
         @test scale(d1) == 10.0f0
         @test partype(d1) === Float32
-        @test eltype(d1) === Float64
+        @test @test_deprecated(eltype(d1)) === Float64
         @test rand(d1) isa Float64
 
         d1 = JohnsonSU(1.0, 1, 0, 1)
diff --git a/test/univariate/continuous/loguniform.jl b/test/univariate/continuous/loguniform.jl
index 3ba88f021..c5e596de2 100644
--- a/test/univariate/continuous/loguniform.jl
+++ b/test/univariate/continuous/loguniform.jl
@@ -21,9 +21,9 @@ import Random
     end
 
     d = LogUniform(1,10)
-    @test eltype(d) === Float64
+    @test @test_deprecated(eltype(d)) === Float64
     @test 1 <= rand(rng, d) <= 10
-    @test rand(rng, d) isa eltype(d)
+    @test rand(rng, d) isa @test_deprecated(eltype(d))
     @test @inferred(quantile(d, 0)) ≈ 1
     @test quantile(d, 0.5) ≈ sqrt(10) # geomean
     @test quantile(d, 1) ≈ 10
diff --git a/test/univariate/continuous/rician.jl b/test/univariate/continuous/rician.jl
index a75397f89..e17550040 100644
--- a/test/univariate/continuous/rician.jl
+++ b/test/univariate/continuous/rician.jl
@@ -6,7 +6,7 @@
     @test shape(d1) == 0.0
     @test scale(d1) == 200.0
     @test partype(d1) === Float64
-    @test eltype(d1) === Float64
+    @test @test_deprecated(eltype(d1)) === Float64
     @test rand(d1) isa Float64
 
     d2 = Rayleigh(10.0)
@@ -35,7 +35,7 @@
     @test shape(d1) == 0.5f0
     @test scale(d1) == 300.0f0
     @test partype(d1) === Float32
-    @test eltype(d1) === Float64
+    @test @test_deprecated(eltype(d1)) === Float64
     @test rand(d1) isa Float64
 
     d1 = Rician()
diff --git a/test/univariate/discrete/bernoullilogit.jl b/test/univariate/discrete/bernoullilogit.jl
index 55560692a..9dacf6967 100644
--- a/test/univariate/discrete/bernoullilogit.jl
+++ b/test/univariate/discrete/bernoullilogit.jl
@@ -10,7 +10,7 @@ using Test, Random
         @test d isa BernoulliLogit{typeof(logitp)}
         @test convert(typeof(d), d) === d
         @test convert(BernoulliLogit{Float16}, d) === BernoulliLogit(Float16(logitp))
-        @test eltype(typeof(d)) === Bool
+        @test @test_deprecated(eltype(typeof(d))) === Bool
        @test params(d) == (logitp,)
         @test partype(d) === typeof(logitp)
     end
diff --git a/test/univariate/locationscale.jl b/test/univariate/locationscale.jl
index 5d5f9640b..3f5cb401e 100644
--- a/test/univariate/locationscale.jl
+++ b/test/univariate/locationscale.jl
@@ -4,7 +4,7 @@ function test_location_scale(
     )
     d = Distributions.AffineDistribution(μ, σ, ρ)
     @test params(d) == (μ,σ,ρ)
-    @test eltype(d) === eltype(dref)
+    @test @test_deprecated(eltype(d)) === @test_deprecated(eltype(dref))
 
     # Different ways to construct the AffineDistribution object
     if dref isa DiscreteDistribution
@@ -110,11 +110,10 @@ function test_location_scale(
     @test invlogccdf(dtest, log(0.5)) ≈ invlogccdf(dref, log(0.5))
     @test invlogccdf(dtest, log(0.8)) ≈ invlogccdf(dref, log(0.8))
 
-    r = Array{float(eltype(dtest))}(undef, 200000)
-    if ismissing(rng)
-        rand!(dtest, r)
+    r = if ismissing(rng)
+        rand(dtest, 200_000)
     else
-        rand!(rng, dtest, r)
+        rand(rng, dtest, 200_000)
     end
     @test mean(r) ≈ mean(dref) atol=0.02
     @test std(r) ≈ std(dref) atol=0.01