From c6bd1b098a3c1033ff9dd4ef97ce27343a3584da Mon Sep 17 00:00:00 2001
From: Jameson Nash
Date: Tue, 10 May 2016 15:14:04 -0400
Subject: [PATCH 1/2] fix method ambiguities in base

---
 base/abstractarray.jl       |  2 +-
 base/array.jl               | 18 +++++++++---------
 base/bool.jl                |  1 +
 base/irrationals.jl         |  2 +-
 base/linalg/hessenberg.jl   |  3 ++-
 base/linalg/symmetric.jl    |  3 ++-
 base/linalg/triangular.jl   |  3 ++-
 base/multidimensional.jl    |  8 +++++---
 base/rational.jl            |  3 +++
 base/reshapedarray.jl       |  2 +-
 base/sharedarray.jl         |  4 ++--
 base/sparse/sparsematrix.jl |  2 +-
 base/subarray.jl            |  2 +-
 13 files changed, 31 insertions(+), 22 deletions(-)

diff --git a/base/abstractarray.jl b/base/abstractarray.jl
index 8a2b5dff32c2f..f9ea9a6e1fb14 100644
--- a/base/abstractarray.jl
+++ b/base/abstractarray.jl
@@ -155,7 +155,7 @@ similar{T}(a::AbstractArray{T}, dims::DimsInteger) = similar(a, T, dims)
 similar{T}(a::AbstractArray{T}, dims::Integer...) = similar(a, T, dims)
 similar( a::AbstractArray, T::Type, dims::Integer...) = similar(a, T, dims)
 # similar creates an Array by default
-similar( a::AbstractArray, T::Type, dims::DimsInteger) = Array(T, dims...)
+similar( a::AbstractArray, T::Type, dims::DimsInteger) = similar(a, T, convert(Dims, dims))
 similar( a::AbstractArray, T::Type, dims::Dims) = Array(T, dims)
 
 ## from general iterable to any array
diff --git a/base/array.jl b/base/array.jl
index 6c1918db755b6..2d91200291a79 100644
--- a/base/array.jl
+++ b/base/array.jl
@@ -117,15 +117,15 @@ end
 
 ## Constructors ##
 
-similar(a::Array, T, dims::Dims) = Array(T, dims)
-similar{T}(a::Array{T,1}) = Array(T, size(a,1))
-similar{T}(a::Array{T,2}) = Array(T, size(a,1), size(a,2))
-similar{T}(a::Array{T,1}, dims::Dims) = Array(T, dims)
-similar{T}(a::Array{T,1}, m::Int) = Array(T, m)
-similar{T}(a::Array{T,1}, S::Type) = Array(S, size(a,1))
-similar{T}(a::Array{T,2}, dims::Dims) = Array(T, dims)
-similar{T}(a::Array{T,2}, m::Int) = Array(T, m)
-similar{T}(a::Array{T,2}, S::Type) = Array(S, size(a,1), size(a,2))
+similar(a::Array, T::Type, dims::Dims) = Array(T, dims)
+similar{T}(a::Array{T,1}) = Array(T, size(a,1))
+similar{T}(a::Array{T,2}) = Array(T, size(a,1), size(a,2))
+similar{T}(a::Array{T,1}, dims::Dims) = Array(T, dims)
+similar{T}(a::Array{T,1}, m::Int) = Array(T, m)
+similar{T}(a::Array{T,1}, S::Type) = Array(S, size(a,1))
+similar{T}(a::Array{T,2}, dims::Dims) = Array(T, dims)
+similar{T}(a::Array{T,2}, m::Int) = Array(T, m)
+similar{T}(a::Array{T,2}, S::Type) = Array(S, size(a,1), size(a,2))
 
 # T[x...] constructs Array{T,1}
 function getindex(T::Type, vals...)
diff --git a/base/bool.jl b/base/bool.jl
index 67c3831f2d4e6..cef413455db46 100644
--- a/base/bool.jl
+++ b/base/bool.jl
@@ -61,4 +61,5 @@ rem(x::Bool, y::Bool) = y ? false : throw(DivideError())
 mod(x::Bool, y::Bool) = rem(x,y)
 
 promote_op(op, ::Type{Bool}, ::Type{Bool}) = typeof(op(true, true))
+promote_op(::typeof(^), ::Type{Bool}, ::Type{Bool}) = Bool
 promote_op{T<:Integer}(::typeof(^), ::Type{Bool}, ::Type{T}) = Bool
diff --git a/base/irrationals.jl b/base/irrationals.jl
index 0fb91aee4f138..e7a65dcbf4b34 100644
--- a/base/irrationals.jl
+++ b/base/irrationals.jl
@@ -127,7 +127,7 @@ for T in (Range, BitArray, StridedArray, AbstractArray)
 end
 
 log(::Irrational{:e}) = 1 # use 1 to correctly promote expressions like log(x)/log(e)
-log(::Irrational{:e}, x) = log(x)
+log(::Irrational{:e}, x::Number) = log(x)
 
 # align along = for nice Array printing
 function alignment(io::IO, x::Irrational)
diff --git a/base/linalg/hessenberg.jl b/base/linalg/hessenberg.jl
index fb3d13f6d9805..b45a1e67efdc1 100644
--- a/base/linalg/hessenberg.jl
+++ b/base/linalg/hessenberg.jl
@@ -33,7 +33,8 @@ immutable HessenbergQ{T,S<:AbstractMatrix} <: AbstractMatrix{T}
 end
 HessenbergQ{T}(factors::AbstractMatrix{T}, τ::Vector{T}) = HessenbergQ{T,typeof(factors)}(factors, τ)
 HessenbergQ(A::Hessenberg) = HessenbergQ(A.factors, A.τ)
-size(A::HessenbergQ, args...) = size(A.factors, args...)
+size(A::HessenbergQ, d) = size(A.factors, d)
+size(A::HessenbergQ) = size(A.factors)
 
 function getindex(A::Hessenberg, d::Symbol)
     d == :Q && return HessenbergQ(A)
diff --git a/base/linalg/symmetric.jl b/base/linalg/symmetric.jl
index d4ebe7344a2ca..9308eb4d654f5 100644
--- a/base/linalg/symmetric.jl
+++ b/base/linalg/symmetric.jl
@@ -22,7 +22,8 @@ end
 typealias HermOrSym{T,S} Union{Hermitian{T,S}, Symmetric{T,S}}
 typealias RealHermSymComplexHerm{T<:Real,S} Union{Hermitian{T,S}, Symmetric{T,S}, Hermitian{Complex{T},S}}
 
-size(A::HermOrSym, args...) = size(A.data, args...)
+size(A::HermOrSym, d) = size(A.data, d)
+size(A::HermOrSym) = size(A.data)
 @inline function getindex(A::Symmetric, i::Integer, j::Integer)
     @boundscheck checkbounds(A, i, j)
     @inbounds r = (A.uplo == 'U') == (i < j) ? A.data[i, j] : A.data[j, i]
diff --git a/base/linalg/triangular.jl b/base/linalg/triangular.jl
index 6a58d538c6257..7b52bd91e93ca 100644
--- a/base/linalg/triangular.jl
+++ b/base/linalg/triangular.jl
@@ -17,7 +17,8 @@ for t in (:LowerTriangular, :UnitLowerTriangular, :UpperTriangular,
             return $t{eltype(A), typeof(A)}(A)
         end
 
-        size(A::$t, args...) = size(A.data, args...)
+        size(A::$t, d) = size(A.data, d)
+        size(A::$t) = size(A.data)
 
         convert{T,S}(::Type{$t{T}}, A::$t{T,S}) = A
         convert{Tnew,Told,S}(::Type{$t{Tnew}}, A::$t{Told,S}) = (Anew = convert(AbstractMatrix{Tnew}, A.data); $t(Anew))
diff --git a/base/multidimensional.jl b/base/multidimensional.jl
index f050701da89ff..6bbc164c3838e 100644
--- a/base/multidimensional.jl
+++ b/base/multidimensional.jl
@@ -196,6 +196,11 @@ using .IteratorsMD
 # Bounds-checking specialization
 # Specializing for a fixed number of arguments provides a ~25%
 # improvement over the general definitions in abstractarray.jl
+
+# This is annoying, but we must first define logical indexing to avoid ambiguities
+_internal_checkbounds(A::AbstractVector, I::AbstractVector{Bool}) = length(A) == length(I) || throw_boundserror(A, I)
+_internal_checkbounds(A::AbstractVector, I::AbstractArray{Bool}) = size(A) == size(I) || throw_boundserror(A, I)
+
 for N = 1:5
     args = [:($(Symbol(:I, d))) for d = 1:N]
     targs = [:($(Symbol(:I, d))::Union{Colon,Number,AbstractArray}) for d = 1:N] # prevent co-opting the CartesianIndex version
@@ -215,9 +220,6 @@ for N = 1:5
         end
     end
 end
-# This is annoying, but we must also define logical indexing to avoid ambiguities
-_internal_checkbounds(A::AbstractVector, I::AbstractArray{Bool}) = size(A) == size(I) || throw_boundserror(A, I)
-_internal_checkbounds(A::AbstractVector, I::AbstractVector{Bool}) = length(A) == length(I) || throw_boundserror(A, I)
 
 # Bounds-checking with CartesianIndex
 @inline function checkbounds(::Type{Bool}, ::Tuple{}, I1::CartesianIndex)
diff --git a/base/rational.jl b/base/rational.jl
index e900d8e696c68..86bedaac2f404 100644
--- a/base/rational.jl
+++ b/base/rational.jl
@@ -340,6 +340,9 @@ function round{T}(::Type{T}, x::Rational{Bool})
     convert(T, x)
 end
 
+round{T}(::Type{T}, x::Rational{Bool}, ::RoundingMode{:Nearest}) = round(T, x)
+round{T}(::Type{T}, x::Rational{Bool}, ::RoundingMode{:NearestTiesAway}) = round(T, x)
+round{T}(::Type{T}, x::Rational{Bool}, ::RoundingMode{:NearestTiesUp}) = round(T, x)
 round{T}(::Type{T}, x::Rational{Bool}, ::RoundingMode) = round(T, x)
 
 trunc{T}(x::Rational{T}) = Rational(trunc(T,x))
diff --git a/base/reshapedarray.jl b/base/reshapedarray.jl
index d085cede6383b..66b06331405e7 100644
--- a/base/reshapedarray.jl
+++ b/base/reshapedarray.jl
@@ -72,7 +72,7 @@ size_strides(out::Tuple) = out
 size(A::ReshapedArray) = A.dims
 size(A::ReshapedArray, d) = d <= ndims(A) ? A.dims[d] : 1
 similar(A::ReshapedArray, eltype::Type) = similar(parent(A), eltype, size(A))
-similar(A::ReshapedArray, eltype::Type, dims...) = similar(parent(A), eltype, dims...)
+similar(A::ReshapedArray, eltype::Type, dims::Dims) = similar(parent(A), eltype, dims...)
 linearindexing{R<:ReshapedArrayLF}(::Type{R}) = LinearFast()
 parent(A::ReshapedArray) = A.parent
 parentindexes(A::ReshapedArray) = map(s->1:s, size(parent(A)))
diff --git a/base/sharedarray.jl b/base/sharedarray.jl
index 3e0e5ad50e941..3d64870f26208 100644
--- a/base/sharedarray.jl
+++ b/base/sharedarray.jl
@@ -430,8 +430,8 @@ function shmem_randn(dims; kwargs...)
 end
 shmem_randn(I::Int...; kwargs...) = shmem_randn(I; kwargs...)
 
-similar(S::SharedArray, T, dims::Dims) = similar(S.s, T, dims)
-similar(S::SharedArray, T) = similar(S.s, T, size(S))
+similar(S::SharedArray, T::Type, dims::Dims) = similar(S.s, T, dims)
+similar(S::SharedArray, T::Type) = similar(S.s, T, size(S))
 similar(S::SharedArray, dims::Dims) = similar(S.s, eltype(S), dims)
 similar(S::SharedArray) = similar(S.s, eltype(S), size(S))
 
diff --git a/base/sparse/sparsematrix.jl b/base/sparse/sparsematrix.jl
index 61fcfbb1c1545..b892396a3bb30 100644
--- a/base/sparse/sparsematrix.jl
+++ b/base/sparse/sparsematrix.jl
@@ -241,7 +241,7 @@ end
 
 similar(S::SparseMatrixCSC, Tv::Type=eltype(S)) = SparseMatrixCSC(S.m, S.n, copy(S.colptr), copy(S.rowval), Array(Tv, length(S.nzval)))
 similar{Tv,Ti,TvNew,TiNew}(S::SparseMatrixCSC{Tv,Ti}, ::Type{TvNew}, ::Type{TiNew}) = SparseMatrixCSC(S.m, S.n, convert(Array{TiNew},S.colptr), convert(Array{TiNew}, S.rowval), Array(TvNew, length(S.nzval)))
-similar{Tv, N}(S::SparseMatrixCSC, ::Type{Tv}, d::NTuple{N, Integer}) = spzeros(Tv, d...)
+similar{Tv}(S::SparseMatrixCSC, ::Type{Tv}, d::Dims) = spzeros(Tv, d...)
 
 function convert{Tv,Ti,TvS,TiS}(::Type{SparseMatrixCSC{Tv,Ti}}, S::SparseMatrixCSC{TvS,TiS})
     if Tv == TvS && Ti == TiS
diff --git a/base/subarray.jl b/base/subarray.jl
index 95f1f7c990422..e0a5ee2e301aa 100644
--- a/base/subarray.jl
+++ b/base/subarray.jl
@@ -67,7 +67,7 @@ viewindexing(I::Tuple{AbstractArray, Vararg{Any}}) = LinearSlow()
 size(V::SubArray) = V.dims
 length(V::SubArray) = prod(V.dims)
 
-similar(V::SubArray, T, dims::Dims) = similar(V.parent, T, dims)
+similar(V::SubArray, T::Type, dims::Dims) = similar(V.parent, T, dims)
 
 parent(V::SubArray) = V.parent
 parentindexes(V::SubArray) = V.indexes

From 85653866074551e443e10a5ff26a21401dd0f633 Mon Sep 17 00:00:00 2001
From: Jameson Nash
Date: Sat, 7 May 2016 14:27:29 -0400
Subject: [PATCH 2/2] more complete ambiguity detection

previously we stopped checking for ambiguities as soon as we reached
the definition

this assumed that jl_args_morespecific was transitive, but in actuality
it is only a partial sort

and we build the typemap assuming no ambiguities, which sometimes made
it even less accurate at finding them
---
 src/gf.c          | 99 +++++++++++++++++++++++++++++++++--------------
 test/ambiguous.jl | 18 ++++-----
 2 files changed, 79 insertions(+), 38 deletions(-)

diff --git a/src/gf.c b/src/gf.c
index c4a930151edcd..bcc1382068bae 100644
--- a/src/gf.c
+++ b/src/gf.c
@@ -744,36 +744,40 @@ struct ambiguous_matches_env {
     struct typemap_intersection_env match;
     union jl_typemap_t defs;
     jl_typemap_entry_t *newentry;
+    jl_array_t *shadowed;
+    int after;
 };
+const int eager_ambiguity_printing = 0;
 static int check_ambiguous_visitor(jl_typemap_entry_t *oldentry, struct typemap_intersection_env *closure0)
 {
     struct ambiguous_matches_env *closure = container_of(closure0, struct ambiguous_matches_env, match);
-    if (oldentry == closure->newentry)
-        return 0;  // finished once it finds the method that was just inserted
-    // TODO: instead, maybe stop once we hit something newentry is definitely more specific than
-
+    if (oldentry == closure->newentry) {
+        closure->after = 1;
+        return 1;
+    }
     union jl_typemap_t map = closure->defs;
     jl_tupletype_t *type = (jl_tupletype_t*)closure->match.type;
     jl_method_t *m = closure->newentry->func.method;
     jl_tupletype_t *sig = oldentry->sig;
     jl_value_t *isect = closure->match.ti;
+    if (sigs_eq(isect, (jl_value_t*)(closure->after ? sig : type), 1)) {
+        // we're ok if the new definition is actually the one we just
+        // inferred to be required (see issue #3609). ideally this would
+        // never happen, since if New ⊓ Old == New then we should have
+        // considered New more specific, but jl_args_morespecific is not
+        // perfect, so this is a useful fallback.
+        return 1;
+    }
+
     // we know type ∩ sig != Union{} and
-    // we know !jl_args_morespecific(type, sig)
+    // we know !jl_args_morespecific(type, sig) [before]
+    //      or !jl_args_morespecific(sig, type) [after]
     // now we are checking that the reverse is true
-    if (!jl_args_morespecific((jl_value_t*)sig, (jl_value_t*)type)) {
-        if (sigs_eq(isect, (jl_value_t*)type, 1)) {
-            // we're ok if the new definition is actually the one we just
-            // inferred to be required (see issue #3609). ideally this would
-            // never happen, since if New ⊓ Old == New then we should have
-            // considered New more specific, but jl_args_morespecific is not
-            // perfect, so this is a useful fallback.
-            return 1;
-        }
+    if (!jl_args_morespecific((jl_value_t*)(closure->after ? type : sig),
+                              (jl_value_t*)(closure->after ? sig : type))) {
         jl_typemap_entry_t *l = jl_typemap_assoc_by_type(map, (jl_tupletype_t*)isect, NULL, 0, 0, 0);
-        if (l) {
-            // ok, intersection is covered
+        if (l != NULL)  // ok, intersection is covered
             return 1;
-        }
         jl_method_t *mambig = oldentry->func.method;
         if (m->ambig == jl_nothing) {
             m->ambig = (jl_value_t*) jl_alloc_cell_1d(0);
@@ -785,13 +789,32 @@ static int check_ambiguous_visitor(jl_typemap_entry_t *oldentry, struct typemap_
         }
         jl_cell_1d_push((jl_array_t*) m->ambig, (jl_value_t*) mambig);
         jl_cell_1d_push((jl_array_t*) mambig->ambig, (jl_value_t*) m);
+        if (eager_ambiguity_printing) {
+            JL_STREAM *s = JL_STDERR;
+            jl_printf(s, "WARNING: New definition \n    ");
+            jl_static_show_func_sig(s, (jl_value_t*)type);
+            print_func_loc(s, m);
+            jl_printf(s, "\nis ambiguous with: \n    ");
+            jl_static_show_func_sig(s, (jl_value_t*)sig);
+            print_func_loc(s, oldentry->func.method);
+            jl_printf(s, ".\nTo fix, define \n    ");
+            jl_static_show_func_sig(s, isect);
+            jl_printf(s, "\nbefore the new definition.\n");
+        }
         return 1;  // there may be multiple ambiguities, keep going
     }
+    else if (closure->after) {
+        // record that this method definition is being partially replaced
+        if (closure->shadowed == NULL) {
+            closure->shadowed = jl_alloc_cell_1d(0);
+        }
+        jl_cell_1d_push(closure->shadowed, oldentry->func.value);
+    }
     return 1;
 }
 
-static void check_ambiguous_matches(union jl_typemap_t defs,
-                                    jl_typemap_entry_t *newentry)
+static jl_array_t *check_ambiguous_matches(union jl_typemap_t defs,
+                                           jl_typemap_entry_t *newentry)
 {
     jl_tupletype_t *type = newentry->sig;
     size_t l = jl_svec_len(type->parameters);
@@ -811,9 +834,12 @@ static void check_ambiguous_matches(union jl_typemap_t defs,
     env.match.env = NULL;
     env.defs = defs;
     env.newentry = newentry;
-    JL_GC_PUSH2(&env.match.env, &env.match.ti);
+    env.shadowed = NULL;
+    env.after = 0;
+    JL_GC_PUSH3(&env.match.env, &env.match.ti, &env.shadowed);
     jl_typemap_intersection_visitor(defs, 0, &env.match);
     JL_GC_POP();
+    return env.shadowed;
 }
 
 static void method_overwrite(jl_typemap_entry_t *newentry, jl_method_t *oldvalue) {
@@ -834,7 +860,7 @@ static void method_overwrite(jl_typemap_entry_t *newentry, jl_method_t *oldvalue
 }
 
 // invalidate cached methods that overlap this definition
-static void invalidate_conflicting(union jl_typemap_t *pml, jl_value_t *type, jl_value_t *parent)
+static void invalidate_conflicting(union jl_typemap_t *pml, jl_value_t *type, jl_value_t *parent, jl_array_t *shadowed)
 {
     jl_typemap_entry_t **pl;
     if (jl_typeof(pml->unknown) == (jl_value_t*)jl_typemap_level_type) {
@@ -843,7 +869,7 @@ static void invalidate_conflicting(union jl_typemap_t *pml, jl_value_t *type, jl
         for(int i=0; i < jl_array_len(cache->arg1); i++) {
             union jl_typemap_t *pl = &((union jl_typemap_t*)jl_array_data(cache->arg1))[i];
             if (pl->unknown && pl->unknown != jl_nothing) {
-                invalidate_conflicting(pl, type, (jl_value_t*)cache->arg1);
+                invalidate_conflicting(pl, type, (jl_value_t*)cache->arg1, shadowed);
             }
         }
     }
@@ -851,7 +877,7 @@ static void invalidate_conflicting(union jl_typemap_t *pml, jl_value_t *type, jl
         for(int i=0; i < jl_array_len(cache->targ); i++) {
             union jl_typemap_t *pl = &((union jl_typemap_t*)jl_array_data(cache->targ))[i];
             if (pl->unknown && pl->unknown != jl_nothing) {
-                invalidate_conflicting(pl, type, (jl_value_t*)cache->targ);
+                invalidate_conflicting(pl, type, (jl_value_t*)cache->targ, shadowed);
             }
         }
     }
@@ -862,9 +888,17 @@ static void invalidate_conflicting(union jl_typemap_t *pml, jl_value_t *type, jl
         pl = &pml->leaf;
     }
    jl_typemap_entry_t *l = *pl;
+    size_t i, n = jl_array_len(shadowed);
+    jl_value_t **d = jl_cell_data(shadowed);
    while (l != (void*)jl_nothing) {
-        if (jl_type_intersection(type, (jl_value_t*)l->sig) !=
-            (jl_value_t*)jl_bottom_type) {
+        int replaced = 0;
+        for (i = 0; i < n; i++) {
+            if (d[i] == (jl_value_t*)l->func.linfo->def) {
+                replaced = jl_type_intersection(type, (jl_value_t*)l->sig) != (jl_value_t*)jl_bottom_type;
+                break;
+            }
+        }
+        if (replaced) {
             *pl = l->next;
             jl_gc_wb(parent, *pl);
         }
@@ -893,16 +927,23 @@ void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method, jl_tupletyp
     jl_svec_t *tvars = method->tvars;
     assert(jl_is_tuple_type(type));
     JL_SIGATOMIC_BEGIN();
-    jl_value_t *oldvalue;
+    jl_value_t *oldvalue = NULL;
+    JL_GC_PUSH1(&oldvalue);
     jl_typemap_entry_t *newentry = jl_typemap_insert(&mt->defs, (jl_value_t*)mt, type, tvars, simpletype,
             jl_emptysvec, (jl_value_t*)method, 0, &method_defs, &oldvalue);
     if (oldvalue) {
         method->ambig = ((jl_method_t*)oldvalue)->ambig;
         method_overwrite(newentry, (jl_method_t*)oldvalue);
+        jl_array_t *shadowed = jl_alloc_cell_1d(1);
+        jl_cellset(shadowed, 0, oldvalue);
+        oldvalue = (jl_value_t*)shadowed;
     }
-    else
-        check_ambiguous_matches(mt->defs, newentry);
-    invalidate_conflicting(&mt->cache, (jl_value_t*)type, (jl_value_t*)mt);
+    else {
+        oldvalue = (jl_value_t*)check_ambiguous_matches(mt->defs, newentry);
+    }
+    if (oldvalue)
+        invalidate_conflicting(&mt->cache, (jl_value_t*)type, (jl_value_t*)mt, (jl_array_t*)oldvalue);
+    JL_GC_POP();
     update_max_args(mt, type);
     JL_SIGATOMIC_END();
 }
diff --git a/test/ambiguous.jl b/test/ambiguous.jl
index 12052b290bbbd..d9c0a0622a909 100644
--- a/test/ambiguous.jl
+++ b/test/ambiguous.jl
@@ -60,52 +60,52 @@ ambig(x, y::Integer) = 3
 
 # Automatic detection of ambiguities
 module Ambig1
-
 ambig(x, y) = 1
 ambig(x::Integer, y) = 2
 ambig(x, y::Integer) = 3
-
 end
 
 ambs = detect_ambiguities(Ambig1)
 @test length(ambs) == 1
 
 module Ambig2
-
 ambig(x, y) = 1
 ambig(x::Integer, y) = 2
 ambig(x, y::Integer) = 3
 ambig(x::Number, y) = 4
-
 end
 
 ambs = detect_ambiguities(Ambig2)
 @test length(ambs) == 2
 
 module Ambig3
-
 ambig(x, y) = 1
 ambig(x::Integer, y) = 2
 ambig(x, y::Integer) = 3
 ambig(x::Int, y::Int) = 4
-
 end
 
 ambs = detect_ambiguities(Ambig3)
 @test length(ambs) == 1
 
 module Ambig4
-
 ambig(x, y) = 1
 ambig(x::Int, y) = 2
 ambig(x, y::Int) = 3
 ambig(x::Int, y::Int) = 4
-
 end
-
 ambs = detect_ambiguities(Ambig4)
 @test length(ambs) == 0
 
+module Ambig5
+ambig(x::Int8, y) = 1
+ambig(x::Integer, y) = 2
+ambig(x, y::Int) = 3
+end
+
+ambs = detect_ambiguities(Ambig5)
+@test length(ambs) == 2
+
 # Test that Core and Base are free of ambiguities
 @test isempty(detect_ambiguities(Core, Base; imported=true))
 
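
Note (not part of the patch): a minimal, standalone Julia sketch of the kind of ambiguity the Ambig5 test and the "To fix, define ..." hint above are about. The module name AmbigDemo and the function f are invented for illustration, and it assumes a Julia build contemporary with this patch, where detect_ambiguities is provided by Base.Test.

    using Base.Test

    module AmbigDemo
    # Neither definition is more specific for a call like f(Int8(1), 1):
    # the first wins on the first argument, the second on the second,
    # so the pair is ambiguous over the intersection Tuple{Integer, Int}.
    f(x::Integer, y) = 1
    f(x, y::Int) = 2

    # Defining a method that covers exactly that intersection removes the
    # ambiguity, which is what the hint printed by check_ambiguous_visitor
    # (when eager_ambiguity_printing is enabled) suggests.
    f(x::Integer, y::Int) = 3
    end

    # With the disambiguating method present, nothing is reported; remove it
    # and detect_ambiguities(AmbigDemo) would return the ambiguous pair.
    @test isempty(detect_ambiguities(AmbigDemo))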