RFC: Deprecate implicit scalar broadcasting in setindex! (#26347)
The current `setindex!` function (the `A[i] = b` syntax) does three very different operations:
* Given an index `i` that refers to only one location (scalar indexing), `A[i] = b` modifies `A` in that location to hold `b`.
* Given an index `I` that refers to more than one location (non-scalar indexing), `A[I] = b` can mean one of two things:
    * If `b` is an AbstractArray (multi-value), assign the values it contains to those locations `I` in `A`. That is, essentially, `[A[i] = bᵢ for (i, bᵢ) in zip(I, b)]`.
    * If `b` is not an AbstractArray (scalar-value), then broadcast its value to every location selected by `I` in `A`.

These two different behaviors in the non-scalar case make it essentially impossible to use this function generically.  If you want to _always_ set the value `b` at many indices of `A`, you _cannot_ use this syntax, because `b` might happen to be an array in some cases, radically changing the meaning of the expression; instead you have to iterate over the indices manually, using scalar `setindex!` calls.  But we now also have the `broadcast!` syntax, `A[I] .= B`, which uses the usual broadcasting semantics to determine how `B` should fill the locations of `A` selected by `I`.

This PR deprecates the implicit broadcasting of scalar values in non-scalar indexing in favor of an explicit `.=` broadcast syntax, leaving multi-value non-scalar indexing intact.  This is the _exact opposite_ of PR #24368.
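To make the change concrete, here is a small illustrative sketch (the array `A`, the index range `I`, and the values are hypothetical, not taken from the patch):

```julia
A = zeros(Int, 5)
I = 2:4

# Multi-value non-scalar indexing is unchanged: the right-hand side's
# contents are copied elementwise into the selected locations.
A[I] = [10, 20, 30]      # A == [0, 10, 20, 30, 0]

# Implicitly broadcasting a scalar across many locations is what this PR
# deprecates:
A[I] = 7                 # deprecation warning

# The explicit replacements:
A[I] .= 7                # broadcasted assignment
fill!(view(A, I), 7)     # equivalent alternative
```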
mbauman authored May 2, 2018
1 parent 5a062fa commit 7e2ce0e
Showing 33 changed files with 316 additions and 246 deletions.
12 changes: 12 additions & 0 deletions NEWS.md
@@ -697,6 +697,18 @@ Deprecated or removed
`Matrix{Int}(undef, (2, 4))`, and `Array{Float32,3}(11, 13, 17)` is now
`Array{Float32,3}(undef, 11, 13, 17)` ([#24781]).

* Previously `setindex!(A, x, I...)` (and the syntax `A[I...] = x`) supported two
different modes of operation when supplied with a set of non-scalar indices `I`
(e.g., at least one index is an `AbstractArray`) depending upon the value of `x`
  on the right-hand side. If `x` is an `AbstractArray`, its _contents_ are copied
  elementwise into the locations in `A` selected by `I`, and `x` must have exactly
  as many elements as `I` selects locations. Otherwise, if `x` is not an
  `AbstractArray`, its _value_ is implicitly broadcast to every location in `A`
  selected by `I`. This latter behavior (implicitly broadcasting a "scalar"-like
  value across many locations) is now deprecated in favor of the explicit
  broadcasted assignment syntax `A[I...] .= x` or `fill!(view(A, I...), x)`
([#26347]).

* `LinAlg.fillslots!` has been renamed `LinAlg.fillstored!` ([#25030]).

* `fill!(A::Diagonal, x)` and `fill!(A::AbstractTriangular, x)` have been deprecated
22 changes: 14 additions & 8 deletions base/abstractarray.jl
@@ -1181,8 +1181,8 @@ function get!(X::AbstractArray{T}, A::AbstractArray, I::Union{AbstractRange,Abst
# Linear indexing
ind = findall(in(1:length(A)), I)
X[ind] = A[I[ind]]
X[1:first(ind)-1] = default
X[last(ind)+1:length(X)] = default
fill!(view(X, 1:first(ind)-1), default)
fill!(view(X, last(ind)+1:length(X)), default)
X
end

@@ -1380,7 +1384,11 @@ function _cat(A, shape::NTuple{N}, catdims, X...) where N
end
end
I::NTuple{N, UnitRange{Int}} = (inds...,)
A[I...] = x
if x isa AbstractArray
A[I...] = x
else
fill!(view(A, I...), x)
end
end
return A
end
@@ -1930,27 +1934,27 @@ function mapslices(f, A::AbstractArray, dims::AbstractVector)
ridx[d] = axes(R,d)
end

R[ridx...] = r1
concatenate_setindex!(R, r1, ridx...)

nidx = length(otherdims)
indices = Iterators.drop(CartesianIndices(itershape), 1)
indices = Iterators.drop(CartesianIndices(itershape), 1) # skip the first element, we already handled it
inner_mapslices!(safe_for_reuse, indices, nidx, idx, otherdims, ridx, Aslice, A, f, R)
end

@noinline function inner_mapslices!(safe_for_reuse, indices, nidx, idx, otherdims, ridx, Aslice, A, f, R)
if safe_for_reuse
# when f returns an array, R[ridx...] = f(Aslice) line copies elements,
# so we can reuse Aslice
for I in indices # skip the first element, we already handled it
for I in indices
replace_tuples!(nidx, idx, ridx, otherdims, I)
_unsafe_getindex!(Aslice, A, idx...)
R[ridx...] = f(Aslice)
concatenate_setindex!(R, f(Aslice), ridx...)
end
else
# we can't guarantee safety (#18524), so allocate new storage for each slice
for I in indices
replace_tuples!(nidx, idx, ridx, otherdims, I)
R[ridx...] = f(A[idx...])
concatenate_setindex!(R, f(A[idx...]), ridx...)
end
end

@@ -1963,6 +1967,8 @@ function replace_tuples!(nidx, idx, ridx, otherdims, I)
end
end

concatenate_setindex!(R, v, I...) = (R[I...] .= (v,); R)
concatenate_setindex!(R, X::AbstractArray, I...) = (R[I...] = X)

## 1 argument

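A note on the `concatenate_setindex!` helper added above: wrapping the value in a one-element tuple, `R[I...] .= (v,)`, makes broadcasting treat `v` as a single element even when `v` is itself iterable, so every selected location receives the same object. A minimal sketch of that idiom (the names below are illustrative only, not part of the patch):

```julia
R = Vector{Any}(undef, 3)

# Broadcasting a container directly tries to match its elements against the
# destination's shape, so this would throw a DimensionMismatch:
#   R[1:3] .= [1, 2]

# Wrapping the container in a 1-tuple broadcasts it as a single element, so
# each selected slot ends up holding the same object:
R[1:3] .= ([1, 2],)      # R == [[1, 2], [1, 2], [1, 2]]
```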
6 changes: 1 addition & 5 deletions base/abstractarraymath.jl
@@ -363,10 +363,6 @@ _rshps(shp, shp_i, sz, i, ::Tuple{}) =
_reperr(s, n, N) = throw(ArgumentError("number of " * s * " repetitions " *
"($n) cannot be less than number of dimensions of input ($N)"))

# We need special handling when repeating arrays of arrays
cat_fill!(R, X, inds) = (R[inds...] = X)
cat_fill!(R, X::AbstractArray, inds) = fill!(view(R, inds...), X)

@noinline function _repeat(A::AbstractArray, inner, outer)
shape, inner_shape = rep_shapes(A, inner, outer)

@@ -385,7 +381,7 @@ cat_fill!(R, X::AbstractArray, inds) = fill!(view(R, inds...), X)
n = inner[i]
inner_indices[i] = (1:n) .+ ((c[i] - 1) * n)
end
cat_fill!(R, A[c], inner_indices)
fill!(view(R, inner_indices...), A[c])
end
end

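The `_repeat` change above can drop the `cat_fill!` dispatch because `fill!(view(R, inds...), X)` stores the given value, as a whole, in every selected slot, which also covers element types that are themselves arrays. A small sketch of that behavior (illustrative values, assuming an element type of `Vector{Int}`):

```julia
R = Vector{Vector{Int}}(undef, 4)

# fill! assigns the same value to each slot of the view; when the value is an
# array, every slot aliases that one array rather than copying its elements.
fill!(view(R, 1:2), [1, 2])
fill!(view(R, 3:4), [3, 4])
# R == [[1, 2], [1, 2], [3, 4], [3, 4]]
```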
26 changes: 0 additions & 26 deletions base/array.jl
@@ -706,29 +706,6 @@ function setindex! end
@eval setindex!(A::Array{T}, x, i1::Int, i2::Int, I::Int...) where {T} =
(@_inline_meta; arrayset($(Expr(:boundscheck)), A, convert(T,x)::T, i1, i2, I...))

# These are redundant with the abstract fallbacks but needed for bootstrap
function setindex!(A::Array, x, I::AbstractVector{Int})
@_propagate_inbounds_meta
I′ = unalias(A, I)
for i in I′
A[i] = x
end
return A
end
function setindex!(A::Array, X::AbstractArray, I::AbstractVector{Int})
@_propagate_inbounds_meta
@boundscheck setindex_shape_check(X, length(I))
X′ = unalias(A, X)
I′ = unalias(A, I)
count = 1
for i in I′
@inbounds x = X′[count]
A[i] = x
count += 1
end
return A
end

# Faster contiguous setindex! with copyto!
function setindex!(A::Array{T}, X::Array{T}, I::UnitRange{Int}) where T
@_inline_meta
@@ -750,9 +727,6 @@ function setindex!(A::Array{T}, X::Array{T}, c::Colon) where T
return A
end

setindex!(A::Array, x::Number, ::Colon) = fill!(A, x)
setindex!(A::Array{T, N}, x::Number, ::Vararg{Colon, N}) where {T, N} = fill!(A, x)

# efficiently grow an array

_growbeg!(a::Vector, delta::Integer) =
38 changes: 2 additions & 36 deletions base/bitarray.jl
@@ -584,7 +584,7 @@ function gen_bitarray_from_itr(itr, st)
end
end
if ind > 1
@inbounds C[ind:bitcache_size] = false
@inbounds C[ind:bitcache_size] .= false
resize!(B, length(B) + ind - 1)
dumpbitcache(Bc, cind, C)
end
@@ -608,7 +608,7 @@ function fill_bitarray_from_itr!(B::BitArray, itr, st)
end
end
if ind > 1
@inbounds C[ind:bitcache_size] = false
@inbounds C[ind:bitcache_size] .= false
dumpbitcache(Bc, cind, C)
end
return B
@@ -653,44 +653,10 @@ end
indexoffset(i) = first(i)-1
indexoffset(::Colon) = 0

@inline function setindex!(B::BitArray, x, J0::Union{Colon,UnitRange{Int}})
I0 = to_indices(B, (J0,))[1]
@boundscheck checkbounds(B, I0)
y = Bool(x)
l0 = length(I0)
l0 == 0 && return B
f0 = indexoffset(I0)+1
fill_chunks!(B.chunks, y, f0, l0)
return B
end
@propagate_inbounds function setindex!(B::BitArray, X::AbstractArray, J0::Union{Colon,UnitRange{Int}})
_setindex!(IndexStyle(B), B, X, to_indices(B, (J0,))[1])
end

# logical indexing

# When indexing with a BitArray, we can operate whole chunks at a time for a ~100x gain
@inline function setindex!(B::BitArray, x, I::BitArray)
@boundscheck checkbounds(B, I)
_unsafe_setindex!(B, x, I)
end
function _unsafe_setindex!(B::BitArray, x, I::BitArray)
y = convert(Bool, x)
Bc = B.chunks
Ic = I.chunks
length(Bc) == length(Ic) || throw_boundserror(B, I)
@inbounds if y
for i = 1:length(Bc)
Bc[i] |= Ic[i]
end
else
for i = 1:length(Bc)
Bc[i] &= ~Ic[i]
end
end
return B
end

# Assigning an array of bools is more complicated, but we can still do some
# work on chunks by combining X and I 64 bits at a time to improve perf by ~40%
@inline function setindex!(B::BitArray, X::AbstractArray, I::BitArray)
8 changes: 6 additions & 2 deletions base/bitset.jl
@@ -114,12 +114,16 @@ end
@inline function _growend0!(b::Bits, nchunks::Int)
len = length(b)
_growend!(b, nchunks)
@inbounds b[len+1:end] = CHK0 # resize! gives dirty memory
for i in len+1:length(b)
@inbounds b[i] = CHK0 # resize! gives dirty memory
end
end

@inline function _growbeg0!(b::Bits, nchunks::Int)
_growbeg!(b, nchunks)
@inbounds b[1:nchunks] = CHK0
for i in 1:nchunks
@inbounds b[i] = CHK0
end
end

function _matched_map!(f, s1::BitSet, s2::BitSet)
35 changes: 34 additions & 1 deletion base/broadcast.jl
@@ -832,7 +832,7 @@ end
end
end
if ind > 1
@inbounds tmp[ind:bitcache_size] = false
@inbounds tmp[ind:bitcache_size] .= false
dumpbitcache(destc, cind, tmp)
end
return dest
@@ -1108,6 +1108,39 @@ See [`broadcast_getindex`](@ref) for examples of the treatment of `inds`.
end
end

## In specific instances, we can broadcast masked BitArrays whole chunks at a time
# Very intentionally do not support much functionality here: scalar indexing would be O(n)
struct BitMaskedBitArray{N,M}
parent::BitArray{N}
mask::BitArray{M}
BitMaskedBitArray{N,M}(parent, mask) where {N,M} = new(parent, mask)
end
@inline function BitMaskedBitArray(parent::BitArray{N}, mask::BitArray{M}) where {N,M}
@boundscheck checkbounds(parent, mask)
BitMaskedBitArray{N,M}(parent, mask)
end
Base.@propagate_inbounds dotview(B::BitArray, i::BitArray) = BitMaskedBitArray(B, i)
Base.show(io::IO, B::BitMaskedBitArray) = foreach(arg->show(io, arg), (typeof(B), (B.parent, B.mask)))
# Override materialize! to prevent the BitMaskedBitArray from escaping to an overrideable method
@inline materialize!(B::BitMaskedBitArray, bc::Broadcasted{<:Any,<:Any,typeof(identity),Tuple{Bool}}) = fill!(B, bc.args[1])
@inline materialize!(B::BitMaskedBitArray, bc::Broadcasted{<:Any}) = materialize!(SubArray(B.parent, to_indices(B.parent, (B.mask,))), bc)
function Base.fill!(B::BitMaskedBitArray, b::Bool)
Bc = B.parent.chunks
Ic = B.mask.chunks
@inbounds if b
for i = 1:length(Bc)
Bc[i] |= Ic[i]
end
else
for i = 1:length(Bc)
Bc[i] &= ~Ic[i]
end
end
return B
end



############################################################

# x[...] .= f.(y...) ---> broadcast!(f, dotview(x, ...), y...).
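For context on the `BitMaskedBitArray` machinery added above: it lets a masked broadcasted assignment between `BitArray`s operate on whole 64-bit chunks instead of one `Bool` at a time. A hedged usage sketch (sizes are illustrative, not benchmarked):

```julia
using Random

B = falses(10^6)
mask = bitrand(10^6)

# dotview(B, mask) produces a BitMaskedBitArray, and materialize! routes a
# constant right-hand side to the chunk-wise fill! defined above:
B[mask] .= true
```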
5 changes: 4 additions & 1 deletion base/char.jl
@@ -215,7 +215,10 @@ widen(::Type{T}) where {T<:AbstractChar} = T
print(io::IO, c::Char) = (write(io, c); nothing)
print(io::IO, c::AbstractChar) = print(io, Char(c)) # fallback: convert to output UTF-8

const hex_chars = UInt8['0':'9';'a':'z']
const hex_chars = UInt8['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
's', 't', 'u', 'v', 'w', 'x', 'y', 'z']

function show_invalid(io::IO, c::Char)
write(io, 0x27)
4 changes: 3 additions & 1 deletion base/compiler/ssair/ir.jl
@@ -588,7 +588,9 @@ function resize!(compact::IncrementalCompact, nnewnodes)
resize!(compact.result_lines, nnewnodes)
resize!(compact.result_flags, nnewnodes)
resize!(compact.used_ssas, nnewnodes)
compact.used_ssas[(old_length+1):nnewnodes] = 0
for i in (old_length+1):nnewnodes
compact.used_ssas[i] = 0
end
nothing
end

2 changes: 1 addition & 1 deletion base/compiler/ssair/slot2ssa.jl
@@ -377,8 +377,8 @@ function domsort_ssa!(ir::IRCode, domtree::DomTree)
end
old_inst_range = ir.cfg.blocks[bb].stmts
inst_range = (bb_start_off+1):(bb_start_off+length(old_inst_range))
inst_rename[old_inst_range] = Any[SSAValue(x) for x in inst_range]
for (nidx, idx) in zip(inst_range, old_inst_range)
inst_rename[idx] = SSAValue(nidx)
stmt = ir.stmts[idx]
if isa(stmt, PhiNode)
result_stmts[nidx] = rename_phinode_edges(stmt, bb, result_order, bb_rename)
21 changes: 21 additions & 0 deletions base/deprecated.jl
@@ -1491,6 +1491,27 @@ function slicedim(A::AbstractVector, d::Integer, i::Number)
end
end

# PR #26347: Deprecate implicit scalar broadcasting in setindex!
_axes(::Ref) = ()
_axes(x) = axes(x)
function deprecate_scalar_setindex_broadcast_message(v, I...)
value = (_axes(Base.Broadcast.broadcastable(v)) == () ? "x" : "(x,)")
"using `A[I...] = x` to implicitly broadcast `x` across many locations is deprecated. Use `A[I...] .= $value` instead."
end
deprecate_scalar_setindex_broadcast_message(v, ::Colon, ::Vararg{Colon}) =
"using `A[:] = x` to implicitly broadcast `x` across many locations is deprecated. Use `fill!(A, x)` instead."

function _iterable(v, I...)
depwarn(deprecate_scalar_setindex_broadcast_message(v, I...), :setindex!)
Iterators.repeated(v)
end
function setindex!(B::BitArray, x, I0::Union{Colon,UnitRange{Int}}, I::Union{Int,UnitRange{Int},Colon}...)
depwarn(deprecate_scalar_setindex_broadcast_message(x, I0, I...), :setindex!)
B[I0, I...] .= (x,)
B
end


# PR #26283
@deprecate contains(haystack, needle) occursin(needle, haystack)
@deprecate contains(s::AbstractString, r::Regex, offset::Integer) occursin(r, s, offset=offset)
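The deprecation shims above choose their suggested replacement from the indices: all-`Colon` indexing suggests `fill!`, anything else suggests broadcasted assignment. Roughly what a user sees during the deprecation period (a sketch; the warning text is paraphrased and the exact dispatch path is assumed, not quoted from the patch):

```julia
A = zeros(3)

A[1:2] = 0     # depwarn suggesting `A[I...] .= x`
A[:]   = 0     # depwarn suggesting `fill!(A, x)`

# The non-deprecated spellings:
A[1:2] .= 0
fill!(A, 0)
```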
4 changes: 2 additions & 2 deletions base/iobuffer.jl
@@ -117,7 +117,7 @@ function IOBuffer(;
append=flags.append,
truncate=flags.truncate,
maxsize=maxsize)
buf.data[:] = 0
fill!(buf.data, 0)
return buf
end

@@ -246,7 +246,7 @@ function truncate(io::GenericIOBuffer, n::Integer)
if n > length(io.data)
resize!(io.data, n)
end
io.data[io.size+1:n] = 0
io.data[io.size+1:n] .= 0
io.size = n
io.ptr = min(io.ptr, n+1)
ismarked(io) && io.mark > n && unmark(io)

2 comments on commit 7e2ce0e

@nanosoldier
Collaborator

Executing the daily benchmark build, I will reply here when finished:

@nanosoldier runbenchmarks(ALL, isdaily = true)

@nanosoldier
Collaborator

Your benchmark job has completed - possible performance regressions were detected. A full report can be found here. cc @ararslan
