
Commit

Add ability to not round-trip uncached inference results through IRCode (#47137)

There are generally three reasons inference results end up uncached:
1. They come from typeinf_ext
2. We discover some validity limitation (generally due to recursion)
3. They are used for constant propagation

Currently, we convert all such inference results back to CodeInfo,
in case they come from case 1. However, for inference results of
kind 3, the only thing we ever do with them is turn them back into
IRCode for inlining, so the round trip (IRCode -> CodeInfo -> IRCode)
is quite wasteful. Stop doing that. This PR is the minimal change to
accomplish that: it marks those inference results that actually need
to be converted back to CodeInfo (case 1). This probably needs some
tweaking for external AbstractInterpreters, but let's make sure this
works and has the right performance first.

This commit just adds the capability, but doesn't turn it on
by default, since the performance for base didn't quite look
favorable yet.
Keno authored Oct 15, 2022
1 parent 8552543 commit 532125d
Showing 3 changed files with 26 additions and 4 deletions.
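
For orientation before the diffs, here is a minimal, self-contained Julia sketch of the decision the new flag introduces in finish!. FakeCodeInfo, FakeIRCode, ToyResult, and finish_toy! are toy stand-ins invented for this illustration, not the real Core.Compiler types or functions; only the branching mirrors the patch below.

    # Toy model of the new finish! decision: results that must round-trip to
    # CodeInfo still do, inlineable results keep their IRCode, everything else
    # is dropped.
    abstract type AbstractSrc end
    struct FakeCodeInfo <: AbstractSrc end   # stand-in for Core.CodeInfo
    struct FakeIRCode <: AbstractSrc end     # stand-in for Core.Compiler.IRCode

    mutable struct ToyResult
        src::Union{AbstractSrc, Nothing}
        must_be_codeinf::Bool
    end

    to_codeinf(::FakeIRCode) = FakeCodeInfo()  # stand-in for ir_to_codeinf!

    function finish_toy!(result::ToyResult, ir::FakeIRCode, inlineable::Bool)
        if result.must_be_codeinf
            result.src = to_codeinf(ir)  # case 1 consumers (typeinf_ext) need CodeInfo
        elseif inlineable
            result.src = ir              # case 3: keep the IRCode for the inliner, no round trip
        else
            result.src = nothing         # not cached and not inlineable: drop it
        end
        return result.src
    end

    finish_toy!(ToyResult(nothing, true), FakeIRCode(), true)    # returns a FakeCodeInfo
    finish_toy!(ToyResult(nothing, false), FakeIRCode(), true)   # returns the FakeIRCode unchanged
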
base/compiler/optimize.jl (2 additions, 0 deletions)
@@ -104,6 +104,8 @@ function inlining_policy(interp::AbstractInterpreter,
         else
             return nothing
         end
+    elseif isa(src, IRCode)
+        return src
     end
     return nothing
 end
base/compiler/typeinfer.jl (21 additions, 2 deletions)
@@ -222,7 +222,19 @@ function finish!(interp::AbstractInterpreter, caller::InferenceResult)
     opt = caller.src
     if opt isa OptimizationState # implies `may_optimize(interp) === true`
         if opt.ir !== nothing
-            caller.src = ir_to_codeinf!(opt)
+            if caller.must_be_codeinf
+                caller.src = ir_to_codeinf!(opt)
+            elseif is_inlineable(opt.src)
+                # TODO: If the CFG is too big, inlining becomes more expensive and if we're going to
+                # use this IR over and over, it's worth simplifying it. Round trips through
+                # CodeInstance do this implicitly, since they recompute the CFG, so try to
+                # match that behavior here.
+                # ir = cfg_simplify!(opt.ir)
+                caller.src = opt.ir
+            else
+                # Not cached and not inlineable - drop the ir
+                caller.src = nothing
+            end
         end
     end
     return caller.src
@@ -925,6 +937,9 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize
         # completely new
         lock_mi_inference(interp, mi)
         result = InferenceResult(mi)
+        if cache === :local
+            result.must_be_codeinf = true # TODO directly keep `opt.ir` for this case
+        end
         frame = InferenceState(result, cache, interp) # always use the cache for edge targets
         if frame === nothing
             # can't get the source for this, so we know nothing
@@ -998,6 +1013,7 @@ function typeinf_frame(interp::AbstractInterpreter, method::Method, @nospecialize
     mi = specialize_method(method, atype, sparams)::MethodInstance
     ccall(:jl_typeinf_timing_begin, Cvoid, ())
     result = InferenceResult(mi)
+    result.must_be_codeinf = true
     frame = InferenceState(result, run_optimizer ? :global : :no, interp)
     frame === nothing && return nothing
     typeinf(interp, frame)
@@ -1056,7 +1072,9 @@ function typeinf_ext(interp::AbstractInterpreter, mi::MethodInstance)
         return retrieve_code_info(mi)
     end
     lock_mi_inference(interp, mi)
-    frame = InferenceState(InferenceResult(mi), #=cache=#:global, interp)
+    result = InferenceResult(mi)
+    result.must_be_codeinf = true
+    frame = InferenceState(result, #=cache=#:global, interp)
     frame === nothing && return nothing
     typeinf(interp, frame)
     ccall(:jl_typeinf_timing_end, Cvoid, ())
@@ -1099,6 +1117,7 @@ function typeinf_ext_toplevel(interp::AbstractInterpreter, linfo::MethodInstance)
     ccall(:jl_typeinf_timing_begin, Cvoid, ())
     if !src.inferred
         result = InferenceResult(linfo)
+        result.must_be_codeinf = true
         frame = InferenceState(result, src, #=cache=#:global, interp)
         typeinf(interp, frame)
         @assert frame.inferred # TODO: deal with this better
base/compiler/types.jl (3 additions, 2 deletions)
@@ -41,18 +41,19 @@ mutable struct InferenceResult
     argtypes::Vector{Any}
     overridden_by_const::BitVector
     result # ::Type, or InferenceState if WIP
-    src # ::Union{CodeInfo, OptimizationState} if inferred copy is available, nothing otherwise
+    src # ::Union{CodeInfo, IRCode, OptimizationState} if inferred copy is available, nothing otherwise
     valid_worlds::WorldRange # if inference and optimization is finished
     ipo_effects::Effects # if inference is finished
     effects::Effects # if optimization is finished
     argescapes # ::ArgEscapeCache if optimized, nothing otherwise
+    must_be_codeinf::Bool # if this must come out as CodeInfo or leaving it as IRCode is ok
     # NOTE the main constructor is defined within inferencestate.jl
     global function _InferenceResult(
         linfo::MethodInstance,
         arginfo#=::Union{Nothing,Tuple{ArgInfo,InferenceState}}=#)
         argtypes, overridden_by_const = matching_cache_argtypes(linfo, arginfo)
         return new(linfo, argtypes, overridden_by_const, Any, nothing,
-            WorldRange(), Effects(), Effects(), nothing)
+            WorldRange(), Effects(), Effects(), nothing, true)
     end
 end
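
As the commit message notes, external AbstractInterpreters will probably need tweaking before relying on this. Purely as a hypothetical illustration of where the new field sits in the existing API (mirroring how typeinf_ext constructs its InferenceResult in the diff above), an external consumer might look like the sketch below; infer_keeping_ir is an invented name and this is not an endorsed pattern from the commit.

    const CC = Core.Compiler

    # Hypothetical sketch (not part of this commit): run inference without caching
    # and allow `finish!` to leave the optimized source as IRCode instead of
    # converting it back to CodeInfo.
    function infer_keeping_ir(interp::CC.AbstractInterpreter, mi::Core.MethodInstance)
        result = CC.InferenceResult(mi)
        result.must_be_codeinf = false                    # opt out of the CodeInfo round trip
        frame = CC.InferenceState(result, #=cache=#:no, interp)
        frame === nothing && return nothing
        CC.typeinf(interp, frame)
        return result.src                                 # CodeInfo, IRCode, or nothing
    end

    # e.g. infer_keeping_ir(CC.NativeInterpreter(), some_method_instance)
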


2 comments on commit 532125d

@nanosoldier
Collaborator

Executing the daily package evaluation, I will reply here when finished:

@nanosoldier runtests(ALL, isdaily = true, configuration=(rr=true,))

@nanosoldier
Collaborator

Your package evaluation job has completed - possible new issues were detected. A full report can be found here.
