-
-
Notifications
You must be signed in to change notification settings - Fork 5.5k
/
abstractinterpretation.jl
1906 lines (1826 loc) · 80.9 KB
/
abstractinterpretation.jl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# This file is a part of Julia. License is MIT: https://julialang.org/license
#############
# constants #
#############
# The `TypeName` of `Ref`'s underlying body type; used elsewhere to recognize
# `Ref`-like wrappers during inference.
const _REF_NAME = Ref.body.name
#########
# logic #
#########
# See if the inference result of the current statement's result value might affect
# the final answer for the method (aside from optimization potential and exceptions).
# To do that, we need to check both for slot assignment and SSA usage.
# Return `true` when the current statement is a `:call` whose SSA value has no
# uses, i.e. its inferred result cannot affect the method's final answer
# (aside from optimization potential and exceptions).
function call_result_unused(frame::InferenceState)
    stmt = frame.src.code[frame.currpc]
    return isexpr(stmt, :call) && isempty(frame.ssavalue_uses[frame.currpc])
end
# check if this return type is improvable (i.e. whether it's possible that with
# more information, we might get a more precise type)
# Check whether `rtype` could possibly become more precise with additional
# information (e.g. via constant propagation).
function is_improvable(@nospecialize(rtype))
    # A plain `Type` can always be improved to `Const` or `PartialStruct`,
    # unless it is already `Bottom`.
    isa(rtype, Type) && return rtype !== Union{}
    # Otherwise only partially-known wrappers can still be sharpened to
    # `Const` or a more precise wrapper.
    return isa(rtype, PartialStruct) || isa(rtype, InterConditional)
end
# Abstractly interpret a generic-function call site: look up all methods
# matching `atype` (with optional union splitting of `argtypes`), infer each
# candidate (attempting constant propagation per match), and merge the results
# into a single return type plus `CallMeta` info for downstream consumers.
# Returns `CallMeta(Any, false)` whenever inference gives up (throw block,
# unknown method table, or too many matches).
function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f),
        fargs::Union{Nothing,Vector{Any}}, argtypes::Vector{Any}, @nospecialize(atype),
        sv::InferenceState, max_methods::Int = InferenceParams(interp).MAX_METHODS)
    if sv.params.unoptimize_throw_blocks && sv.currpc in sv.throw_blocks
        add_remark!(interp, sv, "Skipped call in throw block")
        return CallMeta(Any, false)
    end
    valid_worlds = WorldRange()
    # NOTE this is valid as far as any "constant" lattice element doesn't represent `Union` type
    splitunions = 1 < unionsplitcost(argtypes) <= InferenceParams(interp).MAX_UNION_SPLITTING
    mts = Core.MethodTable[]
    fullmatch = Bool[]
    if splitunions
        # resolve matches separately for each union-split signature, so that
        # constants survive the split
        split_argtypes = switchtupleunion(argtypes)
        applicable = Any[]
        applicable_argtypes = Vector{Any}[] # arrays like `argtypes`, including constants, for each match
        infos = MethodMatchInfo[]
        for arg_n in split_argtypes
            sig_n = argtypes_to_type(arg_n)
            mt = ccall(:jl_method_table_for, Any, (Any,), sig_n)
            if mt === nothing
                add_remark!(interp, sv, "Could not identify method table for call")
                return CallMeta(Any, false)
            end
            mt = mt::Core.MethodTable
            matches = findall(sig_n, method_table(interp); limit=max_methods)
            if matches === missing
                add_remark!(interp, sv, "For one of the union split cases, too many methods matched")
                return CallMeta(Any, false)
            end
            push!(infos, MethodMatchInfo(matches))
            for m in matches
                push!(applicable, m)
                push!(applicable_argtypes, arg_n)
            end
            valid_worlds = intersect(valid_worlds, matches.valid_worlds)
            thisfullmatch = _any(match->(match::MethodMatch).fully_covers, matches)
            # track full-match-ness per distinct method table
            found = false
            for (i, mt′) in enumerate(mts)
                if mt′ === mt
                    fullmatch[i] &= thisfullmatch
                    found = true
                    break
                end
            end
            if !found
                push!(mts, mt)
                push!(fullmatch, thisfullmatch)
            end
        end
        info = UnionSplitInfo(infos)
    else
        mt = ccall(:jl_method_table_for, Any, (Any,), atype)
        if mt === nothing
            add_remark!(interp, sv, "Could not identify method table for call")
            return CallMeta(Any, false)
        end
        mt = mt::Core.MethodTable
        matches = findall(atype, method_table(interp, sv); limit=max_methods)
        if matches === missing
            # this means too many methods matched
            # (assume this will always be true, so we don't compute / update valid age in this case)
            add_remark!(interp, sv, "Too many methods matched")
            return CallMeta(Any, false)
        end
        push!(mts, mt)
        push!(fullmatch, _any(match->(match::MethodMatch).fully_covers, matches))
        info = MethodMatchInfo(matches)
        applicable = matches.matches
        valid_worlds = matches.valid_worlds
        applicable_argtypes = nothing
    end
    update_valid_age!(sv, valid_worlds)
    applicable = applicable::Array{Any,1}
    napplicable = length(applicable)
    rettype = Bottom
    edges = MethodInstance[]
    conditionals = nothing # keeps refinement information of call argument types when the return type is boolean
    seen = 0 # number of signatures actually inferred
    any_const_result = false
    const_results = Union{InferenceResult,Nothing}[]
    multiple_matches = napplicable > 1
    # single pure-method match: try to fold the call to a constant directly
    if f !== nothing && napplicable == 1 && is_method_pure(applicable[1]::MethodMatch)
        val = pure_eval_call(f, argtypes)
        if val !== false
            # TODO: add some sort of edge(s)
            return CallMeta(val, MethodResultPure(info))
        end
    end
    for i in 1:napplicable
        match = applicable[i]::MethodMatch
        method = match.method
        sig = match.spec_types
        if bail_out_toplevel_call(interp, sig, sv)
            # only infer concrete call sites in top-level expressions
            add_remark!(interp, sv, "Refusing to infer non-concrete call site in top-level expression")
            rettype = Any
            break
        end
        this_rt = Bottom
        splitunions = false
        # TODO: this used to trigger a bug in inference recursion detection, and is unmaintained now
        # sigtuple = unwrap_unionall(sig)::DataType
        # splitunions = 1 < unionsplitcost(sigtuple.parameters) * napplicable <= InferenceParams(interp).MAX_UNION_SPLITTING
        if splitunions
            splitsigs = switchtupleunion(sig)
            for sig_n in splitsigs
                rt, edgecycle, edgelimited, edge = abstract_call_method(interp, method, sig_n, svec(), multiple_matches, sv)
                if edge !== nothing
                    push!(edges, edge)
                end
                this_argtypes = applicable_argtypes === nothing ? argtypes : applicable_argtypes[i]
                const_rt, const_result = abstract_call_method_with_const_args(interp, rt, f, this_argtypes, match, sv, edgecycle, edgelimited, false)
                # only accept the const-prop result when it actually refines `rt`
                if const_rt !== rt && const_rt ⊑ rt
                    rt = const_rt
                end
                push!(const_results, const_result)
                if const_result !== nothing
                    any_const_result = true
                end
                this_rt = tmerge(this_rt, rt)
                if bail_out_call(interp, this_rt, sv)
                    break
                end
            end
        else
            this_rt, edgecycle, edgelimited, edge = abstract_call_method(interp, method, sig, match.sparams, multiple_matches, sv)
            if edge !== nothing
                push!(edges, edge)
            end
            # try constant propagation with argtypes for this match
            # this is in preparation for inlining, or improving the return result
            this_argtypes = applicable_argtypes === nothing ? argtypes : applicable_argtypes[i]
            const_this_rt, const_result = abstract_call_method_with_const_args(interp, this_rt, f, this_argtypes, match, sv, edgecycle, edgelimited, false)
            # only accept the const-prop result when it actually refines `this_rt`
            if const_this_rt !== this_rt && const_this_rt ⊑ this_rt
                this_rt = const_this_rt
            end
            push!(const_results, const_result)
            if const_result !== nothing
                any_const_result = true
            end
        end
        this_conditional = ignorelimited(this_rt)
        this_rt = widenwrappedconditional(this_rt)
        @assert !(this_conditional isa Conditional) "invalid lattice element returned from inter-procedural context"
        seen += 1
        rettype = tmerge(rettype, this_rt)
        # accumulate per-argument (vtype, elsetype) refinements for a boolean result
        if this_conditional !== Bottom && is_lattice_bool(rettype) && fargs !== nothing
            if conditionals === nothing
                conditionals = Any[Bottom for _ in 1:length(argtypes)],
                               Any[Bottom for _ in 1:length(argtypes)]
            end
            condval = maybe_extract_const_bool(this_conditional)
            for i = 1:length(argtypes)
                fargs[i] isa SlotNumber || continue
                if this_conditional isa InterConditional && this_conditional.slot == i
                    vtype = this_conditional.vtype
                    elsetype = this_conditional.elsetype
                else
                    elsetype = vtype = tmeet(argtypes[i], fieldtype(sig, i))
                    condval === true && (elsetype = Union{})
                    condval === false && (vtype = Union{})
                end
                conditionals[1][i] = tmerge(conditionals[1][i], vtype)
                conditionals[2][i] = tmerge(conditionals[2][i], elsetype)
            end
        end
        if bail_out_call(interp, rettype, sv)
            break
        end
    end
    # inliner uses this information only when there is a single match that has been improved
    # by constant analysis, but let's create `ConstCallInfo` if there has been any successful
    # constant propagation happened since other consumers may be interested in this
    if any_const_result && seen == napplicable
        info = ConstCallInfo(info, const_results)
    end
    if rettype isa LimitedAccuracy
        union!(sv.pclimitations, rettype.causes)
        rettype = rettype.typ
    end
    # if we have argument refinement information, apply that now to get the result
    if is_lattice_bool(rettype) && conditionals !== nothing && fargs !== nothing
        slot = 0
        vtype = elsetype = Any
        condval = maybe_extract_const_bool(rettype)
        for i in 1:length(fargs)
            # find the first argument which supports refinement,
            # and intersect all equivalent arguments with it
            arg = fargs[i]
            arg isa SlotNumber || continue # can't refine
            old = argtypes[i]
            old isa Type || continue # unlikely to refine
            id = slot_id(arg)
            if slot == 0 || id == slot
                new_vtype = conditionals[1][i]
                if condval === false
                    vtype = Union{}
                elseif new_vtype ⊑ vtype
                    vtype = new_vtype
                else
                    vtype = tmeet(vtype, widenconst(new_vtype))
                end
                new_elsetype = conditionals[2][i]
                if condval === true
                    elsetype = Union{}
                elseif new_elsetype ⊑ elsetype
                    elsetype = new_elsetype
                else
                    elsetype = tmeet(elsetype, widenconst(new_elsetype))
                end
                if (slot > 0 || condval !== false) && !(old ⊑ vtype) # essentially vtype ⋤ old
                    slot = id
                elseif (slot > 0 || condval !== true) && !(old ⊑ elsetype) # essentially elsetype ⋤ old
                    slot = id
                else # reset: no new useful information for this slot
                    vtype = elsetype = Any
                    if slot > 0
                        slot = 0
                    end
                end
            end
        end
        if vtype === Bottom && elsetype === Bottom
            rettype = Bottom # accidentally proved this call to be dead / throw !
        elseif slot > 0
            rettype = Conditional(SlotNumber(slot), vtype, elsetype) # record a Conditional improvement to this slot
        end
    end
    @assert !(rettype isa InterConditional) "invalid lattice element returned from inter-procedural context"
    if call_result_unused(sv) && !(rettype === Bottom)
        add_remark!(interp, sv, "Call result type was widened because the return value is unused")
        # We're mainly only here because the optimizer might want this code,
        # but we ourselves locally don't typically care about it locally
        # (beyond checking if it always throws).
        # So avoid adding an edge, since we don't want to bother attempting
        # to improve our result even if it does change (to always throw),
        # and avoid keeping track of a more complex result type.
        rettype = Any
    end
    add_call_backedges!(interp, rettype, edges, fullmatch, mts, atype, sv)
    if !isempty(sv.pclimitations) # remove self, if present
        delete!(sv.pclimitations, sv)
        for caller in sv.callers_in_cycle
            delete!(sv.pclimitations, caller)
        end
    end
    #print("=> ", rettype, "\n")
    return CallMeta(rettype, info)
end
# Record inference dependency edges ("backedges") for a resolved call so that
# the cached result can be invalidated when relevant method definitions change.
function add_call_backedges!(interp::AbstractInterpreter,
        @nospecialize(rettype),
        edges::Vector{MethodInstance},
        fullmatch::Vector{Bool}, mts::Vector{Core.MethodTable}, @nospecialize(atype),
        sv::InferenceState)
    # for `NativeInterpreter`, we don't add backedges when a new method couldn't
    # refine (widen) this type
    rettype === Any && return
    foreach(edge -> add_backedge!(edge, sv), edges)
    for k in 1:length(mts)
        fullmatch[k] && continue
        # also need an edge to the method table in case something gets
        # added that did not intersect with any existing method
        add_mt_backedge!(mts[k], atype, sv)
    end
    return
end
# Remark messages recorded when inference widens a recursive call in order to
# guarantee that the inference process itself terminates.
const RECURSION_UNUSED_MSG = "Bounded recursion detected with unused result. Annotated return type may be wider than true result."
const RECURSION_MSG = "Bounded recursion detected. Call was widened to force convergence."
# Infer the effect of calling `method` with signature `sig` / sparams, applying
# the recursion-limiting heuristics: if the same (method, signature) pair is
# already on the inference stack, or a repeated edge makes the signature grow,
# the call is widened to force convergence.
# Returns `(rt, edgecycle::Bool, edgelimited::Bool, edge)`.
function abstract_call_method(interp::AbstractInterpreter, method::Method, @nospecialize(sig), sparams::SimpleVector, hardlimit::Bool, sv::InferenceState)
    if method.name === :depwarn && isdefined(Main, :Base) && method.module === Main.Base
        add_remark!(interp, sv, "Refusing to infer into `depwarn`")
        return Any, false, false, nothing
    end
    topmost = nothing
    # Limit argument type tuple growth of functions:
    # look through the parents list to see if there's a call to the same method
    # and from the same method.
    # Returns the topmost occurrence of that repeated edge.
    edgecycle = false
    edgelimited = false
    # The `method_for_inference_heuristics` will expand the given method's generator if
    # necessary in order to retrieve this field from the generated `CodeInfo`, if it exists.
    # The other `CodeInfo`s we inspect will already have this field inflated, so we just
    # access it directly instead (to avoid regeneration).
    callee_method2 = method_for_inference_heuristics(method, sig, sparams) # Union{Method, Nothing}
    sv_method2 = sv.src.method_for_inference_limit_heuristics # limit only if user token match
    sv_method2 isa Method || (sv_method2 = nothing) # Union{Method, Nothing}
    # does `parent` belong to the same method (and limit-heuristics token) as `sv`?
    function matches_sv(parent::InferenceState)
        parent_method2 = parent.src.method_for_inference_limit_heuristics # limit only if user token match
        parent_method2 isa Method || (parent_method2 = nothing) # Union{Method, Nothing}
        return parent.linfo.def === sv.linfo.def && sv_method2 === parent_method2
    end
    # should `frame` be treated as a repeated edge for limiting purposes?
    function edge_matches_sv(frame::InferenceState)
        inf_method2 = frame.src.method_for_inference_limit_heuristics # limit only if user token match
        inf_method2 isa Method || (inf_method2 = nothing) # Union{Method, Nothing}
        if callee_method2 !== inf_method2
            return false
        end
        if !hardlimit
            # if this is a soft limit,
            # also inspect the parent of this edge,
            # to see if they are the same Method as sv
            # in which case we'll need to ensure it is convergent
            # otherwise, we don't
            # check in the cycle list first
            # all items in here are mutual parents of all others
            if !_any(matches_sv, frame.callers_in_cycle)
                let parent = frame.parent
                    parent !== nothing || return false
                    parent = parent::InferenceState
                    (parent.cached || parent.parent !== nothing) || return false
                    matches_sv(parent) || return false
                end
            end
            # If the method defines a recursion relation, give it a chance
            # to tell us that this recursion is actually ok.
            if isdefined(method, :recursion_relation)
                if Core._apply_pure(method.recursion_relation, Any[method, callee_method2, sig, frame.linfo.specTypes])
                    return false
                end
            end
        end
        return true
    end
    for infstate in InfStackUnwind(sv)
        if method === infstate.linfo.def
            if infstate.linfo.specTypes == sig
                # avoid widening when detecting self-recursion
                # TODO: merge call cycle and return right away
                if call_result_unused(sv)
                    add_remark!(interp, sv, RECURSION_UNUSED_MSG)
                    # since we don't use the result (typically),
                    # we have a self-cycle in the call-graph, but not in the inference graph (typically):
                    # break this edge now (before we record it) by returning early
                    # (non-typically, this means that we lose the ability to detect a guaranteed StackOverflow in some cases)
                    return Any, true, true, nothing
                end
                topmost = nothing
                edgecycle = true
                break
            end
            topmost === nothing || continue
            if edge_matches_sv(infstate)
                topmost = infstate
                edgecycle = true
            end
        end
    end
    if topmost !== nothing
        sigtuple = unwrap_unionall(sig)::DataType
        msig = unwrap_unionall(method.sig)::DataType
        spec_len = length(msig.parameters) + 1
        ls = length(sigtuple.parameters)
        if method === sv.linfo.def
            # Under direct self-recursion, permit much greater use of reducers.
            # here we assume that complexity(specTypes) :>= complexity(sig)
            comparison = sv.linfo.specTypes
            l_comparison = length(unwrap_unionall(comparison).parameters)::Int
            spec_len = max(spec_len, l_comparison)
        else
            comparison = method.sig
        end
        if isdefined(method, :recursion_relation)
            # We don't require the recursion_relation to be transitive, so
            # apply a hard limit
            hardlimit = true
        end
        # see if the type is actually too big (relative to the caller), and limit it if required
        newsig = limit_type_size(sig, comparison, hardlimit ? comparison : sv.linfo.specTypes, InferenceParams(interp).TUPLE_COMPLEXITY_LIMIT_DEPTH, spec_len)
        if newsig !== sig
            # continue inference, but note that we've limited parameter complexity
            # on this call (to ensure convergence), so that we don't cache this result
            if call_result_unused(sv)
                add_remark!(interp, sv, RECURSION_UNUSED_MSG)
                # if we don't (typically) actually care about this result,
                # don't bother trying to examine some complex abstract signature
                # since it's very unlikely that we'll try to inline this,
                # or want to make an invoke edge to its calling convention return type.
                # (non-typically, this means that we lose the ability to detect a guaranteed StackOverflow in some cases)
                return Any, true, true, nothing
            end
            add_remark!(interp, sv, RECURSION_MSG)
            topmost = topmost::InferenceState
            parentframe = topmost.parent
            poison_callstack(sv, parentframe === nothing ? topmost : parentframe)
            sig = newsig
            sparams = svec()
            edgelimited = true
        end
    end
    # if sig changed, may need to recompute the sparams environment
    if isa(method.sig, UnionAll) && isempty(sparams)
        recomputed = ccall(:jl_type_intersection_with_env, Any, (Any, Any), sig, method.sig)::SimpleVector
        #@assert recomputed[1] !== Bottom
        # We must not use `sig` here, since that may re-introduce structural complexity that
        # our limiting heuristic sought to eliminate. The alternative would be to not increment depth over covariant contexts,
        # but we prefer to permit inference of tuple-destructuring, so we don't do that right now
        # For example, with a signature such as `Tuple{T, Ref{T}} where {T <: S}`
        # we might want to limit this to `Tuple{S, Ref}`, while type-intersection can instead give us back the original type
        # (which moves `S` back up to a lower comparison depth)
        # Optionally, we could try to drive this to a fixed point, but I think this is getting too complex,
        # and this would only cause more questions and more problems
        # (the following is only an example, most of the statements are probably in the wrong order):
        #     newsig = sig
        #     seen = IdSet()
        #     while !(newsig in seen)
        #         push!(seen, newsig)
        #         lsig = length((unwrap_unionall(sig)::DataType).parameters)
        #         newsig = limit_type_size(newsig, sig, sv.linfo.specTypes, InferenceParams(interp).TUPLE_COMPLEXITY_LIMIT_DEPTH, lsig)
        #         recomputed = ccall(:jl_type_intersection_with_env, Any, (Any, Any), newsig, method.sig)::SimpleVector
        #         newsig = recomputed[2]
        #     end
        #     sig = ?
        sparams = recomputed[2]::SimpleVector
    end
    rt, edge = typeinf_edge(interp, method, sig, sparams, sv)
    if edge === nothing
        edgecycle = edgelimited = true
    end
    return rt, edgecycle, edgelimited, edge
end
# Attempt to re-infer `match` with the constant information carried in
# `argtypes` applied. Returns `(result_type, inf_result::InferenceResult)` on
# success, or `(Any, nothing)` when constant propagation is not profitable,
# hits a cycle, or fails.
function abstract_call_method_with_const_args(interp::AbstractInterpreter, @nospecialize(rettype),
        @nospecialize(f), argtypes::Vector{Any}, match::MethodMatch,
        sv::InferenceState, edgecycle::Bool, edgelimited::Bool,
        va_override::Bool)
    mi = maybe_get_const_prop_profitable(interp, rettype, f, argtypes, match, sv, edgecycle)
    mi === nothing && return Any, nothing
    # try constant prop'
    inf_cache = get_inference_cache(interp)
    inf_result = cache_lookup(mi, argtypes, inf_cache)
    if inf_result === nothing
        # if there might be a cycle, check to make sure we don't end up
        # calling ourselves here.
        if edgecycle && _any(InfStackUnwind(sv)) do infstate
                # if the type complexity limiting didn't decide to limit the call signature (`edgelimited = false`)
                # we can relax the cycle detection by comparing `MethodInstance`s and allow inference to
                # propagate different constant elements if the recursion is finite over the lattice
                return (edgelimited ? match.method === infstate.linfo.def : mi === infstate.linfo) &&
                    any(infstate.result.overridden_by_const)
            end
            add_remark!(interp, sv, "[constprop] Edge cycle encountered")
            return Any, nothing
        end
        inf_result = InferenceResult(mi, argtypes, va_override)
        frame = InferenceState(inf_result, #=cache=#false, interp)
        frame === nothing && return Any, nothing # this is probably a bad generated function (unsound), but just ignore it
        frame.parent = sv
        push!(inf_cache, inf_result)
        typeinf(interp, frame) || return Any, nothing
    end
    result = inf_result.result
    # if constant inference hits a cycle, just bail out
    isa(result, InferenceState) && return Any, nothing
    add_backedge!(mi, sv)
    return result, inf_result
end
# if there's a possibility we could get a better result (hopefully without doing too much work)
# returns `MethodInstance` with constant arguments, returns nothing otherwise
# NOTE the heuristics below are applied in a deliberate order (cheapest first);
# each one can veto constant propagation independently.
function maybe_get_const_prop_profitable(interp::AbstractInterpreter, @nospecialize(rettype),
        @nospecialize(f), argtypes::Vector{Any}, match::MethodMatch,
        sv::InferenceState, edgecycle::Bool)
    const_prop_entry_heuristic(interp, rettype, sv, edgecycle) || return nothing
    method = match.method
    nargs::Int = method.nargs
    method.isva && (nargs -= 1)
    # not enough arguments supplied to cover the declared positional parameters
    if length(argtypes) < nargs
        return nothing
    end
    const_prop_argument_heuristic(interp, argtypes) || const_prop_rettype_heuristic(interp, rettype) || return nothing
    allconst = is_allconst(argtypes)
    force = force_const_prop(interp, f, method)
    force || const_prop_function_heuristic(interp, f, argtypes, nargs, allconst) || return nothing
    force |= allconst
    mi = specialize_method(match, !force)
    if mi === nothing
        add_remark!(interp, sv, "[constprop] Failed to specialize")
        return nothing
    end
    mi = mi::MethodInstance
    if !force && !const_prop_methodinstance_heuristic(interp, method, mi)
        add_remark!(interp, sv, "[constprop] Disabled by heuristic")
        return nothing
    end
    return mi
end
# Gate constant propagation at the call-site level: skip it when the result is
# unused inside a cycle, when the return type cannot be improved anyway, or
# when inter-procedural constant propagation is disabled.
function const_prop_entry_heuristic(interp::AbstractInterpreter, @nospecialize(rettype), sv::InferenceState, edgecycle::Bool)
    if edgecycle && call_result_unused(sv)
        return false
    end
    return InferenceParams(interp).ipo_constant_propagation && is_improvable(rettype)
end
# Decide whether any argument carries constant-like information that could make
# propagating constants worthwhile.
function const_prop_argument_heuristic(interp::AbstractInterpreter, argtypes::Vector{Any})
    return any(argtypes) do a
        widened = widenconditional(a)
        return has_nontrivial_const_info(widened) && is_const_prop_profitable_arg(widened)
    end
end
# Determine whether `arg` carries information beyond its widened type that
# would make specializing on it worthwhile.
function is_const_prop_profitable_arg(@nospecialize(arg))
    # have new information from argtypes that wasn't available from the signature
    if isa(arg, PartialStruct)
        for b in arg.fields
            isconstType(b) && return true
            is_const_prop_profitable_arg(b) && return true
        end
    end
    isa(arg, PartialOpaque) && return true
    # NOTE any lattice element that is not a `Const` (and not handled above) is
    # considered profitable here — only `Const` values get filtered further below
    isa(arg, Const) || return true
    val = arg.val
    # don't consider mutable values or Strings useful constants
    return isa(val, Symbol) || isa(val, Type) || (!isa(val, String) && !ismutable(val))
end
# A return type that could still be sharpened by constant propagation justifies
# attempting it even when the argument heuristic declined.
const_prop_rettype_heuristic(interp::AbstractInterpreter, @nospecialize(rettype)) =
    improvable_via_constant_propagation(rettype)
# Return `true` when every argument is a constant-like lattice element
# (`Const`, a constant `Type`, `PartialStruct`, or `PartialOpaque`).
function is_allconst(argtypes::Vector{Any})
    return all(argtypes) do a
        widened = widenconditional(a)
        return isa(widened, Const) || isconstType(widened) ||
               isa(widened, PartialStruct) || isa(widened, PartialOpaque)
    end
end
# Should constant propagation be forced regardless of the other heuristics?
# True when the method or the interpreter requests aggressive const-prop, or
# for `getproperty`/`setproperty!` where constant field names are essential.
function force_const_prop(interp::AbstractInterpreter, @nospecialize(f), method::Method)
    method.aggressive_constprop && return true
    InferenceParams(interp).aggressive_constant_propagation && return true
    return istopfunction(f, :getproperty) || istopfunction(f, :setproperty!)
end
# Function-identity based veto for constant propagation: skip well-known
# builtin-like functions where propagating constants is unlikely to pay off.
function const_prop_function_heuristic(interp::AbstractInterpreter, @nospecialize(f), argtypes::Vector{Any}, nargs::Int, allconst::Bool)
    if nargs > 1
        if istopfunction(f, :getindex) || istopfunction(f, :setindex!)
            containerty = argtypes[2]
            # don't propagate constant index into indexing of non-constant array
            if containerty isa Type && containerty <: AbstractArray && !issingletontype(containerty)
                return false
            elseif containerty ⊑ Array
                return false
            end
        elseif istopfunction(f, :iterate)
            iterty = argtypes[2]
            if iterty ⊑ Array
                return false
            end
        end
    end
    if !allconst && (istopfunction(f, :+) || istopfunction(f, :-) || istopfunction(f, :*) ||
            istopfunction(f, :(==)) || istopfunction(f, :!=) ||
            istopfunction(f, :<=) || istopfunction(f, :>=) || istopfunction(f, :<) || istopfunction(f, :>) ||
            istopfunction(f, :<<) || istopfunction(f, :>>))
        # it is almost useless to inline the op when all the same type,
        # but highly worthwhile to inline promote of a constant
        length(argtypes) > 2 || return false
        firstty = widenconst(argtypes[2])
        # profitable only when the (widened) argument types are heterogeneous
        for k in 3:length(argtypes)
            widenconst(argtypes[k]) !== firstty && return true
        end
        return false
    end
    return true
end
# This is a heuristic to avoid trying to const prop through complicated functions
# where we would spend a lot of time, but are probably unlikely to get an improved
# result anyway.
function const_prop_methodinstance_heuristic(interp::AbstractInterpreter, method::Method, mi::MethodInstance)
    if method.is_for_opaque_closure
        # Not inlining an opaque closure can be very expensive, so be generous
        # with the const-prop-ability. It is quite possible that we can't infer
        # anything at all without const-propping, so the inlining check below
        # isn't particularly helpful here.
        return true
    end
    # Peek at the inferred result for the function to determine if the optimizer
    # was able to cut it down to something simple (inlineable in particular).
    # If so, there's a good chance we might be able to const prop all the way
    # through and learn something new.
    code = get(code_cache(interp), mi, nothing)
    declared_inline = isdefined(method, :source) && ccall(:jl_ir_flag_inlineable, Bool, (Any,), method.source)
    # a declared-inline method is always considered profitable
    declared_inline && return true
    if isdefined(code, :inferred)
        cached = code.inferred
        if cached !== nothing
            # profitable exactly when the cached IR is accepted by the inlining policy
            return inlining_policy(interp)(cached) !== nothing
        end
    end
    return false
end
# This is only for use with `Conditional`.
# In general, usage of this is wrong.
# Walk a chain of `SSAValue`s back to the statement that defines `arg`; if that
# definition is a `SlotNumber`, return it — but only when no other assignment
# to the same slot appears between the definition and the current statement.
# Returns `nothing` otherwise.
function ssa_def_slot(@nospecialize(arg), sv::InferenceState)
    init = sv.currpc
    while isa(arg, SSAValue)
        init = arg.id
        arg = sv.src.code[init]
    end
    arg isa SlotNumber || return nothing
    for i = init:(sv.currpc - 1)
        # conservatively make sure there isn't potentially another conflicting assignment to
        # the same slot between the def and usage
        # we can assume the IR is sorted, since the front-end only creates SSA values in order
        e = sv.src.code[i]
        e isa Expr || continue
        if e.head === :(=) && e.args[1] === arg
            return nothing
        end
    end
    return arg
end
# `typ` is the inferred type for expression `arg`.
# if the expression constructs a container (e.g. `svec(x,y,z)`),
# refine its type to an array of element types.
# Union of Tuples of the same length is converted to Tuple of Unions.
# returns an array of types
# (the second return value is extra iteration info, `nothing` unless
# `abstract_iteration` is invoked as the fallback)
function precise_container_type(interp::AbstractInterpreter, @nospecialize(itft), @nospecialize(typ), sv::InferenceState)
    # a partially-known Tuple already carries per-element lattice information
    if isa(typ, PartialStruct) && typ.typ.name === Tuple.name
        return typ.fields, nothing
    end
    if isa(typ, Const)
        val = typ.val
        if isa(val, SimpleVector) || isa(val, Tuple)
            return Any[ Const(val[i]) for i in 1:length(val) ], nothing # avoid making a tuple Generator here!
        end
    end
    tti0 = widenconst(typ)
    tti = unwrap_unionall(tti0)
    if isa(tti, DataType) && tti.name === NamedTuple_typename
        # A NamedTuple iteration is the same as the iteration of its Tuple parameter:
        # compute a new `tti == unwrap_unionall(tti0)` based on that Tuple type
        tti = tti.parameters[2]
        while isa(tti, TypeVar)
            tti = tti.ub
        end
        tti0 = rewrap_unionall(tti, tti0)
    end
    if isa(tti, Union)
        utis = uniontypes(tti)
        # give up unless every union component is a known-length Tuple
        if _any(t -> !isa(t, DataType) || !(t <: Tuple) || !isknownlength(t), utis)
            return Any[Vararg{Any}], nothing
        end
        result = Any[rewrap_unionall(p, tti0) for p in (utis[1]::DataType).parameters]
        for t::DataType in utis[2:end]
            if length(t.parameters) != length(result)
                return Any[Vararg{Any}], nothing
            end
            # element-wise union across the tuple components
            for j in 1:length(t.parameters)
                result[j] = tmerge(result[j], rewrap_unionall(t.parameters[j], tti0))
            end
        end
        return result, nothing
    elseif tti0 <: Tuple
        if isa(tti0, DataType)
            if isvatuple(tti0) && length(tti0.parameters) == 1
                return Any[Vararg{unwrapva(tti0.parameters[1])}], nothing
            else
                return Any[ p for p in tti0.parameters ], nothing
            end
        elseif !isa(tti, DataType)
            return Any[Vararg{Any}], nothing
        else
            len = length(tti.parameters)
            last = tti.parameters[len]
            va = isvarargtype(last)
            elts = Any[ fieldtype(tti0, i) for i = 1:len ]
            if va
                elts[len] = Vararg{elts[len]}
            end
            return elts, nothing
        end
    elseif tti0 === SimpleVector || tti0 === Any
        return Any[Vararg{Any}], nothing
    elseif tti0 <: Array
        return Any[Vararg{eltype(tti0)}], nothing
    else
        # not a recognized container shape; fall back to simulating `iterate`
        return abstract_iteration(interp, itft, typ, sv)
    end
end
# simulate iteration protocol on container type up to fixpoint
# Returns (elts::Vector{Any}, info::Union{Nothing,AbstractIterationInfo}):
# `elts` is the inferred sequence of element types produced by iterating a value
# of type `itertype` with the iterate function `itft`; the final entry may be a
# `Vararg` covering the unbounded tail. `info` carries the per-call metadata
# when the unrolling succeeded precisely (used later for inlining).
function abstract_iteration(interp::AbstractInterpreter, @nospecialize(itft), @nospecialize(itertype), sv::InferenceState)
    if isa(itft, Const)
        iteratef = itft.val
    else
        # the iterate function itself is not a known constant, so we cannot
        # simulate the protocol at all — fall back to a fully generic splat
        return Any[Vararg{Any}], nothing
    end
    @assert !isvarargtype(itertype)
    # infer the initial `iterate(itr)` call
    call = abstract_call_known(interp, iteratef, nothing, Any[itft, itertype], sv)
    stateordonet = call.rt
    info = call.info
    # Return Bottom if this is not an iterator.
    # WARNING: Changes to the iteration protocol must be reflected here,
    # this is not just an optimization.
    # TODO: this doesn't realize that Array, SimpleVector, Tuple, and NamedTuple do not use the iterate protocol
    stateordonet === Bottom && return Any[Bottom], AbstractIterationInfo(CallMeta[CallMeta(Bottom, info)])
    valtype = statetype = Bottom
    ret = Any[]
    calls = CallMeta[call]
    # Try to unroll the iteration up to MAX_TUPLE_SPLAT, which covers any finite
    # length iterators, or interesting prefix
    while true
        stateordonet_widened = widenconst(stateordonet)
        if stateordonet_widened === Nothing
            # iteration provably terminates here: every element type is known exactly
            return ret, AbstractIterationInfo(calls)
        end
        if Nothing <: stateordonet_widened || length(ret) >= InferenceParams(interp).MAX_TUPLE_SPLAT
            # termination is possible-but-not-certain, or the unroll budget is
            # exhausted — switch to the widened fixpoint loop below
            break
        end
        if !isa(stateordonet_widened, DataType) || !(stateordonet_widened <: Tuple) || isvatuple(stateordonet_widened) || length(stateordonet_widened.parameters) != 2
            # not the expected `(value, state)` 2-tuple shape
            break
        end
        nstatetype = getfield_tfunc(stateordonet, Const(2))
        # If there's no new information in this statetype, don't bother continuing,
        # the iterator won't be finite.
        if nstatetype ⊑ statetype
            return Any[Bottom], nothing
        end
        valtype = getfield_tfunc(stateordonet, Const(1))
        push!(ret, valtype)
        statetype = nstatetype
        # infer the continuing `iterate(itr, state)` call for the next element
        call = abstract_call_known(interp, iteratef, nothing, Any[Const(iteratef), itertype, statetype], sv)
        stateordonet = call.rt
        push!(calls, call)
    end
    # From here on, we start asking for results on the widened types, rather than
    # the precise (potentially const) state type
    statetype = widenconst(statetype)
    valtype = widenconst(valtype)
    # widen (valtype, statetype) until the iterate return type reaches a fixpoint
    while valtype !== Any
        stateordonet = abstract_call_known(interp, iteratef, nothing, Any[Const(iteratef), itertype, statetype], sv).rt
        stateordonet = widenconst(stateordonet)
        # drop the `nothing` (termination) case and inspect the tuple part
        nounion = typesubtract(stateordonet, Nothing, 0)
        if !isa(nounion, DataType) || !(nounion <: Tuple) || isvatuple(nounion) || length(nounion.parameters) != 2
            # unrecognized `(value, state)` shape: give up on element precision
            valtype = Any
            break
        end
        if nounion.parameters[1] <: valtype && nounion.parameters[2] <: statetype
            if typeintersect(stateordonet, Nothing) === Union{}
                # Reached a fixpoint, but Nothing is not possible => iterator is infinite or failing
                return Any[Bottom], nothing
            end
            break
        end
        valtype = tmerge(valtype, nounion.parameters[1])
        statetype = tmerge(statetype, nounion.parameters[2])
    end
    # the unbounded remainder of the iteration produces `valtype` elements
    push!(ret, Vararg{valtype})
    return ret, nothing
end
# do apply(af, fargs...), where af is a function value
# Infers `Core._apply_iterate(itft, af, args...)` by flattening each splatted
# argument into a sequence of element types (via precise_container_type /
# abstract_iteration), optionally union-splitting the arguments, and then
# abstractly calling `af` on every resulting concrete argument list.
function abstract_apply(interp::AbstractInterpreter, argtypes::Vector{Any}, sv::InferenceState,
                        max_methods::Int = InferenceParams(interp).MAX_METHODS)
    itft = argtype_by_index(argtypes, 2)   # the iterate function
    aft = argtype_by_index(argtypes, 3)    # the applied function
    (itft === Bottom || aft === Bottom) && return CallMeta(Bottom, false)
    aargtypes = argtype_tail(argtypes, 4)  # the splatted container arguments
    aftw = widenconst(aft)
    if !isa(aft, Const) && !isa(aft, PartialOpaque) && (!isType(aftw) || has_free_typevars(aftw))
        if !isconcretetype(aftw) || (aftw <: Builtin)
            add_remark!(interp, sv, "Core._apply_iterate called on a function of a non-concrete type")
            # bail now, since it seems unlikely that abstract_call will be able to do any better after splitting
            # this also ensures we don't call abstract_call_gf_by_type below on an IntrinsicFunction or Builtin
            return CallMeta(Any, false)
        end
    end
    res = Union{}
    nargs = length(aargtypes)
    splitunions = 1 < unionsplitcost(aargtypes) <= InferenceParams(interp).MAX_APPLY_UNION_ENUM
    # `ctypes` holds every candidate flattened argument list built so far,
    # `infos` the parallel per-argument iteration metadata for each candidate
    ctypes = [Any[aft]]
    infos = [Union{Nothing, AbstractIterationInfo}[]]
    for i = 1:nargs
        ctypes´ = Vector{Any}[]
        infos′ = Vector{Union{Nothing, AbstractIterationInfo}}[]
        for ti in (splitunions ? uniontypes(aargtypes[i]) : Any[aargtypes[i]])
            if !isvarargtype(ti)
                cti_info = precise_container_type(interp, itft, ti, sv)
                cti = cti_info[1]::Vector{Any}
                info = cti_info[2]::Union{Nothing,AbstractIterationInfo}
            else
                cti_info = precise_container_type(interp, itft, unwrapva(ti), sv)
                cti = cti_info[1]::Vector{Any}
                info = cti_info[2]::Union{Nothing,AbstractIterationInfo}
                # We can't represent a repeating sequence of the same types,
                # so tmerge everything together to get one type that represents
                # everything.
                argt = cti[end]
                if isvarargtype(argt)
                    argt = unwrapva(argt)
                end
                for i in 1:(length(cti)-1)
                    argt = tmerge(argt, cti[i])
                end
                cti = Any[Vararg{argt}]
            end
            if _any(t -> t === Bottom, cti)
                # this union component cannot actually be iterated; skip it
                continue
            end
            # cross-product: extend every existing candidate list with this
            # argument's flattened element types
            for j = 1:length(ctypes)
                ct = ctypes[j]::Vector{Any}
                if isvarargtype(ct[end])
                    # This is vararg, we're not gonna be able to do any inlining,
                    # drop the info
                    info = nothing
                    tail = tuple_tail_elem(unwrapva(ct[end]), cti)
                    push!(ctypes´, push!(ct[1:(end - 1)], tail))
                else
                    push!(ctypes´, append!(ct[:], cti))
                end
                push!(infos′, push!(copy(infos[j]), info))
            end
        end
        ctypes = ctypes´
        infos = infos′
    end
    retinfos = ApplyCallInfo[]
    retinfo = UnionSplitApplyCallInfo(retinfos)
    for i = 1:length(ctypes)
        ct = ctypes[i]
        arginfo = infos[i]
        lct = length(ct)
        # truncate argument list at the first Vararg
        for i = 1:lct-1
            if isvarargtype(ct[i])
                ct[i] = tuple_tail_elem(ct[i], ct[(i+1):lct])
                resize!(ct, i)
                break
            end
        end
        call = abstract_call(interp, nothing, ct, sv, max_methods)
        push!(retinfos, ApplyCallInfo(call.info, arginfo))
        res = tmerge(res, call.rt)
        if bail_out_apply(interp, res, sv)
            if i != length(ctypes)
                # No point carrying forward the info, we're not gonna inline it anyway
                retinfo = false
            end
            break
        end
    end
    # TODO: Add a special info type to capture all the iteration info.
    # For now, only propagate info if we don't also union-split the iteration
    return CallMeta(res, retinfo)
end
# Decide whether `method` may be treated as `pure` (side-effect free and
# value-deterministic) for the purposes of constant folding.
# For @generated methods the expanded CodeInfo must itself be marked pure;
# for ordinary methods the method's own `pure` flag is authoritative.
function is_method_pure(method::Method, @nospecialize(sig), sparams::SimpleVector)
    # ordinary (non-generated) methods: trust the method's flag directly
    isdefined(method, :generator) || return method.pure
    # generated methods must allow early expansion so we can inspect the result
    if !method.generator.expand_early
        return false
    end
    minst = specialize_method(method, sig, sparams, false)
    if !isa(minst, MethodInstance)
        return false
    end
    expanded = get_staged(minst)
    # purity holds only if expansion produced a CodeInfo flagged as pure
    return expanded isa CodeInfo && (expanded::CodeInfo).pure
end
# convenience overload: unpack a MethodMatch into its components
is_method_pure(match::MethodMatch) = is_method_pure(match.method, match.spec_types, match.sparams)
# Attempt to evaluate a call to the pure function `f` at compile time.
# Returns `Const(result)` when all arguments are compile-time constants and
# the evaluation succeeds, otherwise returns `false`.
function pure_eval_call(@nospecialize(f), argtypes::Vector{Any})
    nargs = length(argtypes)
    # every argument (past the function itself) must be a known constant:
    # either a `Const` or a constant `Type{T}` singleton
    for k = 2:nargs
        widened = widenconditional(argtypes[k])
        isa(widened, Const) || isconstType(widened) || return false
    end
    # extract the concrete value carried by each constant argument type
    vals = Vector{Any}(undef, nargs - 1)
    for k = 2:nargs
        widened = widenconditional(argtypes[k])
        vals[k - 1] = isa(widened, Const) ? widened.val : widened.parameters[1]
    end
    try
        return Const(Core._apply_pure(f, vals))
    catch
        # evaluation threw: the call cannot be folded (deliberate best-effort)
        return false
    end
end
# Fetch the type of argument `i` from an argument-type list whose final entry
# may be a `Vararg`. Positions covered by a trailing Vararg yield its unwrapped
# element type; positions past a non-Vararg end yield `Bottom`.
function argtype_by_index(argtypes::Vector{Any}, i::Int)
    nargs = length(argtypes)
    lastarg = argtypes[nargs]
    if isvarargtype(lastarg)
        # the trailing Vararg absorbs its own slot and everything after it
        i >= nargs && return unwrapva(lastarg)
        return argtypes[i]
    end
    # no Vararg: out-of-range positions are unreachable
    i > nargs && return Bottom
    return argtypes[i]
end
# Return the suffix of `argtypes` starting at position `i`, clamping into the
# final slot when that slot is a `Vararg` (which covers all later positions).
function argtype_tail(argtypes::Vector{Any}, i::Int)
    nargs = length(argtypes)
    start = (isvarargtype(argtypes[nargs]) && i > nargs) ? nargs : i
    return argtypes[start:nargs]
end
function abstract_call_builtin(interp::AbstractInterpreter, f::Builtin, fargs::Union{Nothing,Vector{Any}},
argtypes::Vector{Any}, sv::InferenceState, max_methods::Int)
@nospecialize f
la = length(argtypes)
if f === ifelse && fargs isa Vector{Any} && la == 4
cnd = argtypes[2]
if isa(cnd, Conditional)
newcnd = widenconditional(cnd)
tx = argtypes[3]
ty = argtypes[4]
if isa(newcnd, Const)
# if `cnd` is constant, we should just respect its constantness to keep inference accuracy
return newcnd.val::Bool ? tx : ty
else
# try to simulate this as a real conditional (`cnd ? x : y`), so that the penalty for using `ifelse` instead isn't too high
a = ssa_def_slot(fargs[3], sv)
b = ssa_def_slot(fargs[4], sv)
if isa(a, SlotNumber) && slot_id(cnd.var) == slot_id(a)
tx = (cnd.vtype ⊑ tx ? cnd.vtype : tmeet(tx, widenconst(cnd.vtype)))
end
if isa(b, SlotNumber) && slot_id(cnd.var) == slot_id(b)
ty = (cnd.elsetype ⊑ ty ? cnd.elsetype : tmeet(ty, widenconst(cnd.elsetype)))
end
return tmerge(tx, ty)
end
end
end
rt = builtin_tfunction(interp, f, argtypes[2:end], sv)
if f === getfield && isa(fargs, Vector{Any}) && la == 3 &&