# This file is a part of Julia. License is MIT: https://julialang.org/license

"""
    AbstractInterpreter

An abstract base type that allows multiple dispatch to determine the method of
executing Julia code. The native Julia LLVM pipeline is enabled by using the
`NativeInterpreter` concrete instantiation of this abstract type; others can be
swapped in as long as they follow the `AbstractInterpreter` API.

If `interp` is an `AbstractInterpreter`, it is expected to provide at least the following methods:
- `InferenceParams(interp)` - return an `InferenceParams` instance
- `OptimizationParams(interp)` - return an `OptimizationParams` instance
- `get_world_counter(interp)` - return the world age for this interpreter
- `get_inference_cache(interp)` - return the runtime inference cache
- `code_cache(interp)` - return the global inference cache
"""
abstract type AbstractInterpreter end
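
# Illustrative sketch (not part of the original source): a minimal external interpreter
# satisfying the `AbstractInterpreter` API listed above. The name `CustomInterp` is
# hypothetical; the definitions reuse types and globals introduced later in this file.
#
#   struct CustomInterp <: AbstractInterpreter
#       world::UInt
#       inf_params::InferenceParams
#       opt_params::OptimizationParams
#       cache::Vector{InferenceResult}
#   end
#   CustomInterp() = CustomInterp(get_world_counter(), InferenceParams(),
#                                 OptimizationParams(), InferenceResult[])
#   InferenceParams(interp::CustomInterp) = interp.inf_params
#   OptimizationParams(interp::CustomInterp) = interp.opt_params
#   get_world_counter(interp::CustomInterp) = interp.world
#   get_inference_cache(interp::CustomInterp) = interp.cache
#   code_cache(interp::CustomInterp) = WorldView(GLOBAL_CI_CACHE, get_world_counter(interp))
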
struct ArgInfo
    fargs::Union{Nothing,Vector{Any}}
    argtypes::Vector{Any}
end

struct TriState; state::UInt8; end
const ALWAYS_FALSE = TriState(0x00)
const ALWAYS_TRUE = TriState(0x01)
const TRISTATE_UNKNOWN = TriState(0x02)

function tristate_merge(old::TriState, new::TriState)
    (old === ALWAYS_FALSE || new === ALWAYS_FALSE) && return ALWAYS_FALSE
    old === TRISTATE_UNKNOWN && return old
    return new
end
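
# Illustrative note (not part of the original source): the merge is pessimistic. Merging
# with `ALWAYS_FALSE` always yields `ALWAYS_FALSE`, and once the accumulated state is
# `TRISTATE_UNKNOWN` it never upgrades back to `ALWAYS_TRUE`. For example:
#
#   tristate_merge(ALWAYS_TRUE, TRISTATE_UNKNOWN)  # => TRISTATE_UNKNOWN
#   tristate_merge(TRISTATE_UNKNOWN, ALWAYS_TRUE)  # => TRISTATE_UNKNOWN
#   tristate_merge(ALWAYS_TRUE, ALWAYS_FALSE)      # => ALWAYS_FALSE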

struct Effects
    consistent::TriState
    effect_free::TriState
    nothrow::TriState
    terminates::TriState
    # This effect is currently only tracked during inference and modifies
    # :consistent before caching. We may want to track it in the future.
    inbounds_taints_consistency::Bool
end
Effects(consistent::TriState, effect_free::TriState, nothrow::TriState, terminates::TriState) =
    Effects(consistent, effect_free, nothrow, terminates, false)
Effects() = Effects(TRISTATE_UNKNOWN, TRISTATE_UNKNOWN, TRISTATE_UNKNOWN, TRISTATE_UNKNOWN)
Effects(e::Effects; consistent::TriState = e.consistent,
        effect_free::TriState = e.effect_free,
        nothrow::TriState = e.nothrow,
        terminates::TriState = e.terminates,
        inbounds_taints_consistency::Bool = e.inbounds_taints_consistency) =
    Effects(consistent, effect_free, nothrow, terminates, inbounds_taints_consistency)

is_total_or_error(effects::Effects) =
    effects.consistent === ALWAYS_TRUE &&
    effects.effect_free === ALWAYS_TRUE &&
    effects.terminates === ALWAYS_TRUE

is_total(effects::Effects) =
    is_total_or_error(effects) && effects.nothrow === ALWAYS_TRUE

is_removable_if_unused(effects::Effects) =
    effects.effect_free === ALWAYS_TRUE &&
    effects.terminates === ALWAYS_TRUE &&
    effects.nothrow === ALWAYS_TRUE

const EFFECTS_TOTAL = Effects(ALWAYS_TRUE, ALWAYS_TRUE, ALWAYS_TRUE, ALWAYS_TRUE)

encode_effects(e::Effects) =
    e.consistent.state | (e.effect_free.state << 2) | (e.nothrow.state << 4) | (e.terminates.state << 6)
decode_effects(e::UInt8) =
    Effects(TriState(e & 0x3),
            TriState((e >> 2) & 0x3),
            TriState((e >> 4) & 0x3),
            TriState((e >> 6) & 0x3), false)
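
# Illustrative note (not part of the original source): each `TriState` fits in two bits,
# so the four tracked effects pack losslessly into a single `UInt8` and round-trip
# through `encode_effects`/`decode_effects`. For example:
#
#   encode_effects(EFFECTS_TOTAL)           # => 0x55 (0b01_01_01_01)
#   decode_effects(0x55) === EFFECTS_TOTAL  # => true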

function tristate_merge(old::Effects, new::Effects)
    return Effects(
        tristate_merge(old.consistent, new.consistent),
        tristate_merge(old.effect_free, new.effect_free),
        tristate_merge(old.nothrow, new.nothrow),
        tristate_merge(old.terminates, new.terminates),
        old.inbounds_taints_consistency || new.inbounds_taints_consistency)
end

struct EffectsOverride
    consistent::Bool
    effect_free::Bool
    nothrow::Bool
    terminates_globally::Bool
    terminates_locally::Bool
end

function encode_effects_override(eo::EffectsOverride)
    e = 0x00
    eo.consistent          && (e |= 0x01)
    eo.effect_free         && (e |= 0x02)
    eo.nothrow             && (e |= 0x04)
    eo.terminates_globally && (e |= 0x08)
    eo.terminates_locally  && (e |= 0x10)
    return e
end

decode_effects_override(e::UInt8) =
    EffectsOverride(
        (e & 0x01) != 0x00,
        (e & 0x02) != 0x00,
        (e & 0x04) != 0x00,
        (e & 0x08) != 0x00,
        (e & 0x10) != 0x00)
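
# Illustrative note (not part of the original source): `EffectsOverride` packs one bit per
# flag, in field-declaration order. For example:
#
#   encode_effects_override(EffectsOverride(true, true, false, false, false))  # => 0x03
#   decode_effects_override(0x03)  # => EffectsOverride(true, true, false, false, false)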
"""
InferenceResult
A type that represents the result of running type inference on a chunk of code.
"""
mutable struct InferenceResult
linfo::MethodInstance
argtypes::Vector{Any}
overridden_by_const::BitVector
result # ::Type, or InferenceState if WIP
src # ::Union{CodeInfo, OptimizationState} if inferred copy is available, nothing otherwise
valid_worlds::WorldRange # if inference and optimization is finished
ipo_effects::Effects # if inference is finished
effects::Effects # if optimization is finished
argescapes # ::ArgEscapeCache if optimized, nothing otherwise
function InferenceResult(linfo::MethodInstance,
arginfo#=::Union{Nothing,Tuple{ArgInfo,InferenceState}}=# = nothing)
argtypes, overridden_by_const = matching_cache_argtypes(linfo, arginfo)
return new(linfo, argtypes, overridden_by_const, Any, nothing,
WorldRange(), Effects(), Effects(), nothing)
end
end
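
# Illustrative note (hypothetical usage, not part of the original source): an
# `InferenceResult` is created per `MethodInstance` before inference runs; the result,
# source, and effects fields start out as placeholders and are filled in as inference
# and optimization finish. Roughly:
#
#   result = InferenceResult(mi)  # for some mi::MethodInstance
#   result.result                 # => Any (not yet inferred)
#   result.ipo_effects            # => Effects() (all TRISTATE_UNKNOWN)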
"""
OptimizationParams
Parameters that control optimizer operation.
"""
struct OptimizationParams
inlining::Bool # whether inlining is enabled
inline_cost_threshold::Int # number of CPU cycles beyond which it's not worth inlining
inline_nonleaf_penalty::Int # penalty for dynamic dispatch
inline_tupleret_bonus::Int # extra inlining willingness for non-concrete tuple return types (in hopes of splitting it up)
inline_error_path_cost::Int # cost of (un-optimized) calls in blocks that throw
trust_inference::Bool
# Duplicating for now because optimizer inlining requires it.
# Keno assures me this will be removed in the near future
MAX_METHODS::Int
MAX_TUPLE_SPLAT::Int
MAX_UNION_SPLITTING::Int
function OptimizationParams(;
inlining::Bool = inlining_enabled(),
inline_cost_threshold::Int = 100,
inline_nonleaf_penalty::Int = 1000,
inline_tupleret_bonus::Int = 250,
inline_error_path_cost::Int = 20,
max_methods::Int = 3,
tuple_splat::Int = 32,
union_splitting::Int = 4,
trust_inference::Bool = false
)
return new(
inlining,
inline_cost_threshold,
inline_nonleaf_penalty,
inline_tupleret_bonus,
inline_error_path_cost,
trust_inference,
max_methods,
tuple_splat,
union_splitting
)
end
end
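
# Illustrative note (not part of the original source): the keyword constructor uses
# lowercase names for the uppercase limit fields, e.g. `max_methods` sets `MAX_METHODS`:
#
#   params = OptimizationParams(inline_cost_threshold = 250, max_methods = 4)
#   params.inline_cost_threshold  # => 250
#   params.MAX_METHODS            # => 4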
"""
InferenceParams
Parameters that control type inference operation.
"""
struct InferenceParams
ipo_constant_propagation::Bool
aggressive_constant_propagation::Bool
unoptimize_throw_blocks::Bool
# don't consider more than N methods. this trades off between
# compiler performance and generated code performance.
# typically, considering many methods means spending lots of time
# obtaining poor type information.
# It is important for N to be >= the number of methods in the error()
# function, so we can still know that error() is always Bottom.
MAX_METHODS::Int
# the maximum number of union-tuples to swap / expand
# before computing the set of matching methods
MAX_UNION_SPLITTING::Int
# the maximum number of union-tuples to swap / expand
# when inferring a call to _apply_iterate
MAX_APPLY_UNION_ENUM::Int
# parameters limiting large (tuple) types
TUPLE_COMPLEXITY_LIMIT_DEPTH::Int
# when attempting to inline _apply_iterate, abort the optimization if the
# tuple contains more than this many elements
MAX_TUPLE_SPLAT::Int
function InferenceParams(;
ipo_constant_propagation::Bool = true,
aggressive_constant_propagation::Bool = false,
unoptimize_throw_blocks::Bool = true,
max_methods::Int = 3,
union_splitting::Int = 4,
apply_union_enum::Int = 8,
tupletype_depth::Int = 3,
tuple_splat::Int = 32,
)
return new(
ipo_constant_propagation,
aggressive_constant_propagation,
unoptimize_throw_blocks,
max_methods,
union_splitting,
apply_union_enum,
tupletype_depth,
tuple_splat,
)
end
end
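
# Illustrative note (not part of the original source): inference parameters follow the
# same lowercase-keyword convention, e.g. limiting how many methods a call site may
# match before inference gives up on precise information:
#
#   params = InferenceParams(max_methods = 1)
#   params.MAX_METHODS  # => 1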
"""
NativeInterpreter
This represents Julia's native type inference algorithm and codegen backend.
It contains many parameters used by the compilation pipeline.
"""
struct NativeInterpreter <: AbstractInterpreter
# Cache of inference results for this particular interpreter
cache::Vector{InferenceResult}
# The world age we're working inside of
world::UInt
# Parameters for inference and optimization
inf_params::InferenceParams
opt_params::OptimizationParams
function NativeInterpreter(world::UInt = get_world_counter();
inf_params = InferenceParams(),
opt_params = OptimizationParams(),
)
# Sometimes the caller is lazy and passes typemax(UInt).
# we cap it to the current world age
if world == typemax(UInt)
world = get_world_counter()
end
# If they didn't pass typemax(UInt) but passed something more subtly
# incorrect, fail out loudly.
@assert world <= get_world_counter()
return new(
# Initially empty cache
Vector{InferenceResult}(),
# world age counter
world,
# parameters for inference and optimization
inf_params,
opt_params,
)
end
end
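
# Illustrative sketch (hypothetical usage, not part of the original source): constructing
# a `NativeInterpreter` with non-default parameters. The `interp` keyword of `code_typed`
# shown below is an assumption about the surrounding Julia version, not something defined
# in this file:
#
#   interp = NativeInterpreter(; inf_params = InferenceParams(max_methods = 1),
#                                opt_params = OptimizationParams(inlining = false))
#   Base.code_typed(sin, (Float64,); interp = interp)
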
# Quickly and easily satisfy the AbstractInterpreter API contract
InferenceParams(ni::NativeInterpreter) = ni.inf_params
OptimizationParams(ni::NativeInterpreter) = ni.opt_params
get_world_counter(ni::NativeInterpreter) = ni.world
get_inference_cache(ni::NativeInterpreter) = ni.cache
code_cache(ni::NativeInterpreter) = WorldView(GLOBAL_CI_CACHE, get_world_counter(ni))
"""
lock_mi_inference(ni::NativeInterpreter, mi::MethodInstance)
Hint that `mi` is in inference to help accelerate bootstrapping.
This helps us limit the amount of wasted work we might do when inference is working on initially inferring itself
by letting us detect when inference is already in progress and not running a second copy on it.
This creates a data-race, but the entry point into this code from C (`jl_type_infer`) already includes detection and restriction on recursion,
so it is hopefully mostly a benign problem (since it should really only happen during the first phase of bootstrapping that we encounter this flag).
"""
lock_mi_inference(::NativeInterpreter, mi::MethodInstance) = (mi.inInference = true; nothing)
lock_mi_inference(::AbstractInterpreter, ::MethodInstance) = return
"""
See `lock_mi_inference`.
"""
unlock_mi_inference(::NativeInterpreter, mi::MethodInstance) = (mi.inInference = false; nothing)
unlock_mi_inference(::AbstractInterpreter, ::MethodInstance) = return
"""
Emit an analysis remark during inference for the current line (`sv.pc`).
These annotations are ignored by the native interpreter, but can be used by external tooling
to annotate inference results.
"""
add_remark!(::AbstractInterpreter, sv#=::InferenceState=#, s) = return
may_optimize(::AbstractInterpreter) = true
may_compress(::AbstractInterpreter) = true
may_discard_trees(::AbstractInterpreter) = true
verbose_stmt_info(::AbstractInterpreter) = false
method_table(interp::AbstractInterpreter) = InternalMethodTable(get_world_counter(interp))
"""
By default `AbstractInterpreter` implements the following inference bail out logic:
- `bail_out_toplevel_call(::AbstractInterpreter, sig, ::InferenceState)`: bail out from inter-procedural inference when inferring top-level and non-concrete call site `callsig`
- `bail_out_call(::AbstractInterpreter, rt, ::InferenceState)`: bail out from inter-procedural inference when return type `rt` grows up to `Any`
- `bail_out_apply(::AbstractInterpreter, rt, ::InferenceState)`: bail out from `_apply_iterate` inference when return type `rt` grows up to `Any`
It also bails out from local statement/frame inference when any lattice element gets down to `Bottom`,
but `AbstractInterpreter` doesn't provide a specific interface for configuring it.
"""
bail_out_toplevel_call(::AbstractInterpreter, @nospecialize(callsig), sv#=::InferenceState=#) =
return isa(sv.linfo.def, Module) && !isdispatchtuple(callsig)
bail_out_call(::AbstractInterpreter, @nospecialize(rt), sv#=::InferenceState=#) =
return rt === Any
bail_out_apply(::AbstractInterpreter, @nospecialize(rt), sv#=::InferenceState=#) =
return rt === Any
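
# Illustrative sketch (hypothetical, not part of the original source): an external
# interpreter can customize these heuristics by overriding the hooks for its own type,
# e.g. for the `CustomInterp` sketched near `AbstractInterpreter` above:
#
#   bail_out_call(::CustomInterp, @nospecialize(rt), sv) = false  # keep inferring even at `Any`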
"""
infer_compilation_signature(::AbstractInterpreter)::Bool
For some call sites (for example calls to varargs methods), the signature to be compiled
and executed at run time can differ from the argument types known at the call site.
This flag controls whether we should always infer the compilation signature in addition
to the call site signature.
"""
infer_compilation_signature(::AbstractInterpreter) = false
infer_compilation_signature(::NativeInterpreter) = true