From 8c445663547791e59f33f9e8d49b276332107614 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 29 Mar 2019 12:11:33 -0400 Subject: [PATCH] internals: better representation for code (#31191) This'll help solve many of the problems with edges getting mis-represented and broken by adding an extra level of indirection between MethodInstance (now really just representing a particular specialization of a method) and the executable object, now called CodeInstance (representing some functional operator that converts some input arguments to some output values, with whatever metadata is convenient to contain there). This fixes many of the previous representation problems with back-edges, since a MethodInstance (like Method) no longer tries to also represent a computation. That task is now relegated strictly to CodeInstance. --- base/boot.jl | 7 +- base/compiler/abstractinterpretation.jl | 66 +- base/compiler/bootstrap.jl | 2 +- base/compiler/compiler.jl | 2 +- base/compiler/inferenceresult.jl | 7 +- base/compiler/inferencestate.jl | 18 +- base/compiler/optimize.jl | 16 +- base/compiler/params.jl | 2 + base/compiler/ssair/inlining.jl | 85 +- base/compiler/tfuncs.jl | 1 + base/compiler/typeinfer.jl | 222 +++-- base/compiler/utilities.jl | 31 +- base/errorshow.jl | 6 +- base/methodshow.jl | 2 +- base/reflection.jl | 53 +- doc/src/devdocs/ast.md | 76 +- src/ast.c | 2 +- src/builtins.c | 3 +- src/codegen.cpp | 553 ++++++------ src/common_symbols1.inc | 2 - src/debuginfo.cpp | 67 +- src/dump.c | 350 ++++---- src/gf.c | 991 ++++++++++------ src/interpreter.c | 50 +- src/jltypes.c | 72 +- src/julia.h | 74 +- src/julia_internal.h | 25 +- src/method.c | 80 +- src/precompile.c | 84 +- src/rtutils.c | 2 +- src/staticdata.c | 43 +- src/threading.c | 4 +- src/toplevel.c | 14 +- src/typemap.c | 29 +- stdlib/REPL/src/REPLCompletions.jl | 2 +- stdlib/Serialization/src/Serialization.jl | 26 +- stdlib/Serialization/test/runtests.jl | 16 +- stdlib/Test/src/Test.jl | 8 +- test/ambiguous.jl | 14 +- test/compiler/inference.jl | 22 +- test/compiler/validation.jl | 2 +- test/reflection.jl | 74 +- test/stacktraces.jl | 2 +- test/syntax.jl | 2 +- test/worlds.jl | 32 +- 45 files changed, 1650 insertions(+), 1591 deletions(-) diff --git a/base/boot.jl b/base/boot.jl index c7d81c71c9360..c502762e272b7 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -65,6 +65,9 @@ #mutable struct MethodInstance #end +#mutable struct CodeInstance +#end + #mutable struct CodeInfo #end @@ -442,11 +445,11 @@ Symbol(s::Symbol) = s # module providing the IR object model module IR -export CodeInfo, MethodInstance, GotoNode, +export CodeInfo, MethodInstance, CodeInstance, GotoNode, NewvarNode, SSAValue, Slot, SlotNumber, TypedSlot, PiNode, PhiNode, PhiCNode, UpsilonNode, LineInfoNode -import Core: CodeInfo, MethodInstance, GotoNode, +import Core: CodeInfo, MethodInstance, CodeInstance, GotoNode, NewvarNode, SSAValue, Slot, SlotNumber, TypedSlot, PiNode, PhiNode, PhiCNode, UpsilonNode, LineInfoNode diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index 64f433c026e02..f7e8876301146 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -89,7 +89,8 @@ function abstract_call_gf_by_type(@nospecialize(f), argtypes::Vector{Any}, @nosp this_rt === Any && break end else - this_rt, edgecycle, edge = abstract_call_method(method, sig, match[2]::SimpleVector, sv) + this_rt, edgecycle1, edge = abstract_call_method(method, sig, match[2]::SimpleVector, sv) + edgecycle |=
edgecycle1::Bool if edge !== nothing push!(edges, edge) end @@ -170,45 +171,50 @@ function abstract_call_method_with_const_args(@nospecialize(rettype), @nospecial method.isva && (nargs -= 1) length(argtypes) >= nargs || return Any haveconst = false + allconst = true + # see if any or all of the arguments are constant and propagating constants may be worthwhile for a in argtypes a = widenconditional(a) - if has_nontrivial_const_info(a) - haveconst = const_prop_profitable(a) - haveconst && break + if allconst && !isa(a, Const) && !isconstType(a) && !isa(a, PartialStruct) + allconst = false + end + if !haveconst && has_nontrivial_const_info(a) && const_prop_profitable(a) + haveconst = true + end + if haveconst && !allconst + break end end haveconst || improvable_via_constant_propagation(rettype) || return Any sig = match[1] sparams = match[2]::SimpleVector - code = code_for_method(method, sig, sparams, sv.params.world) - code === nothing && return Any - code = code::MethodInstance - # decide if it's likely to be worthwhile - declared_inline = isdefined(method, :source) && ccall(:jl_ast_flag_inlineable, Bool, (Any,), method.source) - cache_inlineable = declared_inline - if isdefined(code, :inferred) && !cache_inlineable - cache_inf = code.inferred - if !(cache_inf === nothing) - cache_src_inferred = ccall(:jl_ast_flag_inferred, Bool, (Any,), cache_inf) - cache_src_inlineable = ccall(:jl_ast_flag_inlineable, Bool, (Any,), cache_inf) - cache_inlineable = cache_src_inferred && cache_src_inlineable - end + force_inference = allconst || sv.params.aggressive_constant_propagation + if istopfunction(f, :getproperty) || istopfunction(f, :setproperty!) + force_inference = true end - if !cache_inlineable && !sv.params.aggressive_constant_propagation - tm = _topmod(sv) - if !istopfunction(f, :getproperty) && !istopfunction(f, :setproperty!) - # in this case, see if all of the arguments are constants - for a in argtypes - a = widenconditional(a) - if !isa(a, Const) && !isconstType(a) && !isa(a, PartialStruct) - return Any - end + mi = specialize_method(method, sig, sparams, !force_inference) + mi === nothing && return Any + mi = mi::MethodInstance + # decide if it's likely to be worthwhile + if !force_inference + code = inf_for_methodinstance(mi, sv.params.world) + declared_inline = isdefined(method, :source) && ccall(:jl_ast_flag_inlineable, Bool, (Any,), method.source) + cache_inlineable = declared_inline + if isdefined(code, :inferred) && !cache_inlineable + cache_inf = code.inferred + if !(cache_inf === nothing) + cache_src_inferred = ccall(:jl_ast_flag_inferred, Bool, (Any,), cache_inf) + cache_src_inlineable = ccall(:jl_ast_flag_inlineable, Bool, (Any,), cache_inf) + cache_inlineable = cache_src_inferred && cache_src_inlineable end end + if !cache_inlineable + return Any + end end - inf_result = cache_lookup(code, argtypes, sv.params.cache) + inf_result = cache_lookup(mi, argtypes, sv.params.cache) if inf_result === nothing - inf_result = InferenceResult(code, argtypes) + inf_result = InferenceResult(mi, argtypes) frame = InferenceState(inf_result, #=cache=#false, sv.params) frame.limited = true frame.parent = sv @@ -216,7 +222,7 @@ function abstract_call_method_with_const_args(@nospecialize(rettype), @nospecial typeinf(frame) || return Any end result = inf_result.result - isa(result, InferenceState) && return Any # TODO: is this recursive constant inference? + isa(result, InferenceState) && return Any # TODO: unexpected, is this recursive constant inference? 
add_backedge!(inf_result.linfo, sv) return result end @@ -237,7 +243,7 @@ function abstract_call_method(method::Method, @nospecialize(sig), sparams::Simpl # necessary in order to retrieve this field from the generated `CodeInfo`, if it exists. # The other `CodeInfo`s we inspect will already have this field inflated, so we just # access it directly instead (to avoid regeneration). - method2 = method_for_inference_heuristics(method, sig, sparams, sv.params.world) # Union{Method, Nothing} + method2 = method_for_inference_heuristics(method, sig, sparams) # Union{Method, Nothing} sv_method2 = sv.src.method_for_inference_limit_heuristics # limit only if user token match sv_method2 isa Method || (sv_method2 = nothing) # Union{Method, Nothing} while !(infstate === nothing) diff --git a/base/compiler/bootstrap.jl b/base/compiler/bootstrap.jl index 6d889d67ee067..b1fb73f6b659e 100644 --- a/base/compiler/bootstrap.jl +++ b/base/compiler/bootstrap.jl @@ -6,7 +6,7 @@ # since we won't be able to specialize & infer them at runtime let fs = Any[typeinf_ext, typeinf, typeinf_edge, pure_eval_call, run_passes], - world = ccall(:jl_get_world_counter, UInt, ()) + world = get_world_counter() for x in T_FFUNC_VAL push!(fs, x[3]) end diff --git a/base/compiler/compiler.jl b/base/compiler/compiler.jl index b12bb8c2e892d..c2d97e40cded4 100644 --- a/base/compiler/compiler.jl +++ b/base/compiler/compiler.jl @@ -5,7 +5,7 @@ getfield(getfield(Main, :Core), :eval)(getfield(Main, :Core), :(baremodule Compi using Core.Intrinsics, Core.IR import Core: print, println, show, write, unsafe_write, stdout, stderr, - _apply, svec, apply_type, Builtin, IntrinsicFunction, MethodInstance + _apply, svec, apply_type, Builtin, IntrinsicFunction, MethodInstance, CodeInstance const getproperty = getfield const setproperty! = setfield! 
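The rewritten scan in `abstract_call_method_with_const_args` above computes two flags in a single pass over the argument types: `haveconst` (at least one argument carries constant information that looks worth propagating) and `allconst` (every argument is some form of constant). Here is a minimal standalone sketch of that triage logic; `Const`, `is_const_like`, and `is_profitable` are simplified stand-ins for the compiler-internal lattice types and heuristics, not the real `Core.Compiler` API:

```julia
# Sketch of the haveconst/allconst triage loop, under simplified assumptions.
struct Const
    val
end

is_const_like(a) = isa(a, Const)                      # stands in for Const/isconstType/PartialStruct
is_profitable(a) = isa(a, Const) && !isa(a.val, Type) # stands in for const_prop_profitable

function const_prop_triage(argtypes::Vector{Any})
    haveconst = false
    allconst = true
    for a in argtypes
        allconst &= is_const_like(a)
        haveconst |= is_profitable(a)
        # once one profitable constant and one non-constant have been seen,
        # neither flag can change, so stop scanning early
        haveconst && !allconst && break
    end
    return haveconst, allconst
end

const_prop_triage(Any[Const(1), String])  # (true, false): worth trying, not all-constant
```

In the patch, `allconst` then feeds the `force_inference` decision, alongside the `aggressive_constant_propagation` parameter and the `getproperty`/`setproperty!` special cases.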
diff --git a/base/compiler/inferenceresult.jl b/base/compiler/inferenceresult.jl index 8ad8cab76d123..a466a00df89df 100644 --- a/base/compiler/inferenceresult.jl +++ b/base/compiler/inferenceresult.jl @@ -9,13 +9,8 @@ mutable struct InferenceResult result # ::Type, or InferenceState if WIP src #::Union{CodeInfo, OptimizationState, Nothing} # if inferred copy is available function InferenceResult(linfo::MethodInstance, given_argtypes = nothing) - if isdefined(linfo, :inferred_const) - result = Const(linfo.inferred_const) - else - result = linfo.rettype - end argtypes, overridden_by_const = matching_cache_argtypes(linfo, given_argtypes) - return new(linfo, argtypes, overridden_by_const, result, nothing) + return new(linfo, argtypes, overridden_by_const, Any, nothing) end end diff --git a/base/compiler/inferencestate.jl b/base/compiler/inferencestate.jl index 2d76a3ee7887e..2d6d2373e2361 100644 --- a/base/compiler/inferencestate.jl +++ b/base/compiler/inferencestate.jl @@ -5,7 +5,7 @@ const LineNum = Int mutable struct InferenceState params::Params # describes how to compute the result result::InferenceResult # remember where to put the result - linfo::MethodInstance # used here for the tuple (specTypes, env, Method) and world-age validity + linfo::MethodInstance sptypes::Vector{Any} # types of static parameter slottypes::Vector{Any} mod::Module @@ -87,13 +87,8 @@ mutable struct InferenceState inmodule = linfo.def::Module end - if cached && !toplevel - min_valid = min_world(linfo.def) - max_valid = max_world(linfo.def) - else - min_valid = typemax(UInt) - max_valid = typemin(UInt) - end + min_valid = UInt(1) + max_valid = get_world_counter() frame = new( params, result, linfo, sp, slottypes, inmodule, 0, @@ -194,15 +189,12 @@ _topmod(sv::InferenceState) = _topmod(sv.mod) function update_valid_age!(min_valid::UInt, max_valid::UInt, sv::InferenceState) sv.min_valid = max(sv.min_valid, min_valid) sv.max_valid = min(sv.max_valid, max_valid) - @assert(!isa(sv.linfo.def, Method) || - !sv.cached || - sv.min_valid <= sv.params.world <= sv.max_valid, + @assert(sv.min_valid <= sv.params.world <= sv.max_valid, "invalid age range update") nothing end update_valid_age!(edge::InferenceState, sv::InferenceState) = update_valid_age!(edge.min_valid, edge.max_valid, sv) -update_valid_age!(li::MethodInstance, sv::InferenceState) = update_valid_age!(min_world(li), max_world(li), sv) function record_ssa_assign(ssa_id::Int, @nospecialize(new), frame::InferenceState) old = frame.src.ssavaluetypes[ssa_id] @@ -226,6 +218,7 @@ function add_cycle_backedge!(frame::InferenceState, caller::InferenceState, curr update_valid_age!(frame, caller) backedge = (caller, currpc) contains_is(frame.cycle_backedges, backedge) || push!(frame.cycle_backedges, backedge) + add_backedge!(frame.linfo, caller) return frame end @@ -236,7 +229,6 @@ function add_backedge!(li::MethodInstance, caller::InferenceState) caller.stmt_edges[caller.currpc] = [] end push!(caller.stmt_edges[caller.currpc], li) - update_valid_age!(li, caller) nothing end diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl index 624d2087ea64a..a208156274496 100644 --- a/base/compiler/optimize.jl +++ b/base/compiler/optimize.jl @@ -56,7 +56,7 @@ mutable struct OptimizationState return new(linfo, s_edges::Vector{Any}, src, inmodule, nargs, - min_world(linfo), max_world(linfo), + UInt(1), get_world_counter(), params, sptypes_from_meth_instance(linfo), slottypes, false) end end @@ -104,19 +104,21 @@ _topmod(sv::OptimizationState) = _topmod(sv.mod) function 
update_valid_age!(min_valid::UInt, max_valid::UInt, sv::OptimizationState) sv.min_valid = max(sv.min_valid, min_valid) sv.max_valid = min(sv.max_valid, max_valid) - @assert(!isa(sv.linfo.def, Method) || - (sv.min_valid == typemax(UInt) && sv.max_valid == typemin(UInt)) || - sv.min_valid <= sv.params.world <= sv.max_valid, + @assert(sv.min_valid <= sv.params.world <= sv.max_valid, "invalid age range update") nothing end -update_valid_age!(li::MethodInstance, sv::OptimizationState) = update_valid_age!(min_world(li), max_world(li), sv) - function add_backedge!(li::MethodInstance, caller::OptimizationState) + #TODO: deprecate this? isa(caller.linfo.def, Method) || return # don't add backedges to toplevel exprs push!(caller.calledges, li) - update_valid_age!(li, caller) + nothing +end + +function add_backedge!(li::CodeInstance, caller::OptimizationState) + update_valid_age!(min_world(li), max_world(li), caller) + add_backedge!(li.def, caller) nothing end diff --git a/base/compiler/params.jl b/base/compiler/params.jl index 91b9ee35ee9b6..46c086210dbbd 100644 --- a/base/compiler/params.jl +++ b/base/compiler/params.jl @@ -56,6 +56,8 @@ struct Params tuple_splat) end function Params(world::UInt) + world == typemax(UInt) && (world = get_world_counter()) # workaround for bad callers + @assert world <= get_world_counter() inlining = inlining_enabled() return new(Vector{InferenceResult}(), world, true, diff --git a/base/compiler/ssair/inlining.jl b/base/compiler/ssair/inlining.jl index 0472a11a2312a..42ef67b806324 100644 --- a/base/compiler/ssair/inlining.jl +++ b/base/compiler/ssair/inlining.jl @@ -168,14 +168,18 @@ function cfg_inline_item!(item::InliningTodo, state::CFGInliningState, from_unio for (old_block, new_block) in enumerate(bb_rename_range) if old_block != 1 || need_split_before p = state.new_cfg_blocks[new_block].preds - map!(p, p) do old_pred_block - return old_pred_block == 0 ? 0 : bb_rename_range[old_pred_block] + let bb_rename_range = bb_rename_range + map!(p, p) do old_pred_block + return old_pred_block == 0 ? 
0 : bb_rename_range[old_pred_block] + end end end if new_block != last(new_block_range) s = state.new_cfg_blocks[new_block].succs - map!(s, s) do old_succ_block - return bb_rename_range[old_succ_block] + let bb_rename_range = bb_rename_range + map!(s, s) do old_succ_block + return bb_rename_range[old_succ_block] + end end end end @@ -569,21 +573,21 @@ function batch_inline!(todo::Vector{Any}, ir::IRCode, linetable::Vector{LineInfo return ir end -function _spec_lambda(@nospecialize(atype), sv::OptimizationState, @nospecialize(invoke_data)) +function spec_lambda(@nospecialize(atype), sv::OptimizationState, @nospecialize(invoke_data)) + min_valid = UInt[typemin(UInt)] + max_valid = UInt[typemax(UInt)] if invoke_data === nothing - return ccall(:jl_get_spec_lambda, Any, (Any, UInt), atype, sv.params.world) + mi = ccall(:jl_get_spec_lambda, Any, (Any, UInt, Ptr{UInt}, Ptr{UInt}), atype, sv.params.world, min_valid, max_valid) else invoke_data = invoke_data::InvokeData atype <: invoke_data.types0 || return nothing - return ccall(:jl_get_invoke_lambda, Any, (Any, Any, Any, UInt), - invoke_data.mt, invoke_data.entry, atype, sv.params.world) + mi = ccall(:jl_get_invoke_lambda, Any, (Any, Any, Any, UInt), + invoke_data.mt, invoke_data.entry, atype, sv.params.world) + #XXX: compute min/max_valid end -end - -function spec_lambda(@nospecialize(atype), sv::OptimizationState, @nospecialize(invoke_data)) - linfo = _spec_lambda(atype, sv, invoke_data) - linfo !== nothing && add_backedge!(linfo, sv) - linfo + mi !== nothing && add_backedge!(mi::MethodInstance, sv) + update_valid_age!(min_valid[1], max_valid[1], sv) + return mi end # This assumes the caller has verified that all arguments to the _apply call are Tuples. @@ -680,52 +684,43 @@ function analyze_method!(idx::Int, sig::Signature, @nospecialize(metharg), meths isa(methsp[i], TypeVar) && return nothing end - # Find the linfo for this methods - linfo = code_for_method(method, metharg, methsp, sv.params.world, true) # Union{Nothing, MethodInstance} - if !isa(linfo, MethodInstance) + # See if there exists a specialization for this method signature + mi = specialize_method(method, metharg, methsp, true) # Union{Nothing, MethodInstance} + if !isa(mi, MethodInstance) return spec_lambda(atype_unlimited, sv, invoke_data) end - if invoke_api(linfo) == 2 - # in this case function can be inlined to a constant - add_backedge!(linfo, sv) - return ConstantCase(quoted(linfo.inferred_const), method, Any[methsp...], metharg) - end - - isconst, inferred = find_inferred(linfo, atypes, sv, stmttyp) + isconst, src = find_inferred(mi, atypes, sv, stmttyp) if isconst - return ConstantCase(inferred, method, Any[methsp...], metharg) + add_backedge!(mi, sv) + return ConstantCase(src, method, Any[methsp...], metharg) end - if inferred === nothing + if src === nothing return spec_lambda(atype_unlimited, sv, invoke_data) end - src_inferred = ccall(:jl_ast_flag_inferred, Bool, (Any,), inferred) - src_inlineable = ccall(:jl_ast_flag_inlineable, Bool, (Any,), inferred) + src_inferred = ccall(:jl_ast_flag_inferred, Bool, (Any,), src) + src_inlineable = ccall(:jl_ast_flag_inlineable, Bool, (Any,), src) if !(src_inferred && src_inlineable) return spec_lambda(atype_unlimited, sv, invoke_data) end # At this point we're committed to performing the inlining, add the backedge - add_backedge!(linfo, sv) + add_backedge!(mi, sv) - if isa(inferred, CodeInfo) - src = inferred - ast = copy_exprargs(inferred.code) - else - src = ccall(:jl_uncompress_ast, Any, (Any, Any), method, 
inferred::Vector{UInt8})::CodeInfo - ast = src.code + if !isa(src, CodeInfo) + src = ccall(:jl_uncompress_ast, Any, (Any, Ptr{Cvoid}, Any), method, C_NULL, src::Vector{UInt8})::CodeInfo end @timeit "inline IR inflation" begin - ir2 = inflate_ir(src, linfo) + ir2 = inflate_ir(src, mi) # prepare inlining linetable with method instance information inline_linetable = Vector{LineInfoNode}(undef, length(src.linetable)) for i = 1:length(src.linetable) entry = src.linetable[i] if entry.inlined_at === 0 && entry.method === method - entry = LineInfoNode(linfo, entry.file, entry.line, entry.inlined_at) + entry = LineInfoNode(mi, entry.file, entry.line, entry.inlined_at) end inline_linetable[i] = entry end @@ -998,6 +993,7 @@ function assemble_inline_todo!(ir::IRCode, sv::OptimizationState) # No applicable method, or too many applicable methods continue end + update_valid_age!(min_valid[1], max_valid[1], sv) cases = Pair{Any, Any}[] # TODO: This could be better @@ -1117,6 +1113,7 @@ function compute_invoke_data(@nospecialize(atypes), params::Params) invoke_entry = ccall(:jl_gf_invoke_lookup, Any, (Any, UInt), invoke_types, params.world) invoke_entry === nothing && return nothing + #XXX: update_valid_age!(min_valid[1], max_valid[1], sv) invoke_data = InvokeData(mt, invoke_entry, invoke_types) atype0 = atypes[2] atypes = atypes[4:end] @@ -1261,7 +1258,7 @@ function ssa_substitute_op!(@nospecialize(val), arg_replacements::Vector{Any}, return urs[] end -function find_inferred(linfo::MethodInstance, @nospecialize(atypes), sv::OptimizationState, @nospecialize(rettype)) +function find_inferred(mi::MethodInstance, @nospecialize(atypes), sv::OptimizationState, @nospecialize(rettype)) # see if the method has an InferenceResult in the current cache # or an existing inferred code info stored in `.inferred` haveconst = false @@ -1273,22 +1270,28 @@ function find_inferred(linfo::MethodInstance, @nospecialize(atypes), sv::Optimiz end end if haveconst || improvable_via_constant_propagation(rettype) - inf_result = cache_lookup(linfo, atypes, sv.params.cache) # Union{Nothing, InferenceResult} + inf_result = cache_lookup(mi, atypes, sv.params.cache) # Union{Nothing, InferenceResult} else inf_result = nothing end + #XXX: update_valid_age!(min_valid[1], max_valid[1], sv) if isa(inf_result, InferenceResult) let inferred_src = inf_result.src if isa(inferred_src, CodeInfo) return svec(false, inferred_src) end if isa(inferred_src, Const) && is_inlineable_constant(inferred_src.val) - add_backedge!(linfo, sv) return svec(true, quoted(inferred_src.val),) end end end - if isdefined(linfo, :inferred) + + linfo = inf_for_methodinstance(mi, sv.params.world) + if linfo isa CodeInstance + if invoke_api(linfo) == 2 + # in this case function can be inlined to a constant + return svec(true, quoted(linfo.rettype_const)) + end return svec(false, linfo.inferred) end return svec(false, nothing) diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl index 47f18733b74a1..38d5321b3fca7 100644 --- a/base/compiler/tfuncs.jl +++ b/base/compiler/tfuncs.jl @@ -1094,6 +1094,7 @@ function invoke_tfunc(@nospecialize(ft), @nospecialize(types), @nospecialize(arg if entry === nothing return Any end + # XXX: update_valid_age!(min_valid[1], max_valid[1], sv) meth = entry.func (ti, env) = ccall(:jl_type_intersection_with_env, Any, (Any, Any), argtype, meth.sig)::SimpleVector rt, edge = typeinf_edge(meth::Method, ti, env, sv) diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl index bbeea301fe1eb..3eb43e7db928a 100644 ---
a/base/compiler/typeinfer.jl +++ b/base/compiler/typeinfer.jl @@ -55,22 +55,26 @@ function typeinf(frame::InferenceState) end end end + if max_valid == get_world_counter() + max_valid = typemax(UInt) + end for caller in frames + caller.min_valid = min_valid + caller.max_valid = max_valid caller.src.min_world = min_valid % Int caller.src.max_world = max_valid % Int if cached cache_result(caller.result, min_valid, max_valid) end - end - # if we aren't cached, we don't need this edge - # but our caller might, so let's just make it anyways - for caller in frames - finalize_backedges(caller) - end - if max_valid == typemax(UInt) - for caller in frames - store_backedges(caller) + if max_valid == typemax(UInt) + # if we aren't cached, we don't need this edge + # but our caller might, so let's just make it anyways + for caller in frames + store_backedges(caller) + end end + # finalize and record the linfo result + caller.inferred = true end return true end @@ -86,33 +90,28 @@ function cache_result(result::InferenceResult, min_valid::UInt, max_valid::UInt) end # check if the existing linfo metadata is also sufficient to describe the current inference result - # to decide if it is worth caching it again (which would also clear any generated code) + # to decide if it is worth caching this already_inferred = !result.linfo.inInference - if isdefined(result.linfo, :inferred) - inf = result.linfo.inferred - if !isa(inf, CodeInfo) || (inf::CodeInfo).inferred - if min_world(result.linfo) == min_valid && max_world(result.linfo) == max_valid - already_inferred = true - end - end + if inf_for_methodinstance(result.linfo, min_valid, max_valid) isa CodeInstance + already_inferred = true end - # don't store inferred code if we've decided to interpret this function - if !already_inferred && invoke_api(result.linfo) != 4 + # TODO: also don't store inferred code if we've previously decided to interpret this function + if !already_inferred inferred_result = result.src if inferred_result isa Const # use constant calling convention - inferred_const = (result.src::Const).val + rettype_const = (result.src::Const).val const_flags = 0x3 else if isa(result.result, Const) - inferred_const = (result.result::Const).val + rettype_const = (result.result::Const).val const_flags = 0x2 elseif isconstType(result.result) - inferred_const = result.result.parameters[1] + rettype_const = result.result.parameters[1] const_flags = 0x2 else - inferred_const = nothing + rettype_const = nothing const_flags = 0x00 end if !toplevel && inferred_result isa CodeInfo @@ -133,13 +132,9 @@ function cache_result(result::InferenceResult, min_valid::UInt, max_valid::UInt) if !isa(inferred_result, Union{CodeInfo, Vector{UInt8}}) inferred_result = nothing end - cache = ccall(:jl_set_method_inferred, Ref{MethodInstance}, (Any, Any, Any, Any, Int32, UInt, UInt), - result.linfo, widenconst(result.result), inferred_const, inferred_result, + ccall(:jl_set_method_inferred, Ref{CodeInstance}, (Any, Any, Any, Any, Int32, UInt, UInt), + result.linfo, widenconst(result.result), rettype_const, inferred_result, const_flags, min_valid, max_valid) - if cache !== result.linfo - result.linfo.inInference = false - result.linfo = cache - end end result.linfo.inInference = false nothing @@ -175,19 +170,7 @@ function finish(src::CodeInfo) nothing end -function finalize_backedges(me::InferenceState) - # update all of the (cycle) callers with real backedges - # by traversing the temporary list of backedges - for (i, _) in me.cycle_backedges - add_backedge!(me.linfo, i) - end 
- - # finalize and record the linfo result - me.inferred = true - nothing -end - -# add the real backedges +# record the backedges function store_backedges(frame::InferenceState) toplevel = !isa(frame.linfo.def, Method) if !toplevel && (frame.cached || frame.parent !== nothing) @@ -224,6 +207,8 @@ function widen_all_consts!(src::CodeInfo) end end + src.rettype = widenconst(src.rettype) + return src end @@ -464,20 +449,14 @@ end # compute (and cache) an inferred AST and return the current best estimate of the result type function typeinf_edge(method::Method, @nospecialize(atypes), sparams::SimpleVector, caller::InferenceState) - code = code_for_method(method, atypes, sparams, caller.params.world) - code === nothing && return Any, nothing - code = code::MethodInstance - if isdefined(code, :inferred) - # return rettype if the code is already inferred - # staged functions make this hard since they have two "inferred" conditions, - # so need to check whether the code itself is also inferred - inf = code.inferred - if !isa(inf, CodeInfo) || (inf::CodeInfo).inferred - if isdefined(code, :inferred_const) - return AbstractEvalConstant(code.inferred_const), code - else - return code.rettype, code - end + mi = specialize_method(method, atypes, sparams)::MethodInstance + code = inf_for_methodinstance(mi, caller.params.world) + if code isa CodeInstance # return existing rettype if the code is already inferred + update_valid_age!(min_world(code), max_world(code), caller) + if isdefined(code, :rettype_const) + return Const(code.rettype_const), mi + else + return code.rettype, mi end end if !caller.cached && caller.parent === nothing # this caller exists to return to the user # (if we asked resolve_call_cycle, it might instead detect that there is a cycle that it can't merge) frame = false else - frame = resolve_call_cycle!(code, caller) + frame = resolve_call_cycle!(mi, caller) end if frame === false # completely new - code.inInference = true - result = InferenceResult(code) + mi.inInference = true + result = InferenceResult(mi) frame = InferenceState(result, #=cached=#true, caller.params) # always use the cache for edge targets if frame === nothing # can't get the source for this, so we know nothing - code.inInference = false + mi.inInference = false return Any, nothing end if caller.cached || caller.limited # don't involve uncached functions in cycle resolution frame.parent = caller end typeinf(frame) - return widenconst_bestguess(frame.bestguess), frame.inferred ? frame.linfo : nothing + update_valid_age!(frame, caller) + return widenconst_bestguess(frame.bestguess), frame.inferred ?
mi : nothing elseif frame === true # unresolvable cycle return Any, nothing end + # return the current knowledge about this cycle frame = frame::InferenceState + update_valid_age!(frame, caller) return widenconst_bestguess(frame.bestguess), nothing end @@ -519,10 +501,9 @@ end # compute an inferred AST and return type function typeinf_code(method::Method, @nospecialize(atypes), sparams::SimpleVector, run_optimizer::Bool, params::Params) - code = code_for_method(method, atypes, sparams, params.world) - code === nothing && return (nothing, Any) + mi = specialize_method(method, atypes, sparams)::MethodInstance ccall(:jl_typeinf_begin, Cvoid, ()) - result = InferenceResult(code) + result = InferenceResult(mi) frame = InferenceState(result, false, params) frame === nothing && return (nothing, Any) if typeinf(frame) && run_optimizer @@ -536,81 +517,76 @@ function typeinf_code(method::Method, @nospecialize(atypes), sparams::SimpleVect end # compute (and cache) an inferred AST and return type -function typeinf_ext(linfo::MethodInstance, params::Params) - method = linfo.def::Method +function typeinf_ext(mi::MethodInstance, params::Params) + method = mi.def::Method for i = 1:2 # test-and-lock-and-test i == 2 && ccall(:jl_typeinf_begin, Cvoid, ()) - if isdefined(linfo, :inferred) + code = inf_for_methodinstance(mi, params.world) + if code isa CodeInstance # see if this code already exists in the cache - # staged functions make this hard since they have two "inferred" conditions, - # so need to check whether the code itself is also inferred - if min_world(linfo) <= params.world <= max_world(linfo) - inf = linfo.inferred - if invoke_api(linfo) == 2 - tree = ccall(:jl_new_code_info_uninit, Ref{CodeInfo}, ()) - tree.code = Any[ Expr(:return, quoted(linfo.inferred_const)) ] - nargs = Int(method.nargs) - tree.slotnames = ccall(:jl_uncompress_argnames, Any, (Any,), method.slot_syms) - tree.slotflags = fill(0x00, nargs) - tree.ssavaluetypes = 1 - tree.codelocs = Int32[1] - tree.linetable = [LineInfoNode(method, method.file, Int(method.line), 0)] - tree.inferred = true - tree.ssaflags = UInt8[0] - tree.pure = true - tree.inlineable = true - tree.parent = linfo - tree.rettype = typeof(linfo.inferred_const) - tree.min_world = linfo.min_world - tree.max_world = linfo.max_world - i == 2 && ccall(:jl_typeinf_end, Cvoid, ()) - return svec(linfo, tree) - elseif isa(inf, CodeInfo) - if inf.inferred - i == 2 && ccall(:jl_typeinf_end, Cvoid, ()) - return svec(linfo, inf) - end - elseif isa(inf, Vector{UInt8}) - inf = uncompressed_ast(linfo.def::Method, inf) - if inf.inferred - i == 2 && ccall(:jl_typeinf_end, Cvoid, ()) - return svec(linfo, inf) - end + inf = code.inferred + if invoke_api(code) == 2 + i == 2 && ccall(:jl_typeinf_end, Cvoid, ()) + tree = ccall(:jl_new_code_info_uninit, Ref{CodeInfo}, ()) + tree.code = Any[ Expr(:return, quoted(code.rettype_const)) ] + nargs = Int(method.nargs) + tree.slotnames = ccall(:jl_uncompress_argnames, Any, (Any,), method.slot_syms) + tree.slotflags = fill(0x00, nargs) + tree.ssavaluetypes = 1 + tree.codelocs = Int32[1] + tree.linetable = [LineInfoNode(method, method.file, Int(method.line), 0)] + tree.inferred = true + tree.ssaflags = UInt8[0] + tree.pure = true + tree.inlineable = true + tree.parent = mi + tree.rettype = typeof(code.rettype_const) + tree.min_world = code.min_world + tree.max_world = code.max_world + return tree + elseif isa(inf, CodeInfo) + i == 2 && ccall(:jl_typeinf_end, Cvoid, ()) + if !(inf.min_world == code.min_world && + inf.max_world == code.max_world && 
+ inf.rettype === code.rettype) + inf = copy(inf) + inf.min_world = code.min_world + inf.max_world = code.max_world + inf.rettype = code.rettype end + return inf + elseif isa(inf, Vector{UInt8}) + i == 2 && ccall(:jl_typeinf_end, Cvoid, ()) + inf = _uncompressed_ast(code, inf) + return inf end end end - linfo.inInference = true - frame = InferenceState(InferenceResult(linfo), #=cached=#true, params) - frame === nothing && return svec(nothing, nothing) + mi.inInference = true + frame = InferenceState(InferenceResult(mi), #=cached=#true, params) + frame === nothing && return nothing typeinf(frame) ccall(:jl_typeinf_end, Cvoid, ()) - frame.src.inferred || return svec(nothing, nothing) - return svec(frame.result.linfo, frame.src) + frame.src.inferred || return nothing + return frame.src end # compute (and cache) an inferred AST and return the inferred return type function typeinf_type(method::Method, @nospecialize(atypes), sparams::SimpleVector, params::Params) if contains_is(unwrap_unionall(atypes).parameters, Union{}) - return Union{} + return Union{} # don't ask: it does weird and unnecessary things, if it occurs during bootstrap end - code = code_for_method(method, atypes, sparams, params.world) - code === nothing && return nothing - code = code::MethodInstance + mi = specialize_method(method, atypes, sparams)::MethodInstance for i = 1:2 # test-and-lock-and-test i == 2 && ccall(:jl_typeinf_begin, Cvoid, ()) - if isdefined(code, :inferred) + code = inf_for_methodinstance(mi, params.world) + if code isa CodeInstance # see if this rettype already exists in the cache - # staged functions make this hard since they have two "inferred" conditions, - # so need to check whether the code itself is also inferred - inf = code.inferred - if !isa(inf, CodeInfo) || (inf::CodeInfo).inferred - i == 2 && ccall(:jl_typeinf_end, Cvoid, ()) - return code.rettype - end + i == 2 && ccall(:jl_typeinf_end, Cvoid, ()) + return code.rettype end end - frame = InferenceResult(code) + frame = InferenceResult(mi) typeinf(frame, true, params) ccall(:jl_typeinf_end, Cvoid, ()) frame.result isa InferenceState && return nothing @@ -620,9 +596,9 @@ end @timeit function typeinf_ext(linfo::MethodInstance, world::UInt) if isa(linfo.def, Method) # method lambda - infer this specialization via the method cache - return typeinf_ext(linfo, Params(world)) + src = typeinf_ext(linfo, Params(world)) else - src = linfo.inferred::CodeInfo + src = linfo.uninferred::CodeInfo if !src.inferred # toplevel lambda - infer directly ccall(:jl_typeinf_begin, Cvoid, ()) @@ -631,14 +607,12 @@ end frame = InferenceState(result, src, #=cached=#true, Params(world)) typeinf(frame) @assert frame.inferred # TODO: deal with this better - @assert frame.linfo === linfo - linfo.rettype = widenconst(frame.bestguess) src = frame.src end ccall(:jl_typeinf_end, Cvoid, ()) end - return svec(linfo, src) end + return src end diff --git a/base/compiler/utilities.jl b/base/compiler/utilities.jl index 73ec24a7c4269..9daee10c2c7e5 100644 --- a/base/compiler/utilities.jl +++ b/base/compiler/utilities.jl @@ -81,11 +81,12 @@ end # MethodInstance/CodeInfo # ########################### -function invoke_api(li::MethodInstance) +function invoke_api(li::CodeInstance) return ccall(:jl_invoke_api, Cint, (Any,), li) end function get_staged(li::MethodInstance) + may_invoke_generator(li) || return nothing try # user code might throw errors – ignore them return ccall(:jl_code_for_staged, Any, (Any,), li)::CodeInfo @@ -104,7 +105,7 @@ function retrieve_code_info(linfo::MethodInstance) 
if c === nothing && isdefined(m, :source) src = m.source if isa(src, Array{UInt8,1}) - c = ccall(:jl_uncompress_ast, Any, (Any, Any), m, src) + c = ccall(:jl_uncompress_ast, Any, (Any, Ptr{Cvoid}, Any), m, C_NULL, src) else c = copy(src::CodeInfo) end @@ -115,28 +116,30 @@ function retrieve_code_info(linfo::MethodInstance) end end -function code_for_method(method::Method, @nospecialize(atypes), sparams::SimpleVector, world::UInt, preexisting::Bool=false) - if world < min_world(method) || world > max_world(method) - return nothing - end - if isdefined(method, :generator) && !may_invoke_generator(method, atypes, sparams) - return nothing - end +function inf_for_methodinstance(mi::MethodInstance, min_world::UInt, max_world::UInt=min_world) + inf = ccall(:jl_rettype_inferred, Ptr{Nothing}, (Any, UInt, UInt), mi, min_world, max_world) + inf == C_NULL && return nothing + return unsafe_pointer_to_objref(inf)::CodeInstance +end + + +# get a handle to the unique specialization object representing a particular instantiation of a call +function specialize_method(method::Method, @nospecialize(atypes), sparams::SimpleVector, preexisting::Bool=false) if preexisting if method.specializations !== nothing # check cached specializations # for an existing result stored there - return ccall(:jl_specializations_lookup, Any, (Any, Any, UInt), method, atypes, world) + return ccall(:jl_specializations_lookup, Any, (Any, Any), method, atypes) end return nothing end - return ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any, UInt), method, atypes, sparams, world) + return ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any), method, atypes, sparams) end # This function is used for computing alternate limit heuristics -function method_for_inference_heuristics(method::Method, @nospecialize(sig), sparams::SimpleVector, world::UInt) - if isdefined(method, :generator) && method.generator.expand_early - method_instance = code_for_method(method, sig, sparams, world, false) +function method_for_inference_heuristics(method::Method, @nospecialize(sig), sparams::SimpleVector) + if isdefined(method, :generator) && method.generator.expand_early && may_invoke_generator(method, sig, sparams) + method_instance = specialize_method(method, sig, sparams, false) if isa(method_instance, MethodInstance) cinfo = get_staged(method_instance) if isa(cinfo, CodeInfo) diff --git a/base/errorshow.jl b/base/errorshow.jl index f64fa6a1eccb0..8453de56d31be 100644 --- a/base/errorshow.jl +++ b/base/errorshow.jl @@ -238,7 +238,7 @@ function showerror(io::IO, ex::MethodError) end if (ex.world != typemax(UInt) && hasmethod(ex.f, arg_types) && !hasmethod(ex.f, arg_types, world = ex.world)) - curworld = ccall(:jl_get_world_counter, UInt, ()) + curworld = get_world_counter() println(io) print(io, "The applicable method may be too new: running in world age $(ex.world), while current world is $(curworld).") end @@ -438,9 +438,9 @@ function show_method_candidates(io::IO, ex::MethodError, @nospecialize kwargs=() end end end - if ex.world < min_world(method) + if ex.world < reinterpret(UInt, method.primary_world) print(iob, " (method too new to be called from this world context.)") - elseif ex.world > max_world(method) + elseif ex.world > reinterpret(UInt, method.deleted_world) print(iob, " (method deleted before this world age.)") end # TODO: indicate if it's in the wrong world diff --git a/base/methodshow.jl b/base/methodshow.jl index f03a02f1c7d24..a1e37478d96de 100644 --- a/base/methodshow.jl +++ 
b/base/methodshow.jl @@ -75,7 +75,7 @@ const empty_sym = Symbol("") function kwarg_decl(m::Method, kwtype::DataType) sig = rewrap_unionall(Tuple{kwtype, Any, unwrap_unionall(m.sig).parameters...}, m.sig) - kwli = ccall(:jl_methtable_lookup, Any, (Any, Any, UInt), kwtype.name.mt, sig, max_world(m)) + kwli = ccall(:jl_methtable_lookup, Any, (Any, Any, UInt), kwtype.name.mt, sig, get_world_counter()) if kwli !== nothing kwli = kwli::Method slotnames = ccall(:jl_uncompress_argnames, Vector{Any}, (Any,), kwli.slot_syms) diff --git a/base/reflection.jl b/base/reflection.jl index c8acf91ecb18e..46f268feb9e0c 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -770,15 +770,15 @@ function code_lowered(@nospecialize(f), @nospecialize(t=Tuple); generated::Bool= end return map(method_instances(f, t)) do m if generated && isgenerated(m) - if isa(m, Core.MethodInstance) - return Core.Compiler.get_staged(m) - else # isa(m, Method) + if may_invoke_generator(m) + return ccall(:jl_code_for_staged, Any, (Any,), m)::CodeInfo + else error("Could not expand generator for `@generated` method ", m, ". ", "This can happen if the provided argument types (", t, ") are ", "not leaf types, but the `generated` argument is `true`.") end end - code = uncompressed_ast(m) + code = uncompressed_ast(m.def::Method) debuginfo == :none && remove_linenums!(code) return code end @@ -898,20 +898,20 @@ function length(mt::Core.MethodTable) end isempty(mt::Core.MethodTable) = (mt.defs === nothing) -uncompressed_ast(m::Method) = isdefined(m, :source) ? uncompressed_ast(m, m.source) : +uncompressed_ast(m::Method) = isdefined(m, :source) ? _uncompressed_ast(m, m.source) : isdefined(m, :generator) ? error("Method is @generated; try `code_lowered` instead.") : error("Code for this Method is not available.") -uncompressed_ast(m::Method, s::CodeInfo) = copy(s) -uncompressed_ast(m::Method, s::Array{UInt8,1}) = ccall(:jl_uncompress_ast, Any, (Any, Any), m, s)::CodeInfo -uncompressed_ast(m::Core.MethodInstance) = uncompressed_ast(m.def) +_uncompressed_ast(m::Method, s::CodeInfo) = copy(s) +_uncompressed_ast(m::Method, s::Array{UInt8,1}) = ccall(:jl_uncompress_ast, Any, (Any, Ptr{Cvoid}, Any), m, C_NULL, s)::CodeInfo +_uncompressed_ast(m::Core.CodeInstance, s::Array{UInt8,1}) = ccall(:jl_uncompress_ast, Any, (Any, Ptr{Cvoid}, Any), m.def.def::Method, m, s)::CodeInfo function method_instances(@nospecialize(f), @nospecialize(t), world::UInt = typemax(UInt)) tt = signature_type(f, t) - results = Vector{Union{Method,Core.MethodInstance}}() + results = Core.MethodInstance[] for method_data in _methods_by_ftype(tt, -1, world) mtypes, msp, m = method_data - instance = Core.Compiler.code_for_method(m, mtypes, msp, world, false) - push!(results, ifelse(isa(instance, Core.MethodInstance), instance, m)) + instance = ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any), m, mtypes, msp) + push!(results, instance) end return results end @@ -957,7 +957,13 @@ functions on concrete types. The one exception to this is that we allow calling generators with abstract types if the generator does not use said abstract type (and thus cannot incorrectly use it to break monotonicity). This function computes whether we are in either of these cases. + +Unlike normal functions, the compilation heuristics still can't generate good dispatch +in some cases, but this may still allow inference not to fall over in some limited cases.
""" +function may_invoke_generator(method::MethodInstance) + return may_invoke_generator(method.def::Method, method.specTypes, method.sparam_vals) +end function may_invoke_generator(method::Method, @nospecialize(atypes), sparams::SimpleVector) # If we have complete information, we may always call the generator isdispatchtuple(atypes) && return true @@ -999,7 +1005,7 @@ end # give a decent error message if we try to instantiate a staged function on non-leaf types function func_for_method_checked(m::Method, @nospecialize(types), sparams::SimpleVector) - if isdefined(m, :generator) && !Core.Compiler.may_invoke_generator(m, types, sparams) + if isdefined(m, :generator) && !may_invoke_generator(m, types, sparams) error("cannot call @generated function `", m, "` ", "with abstract argument types: ", types) end @@ -1027,8 +1033,9 @@ The keyword `debuginfo` controls the amount of code metadata present in the outp possible options are `:source` or `:none`. """ function code_typed(@nospecialize(f), @nospecialize(types=Tuple); - optimize=true, debuginfo::Symbol=:default, - world = ccall(:jl_get_world_counter, UInt, ()), + optimize=true, + debuginfo::Symbol=:default, + world = get_world_counter(), params = Core.Compiler.Params(world)) ccall(:jl_is_in_pure_context, Bool, ()) && error("code reflection cannot be used from generated functions") if isa(f, Core.Builtin) @@ -1061,7 +1068,7 @@ function return_types(@nospecialize(f), @nospecialize(types=Tuple)) end types = to_tuple_type(types) rt = [] - world = ccall(:jl_get_world_counter, UInt, ()) + world = get_world_counter() params = Core.Compiler.Params(world) for x in _methods(f, types, -1, world) meth = func_for_method_checked(x[3], types, x[2]) @@ -1204,22 +1211,22 @@ julia> hasmethod(g, Tuple{}, (:a, :b, :c, :d)) # g accepts arbitrary kwargs true ``` """ -function hasmethod(@nospecialize(f), @nospecialize(t); world = typemax(UInt)) +function hasmethod(@nospecialize(f), @nospecialize(t); world=typemax(UInt)) t = to_tuple_type(t) t = signature_type(f, t) return ccall(:jl_gf_invoke_lookup, Any, (Any, UInt), t, world) !== nothing end function hasmethod(@nospecialize(f), @nospecialize(t), kwnames::Tuple{Vararg{Symbol}}; world=typemax(UInt)) + # TODO: this appears to be doing the wrong queries hasmethod(f, t, world=world) || return false isempty(kwnames) && return true m = which(f, t) - max_world(m) <= world || return false kws = kwarg_decl(m, Core.kwftype(typeof(f))) for kw in kws endswith(String(kw), "...") && return true end - issubset(kwnames, kws) + return issubset(kwnames, kws) end """ @@ -1302,10 +1309,12 @@ has_bottom_parameter(t::Union) = has_bottom_parameter(t.a) & has_bottom_paramete has_bottom_parameter(t::TypeVar) = t.ub == Bottom || has_bottom_parameter(t.ub) has_bottom_parameter(::Any) = false -min_world(m::Method) = reinterpret(UInt, m.min_world) -max_world(m::Method) = reinterpret(UInt, m.max_world) -min_world(m::Core.MethodInstance) = reinterpret(UInt, m.min_world) -max_world(m::Core.MethodInstance) = reinterpret(UInt, m.max_world) +min_world(m::Core.CodeInstance) = reinterpret(UInt, m.min_world) +max_world(m::Core.CodeInstance) = reinterpret(UInt, m.max_world) +min_world(m::Core.CodeInfo) = reinterpret(UInt, m.min_world) +max_world(m::Core.CodeInfo) = reinterpret(UInt, m.max_world) +get_world_counter() = ccall(:jl_get_world_counter, UInt, ()) + """ propertynames(x, private=false) diff --git a/doc/src/devdocs/ast.md b/doc/src/devdocs/ast.md index ef8860e375895..4c79de34b5a51 100644 --- a/doc/src/devdocs/ast.md +++ b/doc/src/devdocs/ast.md 
@@ -260,7 +260,7 @@ types exist in lowered form: * `CodeInfo` - Wraps the IR of a method. Its `code` field is an array of expressions to execute. + Wraps the IR of a group of statements. Its `code` field is an array of expressions to execute. * `GotoNode` @@ -490,7 +490,11 @@ A unique'd container describing the shared metadata for a single method. * `source` - The original source code (usually compressed). + The original source code (if available, usually compressed). + + * `generator` + + A callable object which can be executed to get specialized source for a specific method signature. * `roots` @@ -501,9 +505,9 @@ A unique'd container describing the shared metadata for a single method. Descriptive bit-fields for the source code of this Method. - * `min_world` / `max_world` + * `primary_world` - The range of world ages for which this method is visible to dispatch. + The world age that "owns" this Method. ### MethodInstance @@ -528,16 +532,38 @@ for important details on how to modify these fields safely. runtime `MethodInstance` from the `MethodTable` cache, this will always be defined and indexable. - * `rettype` + * `uninferred` + + The uncompressed source code for a toplevel thunk. Additionally, for a generated function, + this is one of many places that the source code might be found. + + * `backedges` + + We store the reverse-list of cache dependencies for efficient tracking of incremental reanalysis/recompilation work that may be needed after a new method definition. + This works by keeping a list of the other `MethodInstance` objects that have been inferred or optimized to contain a possible call to this `MethodInstance`. + Those optimization results might be stored somewhere in the `cache`, or they might have been the result of something we didn't want to cache, such as constant propagation. + Thus we merge all of those backedges to various cache entries here (there's almost always only the one applicable cache entry with a sentinel value for max_world anyways). + + * `cache` + + Cache of `CodeInstance` objects that share this template instantiation. + +### CodeInstance + + * `def` + + The `MethodInstance` that this cache entry is derived from. + + + * `rettype`/`rettype_const` The inferred return type for the `specFunctionObject` field, which (in most cases) is also the computed return type for the function in general. * `inferred` - May contain a cache of the inferred source for this function, or other information about - the inference result such as a constant return value may be put here (if `jlcall_api == - 2`), or it could be set to `nothing` to just indicate `rettype` is inferred. + May contain a cache of the inferred source for this function, + or it could be set to `nothing` to just indicate `rettype` is inferred. * `fptr` @@ -549,18 +575,20 @@ for important details on how to modify these fields safely. * 0 - Not compiled yet * 1 - JL_CALLABLE `jl_value_t *(*)(jl_function_t *f, jl_value_t *args[nargs], uint32_t nargs)` - * 2 - Constant (value stored in `inferred`) + * 2 - Constant (value stored in `rettype_const`) * 3 - With Static-parameters forwarded `jl_value_t *(*)(jl_svec_t *sparams, jl_function_t *f, jl_value_t *args[nargs], uint32_t nargs)` * 4 - Run in interpreter `jl_value_t *(*)(jl_method_instance_t *meth, jl_function_t *f, jl_value_t *args[nargs], uint32_t nargs)` * `min_world` / `max_world` The range of world ages for which this method instance is valid to be called. + If max_world is the special token value `-1`, the value is not yet known.
+ It may continue to be used until we encounter a backedge that requires us to reconsider. ### CodeInfo -A temporary container for holding lowered source code. +A (usually temporary) container for holding lowered source code. * `code` @@ -568,11 +596,11 @@ A temporary container for holding lowered source code. * `slotnames` - An array of symbols giving the name of each slot (argument or local variable). + An array of symbols giving names for each slot (argument or local variable). * `slottypes` - An array of types for the slots. + An array of types for the slots (e.g. variables). * `slotflags` @@ -588,7 +616,17 @@ A temporary container for holding lowered source code. Either an array or an `Int`. If an `Int`, it gives the number of compiler-inserted temporary locations in the - function. If an array, specifies a type for each location. + function (the length of the `code` array). If an array, specifies a type for each location. + + * `ssaflags` + + Statement-level flags for each expression in the function. Many of these are reserved, but not yet implemented: + + * 0 = inbounds + * 1,2 = inlinehint,always-inline,noinline + * 3 = strict-ieee (strictfp) + * 4-6 = + * 7 = has out-of-band info * `linetable` @@ -599,6 +637,14 @@ A temporary container for holding lowered source code. An array of integer indices into the `linetable`, giving the location associated with each statement. + * `parent` + + The `MethodInstance` that "owns" this object (if applicable). + + * `min_world`/`max_world` + + The range of world ages for which this code was valid at the time it was inferred. + Boolean properties: * `inferred` @@ -607,11 +653,11 @@ Boolean properties: * `inlineable` - Whether this should be inlined. + Whether this should be eligible for inlining. * `propagate_inbounds` - Whether this should should propagate `@inbounds` when inlined for the purpose of eliding + Whether this should propagate `@inbounds` when inlined, for the purpose of eliding `@boundscheck` blocks.
* `pure` diff --git a/src/ast.c b/src/ast.c index d9d2738b947ac..a2a4857a11466 100644 --- a/src/ast.c +++ b/src/ast.c @@ -1036,7 +1036,7 @@ static jl_value_t *jl_invoke_julia_macro(jl_array_t *args, jl_module_t *inmodule // unreachable } *ctx = mfunc->def.method->module; - result = mfunc->invoke(mfunc, margs, nargs); + result = jl_invoke(mfunc, margs, nargs); } JL_CATCH { if (jl_loaderror_type == NULL) { diff --git a/src/builtins.c b/src/builtins.c index e211a1ca3fd64..1a01da56ce83a 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -1186,7 +1186,7 @@ static void add_builtin(const char *name, jl_value_t *v) jl_fptr_args_t jl_get_builtin_fptr(jl_value_t *b) { assert(jl_isa(b, (jl_value_t*)jl_builtin_type)); - return ((jl_typemap_entry_t*)jl_gf_mtable(b)->cache)->func.linfo->specptr.fptr1; + return ((jl_typemap_entry_t*)jl_gf_mtable(b)->cache)->func.linfo->cache->specptr.fptr1; } static void add_builtin_func(const char *name, jl_fptr_args_t fptr) @@ -1256,6 +1256,7 @@ void jl_init_primitives(void) JL_GC_DISABLED add_builtin("Module", (jl_value_t*)jl_module_type); add_builtin("MethodTable", (jl_value_t*)jl_methtable_type); add_builtin("Method", (jl_value_t*)jl_method_type); + add_builtin("CodeInstance", (jl_value_t*)jl_code_instance_type); add_builtin("TypeMapEntry", (jl_value_t*)jl_typemap_entry_type); add_builtin("TypeMapLevel", (jl_value_t*)jl_typemap_level_type); add_builtin("Symbol", (jl_value_t*)jl_sym_type); diff --git a/src/codegen.cpp b/src/codegen.cpp index f863d7c3a1605..3f30663f3376f 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1029,98 +1029,168 @@ void jl_dump_compiles(void *s) static std::unique_ptr emit_function( jl_method_instance_t *lam, jl_code_info_t *src, + jl_value_t *jlrettype, size_t world, jl_llvm_functions_t *declarations, const jl_cgparams_t *params); -void jl_add_linfo_in_flight(StringRef name, jl_method_instance_t *linfo, const DataLayout &DL); +void jl_add_code_in_flight(StringRef name, jl_code_instance_t *codeinst, const DataLayout &DL); -const char *name_from_method_instance(jl_method_instance_t *li) +const char *name_from_method_instance(jl_method_instance_t *mi) { - return jl_is_method(li->def.method) ? jl_symbol_name(li->def.method->name) : "top-level scope"; + return jl_is_method(mi->def.method) ? jl_symbol_name(mi->def.method->name) : "top-level scope"; } -// Use of `li` is not clobbered in JL_TRY -JL_GCC_IGNORE_START("-Wclobbered") // this generates llvm code for the lambda info // and adds the result to the jitlayers // (and the shadow module), but doesn't yet compile // or generate object code for it extern "C" -jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t **pli, jl_code_info_t *src, size_t world, const jl_cgparams_t *params) +jl_code_instance_t *jl_compile_linfo(jl_method_instance_t *mi, jl_code_info_t *src, size_t world, const jl_cgparams_t *params) { // N.B.: `src` may have not been rooted by the caller. 
JL_TIMING(CODEGEN); - jl_method_instance_t *li = *pli; - assert(jl_is_method_instance(li)); - jl_llvm_functions_t decls = {}; + assert(jl_is_method_instance(mi)); + jl_code_instance_t *codeinst = NULL; if (params != &jl_default_cgparams /* fast path */ && !compare_cgparams(params, &jl_default_cgparams) && params->cached) jl_error("functions compiled with custom codegen params mustn't be cached"); // Fast path for the already-compiled case - if (jl_is_method(li->def.method)) { - decls = li->functionObjectsDecls; - bool already_compiled = params->cached && decls.functionObject != NULL; - if (!src) { - if ((already_compiled || li->invoke == jl_fptr_const_return) && - (li->min_world <= world && li->max_world >= world)) { - return decls; + if (jl_is_method(mi->def.method)) { + codeinst = mi->cache; + while (codeinst) { + if (codeinst->min_world <= world && world <= codeinst->max_world) { + jl_llvm_functions_t decls = codeinst->functionObjectsDecls; + bool already_compiled = params->cached && decls.functionObject != NULL; + if (!src) { + if (already_compiled || codeinst->invoke == jl_fptr_const_return) + return codeinst; + } + else if (already_compiled) { + return codeinst; + } } - } else if (already_compiled) { - return decls; + codeinst = codeinst->next; } } - JL_GC_PUSH1(&src); + JL_GC_PUSH2(&src, &codeinst); JL_LOCK(&codegen_lock); - decls = li->functionObjectsDecls; // Codegen lock held in this block { // Step 1: Re-check if this was already compiled (it may have been while // we waited at the lock). - if (!jl_is_method(li->def.method)) { - src = (jl_code_info_t*)li->inferred; - if (decls.functionObject != NULL || !src || !jl_is_code_info(src) || li->invoke == jl_fptr_const_return) { + if (!jl_is_method(mi->def.method)) { + src = (jl_code_info_t*)mi->uninferred; + if (!src || !jl_is_code_info(src)) goto locked_out; + codeinst = mi->cache; + while (codeinst) { + jl_llvm_functions_t decls = codeinst->functionObjectsDecls; + if (decls.functionObject != NULL || codeinst->invoke == jl_fptr_const_return) { + goto locked_out; + } + codeinst = codeinst->next; } } else if (!src) { // If the caller didn't provide the source, - // try to infer it for ourself, but first, re-check if it's already compiled. 
- assert(li->min_world <= world && li->max_world >= world); - if ((params->cached && decls.functionObject != NULL) || li->invoke == jl_fptr_const_return) - goto locked_out; + // try to infer it for ourself, but first, repeat the search for it already compiled + if (jl_is_method(mi->def.method)) { + codeinst = mi->cache; + while (codeinst) { + if (codeinst->min_world <= world && world <= codeinst->max_world) { + jl_llvm_functions_t decls = codeinst->functionObjectsDecls; + bool already_compiled = params->cached && decls.functionObject != NULL; + if (!src) { + if (already_compiled || codeinst->invoke == jl_fptr_const_return) + goto locked_out; + } + else if (already_compiled) { + goto locked_out; + } + } + codeinst = codeinst->next; + } + } // see if it is inferred - src = (jl_code_info_t*)li->inferred; - if (src) { - if ((jl_value_t*)src != jl_nothing) - src = jl_uncompress_ast(li->def.method, (jl_array_t*)src); - if (!jl_is_code_info(src)) { - src = jl_type_infer(pli, world, 0); - li = *pli; + codeinst = mi->cache; + while (codeinst) { + src = (jl_code_info_t*)codeinst->inferred; + if (src && codeinst->min_world <= world && world <= codeinst->max_world) { + break; } - if (!src || li->invoke == jl_fptr_const_return) - goto locked_out; + codeinst = codeinst->next; } - else { + if (!codeinst) { // declare a failure to compile goto locked_out; } - } - else if (params->cached && decls.functionObject != NULL) { - // similar to above, but never returns a NULL - // decl (unless compile fails), even if invoke == jl_fptr_const_return - goto locked_out; + // found inferred code, prep it for codegen + if ((jl_value_t*)src != jl_nothing) + src = jl_uncompress_ast(mi->def.method, codeinst, (jl_array_t*)src); + if (!jl_is_code_info(src)) { + src = jl_type_infer(mi, world, 0); + if (!src) + goto locked_out; + codeinst = jl_get_method_inferred(mi, src->rettype, src->min_world, src->max_world); + if (!codeinst->inferred) + codeinst->inferred = jl_nothing; + } + // check again for recursion + jl_llvm_functions_t decls = codeinst->functionObjectsDecls; + bool already_compiled = params->cached && decls.functionObject != NULL; + if (already_compiled || codeinst->invoke == jl_fptr_const_return) + goto locked_out; } else { - if ((jl_value_t*)src != jl_nothing) - src = jl_uncompress_ast(li->def.method, (jl_array_t*)src); + if (params->cached) { + codeinst = mi->cache; + while (codeinst) { + if (codeinst->min_world <= world && world <= codeinst->max_world) { + jl_llvm_functions_t decls = codeinst->functionObjectsDecls; + bool already_compiled = params->cached && decls.functionObject != NULL; + if (already_compiled) { + // similar to above, but never returns a NULL + // decl (unless compile fails), even if invoke == jl_fptr_const_return + goto locked_out; + } + } + codeinst = codeinst->next; + } + } + assert((jl_value_t*)src != jl_nothing); + src = jl_uncompress_ast(mi->def.method, NULL, (jl_array_t*)src); } assert(jl_is_code_info(src)); + if (!codeinst) { + codeinst = jl_get_method_inferred(mi, src->rettype, src->min_world, src->max_world); + if (src->inferred && !codeinst->inferred) + codeinst->inferred = jl_nothing; + } + if (!params->cached) { + // caller demanded that we make a new copy + jl_ptls_t ptls = jl_get_ptls_states(); + jl_code_instance_t *uncached = (jl_code_instance_t*)jl_gc_alloc(ptls, sizeof(jl_code_instance_t), + jl_code_instance_type); + uncached->min_world = codeinst->min_world; + uncached->max_world = codeinst->max_world; + uncached->functionObjectsDecls.functionObject = NULL; + 
uncached->functionObjectsDecls.specFunctionObject = NULL; + uncached->rettype = codeinst->rettype; + uncached->inferred = jl_nothing; + uncached->rettype_const = codeinst->rettype_const; + uncached->invoke = NULL; + if (codeinst->invoke == jl_fptr_const_return) + uncached->invoke = jl_fptr_const_return; + uncached->specptr.fptr = NULL; + codeinst = uncached; + } + // Step 2: setup global state bool last_n_c = nested_compile; if (!nested_compile && dump_compiles_stream != NULL) @@ -1130,49 +1200,40 @@ jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t **pli, jl_code_info_t // Step 3. actually do the work of emitting the function std::unique_ptr<Module> m; JL_TRY { - jl_llvm_functions_t *pdecls; - if (!params->cached) - pdecls = &decls; - else if (li->min_world <= world && li->max_world >= world) - pdecls = &li->functionObjectsDecls; - else if (!jl_is_method(li->def.method)) // toplevel thunk - pdecls = &li->functionObjectsDecls; - else - pdecls = &decls; - m = emit_function(li, src, world, pdecls, params); - if (params->cached && world) - decls = li->functionObjectsDecls; + m = emit_function(mi, src, codeinst->rettype, world, &codeinst->functionObjectsDecls, params); //n_emit++; } JL_CATCH { // something failed! this is very bad, since other WIP may be pointing to this function // but there's not much we can do now. try to clear much of the WIP anyways. - li->functionObjectsDecls.functionObject = NULL; - li->functionObjectsDecls.specFunctionObject = NULL; + codeinst->functionObjectsDecls.functionObject = NULL; + codeinst->functionObjectsDecls.specFunctionObject = NULL; nested_compile = last_n_c; JL_UNLOCK(&codegen_lock); // Might GC - const char *mname = name_from_method_instance(li); + const char *mname = name_from_method_instance(mi); jl_rethrow_with_add("error compiling %s", mname); } + jl_llvm_functions_t decls = codeinst->functionObjectsDecls; const char *f = decls.functionObject; const char *specf = decls.specFunctionObject; if (JL_HOOK_TEST(params, module_activation)) { JL_HOOK_CALL(params, module_activation, 1, jl_box_voidpointer(wrap(m.release()))); - } else { + } + else { // Step 4. Prepare debug info to receive this function // record that this function name came from this linfo, // so we can build a reverse mapping for debug-info. - bool toplevel = !jl_is_method(li->def.method); + bool toplevel = !jl_is_method(mi->def.method); if (!toplevel) { const DataLayout &DL = m->getDataLayout(); // but don't remember toplevel thunks because // they may not be rooted in the gc for the life of the program, // and the runtime doesn't notify us when the code becomes unreachable :( if (specf) - jl_add_linfo_in_flight(specf, li, DL); + jl_add_code_in_flight(specf, codeinst, DL); if (strcmp(f, "jl_fptr_args") && strcmp(f, "jl_fptr_sparam")) - jl_add_linfo_in_flight(f, li, DL); + jl_add_code_in_flight(f, codeinst, DL); } // Step 5. 
Add the result to the execution engine now @@ -1182,29 +1243,29 @@ jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t **pli, jl_code_info_t if (// don't alter `inferred` when the code is not directly being used world && // don't change inferred state - li->inferred) { + codeinst->inferred) { if (// keep code when keeping everything !(JL_DELETE_NON_INLINEABLE) || - // keep code when debugging level >= 2 + // aggressively keep code when debugging level >= 2 jl_options.debug_level > 1) { // update the stored code - if (li->inferred != (jl_value_t*)src) { - if (jl_is_method(li->def.method)) - src = (jl_code_info_t*)jl_compress_ast(li->def.method, src); - li->inferred = (jl_value_t*)src; - jl_gc_wb(li, src); + if (codeinst->inferred != (jl_value_t*)src) { + if (jl_is_method(mi->def.method)) + src = (jl_code_info_t*)jl_compress_ast(mi->def.method, src); + codeinst->inferred = (jl_value_t*)src; + jl_gc_wb(codeinst, src); } } else if (// don't delete toplevel code - jl_is_method(li->def.method) && + jl_is_method(mi->def.method) && // and there is something to delete (test this before calling jl_ast_flag_inlineable) - li->inferred != jl_nothing && + codeinst->inferred != jl_nothing && // don't delete inlineable code, unless it is constant - (li->invoke == jl_fptr_const_return || !jl_ast_flag_inlineable((jl_array_t*)li->inferred)) && + (codeinst->invoke == jl_fptr_const_return || !jl_ast_flag_inlineable((jl_array_t*)codeinst->inferred)) && // don't delete code when generating a precompile file !imaging_mode) { // if not inlineable, code won't be needed again - li->inferred = jl_nothing; + codeinst->inferred = jl_nothing; } } @@ -1215,25 +1276,24 @@ jl_llvm_functions_t jl_compile_linfo(jl_method_instance_t **pli, jl_code_info_t JL_UNLOCK(&codegen_lock); // Might GC // If logging of the compilation stream is enabled then dump the function to the stream - // ... unless li->def isn't defined here meaning the function is a toplevel thunk and + // ... unless mi->def isn't defined here meaning the function is a toplevel thunk and // would have its CodeInfo printed in the stream, which might contain double-quotes that // would not be properly escaped given the double-quotes added to the stream below. - if (dump_compiles_stream != NULL && jl_is_method(li->def.method)) { + if (dump_compiles_stream != NULL && jl_is_method(mi->def.method)) { uint64_t this_time = jl_hrtime(); jl_printf(dump_compiles_stream, "%" PRIu64 "\t\"", this_time - last_time); - jl_static_show(dump_compiles_stream, li->specTypes); + jl_static_show(dump_compiles_stream, mi->specTypes); jl_printf(dump_compiles_stream, "\"\n"); last_time = this_time; } JL_GC_POP(); - return decls; + return codeinst; locked_out: JL_UNLOCK(&codegen_lock); JL_GC_POP(); - return decls; + return codeinst; } -JL_GCC_IGNORE_STOP #define getModuleFlag(m,str) m->getModuleFlag(str) @@ -1265,7 +1325,6 @@ static void jl_setup_module(Module *m, const jl_cgparams_t *params = &jl_default // this ensures that llvmf has been emitted to the execution engine, // returning the function pointer to it -extern void jl_callback_triggered_linfos(void); static uint64_t getAddressForFunction(StringRef fname) { JL_TIMING(LLVM_EMIT); @@ -1278,9 +1337,6 @@ static uint64_t getAddressForFunction(StringRef fname) #endif jl_finalize_function(fname); uint64_t ret = jl_ExecutionEngine->getFunctionAddress(fname); - // delay executing trace callbacks until here to make sure there's no - // recursive compilation. 
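The keep-or-discard rule for `codeinst->inferred` in the block above reduces to a small predicate. The sketch below restates it in standalone C; the parameters stand in for `jl_options.debug_level`, `jl_ast_flag_inlineable`, `imaging_mode`, and the const-return test, and it assumes a build where the JL_DELETE_NON_INLINEABLE flag is enabled:

    #include <stdbool.h>

    /* Returns true when the post-codegen pass above may replace
     * codeinst->inferred with jl_nothing (the source will never be
     * needed again, so it can be dropped to save memory). */
    static bool may_discard_inferred(bool is_method,     /* not a toplevel thunk */
                                     bool has_source,    /* inferred != jl_nothing */
                                     bool const_return,  /* invoke == jl_fptr_const_return */
                                     bool inlineable,    /* jl_ast_flag_inlineable(...) */
                                     bool imaging_mode,
                                     int debug_level)
    {
        if (debug_level > 1)
            return false;     /* aggressively keep code when debugging */
        if (!is_method || !has_source)
            return false;     /* toplevel code, or nothing to delete */
        if (inlineable && !const_return)
            return false;     /* the inliner may still want this source */
        return !imaging_mode; /* keep sources when writing a precompile file */
    }
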
- jl_callback_triggered_linfos(); return ret; } @@ -1298,80 +1354,70 @@ uint64_t jl_get_llvm_fptr(void *function) return addr; } -static jl_method_instance_t *jl_get_unspecialized(jl_method_instance_t *method) -{ - // one unspecialized version of a function can be shared among all cached specializations - jl_method_t *def = method->def.method; - if (def->source == NULL) { - return method; - } - if (def->unspecialized == NULL) { - JL_LOCK(&def->writelock); - if (def->unspecialized == NULL) { - def->unspecialized = jl_get_specialized(def, def->sig, jl_emptysvec); - jl_gc_wb(def, def->unspecialized); - } - JL_UNLOCK(&def->writelock); - } - return def->unspecialized; -} - -// this compiles li and emits fptr +// this finishes compiling a native-code object and sets `output->invoke` extern "C" -jl_callptr_t jl_generate_fptr(jl_method_instance_t **pli, jl_llvm_functions_t decls, size_t world) +void jl_generate_fptr(jl_code_instance_t *output) { - jl_method_instance_t *li = *pli; + jl_code_instance_t *codeinst = output; jl_callptr_t fptr; - fptr = li->invoke; - if (fptr != jl_fptr_trampoline) - return fptr; + fptr = codeinst->invoke; + if (fptr != NULL) + return; JL_LOCK(&codegen_lock); - fptr = li->invoke; - if (fptr != jl_fptr_trampoline) { + fptr = codeinst->invoke; + if (fptr != NULL) { JL_UNLOCK(&codegen_lock); - return fptr; + return; } jl_method_instance_t *unspec = NULL; - if (jl_is_method(li->def.method)) { - if (li->def.method->unspecialized) { - unspec = li->def.method->unspecialized; - } + if (jl_is_method(codeinst->def->def.method)) { + jl_llvm_functions_t decls = codeinst->functionObjectsDecls; const char *F = decls.functionObject; const char *specF = decls.specFunctionObject; if (!F || !jl_can_finalize_function(F) || (specF && !jl_can_finalize_function(specF))) { // can't compile F in the JIT right now, // so instead compile an unspecialized version // and return its fptr instead + if (codeinst->def->def.method->unspecialized) { + unspec = codeinst->def->def.method->unspecialized; + } if (!unspec) - unspec = jl_get_unspecialized(li); // get-or-create the unspecialized version to cache the result + unspec = jl_get_unspecialized(codeinst->def); // get-or-create the unspecialized version to cache the result jl_code_info_t *src = (jl_code_info_t*)unspec->def.method->source; if (src == NULL) { assert(unspec->def.method->generator); src = jl_code_for_staged(unspec); } - fptr = unspec->invoke; - if (fptr != jl_fptr_trampoline) { - li->specptr = unspec->specptr; - li->inferred_const = unspec->inferred_const; - if (li->inferred_const) - jl_gc_wb(li, li->inferred_const); - li->invoke = fptr; - JL_UNLOCK(&codegen_lock); - return fptr; + jl_code_instance_t *ucache = unspec->cache; + while (ucache) { + if (ucache->min_world <= output->min_world && output->max_world <= ucache->max_world && ucache->invoke != NULL) + break; + ucache = ucache->next; } - if (unspec == li) { - // discard decls so that this generated function will get compiled - // and cached permanently without optimizations - unspec->functionObjectsDecls.functionObject = NULL; - unspec->functionObjectsDecls.specFunctionObject = NULL; + if (codeinst->invoke) + return; + if (ucache != NULL) { + codeinst->specptr = ucache->specptr; + codeinst->rettype_const = ucache->rettype_const; + if (codeinst->rettype_const) + jl_gc_wb(codeinst, codeinst->rettype_const); + codeinst->invoke = ucache->invoke; + JL_UNLOCK(&codegen_lock); + return; } assert(src); - decls = jl_compile_linfo(&unspec, src, unspec->min_world, &jl_default_cgparams); // this 
does not change unspec - li = unspec; + ucache = jl_compile_linfo(unspec, src, codeinst->def->def.method->primary_world, &jl_default_cgparams); + // if we hit a generated function, this might explode now, because + // (unlike normal functions), any call to a generated function + // in the compiler (Core or Core.Compiler) might cause us to die at any time + if (ucache == NULL) + jl_error("Failed to handle @generated function in Core.Compiler module."); + codeinst = ucache; } } + jl_llvm_functions_t decls = codeinst->functionObjectsDecls; const char *F = decls.functionObject; const char *specF = decls.specFunctionObject; assert(F && jl_can_finalize_function(F)); @@ -1386,23 +1432,18 @@ jl_callptr_t jl_generate_fptr(jl_method_instance_t **pli, jl_llvm_functions_t de void *specptr = (void*)(uintptr_t)getAddressForFunction(specF); assert(specptr != NULL); // the fptr should be cached somewhere also - if (li->invoke == jl_fptr_trampoline) { - // once set, don't change invoke-ptr, as that leads to race conditions - // with the (not) simultaneous updates to invoke and specptr - li->specptr.fptr = specptr; - li->invoke = fptr; - } - if (li != *pli) { - assert(unspec); - li = *pli; - li->specptr = unspec->specptr; - li->inferred_const = unspec->inferred_const; - if (li->inferred_const) - jl_gc_wb(li, li->inferred_const); - li->invoke = fptr; + if (codeinst->invoke == NULL) { + codeinst->specptr.fptr = specptr; + codeinst->invoke = fptr; + } + if (codeinst != output && output->invoke == NULL) { + output->specptr = codeinst->specptr; + output->rettype_const = codeinst->rettype_const; + if (output->rettype_const) + jl_gc_wb(output, output->rettype_const); + output->invoke = fptr; } JL_UNLOCK(&codegen_lock); // Might GC - return fptr; } static Function *jl_cfunction_object(jl_value_t *f, jl_value_t *declrt, jl_tupletype_t *argt); @@ -1457,29 +1498,35 @@ void jl_extern_c(jl_function_t *f, jl_value_t *rt, jl_value_t *argt, char *name) // this is paired with jl_dump_function_ir and jl_dump_function_asm in particular ways: // misuse will leak memory or cause read-after-free extern "C" JL_DLLEXPORT -void *jl_get_llvmf_defn(jl_method_instance_t *linfo, size_t world, bool getwrapper, bool optimize, const jl_cgparams_t params) +void *jl_get_llvmf_defn(jl_method_instance_t *mi, size_t world, bool getwrapper, bool optimize, const jl_cgparams_t params) { - if (jl_is_method(linfo->def.method) && linfo->def.method->source == NULL && - linfo->def.method->generator == NULL) { + if (jl_is_method(mi->def.method) && mi->def.method->source == NULL && + mi->def.method->generator == NULL) { // not a generic function return NULL; } - jl_code_info_t *src = (jl_code_info_t*)linfo->inferred; - JL_GC_PUSH1(&src); + jl_value_t *jlrettype = (jl_value_t*)jl_any_type; + jl_code_info_t *src = NULL; + JL_GC_PUSH2(&src, &jlrettype); + if (jl_code_instance_t *codeinst = jl_rettype_inferred(mi, world, world)) { + src = (jl_code_info_t*)codeinst->inferred; + if ((jl_value_t*)src != jl_nothing && !jl_is_code_info(src) && jl_is_method(mi->def.method)) + src = jl_uncompress_ast(mi->def.method, codeinst, (jl_array_t*)src); + jlrettype = codeinst->rettype; + } if (!src || (jl_value_t*)src == jl_nothing) { - src = jl_type_infer(&linfo, world, 0); - if (!src && jl_is_method(linfo->def.method)) - src = linfo->def.method->generator ? 
jl_code_for_staged(linfo) : (jl_code_info_t*)linfo->def.method->source; - } - if ((jl_value_t*)src == jl_nothing) - src = NULL; - if (src && !jl_is_code_info(src) && jl_is_method(linfo->def.method)) - src = jl_uncompress_ast(linfo->def.method, (jl_array_t*)src); - if (src && !jl_is_code_info(src)) - src = NULL; - if (!src) - jl_error("source not found for function"); + src = jl_type_infer(mi, world, 0); + if (src) + jlrettype = src->rettype; + if (!src && jl_is_method(mi->def.method)) { + src = mi->def.method->generator ? jl_code_for_staged(mi) : (jl_code_info_t*)mi->def.method->source; + if (src && !jl_is_code_info(src) && jl_is_method(mi->def.method)) + src = jl_uncompress_ast(mi->def.method, NULL, (jl_array_t*)src); + } + } + if (src == NULL || (jl_value_t*)src == jl_nothing || !jl_is_code_info(src)) + jl_error("source not available for function"); // Backup the info for the nested compile JL_LOCK(&codegen_lock); @@ -1488,13 +1535,13 @@ void *jl_get_llvmf_defn(jl_method_instance_t *linfo, size_t world, bool getwrapp jl_llvm_functions_t declarations; std::unique_ptr<Module> m; JL_TRY { - m = emit_function(linfo, src, world, &declarations, &params); + m = emit_function(mi, src, jlrettype, world, &declarations, &params); } JL_CATCH { // something failed! m.reset(); JL_UNLOCK(&codegen_lock); // Might GC - const char *mname = name_from_method_instance(linfo); + const char *mname = name_from_method_instance(mi); jl_rethrow_with_add("error compiling %s", mname); } @@ -1514,15 +1561,21 @@ void *jl_get_llvmf_defn(jl_method_instance_t *linfo, size_t world, bool getwrapp if (f) // don't try to free sentinel names like "jl_fptr_args" and "jl_fptr_sparam" free(const_cast<char*>(fname)); assert(specf || f); - // clone the name from the runtime linfo, if it exists + // try to clone the name from some linfo, if it exists // to give the user a (false) sense of stability - specfname = linfo->functionObjectsDecls.specFunctionObject; - if (specfname && specf) { - specf->setName(specfname); - } - fname = linfo->functionObjectsDecls.functionObject; - if (fname && f && strcmp(fname, "jl_fptr_args") && strcmp(fname, "jl_fptr_sparam")) { - f->setName(fname); + jl_code_instance_t *codeinst = mi->cache; + while (codeinst) { + specfname = codeinst->functionObjectsDecls.specFunctionObject; + if (specfname && specf) { + specf->setName(specfname); + } + fname = codeinst->functionObjectsDecls.functionObject; + if (fname && f) { + if (strcmp(fname, "jl_fptr_args") && strcmp(fname, "jl_fptr_sparam")) + f->setName(fname); + break; + } + codeinst = codeinst->next; } m.release(); // the return object `llvmf` will be the owning pointer JL_UNLOCK(&codegen_lock); // Might GC @@ -1535,32 +1588,37 @@ void *jl_get_llvmf_defn(jl_method_instance_t *linfo, size_t world, bool getwrapp extern "C" JL_DLLEXPORT -void *jl_get_llvmf_decl(jl_method_instance_t *linfo, size_t world, bool getwrapper, const jl_cgparams_t params) +void *jl_get_llvmf_decl(jl_method_instance_t *mi, size_t world, bool getwrapper, const jl_cgparams_t params) { - if (jl_is_method(linfo->def.method) && linfo->def.method->source == NULL && - linfo->def.method->generator == NULL) { + if (jl_is_method(mi->def.method) && mi->def.method->source == NULL && + mi->def.method->generator == NULL) { // not a generic function return NULL; } // compile this normally jl_code_info_t *src = NULL; - if (linfo->inferred == NULL) - src = jl_type_infer(&linfo, world, 0); - jl_llvm_functions_t decls = jl_compile_linfo(&linfo, src, world, &jl_default_cgparams); + if (!jl_rettype_inferred(mi, world, world)) + 
src = jl_type_infer(mi, world, 0); + jl_code_instance_t *codeinst = jl_compile_linfo(mi, src, world, &params); + if (codeinst == NULL) + // internal error + return NULL; - if (decls.functionObject == NULL && linfo->invoke == jl_fptr_const_return && jl_is_method(linfo->def.method)) { + const jl_llvm_functions_t &decls = codeinst->functionObjectsDecls; + if (decls.functionObject == NULL && codeinst->invoke == jl_fptr_const_return && jl_is_method(mi->def.method)) { // normally we don't generate native code for these functions, so need an exception here // This leaks a bit of memory to cache native code that we'll never actually need JL_LOCK(&codegen_lock); - decls = linfo->functionObjectsDecls; if (decls.functionObject == NULL) { - src = jl_type_infer(&linfo, world, 0); - if (!src) { - src = linfo->def.method->generator ? jl_code_for_staged(linfo) : (jl_code_info_t*)linfo->def.method->source; - } - decls = jl_compile_linfo(&linfo, src, world, &params); - linfo->functionObjectsDecls = decls; + if (src == NULL) + src = jl_type_infer(mi, world, 0); + if (src == NULL) + src = mi->def.method->generator ? jl_code_for_staged(mi) : (jl_code_info_t*)mi->def.method->source; + codeinst = jl_compile_linfo(mi, src, world, &params); + if (codeinst == NULL) + // internal error + return NULL; } JL_UNLOCK(&codegen_lock); } @@ -1579,7 +1637,7 @@ void *jl_get_llvmf_decl(jl_method_instance_t *linfo, size_t world, bool getwrapp return f; } else { - jl_returninfo_t returninfo = get_specsig_function(NULL, decls.specFunctionObject, linfo->specTypes, linfo->rettype); + jl_returninfo_t returninfo = get_specsig_function(NULL, decls.specFunctionObject, mi->specTypes, codeinst->rettype); return returninfo.decl; } } @@ -2998,12 +3056,12 @@ static Value *emit_jlcall(jl_codectx_t &ctx, Value *theFptr, Value *theF, } -static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, jl_method_instance_t *li, StringRef specFunctionObject, +static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, jl_code_instance_t *codeinst, StringRef specFunctionObject, jl_cgval_t *argv, size_t nargs, jl_value_t *inferred_retty) { // emit specialized call site - jl_value_t *jlretty = li->rettype; - jl_returninfo_t returninfo = get_specsig_function(jl_Module, specFunctionObject, li->specTypes, jlretty); + jl_value_t *jlretty = codeinst->rettype; + jl_returninfo_t returninfo = get_specsig_function(jl_Module, specFunctionObject, codeinst->def->specTypes, jlretty); FunctionType *cft = returninfo.decl->getFunctionType(); size_t nfargs = cft->getNumParams(); @@ -3030,7 +3088,7 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, jl_method_instance_ } for (size_t i = 0; i < nargs; i++) { - jl_value_t *jt = jl_nth_slot_type(li->specTypes, i); + jl_value_t *jt = jl_nth_slot_type(codeinst->def->specTypes, i); bool isboxed; Type *et = julia_type_to_llvm(jt, &isboxed); if (type_is_ghost(et)) @@ -3109,7 +3167,7 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, jl_method_instance_ return retval; } -static jl_cgval_t emit_call_specfun_boxed(jl_codectx_t &ctx, jl_method_instance_t *li, StringRef specFunctionObject, +static jl_cgval_t emit_call_specfun_boxed(jl_codectx_t &ctx, StringRef specFunctionObject, jl_cgval_t *argv, size_t nargs, jl_value_t *inferred_retty) { auto theFptr = jl_Module->getOrInsertFunction(specFunctionObject, jl_func_sig); @@ -3139,21 +3197,24 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_t *rt) bool handled = false; jl_cgval_t result; if (lival.constant) { - jl_method_instance_t *li = 
(jl_method_instance_t*)lival.constant; - assert(jl_is_method_instance(li)); - jl_llvm_functions_t decls = jl_compile_linfo(&li, NULL, ctx.world, ctx.params); - if (li->invoke == jl_fptr_const_return) { - assert(li->inferred_const); - return mark_julia_const(li->inferred_const); - } - if (decls.functionObject) { - if (!strcmp(decls.functionObject, "jl_fptr_args")) { - result = emit_call_specfun_boxed(ctx, li, decls.specFunctionObject, argv, nargs, rt); - handled = true; - } - else if (!!strcmp(decls.functionObject, "jl_fptr_sparam")) { - result = emit_call_specfun_other(ctx, li, decls.specFunctionObject, argv, nargs, rt); - handled = true; + jl_method_instance_t *mi = (jl_method_instance_t*)lival.constant; + assert(jl_is_method_instance(mi)); + jl_code_instance_t *codeinst = jl_compile_linfo(mi, NULL, ctx.world, ctx.params); + if (codeinst && codeinst->inferred) { + const jl_llvm_functions_t &decls = codeinst->functionObjectsDecls; + if (codeinst->invoke == jl_fptr_const_return) { + assert(codeinst->rettype_const); + return mark_julia_const(codeinst->rettype_const); + } + if (decls.functionObject) { + if (!strcmp(decls.functionObject, "jl_fptr_args")) { + result = emit_call_specfun_boxed(ctx, decls.specFunctionObject, argv, nargs, rt); + handled = true; + } + else if (!!strcmp(decls.functionObject, "jl_fptr_sparam")) { + result = emit_call_specfun_other(ctx, codeinst, decls.specFunctionObject, argv, nargs, rt); + handled = true; + } } } } @@ -4217,8 +4278,9 @@ static void emit_last_age_field(jl_codectx_t &ctx) static void emit_cfunc_invalidate( Function *gf_thunk, jl_returninfo_t::CallingConv cc, - jl_method_instance_t *lam, size_t nargs, size_t world) + jl_code_instance_t *codeinst, size_t nargs, size_t world) { + jl_method_instance_t *lam = codeinst->def; jl_codectx_t ctx(jl_LLVMContext); ctx.f = gf_thunk; ctx.world = world; @@ -4263,7 +4325,7 @@ static void emit_cfunc_invalidate( assert(AI == gf_thunk->arg_end()); Value *gf_ret = emit_jlcall(ctx, jlapplygeneric_func, NULL, myargs, nargs); jl_cgval_t gf_retbox = mark_julia_type(ctx, gf_ret, true, jl_any_type); - jl_value_t *astrt = lam->rettype; + jl_value_t *astrt = codeinst->rettype; if (cc != jl_returninfo_t::Boxed) { emit_typecheck(ctx, gf_retbox, astrt, "cfunction"); } @@ -4316,23 +4378,30 @@ static Function* gen_cfun_wrapper( size_t nargs = sig.nargs; const char *name = "cfunction"; size_t world = jl_world_counter; + size_t min_valid = 0; + size_t max_valid = ~(size_t)0; bool nest = (!ff || unionall_env); // try to look up this function for direct invoking - jl_method_instance_t *lam = sigt ? jl_get_specialization1((jl_tupletype_t*)sigt, world, 1) : NULL; + jl_method_instance_t *lam = sigt ? 
jl_get_specialization1((jl_tupletype_t*)sigt, world, &min_valid, &max_valid, 1) : NULL; + jl_code_instance_t *codeinst = NULL; jl_value_t *astrt = (jl_value_t*)jl_any_type; // infer it first, if necessary if (lam) { name = jl_symbol_name(lam->def.method->name); jl_code_info_t *src = NULL; - if (!into && !lam->inferred) // TODO: this isn't ideal to be unconditionally calling type inference from here - src = jl_type_infer(&lam, world, 0); - jl_compile_linfo(&lam, src, world, &jl_default_cgparams); - if (lam->functionObjectsDecls.specFunctionObject == NULL || - !strcmp(lam->functionObjectsDecls.specFunctionObject, "jl_fptr_sparam")) { - lam = NULL; // TODO: use emit_invoke framework to dispatch these - } - if (lam) { - astrt = lam->rettype; + codeinst = jl_rettype_inferred(lam, world, world); + if (!into && codeinst == NULL) // TODO: this isn't ideal to be unconditionally calling type inference from here + src = jl_type_infer(lam, world, 0); + codeinst = jl_compile_linfo(lam, src, world, &jl_default_cgparams); + if (codeinst) { + if (!codeinst->inferred || + codeinst->functionObjectsDecls.specFunctionObject == NULL || + !strcmp(codeinst->functionObjectsDecls.specFunctionObject, "jl_fptr_sparam")) { + codeinst = NULL; // TODO: use emit_invoke framework to dispatch these + } + } + if (codeinst) { + astrt = codeinst->rettype; if (astrt != (jl_value_t*)jl_bottom_type && jl_type_intersection(astrt, declrt) == jl_bottom_type) { // Do not warn if the function does not return since it is @@ -4342,6 +4411,9 @@ static Function* gen_cfun_wrapper( jl_printf(JL_STDERR, "WARNING: cfunction: return type of %s does not match\n", name); } } + else { + lam = NULL; + } } std::stringstream funcName; @@ -4419,8 +4491,8 @@ static Function* gen_cfun_wrapper( T_size, ctx.builder.CreateConstInBoundsGEP1_32( T_size, - emit_bitcast(ctx, literal_pointer_val(ctx, (jl_value_t*)lam), T_psize), - offsetof(jl_method_instance_t, max_world) / sizeof(size_t))); + emit_bitcast(ctx, literal_pointer_val(ctx, (jl_value_t*)codeinst), T_psize), + offsetof(jl_code_instance_t, max_world) / sizeof(size_t))); // XXX: age is always OK if we don't have a TLS. This is a hack required due to `@threadcall` abuse. 
// and adds quite a bit of complexity here, even though it's still wrong // (anything that tries to interact with the runtime will fault) @@ -4585,20 +4657,20 @@ static Function* gen_cfun_wrapper( // Create the call bool jlfunc_sret; jl_cgval_t retval; - if (lam && lam->invoke == jl_fptr_const_return) { + if (codeinst && codeinst->invoke == jl_fptr_const_return) { nargs = 0; // arguments not needed -- TODO: not really true, should emit an age_ok test and jlcall jlfunc_sret = false; - retval = mark_julia_const(lam->inferred_const); + retval = mark_julia_const(codeinst->rettype_const); } - else if (!lam || !lam->functionObjectsDecls.functionObject || - !strcmp(lam->functionObjectsDecls.functionObject, "jl_fptr_args") || - !strcmp(lam->functionObjectsDecls.functionObject, "jl_fptr_sparam")) { + else if (!codeinst || !codeinst->functionObjectsDecls.functionObject || + !strcmp(codeinst->functionObjectsDecls.functionObject, "jl_fptr_args") || + !strcmp(codeinst->functionObjectsDecls.functionObject, "jl_fptr_sparam")) { // emit a jlcall jlfunc_sret = false; Function *theFptr = NULL; - if (lam && lam->functionObjectsDecls.functionObject) { - if (!strcmp(lam->functionObjectsDecls.functionObject, "jl_fptr_args")) { - const char *fname = lam->functionObjectsDecls.specFunctionObject; + if (codeinst && codeinst->functionObjectsDecls.functionObject) { + if (!strcmp(codeinst->functionObjectsDecls.functionObject, "jl_fptr_args")) { + const char *fname = codeinst->functionObjectsDecls.specFunctionObject; theFptr = cast_or_null<Function>(jl_Module->getNamedValue(fname)); if (!theFptr) { theFptr = Function::Create(jl_func_sig, GlobalVariable::ExternalLinkage, @@ -4640,8 +4712,8 @@ static Function* gen_cfun_wrapper( } else { // emit a specsig call - const char *protoname = lam->functionObjectsDecls.specFunctionObject; - jl_returninfo_t returninfo = get_specsig_function(M, protoname, lam->specTypes, lam->rettype); + const char *protoname = codeinst->functionObjectsDecls.specFunctionObject; + jl_returninfo_t returninfo = get_specsig_function(M, protoname, lam->specTypes, codeinst->rettype); FunctionType *cft = returninfo.decl->getFunctionType(); jlfunc_sret = (returninfo.cc == jl_returninfo_t::SRet); @@ -4695,7 +4767,7 @@ static Function* gen_cfun_wrapper( // build a specsig -> jl_apply_generic converter thunk // this builds a method that calls jl_apply_generic (as a closure over a singleton function pointer), // but which has the signature of a specsig - emit_cfunc_invalidate(gf_thunk, returninfo.cc, lam, nargs + 1, world); + emit_cfunc_invalidate(gf_thunk, returninfo.cc, codeinst, nargs + 1, world); theFptr = ctx.builder.CreateSelect(age_ok, theFptr, gf_thunk); } CallInst *call = ctx.builder.CreateCall(theFptr, ArrayRef<Value*>(args)); @@ -5065,7 +5137,7 @@ static Function *jl_cfunction_object(jl_value_t *ff, jl_value_t *declrt, jl_tupl } // generate a julia-callable function that calls f (AKA lam) -static Function *gen_invoke_wrapper(jl_method_instance_t *lam, const jl_returninfo_t &f, StringRef funcName, Module *M) +static Function *gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *jlretty, const jl_returninfo_t &f, StringRef funcName, Module *M) { Function *w = Function::Create(jl_func_sig, GlobalVariable::ExternalLinkage, funcName, M); add_return_attr(w, Attribute::NonNull); @@ -5135,7 +5207,6 @@ static Function *gen_invoke_wrapper(jl_method_instance_t *lam, const jl_returnin CallInst *call = ctx.builder.CreateCall(f.decl, ArrayRef<Value*>(&args[0], nfargs)); call->setAttributes(f.decl->getAttributes()); - jl_value_t 
*jlretty = lam->rettype; jl_cgval_t retval; switch (f.cc) { case jl_returninfo_t::Boxed: @@ -5323,6 +5394,7 @@ static jl_datatype_t *compute_va_type(jl_method_instance_t *lam, size_t nreq) static std::unique_ptr<Module> emit_function( jl_method_instance_t *lam, jl_code_info_t *src, + jl_value_t *jlrettype, size_t world, jl_llvm_functions_t *declarations, const jl_cgparams_t *params) @@ -5417,7 +5489,6 @@ static std::unique_ptr<Module> emit_function( } } - jl_value_t *jlrettype = lam->rettype; bool specsig = uses_specsig(lam->specTypes, nreq, jlrettype, needsparams, va, src, params->prefer_specsig); // step 3. some variable analysis @@ -5487,7 +5558,7 @@ static std::unique_ptr<Module> emit_function( std::stringstream wrapName; wrapName << "jfptr_" << unadorned_name << "_" << globalUnique; - Function *fwrap = gen_invoke_wrapper(lam, returninfo, wrapName.str(), M); + Function *fwrap = gen_invoke_wrapper(lam, jlrettype, returninfo, wrapName.str(), M); declarations->functionObject = strdup(fwrap->getName().str().c_str()); } else { @@ -5560,7 +5631,7 @@ static std::unique_ptr<Module> emit_function( subrty = jl_di_func_sig; } else { - subrty = get_specsig_di(lam->rettype, lam->specTypes, topfile, dbuilder); + subrty = get_specsig_di(jlrettype, lam->specTypes, topfile, dbuilder); } SP = dbuilder.createFunction(CU, dbgFuncName, // Name @@ -6747,7 +6818,7 @@ static GlobalVariable *julia_const_gv(jl_value_t *val) } // TODO: do this lazily -extern "C" void jl_fptr_to_llvm(void *fptr, jl_method_instance_t *lam, int specsig) +extern "C" void jl_fptr_to_llvm(void *fptr, jl_code_instance_t *lam, int specsig) { if (!imaging_mode) { // in imaging mode, it's fine to use the fptr, but we don't want it in the shadow_module // this assigns a function pointer (from loading the system image), to the function object @@ -6760,7 +6831,7 @@ extern "C" void jl_fptr_to_llvm(void *fptr, jl_method_instance_t *lam, int specs funcName << "jsys3_"; else funcName << "julia_"; // it's a specsig call - const char* unadorned_name = jl_symbol_name(lam->def.method->name); + const char* unadorned_name = jl_symbol_name(lam->def->def.method->name); funcName << unadorned_name << "_" << globalUnique++; Function *f = Function::Create(jl_func_sig, Function::ExternalLinkage, funcName.str()); add_named_global(f, fptr); diff --git a/src/common_symbols1.inc b/src/common_symbols1.inc index bd8970da28695..428b7406f96bd 100644 --- a/src/common_symbols1.inc +++ b/src/common_symbols1.inc @@ -98,8 +98,6 @@ jl_symbol("isa"), jl_symbol("UInt"), jl_symbol("haskey"), jl_symbol("setproperty!"), -jl_symbol("similar"), -jl_symbol("sext_int"), jl_symbol("promote"), jl_symbol("undef"), jl_symbol("Vector"), diff --git a/src/debuginfo.cpp b/src/debuginfo.cpp index c092bbf458a93..a3ee4f64e64f5 100644 --- a/src/debuginfo.cpp +++ b/src/debuginfo.cpp @@ -68,7 +68,7 @@ struct ObjectInfo { // Maintain a mapping of unrealized function names -> linfo objects // so that when we see it get emitted, we can add a link back to the linfo // that it came from (providing name, type signature, file info, etc.) 
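The declarations below rename this table and re-key it by CodeInstance. As a toy standalone version of the handoff it implements (codegen registers a mangled symbol name before the module is handed to the JIT; the event listener claims the entry when the symbol is actually emitted), with a fixed-size table, no locking, and invented names, purely for illustration:

    #include <string.h>

    /* Opaque stand-in for jl_code_instance_t. */
    typedef struct code_instance code_instance;

    /* Toy "in flight" table; the real one is an LLVM StringMap. */
    static struct { const char *name; code_instance *ci; } in_flight[64];
    static int n_in_flight;

    static void add_code_in_flight(const char *name, code_instance *ci)
    {
        /* this sketch assumes capacity; the real map grows as needed */
        in_flight[n_in_flight].name = name;
        in_flight[n_in_flight].ci = ci;
        n_in_flight++;
    }

    /* Called when the JIT emits a symbol: claim and remove the entry,
     * so function pointers get patched exactly once per name. */
    static code_instance *take_code_in_flight(const char *name)
    {
        for (int i = 0; i < n_in_flight; i++) {
            if (strcmp(in_flight[i].name, name) == 0) {
                code_instance *ci = in_flight[i].ci;
                in_flight[i] = in_flight[--n_in_flight];  /* swap-remove */
                return ci;
            }
        }
        return NULL;  /* symbol wasn't one of ours */
    }
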
-static StringMap<jl_method_instance_t*> linfo_in_flight; +static StringMap<jl_code_instance_t*> ncode_in_flight; static std::string mangle(const std::string &Name, const DataLayout &DL) { std::string MangledName; @@ -78,9 +78,9 @@ static std::string mangle(const std::string &Name, const DataLayout &DL) } return MangledName; } -void jl_add_linfo_in_flight(StringRef name, jl_method_instance_t *linfo, const DataLayout &DL) +void jl_add_code_in_flight(StringRef name, jl_code_instance_t *codeinst, const DataLayout &DL) { - linfo_in_flight[mangle(name, DL)] = linfo; + ncode_in_flight[mangle(name, DL)] = codeinst; } @@ -153,19 +153,6 @@ struct strrefcomp { } }; -extern "C" tracer_cb jl_linfo_tracer; -static std::vector<jl_method_instance_t*> triggered_linfos; -void jl_callback_triggered_linfos(void) -{ - if (triggered_linfos.empty()) - return; - if (jl_linfo_tracer) { - std::vector<jl_method_instance_t*> to_process(std::move(triggered_linfos)); - for (jl_method_instance_t *linfo : to_process) - jl_call_tracer(jl_linfo_tracer, (jl_value_t*)linfo); - } -} - class JuliaJITEventListener: public JITEventListener { std::map<size_t, ObjectInfo, revcomp> objectmap; @@ -196,7 +183,7 @@ class JuliaJITEventListener: public JITEventListener RTDyldMemoryManager *memmgr) { jl_ptls_t ptls = jl_get_ptls_states(); - // This function modify linfo->fptr in GC safe region. + // This function modifies codeinst->fptr in GC safe region. // This should be fine since the GC won't scan this field. int8_t gc_state = jl_gc_safe_enter(ptls); uv_rwlock_wrlock(&threadsafe); @@ -333,8 +320,8 @@ class JuliaJITEventListener: public JITEventListener #endif // defined(_OS_X86_64_) #endif // defined(_OS_WINDOWS_) - std::vector<std::pair<jl_method_instance_t*, uintptr_t>> def_spec; - std::vector<std::pair<jl_method_instance_t*, uintptr_t>> def_invoke; + std::vector<std::pair<jl_code_instance_t*, uintptr_t>> def_spec; + std::vector<std::pair<jl_code_instance_t*, uintptr_t>> def_invoke; auto symbols = object::computeSymbolSizes(debugObj); bool first = true; for (const auto &sym_size : symbols) { @@ -370,30 +357,28 @@ class JuliaJITEventListener: public JITEventListener (uint8_t*)(uintptr_t)Addr, (size_t)Size, sName, (uint8_t*)(uintptr_t)SectionLoadAddr, (size_t)SectionSize, UnwindData); #endif - StringMap<jl_method_instance_t*>::iterator linfo_it = linfo_in_flight.find(sName); - jl_method_instance_t *linfo = NULL; - if (linfo_it != linfo_in_flight.end()) { - linfo = linfo_it->second; - if (linfo->compile_traced) - triggered_linfos.push_back(linfo); - linfo_in_flight.erase(linfo_it); - const char *F = linfo->functionObjectsDecls.functionObject; - const char *specF = linfo->functionObjectsDecls.specFunctionObject; - if (linfo->invoke == jl_fptr_trampoline) { + StringMap<jl_code_instance_t*>::iterator linfo_it = ncode_in_flight.find(sName); + jl_code_instance_t *codeinst = NULL; + if (linfo_it != ncode_in_flight.end()) { + codeinst = linfo_it->second; + ncode_in_flight.erase(linfo_it); + const char *F = codeinst->functionObjectsDecls.functionObject; + const char *specF = codeinst->functionObjectsDecls.specFunctionObject; + if (codeinst->invoke == NULL) { if (specF && sName.equals(specF)) { - def_spec.push_back({linfo, Addr}); + def_spec.push_back({codeinst, Addr}); if (!strcmp(F, "jl_fptr_args")) - def_invoke.push_back({linfo, (uintptr_t)&jl_fptr_args}); + def_invoke.push_back({codeinst, (uintptr_t)&jl_fptr_args}); else if (!strcmp(F, "jl_fptr_sparam")) - def_invoke.push_back({linfo, (uintptr_t)&jl_fptr_sparam}); + def_invoke.push_back({codeinst, (uintptr_t)&jl_fptr_sparam}); } else if (sName.equals(F)) { - def_invoke.push_back({linfo, Addr}); + def_invoke.push_back({codeinst, Addr}); } } } - if (linfo) - linfomap[Addr] = std::make_pair(Size, linfo); + if (codeinst) + linfomap[Addr] = std::make_pair(Size, codeinst->def); if (first) { ObjectInfo tmp = {&debugObj, 
(size_t)SectionSize, @@ -403,12 +388,12 @@ class JuliaJITEventListener: public JITEventListener objectmap[SectionLoadAddr] = tmp; first = false; } - // now process these in order, so we ensure the closure values are updated before removing the trampoline - for (auto &def : def_spec) - def.first->specptr.fptr = (void*)def.second; - for (auto &def : def_invoke) - def.first->invoke = (jl_callptr_t)def.second; - } + } + // now process these in order, so we ensure the closure values are updated before enabling the invoke pointer + for (auto &def : def_spec) + def.first->specptr.fptr = (void*)def.second; + for (auto &def : def_invoke) + def.first->invoke = (jl_callptr_t)def.second; uv_rwlock_wrunlock(&threadsafe); jl_gc_safe_leave(ptls, gc_state); } diff --git a/src/dump.c b/src/dump.c index 84a84032f4aa4..6749fd76f9542 100644 --- a/src/dump.c +++ b/src/dump.c @@ -98,33 +98,34 @@ extern jl_array_t *jl_module_init_order; #define TAG_TVAR 25 #define TAG_METHOD_INSTANCE 26 #define TAG_METHOD 27 -#define TAG_COMMONSYM 28 -#define TAG_NEARBYGLOBAL 29 -#define TAG_GLOBALREF 30 -#define TAG_CORE 31 -#define TAG_BASE 32 -#define TAG_BITYPENAME 33 -#define TAG_NEARBYMODULE 34 -#define TAG_INT32 35 -#define TAG_INT64 36 -#define TAG_UINT8 37 -#define TAG_VECTORTY 38 -#define TAG_PTRTY 39 -#define TAG_LONG_SSAVALUE 40 -#define TAG_LONG_METHODROOT 41 -#define TAG_SHORTER_INT64 42 -#define TAG_SHORT_INT32 43 -#define TAG_CALL1 44 -#define TAG_CALL2 45 -#define TAG_LINEINFO 46 -#define TAG_SHORT_BACKREF 47 -#define TAG_BACKREF 48 -#define TAG_UNIONALL 49 -#define TAG_GOTONODE 50 -#define TAG_QUOTENODE 51 -#define TAG_GENERAL 52 - -#define LAST_TAG 52 +#define TAG_CODE_INSTANCE 28 +#define TAG_COMMONSYM 29 +#define TAG_NEARBYGLOBAL 30 +#define TAG_GLOBALREF 31 +#define TAG_CORE 32 +#define TAG_BASE 33 +#define TAG_BITYPENAME 34 +#define TAG_NEARBYMODULE 35 +#define TAG_INT32 36 +#define TAG_INT64 37 +#define TAG_UINT8 38 +#define TAG_VECTORTY 39 +#define TAG_PTRTY 40 +#define TAG_LONG_SSAVALUE 41 +#define TAG_LONG_METHODROOT 42 +#define TAG_SHORTER_INT64 43 +#define TAG_SHORT_INT32 44 +#define TAG_CALL1 45 +#define TAG_CALL2 46 +#define TAG_LINEINFO 47 +#define TAG_SHORT_BACKREF 48 +#define TAG_BACKREF 49 +#define TAG_UNIONALL 50 +#define TAG_GOTONODE 51 +#define TAG_QUOTENODE 52 +#define TAG_GENERAL 53 + +#define LAST_TAG 53 typedef enum _DUMP_MODES { // not in the serializer at all, or @@ -805,41 +806,30 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li } else if (jl_is_method_instance(v)) { write_uint8(s->s, TAG_METHOD_INSTANCE); - jl_method_instance_t *li = (jl_method_instance_t*)v; + jl_method_instance_t *mi = (jl_method_instance_t*)v; int internal = 0; - if (li->max_world == 0 && li->min_world == 0) { - internal = 1; // not world-tracked - } - else if (!jl_is_method(li->def.method) || module_in_worklist(li->def.method->module)) { - if (li->max_world == ~(size_t)0) { - internal = 2; // update world on deserialization - } - else { - internal = 3; // garbage object :( - } - } + if (!jl_is_method(mi->def.method)) + internal = 1; + else if (module_in_worklist(mi->def.method->module)) + internal = 2; + write_uint8(s->s, internal); if (!internal) { // also flag this in the backref table as special uintptr_t *bp = (uintptr_t*)ptrhash_bp(&backref_table, v); assert(*bp != (uintptr_t)HT_NOTFOUND); *bp |= 1; } - if (li->invoke == jl_fptr_const_return) - write_uint8(s->s, internal | 4); + if (internal == 1) + jl_serialize_value(s, (jl_value_t*)mi->uninferred); + jl_serialize_value(s, 
(jl_value_t*)mi->specTypes); + if (internal) + jl_serialize_value(s, mi->def.value); else - write_uint8(s->s, internal); - jl_serialize_value(s, (jl_value_t*)li->specTypes); - if (!internal) - jl_serialize_value(s, (jl_value_t*)li->def.method->sig); - else - jl_serialize_value(s, li->def.value); + jl_serialize_value(s, (jl_value_t*)mi->def.method->sig); if (!internal) return; - jl_serialize_value(s, li->inferred); - jl_serialize_value(s, li->inferred_const); - jl_serialize_value(s, li->rettype); - jl_serialize_value(s, (jl_value_t*)li->sparam_vals); - jl_array_t *backedges = li->backedges; + jl_serialize_value(s, (jl_value_t*)mi->sparam_vals); + jl_array_t *backedges = mi->backedges; if (s->mode == MODE_MODULE && backedges) { // filter backedges to only contain pointers // to items that we will actually store (internal == 2) @@ -857,6 +847,31 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li backedges = NULL; } jl_serialize_value(s, (jl_value_t*)backedges); + jl_serialize_value(s, (jl_value_t*)mi->cache); + } + else if (jl_is_code_instance(v)) { + write_uint8(s->s, TAG_CODE_INSTANCE); + jl_code_instance_t *codeinst = (jl_code_instance_t*)v; + int validate = 0; + if (codeinst->max_world == ~(size_t)0) + validate = 1; // can check on deserialize if this cache entry is still valid + int flags = validate << 0; + if (codeinst->invoke == jl_fptr_const_return) + flags |= 1 << 2; + write_uint8(s->s, flags); + jl_serialize_value(s, (jl_value_t*)codeinst->def); + if (validate || codeinst->min_world == 0) { + jl_serialize_value(s, codeinst->inferred); + jl_serialize_value(s, codeinst->rettype_const); + jl_serialize_value(s, codeinst->rettype); + } + else { + // skip storing useless data + jl_serialize_value(s, NULL); + jl_serialize_value(s, NULL); + jl_serialize_value(s, jl_any_type); + } + jl_serialize_value(s, codeinst->next); } else if (jl_typeis(v, jl_module_type)) { jl_serialize_module(s, (jl_module_t*)v); @@ -879,7 +894,8 @@ static void jl_serialize_value_(jl_serializer_state *s, jl_value_t *v, int as_li size_t n = 0; jl_typemap_entry_t *te = (jl_typemap_entry_t*)v; while ((jl_value_t*)te != jl_nothing) { - n++; te = te->next; + n++; + te = te->next; } write_int32(s->s, n); te = (jl_typemap_entry_t*)v; @@ -1050,13 +1066,11 @@ static void jl_collect_missing_backedges_to_mod(jl_methtable_t *mt) size_t i, l = jl_array_len(backedges); for (i = 1; i < l; i += 2) { jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(backedges, i); - if (caller->max_world == ~(size_t)0) { - jl_value_t *missing_callee = jl_array_ptr_ref(backedges, i - 1); - jl_array_t **edges = (jl_array_t**)ptrhash_bp(&edges_map, (void*)caller); - if (*edges == HT_NOTFOUND) - *edges = jl_alloc_vec_any(0); - jl_array_ptr_1d_push(*edges, missing_callee); - } + jl_value_t *missing_callee = jl_array_ptr_ref(backedges, i - 1); + jl_array_t **edges = (jl_array_t**)ptrhash_bp(&edges_map, (void*)caller); + if (*edges == HT_NOTFOUND) + *edges = jl_alloc_vec_any(0); + jl_array_ptr_1d_push(*edges, missing_callee); } } } @@ -1067,16 +1081,13 @@ static void collect_backedges(jl_method_instance_t *callee) JL_GC_DISABLED { jl_array_t *backedges = callee->backedges; if (backedges) { - assert(callee->max_world == ~(size_t)0); size_t i, l = jl_array_len(backedges); for (i = 0; i < l; i++) { jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(backedges, i); - if (caller->max_world == ~(size_t)0) { - jl_array_t **edges = (jl_array_t**)ptrhash_bp(&edges_map, caller); - if (*edges == 
HT_NOTFOUND) - *edges = jl_alloc_vec_any(0); - jl_array_ptr_1d_push(*edges, (jl_value_t*)callee); - } + jl_array_t **edges = (jl_array_t**)ptrhash_bp(&edges_map, caller); + if (*edges == HT_NOTFOUND) + *edges = jl_alloc_vec_any(0); + jl_array_ptr_1d_push(*edges, (jl_value_t*)callee); } } } @@ -1652,8 +1663,8 @@ static jl_value_t *jl_deserialize_value_method(jl_serializer_state *s, jl_value_ jl_gc_wb(m, m->name); m->file = (jl_sym_t*)jl_deserialize_value(s, NULL); m->line = read_int32(s->s); - m->min_world = jl_world_counter; - m->max_world = ~(size_t)0; + m->primary_world = jl_world_counter; + m->deleted_world = ~(size_t)0; m->ambig = jl_deserialize_value(s, (jl_value_t**)&m->ambig); jl_gc_wb(m, m->ambig); m->called = read_int32(s->s); @@ -1679,7 +1690,6 @@ static jl_value_t *jl_deserialize_value_method(jl_serializer_state *s, jl_value_ jl_gc_wb(m, m->generator); m->invokes = jl_deserialize_value(s, (jl_value_t**)&m->invokes); jl_gc_wb(m, m->invokes); - m->traced = 0; JL_MUTEX_INIT(&m->writelock); return (jl_value_t*)m; } @@ -1687,69 +1697,68 @@ static jl_value_t *jl_deserialize_value_method(jl_serializer_state *s, jl_value_ static jl_value_t *jl_deserialize_value_method_instance(jl_serializer_state *s, jl_value_t **loc) JL_GC_DISABLED { int usetable = (s->mode != MODE_IR); - jl_method_instance_t *li = + jl_method_instance_t *mi = (jl_method_instance_t*)jl_gc_alloc(s->ptls, sizeof(jl_method_instance_t), jl_method_instance_type); - memset(li, 0, sizeof(jl_method_instance_t)); + memset(mi, 0, sizeof(jl_method_instance_t)); uintptr_t pos = backref_list.len; if (usetable) - arraylist_push(&backref_list, li); + arraylist_push(&backref_list, mi); int internal = read_uint8(s->s); - int constret = internal & 4; - internal &= 0x3; - if (internal == 1) { - li->min_world = 0; - li->max_world = 0; - } - else if (internal == 2) { - li->min_world = jl_world_counter; - li->max_world = ~(size_t)0; - } - else if (internal == 3) { - li->min_world = 1; - li->max_world = 0; - } - else if (internal) { - assert(0 && "corrupt deserialization state"); - abort(); - } - - li->specTypes = (jl_value_t*)jl_deserialize_value(s, (jl_value_t**)&li->specTypes); - if (li->specTypes) - jl_gc_wb(li, li->specTypes); - li->def.value = jl_deserialize_value(s, &li->def.value); - if (li->def.value) - jl_gc_wb(li, li->def.value); + mi->specTypes = (jl_value_t*)jl_deserialize_value(s, (jl_value_t**)&mi->specTypes); + jl_gc_wb(mi, mi->specTypes); + mi->def.value = jl_deserialize_value(s, &mi->def.value); + jl_gc_wb(mi, mi->def.value); if (!internal) { assert(loc != NULL && loc != HT_NOTFOUND); arraylist_push(&flagref_list, loc); arraylist_push(&flagref_list, (void*)pos); - return (jl_value_t*)li; - } - - li->inferred = jl_deserialize_value(s, &li->inferred); - jl_gc_wb(li, li->inferred); - li->inferred_const = jl_deserialize_value(s, &li->inferred_const); - if (li->inferred_const) - jl_gc_wb(li, li->inferred_const); - li->rettype = jl_deserialize_value(s, &li->rettype); - jl_gc_wb(li, li->rettype); - li->sparam_vals = (jl_svec_t*)jl_deserialize_value(s, (jl_value_t**)&li->sparam_vals); - jl_gc_wb(li, li->sparam_vals); - li->backedges = (jl_array_t*)jl_deserialize_value(s, (jl_value_t**)&li->backedges); - if (li->backedges) - jl_gc_wb(li, li->backedges); - li->functionObjectsDecls.functionObject = NULL; - li->functionObjectsDecls.specFunctionObject = NULL; - li->inInference = 0; - li->specptr.fptr = NULL; + return (jl_value_t*)mi; + } + + if (internal == 1) { + mi->uninferred = jl_deserialize_value(s, &mi->uninferred); + jl_gc_wb(mi, 
mi->uninferred); + } + mi->sparam_vals = (jl_svec_t*)jl_deserialize_value(s, (jl_value_t**)&mi->sparam_vals); + jl_gc_wb(mi, mi->sparam_vals); + mi->backedges = (jl_array_t*)jl_deserialize_value(s, (jl_value_t**)&mi->backedges); + if (mi->backedges) + jl_gc_wb(mi, mi->backedges); + mi->cache = (jl_code_instance_t*)jl_deserialize_value(s, (jl_value_t**)&mi->cache); + if (mi->cache) + jl_gc_wb(mi, mi->cache); + return (jl_value_t*)mi; +} + +static jl_value_t *jl_deserialize_value_code_instance(jl_serializer_state *s, jl_value_t **loc) JL_GC_DISABLED +{ + int usetable = (s->mode != MODE_IR); + jl_code_instance_t *codeinst = + (jl_code_instance_t*)jl_gc_alloc(s->ptls, sizeof(jl_code_instance_t), jl_code_instance_type); + memset(codeinst, 0, sizeof(jl_code_instance_t)); + if (usetable) + arraylist_push(&backref_list, codeinst); + int flags = read_uint8(s->s); + int validate = (flags >> 0) & 3; + int constret = (flags >> 2) & 1; + codeinst->def = (jl_method_instance_t*)jl_deserialize_value(s, (jl_value_t**)&codeinst->def); + jl_gc_wb(codeinst, codeinst->def); + codeinst->inferred = jl_deserialize_value(s, &codeinst->inferred); + jl_gc_wb(codeinst, codeinst->inferred); + codeinst->rettype_const = jl_deserialize_value(s, &codeinst->rettype_const); + if (codeinst->rettype_const) + jl_gc_wb(codeinst, codeinst->rettype_const); + codeinst->rettype = jl_deserialize_value(s, &codeinst->rettype); + jl_gc_wb(codeinst, codeinst->rettype); if (constret) - li->invoke = jl_fptr_const_return; - else - li->invoke = jl_fptr_trampoline; - li->compile_traced = 0; - return (jl_value_t*)li; + codeinst->invoke = jl_fptr_const_return; + codeinst->next = (jl_code_instance_t*)jl_deserialize_value(s, (jl_value_t**)&codeinst->next); + jl_gc_wb(codeinst, codeinst->next); + if (validate) + codeinst->min_world = jl_world_counter; + return (jl_value_t*)codeinst; } static jl_value_t *jl_deserialize_value_module(jl_serializer_state *s) JL_GC_DISABLED @@ -1910,14 +1919,6 @@ static jl_value_t *jl_deserialize_typemap_entry(jl_serializer_state *s) JL_GC_DI jl_typemap_entry_t* te = (jl_typemap_entry_t*)v; te->next = (jl_typemap_entry_t*)jl_nothing; // `next` is the first field jl_deserialize_struct(s, v, 1); -#ifndef NDEBUG - if (te->func.value && jl_typeis(te->func.value, jl_method_instance_type)) { - assert(((te->max_world == 0 && te->min_world == 1) || - (te->func.linfo->max_world >= te->max_world && - te->func.linfo->min_world <= te->min_world)) && - "corrupt typemap entry structure"); - } -#endif *pn = v; pn = (jl_value_t**)&te->next; n--; @@ -2084,6 +2085,8 @@ static jl_value_t *jl_deserialize_value(jl_serializer_state *s, jl_value_t **loc return jl_deserialize_value_method(s, loc); case TAG_METHOD_INSTANCE: return jl_deserialize_value_method_instance(s, loc); + case TAG_CODE_INSTANCE: + return jl_deserialize_value_code_instance(s, loc); case TAG_MODULE: return jl_deserialize_value_module(s); case TAG_SHORTER_INT64: @@ -2193,14 +2196,15 @@ static size_t lowerbound_dependent_world_set(size_t world, arraylist_t *dependen abort(); // unreachable } -void jl_method_instance_delete(jl_method_instance_t *mi); +extern int JL_DEBUG_METHOD_INVALIDATION; + static void jl_insert_backedges(jl_array_t *list, arraylist_t *dependent_worlds) { size_t i, l = jl_array_len(list); for (i = 0; i < l; i += 2) { jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(list, i); - assert(jl_is_method_instance(caller)); - assert(caller->min_world == jl_world_counter); // caller should be new + assert(jl_is_method_instance(caller) && 
jl_is_method(caller->def.method)); + assert(caller->def.method->primary_world == jl_world_counter); // caller should be new jl_array_t *callees = (jl_array_t*)jl_array_ptr_ref(list, i + 1); assert(jl_is_array(callees)); int valid = 1; @@ -2212,10 +2216,7 @@ static void jl_insert_backedges(jl_array_t *list, arraylist_t *dependent_worlds) if (jl_is_method_instance(callee)) { sig = callee_mi->specTypes; assert(!module_in_worklist(callee_mi->def.method->module)); - if (callee_mi->max_world != ~(size_t)0) { - valid = 0; - break; - } + // TODO: short-circuit, so we don't do a huge amount of excess work } else { sig = callee; @@ -2223,14 +2224,16 @@ static void jl_insert_backedges(jl_array_t *list, arraylist_t *dependent_worlds) // verify that this backedge doesn't intersect with any new methods size_t min_valid = 0; size_t max_valid = ~(size_t)0; - jl_value_t *matches = jl_matching_methods((jl_tupletype_t*)sig, 50, 1, jl_world_counter, &min_valid, &max_valid); + jl_value_t *matches = jl_matching_methods((jl_tupletype_t*)sig, -1, 1, 0, &min_valid, &max_valid); if (matches == jl_false) valid = 0; size_t k; for (k = 0; valid && k < jl_array_len(matches); k++) { jl_method_t *m = (jl_method_t*)jl_svecref(jl_array_ptr_ref(matches, k), 2); - if (lowerbound_dependent_world_set(m->min_world, dependent_worlds) != m->min_world) { - // intersection has a new method and is now probably no good, just invalidate everything now + int wasactive = (lowerbound_dependent_world_set(m->primary_world, dependent_worlds) == m->primary_world); + int nowactive = (m->deleted_world == ~(size_t)0); + if (wasactive != nowactive) { + // intersection has a new method or a method was deleted--this is now probably no good, just invalidate everything now valid = 0; } } @@ -2249,10 +2252,19 @@ static void jl_insert_backedges(jl_array_t *list, arraylist_t *dependent_worlds) jl_method_table_add_backedge(mt, callee, (jl_value_t*)caller); } } + // then enable it + jl_code_instance_t *codeinst = caller->cache; + while (codeinst) { + if (codeinst->min_world > 0) + codeinst->max_world = ~(size_t)0; + codeinst = codeinst->next; + } } else { - // otherwise delete it - jl_method_instance_delete(caller); + if (JL_DEBUG_METHOD_INVALIDATION) { + jl_static_show(JL_STDOUT, (jl_value_t*)caller); + jl_uv_puts(JL_STDOUT, "<<<\n", 4); + } } } } @@ -2518,7 +2530,7 @@ JL_DLLEXPORT jl_array_t *jl_compress_ast(jl_method_t *m, jl_code_info_t *code) return v; } -JL_DLLEXPORT jl_code_info_t *jl_uncompress_ast(jl_method_t *m, jl_array_t *data) +JL_DLLEXPORT jl_code_info_t *jl_uncompress_ast(jl_method_t *m, jl_code_instance_t *metadata, jl_array_t *data) { if (jl_is_code_info(data)) return (jl_code_info_t*)data; @@ -2585,6 +2597,12 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ast(jl_method_t *m, jl_array_t *data) jl_gc_enable(en); JL_UNLOCK(&m->writelock); // Might GC JL_GC_POP(); + if (metadata) { + code->min_world = metadata->min_world; + code->max_world = metadata->max_world; + code->rettype = metadata->rettype; + code->parent = metadata->def; + } return code; } @@ -3010,32 +3028,20 @@ static void jl_update_backref_list(jl_value_t *old, jl_value_t *_new, size_t sta } // repeatedly look up older methods until we come to one that existed -// at the time this module was serialized -static jl_method_t *jl_lookup_method_worldset(jl_methtable_t *mt, jl_datatype_t *sig, arraylist_t *dependent_worlds, size_t *max_world) +// at the time this module was serialized (e.g. 
ignoring deletion)
+static jl_method_t *jl_lookup_method_worldset(jl_methtable_t *mt, jl_datatype_t *sig, arraylist_t *dependent_worlds)
 {
     size_t world = jl_world_counter;
     jl_typemap_entry_t *entry;
-    jl_method_t *_new;
     while (1) {
         entry = jl_typemap_assoc_by_type(
-                mt->defs, (jl_value_t*)sig, NULL, /*subtype*/0, /*offs*/0, world, /*max_world_mask*/0);
-        if (!entry)
-            break;
-        _new = (jl_method_t*)entry->func.value;
-        world = lowerbound_dependent_world_set(_new->min_world, dependent_worlds);
-        if (world == _new->min_world) {
-            *max_world = entry->max_world;
+                mt->defs, (jl_value_t*)sig, NULL, /*subtype*/0, /*offs*/0, world, /*max_world_mask*/(~(size_t)0) >> 1);
+        jl_method_t *_new = (jl_method_t*)entry->func.value;
+        world = lowerbound_dependent_world_set(_new->primary_world, dependent_worlds);
+        if (world == _new->primary_world) {
             return _new;
         }
     }
-    // If we failed to find a method (perhaps due to method deletion),
-    // grab anything
-    entry = jl_typemap_assoc_by_type(
-            mt->defs, (jl_value_t*)sig, NULL, /*subtype*/0, /*offs*/0, /*world*/jl_world_counter, /*max_world_mask*/(~(size_t)0) >> 1);
-    assert(entry);
-    assert(entry->max_world != ~(size_t)0);
-    *max_world = entry->max_world;
-    return (jl_method_t*)entry->func.value;
 }

 static jl_method_t *jl_recache_method(jl_method_t *m, size_t start, arraylist_t *dependent_worlds)
@@ -3043,31 +3049,28 @@ static jl_method_t *jl_recache_method(jl_method_t *m, size_t start, arraylist_t
     jl_datatype_t *sig = (jl_datatype_t*)m->sig;
     jl_datatype_t *ftype = jl_first_argument_datatype((jl_value_t*)sig);
     jl_methtable_t *mt = ftype->name->mt;
-    size_t max_world = 0;
     jl_set_typeof(m, (void*)(intptr_t)0x30); // invalidate the old value to help catch errors
-    jl_method_t *_new = jl_lookup_method_worldset(mt, sig, dependent_worlds, &max_world);
+    jl_method_t *_new = jl_lookup_method_worldset(mt, sig, dependent_worlds);
     jl_update_backref_list((jl_value_t*)m, (jl_value_t*)_new, start);
     return _new;
 }

-static jl_method_instance_t *jl_recache_method_instance(jl_method_instance_t *li, size_t start, arraylist_t *dependent_worlds)
+static jl_method_instance_t *jl_recache_method_instance(jl_method_instance_t *mi, size_t start, arraylist_t *dependent_worlds)
 {
-    jl_datatype_t *sig = (jl_datatype_t*)li->def.value;
+    jl_datatype_t *sig = (jl_datatype_t*)mi->def.value;
     assert(jl_is_datatype(sig) || jl_is_unionall(sig));
     jl_datatype_t *ftype = jl_first_argument_datatype((jl_value_t*)sig);
     jl_methtable_t *mt = ftype->name->mt;
-    size_t max_world = 0;
-    jl_method_t *m = jl_lookup_method_worldset(mt, sig, dependent_worlds, &max_world);
-    jl_datatype_t *argtypes = (jl_datatype_t*)li->specTypes;
-    jl_set_typeof(li, (void*)(intptr_t)0x40); // invalidate the old value to help catch errors
+    jl_method_t *m = jl_lookup_method_worldset(mt, sig, dependent_worlds);
+    jl_datatype_t *argtypes = (jl_datatype_t*)mi->specTypes;
+    jl_set_typeof(mi, (void*)(intptr_t)0x40); // invalidate the old value to help catch errors
     jl_svec_t *env = jl_emptysvec;
     jl_value_t *ti = jl_type_intersection_env((jl_value_t*)argtypes, (jl_value_t*)m->sig, &env);
     //assert(ti != jl_bottom_type); (void)ti;
     if (ti == jl_bottom_type)
         env = jl_emptysvec; // the intersection may fail now if the type system had made an incorrect subtype env in the past
-    jl_method_instance_t *_new = jl_specializations_get_linfo(m, (jl_value_t*)argtypes, env, jl_world_counter < max_world ? jl_world_counter : max_world);
-    _new->max_world = max_world;
-    jl_update_backref_list((jl_value_t*)li, (jl_value_t*)_new, start);
+    jl_method_instance_t *_new = jl_specializations_get_linfo(m, (jl_value_t*)argtypes, env);
+    jl_update_backref_list((jl_value_t*)mi, (jl_value_t*)_new, start);
     return _new;
 }

@@ -3293,6 +3296,7 @@ void jl_init_serializer(void)
     deser_tag[TAG_TVAR] = (jl_value_t*)jl_tvar_type;
     deser_tag[TAG_METHOD_INSTANCE] = (jl_value_t*)jl_method_instance_type;
     deser_tag[TAG_METHOD] = (jl_value_t*)jl_method_type;
+    deser_tag[TAG_CODE_INSTANCE] = (jl_value_t*)jl_code_instance_type;
     deser_tag[TAG_GLOBALREF] = (jl_value_t*)jl_globalref_type;
     deser_tag[TAG_INT32] = (jl_value_t*)jl_int32_type;
     deser_tag[TAG_INT64] = (jl_value_t*)jl_int64_type;
@@ -3307,7 +3311,7 @@ void jl_init_serializer(void)
         deser_tag[LAST_TAG+1+i] = (jl_value_t*)vals[i];
         i += 1;
     }
-    assert(i <= 256);
+    assert(LAST_TAG+1+i < 256);

     for (i = 2; i < 256; i++) {
         if (deser_tag[i])
diff --git a/src/gf.c b/src/gf.c
index d697131520e56..c01da4554ef1e 100644
--- a/src/gf.c
+++ b/src/gf.c
@@ -43,48 +43,12 @@ JL_DLLEXPORT int8_t jl_is_in_pure_context(void)
     return ptls->in_pure_callback;
 }

-JL_DLLEXPORT void jl_trace_method(jl_method_t *m)
-{
-    assert(jl_is_method(m));
-    m->traced = 1;
-}
-
-JL_DLLEXPORT void jl_untrace_method(jl_method_t *m)
-{
-    assert(jl_is_method(m));
-    m->traced = 0;
-}
-
-JL_DLLEXPORT void jl_trace_linfo(jl_method_instance_t *linfo)
-{
-    assert(jl_is_method_instance(linfo));
-    linfo->compile_traced = 1;
-}
-
-JL_DLLEXPORT void jl_untrace_linfo(jl_method_instance_t *linfo)
-{
-    assert(jl_is_method_instance(linfo));
-    linfo->compile_traced = 0;
-}
-
-static tracer_cb jl_method_tracer = NULL;
-JL_DLLEXPORT void jl_register_method_tracer(void (*callback)(jl_method_instance_t *tracee))
-{
-    jl_method_tracer = (tracer_cb)callback;
-}
-
 tracer_cb jl_newmeth_tracer = NULL;
 JL_DLLEXPORT void jl_register_newmeth_tracer(void (*callback)(jl_method_t *tracee))
 {
     jl_newmeth_tracer = (tracer_cb)callback;
 }

-tracer_cb jl_linfo_tracer = NULL;
-JL_DLLEXPORT void jl_register_linfo_tracer(void (*callback)(jl_method_instance_t *tracee))
-{
-    jl_linfo_tracer = (tracer_cb)callback;
-}
-
 void jl_call_tracer(tracer_cb callback, jl_value_t *tracee)
 {
     jl_ptls_t ptls = jl_get_ptls_states();
@@ -123,46 +87,30 @@ static int8_t jl_cachearg_offset(jl_methtable_t *mt)
 /// ----- Insertion logic for special entries ----- ///

 // get or create the MethodInstance for a specialization
-JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(jl_method_t *m JL_PROPAGATES_ROOT, jl_value_t *type, jl_svec_t *sparams, size_t world)
+JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo(jl_method_t *m JL_PROPAGATES_ROOT, jl_value_t *type, jl_svec_t *sparams)
 {
-    assert(world >= m->min_world && world <= m->max_world && "typemap lookup is corrupted");
     JL_LOCK(&m->writelock);
     jl_typemap_entry_t *sf =
-        jl_typemap_assoc_by_type(m->specializations, type, NULL, /*subtype*/0, /*offs*/0, world, /*max_world_mask*/0);
+        jl_typemap_assoc_by_type(m->specializations, type, NULL, /*subtype*/0, /*offs*/0, 1, /*max_world_mask*/0);
     if (sf && jl_is_method_instance(sf->func.value)) {
-        jl_method_instance_t *linfo = (jl_method_instance_t*)sf->func.value;
-        assert(linfo->min_world <= sf->min_world && linfo->max_world >= sf->max_world);
         JL_UNLOCK(&m->writelock);
-        return linfo;
+        return sf->func.linfo;
     }
-    jl_method_instance_t *li = jl_get_specialized(m, type, sparams);
-    JL_GC_PUSH1(&li);
+    jl_method_instance_t *mi = jl_get_specialized(m, type, sparams);
+    JL_GC_PUSH1(&mi);
     // TODO: fuse lookup and insert steps
-    // pick an initial world that is likely to be valid both before and after inference
-    if (world > jl_world_counter) {
-        li->min_world = jl_world_counter;
-    }
-    else {
-        li->min_world = world;
-    }
-    if (world == jl_world_counter) {
-        li->max_world = m->max_world;
-    }
-    else {
-        li->max_world = world;
-    }
     jl_typemap_insert(&m->specializations, (jl_value_t*)m, (jl_tupletype_t*)type,
-            NULL, jl_emptysvec, (jl_value_t*)li, 0, &tfunc_cache,
-            li->min_world, li->max_world, NULL);
+            NULL, jl_emptysvec, (jl_value_t*)mi, 0, &tfunc_cache,
+            1, ~(size_t)0, NULL);
     JL_UNLOCK(&m->writelock);
     JL_GC_POP();
-    return li;
+    return mi;
 }

-JL_DLLEXPORT jl_value_t *jl_specializations_lookup(jl_method_t *m, jl_value_t *type, size_t world)
+JL_DLLEXPORT jl_value_t *jl_specializations_lookup(jl_method_t *m, jl_value_t *type)
 {
     jl_typemap_entry_t *sf = jl_typemap_assoc_by_type(
-            m->specializations, type, NULL, /*subtype*/0, /*offs*/0, world, /*max_world_mask*/0);
+            m->specializations, type, NULL, /*subtype*/0, /*offs*/0, 1, /*max_world_mask*/0);
     if (!sf)
         return jl_nothing;
     return sf->func.value;
@@ -180,6 +128,10 @@ JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt, jl_value_t *typ
 // ----- MethodInstance specialization instantiation ----- //

 JL_DLLEXPORT jl_method_t *jl_new_method_uninit(jl_module_t*);
+JL_DLLEXPORT jl_code_instance_t* jl_set_method_inferred(
+        jl_method_instance_t *mi, jl_value_t *rettype,
+        jl_value_t *inferred_const, jl_value_t *inferred,
+        int32_t const_flags, size_t min_world, size_t max_world);

 void jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_args_t fptr) JL_GC_DISABLED
 {
@@ -189,17 +141,8 @@ void jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_args_t fptr
         jl_set_const(jl_core_module, sname, f);
         dt = (jl_datatype_t*)jl_typeof(f);
     }
-    jl_method_instance_t *li = jl_new_method_instance_uninit();
-    li->invoke = jl_fptr_args;
-    li->specptr.fptr1 = fptr;
-    li->specTypes = (jl_value_t*)jl_anytuple_type;
-    li->min_world = 1;
-    li->max_world = ~(size_t)0;
-    JL_GC_PUSH1(&li);
     jl_method_t *m = jl_new_method_uninit(jl_core_module);
-    li->def.method = m;
-    jl_gc_wb(li, m);
     m->name = sname;
     m->module = jl_core_module;
     m->isva = 1;
@@ -207,70 +150,75 @@ void jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_args_t fptr
     m->sig = (jl_value_t*)jl_anytuple_type;
     m->slot_syms = jl_an_empty_string;

+    JL_GC_PUSH1(&m);
+    jl_method_instance_t *mi = jl_get_specialized(m, (jl_value_t*)jl_anytuple_type, jl_emptysvec);
+    m->unspecialized = mi;
+    jl_gc_wb(m, mi);
+
+    jl_code_instance_t *codeinst = jl_set_method_inferred(mi, (jl_value_t*)jl_any_type, jl_nothing, jl_nothing,
+        0, 1, ~(size_t)0);
+    codeinst->specptr.fptr1 = fptr;
+    codeinst->invoke = jl_fptr_args;
+
     jl_methtable_t *mt = dt->name->mt;
     jl_typemap_insert(&mt->cache, (jl_value_t*)mt, jl_anytuple_type,
-        NULL, jl_emptysvec, (jl_value_t*)li, 0, &lambda_cache, 1, ~(size_t)0, NULL);
+        NULL, jl_emptysvec, (jl_value_t*)mi, 0, &lambda_cache, 1, ~(size_t)0, NULL);
     JL_GC_POP();
 }
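
The builtin wiring above is the simplest complete instance of the new object model: one Method, one MethodInstance stored in `m->unspecialized`, and one always-valid CodeInstance (world range 1 to ~(size_t)0) carrying the callable pointers that used to live on the MethodInstance. A minimal sketch of consuming that layout, assuming only the fields this patch introduces:

    // Illustrative sketch (not part of the patch): call a builtin through the
    // new Method -> MethodInstance -> CodeInstance chain set up above.
    jl_value_t *call_builtin(jl_method_t *m, jl_value_t **args, uint32_t nargs)
    {
        jl_method_instance_t *mi = m->unspecialized;  // set by jl_mk_builtin_func
        jl_code_instance_t *codeinst = mi->cache;     // always-valid entry [1, ~(size_t)0]
        assert(codeinst && codeinst->invoke == jl_fptr_args);
        // jl_fptr_args strips the function object off the front of the argument list
        return codeinst->specptr.fptr1(args[0], &args[1], nargs - 1);
    }
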
-// run type inference on lambda "li" for given argument types.
-// returns the inferred source, and may cache the result in li
-// if successful, also updates the li argument to describe the validity of this src
+// run type inference on lambda "mi" for given argument types.
+// returns the inferred source, and may cache the result in mi
+// if successful, also updates the mi argument to describe the validity of this src
 // if inference doesn't occur (or can't finish), returns NULL instead
-jl_code_info_t *jl_type_infer(jl_method_instance_t **pli JL_ROOTS_TEMPORARILY, size_t world, int force)
+jl_code_info_t *jl_type_infer(jl_method_instance_t *mi, size_t world, int force)
 {
     JL_TIMING(INFERENCE);
     if (jl_typeinf_func == NULL)
         return NULL;
+    if (jl_is_method(mi->def.method) && mi->def.method->unspecialized == mi)
+        return NULL; // avoid inferring the unspecialized method
     static int in_inference;
     if (in_inference > 2)
         return NULL;

     jl_code_info_t *src = NULL;
 #ifdef ENABLE_INFERENCE
-    jl_method_instance_t *li = *pli;
-    if (li->inInference && !force)
+    if (mi->inInference && !force)
         return NULL;

     jl_value_t **fargs;
     JL_GC_PUSHARGS(fargs, 3);
     fargs[0] = (jl_value_t*)jl_typeinf_func;
-    fargs[1] = (jl_value_t*)li;
+    fargs[1] = (jl_value_t*)mi;
     fargs[2] = jl_box_ulong(world);
 #ifdef TRACE_INFERENCE
-    if (li->specTypes != (jl_value_t*)jl_emptytuple_type) {
+    if (mi->specTypes != (jl_value_t*)jl_emptytuple_type) {
         jl_printf(JL_STDERR,"inference on ");
-        jl_static_show_func_sig(JL_STDERR, (jl_value_t*)li->specTypes);
+        jl_static_show_func_sig(JL_STDERR, (jl_value_t*)mi->specTypes);
         jl_printf(JL_STDERR, "\n");
     }
 #endif
     jl_ptls_t ptls = jl_get_ptls_states();
     size_t last_age = ptls->world_age;
     ptls->world_age = jl_typeinf_world;
-    li->inInference = 1;
+    mi->inInference = 1;
     in_inference++;
-    jl_svec_t *linfo_src;
     JL_TRY {
-        linfo_src = (jl_svec_t*)jl_apply(fargs, 3);
+        src = (jl_code_info_t*)jl_apply(fargs, 3);
     }
     JL_CATCH {
         jl_printf(JL_STDERR, "Internal error: encountered unexpected error in runtime:\n");
         jl_static_show(JL_STDERR, jl_current_exception());
         jl_printf(JL_STDERR, "\n");
         jlbacktrace(); // written to STDERR_FILENO
-        linfo_src = NULL;
+        src = NULL;
     }
     ptls->world_age = last_age;
     in_inference--;
-    li->inInference = 0;
+    mi->inInference = 0;

-    if (linfo_src && jl_is_svec(linfo_src) && jl_svec_len(linfo_src) == 2) {
-        jl_value_t *mi = jl_svecref(linfo_src, 0);
-        jl_value_t *ci = jl_svecref(linfo_src, 1);
-        if (jl_is_method_instance(mi) && jl_is_code_info(ci)) {
-            *pli = (jl_method_instance_t*)mi;
-            src = (jl_code_info_t*)ci;
-        }
+    if (src && !jl_is_code_info(src)) {
+        src = NULL;
     }
     JL_GC_POP();
 #endif
@@ -287,157 +235,83 @@ JL_DLLEXPORT jl_value_t *jl_call_in_typeinf_world(jl_value_t **args, int nargs)
     return ret;
 }

-int jl_is_rettype_inferred(jl_method_instance_t *li) JL_NOTSAFEPOINT
-{
-    if (!li->inferred)
-        return 0;
-    if (jl_is_code_info(li->inferred) && !((jl_code_info_t*)li->inferred)->inferred)
-        return 0;
-    return 1;
-}
-
-struct set_world {
-    jl_method_instance_t *replaced;
-    size_t world;
-};
-
-static int set_max_world2(jl_typemap_entry_t *entry, void *closure0)
+JL_DLLEXPORT jl_code_instance_t *jl_rettype_inferred(jl_method_instance_t *mi, size_t min_world, size_t max_world) JL_NOTSAFEPOINT
 {
-    struct set_world *closure = (struct set_world*)closure0;
-    // entry->max_world should be <= closure->replaced->max_world
-    if (entry->func.linfo == closure->replaced && entry->max_world > closure->world) {
-        entry->max_world = closure->world;
+    jl_code_instance_t *codeinst = mi->cache;
+    while (codeinst) {
+        if (codeinst->min_world <= min_world && max_world <= codeinst->max_world) {
+            jl_value_t *code = codeinst->inferred;
+            if (code && (code == jl_nothing || jl_ast_flag_inferred((jl_array_t*)code)))
+                return codeinst;
+        }
+        codeinst = codeinst->next;
     }
-    return 1;
+    return NULL;
 }

-static int set_min_world2(jl_typemap_entry_t *entry, void *closure0)
-{
-    struct set_world *closure = (struct set_world*)closure0;
-    // entry->min_world should be >= closure->replaced->min_world and >= closure->world
-    if (entry->func.linfo == closure->replaced) {
-        entry->min_world = closure->world;
-    }
-    return 1;
-}
-
-static void update_world_bound(jl_method_instance_t *replaced, jl_typemap_visitor_fptr fptr, size_t world)
+JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred(
+        jl_method_instance_t *mi, jl_value_t *rettype,
+        size_t min_world, size_t max_world)
 {
-    struct set_world update;
-    update.replaced = replaced;
-    update.world = world;
-
-    jl_method_t *m = replaced->def.method;
-    // update the world-valid in the specializations caches
-    jl_typemap_visitor(m->specializations, fptr, (void*)&update);
-    // update the world-valid in the invoke cache
-    if (m->invokes != NULL)
-        jl_typemap_visitor(m->invokes, fptr, (void*)&update);
-    // update the world-valid in the gf cache
-    jl_datatype_t *gf = jl_first_argument_datatype((jl_value_t*)m->sig);
-    assert(jl_is_datatype(gf) && gf->name->mt && "method signature invalid?");
-    jl_typemap_visitor(gf->name->mt->cache, fptr, (void*)&update);
+    jl_code_instance_t *codeinst = mi->cache;
+    while (codeinst) {
+        if (codeinst->min_world == min_world &&
+            codeinst->max_world == max_world &&
+            jl_egal(codeinst->rettype, rettype)) {
+            return codeinst;
+        }
+        codeinst = codeinst->next;
+    }
+    return jl_set_method_inferred(
+        mi, rettype, NULL, NULL,
+        0, min_world, max_world);
 }

-JL_DLLEXPORT jl_method_instance_t* jl_set_method_inferred(
-        jl_method_instance_t *li, jl_value_t *rettype,
+JL_DLLEXPORT jl_code_instance_t *jl_set_method_inferred(
+        jl_method_instance_t *mi, jl_value_t *rettype,
         jl_value_t *inferred_const, jl_value_t *inferred,
-        int32_t const_flags, size_t min_world, size_t max_world)
+        int32_t const_flags, size_t min_world, size_t max_world
+        /*, jl_array_t *edges, int absolute_max*/)
 {
-    JL_GC_PUSH1(&li);
+    jl_ptls_t ptls = jl_get_ptls_states();
     assert(min_world <= max_world && "attempting to set invalid world constraints");
-    assert(li->inInference && "shouldn't be caching an inference result for a MethodInstance that wasn't being inferred");
-    if (min_world != li->min_world || max_world != li->max_world) {
-        if (!jl_is_method(li->def.method)) {
-            // thunks don't have multiple references, so just update in-place
-            li->min_world = min_world;
-            li->max_world = max_world;
-        }
-        else {
-            JL_LOCK(&li->def.method->writelock);
-            assert(min_world >= li->def.method->min_world);
-            assert(max_world <= li->def.method->max_world);
-            int isinferred = jl_is_rettype_inferred(li);
-            if (!isinferred && li->min_world >= min_world && li->max_world <= max_world) {
-                // expand the current (uninferred) entry to cover the full inferred range
-                // only update the specializations though, since the method table may have other
-                // reasons for needing a narrower applicability range
-                struct set_world update;
-                update.replaced = li;
-                if (li->min_world != min_world) {
-                    li->min_world = min_world;
-                    update.world = min_world;
-                    jl_typemap_visitor(li->def.method->specializations, set_min_world2, (void*)&update);
-                }
-                if (li->max_world != max_world) {
-                    li->max_world = max_world;
-                    update.world = max_world;
-                    jl_typemap_visitor(li->def.method->specializations, set_max_world2, (void*)&update);
-                }
-            }
-            else {
-                // clip applicability of old method instance (uninferred or inferred)
-                // to make it easier to find the inferred method
-                // (even though the real applicability was unchanged)
-                // there are 6(!) regions here to consider + boundary conditions for each
-                if (li->max_world >= min_world && li->min_world <= max_world) {
-                    // there is a non-zero overlap between [li->min, li->max] and [min, max]
-                    // there are now 4 regions left to consider
-                    // TODO: also take into account li->def.method->world range when computing preferred division
-                    if (li->max_world > max_world) {
-                        // prefer making it applicable to future ages,
-                        // as those are more likely to be useful
-                        update_world_bound(li, set_min_world2, max_world + 1);
-                    }
-                    else if (li->min_world < min_world) {
-                        assert(min_world > 1 && "logic violation: min(li->min_world) == 1 (by construction), so min(min_world) == 2");
-                        update_world_bound(li, set_max_world2, min_world - 1);
-                    }
-                    else {
-                        // old inferred li is fully covered by new inference result, so just delete it
-                        assert(isinferred);
-                        update_world_bound(li, set_max_world2, li->min_world - 1);
-                    }
-                }
-
-                // build a new entry to describe the new (inferred) applicability
-                li = jl_get_specialized(li->def.method, li->specTypes, li->sparam_vals);
-                li->min_world = min_world;
-                li->max_world = max_world;
-                jl_typemap_insert(&li->def.method->specializations, li->def.value,
-                        (jl_tupletype_t*)li->specTypes, NULL, jl_emptysvec,
-                        (jl_value_t*)li, 0, &tfunc_cache,
-                        li->min_world, li->max_world, NULL);
-            }
-            JL_UNLOCK(&li->def.method->writelock);
-        }
-    }
-
-    // changing rettype changes the llvm signature,
-    // so clear all of the llvm state at the same time
-    li->invoke = jl_fptr_trampoline;
-    li->functionObjectsDecls.functionObject = NULL;
-    li->functionObjectsDecls.specFunctionObject = NULL;
-    li->rettype = rettype;
-    jl_gc_wb(li, rettype);
-    li->inferred = inferred;
-    jl_gc_wb(li, inferred);
-    if (const_flags & 2) {
-        li->inferred_const = inferred_const;
-        jl_gc_wb(li, inferred_const);
-    }
-    if (const_flags & 1) {
+    jl_code_instance_t *codeinst = (jl_code_instance_t*)jl_gc_alloc(ptls, sizeof(jl_code_instance_t),
+            jl_code_instance_type);
+    JL_GC_PUSH1(&codeinst);
+    codeinst->def = mi;
+    codeinst->min_world = min_world;
+    codeinst->max_world = max_world;
+    codeinst->functionObjectsDecls.functionObject = NULL;
+    codeinst->functionObjectsDecls.specFunctionObject = NULL;
+    codeinst->rettype = rettype;
+    codeinst->inferred = inferred;
+    //codeinst->edges = NULL;
+    if ((const_flags & 2) == 0)
+        inferred_const = NULL;
+    codeinst->rettype_const = inferred_const;
+    codeinst->invoke = NULL;
+    if ((const_flags & 1) != 0) {
         assert(const_flags & 2);
-        li->invoke = jl_fptr_const_return;
-    }
-    li->specptr.fptr = NULL;
+        codeinst->invoke = jl_fptr_const_return;
+    }
+    codeinst->specptr.fptr = NULL;
+    if (jl_is_method(mi->def.method))
+        JL_LOCK(&mi->def.method->writelock);
+    codeinst->next = mi->cache;
+    mi->cache = codeinst;
+    jl_gc_wb(mi, codeinst);
+    if (jl_is_method(mi->def.method))
+        JL_UNLOCK(&mi->def.method->writelock);
    JL_GC_POP();
-    return li;
+    return codeinst;
 }
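
jl_set_method_inferred never mutates an existing entry; it pushes a fresh CodeInstance onto the MethodInstance's singly linked `cache` list, so every query elsewhere reduces to a list walk with a world-interval containment test. The same idiom in isolation, assuming only the `cache`/`next`/`min_world`/`max_world` fields added by this patch:

    // Illustrative sketch (not part of the patch): find any cache entry whose
    // validity interval covers the query interval [min_world, max_world].
    static jl_code_instance_t *find_covering(jl_method_instance_t *mi,
                                             size_t min_world, size_t max_world)
    {
        for (jl_code_instance_t *ci = mi->cache; ci; ci = ci->next) {
            if (ci->min_world <= min_world && max_world <= ci->max_world)
                return ci; // usable: valid over the whole queried range
        }
        return NULL; // caller must infer or compile a new CodeInstance
    }
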
 static int get_spec_unspec_list(jl_typemap_entry_t *l, void *closure)
 {
-    if (jl_is_method_instance(l->func.value) && !jl_is_rettype_inferred(l->func.linfo))
+    jl_method_instance_t *mi = l->func.linfo;
+    assert(jl_is_method_instance(mi));
+    if (!jl_rettype_inferred(mi, jl_world_counter, jl_world_counter))
         jl_array_ptr_1d_push((jl_array_t*)closure, l->func.value);
     return 1;
 }
@@ -521,9 +395,9 @@ size_t jl_typeinf_world = 0;
 JL_DLLEXPORT void jl_set_typeinf_func(jl_value_t *f)
 {
     jl_typeinf_func = (jl_function_t*)f;
+    jl_typeinf_world = jl_get_tls_world_age();
+    ++jl_world_counter; // make type-inference the only thing in this world
     if (jl_typeinf_world == 0) {
-        jl_typeinf_world = jl_get_tls_world_age();
-        ++jl_world_counter; // make type-inference the only thing in this world
         // give type inference a chance to see all of these
         // TODO: also reinfer if max_world != ~(size_t)0
         jl_array_t *unspec = jl_alloc_vec_any(0);
@@ -531,9 +405,9 @@ JL_DLLEXPORT void jl_set_typeinf_func(jl_value_t *f)
         jl_foreach_reachable_mtable(reset_mt_caches, (void*)unspec);
         size_t i, l;
         for (i = 0, l = jl_array_len(unspec); i < l; i++) {
-            jl_method_instance_t *li = (jl_method_instance_t*)jl_array_ptr_ref(unspec, i);
-            if (!jl_is_rettype_inferred(li))
-                jl_type_infer(&li, jl_world_counter, 1);
+            jl_method_instance_t *mi = (jl_method_instance_t*)jl_array_ptr_ref(unspec, i);
+            if (!jl_rettype_inferred(mi, jl_world_counter, jl_world_counter))
+                jl_type_infer(mi, jl_world_counter, 1);
         }
         JL_GC_POP();
     }
@@ -969,14 +843,13 @@ static jl_method_instance_t *cache_method(
         jl_tupletype_t *tt, // the original tupletype of the signature
         jl_method_t *definition,
         size_t world,
-        jl_svec_t *sparams,
-        int allow_exec)
+        jl_svec_t *sparams)
 {
     // caller must hold the mt->writelock
     // short-circuit (now that we hold the lock) if this entry is already present
-    jl_typemap_entry_t *entry = jl_typemap_assoc_by_type(*cache, (jl_value_t*)tt, NULL, /*subtype*/1, jl_cachearg_offset(mt), world, /*max_world_mask*/0);
+    jl_typemap_entry_t *entry = jl_typemap_assoc_by_type(*cache, (jl_value_t*)tt, NULL, /*subtype*/1, mt ? jl_cachearg_offset(mt) : 1, world, /*max_world_mask*/0);
     if (entry && entry->func.value)
-        return (jl_method_instance_t*)entry->func.value;
+        return entry->func.linfo;

     jl_value_t *temp = NULL;
     jl_value_t *temp2 = NULL;
@@ -987,7 +860,7 @@ static jl_method_instance_t *cache_method(
     int cache_with_orig = 1;
     jl_tupletype_t *compilationsig = tt;
-    intptr_t nspec = (mt == jl_type_type_mt ? definition->nargs + 1 : mt->max_args + 2);
+    intptr_t nspec = (mt == NULL || mt == jl_type_type_mt ? definition->nargs + 1 : mt->max_args + 2);
     jl_compilation_sig(tt, sparams, definition, nspec, &newparams);
     if (newparams) {
         cache_with_orig = 0;
@@ -998,13 +871,13 @@ static jl_method_instance_t *cache_method(
         // we might choose a replacement type that's preferable but not strictly better
     }
     // TODO: maybe assert(jl_isa_compileable_sig(compilationsig, definition));
-    newmeth = jl_specializations_get_linfo(definition, (jl_value_t*)compilationsig, sparams, world);
+    newmeth = jl_specializations_get_linfo(definition, (jl_value_t*)compilationsig, sparams);

     jl_tupletype_t *cachett = tt;
     jl_svec_t* guardsigs = jl_emptysvec;
-    size_t min_valid = definition->min_world;
-    size_t max_valid = definition->max_world;
-    if (!cache_with_orig) {
+    size_t min_valid = 1;
+    size_t max_valid = ~(size_t)0;
+    if (!cache_with_orig && mt) {
         // now examine what will happen if we chose to use this sig in the cache
         // TODO: should we first check `compilationsig <: definition`?
         temp = ml_matches(mt->defs, 0, compilationsig, -1, 0, world, &min_valid, &max_valid); // TODO: use MAX_UNSPECIALIZED_CONFLICTS?
@@ -1058,17 +931,21 @@ static jl_method_instance_t *cache_method(
             }
         }
     }
-        if (!cache_with_orig) {
+        if (cache_with_orig) {
+            min_valid = 1;
+            max_valid = ~(size_t)0;
+        }
+        else {
             // determined above that there's no ambiguity in also using compilationsig as the cacheablesig
             cachett = compilationsig;
         }
     }
-
-    // here we infer types and specialize the method
-    if (newmeth->min_world > min_valid)
-        min_valid = newmeth->min_world;
-    if (newmeth->max_world < max_valid)
-        max_valid = newmeth->max_world;
+    if (cache_with_orig && mt) {
+        // now examine defs to determine the min/max-valid range for this lookup result
+        (void)ml_matches(mt->defs, 0, cachett, -1, 0, world, &min_valid, &max_valid);
+    }
+    assert(mt == NULL || min_valid > 1);

     // now scan `cachett` and ensure that `Type{T}` in the cache will be matched exactly by `typeof(T)`
     // and also reduce the complexity of rejecting this entry in the cache
@@ -1109,16 +986,14 @@ static jl_method_instance_t *cache_method(
     //}

     jl_typemap_insert(cache, parent, cachett, simplett, guardsigs,
-            (jl_value_t*)newmeth, jl_cachearg_offset(mt), &lambda_cache,
+            (jl_value_t*)newmeth, mt ? jl_cachearg_offset(mt) : 1, &lambda_cache,
             min_valid, max_valid, NULL);

-    if (definition->traced && jl_method_tracer && allow_exec)
-        jl_call_tracer(jl_method_tracer, (jl_value_t*)newmeth);
     JL_GC_POP();
     return newmeth;
 }

-static jl_method_instance_t *jl_mt_assoc_by_type(jl_methtable_t *mt, jl_datatype_t *tt, int mt_cache, int allow_exec, size_t world)
+static jl_method_instance_t *jl_mt_assoc_by_type(jl_methtable_t *mt, jl_datatype_t *tt, int mt_cache, size_t world)
 {
     // TODO: Merge with jl_dump_compiles?
     static ios_t f_precompile;
@@ -1128,8 +1003,6 @@ static jl_method_instance_t *jl_mt_assoc_by_type(jl_methtable_t *mt, jl_datatype
     jl_typemap_entry_t *entry = NULL;
     entry = jl_typemap_assoc_by_type(mt->cache, (jl_value_t*)tt, NULL, /*subtype*/1, jl_cachearg_offset(mt), world, /*max_world_mask*/0);
     if (entry && entry->func.value) {
-        assert(entry->func.linfo->min_world <= entry->min_world && entry->func.linfo->max_world >= entry->max_world &&
-                "typemap consistency error: MethodInstance doesn't apply to full range of its entry");
         return entry->func.linfo;
     }

@@ -1166,11 +1039,10 @@ static jl_method_instance_t *jl_mt_assoc_by_type(jl_methtable_t *mt, jl_datatype
             jl_compilation_sig(tt, env, m, nspec, &newparams);
             if (newparams)
                 tt = jl_apply_tuple_type(newparams);
-            nf = jl_specializations_get_linfo(m, (jl_value_t*)tt, env, world);
-            assert(nf->min_world <= world && nf->max_world >= world);
+            nf = jl_specializations_get_linfo(m, (jl_value_t*)tt, env);
         }
         else {
-            nf = cache_method(mt, &mt->cache, (jl_value_t*)mt, tt, m, world, env, allow_exec);
+            nf = cache_method(mt, &mt->cache, (jl_value_t*)mt, tt, m, world, env);
         }
     }
 }
@@ -1260,11 +1132,11 @@ static int check_ambiguous_visitor(jl_typemap_entry_t *oldentry, struct typemap_
             jl_gc_wb(m, m->ambig);
         }
         if (mambig->ambig == jl_nothing) {
-            mambig->ambig = (jl_value_t*) jl_alloc_vec_any(0);
+            mambig->ambig = (jl_value_t*)jl_alloc_vec_any(0);
             jl_gc_wb(mambig, mambig->ambig);
         }
-        jl_array_ptr_1d_push((jl_array_t*) m->ambig, (jl_value_t*) mambig);
-        jl_array_ptr_1d_push((jl_array_t*) mambig->ambig, (jl_value_t*) m);
+        jl_array_ptr_1d_push((jl_array_t*)m->ambig, (jl_value_t*)oldentry);
+        jl_array_ptr_1d_push((jl_array_t*)mambig->ambig, (jl_value_t*)closure->newentry);
         if (eager_ambiguity_printing) {
             JL_STREAM *s = JL_STDERR;
             jl_printf(s, "WARNING: New definition \n ");
@@ -1282,16 +1154,16 @@ static int check_ambiguous_visitor(jl_typemap_entry_t *oldentry, struct typemap_
         // record that this method definition is being partially replaced
         // (either with a real definition, or an ambiguity error)
         if (closure->shadowed == NULL) {
-            closure->shadowed = oldentry->func.value;
+            closure->shadowed = (jl_value_t*)oldentry;
         }
         else if (!jl_is_array(closure->shadowed)) {
             jl_array_t *list = jl_alloc_vec_any(2);
             jl_array_ptr_set(list, 0, closure->shadowed);
-            jl_array_ptr_set(list, 1, oldentry->func.value);
+            jl_array_ptr_set(list, 1, (jl_value_t*)oldentry);
             closure->shadowed = (jl_value_t*)list;
         }
         else {
-            jl_array_ptr_1d_push((jl_array_t*)closure->shadowed, oldentry->func.value);
+            jl_array_ptr_1d_push((jl_array_t*)closure->shadowed, (jl_value_t*)oldentry);
         }
     }
     return 1;
@@ -1342,12 +1214,12 @@ static int check_disabled_ambiguous_visitor(jl_typemap_entry_t *oldentry, struct
     int i, l = jl_array_len(closure->shadowed);
     for (i = 0; i < l; i++) {
-        jl_method_t *mth = (jl_method_t*)jl_array_ptr_ref(closure->shadowed, i);
-        jl_value_t *isect2 = jl_type_intersection(mth->sig, (jl_value_t*)sig);
+        jl_typemap_entry_t *mth = (jl_typemap_entry_t*)jl_array_ptr_ref(closure->shadowed, i);
+        jl_value_t *isect2 = jl_type_intersection((jl_value_t*)mth->sig, (jl_value_t*)sig);
         // see if the intersection was covered by precisely the disabled method
         // that means we now need to record the ambiguity
         if (jl_types_equal(isect, isect2)) {
-            jl_method_t *mambig = mth;
+            jl_method_t *mambig = mth->func.method;
             jl_method_t *m = oldentry->func.method;
             if (m->ambig == jl_nothing) {
                 m->ambig = (jl_value_t*) jl_alloc_vec_any(0);
@@ -1357,12 +1229,12 @@ static int check_disabled_ambiguous_visitor(jl_typemap_entry_t *oldentry, struct
                 mambig->ambig = (jl_value_t*) jl_alloc_vec_any(0);
                 jl_gc_wb(mambig, mambig->ambig);
             }
-            jl_array_ptr_1d_push((jl_array_t*) m->ambig, (jl_value_t*) mambig);
-            jl_array_ptr_1d_push((jl_array_t*) mambig->ambig, (jl_value_t*) m);
+            jl_array_ptr_1d_push((jl_array_t*)m->ambig, (jl_value_t*)mth);
+            jl_array_ptr_1d_push((jl_array_t*)mambig->ambig, (jl_value_t*)oldentry);
         }
     }

-    jl_array_ptr_1d_push((jl_array_t*)closure->shadowed, oldentry->func.value);
+    jl_array_ptr_1d_push((jl_array_t*)closure->shadowed, (jl_value_t*)oldentry);
     return 1;
 }

@@ -1373,8 +1245,8 @@ static void method_overwrite(jl_typemap_entry_t *newentry, jl_method_t *oldvalue
     jl_method_t *method = (jl_method_t*)newentry->func.method;
     jl_module_t *newmod = method->module;
     jl_module_t *oldmod = oldvalue->module;
+    JL_STREAM *s = JL_STDERR;
     if (jl_options.warn_overwrite == JL_OPTIONS_WARN_OVERWRITE_ON) {
-        JL_STREAM *s = JL_STDERR;
         jl_printf(s, "WARNING: Method definition ");
         jl_static_show_func_sig(s, (jl_value_t*)newentry->sig);
         jl_printf(s, " in module %s", jl_symbol_name(oldmod->name));
@@ -1386,6 +1258,8 @@ static void method_overwrite(jl_typemap_entry_t *newentry, jl_method_t *oldvalue
         jl_printf(s, ".\n");
         jl_uv_flush(s);
     }
+    if (jl_options.incremental && jl_generating_output())
+        jl_printf(JL_STDERR, " ** incremental compilation may be fatally broken for this module **\n\n");
 }

 static void update_max_args(jl_methtable_t *mt, jl_value_t *type)
@@ -1401,36 +1275,40 @@ static void update_max_args(jl_methtable_t *mt, jl_value_t *type)
         mt->max_args = na;
 }

-static int JL_DEBUG_METHOD_INVALIDATION = 0;
+int JL_DEBUG_METHOD_INVALIDATION = 0;
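
Invalidation below now has two parts per instance: close the world window of every still-open CodeInstance, then recurse through the backedge graph, detaching each backedge array before recursing so cycles terminate. The same traversal shape in a hedged sketch, without the locking and debug output (field names as introduced by this patch):

    // Illustrative sketch (not part of the patch): the shape of recursive
    // backedge invalidation used by invalidate_method_instance.
    static void cap_and_recurse(jl_method_instance_t *mi, size_t max_world)
    {
        for (jl_code_instance_t *ci = mi->cache; ci; ci = ci->next)
            if (ci->max_world == ~(size_t)0) // still open-ended: close it now
                ci->max_world = max_world;
        jl_array_t *backedges = mi->backedges;
        mi->backedges = NULL;                // detach first so cycles terminate
        if (backedges) {
            size_t i, l = jl_array_len(backedges);
            for (i = 0; i < l; i++)
                cap_and_recurse((jl_method_instance_t*)jl_array_ptr_ref(backedges, i), max_world);
        }
    }
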
-// invalidate cached methods that had an edge to a replaced method
+// recursively invalidate cached methods that had an edge to a replaced method
 static void invalidate_method_instance(jl_method_instance_t *replaced, size_t max_world, int depth)
 {
+    if (JL_DEBUG_METHOD_INVALIDATION) {
+        int d0 = depth;
+        while (d0-- > 0)
+            jl_uv_puts(JL_STDOUT, " ", 1);
+        jl_static_show(JL_STDOUT, (jl_value_t*)replaced);
+        jl_uv_puts(JL_STDOUT, "\n", 1);
+    }
     if (!jl_is_method(replaced->def.method))
-        return;
+        return; // shouldn't happen, but better to be safe
     JL_LOCK_NOGC(&replaced->def.method->writelock);
+    jl_code_instance_t *codeinst = replaced->cache;
+    while (codeinst) {
+        if (codeinst->max_world == ~(size_t)0) {
+            assert(codeinst->min_world - 1 <= max_world && "attempting to set illogical world constraints (probable race condition)");
+            codeinst->max_world = max_world;
+        }
+        assert(codeinst->max_world <= max_world);
+        codeinst = codeinst->next;
+    }
+    // recurse to all backedges to update their valid range also
     jl_array_t *backedges = replaced->backedges;
-    if (replaced->max_world > max_world) {
-        // recurse to all backedges to update their valid range also
-        assert(replaced->min_world - 1 <= max_world && "attempting to set invalid world constraints");
-        if (JL_DEBUG_METHOD_INVALIDATION) {
-            int d0 = depth;
-            while (d0-- > 0)
-                jl_uv_puts(JL_STDOUT, " ", 1);
-            jl_static_show(JL_STDOUT, (jl_value_t*)replaced);
-            jl_uv_puts(JL_STDOUT, "\n", 1);
-        }
-        replaced->max_world = max_world;
-        update_world_bound(replaced, set_max_world2, max_world);
-        if (backedges) {
-            size_t i, l = jl_array_len(backedges);
-            for (i = 0; i < l; i++) {
-                jl_method_instance_t *replaced = (jl_method_instance_t*)jl_array_ptr_ref(backedges, i);
-                invalidate_method_instance(replaced, max_world, depth + 1);
-            }
+    if (backedges) {
+        replaced->backedges = NULL;
+        size_t i, l = jl_array_len(backedges);
+        for (i = 0; i < l; i++) {
+            jl_method_instance_t *replaced = (jl_method_instance_t*)jl_array_ptr_ref(backedges, i);
+            invalidate_method_instance(replaced, max_world, depth + 1);
         }
     }
-    replaced->backedges = NULL;
     JL_UNLOCK_NOGC(&replaced->def.method->writelock);
 }

@@ -1443,48 +1321,26 @@ struct invalidate_conflicting_env {
 static int invalidate_backedges(jl_typemap_entry_t *oldentry, struct typemap_intersection_env *closure0)
 {
     struct invalidate_conflicting_env *closure = container_of(closure0, struct invalidate_conflicting_env, match);
-    if (oldentry->max_world > closure->max_world) {
-        jl_method_instance_t *replaced_linfo = oldentry->func.linfo;
-        {
-            struct set_world def;
-            def.replaced = oldentry->func.linfo;
-            def.world = closure->max_world;
-            jl_method_t *m = def.replaced->def.method;
-
-            // truncate the max-valid in the invoke cache
-            if (m->invokes != NULL)
-                jl_typemap_visitor(m->invokes, set_max_world2, (void*)&def);
-            // invalidate mt cache entries
-            jl_datatype_t *gf = jl_first_argument_datatype((jl_value_t*)m->sig);
-            assert(jl_is_datatype(gf) && gf->name->mt && "method signature invalid?");
-            jl_typemap_visitor(gf->name->mt->cache, set_max_world2, (void*)&def);
-            if (JL_DEBUG_METHOD_INVALIDATION) {
-                jl_static_show(JL_STDOUT, (jl_value_t*)def.replaced);
-                jl_uv_puts(JL_STDOUT, "\n", 1);
-            }
-        }
-
-        // invalidate backedges
-        JL_LOCK_NOGC(&replaced_linfo->def.method->writelock);
-        jl_array_t *backedges = replaced_linfo->backedges;
-        if (backedges) {
-            size_t i, l = jl_array_len(backedges);
-            jl_method_instance_t **replaced = (jl_method_instance_t**)jl_array_ptr_data(backedges);
-            for (i = 0; i < l; i++) {
-                invalidate_method_instance(replaced[i], closure->max_world, 1);
-            }
+    jl_method_instance_t *replaced_linfo = oldentry->func.linfo;
+    JL_LOCK_NOGC(&replaced_linfo->def.method->writelock);
+    jl_array_t *backedges = replaced_linfo->backedges;
+    if (backedges) {
+        // invalidate callers (if any)
+        replaced_linfo->backedges = NULL;
+        size_t i, l = jl_array_len(backedges);
+        jl_method_instance_t **replaced = (jl_method_instance_t**)jl_array_ptr_data(backedges);
+        for (i = 0; i < l; i++) {
+            invalidate_method_instance(replaced[i], closure->max_world, 1);
        }
         closure->invalidated = 1;
-        replaced_linfo->backedges = NULL;
-        JL_UNLOCK_NOGC(&replaced_linfo->def.method->writelock);
     }
+    JL_UNLOCK_NOGC(&replaced_linfo->def.method->writelock);
     return 1;
 }

 // add a backedge from callee to caller
 JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, jl_method_instance_t *caller)
 {
-    assert(callee->def.method->min_world <= caller->min_world && callee->max_world >= caller->max_world);
     JL_LOCK(&callee->def.method->writelock);
     if (!callee->backedges) {
         // lazy-init the backedges array
@@ -1534,11 +1390,50 @@ JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *t
     JL_UNLOCK(&mt->writelock);
 }

-void jl_method_instance_delete(jl_method_instance_t *mi)
-{
-    invalidate_method_instance(mi, mi->min_world - 1, 0);
-    if (JL_DEBUG_METHOD_INVALIDATION)
-        jl_uv_puts(JL_STDOUT, "<<<\n", 4);
-}
+//void jl_method_instance_delete(jl_method_instance_t *mi)
+//{
+//    invalidate_method_instance(mi, mi->min_world - 1, 0);
+//    if (JL_DEBUG_METHOD_INVALIDATION)
+//        jl_uv_puts(JL_STDOUT, "<<<\n", 4);
+//}
+
+struct invalidate_mt_env {
+    jl_value_t *shadowed;
+    size_t max_world;
+};
+static int invalidate_mt_cache(jl_typemap_entry_t *oldentry, void *closure0)
+{
+    struct invalidate_mt_env *env = (struct invalidate_mt_env*)closure0;
+    if (oldentry->max_world == ~(size_t)0) {
+        jl_method_instance_t *mi = oldentry->func.linfo;
+        jl_method_t *m = mi->def.method;
+        int intersects = 0;
+        if (jl_is_method(env->shadowed)) {
+            if ((jl_value_t*)m == env->shadowed) {
+                intersects = 1;
+            }
+        }
+        else {
+            assert(jl_is_array(env->shadowed));
+            jl_typemap_entry_t **d = (jl_typemap_entry_t**)jl_array_ptr_data(env->shadowed);
+            size_t i, n = jl_array_len(env->shadowed);
+            for (i = 0; i < n; i++) {
+                if (m == d[i]->func.method) {
+                    intersects = 1;
+                    break;
+                }
+            }
+        }
+        if (intersects) {
+            if (JL_DEBUG_METHOD_INVALIDATION) {
+                jl_uv_puts(JL_STDOUT, "-- ", 3);
+                jl_static_show(JL_STDOUT, (jl_value_t*)mi);
+                jl_uv_puts(JL_STDOUT, "\n", 1);
+            }
+            oldentry->max_world = env->max_world;
+        }
+    }
+    return 1;
+}
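
Note that invalidate_mt_cache never unlinks anything: capping `max_world` is the entire mechanism, because every lookup is already world-bounded and will simply skip the closed entry. A sketch of the lookup side of that contract, over a hypothetical flattened entry list standing in for the real typemap:

    // Illustrative sketch (not part of the patch): a world-bounded lookup
    // skips entries whose validity window was closed by invalidation.
    static jl_typemap_entry_t *lookup_in_world(jl_typemap_entry_t **entries,
                                               size_t n, size_t world)
    {
        for (size_t i = 0; i < n; i++) {
            jl_typemap_entry_t *e = entries[i];
            if (e->min_world <= world && world <= e->max_world)
                return e; // capped entries fail this test in newer worlds
        }
        return NULL;
    }
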
 static int typemap_search(jl_typemap_entry_t *entry, void *closure)
@@ -1563,18 +1458,25 @@ static jl_typemap_entry_t *do_typemap_search(jl_methtable_t *mt JL_PROPAGATES_RO

 JL_DLLEXPORT void jl_method_table_disable(jl_methtable_t *mt, jl_method_t *method)
 {
+    if (jl_options.incremental && jl_generating_output())
+        jl_printf(JL_STDERR, "WARNING: method deletion during Module precompile may lead to undefined behavior"
+                "\n ** incremental compilation may be fatally broken for this module **\n\n");
     jl_typemap_entry_t *methodentry = do_typemap_search(mt, method);
     JL_LOCK(&mt->writelock);
     // Narrow the world age on the method to make it uncallable
-    method->max_world = jl_world_counter;
-    methodentry->max_world = jl_world_counter++;
+    method->deleted_world = methodentry->max_world = jl_world_counter++;
     // Recompute ambiguities (deleting a more specific method might reveal ambiguities that it previously resolved)
-    check_ambiguous_matches(mt->defs, methodentry, check_disabled_ambiguous_visitor); // TODO: decrease repeated work?
+    (void)check_ambiguous_matches(mt->defs, methodentry, check_disabled_ambiguous_visitor); // TODO: decrease repeated work?
+    // drop this method from mt->cache
+    struct invalidate_mt_env mt_cache_env;
+    mt_cache_env.max_world = methodentry->max_world - 1;
+    mt_cache_env.shadowed = (jl_value_t*)method;
+    jl_typemap_visitor(mt->cache, invalidate_mt_cache, (void*)&mt_cache_env);
     // Invalidate the backedges
     struct invalidate_conflicting_env env;
     env.invalidated = 0;
     env.max_world = methodentry->max_world;
-    jl_typemap_visitor(methodentry->func.method->specializations, (jl_typemap_visitor_fptr)invalidate_backedges, &env);
+    jl_typemap_visitor(methodentry->func.method->specializations, (jl_typemap_visitor_fptr)invalidate_backedges, &env.match);
     JL_UNLOCK(&mt->writelock);
 }

@@ -1585,14 +1487,16 @@ JL_DLLEXPORT void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method
     assert(jl_is_mtable(mt));
     jl_value_t *type = method->sig;
     jl_value_t *oldvalue = NULL;
+    if (method->primary_world == 1)
+        method->primary_world = ++jl_world_counter;
     struct invalidate_conflicting_env env;
     env.invalidated = 0;
-    env.max_world = method->min_world - 1;
+    env.max_world = method->primary_world - 1;
     JL_GC_PUSH1(&oldvalue);
     JL_LOCK(&mt->writelock);
     jl_typemap_entry_t *newentry = jl_typemap_insert(&mt->defs, (jl_value_t*)mt,
             (jl_tupletype_t*)type, simpletype, jl_emptysvec, (jl_value_t*)method, 0, &method_defs,
-            method->min_world, method->max_world, &oldvalue);
+            method->primary_world, method->deleted_world, &oldvalue);
     if (oldvalue) {
         if (oldvalue == (jl_value_t*)method) {
             // redundant add of same method; no need to do anything
             JL_UNLOCK(&mt->writelock);
             JL_GC_POP();
             return;
         }
-        method->ambig = ((jl_method_t*)oldvalue)->ambig;
+        jl_value_t *ambig = ((jl_method_t*)oldvalue)->ambig;
+        if (ambig != jl_nothing) {
+            method->ambig = ambig;
+            jl_gc_wb(method, ambig);
+            size_t i, na = jl_array_len(ambig);
+            for (i = 0; i < na; i++) {
+                jl_typemap_entry_t *mambig = (jl_typemap_entry_t*)jl_array_ptr_ref(ambig, i);
+                jl_array_ptr_1d_push((jl_array_t*)mambig->func.method->ambig, (jl_value_t*)newentry);
+            }
+        }
         method_overwrite(newentry, (jl_method_t*)oldvalue);
     }
     else {
@@ -1628,6 +1541,18 @@ JL_DLLEXPORT void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method
         }
     }
     if (oldvalue) {
+        if (jl_typeis(oldvalue, jl_typemap_entry_type))
+            oldvalue = ((jl_typemap_entry_t*)oldvalue)->func.value; // a method
+        // drop anything in mt->cache that might overlap with the new method
+        struct invalidate_mt_env mt_cache_env;
+        mt_cache_env.max_world = env.max_world;
+        mt_cache_env.shadowed = oldvalue;
+        jl_typemap_visitor(mt->cache, invalidate_mt_cache, (void*)&mt_cache_env);
+        //TODO: if it's small, might it be better to drop it all too?
+        //if (mt != jl_type_type_mt) {
+        //    mt->cache = jl_nothing;
+        //}
+
         jl_datatype_t *unw = (jl_datatype_t*)jl_unwrap_unionall(type);
         size_t l = jl_svec_len(unw->parameters);
         jl_value_t *va = NULL;
@@ -1648,10 +1573,10 @@ JL_DLLEXPORT void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method
         }
         else {
             assert(jl_is_array(oldvalue));
-            jl_method_t **d = (jl_method_t**)jl_array_ptr_data(oldvalue);
+            jl_typemap_entry_t **d = (jl_typemap_entry_t**)jl_array_ptr_data(oldvalue);
             size_t i, n = jl_array_len(oldvalue);
             for (i = 0; i < n; i++) {
-                jl_typemap_intersection_visitor(d[i]->specializations, 0, &env.match);
+                jl_typemap_intersection_visitor(d[i]->func.method->specializations, 0, &env.match);
             }
         }
     }
@@ -1745,7 +1670,7 @@ jl_method_instance_t *jl_method_lookup(jl_methtable_t *mt, jl_value_t **args, si
     JL_LOCK(&mt->writelock);
     jl_tupletype_t *tt = arg_type_tuple(args, nargs);
     JL_GC_PUSH1(&tt);
-    jl_method_instance_t *sf = jl_mt_assoc_by_type(mt, tt, cache, 1, world);
+    jl_method_instance_t *sf = jl_mt_assoc_by_type(mt, tt, cache, world);
     JL_GC_POP();
     JL_UNLOCK(&mt->writelock);
     return sf;
@@ -1774,125 +1699,125 @@ JL_DLLEXPORT jl_value_t *jl_matching_methods(jl_tupletype_t *types, int lim, int
     return ml_matches(mt->defs, 0, types, lim, include_ambiguous, world, min_valid, max_valid);
 }

-jl_callptr_t jl_compile_method_internal(jl_method_instance_t **pli, size_t world)
+jl_method_instance_t *jl_get_unspecialized(jl_method_instance_t *method)
 {
-    jl_method_instance_t *li = *pli;
-    jl_callptr_t fptr = li->invoke;
-    if (fptr != jl_fptr_trampoline)
-        return fptr;
+    // one unspecialized version of a function can be shared among all cached specializations
+    jl_method_t *def = method->def.method;
+    if (def->source == NULL) {
+        return method;
+    }
+    if (def->unspecialized == NULL) {
+        JL_LOCK(&def->writelock);
+        if (def->unspecialized == NULL) {
+            def->unspecialized = jl_get_specialized(def, def->sig, jl_emptysvec);
+            jl_gc_wb(def, def->unspecialized);
+        }
+        JL_UNLOCK(&def->writelock);
+    }
+    return def->unspecialized;
+}
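
jl_get_unspecialized above uses the classic check-lock-recheck idiom so the common path stays lock-free once the shared object exists. The same pattern in miniature; `make_shared` and both globals are hypothetical stand-ins:

    // Illustrative sketch (not part of the patch): lazy one-time initialization
    // with the check/lock/re-check idiom used by jl_get_unspecialized.
    extern void *make_shared(void);
    static jl_mutex_t lazy_lock;
    static void *shared_obj = NULL;

    static void *get_shared(void)
    {
        if (shared_obj == NULL) {           // fast path, no lock taken
            JL_LOCK(&lazy_lock);
            if (shared_obj == NULL)         // re-check: another thread may have
                shared_obj = make_shared(); // initialized it while we waited
            JL_UNLOCK(&lazy_lock);
        }
        return shared_obj;
    }
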
+
+jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t world)
+{
+    jl_code_instance_t *codeinst;
+    codeinst = mi->cache;
+    while (codeinst) {
+        if (codeinst->min_world <= world && world <= codeinst->max_world && codeinst->invoke != NULL) {
+            return codeinst;
+        }
+        codeinst = codeinst->next;
+    }

-    if (jl_options.compile_enabled == JL_OPTIONS_COMPILE_OFF ||
-            jl_options.compile_enabled == JL_OPTIONS_COMPILE_MIN) {
+    if (jl_options.compile_enabled == JL_OPTIONS_COMPILE_OFF ||
+        jl_options.compile_enabled == JL_OPTIONS_COMPILE_MIN) {
         // copy fptr from the template method definition
-        jl_method_t *def = li->def.method;
+        jl_method_t *def = mi->def.method;
         if (jl_is_method(def) && def->unspecialized) {
-            jl_method_instance_t *unspec = def->unspecialized;
-            if (unspec->invoke != jl_fptr_trampoline) {
-                li->functionObjectsDecls.functionObject = NULL;
-                li->functionObjectsDecls.specFunctionObject = NULL;
-                li->specptr = unspec->specptr;
-                li->inferred = unspec->inferred;
-                if (li->inferred)
-                    jl_gc_wb(li, li->inferred);
-                li->inferred_const = unspec->inferred_const;
-                if (li->inferred_const)
-                    jl_gc_wb(li, li->inferred_const);
-                li->invoke = unspec->invoke;
-                return fptr;
+            jl_code_instance_t *unspec = def->unspecialized->cache;
+            if (unspec && unspec->invoke != NULL) {
+                jl_code_instance_t *codeinst = jl_set_method_inferred(mi, (jl_value_t*)jl_any_type, NULL, NULL,
+                    0, 1, ~(size_t)0);
+                codeinst->specptr = unspec->specptr;
+                codeinst->rettype_const = unspec->rettype_const;
+                codeinst->invoke = unspec->invoke;
+                return codeinst;
             }
         }
-        jl_code_info_t *src = jl_code_for_interpreter(li);
+        jl_code_info_t *src = jl_code_for_interpreter(mi);
         if (!jl_code_requires_compiler(src)) {
-            li->inferred = (jl_value_t*)src;
-            jl_gc_wb(li, src);
-            li->functionObjectsDecls.functionObject = NULL;
-            li->functionObjectsDecls.specFunctionObject = NULL;
-            li->invoke = jl_fptr_interpret_call;
-            return jl_fptr_interpret_call;
+            jl_code_instance_t *codeinst = jl_set_method_inferred(mi, (jl_value_t*)jl_any_type, NULL, NULL,
+                0, 1, ~(size_t)0);
+            codeinst->invoke = jl_fptr_interpret_call;
+            return codeinst;
         }
         if (jl_options.compile_enabled == JL_OPTIONS_COMPILE_OFF) {
             jl_printf(JL_STDERR, "code missing for ");
-            jl_static_show(JL_STDERR, (jl_value_t*)li);
+            jl_static_show(JL_STDERR, (jl_value_t*)mi);
             jl_printf(JL_STDERR, " : sysimg may not have been built with --compile=all\n");
         }
     }

-    jl_llvm_functions_t decls = li->functionObjectsDecls;
-    if (decls.functionObject == NULL) {
-        // if we don't have decls already, try to generate it now
+    codeinst = mi->cache;
+    while (codeinst) {
+        if (codeinst->min_world <= world && world <= codeinst->max_world && codeinst->functionObjectsDecls.functionObject != NULL)
+            break;
+        codeinst = codeinst->next;
+    }
+    if (codeinst == NULL) {
+        // if we don't have any decls already, try to generate it now
         jl_code_info_t *src = NULL;
-        if (jl_is_method(li->def.method) && !jl_is_rettype_inferred(li) &&
-                jl_symbol_name(li->def.method->name)[0] != '@') {
+        if (jl_is_method(mi->def.method) && !jl_rettype_inferred(mi, world, world) &&
+            jl_symbol_name(mi->def.method->name)[0] != '@') {
             // don't bother with typeinf on macros or toplevel thunks
             // but try to infer everything else
-            src = jl_type_infer(pli, world, 0);
-            li = *pli;
-        }
-
-        // check again, because jl_type_infer may have changed li or compiled it
-        fptr = li->invoke;
-        if (fptr != jl_fptr_trampoline)
-            return fptr;
-        decls = li->functionObjectsDecls;
-        if (decls.functionObject == NULL) {
-            decls = jl_compile_linfo(pli, src, world, &jl_default_cgparams);
-            li = *pli;
-        }
-
-        // check again, of course
-        fptr = li->invoke;
-        if (fptr != jl_fptr_trampoline)
-            return fptr;
-
-        // if it's not inferred (inference failed / disabled), try to just use the unspecialized fptr
-        if (!li->inferred) {
-            if (jl_is_method(li->def.method) && li->def.method->unspecialized) {
-                jl_method_instance_t *unspec = li->def.method->unspecialized;
-                fptr = unspec->invoke;
-                if (fptr != jl_fptr_trampoline) {
-                    li->specptr = unspec->specptr;
-                    li->inferred_const = unspec->inferred_const;
-                    if (li->inferred_const)
-                        jl_gc_wb(li, li->inferred_const);
-                    li->invoke = fptr;
-                    return fptr;
-                }
+            src = jl_type_infer(mi, world, 0);
+        }
+        codeinst = jl_compile_linfo(mi, src, world, &jl_default_cgparams);
+        if (!codeinst) {
+            jl_method_instance_t *unspec = jl_get_unspecialized(mi);
+            jl_code_instance_t *ucache = jl_get_method_inferred(unspec, (jl_value_t*)jl_any_type, 1, ~(size_t)0);
+            // ask codegen to make the fptr for unspec
+            if (ucache->invoke == NULL)
+                jl_generate_fptr(ucache);
+            if (ucache->invoke != jl_fptr_sparam &&
+                ucache->invoke != jl_fptr_interpret_call) {
+                return ucache;
             }
+            jl_code_instance_t *codeinst = jl_set_method_inferred(mi, (jl_value_t*)jl_any_type, NULL, NULL,
+                0, 1, ~(size_t)0);
+            codeinst->specptr = ucache->specptr;
+            codeinst->rettype_const = ucache->rettype_const;
+            codeinst->invoke = ucache->invoke;
+            return codeinst;
         }
     }
-    // ask codegen to make the fptr
-    fptr = jl_generate_fptr(pli, decls, world);
-    return fptr;
-}
-
-JL_DLLEXPORT jl_value_t *jl_fptr_trampoline(jl_method_instance_t *m, jl_value_t **args, uint32_t nargs)
-{
-    size_t world = jl_get_ptls_states()->world_age;
-    jl_callptr_t fptr = jl_compile_method_internal(&m, world);
-    return fptr(m, args, nargs);
+    jl_generate_fptr(codeinst);
+    return codeinst;
 }

-JL_DLLEXPORT jl_value_t *jl_fptr_const_return(jl_method_instance_t *m, jl_value_t **args, uint32_t nargs)
+JL_DLLEXPORT jl_value_t *jl_fptr_const_return(jl_code_instance_t *m, jl_value_t **args, uint32_t nargs)
 {
-    return m->inferred_const;
+    return m->rettype_const;
 }

-JL_DLLEXPORT jl_value_t *jl_fptr_args(jl_method_instance_t *m, jl_value_t **args, uint32_t nargs)
+JL_DLLEXPORT jl_value_t *jl_fptr_args(jl_code_instance_t *m, jl_value_t **args, uint32_t nargs)
 {
     return m->specptr.fptr1(args[0], &args[1], nargs - 1);
 }

-JL_DLLEXPORT jl_value_t *jl_fptr_sparam(jl_method_instance_t *m, jl_value_t **args, uint32_t nargs)
+JL_DLLEXPORT jl_value_t *jl_fptr_sparam(jl_code_instance_t *m, jl_value_t **args, uint32_t nargs)
 {
-    return m->specptr.fptr3(m->sparam_vals, args[0], &args[1], nargs - 1);
+    jl_svec_t *sparams = m->def->sparam_vals;
+    assert(sparams != jl_emptysvec);
+    return m->specptr.fptr3(sparams, args[0], &args[1], nargs - 1);
 }

 // Return the index of the invoke api, if known
-JL_DLLEXPORT int32_t jl_invoke_api(jl_method_instance_t *mi)
+JL_DLLEXPORT int32_t jl_invoke_api(jl_code_instance_t *codeinst)
 {
-    jl_callptr_t f = mi->invoke;
-    if (f == &jl_fptr_trampoline)
+    jl_callptr_t f = codeinst->invoke;
+    if (f == NULL)
         return 0;
     if (f == &jl_fptr_args)
         return 1;
@@ -1906,7 +1831,7 @@ JL_DLLEXPORT int32_t jl_invoke_api(jl_method_instance_t *mi)
 }
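
A CodeInstance's `invoke` field is now one of a small set of ABI adapters, and every call site uses the same shape regardless of which adapter was installed. A hedged sketch of the caller's view, assuming only the signatures shown above:

    // Illustrative sketch (not part of the patch): calling through whichever
    // adapter a CodeInstance carries. argv[0] is always the function object.
    //   jl_fptr_args         -> specptr.fptr1(argv[0], &argv[1], nargs - 1)
    //   jl_fptr_sparam       -> same, with ci->def->sparam_vals prepended
    //   jl_fptr_const_return -> ignores argv, returns ci->rettype_const
    static jl_value_t *call_codeinst(jl_code_instance_t *ci,
                                     jl_value_t **argv, uint32_t nargs)
    {
        assert(ci->invoke != NULL); // else jl_compile_method_internal must run first
        return ci->invoke(ci, argv, nargs);
    }
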
 // compile-time method lookup
-jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types JL_PROPAGATES_ROOT, size_t world, int mt_cache)
+jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types JL_PROPAGATES_ROOT, size_t world, size_t *min_valid, size_t *max_valid, int mt_cache)
 {
     JL_TIMING(METHOD_LOOKUP_COMPILE);
     if (jl_has_free_typevars((jl_value_t*)types))
@@ -1915,9 +1840,7 @@ jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types JL_PROPAGATES
         return NULL;

     // find if exactly 1 method matches (issue #7302)
-    size_t min_valid = 0;
-    size_t max_valid = ~(size_t)0;
-    jl_value_t *matches = jl_matching_methods(types, 1, 1, world, &min_valid, &max_valid);
+    jl_value_t *matches = jl_matching_methods(types, 1, 1, world, min_valid, max_valid);
     if (matches == jl_false || jl_array_len(matches) != 1)
         return NULL;
     jl_tupletype_t *tt = NULL;
@@ -1940,9 +1863,8 @@ jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types JL_PROPAGATES
             // used via dispatch later (e.g. because it was hinted via a call to `precompile`)
             jl_methtable_t *mt = dt->name->mt;
             JL_LOCK(&mt->writelock);
-            nf = cache_method(mt, &mt->cache, (jl_value_t*)mt, ti, m, world, env, /*allow_exec*/1);
+            nf = cache_method(mt, &mt->cache, (jl_value_t*)mt, ti, m, world, env);
             JL_UNLOCK(&mt->writelock);
-            assert(nf->min_world <= world && nf->max_world >= world);
         }
         else {
             intptr_t nspec = (mt == jl_type_type_mt ? m->nargs + 1 : mt->max_args + 2);
@@ -1951,8 +1873,7 @@ jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types JL_PROPAGATES
             int is_compileable = ((jl_datatype_t*)ti)->isdispatchtuple ||
                 jl_isa_compileable_sig(tt, m);
             if (is_compileable) {
-                nf = jl_specializations_get_linfo(m, (jl_value_t*)tt, env, world);
-                assert(nf->min_world <= world && nf->max_world >= world);
+                nf = jl_specializations_get_linfo(m, (jl_value_t*)tt, env);
             }
         }
     }
@@ -1961,88 +1882,111 @@ jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types JL_PROPAGATES
     return nf;
 }

+static void _generate_from_hint(jl_method_instance_t *mi, size_t world)
+{
+    int generating_llvm = jl_options.outputo || jl_options.outputbc || jl_options.outputunoptbc;
+    jl_code_info_t *src = NULL;
+    // If we are saving ji files (e.g. package pre-compilation or intermediate sysimg build steps),
+    // don't bother generating anything since it won't be saved.
+    if (!jl_rettype_inferred(mi, world, world))
+        src = jl_type_infer(mi, world, 1);
+    if (generating_llvm) {
+        jl_code_instance_t *codeinst;
+        if ((codeinst = jl_rettype_inferred(mi, world, world)))
+            if (codeinst->invoke == jl_fptr_const_return)
+                return; // probably not a good idea to generate code
+        // If we are saving LLVM or native code, generate the LLVM IR so that it'll
+        // be included in the saved LLVM module.
+        (void)jl_compile_linfo(mi, src, world, &jl_default_cgparams);
+    }
+}
+
+void jl_compile_now(jl_method_instance_t *mi)
+{
+    size_t world = jl_world_counter;
+    size_t tworld = jl_typeinf_world;
+    _generate_from_hint(mi, world);
+    if (jl_typeinf_func && mi->def.method->primary_world <= tworld) {
+        // if it's part of the compiler, also attempt to compile for the compiler world too
+        _generate_from_hint(mi, tworld);
+    }
+}
+
 JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types)
 {
     size_t world = jl_world_counter;
-    jl_method_instance_t *li = jl_get_specialization1(types, world, 1);
-    if (li == NULL)
+    size_t tworld = jl_typeinf_world;
+    size_t min_valid = 0;
+    size_t max_valid = ~(size_t)0;
+    jl_method_instance_t *mi = jl_get_specialization1(types, world, &min_valid, &max_valid, 1);
+    if (mi == NULL)
         return 0;
+    JL_GC_PROMISE_ROOTED(mi);
     if (jl_generating_output()) {
-        jl_code_info_t *src = NULL;
-        // If we are saving ji files (e.g. package pre-compilation or intermediate sysimg build steps),
-        // don't bother generating anything since it won't be saved.
-        if (!jl_is_rettype_inferred(li))
-            src = jl_type_infer(&li, world, 0);
-        if (li->invoke != jl_fptr_const_return) {
-            if (jl_options.outputo || jl_options.outputbc || jl_options.outputunoptbc) {
-                // If we are saving LLVM or native code, generate the LLVM IR so that it'll
-                // be included in the saved LLVM module.
-                jl_compile_linfo(&li, src, world, &jl_default_cgparams);
-                if (jl_typeinf_func && li->def.method->module == ((jl_datatype_t*)jl_typeof(jl_typeinf_func))->name->module) {
-                    size_t world = jl_typeinf_world;
-                    // if it's part of the compiler, also attempt to compile for the compiler world too
-                    jl_method_instance_t *li = jl_get_specialization1(types, world, 1);
-                    if (li != NULL)
-                        jl_compile_linfo(&li, NULL, world, &jl_default_cgparams);
-                }
-            }
-        }
+        jl_compile_now(mi);
         // In addition to full compilation of the compilation-signature, if `types` is more specific (e.g. due to nospecialize),
         // also run inference now on the original `types`, since that may help us guide inference to find
         // additional useful methods that should be compiled
-        //ALT: if (jl_is_datatype(types) && ((jl_datatype_t*)types)->isdispatchtuple && !jl_egal(li->specTypes, types))
-        //ALT: if (jl_subtype(types, li->specTypes))
-        JL_GC_PROMISE_ROOTED(li);
-        if (!jl_subtype(li->specTypes, (jl_value_t*)types)) {
+        //ALT: if (jl_is_datatype(types) && ((jl_datatype_t*)types)->isdispatchtuple && !jl_egal(mi->specTypes, types))
+        //ALT: if (jl_subtype(types, mi->specTypes))
+        if (!jl_subtype(mi->specTypes, (jl_value_t*)types)) {
             jl_svec_t *tpenv2 = jl_emptysvec;
             jl_value_t *types2 = NULL;
             JL_GC_PUSH2(&tpenv2, &types2);
-            types2 = jl_type_intersection_env((jl_value_t*)types, (jl_value_t*)li->def.method->sig, &tpenv2);
-            jl_method_instance_t *li2 = jl_specializations_get_linfo(li->def.method, (jl_value_t*)types2, tpenv2, world);
+            types2 = jl_type_intersection_env((jl_value_t*)types, (jl_value_t*)mi->def.method->sig, &tpenv2);
+            jl_method_instance_t *li2 = jl_specializations_get_linfo(mi->def.method, (jl_value_t*)types2, tpenv2);
             JL_GC_POP();
-            if (!jl_is_rettype_inferred(li2))
-                (void)jl_type_infer(&li2, world, 0);
+            if (!jl_rettype_inferred(li2, world, world))
+                (void)jl_type_infer(li2, world, 1);
+            if (jl_typeinf_func && mi->def.method->primary_world <= tworld) {
+                if (!jl_rettype_inferred(li2, tworld, tworld))
+                    (void)jl_type_infer(li2, tworld, 1);
+            }
         }
     }
     else {
         // Otherwise (this branch), assuming we are at runtime (normal JIT) and
         // we should generate the native code immediately in preparation for use.
-        (void)jl_compile_method_internal(&li, world);
+        (void)jl_compile_method_internal(mi, world);
     }
     return 1;
 }

-JL_DLLEXPORT jl_value_t *jl_get_spec_lambda(jl_tupletype_t *types, size_t world)
+JL_DLLEXPORT jl_value_t *jl_get_spec_lambda(jl_tupletype_t *types, size_t world, size_t *min_valid, size_t *max_valid)
 {
-    jl_method_instance_t *li = jl_get_specialization1(types, world, 0);
-    if (!li || jl_has_call_ambiguities((jl_value_t*)types, li->def.method))
+    jl_method_instance_t *mi = jl_get_specialization1(types, world, min_valid, max_valid, 0);
+    if (!mi || jl_has_call_ambiguities((jl_value_t*)types, mi->def.method))
         return jl_nothing;
-    return (jl_value_t*)li;
+    return (jl_value_t*)mi;
 }

 // see if a call to m with computed from `types` is ambiguous
+// XXX: returns wrong answers due to use of jl_world_counter
 JL_DLLEXPORT int jl_is_call_ambiguous(jl_value_t *types, jl_method_t *m)
 {
     if (m->ambig == jl_nothing)
         return 0;
     for (size_t i = 0; i < jl_array_len(m->ambig); i++) {
-        jl_method_t *mambig = (jl_method_t*)jl_array_ptr_ref(m->ambig, i);
-        if (mambig->min_world <= jl_world_counter && jl_world_counter <= mambig->max_world && jl_subtype((jl_value_t*)types, (jl_value_t*)mambig->sig))
-            return 1;
+        jl_typemap_entry_t *mambig = (jl_typemap_entry_t*)jl_array_ptr_ref(m->ambig, i);
+        if (mambig->min_world <= jl_world_counter && jl_world_counter <= mambig->max_world)
+            if (jl_subtype((jl_value_t*)types, (jl_value_t*)mambig->sig))
+                return 1;
     }
     return 0;
 }

 // see if a call to m with a subtype of `types` might be ambiguous
 // if types is from a call signature (isdispatchtuple), this is the same as jl_is_call_ambiguous above
+// XXX: returns wrong answers due to use of jl_world_counter
 JL_DLLEXPORT int jl_has_call_ambiguities(jl_value_t *types, jl_method_t *m)
 {
     if (m->ambig == jl_nothing)
         return 0;
     for (size_t i = 0; i < jl_array_len(m->ambig); i++) {
-        jl_method_t *mambig = (jl_method_t*)jl_array_ptr_ref(m->ambig, i);
-        if (mambig->min_world <= jl_world_counter && jl_world_counter <= mambig->max_world && !jl_has_empty_intersection(mambig->sig, types))
-            return 1;
+        jl_typemap_entry_t *mambig = (jl_typemap_entry_t*)jl_array_ptr_ref(m->ambig, i);
+        if (mambig->min_world <= jl_world_counter && jl_world_counter <= mambig->max_world)
+            if (!jl_has_empty_intersection((jl_value_t*)mambig->sig, types))
+                return 1;
     }
     return 0;
 }
@@ -2162,14 +2106,25 @@ STATIC_INLINE jl_method_instance_t *jl_lookup_generic_(jl_value_t **args, uint32
     jl_methtable_t *mt = NULL;
     int i;
     // check each cache entry to see if it matches
-    for (i = 0; i < 4; i++) {
-        entry = call_cache[cache_idx[i]];
-        if (entry && nargs == jl_svec_len(entry->sig->parameters) &&
-            sig_match_fast(args, jl_svec_data(entry->sig->parameters), 0, nargs) &&
-            world >= entry->min_world && world <= entry->max_world) {
-            break;
-        }
-    }
+    //#pragma unroll
+    //for (i = 0; i < 4; i++) {
+    //    LOOP_BODY(i);
+    //}
+#define LOOP_BODY(_i) do { \
+        i = _i; \
+        entry = call_cache[cache_idx[i]]; \
+        if (entry && nargs == jl_svec_len(entry->sig->parameters) && \
+            sig_match_fast(args, jl_svec_data(entry->sig->parameters), 0, nargs) && \
+            world >= entry->min_world && world <= entry->max_world) { \
+            goto have_entry; \
+        } \
+    } while (0);
+    LOOP_BODY(0);
+    LOOP_BODY(1);
+    LOOP_BODY(2);
+    LOOP_BODY(3);
+#undef LOOP_BODY
+    i = 4;
     // if no method was found in the associative cache, check the full cache
     if (i == 4) {
         JL_TIMING(METHOD_LOOKUP_FAST);
@@ -2180,11 +2135,13 @@ STATIC_INLINE jl_method_instance_t *jl_lookup_generic_(jl_value_t **args, uint32
             // put the entry into the cache if it's valid for a leafsig lookup,
             // using pick_which to slightly randomize where it ends up
             call_cache[cache_idx[++pick_which[cache_idx[0]] & 3]] = entry;
+            goto have_entry;
         }
     }

-    jl_method_instance_t *mfunc = NULL;
+    jl_method_instance_t *mfunc;
     if (entry) {
+have_entry:
         mfunc = entry->func.linfo;
     }
     else {
@@ -2193,7 +2150,7 @@ STATIC_INLINE jl_method_instance_t *jl_lookup_generic_(jl_value_t **args, uint32
         JL_TIMING(METHOD_LOOKUP_SLOW);
         jl_tupletype_t *tt = arg_type_tuple(args, nargs);
         JL_GC_PUSH1(&tt);
-        mfunc = jl_mt_assoc_by_type(mt, tt, /*cache*/1, /*allow_exec*/1, world);
+        mfunc = jl_mt_assoc_by_type(mt, tt, /*cache*/1, world);
         JL_GC_POP();
         JL_UNLOCK(&mt->writelock);
         if (mfunc == NULL) {
@@ -2221,11 +2178,23 @@ jl_method_instance_t *jl_lookup_generic(jl_value_t **args, uint32_t nargs, uint3

 JL_DLLEXPORT jl_value_t *jl_apply_generic(jl_value_t **args, uint32_t nargs)
 {
+    size_t world = jl_get_ptls_states()->world_age;
     jl_method_instance_t *mfunc = jl_lookup_generic_(args, nargs,
                                                      jl_int32hash_fast(jl_return_address()),
-                                                     jl_get_ptls_states()->world_age);
+                                                     world);
     JL_GC_PROMISE_ROOTED(mfunc);
-    jl_value_t *res = mfunc->invoke(mfunc, args, nargs);
+    jl_value_t *res;
+    // manually inline key parts of jl_invoke:
+    jl_code_instance_t *codeinst = mfunc->cache;
+    while (codeinst) {
+        if (codeinst->min_world <= world && world <= codeinst->max_world && codeinst->invoke != NULL) {
+            res = codeinst->invoke(codeinst, args, nargs);
+            return verify_type(res);
+        }
+        codeinst = codeinst->next;
+    }
+    codeinst = jl_compile_method_internal(mfunc, world);
+    res = codeinst->invoke(codeinst, args, nargs);
     return verify_type(res);
 }
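
The hot-path lookup above replaces a four-iteration scan loop with macro-expanded probes so a hit can jump straight to `have_entry` without re-testing the loop index. The same unrolling pattern in isolation, over a hypothetical four-entry direct-mapped cache:

    // Illustrative sketch (not part of the patch): a manually unrolled probe
    // that exits through a label on the first hit, as the LOOP_BODY macro does.
    typedef struct { int key; int value; } entry_t;

    static int matches(const entry_t *e, int key) { return e->key == key; }

    static int find_slot(entry_t *cache[4], int key)
    {
        int i;
    #define PROBE(_i) do { \
            i = _i; \
            if (cache[i] && matches(cache[i], key)) \
                goto hit; \
        } while (0);
        PROBE(0); PROBE(1); PROBE(2); PROBE(3);
    #undef PROBE
        return -1; /* miss: caller falls back to the full lookup */
    hit:
        return i;
    }
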
@@ -2277,15 +2246,13 @@ jl_value_t *jl_gf_invoke(jl_value_t *types0, jl_value_t **args, size_t nargs)
 jl_value_t *jl_gf_invoke_by_method(jl_method_t *method, jl_value_t **args, size_t nargs)
 {
-    size_t world = jl_get_ptls_states()->world_age;
     jl_method_instance_t *mfunc = NULL;
     jl_typemap_entry_t *tm = NULL;
-    jl_methtable_t *mt = jl_gf_mtable(args[0]);
     jl_svec_t *tpenv = jl_emptysvec;
     jl_tupletype_t *tt = NULL;
     JL_GC_PUSH2(&tpenv, &tt);
     if (method->invokes != NULL)
-        tm = jl_typemap_assoc_exact(method->invokes, args, nargs, jl_cachearg_offset(mt), world);
+        tm = jl_typemap_assoc_exact(method->invokes, args, nargs, 1, 1);
     if (tm) {
         mfunc = tm->func.linfo;
     }
@@ -2300,18 +2267,16 @@ jl_value_t *jl_gf_invoke_by_method(jl_method_t *method, jl_value_t **args, size_
         if (method->invokes == NULL)
             method->invokes = jl_nothing;

-        mfunc = cache_method(mt, &method->invokes, (jl_value_t*)method, tt, method, world, tpenv, 1);
+        mfunc = cache_method(NULL, &method->invokes, (jl_value_t*)method, tt, method, 1, tpenv);
         JL_UNLOCK(&method->writelock);
     }
     JL_GC_POP();
     JL_GC_PROMISE_ROOTED(mfunc);
-    return mfunc->invoke(mfunc, args, nargs);
+    return jl_invoke(mfunc, args, nargs);
 }

-JL_DLLEXPORT jl_value_t *jl_get_invoke_lambda(jl_methtable_t *mt,
-                                              jl_typemap_entry_t *entry,
-                                              jl_value_t *tt,
-                                              size_t world)
+JL_DLLEXPORT jl_value_t *jl_get_invoke_lambda(jl_typemap_entry_t *entry,
+                                              jl_value_t *tt)
 {
     // TODO: refactor this method to be more like `jl_get_specialization1`
     if (!jl_is_datatype(tt) || !((jl_datatype_t*)tt)->isdispatchtuple)
@@ -2321,7 +2286,7 @@ JL_DLLEXPORT jl_value_t *jl_get_invoke_lambda(jl_methtable_t *mt,
     jl_typemap_entry_t *tm = NULL;
     if (method->invokes != NULL) {
         tm = jl_typemap_assoc_by_type(method->invokes, tt, NULL, /*subtype*/1,
-                                      jl_cachearg_offset(mt), world, /*max_world_mask*/0);
+                                      1, 1, /*max_world_mask*/0);
         if (tm) {
             return (jl_value_t*)tm->func.linfo;
         }
@@ -2330,7 +2295,7 @@ JL_DLLEXPORT jl_value_t *jl_get_invoke_lambda(jl_methtable_t *mt,
     JL_LOCK(&method->writelock);
     if (method->invokes != NULL) {
         tm = jl_typemap_assoc_by_type(method->invokes, tt, NULL, /*subtype*/1,
-                                      jl_cachearg_offset(mt), world, /*max_world_mask*/0);
+                                      1, 1, /*max_world_mask*/0);
         if (tm) {
             jl_method_instance_t *mfunc = tm->func.linfo;
             JL_UNLOCK(&method->writelock);
@@ -2349,8 +2314,8 @@ JL_DLLEXPORT jl_value_t *jl_get_invoke_lambda(jl_methtable_t *mt,
     if (method->invokes == NULL)
         method->invokes = jl_nothing;

-    jl_method_instance_t *mfunc = cache_method(mt, &method->invokes, entry->func.value,
-                                               (jl_tupletype_t*)tt, method, world, tpenv, 1);
+    jl_method_instance_t *mfunc = cache_method(NULL, &method->invokes, entry->func.value,
+                                               (jl_tupletype_t*)tt, method, 1, tpenv);
     JL_GC_POP();
     JL_UNLOCK(&method->writelock);
     return (jl_value_t*)mfunc;
@@ -2358,29 +2323,9 @@ JL_DLLEXPORT jl_value_t *jl_get_invoke_lambda(jl_methtable_t *mt,

 JL_DLLEXPORT jl_value_t *jl_invoke(jl_method_instance_t *meth, jl_value_t **args, uint32_t nargs)
 {
-    jl_callptr_t fptr = meth->invoke;
-    if (fptr != jl_fptr_trampoline) {
-        return fptr(meth, args, nargs);
-    }
-    else {
-        // if this hasn't been inferred (compiled) yet,
-        // inferring it might not be able to handle the world range
-        // so we just do a generic apply here
-        // because that might actually be faster
-        // since it can go through the unrolled caches for this world
-        // and if inference is successful, this meth would get updated anyways,
-        // and we'll get the fast path here next time
-
-        jl_method_instance_t *mfunc = jl_lookup_generic_(args, nargs,
-                                                         jl_int32hash_fast(jl_return_address()),
-                                                         jl_get_ptls_states()->world_age);
-        // check whether `jl_apply_generic` would call the right method
-        if (mfunc->def.method == meth->def.method)
-            return mfunc->invoke(mfunc, args, nargs);
-
-        // no; came from an `invoke` call
-        return jl_gf_invoke_by_method(meth->def.method, args, nargs);
-    }
+    size_t world = jl_get_ptls_states()->world_age;
+    jl_code_instance_t *codeinst = jl_compile_method_internal(meth, world);
+    return codeinst->invoke(codeinst, args, nargs);
 }

 // Return value is rooted globally
@@ -2514,7 +2459,7 @@ static int ml_matches_visitor(jl_typemap_entry_t *ml, struct typemap_intersectio
             jl_value_t *mti = NULL;
             JL_GC_PUSH2(&env, &mti);
             for (size_t j = 0; j < jl_array_len(meth->ambig); j++) {
-                jl_method_t *mambig = (jl_method_t*)jl_array_ptr_ref(meth->ambig, j);
+                jl_typemap_entry_t *mambig = (jl_typemap_entry_t*)jl_array_ptr_ref(meth->ambig, j);
                 if (closure->include_ambiguous) {
                     env = jl_emptysvec;
                     mti = jl_type_intersection_env((jl_value_t*)closure->match.type,
@@ -2523,14 +2468,14 @@ static int ml_matches_visitor(jl_typemap_entry_t *ml, struct typemap_intersectio
                     assert(done);
                     int k;
                     for (k = 0; k < len; k++) {
-                        if ((jl_value_t*)mambig == jl_svecref(jl_array_ptr_ref(closure->t, k), 2))
+                        if (mambig->func.value == jl_svecref(jl_array_ptr_ref(closure->t, k), 2))
                             break;
                     }
                     if (k >= len) {
                         if (len == 0) {
                             closure->t = (jl_value_t*)jl_alloc_vec_any(0);
                         }
-                        mti = (jl_value_t*)jl_svec(3, mti, env, mambig);
+                        mti = (jl_value_t*)jl_svec(3, mti, env, mambig->func.value);
                         jl_array_ptr_1d_push((jl_array_t*)closure->t, mti);
                         len++;
                     }
@@ -2539,7 +2484,7 @@ static int ml_matches_visitor(jl_typemap_entry_t *ml, struct typemap_intersectio
                 else {
                     // the current method definitely never matches if the intersection with this method
                     // is also fully covered by an ambiguous method's signature
-                    if (jl_subtype(closure->match.ti, mambig->sig)) {
+                    if (jl_subtype(closure->match.ti, (jl_value_t*)mambig->sig)) {
                         return_this_match = 0;
                         break;
                     }
diff --git a/src/interpreter.c b/src/interpreter.c
index cba2c909c0d32..f4b93fe3ffa39 100644
--- a/src/interpreter.c
+++ b/src/interpreter.c
@@ -318,7 +318,7 @@ SECT_INTERP static jl_value_t *do_call(jl_value_t **args, size_t nargs, interpre
     jl_value_t **argv;
     JL_GC_PUSHARGS(argv, nargs);
     size_t i;
-    for(i=0; i < nargs; i++)
+    for (i = 0; i < nargs; i++)
         argv[i] = eval_value(args[i], s);
     jl_value_t *result = jl_apply_generic(argv, nargs);
     JL_GC_POP();
@@ -334,7 +334,7 @@ SECT_INTERP static jl_value_t *do_invoke(jl_value_t **args, size_t nargs, interp
         argv[i - 1] = eval_value(args[i], s);
     jl_method_instance_t *meth = (jl_method_instance_t*)args[0];
     assert(jl_is_method_instance(meth));
-    jl_value_t *result = meth->invoke(meth, argv, nargs - 1);
+    jl_value_t *result = jl_invoke(meth, argv, nargs - 1);
     JL_GC_POP();
     return result;
 }
JL_GC_PUSH1(&src); + src = jl_uncompress_ast(mi->def.method, NULL, (jl_array_t*)src); + mi->uninferred = (jl_value_t*)src; + jl_gc_wb(mi, src); + JL_GC_POP(); } } if (!src || !jl_is_code_info(src)) { jl_error("source missing for method called in interpreter"); } - JL_GC_POP(); return src; } // interpreter entry points struct jl_interpret_call_args { - jl_method_instance_t *lam; + jl_method_instance_t *mi; jl_value_t **args; uint32_t nargs; }; @@ -816,9 +818,7 @@ SECT_INTERP CALLBACK_ABI void *jl_interpret_call_callback(interpreter_state *s, struct jl_interpret_call_args *args = (struct jl_interpret_call_args *)vargs; JL_GC_PROMISE_ROOTED(args); - jl_code_info_t *src = jl_code_for_interpreter(args->lam); - args->lam->inferred = (jl_value_t*)src; - jl_gc_wb(args->lam, src); + jl_code_info_t *src = jl_code_for_interpreter(args->mi); jl_array_t *stmts = src->code; assert(jl_typeis(stmts, jl_array_any_type)); @@ -829,21 +829,21 @@ SECT_INTERP CALLBACK_ABI void *jl_interpret_call_callback(interpreter_state *s, s->src = src; size_t nargs; int isva; - if (jl_is_module(args->lam->def.value)) { - s->module = args->lam->def.module; + if (jl_is_module(args->mi->def.value)) { + s->module = args->mi->def.module; nargs = 0; isva = 0; } else { - s->module = args->lam->def.method->module; - nargs = args->lam->def.method->nargs; - isva = args->lam->def.method->isva; + s->module = args->mi->def.method->module; + nargs = args->mi->def.method->nargs; + isva = args->mi->def.method->isva; } s->locals = locals + 2; - s->sparam_vals = args->lam->sparam_vals; + s->sparam_vals = args->mi->sparam_vals; s->preevaluation = 0; s->continue_at = 0; - s->mi = args->lam; + s->mi = args->mi; size_t i; for (i = 0; i < nargs; i++) { if (isva && i == nargs - 1) @@ -856,9 +856,9 @@ SECT_INTERP CALLBACK_ABI void *jl_interpret_call_callback(interpreter_state *s, return (void*)r; } -SECT_INTERP jl_value_t *jl_fptr_interpret_call(jl_method_instance_t *lam, jl_value_t **args, uint32_t nargs) +SECT_INTERP jl_value_t *jl_fptr_interpret_call(jl_code_instance_t *codeinst, jl_value_t **args, uint32_t nargs) { - struct jl_interpret_call_args callback_args = { lam, args, nargs }; + struct jl_interpret_call_args callback_args = { codeinst->def, args, nargs }; return (jl_value_t*)enter_interpreter_frame(jl_interpret_call_callback, (void *)&callback_args); } diff --git a/src/jltypes.c b/src/jltypes.c index 22346331f117b..6c7fe31871a61 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -101,7 +101,7 @@ jl_datatype_t *jl_methtable_type; jl_datatype_t *jl_typemap_entry_type; jl_datatype_t *jl_typemap_level_type; jl_datatype_t *jl_method_instance_type; -jl_datatype_t *jl_lambda_type; +jl_datatype_t *jl_code_instance_type; jl_datatype_t *jl_code_info_type; jl_datatype_t *jl_module_type; jl_datatype_t *jl_errorexception_type; @@ -2074,9 +2074,9 @@ void jl_init_types(void) JL_GC_DISABLED "module", "file", "line", + "primary_world", + "deleted_world", "sig", - "min_world", - "max_world", "ambig", "specializations", "slot_syms", @@ -2095,9 +2095,9 @@ void jl_init_types(void) JL_GC_DISABLED jl_module_type, jl_sym_type, jl_int32_type, - jl_type_type, jl_long_type, jl_long_type, + jl_type_type, jl_any_type, // Union{Array, Nothing} jl_any_type, // TypeMap jl_string_type, @@ -2116,37 +2116,53 @@ void jl_init_types(void) JL_GC_DISABLED jl_method_instance_type = jl_new_datatype(jl_symbol("MethodInstance"), core, jl_any_type, jl_emptysvec, - jl_perm_symsvec(15, + jl_perm_symsvec(7, "def", "specTypes", - "rettype", "sparam_vals", + "uninferred", "backedges", - 
"inferred", - "inferred_const", - "min_world", - "max_world", - "inInference", - "", - "invoke", - "specptr", - "", ""), - jl_svec(15, + "cache", + "inInference"), + jl_svec(7, jl_new_struct(jl_uniontype_type, jl_method_type, jl_module_type), jl_any_type, - jl_any_type, jl_simplevector_type, jl_any_type, jl_any_type, jl_any_type, + jl_bool_type), + 0, 1, 3); + + jl_code_instance_type = + jl_new_datatype(jl_symbol("CodeInstance"), core, + jl_any_type, jl_emptysvec, + jl_perm_symsvec(11, + "def", + "next", + "min_world", + "max_world", + "rettype", + "rettype_const", + "inferred", + //"edges", + //"absolute_max", + "invoke", "specptr", + "", ""), // function object decls + jl_svec(11, + jl_method_instance_type, + jl_any_type, jl_long_type, jl_long_type, - jl_bool_type, - jl_bool_type, - jl_any_type, // void* - jl_any_type, // void* - jl_any_type, jl_any_type), // void*, void* - 0, 1, 4); + jl_any_type, + jl_any_type, + jl_any_type, + //jl_any_type, + //jl_bool_type, + jl_any_type, jl_any_type, // fptrs + jl_any_type, jl_any_type), // fptrs + 0, 1, 1); + jl_svecset(jl_code_instance_type->types, 1, jl_code_instance_type); // all kinds of types share a method table jl_unionall_type->name->mt = jl_uniontype_type->name->mt = jl_datatype_type->name->mt = @@ -2220,10 +2236,11 @@ void jl_init_types(void) JL_GC_DISABLED #endif jl_svecset(jl_methtable_type->types, 9, jl_uint8_type); jl_svecset(jl_method_type->types, 11, jl_method_instance_type); - jl_svecset(jl_method_instance_type->types, 11, jl_voidpointer_type); - jl_svecset(jl_method_instance_type->types, 12, jl_voidpointer_type); - jl_svecset(jl_method_instance_type->types, 13, jl_voidpointer_type); - jl_svecset(jl_method_instance_type->types, 14, jl_voidpointer_type); + jl_svecset(jl_method_instance_type->types, 5, jl_code_instance_type); + jl_svecset(jl_code_instance_type->types, 7, jl_voidpointer_type); + jl_svecset(jl_code_instance_type->types, 8, jl_voidpointer_type); + jl_svecset(jl_code_instance_type->types, 9, jl_voidpointer_type); + jl_svecset(jl_code_instance_type->types, 10, jl_voidpointer_type); jl_compute_field_offsets(jl_datatype_type); jl_compute_field_offsets(jl_typename_type); @@ -2239,6 +2256,7 @@ void jl_init_types(void) JL_GC_DISABLED jl_compute_field_offsets(jl_phinode_type); jl_compute_field_offsets(jl_module_type); jl_compute_field_offsets(jl_method_instance_type); + jl_compute_field_offsets(jl_code_instance_type); jl_compute_field_offsets(jl_unionall_type); jl_compute_field_offsets(jl_simplevector_type); jl_compute_field_offsets(jl_sym_type); diff --git a/src/julia.h b/src/julia.h index e404c4787c1c1..0d4f3855c43e0 100644 --- a/src/julia.h +++ b/src/julia.h @@ -191,7 +191,7 @@ STATIC_INLINE int jl_array_ndimwords(uint32_t ndims) JL_NOTSAFEPOINT } typedef struct _jl_datatype_t jl_tupletype_t; -struct _jl_method_instance_t; +struct _jl_code_instance_t; // TypeMap is an implicitly defined type // that can consist of any of the following nodes: @@ -203,13 +203,11 @@ struct _jl_method_instance_t; // otherwise the leaf entries are stored sorted, linearly typedef jl_value_t jl_typemap_t; -typedef jl_value_t *(jl_call_t)(struct _jl_method_instance_t*, jl_value_t**, uint32_t); +typedef jl_value_t *(jl_call_t)(struct _jl_code_instance_t*, jl_value_t**, uint32_t); typedef jl_call_t *jl_callptr_t; // "speccall" calling convention signatures. // This describes some of the special ABI used by compiled julia functions. 
-JL_DLLEXPORT extern jl_call_t jl_fptr_trampoline;
-
 JL_DLLEXPORT extern jl_call_t jl_fptr_args;
 typedef jl_value_t *(*jl_fptr_args_t)(jl_value_t*, jl_value_t**, uint32_t);
@@ -219,13 +217,13 @@ JL_DLLEXPORT extern jl_call_t jl_fptr_sparam;
 typedef jl_value_t *(*jl_fptr_sparam_t)(jl_svec_t*, jl_value_t*, jl_value_t**, uint32_t);
 JL_DLLEXPORT extern jl_call_t jl_fptr_interpret_call;
-typedef jl_value_t *(*jl_fptr_interpret_t)(struct _jl_method_instance_t*, jl_value_t*, jl_value_t**, uint32_t, jl_svec_t*);
 JL_EXTENSION typedef union {
     void* fptr;
     jl_fptr_args_t fptr1;
+    // 2 constant
     jl_fptr_sparam_t fptr3;
-    jl_fptr_interpret_t fptr4;
+    // 4 interpreter
 } jl_generic_specptr_t;

 typedef struct _jl_llvm_functions_t {
@@ -273,16 +271,16 @@ typedef struct _jl_method_t {
     struct _jl_module_t *module;
     jl_sym_t *file;
     int32_t line;
+    size_t primary_world;
+    size_t deleted_world;

     // method's type signature. redundant with TypeMapEntry->specTypes
     jl_value_t *sig;
-    size_t min_world;
-    size_t max_world;

-    // list of potentially-ambiguous methods (nothing = none, Vector{Any} of Methods otherwise)
+    // list of potentially-ambiguous methods (nothing = none, Vector{Any} of TypeMapEntry otherwise)
     jl_value_t *ambig;

-    // table of all argument types for which we've inferred or compiled this code
+    // table of all jl_method_instance_t specializations we have
     jl_typemap_t *specializations;

     jl_value_t *slot_syms; // compacted list of slot names (String)
@@ -303,36 +301,52 @@ typedef struct _jl_method_t {
     uint8_t pure;

     // hidden fields:
-    uint8_t traced;
     // lock for modifications to the method
     jl_mutex_t writelock;
 } jl_method_t;

-// This type caches the data for a specType signature specialization of a Method
+// This type is a placeholder to cache data for a specType signature specialization of a Method
+// and can be used as a unique dictionary key representation of a call to a particular Method
+// with a particular set of argument types
 struct _jl_method_instance_t {
     JL_DATA_TYPE
     union {
         jl_value_t *value; // generic accessor
         struct _jl_module_t *module; // this is a toplevel thunk
         jl_method_t *method; // method this is specialized from
-    } def; // context for this lambda definition
+    } def; // pointer back to the context for this code
     jl_value_t *specTypes;  // argument types this was specialized for
-    jl_value_t *rettype; // return type for fptr
-    jl_svec_t *sparam_vals; // static parameter values, indexed by def.method->sig UnionAll tvars
-    jl_array_t *backedges;
-    jl_value_t *inferred;  // inferred jl_code_info_t, or jl_nothing, or null
-    jl_value_t *inferred_const; // inferred constant return value, or null
+    jl_svec_t *sparam_vals; // static parameter values, indexed by def.method->sparam_syms
+    jl_value_t *uninferred; // cached uncompressed code, for generated functions, top-level thunks, or the interpreter
+    jl_array_t *backedges; // list of method-instances which contain a call into this method-instance
+    struct _jl_code_instance_t *cache;
+    uint8_t inInference; // flags to tell if inference is running on this object
+};
+
+// This type represents an executable operation
+typedef struct _jl_code_instance_t {
+    JL_DATA_TYPE
+    jl_method_instance_t *def; // method this is specialized from
+    struct _jl_code_instance_t *next; // pointer to the next cache entry
+
+    // world range for which this object is valid to use
     size_t min_world;
     size_t max_world;
-    uint8_t inInference; // flags to tell if inference is running on this function
-    uint8_t compile_traced; // if set will notify callback if this linfo is
compiled - jl_callptr_t invoke; // jlcall entry point - jl_generic_specptr_t specptr; + // inference state cache + jl_value_t *rettype; // return type for fptr + jl_value_t *rettype_const; // inferred constant return value, or null + jl_value_t *inferred; // inferred jl_code_info_t, or jl_nothing, or null + //TODO: jl_array_t *edges; // stored information about edges from this object + //TODO: uint8_t absolute_max; // whether true max world is unknown + + // compilation state cache + jl_callptr_t invoke; // jlcall entry point + jl_generic_specptr_t specptr; // private data for `jlcall entry point` // names of declarations in the JIT, // suitable for referencing in LLVM IR jl_llvm_functions_t functionObjectsDecls; -}; +} jl_code_instance_t; // all values are callable as Functions typedef jl_value_t jl_function_t; @@ -559,6 +573,7 @@ extern JL_DLLEXPORT jl_datatype_t *jl_builtin_type JL_GLOBALLY_ROOTED; extern JL_DLLEXPORT jl_value_t *jl_bottom_type JL_GLOBALLY_ROOTED; extern JL_DLLEXPORT jl_datatype_t *jl_method_instance_type JL_GLOBALLY_ROOTED; +extern JL_DLLEXPORT jl_datatype_t *jl_code_instance_type JL_GLOBALLY_ROOTED; extern JL_DLLEXPORT jl_datatype_t *jl_code_info_type JL_GLOBALLY_ROOTED; extern JL_DLLEXPORT jl_datatype_t *jl_method_type JL_GLOBALLY_ROOTED; extern JL_DLLEXPORT jl_datatype_t *jl_module_type JL_GLOBALLY_ROOTED; @@ -992,7 +1007,8 @@ static inline int jl_is_layout_opaque(const jl_datatype_layout_t *l) JL_NOTSAFEP #define jl_is_newvarnode(v) jl_typeis(v,jl_newvarnode_type) #define jl_is_linenode(v) jl_typeis(v,jl_linenumbernode_type) #define jl_is_method_instance(v) jl_typeis(v,jl_method_instance_type) -#define jl_is_code_info(v) jl_typeis(v,jl_code_info_type) +#define jl_is_code_instance(v) jl_typeis(v,jl_code_instance_type) +#define jl_is_code_info(v) jl_typeis(v,jl_code_info_type) #define jl_is_method(v) jl_typeis(v,jl_method_type) #define jl_is_module(v) jl_typeis(v,jl_module_type) #define jl_is_mtable(v) jl_typeis(v,jl_methtable_type) @@ -1545,19 +1561,13 @@ JL_DLLEXPORT jl_value_t *jl_load(jl_module_t *module, const char *fname); JL_DLLEXPORT jl_module_t *jl_base_relative_to(jl_module_t *m JL_PROPAGATES_ROOT); // tracing -JL_DLLEXPORT void jl_trace_method(jl_method_t *m); -JL_DLLEXPORT void jl_untrace_method(jl_method_t *m); -JL_DLLEXPORT void jl_trace_linfo(jl_method_instance_t *linfo); -JL_DLLEXPORT void jl_untrace_linfo(jl_method_instance_t *linfo); -JL_DLLEXPORT void jl_register_linfo_tracer(void (*callback)(jl_method_instance_t *tracee)); -JL_DLLEXPORT void jl_register_method_tracer(void (*callback)(jl_method_instance_t *tracee)); JL_DLLEXPORT void jl_register_newmeth_tracer(void (*callback)(jl_method_t *tracee)); // AST access JL_DLLEXPORT jl_value_t *jl_copy_ast(jl_value_t *expr JL_MAYBE_UNROOTED); JL_DLLEXPORT jl_array_t *jl_compress_ast(jl_method_t *m, jl_code_info_t *code); -JL_DLLEXPORT jl_code_info_t *jl_uncompress_ast(jl_method_t *m, jl_array_t *data); +JL_DLLEXPORT jl_code_info_t *jl_uncompress_ast(jl_method_t *m, jl_code_instance_t *metadata, jl_array_t *data); JL_DLLEXPORT uint8_t jl_ast_flag_inferred(jl_array_t *data); JL_DLLEXPORT uint8_t jl_ast_flag_inlineable(jl_array_t *data); JL_DLLEXPORT uint8_t jl_ast_flag_pure(jl_array_t *data); @@ -1586,7 +1596,7 @@ STATIC_INLINE int jl_vinfo_usedundef(uint8_t vi) JL_DLLEXPORT jl_value_t *jl_apply_generic(jl_value_t **args, uint32_t nargs); JL_DLLEXPORT jl_value_t *jl_invoke(jl_method_instance_t *meth, jl_value_t **args, uint32_t nargs); -JL_DLLEXPORT int32_t jl_invoke_api(jl_method_instance_t *mi); 
+JL_DLLEXPORT int32_t jl_invoke_api(jl_code_instance_t *linfo); STATIC_INLINE jl_value_t *jl_apply(jl_value_t **args, uint32_t nargs) diff --git a/src/julia_internal.h b/src/julia_internal.h index 65284d3591830..d864bc4e5908b 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -328,14 +328,19 @@ JL_DLLEXPORT void *jl_gc_counted_malloc(size_t sz); JL_DLLEXPORT void JL_NORETURN jl_throw_out_of_memory_error(void); -jl_code_info_t *jl_type_infer(jl_method_instance_t **pli JL_ROOTS_TEMPORARILY, size_t world, int force); -jl_callptr_t jl_generate_fptr(jl_method_instance_t **pli, jl_llvm_functions_t decls, size_t world); -jl_llvm_functions_t jl_compile_linfo( - jl_method_instance_t **pli, +jl_code_info_t *jl_type_infer(jl_method_instance_t *li, size_t world, int force); +jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *meth, size_t world); +void jl_generate_fptr(jl_code_instance_t *codeinst); +jl_code_instance_t *jl_compile_linfo( + jl_method_instance_t *li, jl_code_info_t *src JL_MAYBE_UNROOTED, size_t world, const jl_cgparams_t *params); -jl_callptr_t jl_compile_method_internal(jl_method_instance_t **pmeth, size_t world); +JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred( + jl_method_instance_t *mi, jl_value_t *rettype, + size_t min_world, size_t max_world); +jl_method_instance_t *jl_get_unspecialized(jl_method_instance_t *method); + JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types); jl_code_info_t *jl_code_for_interpreter(jl_method_instance_t *lam); int jl_code_requires_compiler(jl_code_info_t *src) JL_NOTSAFEPOINT; @@ -572,27 +577,27 @@ static inline void jl_set_gc_and_wait(void) #endif JL_DLLEXPORT jl_value_t *jl_dump_fptr_asm(uint64_t fptr, int raw_mc, const char *asm_variant, const char *debuginfo); - void jl_dump_native(const char *bc_fname, const char *unopt_bc_fname, const char *obj_fname, const char *sysimg_data, size_t sysimg_len); int32_t jl_get_llvm_gv(jl_value_t *p) JL_NOTSAFEPOINT; int32_t jl_assign_functionID(const char *fname); + // the first argument to jl_idtable_rehash is used to return a value // make sure it is rooted if it is used after the function returns JL_DLLEXPORT jl_array_t *jl_idtable_rehash(jl_array_t *a, size_t newsz); JL_DLLEXPORT jl_methtable_t *jl_new_method_table(jl_sym_t *name, jl_module_t *module); -jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types, size_t world, int mt_cache); +jl_method_instance_t *jl_get_specialization1(jl_tupletype_t *types, size_t world, size_t *min_valid, size_t *max_valid, int mt_cache); JL_DLLEXPORT int jl_has_call_ambiguities(jl_value_t *types, jl_method_t *m); jl_method_instance_t *jl_get_specialized(jl_method_t *m, jl_value_t *types, jl_svec_t *sp); -int jl_is_rettype_inferred(jl_method_instance_t *li); +JL_DLLEXPORT jl_code_instance_t *jl_rettype_inferred(jl_method_instance_t *li, size_t min_world, size_t max_world); JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt, jl_value_t *type, size_t world); JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo( - jl_method_t *m JL_PROPAGATES_ROOT, jl_value_t *type, jl_svec_t *sparams, size_t world); + jl_method_t *m, jl_value_t *type, jl_svec_t *sparams); JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, jl_method_instance_t *caller); JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *typ, jl_value_t *caller); uint32_t jl_module_next_counter(jl_module_t *m); -void jl_fptr_to_llvm(void *fptr, jl_method_instance_t *lam, int spec_abi); +void 
jl_fptr_to_llvm(void *fptr, jl_code_instance_t *codeinst, int spec_abi); jl_tupletype_t *arg_type_tuple(jl_value_t **args, size_t nargs); int jl_has_meta(jl_array_t *body, jl_sym_t *sym); diff --git a/src/method.c b/src/method.c index ff25b66eba2d2..a936462caca76 100644 --- a/src/method.c +++ b/src/method.c @@ -287,21 +287,13 @@ JL_DLLEXPORT jl_method_instance_t *jl_new_method_instance_uninit(void) jl_method_instance_t *li = (jl_method_instance_t*)jl_gc_alloc(ptls, sizeof(jl_method_instance_t), jl_method_instance_type); - li->inferred = NULL; - li->inferred_const = NULL; - li->rettype = (jl_value_t*)jl_any_type; + li->def.value = NULL; + li->specTypes = NULL; li->sparam_vals = jl_emptysvec; + li->uninferred = NULL; li->backedges = NULL; - li->invoke = jl_fptr_trampoline; - li->specptr.fptr = NULL; - li->compile_traced = 0; - li->functionObjectsDecls.functionObject = NULL; - li->functionObjectsDecls.specFunctionObject = NULL; - li->specTypes = NULL; + li->cache = NULL; li->inInference = 0; - li->def.value = NULL; - li->min_world = 0; - li->max_world = 0; return li; } @@ -322,8 +314,8 @@ JL_DLLEXPORT jl_code_info_t *jl_new_code_info_uninit(void) src->slottypes = jl_nothing; src->parent = (jl_method_instance_t*)jl_nothing; src->rettype = (jl_value_t*)jl_any_type; - src->min_world = 0; - src->max_world = 0; + src->min_world = 1; + src->max_world = ~(size_t)0; src->inferred = 0; src->inlineable = 0; src->propagate_inbounds = 0; @@ -404,7 +396,7 @@ JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *linfo) JL_TRY { ptls->in_pure_callback = 1; // and the right world - ptls->world_age = def->min_world; + ptls->world_age = def->primary_world; // invoke code generator jl_tupletype_t *ttdt = (jl_tupletype_t*)jl_unwrap_unionall(tt); @@ -459,8 +451,6 @@ jl_method_instance_t *jl_get_specialized(jl_method_t *m, jl_value_t *types, jl_s new_linfo->def.method = m; new_linfo->specTypes = types; new_linfo->sparam_vals = sp; - new_linfo->min_world = m->min_world; - new_linfo->max_world = m->max_world; return new_linfo; } @@ -595,46 +585,12 @@ JL_DLLEXPORT jl_method_t *jl_new_method_uninit(jl_module_t *module) m->invokes = NULL; m->isva = 0; m->nargs = 0; - m->traced = 0; - m->min_world = 1; - m->max_world = ~(size_t)0; + m->primary_world = 1; + m->deleted_world = ~(size_t)0; JL_MUTEX_INIT(&m->writelock); return m; } -jl_array_t *jl_all_methods JL_GLOBALLY_ROOTED; -static jl_method_t *jl_new_method( - jl_code_info_t *definition, - jl_sym_t *name, - jl_module_t *inmodule, - jl_tupletype_t *sig, - size_t nargs, - int isva) -{ - jl_method_t *m = jl_new_method_uninit(inmodule); - JL_GC_PUSH1(&m); - m->sig = (jl_value_t*)sig; - m->name = name; - m->isva = isva; - m->nargs = nargs; - jl_method_set_source(m, definition); - -#ifdef RECORD_METHOD_ORDER - if (jl_all_methods == NULL) - jl_all_methods = jl_alloc_vec_any(0); -#endif - if (jl_all_methods != NULL) { - while (jl_array_len(jl_all_methods) < jl_world_counter) - jl_array_ptr_1d_push(jl_all_methods, NULL); - jl_array_ptr_1d_push(jl_all_methods, (jl_value_t*)m); - } - - JL_GC_POP(); - m->min_world = ++jl_world_counter; - m->max_world = ~(size_t)0; - return m; -} - // method definition ---------------------------------------------------------- void print_func_loc(JL_STREAM *s, jl_method_t *m); @@ -711,6 +667,7 @@ JL_DLLEXPORT jl_value_t *jl_argument_datatype(jl_value_t *argt JL_PROPAGATES_ROO } extern tracer_cb jl_newmeth_tracer; +jl_array_t *jl_all_methods JL_GLOBALLY_ROOTED; JL_DLLEXPORT void jl_method_def(jl_svec_t *argdata, jl_code_info_t *f, 
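The julia_internal.h hunk above shows how the pipeline decomposes under the new model: `jl_rettype_inferred` queries a MethodInstance's cache for an inferred CodeInstance valid over a world range, `jl_type_infer` runs inference (populating that cache as a side effect, as the reflection tests later in this patch check), and `jl_compile_method_internal` turns a valid entry into something callable. A hypothetical composition of those calls (the helper name and the retry policy are illustrative, not part of the patch):

    // Hypothetical: fetch inference results for `mi` at `world`, inferring on demand.
    jl_code_instance_t *get_inferred_codeinst(jl_method_instance_t *mi, size_t world)
    {
        jl_code_instance_t *codeinst = jl_rettype_inferred(mi, world, world);
        if (codeinst)
            return codeinst;               // already inferred for this world
        (void)jl_type_infer(mi, world, 0); // may add entries to mi->cache
        return jl_rettype_inferred(mi, world, world); // NULL if inference failed
    }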
@@ -757,7 +714,12 @@ JL_DLLEXPORT void jl_method_def(jl_svec_t *argdata, // the result is that the closure variables get interpolated directly into the AST f = jl_new_code_info_from_ast((jl_expr_t*)f); } - m = jl_new_method(f, name, module, (jl_tupletype_t*)argtype, nargs, isva); + m = jl_new_method_uninit(module); + m->sig = argtype; + m->name = name; + m->isva = isva; + m->nargs = nargs; + jl_method_set_source(m, f); if (jl_has_free_typevars(argtype)) { jl_exceptionf(jl_argumenterror_type, @@ -794,6 +756,16 @@ JL_DLLEXPORT void jl_method_def(jl_svec_t *argdata, m->line); } +#ifdef RECORD_METHOD_ORDER + if (jl_all_methods == NULL) + jl_all_methods = jl_alloc_vec_any(0); +#endif + if (jl_all_methods != NULL) { + while (jl_array_len(jl_all_methods) < m->primary_world) + jl_array_ptr_1d_push(jl_all_methods, NULL); + jl_array_ptr_1d_push(jl_all_methods, (jl_value_t*)m); + } + jl_method_table_insert(mt, m, NULL); if (jl_newmeth_tracer) jl_call_tracer(jl_newmeth_tracer, (jl_value_t*)m); diff --git a/src/precompile.c b/src/precompile.c index 6209e1f9d8dad..8653e8bdc266c 100644 --- a/src/precompile.c +++ b/src/precompile.c @@ -247,39 +247,32 @@ static void _compile_all_deq(jl_array_t *found) { int found_i, found_l = jl_array_len(found); jl_printf(JL_STDERR, "found %d uncompiled methods for compile-all\n", (int)found_l); - jl_method_instance_t *linfo = NULL; + jl_method_instance_t *mi = NULL; jl_value_t *src = NULL; - JL_GC_PUSH2(&linfo, &src); + JL_GC_PUSH2(&mi, &src); for (found_i = 0; found_i < found_l; found_i++) { if (found_i % (1 + found_l / 300) == 0 || found_i == found_l - 1) // show 300 progress steps, to show progress without overwhelming log files jl_printf(JL_STDERR, " %d / %d\r", found_i + 1, found_l); jl_typemap_entry_t *ml = (jl_typemap_entry_t*)jl_array_ptr_ref(found, found_i); jl_method_t *m = ml->func.method; - if (m->source == NULL) // TODO: generic implementations of generated functions + if (m->source == NULL) // TODO: generic implementations of generated functions continue; - linfo = m->unspecialized; - if (!linfo) { - linfo = jl_get_specialized(m, (jl_value_t*)m->sig, jl_emptysvec); - m->unspecialized = linfo; - jl_gc_wb(m, linfo); - } - - if (linfo->invoke != jl_fptr_trampoline) + mi = jl_get_unspecialized(mi); + assert(mi == m->unspecialized); // make sure we didn't get tricked by a generated function, since we can't handle those + jl_code_instance_t *ucache = jl_get_method_inferred(mi, (jl_value_t*)jl_any_type, 1, ~(size_t)0); + if (ucache->invoke != NULL) continue; src = m->source; - // TODO: the `unspecialized` field is not yet world-aware, so we can't store - // an inference result there. 
- //src = jl_type_infer(&linfo, jl_world_counter, 1); - //m->unspecialized = linfo; - //jl_gc_wb(m, linfo); - //if (linfo->trampoline != jl_fptr_trampoline) + // TODO: we could now enable storing inferred function pointers in the `unspecialized` cache + //src = jl_type_infer(mi, jl_world_counter, 1); + //if (ucache->invoke != NULL) // continue; // first try to create leaf signatures from the signature declaration and compile those _compile_all_union((jl_value_t*)ml->sig); // then also compile the generic fallback - jl_compile_linfo(&linfo, (jl_code_info_t*)src, jl_world_counter, &jl_default_cgparams); - assert(linfo->functionObjectsDecls.functionObject != NULL); + jl_compile_linfo(mi, (jl_code_info_t*)src, 1, &jl_default_cgparams); + assert(ucache->functionObjectsDecls.functionObject != NULL); } JL_GC_POP(); jl_printf(JL_STDERR, "\n"); @@ -292,8 +285,9 @@ static int compile_all_enq__(jl_typemap_entry_t *ml, void *env) jl_method_t *m = ml->func.method; if (m->source && (!m->unspecialized || - (m->unspecialized->functionObjectsDecls.functionObject == NULL && - m->unspecialized->invoke == jl_fptr_trampoline))) { + !m->unspecialized->cache || + (m->unspecialized->cache->functionObjectsDecls.functionObject == NULL && + m->unspecialized->cache->invoke == NULL))) { // found a lambda that still needs to be compiled jl_array_ptr_1d_push(found, (jl_value_t*)ml); } @@ -327,20 +321,33 @@ static void jl_compile_all_defs(void) static int precompile_enq_all_cache__(jl_typemap_entry_t *l, void *closure) { - jl_array_ptr_1d_push((jl_array_t*)closure, (jl_value_t*)l->sig); + jl_array_ptr_1d_push((jl_array_t*)closure, (jl_value_t*)l->func.linfo); return 1; } static int precompile_enq_specialization_(jl_typemap_entry_t *l, void *closure) { - if (jl_is_method_instance(l->func.value) && - l->func.linfo->functionObjectsDecls.functionObject == NULL && - l->func.linfo->invoke != jl_fptr_const_return && - (l->func.linfo->inferred && - l->func.linfo->inferred != jl_nothing && - jl_ast_flag_inferred((jl_array_t*)l->func.linfo->inferred) && - !jl_ast_flag_inlineable((jl_array_t*)l->func.linfo->inferred))) - jl_array_ptr_1d_push((jl_array_t*)closure, (jl_value_t*)l->sig); + jl_method_instance_t *mi = l->func.linfo; + assert(jl_is_method_instance(mi)); + jl_code_instance_t *codeinst = mi->cache; + while (codeinst) { + int do_compile = 0; + if (codeinst->functionObjectsDecls.functionObject == NULL && codeinst->invoke != jl_fptr_const_return) { + if (codeinst->inferred && codeinst->inferred != jl_nothing && + jl_ast_flag_inferred((jl_array_t*)codeinst->inferred) && + !jl_ast_flag_inlineable((jl_array_t*)codeinst->inferred)) { + do_compile = 1; + } + else if (codeinst->invoke != NULL) { + do_compile = 1; + } + } + if (do_compile) { + jl_array_ptr_1d_push((jl_array_t*)closure, (jl_value_t*)mi); + return 1; + } + codeinst = codeinst->next; + } return 1; } @@ -349,8 +356,8 @@ static int precompile_enq_all_specializations__(jl_typemap_entry_t *def, void *c jl_method_t *m = def->func.method; if (m->name == jl_symbol("__init__") && jl_is_dispatch_tupletype(m->sig)) { // ensure `__init__()` gets strongly-hinted, specialized, and compiled - jl_specializations_get_linfo(m, m->sig, jl_emptysvec, jl_world_counter); - jl_array_ptr_1d_push((jl_array_t*)closure, (jl_value_t*)m->sig); + jl_method_instance_t *mi = jl_specializations_get_linfo(m, m->sig, jl_emptysvec); + jl_array_ptr_1d_push((jl_array_t*)closure, (jl_value_t*)mi); } else { jl_typemap_visitor(def->func.method->specializations, precompile_enq_specialization_, closure); @@ 
-364,6 +371,8 @@ static void precompile_enq_all_specializations_(jl_methtable_t *mt, void *env) jl_typemap_visitor(mt->cache, precompile_enq_all_cache__, env); } +void jl_compile_now(jl_method_instance_t *mi); + static void jl_compile_specializations(void) { // this "found" array will contain function @@ -371,11 +380,12 @@ static void jl_compile_specializations(void) jl_array_t *m = jl_alloc_vec_any(0); JL_GC_PUSH1(&m); jl_foreach_reachable_mtable(precompile_enq_all_specializations_, m); - // Ensure stable ordering to make inference problems more reproducible (#29923) - jl_sort_types((jl_value_t**)jl_array_data(m), jl_array_len(m)); - size_t i, l; - for (i = 0, l = jl_array_len(m); i < l; i++) { - jl_compile_hint((jl_tupletype_t*)jl_array_ptr_ref(m, i)); + // TODO: Ensure stable ordering to make inference problems more reproducible (#29923) + //jl_sort_types((jl_value_t**)jl_array_data(m), jl_array_len(m)); + size_t i, l = jl_array_len(m); + for (i = 0; i < l; i++) { + jl_method_instance_t *mi = (jl_method_instance_t*)jl_array_ptr_ref(m, i); + jl_compile_now(mi); } JL_GC_POP(); } diff --git a/src/rtutils.c b/src/rtutils.c index d7c1b83213836..7818690b0f4e0 100644 --- a/src/rtutils.c +++ b/src/rtutils.c @@ -636,7 +636,7 @@ static size_t jl_static_show_x_(JL_STREAM *out, jl_value_t *v, jl_datatype_t *vt else { n += jl_static_show_x(out, (jl_value_t*)li->def.module, depth); n += jl_printf(out, ". -> "); - n += jl_static_show_x(out, li->inferred, depth); + n += jl_static_show_x(out, li->uninferred, depth); } } else if (vt == jl_simplevector_type) { diff --git a/src/staticdata.c b/src/staticdata.c index 60bcb21acc321..e41d377cdc231 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -109,7 +109,7 @@ enum RefTags { // calling conventions for internal entry points. 
// this is used to set the method-instance->invoke field typedef enum { - JL_API_TRAMPOLINE, + JL_API_NULL, JL_API_BOXED, JL_API_CONST, JL_API_WITH_PARAMETERS, @@ -673,21 +673,21 @@ static void jl_write_values(jl_serializer_state *s) if (jl_is_method(v)) { write_padding(s->s, sizeof(jl_method_t) - tot); } - else if (jl_is_method_instance(v)) { - jl_method_instance_t *m = (jl_method_instance_t*)v; - jl_method_instance_t *newm = (jl_method_instance_t*)&s->s->buf[reloc_offset]; + else if (jl_is_code_instance(v)) { + jl_code_instance_t *m = (jl_code_instance_t*)v; + jl_code_instance_t *newm = (jl_code_instance_t*)&s->s->buf[reloc_offset]; newm->invoke = NULL; newm->specptr.fptr = NULL; newm->functionObjectsDecls.functionObject = NULL; newm->functionObjectsDecls.specFunctionObject = NULL; - uintptr_t fptr_id = JL_API_TRAMPOLINE; + uintptr_t fptr_id = JL_API_NULL; uintptr_t specfptr_id = 0; if (m->invoke == jl_fptr_const_return) { fptr_id = JL_API_CONST; } else { - if (jl_is_method(m->def.method)) { + if (jl_is_method(m->def->def.method)) { specfptr_id = jl_fptr_id(m->specptr.fptr); const char *fname = m->functionObjectsDecls.functionObject; if (specfptr_id) { // found in the table of builtins @@ -727,10 +727,12 @@ static void jl_write_values(jl_serializer_state *s) } } newm->invoke = NULL; // relocation offset - arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_method_instance_t, invoke))); // relocation location - arraylist_push(&s->relocs_list, (void*)(((uintptr_t)FunctionRef << RELOC_TAG_OFFSET) + fptr_id)); // relocation target + if (fptr_id != JL_API_NULL) { + arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_code_instance_t, invoke))); // relocation location + arraylist_push(&s->relocs_list, (void*)(((uintptr_t)FunctionRef << RELOC_TAG_OFFSET) + fptr_id)); // relocation target + } if (specfptr_id >= 2) { - arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_method_instance_t, specptr.fptr))); // relocation location + arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_code_instance_t, specptr.fptr))); // relocation location arraylist_push(&s->relocs_list, (void*)(((uintptr_t)BuiltinFunctionRef << RELOC_TAG_OFFSET) + specfptr_id - 2)); // relocation target } } @@ -895,15 +897,14 @@ static inline uintptr_t get_item_for_reloc(jl_serializer_state *s, uintptr_t bas case JL_API_WITH_PARAMETERS: if (sysimg_fptrs.base) return (uintptr_t)jl_fptr_sparam; - JL_FALLTHROUGH; - case JL_API_TRAMPOLINE: - return (uintptr_t)jl_fptr_trampoline; + return (uintptr_t)NULL; case JL_API_CONST: return (uintptr_t)jl_fptr_const_return; case JL_API_INTERPRETED: return (uintptr_t)jl_fptr_interpret_call; case JL_API_BUILTIN: return (uintptr_t)jl_fptr_args; + case JL_API_NULL: case JL_API_MAX: //default: assert("corrupt relocation item id"); @@ -1024,11 +1025,11 @@ static void jl_update_all_fptrs(jl_serializer_state *s) specfunc = 0; offset = ~offset; } - jl_method_instance_t *li = (jl_method_instance_t*)(base + offset); + jl_code_instance_t *codeinst = (jl_code_instance_t*)(base + offset); uintptr_t base = (uintptr_t)fvars.base; - assert(jl_is_method(li->def.method) && li->invoke != jl_fptr_const_return); - assert(specfunc ? li->invoke != jl_fptr_trampoline : li->invoke == jl_fptr_trampoline); - linfos[i] = li; + assert(jl_is_method(codeinst->def->def.method) && codeinst->invoke != jl_fptr_const_return); + assert(specfunc ? 
codeinst->invoke != NULL : codeinst->invoke == NULL); + linfos[i] = codeinst->def; int32_t offset = fvars.offsets[i]; for (; clone_idx < fvars.nclones; clone_idx++) { uint32_t idx = fvars.clone_idxs[clone_idx] & jl_sysimg_val_mask; @@ -1040,10 +1041,10 @@ static void jl_update_all_fptrs(jl_serializer_state *s) } void *fptr = (void*)(base + offset); if (specfunc) - li->specptr.fptr = fptr; + codeinst->specptr.fptr = fptr; else - li->invoke = (jl_callptr_t)fptr; - jl_fptr_to_llvm(fptr, li, specfunc); + codeinst->invoke = (jl_callptr_t)fptr; + jl_fptr_to_llvm(fptr, codeinst, specfunc); } } jl_register_fptrs(sysimage_base, &fvars, linfos, sysimg_fvars_max); @@ -1629,7 +1630,7 @@ static void jl_init_serializer2(int for_serialize) jl_symbol_type, jl_ssavalue_type, jl_datatype_type, jl_slotnumber_type, jl_simplevector_type, jl_array_type, jl_typedslot_type, jl_expr_type, jl_globalref_type, jl_string_type, - jl_module_type, jl_tvar_type, jl_method_instance_type, jl_method_type, + jl_module_type, jl_tvar_type, jl_method_instance_type, jl_method_type, jl_code_instance_type, jl_emptysvec, jl_emptytuple, jl_false, jl_true, jl_nothing, jl_any_type, call_sym, invoke_sym, goto_ifnot_sym, return_sym, jl_symbol("tuple"), unreachable_sym, jl_an_empty_string, @@ -1654,7 +1655,7 @@ static void jl_init_serializer2(int for_serialize) jl_methtable_type->name, jl_typemap_level_type->name, jl_typemap_entry_type->name, jl_tvar_type->name, ((jl_datatype_t*)jl_unwrap_unionall((jl_value_t*)jl_abstractarray_type))->name, ((jl_datatype_t*)jl_unwrap_unionall((jl_value_t*)jl_densearray_type))->name, - jl_vararg_typename, jl_void_type->name, jl_method_instance_type->name, jl_method_type->name, + jl_vararg_typename, jl_void_type->name, jl_method_instance_type->name, jl_method_type->name, jl_code_instance_type->name, jl_module_type->name, jl_function_type->name, jl_typedslot_type->name, jl_abstractslot_type->name, jl_slotnumber_type->name, jl_unionall_type->name, jl_intrinsic_type->name, jl_task_type->name, diff --git a/src/threading.c b/src/threading.c index e6475ac376af7..d17da19d96007 100644 --- a/src/threading.c +++ b/src/threading.c @@ -483,8 +483,8 @@ JL_DLLEXPORT void jl_threading_run(jl_value_t *func) size_t world = jl_world_counter; jl_method_instance_t *mfunc = jl_lookup_generic(&func, 1, jl_int32hash_fast(jl_return_address()), world); // Ignore constant return value for now. 
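The comment above ("Ignore constant return value for now.") is made concrete by the change that follows: `jl_compile_method_internal` now hands back a CodeInstance, and one whose entry point is `jl_fptr_const_return` need not be run on the worker threads at all. Presumably (the entry point's body is not shown in this excerpt) such a call reduces to reading the stored constant:

    // Sketch: a const-return CodeInstance carries its result in rettype_const,
    // which the inference tests later in this patch rely on (invoke_api == 2).
    jl_value_t *maybe_const_result(jl_code_instance_t *codeinst)
    {
        if (codeinst->invoke == jl_fptr_const_return)
            return codeinst->rettype_const;  // no call needed
        return NULL;                         // must invoke normally
    }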
- jl_callptr_t fptr = jl_compile_method_internal(&mfunc, world); - if (fptr == jl_fptr_const_return) + jl_code_instance_t *fptr = jl_compile_method_internal(mfunc, world); + if (fptr->invoke == jl_fptr_const_return) return; size_t nthreads = jl_n_threads; diff --git a/src/toplevel.c b/src/toplevel.c index fdc17f7aef9b0..2be53b2980367 100644 --- a/src/toplevel.c +++ b/src/toplevel.c @@ -514,7 +514,7 @@ void jl_resolve_globals_in_ir(jl_array_t *stmts, jl_module_t *m, jl_svec_t *spar static jl_method_instance_t *method_instance_for_thunk(jl_code_info_t *src, jl_module_t *module) { jl_method_instance_t *li = jl_new_method_instance_uninit(); - li->inferred = (jl_value_t*)src; + li->uninferred = (jl_value_t*)src; li->specTypes = (jl_value_t*)jl_emptytuple_type; li->def.module = module; return li; @@ -785,10 +785,10 @@ jl_value_t *jl_toplevel_eval_flex(jl_module_t *JL_NONNULL m, jl_value_t *e, int size_t world = jl_world_counter; ptls->world_age = world; if (!has_defs) { - jl_type_infer(&li, world, 0); + (void)jl_type_infer(li, world, 0); } jl_value_t *dummy_f_arg = NULL; - result = li->invoke(li, &dummy_f_arg, 1); + result = jl_invoke(li, &dummy_f_arg, 1); ptls->world_age = last_age; } else { @@ -818,7 +818,7 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_in(jl_module_t *m, jl_value_t *ex) if (m != jl_main_module) { // TODO: this was grand-fathered in jl_printf(JL_STDERR, "WARNING: eval into closed module %s:\n", jl_symbol_name(m->name)); jl_static_show(JL_STDERR, ex); - jl_printf(JL_STDERR, "\n ** incremental compilation may be broken for this module **\n\n"); + jl_printf(JL_STDERR, "\n ** incremental compilation may be fatally broken for this module **\n\n"); } } } @@ -839,9 +839,11 @@ JL_DLLEXPORT jl_value_t *jl_infer_thunk(jl_code_info_t *thk, jl_module_t *m) jl_method_instance_t *li = method_instance_for_thunk(thk, m); JL_GC_PUSH1(&li); jl_resolve_globals_in_ir((jl_array_t*)thk->code, m, NULL, 0); - jl_type_infer(&li, jl_get_ptls_states()->world_age, 0); + jl_code_info_t *src = jl_type_infer(li, jl_get_ptls_states()->world_age, 0); JL_GC_POP(); - return li->rettype; + if (src) + return src->rettype; + return (jl_value_t*)jl_any_type; } JL_DLLEXPORT jl_value_t *jl_load(jl_module_t *module, const char *fname) diff --git a/src/typemap.c b/src/typemap.c index ea1df9fb517ec..ef7aa50120204 100644 --- a/src/typemap.c +++ b/src/typemap.c @@ -495,11 +495,25 @@ static int jl_typemap_intersection_node_visitor(jl_typemap_entry_t *ml, struct t int jl_typemap_intersection_visitor(jl_typemap_t *map, int offs, struct typemap_intersection_env *closure) { + jl_value_t *ttypes = jl_unwrap_unionall(closure->type); + assert(jl_is_datatype(ttypes)); + //TODO: fast-path for leaf-type tuples? 
+ //if (ttypes->isdispatchtuple) { + // register jl_typemap_intersection_visitor_fptr fptr = closure->fptr; + // size_t world = jl_world_counter; + // while (world > 0) { + // jl_typemap_entry_t *ml = jl_typemap_assoc_by_type(map, ttypes, &closure->env, /*subtype*/1, offs, world, /*max_world_mask*/(~(size_t)0) >> 1); + // if (!ml) + // break; + // if (!fptr(ml, closure)) + // return 0; + // world = ml->min_world - 1; + // } + // return 1; + //} if (jl_typeof(map) == (jl_value_t *)jl_typemap_level_type) { jl_typemap_level_t *cache = (jl_typemap_level_t*)map; jl_value_t *ty = NULL; - jl_value_t *ttypes = jl_unwrap_unionall(closure->type); - assert(jl_is_datatype(ttypes)); size_t l = jl_field_count(ttypes); if (closure->va && l <= offs + 1) { ty = closure->va; @@ -1002,8 +1016,14 @@ jl_typemap_entry_t *jl_typemap_insert(jl_typemap_t **cache, jl_value_t *parent, if (ml && ml->simplesig == (void*)jl_nothing) { if (overwritten != NULL) *overwritten = ml->func.value; - if (newvalue == ml->func.value) // no change. TODO: involve world in computation! + if (newvalue == ml->func.value) { + // just update the existing entry to reflect new knowledge + if (ml->min_world > min_world) + ml->min_world = min_world; + if (ml->max_world < max_world) + ml->max_world = max_world; return ml; + } if (newvalue == NULL) // don't overwrite with guard entries return ml; ml->max_world = min_world - 1; @@ -1071,7 +1091,8 @@ static void jl_typemap_list_insert_sorted(jl_typemap_entry_t **pml, jl_value_t * if (!l->isleafsig) { // quickly ignore all of the leafsig entries (these were handled by caller) if (jl_type_morespecific((jl_value_t*)newrec->sig, (jl_value_t*)l->sig)) { if (l->simplesig == (void*)jl_nothing || - newrec->simplesig != (void*)jl_nothing || !jl_types_equal((jl_value_t*)l->sig, (jl_value_t*)newrec->sig)) { + newrec->simplesig != (void*)jl_nothing || + !jl_types_equal((jl_value_t*)l->sig, (jl_value_t*)newrec->sig)) { // might need to insert multiple entries for a lookup differing only by their simplesig // when simplesig contains a kind // TODO: make this test more correct or figure out a better way to compute this diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index d9a9136074d8d..34bb3ca0942af 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -375,7 +375,7 @@ function get_type_call(expr::Expr) found ? push!(args, typ) : push!(args, Any) end # use _methods_by_ftype as the function is supplied as a type - world = ccall(:jl_get_world_counter, UInt, ()) + world = Base.get_world_counter() mt = Base._methods_by_ftype(Tuple{ft, args...}, -1, world) length(mt) == 1 || return (Any, false) m = first(mt) diff --git a/stdlib/Serialization/src/Serialization.jl b/stdlib/Serialization/src/Serialization.jl index 9db97490e2078..8d4f300064ff6 100644 --- a/stdlib/Serialization/src/Serialization.jl +++ b/stdlib/Serialization/src/Serialization.jl @@ -391,16 +391,15 @@ function serialize(s::AbstractSerializer, meth::Method) serialize(s, meth.line) serialize(s, meth.sig) serialize(s, meth.slot_syms) - serialize(s, meth.ambig) serialize(s, meth.nargs) serialize(s, meth.isva) if isdefined(meth, :source) - serialize(s, uncompressed_ast(meth, meth.source)) + serialize(s, Base._uncompressed_ast(meth, meth.source)) else serialize(s, nothing) end if isdefined(meth, :generator) - serialize(s, uncompressed_ast(meth, meth.generator.inferred)) + serialize(s, Base._uncompressed_ast(meth, meth.generator.inferred)) # XXX: what was this supposed to do? 
else serialize(s, nothing) end @@ -411,14 +410,8 @@ function serialize(s::AbstractSerializer, linfo::Core.MethodInstance) serialize_cycle(s, linfo) && return isa(linfo.def, Module) || error("can only serialize toplevel MethodInstance objects") writetag(s.io, METHODINSTANCE_TAG) - serialize(s, linfo.inferred) - if isdefined(linfo, :inferred_const) - serialize(s, linfo.inferred_const) - else - writetag(s.io, UNDEFREF_TAG) - end + serialize(s, linfo.uninferred) serialize(s, linfo.sparam_vals) - serialize(s, linfo.rettype) serialize(s, linfo.specTypes) serialize(s, linfo.def) nothing @@ -910,7 +903,6 @@ function deserialize(s::AbstractSerializer, ::Type{Method}) line = deserialize(s)::Int32 sig = deserialize(s)::Type slot_syms = deserialize(s)::String - ambig = deserialize(s)::Union{Array{Any,1}, Nothing} nargs = deserialize(s)::Int32 isva = deserialize(s)::Bool template = deserialize(s) @@ -922,12 +914,11 @@ function deserialize(s::AbstractSerializer, ::Type{Method}) meth.line = line meth.sig = sig meth.slot_syms = slot_syms - meth.ambig = ambig meth.nargs = nargs meth.isva = isva - # TODO: compress template if template !== nothing - meth.source = template + # TODO: compress template + meth.source = template::CodeInfo meth.pure = template.pure end if generator !== nothing @@ -949,13 +940,8 @@ end function deserialize(s::AbstractSerializer, ::Type{Core.MethodInstance}) linfo = ccall(:jl_new_method_instance_uninit, Ref{Core.MethodInstance}, (Ptr{Cvoid},), C_NULL) deserialize_cycle(s, linfo) - linfo.inferred = deserialize(s)::CodeInfo - tag = Int32(read(s.io, UInt8)::UInt8) - if tag != UNDEFREF_TAG - linfo.inferred_const = handle_deserialize(s, tag) - end + linfo.uninferred = deserialize(s)::CodeInfo linfo.sparam_vals = deserialize(s)::SimpleVector - linfo.rettype = deserialize(s) linfo.specTypes = deserialize(s) linfo.def = deserialize(s)::Module return linfo diff --git a/stdlib/Serialization/test/runtests.jl b/stdlib/Serialization/test/runtests.jl index dd275d227dd7a..a88cfd4cf5f20 100644 --- a/stdlib/Serialization/test/runtests.jl +++ b/stdlib/Serialization/test/runtests.jl @@ -315,10 +315,12 @@ main_ex = quote seekstart(s) ds = Serializer(s) local g2 = deserialize(ds) - $Test.@test g2 !== g - $Test.@test g2() == :magic_token_anon_fun_test - $Test.@test g2() == :magic_token_anon_fun_test - $Test.@test deserialize(ds) === g2 + Base.invokelatest() do + $Test.@test g2 !== g + $Test.@test g2() == :magic_token_anon_fun_test + $Test.@test g2() == :magic_token_anon_fun_test + $Test.@test deserialize(ds) === g2 + end # issue #21793 y = x -> (() -> x) @@ -326,8 +328,10 @@ main_ex = quote serialize(s, y) seekstart(s) y2 = deserialize(s) - x2 = y2(2) - $Test.@test x2() == 2 + Base.invokelatest() do + x2 = y2(2) + $Test.@test x2() == 2 + end end end # This needs to be run on `Main` since the serializer treats it differently. diff --git a/stdlib/Test/src/Test.jl b/stdlib/Test/src/Test.jl index 1d6ef5025f247..bd3bf2e53cfd5 100644 --- a/stdlib/Test/src/Test.jl +++ b/stdlib/Test/src/Test.jl @@ -1411,12 +1411,12 @@ function detect_ambiguities(mods...; imported::Bool = false, recursive::Bool = false, ambiguous_bottom::Bool = false) - function sortdefs(m1, m2) + function sortdefs(m1::Method, m2::Method) ord12 = m1.file < m2.file if !ord12 && (m1.file == m2.file) ord12 = m1.line < m2.line end - ord12 ? (m1, m2) : (m2, m1) + return ord12 ? 
(m1, m2) : (m2, m1) end ambs = Set{Tuple{Method,Method}}() for mod in mods @@ -1436,8 +1436,8 @@ function detect_ambiguities(mods...; for m in mt if m.ambig !== nothing for m2 in m.ambig - if Base.isambiguous(m, m2, ambiguous_bottom=ambiguous_bottom) - push!(ambs, sortdefs(m, m2)) + if Base.isambiguous(m, m2.func, ambiguous_bottom=ambiguous_bottom) + push!(ambs, sortdefs(m, m2.func)) end end end diff --git a/test/ambiguous.jl b/test/ambiguous.jl index 6be9c01ca2b9f..573924afb5674 100644 --- a/test/ambiguous.jl +++ b/test/ambiguous.jl @@ -1,6 +1,6 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -world_counter() = ccall(:jl_get_world_counter, UInt, ()) +using Base: get_world_counter # DO NOT ALTER ORDER OR SPACING OF METHODS BELOW const lineoffset = @__LINE__ @@ -16,19 +16,17 @@ using LinearAlgebra, SparseArrays # For curmod_* include("testenv.jl") -ambigs = Any[[], [3], [2,5], [], [3]] - -mt = methods(ambig) - +getline(m::Core.TypeMapEntry) = getline(m.func::Method) getline(m::Method) = m.line - lineoffset -for m in mt +ambigs = Any[[], [3], [2, 5], [], [3]] +for m in methods(ambig) ln = getline(m) atarget = ambigs[ln] if isempty(atarget) @test m.ambig === nothing else - aln = Int[getline(a) for a in m.ambig] + aln = Int[getline(a::Core.TypeMapEntry) for a in m.ambig] @test sort(aln) == atarget end end @@ -81,7 +79,7 @@ end let io = IOBuffer() @test precompile(ambig, (UInt8, Int)) == false cf = @eval @cfunction(ambig, Int, (UInt8, Int)) # test for a crash (doesn't throw an error) - @test_throws(MethodError(ambig, (UInt8(1), Int(2)), world_counter()), + @test_throws(MethodError(ambig, (UInt8(1), Int(2)), get_world_counter()), ccall(cf, Int, (UInt8, Int), 1, 2)) @test_throws(ErrorException("no unique matching method found for the specified argument types"), which(ambig, (UInt8, Int))) diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl index 488fa17e242bd..d63ead4eeea1c 100644 --- a/test/compiler/inference.jl +++ b/test/compiler/inference.jl @@ -956,7 +956,7 @@ end # but which also means we should still be storing the inference result from inferring the cycle f21653() = f21653() @test code_typed(f21653, Tuple{}, optimize=false)[1] isa Pair{CodeInfo, typeof(Union{})} -@test which(f21653, ()).specializations.func.rettype === Union{} +@test which(f21653, ()).specializations.func.cache.rettype === Union{} # issue #22290 f22290() = return 3 @@ -1086,27 +1086,25 @@ function get_linfo(@nospecialize(f), @nospecialize(t)) throw(ArgumentError("argument is not a generic function")) end # get the MethodInstance for the method match - world = typemax(UInt) meth = which(f, t) t = Base.to_tuple_type(t) ft = isa(f, Type) ? Type{f} : typeof(f) tt = Tuple{ft, t.parameters...} - precompile(tt) + precompile(tt) # does inference (calls jl_type_infer) on this signature (ti, env) = ccall(:jl_type_intersection_with_env, Ref{Core.SimpleVector}, (Any, Any), tt, meth.sig) - meth = Base.func_for_method_checked(meth, tt, env) return ccall(:jl_specializations_get_linfo, Ref{Core.MethodInstance}, - (Any, Any, Any, UInt), meth, tt, env, world) + (Any, Any, Any), meth, tt, env) end function test_const_return(@nospecialize(f), @nospecialize(t), @nospecialize(val)) - linfo = get_linfo(f, t) + linfo = Core.Compiler.inf_for_methodinstance(get_linfo(f, t), Core.Compiler.get_world_counter())::Core.CodeInstance # If coverage is not enabled, make the check strict by requiring constant ABI # Otherwise, check the typed AST to make sure we return a constant. 
if Base.JLOptions().code_coverage == 0 @test Core.Compiler.invoke_api(linfo) == 2 end if Core.Compiler.invoke_api(linfo) == 2 - @test linfo.inferred_const == val + @test linfo.rettype_const == val return end ct = code_typed(f, t) @@ -1163,8 +1161,8 @@ test_const_return(()->sizeof(1), Tuple{}, sizeof(Int)) test_const_return(()->sizeof(DataType), Tuple{}, sizeof(DataType)) test_const_return(()->sizeof(1 < 2), Tuple{}, 1) test_const_return(()->fieldtype(Dict{Int64,Nothing}, :age), Tuple{}, UInt) -@eval test_const_return(()->Core.sizeof($(Array{Int,0}(undef))), Tuple{}, sizeof(Int)) -@eval test_const_return(()->Core.sizeof($(Matrix{Float32}(undef, 2, 2))), Tuple{}, 4 * 2 * 2) +test_const_return(@eval(()->Core.sizeof($(Array{Int,0}(undef)))), Tuple{}, sizeof(Int)) +test_const_return(@eval(()->Core.sizeof($(Matrix{Float32}(undef, 2, 2)))), Tuple{}, 4 * 2 * 2) # Make sure Core.sizeof with a ::DataType as inferred input type is inferred but not constant. function sizeof_typeref(typeref) @@ -1392,7 +1390,7 @@ gg13183(x::X...) where {X} = (_false13183 ? gg13183(x, x) : 0) # test the external OptimizationState constructor let linfo = get_linfo(Base.convert, Tuple{Type{Int64}, Int32}), - world = typemax(UInt), + world = UInt(23) # some small-numbered world that should be valid opt = Core.Compiler.OptimizationState(linfo, Core.Compiler.Params(world)) # make sure the state of the properties look reasonable @test opt.src !== linfo.def.source @@ -1400,8 +1398,8 @@ let linfo = get_linfo(Base.convert, Tuple{Type{Int64}, Int32}), @test opt.src.ssavaluetypes isa Vector{Any} @test !opt.src.inferred @test opt.mod === Base - @test opt.max_valid === typemax(UInt) - @test opt.min_valid === Core.Compiler.min_world(opt.linfo) > 2 + @test opt.max_valid === Core.Compiler.get_world_counter() + @test opt.min_valid === Core.Compiler.min_world(opt.src) === UInt(1) @test opt.nargs == 3 end diff --git a/test/compiler/validation.jl b/test/compiler/validation.jl index 1ce85b3259328..a63f5ddc57d71 100644 --- a/test/compiler/validation.jl +++ b/test/compiler/validation.jl @@ -21,7 +21,7 @@ end msig = Tuple{typeof(f22938),Int,Int,Int,Int} world = typemax(UInt) _, msp, m = Base._methods_by_ftype(msig, -1, world)[] -mi = Core.Compiler.code_for_method(m, msig, msp, world, false) +mi = Core.Compiler.specialize_method(m, msig, msp, false) c0 = Core.Compiler.retrieve_code_info(mi) @test isempty(Core.Compiler.validate_code(mi)) diff --git a/test/reflection.jl b/test/reflection.jl index 8cded1a5f412c..92b68181b83be 100644 --- a/test/reflection.jl +++ b/test/reflection.jl @@ -1,4 +1,5 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license +using Test # code_native / code_llvm (issue #8239) # It's hard to really test these, but just running them should be @@ -308,7 +309,7 @@ for (f, t) in Any[(definitely_not_in_sysimg, Tuple{}), tt = Tuple{typeof(f), t.parameters...} (ti, env) = ccall(:jl_type_intersection_with_env, Any, (Any, Any), tt, meth.sig)::Core.SimpleVector @test ti === tt # intersection should be a subtype - world = typemax(UInt) + world = Core.Compiler.get_world_counter() linfo = ccall(:jl_specializations_get_linfo, Ref{Core.MethodInstance}, (Any, Any, Any, UInt), meth, tt, env, world) params = Base.CodegenParams() llvmf1 = ccall(:jl_get_llvmf_decl, Ptr{Cvoid}, (Any, UInt, Bool, Base.CodegenParams), linfo::Core.MethodInstance, world, true, params) @@ -430,27 +431,13 @@ end # Linfo Tracing test -tracefoo(x, y) = x+y -didtrace = false -tracer(x::Ptr{Cvoid}) = (@test isa(unsafe_pointer_to_objref(x), Core.MethodInstance); global didtrace = true; nothing) -let ctracer = @cfunction(tracer, Cvoid, (Ptr{Cvoid},)) - ccall(:jl_register_method_tracer, Cvoid, (Ptr{Cvoid},), ctracer) -end -meth = which(tracefoo,Tuple{Any,Any}) -ccall(:jl_trace_method, Cvoid, (Any,), meth) -@test tracefoo(1, 2) == 3 -ccall(:jl_untrace_method, Cvoid, (Any,), meth) -@test didtrace -didtrace = false -@test tracefoo(1.0, 2.0) == 3.0 -@test !didtrace -ccall(:jl_register_method_tracer, Cvoid, (Ptr{Cvoid},), C_NULL) - +function tracefoo end # Method Tracing test methtracer(x::Ptr{Cvoid}) = (@test isa(unsafe_pointer_to_objref(x), Method); global didtrace = true; nothing) let cmethtracer = @cfunction(methtracer, Cvoid, (Ptr{Cvoid},)) ccall(:jl_register_newmeth_tracer, Cvoid, (Ptr{Cvoid},), cmethtracer) end +didtrace = false tracefoo2(x, y) = x*y @test didtrace didtrace = false @@ -529,25 +516,26 @@ else end # PR #18888: code_typed shouldn't cache, return_types should +f18888() = nothing let - world = typemax(UInt) - f18888() = return nothing + world = Core.Compiler.get_world_counter() m = first(methods(f18888, Tuple{})) @test m.specializations === nothing ft = typeof(f18888) code_typed(f18888, Tuple{}; optimize=false) - @test m.specializations !== nothing # uncached, but creates the specializations entry - code = Core.Compiler.code_for_method(m, Tuple{ft}, Core.svec(), world, true) - @test !isdefined(code, :inferred) + @test m.specializations isa Core.TypeMapEntry # uncached, but creates the specializations entry + mi = Core.Compiler.specialize_method(m, Tuple{ft}, Core.svec()) + @test Core.Compiler.inf_for_methodinstance(mi, world) === nothing + @test !isdefined(mi, :cache) code_typed(f18888, Tuple{}; optimize=true) - code = Core.Compiler.code_for_method(m, Tuple{ft}, Core.svec(), world, true) - @test !isdefined(code, :inferred) + @test !isdefined(mi, :cache) Base.return_types(f18888, Tuple{}) - code = Core.Compiler.code_for_method(m, Tuple{ft}, Core.svec(), world, true) - @test isdefined(code, :inferred) + @test Core.Compiler.inf_for_methodinstance(mi, world) === mi.cache + @test mi.cache isa Core.CodeInstance + @test !isdefined(mi.cache, :next) end # New reflection methods in 0.6 @@ -645,22 +633,24 @@ function test_similar_codeinfo(a, b) end @generated f22979(x...) 
-x22979 = (1, 2.0, 3.0 + im)
-T22979 = Tuple{typeof(f22979),typeof.(x22979)...}
-world = typemax(UInt)
-mtypes, msp, m = Base._methods_by_ftype(T22979, -1, world)[]
-instance = Core.Compiler.code_for_method(m, mtypes, msp, world, false)
-cinfo_generated = Core.Compiler.get_staged(instance)
-@test_throws ErrorException Base.uncompressed_ast(m)
-
-test_similar_codeinfo(code_lowered(f22979, typeof(x22979))[1], cinfo_generated)
-
-cinfos = code_lowered(f22979, typeof.(x22979), generated = true)
-@test length(cinfos) == 1
-cinfo = cinfos[]
-test_similar_codeinfo(cinfo, cinfo_generated)
-
-@test_throws ErrorException code_lowered(f22979, typeof.(x22979), generated = false)
+let
+    x22979 = (1, 2.0, 3.0 + im)
+    T22979 = Tuple{typeof(f22979), typeof.(x22979)...}
+    world = Core.Compiler.get_world_counter()
+    mtypes, msp, m = Base._methods_by_ftype(T22979, -1, world)[1]
+    instance = Core.Compiler.specialize_method(m, mtypes, msp)
+    cinfo_generated = Core.Compiler.get_staged(instance)
+    @test_throws ErrorException Base.uncompressed_ast(m)
+
+    test_similar_codeinfo(code_lowered(f22979, typeof(x22979))[1], cinfo_generated)
+
+    cinfos = code_lowered(f22979, typeof.(x22979), generated=true)
+    @test length(cinfos) == 1
+    cinfo = cinfos[1]
+    test_similar_codeinfo(cinfo, cinfo_generated)
+    @test_throws ErrorException code_lowered(f22979, typeof.(x22979), generated=false)
+end
+
 module MethodDeletion
 using Test, Random
diff --git a/test/stacktraces.jl b/test/stacktraces.jl
index c684257b6fa26..e2c6a49503a1a 100644
--- a/test/stacktraces.jl
+++ b/test/stacktraces.jl
@@ -104,7 +104,7 @@ let src = Meta.lower(Main, quote let x = 1 end end).args[1]::Core.CodeInfo,
     li = ccall(:jl_new_method_instance_uninit, Ref{Core.MethodInstance}, ()),
     sf

-    li.inferred = src
+    li.uninferred = src
     li.specTypes = Tuple{}
     li.def = @__MODULE__
     sf = StackFrame(:a, :b, 3, li, false, false, 0)
diff --git a/test/syntax.jl b/test/syntax.jl
index 1626b57b1085f..4f3c474ecf145 100644
--- a/test/syntax.jl
+++ b/test/syntax.jl
@@ -1493,7 +1493,7 @@ end
 # issue #27129
 f27129(x = 1) = (@Base._inline_meta; x)
 for meth in methods(f27129)
-    @test ccall(:jl_uncompress_ast, Any, (Any, Any), meth, meth.source).inlineable
+    @test ccall(:jl_uncompress_ast, Any, (Any, Ptr{Cvoid}, Any), meth, C_NULL, meth.source).inlineable
 end

 # issue #27710
diff --git a/test/worlds.jl b/test/worlds.jl
index 1b8e4e03546ba..a1bda0ff9a4e5 100644
--- a/test/worlds.jl
+++ b/test/worlds.jl
@@ -2,9 +2,9 @@

 # tests for accurate updating of method tables

+using Base: get_world_counter
 tls_world_age() = ccall(:jl_get_tls_world_age, UInt, ())
-world_counter() = ccall(:jl_get_world_counter, UInt, ())
-@test typemax(UInt) > world_counter() == tls_world_age() > 0
+@test typemax(UInt) > get_world_counter() == tls_world_age() > 0

 # test simple method replacement
 begin
@@ -106,17 +106,17 @@ function put_n_take!(v...)
 end

 g265() = [f265(x) for x in 1:3.]
-wc265 = world_counter()
+wc265 = get_world_counter()
 f265(::Any) = 1.0
-@test wc265 + 1 == world_counter()
+@test wc265 + 1 == get_world_counter()
 chnls, tasks = Base.channeled_tasks(2, wfunc)
 t265 = tasks[1]

-wc265 = world_counter()
-@test put_n_take!(world_counter, ()) == wc265
+wc265 = get_world_counter()
+@test put_n_take!(get_world_counter, ()) == wc265
 @test put_n_take!(tls_world_age, ()) == wc265
 f265(::Int) = 1
-@test put_n_take!(world_counter, ()) == wc265 + 1 == world_counter() == tls_world_age()
+@test put_n_take!(get_world_counter, ()) == wc265 + 1 == get_world_counter() == tls_world_age()
 @test put_n_take!(tls_world_age, ()) == wc265

 @test g265() == Int[1, 1, 1]
@@ -146,7 +146,7 @@ let ex = t265.exception
     @test ex.args == ()
     @test ex.world == wc265
     str = sprint(showerror, ex)
-    wc = world_counter()
+    wc = get_world_counter()
     cmps = """
     MethodError: no method matching h265()
     The applicable method may be too new: running in world age $wc265, while current world is $wc."""
@@ -177,9 +177,10 @@ z = Any["ABC"]
 f26506(x::Int) = 2
 g26506(z) # Places an entry for f26506(::String) in mt.name.cache
 f26506(x::String) = 3
-cache = typeof(f26506).name.mt.cache
-# The entry we created above should have been truncated
-@test cache.min_world == cache.max_world
+let cache = typeof(f26506).name.mt.cache
+    # The entry we created above should have been truncated
+    @test cache.min_world == cache.max_world
+end
 c26506_1, c26506_2 = Condition(), Condition()
 # Captures the world age
 result26506 = Any[]
@@ -190,7 +191,10 @@ t = Task(()->begin
 end)
 yield(t)
 f26506(x::Float64) = 4
-cache = typeof(f26506).name.mt.cache
-@test cache.min_world == cache.max_world
-notify(c26506_1); wait(c26506_2);
+let cache = typeof(f26506).name.mt.cache
+    # The entry we created above should have been truncated
+    @test cache.min_world == cache.max_world
+end
+notify(c26506_1)
+wait(c26506_2)
 @test result26506[1] == 3
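
Editor's note (illustrative sketch, not part of the patch to apply): the reflection tests above are the most direct picture of the new object model, so here is a minimal standalone version of the same lookup pattern, assuming a Julia build at this commit. The function `g31191` and its signature are placeholders invented for this example; `specialize_method`, `inf_for_methodinstance`, and `get_world_counter` are the internal entry points the patch's own tests exercise.

    using Test
    # MethodInstance now identifies only the specialization; the inferred,
    # executable code lives in a separate CodeInstance reached via mi.cache.
    g31191(x) = x + 1
    g31191(1)                       # force specialization + inference of g31191(::Int)
    world = Core.Compiler.get_world_counter()
    m = first(methods(g31191, Tuple{Int}))
    # specialize_method returns (creating if needed) the MethodInstance for this signature
    mi = Core.Compiler.specialize_method(m, Tuple{typeof(g31191), Int}, Core.svec())
    # inf_for_methodinstance scans mi.cache for a CodeInstance whose
    # [min_world, max_world] range covers `world`; nothing if not cached
    ci = Core.Compiler.inf_for_methodinstance(mi, world)
    @test ci === nothing || ci isa Core.CodeInstance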