gf.c: include const-return methods in --trace-compile #237

Open · wants to merge 1 commit into base: v1.10.2+RAI
19 changes: 14 additions & 5 deletions src/gf.c
@@ -476,6 +476,8 @@ JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred(
return codeinst;
}

static void record_precompile_statement(jl_method_instance_t *mi, double compilation_time, int is_const_return_abi);

JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst(
jl_method_instance_t *mi, jl_value_t *rettype,
jl_value_t *inferred_const, jl_value_t *inferred,
@@ -501,6 +503,10 @@ JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst(
jl_atomic_store_relaxed(&codeinst->invoke, NULL);
if ((const_flags & 1) != 0) {
assert(const_flags & 2);
if (jl_is_method(mi->def.value) && jl_isa_compileable_sig((jl_tupletype_t *)mi->specTypes, mi->sparam_vals, mi->def.method)) {
// This code was freshly-inferred, so let's emit a `precompile(...)` statement for it
record_precompile_statement(mi, 0.0, 1);
}
jl_atomic_store_relaxed(&codeinst->invoke, jl_fptr_const_return);
}
jl_atomic_store_relaxed(&codeinst->specsigflags, 0);
@@ -2392,7 +2398,7 @@ JL_DLLEXPORT void jl_force_trace_compile_timing_disable(void)
jl_atomic_fetch_add(&jl_force_trace_compile_timing_enabled, -1);
}

static void record_precompile_statement(jl_method_instance_t *mi, double compilation_time)
static void record_precompile_statement(jl_method_instance_t *mi, double compilation_time, int is_const_return_abi)
{
static ios_t f_precompile;
static JL_STREAM* s_precompile = NULL;
@@ -2420,7 +2426,10 @@ static void record_precompile_statement(jl_method_instance_t *mi, double compila
jl_printf(s_precompile, "#= %6.1f =# ", compilation_time / 1e6);
jl_printf(s_precompile, "precompile(");
jl_static_show(s_precompile, mi->specTypes);
jl_printf(s_precompile, ")\n");
if (is_const_return_abi)
jl_printf(s_precompile, ") #= const-return =#\n");
Member:
Is this going to negatively interact with any trace-processing tools we have?

Why do we want this in the output?

@kpamnany (Collaborator Author), Jun 5, 2025:
Great observation. I think I did see some CI errors due to this comment when I added the TPC-H trace (before I removed that trace from testing). Those should be easily fixable; I'll check.

Upstream, they are (for now) maintaining the idea that compilation traces are human-readable, which is why this is in the output. We could remove it locally if we're okay with diverging.

Member:
> compilation traces are human-readable, which is why this is in the output

I wonder in what situations seeing the #= const-return =# comment is helpful?

Member:
I think we should be able to handle these trailing comments, but it's definitely a good idea to check 👍

Member:
julia> RAI_CompilationsCache.PrecompileStatementParsing.is_precompileable(Base, "#= 1.0 =# precompile(Tuple{typeof(Base.:+), Int, Int}) #= const-return =#")
pr_success::PrecompilationResult = 1

julia> RAI_CompilationsCache.PrecompileStatementParsing.run_precompile(Base, "#= 1.0 =# precompile(Tuple{typeof(Base.:+), Int, Int}) #= const-return =#")
pr_success::PrecompilationResult = 1

We could end up with duplicate traces though -- if on engine 1 we produced

#= 1.0 =# precompile(Tuple{typeof(Base.:+), Int, Int}) #= const-return =#

and on engine 2, which called the function in a different context, we'd get

#= 1.0 =# precompile(Tuple{typeof(Base.:+), Int, Int})

That could cause the precompile to take up 2 spots in the cache, so we probably need to augment the cache to strip any trailing comments.
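
One possible shape for that normalization, sketched below with a hypothetical strip_trailing_comment helper (not an existing RAI_CompilationsCache function): drop any trailing #= ... =# comment after the closing parenthesis before using the statement as a cache key, so both forms map to the same entry.

julia> strip_trailing_comment(stmt::AbstractString) = replace(stmt, r"\)\s*#=.*=#\s*$" => ")")  # strip a trailing comment, keep the closing paren
strip_trailing_comment (generic function with 1 method)

julia> strip_trailing_comment("#= 1.0 =# precompile(Tuple{typeof(Base.:+), Int, Int}) #= const-return =#")
"#= 1.0 =# precompile(Tuple{typeof(Base.:+), Int, Int})"

julia> strip_trailing_comment("#= 1.0 =# precompile(Tuple{typeof(Base.:+), Int, Int})")  # already bare: unchanged
"#= 1.0 =# precompile(Tuple{typeof(Base.:+), Int, Int})"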

else
jl_printf(s_precompile, ")\n");
if (s_precompile != JL_STDERR)
ios_flush(&f_precompile);
}
@@ -2566,7 +2575,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t
codeinst->rettype_const = unspec->rettype_const;
jl_atomic_store_release(&codeinst->invoke, unspec_invoke);
jl_mi_cache_insert(mi, codeinst);
record_precompile_statement(mi, 0);
record_precompile_statement(mi, 0.0, 0);
return codeinst;
}
}
@@ -2583,7 +2592,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t
0, 1, ~(size_t)0, 0, 0, jl_nothing, 0);
jl_atomic_store_release(&codeinst->invoke, jl_fptr_interpret_call);
jl_mi_cache_insert(mi, codeinst);
record_precompile_statement(mi, 0);
record_precompile_statement(mi, 0.0, 0);
return codeinst;
}
if (compile_option == JL_OPTIONS_COMPILE_OFF) {
@@ -2638,7 +2647,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t
jl_mi_cache_insert(mi, codeinst);
}
else if (did_compile) {
record_precompile_statement(mi, compile_time);
record_precompile_statement(mi, compile_time, 0);
}
jl_atomic_store_relaxed(&codeinst->precompile, 1);
return codeinst;