diff --git a/base/timing.jl b/base/timing.jl
index 1889a0050e5fd..8ddee831c07f0 100644
--- a/base/timing.jl
+++ b/base/timing.jl
@@ -60,13 +60,6 @@ cumulative_compile_time_ns_before() = ccall(:jl_cumulative_compile_time_ns_befor
 cumulative_compile_time_ns_after() = ccall(:jl_cumulative_compile_time_ns_after, UInt64, ())
 # cumulative total time this thread has spent on compilation since process start.
 cumulative_compile_time_ns() = ccall(:jl_cumulative_compile_time_ns, UInt64, ())
-function process_cumulative_compile_time_ns()
-    out = fill(UInt(0), Threads.nthreads())
-    Threads.@threads for i in 1:Threads.nthreads()
-        out[i] = cumulative_compile_time_ns()
-    end
-    return sum(out)
-end

 # total time spend in garbage collection, in nanoseconds
 gc_time_ns() = ccall(:jl_gc_total_hrtime, UInt64, ())
diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp
index 96364d9904816..6906d81b805d4 100644
--- a/src/aotcompile.cpp
+++ b/src/aotcompile.cpp
@@ -286,8 +286,7 @@ void *jl_create_native(jl_array_t *methods, const jl_cgparams_t cgparams, int _p
     JL_GC_PUSH1(&src);
     JL_LOCK(&codegen_lock);
     uint64_t compiler_start_time = 0;
-    int tid = jl_threadid();
-    if (jl_measure_compile_time[tid])
+    if (jl_atomic_load(&jl_measure_compile_time))
         compiler_start_time = jl_hrtime();

     CompilationPolicy policy = (CompilationPolicy) _policy;
@@ -415,8 +414,8 @@ void *jl_create_native(jl_array_t *methods, const jl_cgparams_t cgparams, int _p
     }

     data->M = std::move(clone);
-    if (jl_measure_compile_time[tid])
-        jl_cumulative_compile_time[tid] += (jl_hrtime() - compiler_start_time);
+    if (jl_atomic_load(&jl_measure_compile_time))
+        jl_atomic_fetch_add(&jl_cumulative_compile_time, (jl_hrtime() - compiler_start_time));
     if (policy == CompilationPolicy::ImagingMode)
         imaging_mode = 0;
     JL_UNLOCK(&codegen_lock); // Might GC
@@ -916,8 +915,7 @@ void *jl_get_llvmf_defn(jl_method_instance_t *mi, size_t world, char getwrapper,
     jl_llvm_functions_t decls;
     JL_LOCK(&codegen_lock);
     uint64_t compiler_start_time = 0;
-    int tid = jl_threadid();
-    if (jl_measure_compile_time[tid])
+    if (jl_atomic_load(&jl_measure_compile_time))
         compiler_start_time = jl_hrtime();

     std::tie(m, decls) = jl_emit_code(mi, src, jlrettype, output);
@@ -942,8 +940,8 @@ void *jl_get_llvmf_defn(jl_method_instance_t *mi, size_t world, char getwrapper,
         m.release(); // the return object `llvmf` will be the owning pointer
     }
     JL_GC_POP();
-    if (jl_measure_compile_time[tid])
-        jl_cumulative_compile_time[tid] += (jl_hrtime() - compiler_start_time);
+    if (jl_atomic_load(&jl_measure_compile_time))
+        jl_atomic_fetch_add(&jl_cumulative_compile_time, (jl_hrtime() - compiler_start_time));
     JL_UNLOCK(&codegen_lock); // Might GC
     if (F)
         return F;
diff --git a/src/gf.c b/src/gf.c
index 41381ccc5178e..5809721ceaadd 100644
--- a/src/gf.c
+++ b/src/gf.c
@@ -3164,15 +3164,14 @@ static uint64_t inference_start_time = 0;

 JL_DLLEXPORT void jl_typeinf_begin(void)
 {
     JL_LOCK(&typeinf_lock);
-    if (jl_measure_compile_time[jl_threadid()])
+    if (jl_atomic_load(&jl_measure_compile_time))
         inference_start_time = jl_hrtime();
 }

 JL_DLLEXPORT void jl_typeinf_end(void)
 {
-    int tid = jl_threadid();
-    if (typeinf_lock.count == 1 && jl_measure_compile_time[tid])
-        jl_cumulative_compile_time[tid] += (jl_hrtime() - inference_start_time);
+    if (typeinf_lock.count == 1 && jl_atomic_load(&jl_measure_compile_time))
+        jl_atomic_fetch_add(&jl_cumulative_compile_time, (jl_hrtime() - inference_start_time));
     JL_UNLOCK(&typeinf_lock);
 }
diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp
index 0e3bf4035d831..77c031f985409 100644
--- a/src/jitlayers.cpp
+++ b/src/jitlayers.cpp
@@ -78,22 +78,19 @@ void jl_jit_globals(std::map<void *, GlobalVariable*> &globals)
 extern "C" JL_DLLEXPORT
 uint64_t jl_cumulative_compile_time_ns_before()
 {
-    int tid = jl_threadid();
-    jl_measure_compile_time[tid] += 1;
-    return jl_cumulative_compile_time[tid];
+    jl_atomic_fetch_add(&jl_measure_compile_time, 1);
+    return jl_atomic_load(&jl_cumulative_compile_time);
 }
 extern "C" JL_DLLEXPORT
 uint64_t jl_cumulative_compile_time_ns_after()
 {
-    int tid = jl_threadid();
-    jl_measure_compile_time[tid] -= 1;
-    return jl_cumulative_compile_time[tid];
+    jl_atomic_fetch_add(&jl_measure_compile_time, -1);
+    return jl_atomic_load(&jl_cumulative_compile_time);
 }
 extern "C" JL_DLLEXPORT
 uint64_t jl_cumulative_compile_time_ns()
 {
-    int tid = jl_threadid();
-    return jl_cumulative_compile_time[tid];
+    return jl_atomic_load(&jl_cumulative_compile_time);
 }

 // this generates llvm code for the lambda info
@@ -239,8 +236,7 @@ int jl_compile_extern_c(void *llvmmod, void *p, void *sysimg, jl_value_t *declrt
 {
     JL_LOCK(&codegen_lock);
     uint64_t compiler_start_time = 0;
-    int tid = jl_threadid();
-    if (jl_measure_compile_time[tid])
+    if (jl_atomic_load(&jl_measure_compile_time))
         compiler_start_time = jl_hrtime();
     jl_codegen_params_t params;
     jl_codegen_params_t *pparams = (jl_codegen_params_t*)p;
@@ -264,8 +260,8 @@ int jl_compile_extern_c(void *llvmmod, void *p, void *sysimg, jl_value_t *declrt
         if (success && llvmmod == NULL)
             jl_add_to_ee(std::unique_ptr<Module>(into));
     }
-    if (codegen_lock.count == 1 && jl_measure_compile_time[tid])
-        jl_cumulative_compile_time[tid] += (jl_hrtime() - compiler_start_time);
+    if (codegen_lock.count == 1 && jl_atomic_load(&jl_measure_compile_time))
+        jl_atomic_fetch_add(&jl_cumulative_compile_time, (jl_hrtime() - compiler_start_time));
     JL_UNLOCK(&codegen_lock);
     return success;
 }
@@ -321,8 +317,7 @@ jl_code_instance_t *jl_generate_fptr(jl_method_instance_t *mi JL_PROPAGATES_ROOT
 {
     JL_LOCK(&codegen_lock); // also disables finalizers, to prevent any unexpected recursion
     uint64_t compiler_start_time = 0;
-    int tid = jl_threadid();
-    if (jl_measure_compile_time[tid])
+    if (jl_atomic_load(&jl_measure_compile_time))
         compiler_start_time = jl_hrtime();
     // if we don't have any decls already, try to generate it now
     jl_code_info_t *src = NULL;
@@ -360,8 +355,8 @@ jl_code_instance_t *jl_generate_fptr(jl_method_instance_t *mi JL_PROPAGATES_ROOT
     else {
         codeinst = NULL;
     }
-    if (codegen_lock.count == 1 && jl_measure_compile_time[tid])
-        jl_cumulative_compile_time[tid] += (jl_hrtime() - compiler_start_time);
+    if (codegen_lock.count == 1 && jl_atomic_load(&jl_measure_compile_time))
+        jl_atomic_fetch_add(&jl_cumulative_compile_time, (jl_hrtime() - compiler_start_time));
     JL_UNLOCK(&codegen_lock);
     JL_GC_POP();
     return codeinst;
@@ -375,8 +370,7 @@ void jl_generate_fptr_for_unspecialized(jl_code_instance_t *unspec)
     }
     JL_LOCK(&codegen_lock);
     uint64_t compiler_start_time = 0;
-    int tid = jl_threadid();
-    if (jl_measure_compile_time[tid])
+    if (jl_atomic_load(&jl_measure_compile_time))
         compiler_start_time = jl_hrtime();
     if (unspec->invoke == NULL) {
         jl_code_info_t *src = NULL;
@@ -404,8 +398,8 @@ void jl_generate_fptr_for_unspecialized(jl_code_instance_t *unspec)
         }
         JL_GC_POP();
     }
-    if (codegen_lock.count == 1 && jl_measure_compile_time[tid])
-        jl_cumulative_compile_time[tid] += (jl_hrtime() - compiler_start_time);
+    if (codegen_lock.count == 1 && jl_atomic_load(&jl_measure_compile_time))
+        jl_atomic_fetch_add(&jl_cumulative_compile_time, (jl_hrtime() - compiler_start_time));
     JL_UNLOCK(&codegen_lock); // Might GC
 }

@@ -428,8 +422,7 @@ jl_value_t *jl_dump_method_asm(jl_method_instance_t *mi, size_t world,
         // so create an exception here so we can print pretty our lies
         JL_LOCK(&codegen_lock); // also disables finalizers, to prevent any unexpected recursion
         uint64_t compiler_start_time = 0;
-        int tid = jl_threadid();
-        if (jl_measure_compile_time[tid])
+        if (jl_atomic_load(&jl_measure_compile_time))
             compiler_start_time = jl_hrtime();
         specfptr = (uintptr_t)codeinst->specptr.fptr;
         if (specfptr == 0) {
@@ -454,8 +447,8 @@ jl_value_t *jl_dump_method_asm(jl_method_instance_t *mi, size_t world,
             }
             JL_GC_POP();
         }
-        if (jl_measure_compile_time[tid])
-            jl_cumulative_compile_time[tid] += (jl_hrtime() - compiler_start_time);
+        if (jl_atomic_load(&jl_measure_compile_time))
+            jl_atomic_fetch_add(&jl_cumulative_compile_time, (jl_hrtime() - compiler_start_time));
         JL_UNLOCK(&codegen_lock);
     }
     if (specfptr != 0)
diff --git a/src/julia_internal.h b/src/julia_internal.h
index b997ad0d1214a..ebe037e21dd4d 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -154,8 +154,9 @@ static inline uint64_t cycleclock(void)

 #include "timing.h"

-extern uint8_t *jl_measure_compile_time;
-extern uint64_t *jl_cumulative_compile_time;
+// Global *atomic* integers controlling *process-wide* measurement of compilation time.
+extern volatile uint8_t jl_measure_compile_time;
+extern volatile uint64_t jl_cumulative_compile_time;

 #ifdef _COMPILER_MICROSOFT_
 # define jl_return_address() ((uintptr_t)_ReturnAddress())
diff --git a/src/task.c b/src/task.c
index c3ac26fbcf511..e9bbd4ecb5e86 100644
--- a/src/task.c
+++ b/src/task.c
@@ -561,7 +561,7 @@ static void JL_NORETURN throw_internal(jl_task_t *ct, jl_value_t *exception JL_M
     ptls->io_wait = 0;
     // @time needs its compile timer disabled on error,
     // and cannot use a try-finally as it would break scope for assignments
-    jl_measure_compile_time[ptls->tid] = 0;
+    jl_atomic_fetch_add(&jl_measure_compile_time, -1);
     JL_GC_PUSH1(&exception);
     jl_gc_unsafe_enter(ptls);
     if (exception) {
diff --git a/src/threading.c b/src/threading.c
index 235bb9f870ba1..65ed0171ff317 100644
--- a/src/threading.c
+++ b/src/threading.c
@@ -287,8 +287,8 @@ void jl_pgcstack_getkey(jl_get_pgcstack_func **f, jl_pgcstack_key_t *k)
 #endif

 jl_ptls_t *jl_all_tls_states JL_GLOBALLY_ROOTED;
-uint8_t *jl_measure_compile_time = NULL;
-uint64_t *jl_cumulative_compile_time = NULL;
+volatile uint8_t jl_measure_compile_time = 0;
+volatile uint64_t jl_cumulative_compile_time = 0;

 // return calling thread's ID
 // Also update the suspended_threads list in signals-mach when changing the
@@ -467,8 +467,6 @@ void jl_init_threading(void)
     }
     if (jl_n_threads <= 0)
         jl_n_threads = 1;
-    jl_measure_compile_time = (uint8_t*)calloc(jl_n_threads, sizeof(*jl_measure_compile_time));
-    jl_cumulative_compile_time = (uint64_t*)calloc(jl_n_threads, sizeof(*jl_cumulative_compile_time));
 #ifndef __clang_analyzer__
     jl_all_tls_states = (jl_ptls_t*)calloc(jl_n_threads, sizeof(void*));
 #endif
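Note (not part of the patch): with the counters now process-global, callers still pair the two entry points exactly as before. `jl_cumulative_compile_time_ns_before` bumps the measurement nesting counter and returns the running total; `jl_cumulative_compile_time_ns_after` decrements it and returns the updated total. A minimal Julia sketch of that pattern, as Base's `@time` consumes these wrappers (`workload` is a hypothetical zero-argument function):

    # Measure nanoseconds spent compiling while `workload` runs.
    compile_ns_before = Base.cumulative_compile_time_ns_before()  # enables measurement (counter += 1)
    workload()                                                    # compilation triggered here is accumulated
    compile_elapsed_ns = Base.cumulative_compile_time_ns_after() - compile_ns_before  # counter -= 1, read total

Because both counters are shared across threads, the difference now reflects compilation performed by any thread while measurement was enabled, not just the calling thread, which is what made the removed `process_cumulative_compile_time_ns` helper unnecessary.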