diff --git a/ext/ddtrace_profiling_native_extension/collectors_thread_context.c b/ext/ddtrace_profiling_native_extension/collectors_thread_context.c index 618cdb23f5d..0845f196695 100644 --- a/ext/ddtrace_profiling_native_extension/collectors_thread_context.c +++ b/ext/ddtrace_profiling_native_extension/collectors_thread_context.c @@ -1210,7 +1210,7 @@ void thread_context_collector_sample_allocation(VALUE self_instance, unsigned in } } - track_object(state->recorder_instance, new_object, sample_weight); + track_object(state->recorder_instance, new_object, sample_weight, class_name); trigger_sample_for_thread( state, diff --git a/ext/ddtrace_profiling_native_extension/heap_recorder.c b/ext/ddtrace_profiling_native_extension/heap_recorder.c index a0cdfa7dfc5..89fa0d288d6 100644 --- a/ext/ddtrace_profiling_native_extension/heap_recorder.c +++ b/ext/ddtrace_profiling_native_extension/heap_recorder.c @@ -4,6 +4,7 @@ #include "ruby_helpers.h" #include #include "collectors_stack.h" +#include "libdatadog_helpers.h" #define MAX_QUEUE_LIMIT 10000 @@ -89,9 +90,10 @@ static void heap_record_free(heap_record*); typedef struct { long obj_id; heap_record *heap_record; - live_object_data object_data; + // Has ownership of this, needs to clean-it-up if not transferred. + live_object_data *object_data; } object_record; -static object_record* object_record_new(long, heap_record*, live_object_data); +static object_record* object_record_new(long, heap_record*, live_object_data*); static void object_record_free(object_record*); // Allows storing data passed to ::start_heap_allocation_recording to make it accessible to @@ -100,14 +102,16 @@ static void object_record_free(object_record*); // obj_id != 0 flags this struct as holding a valid partial heap recording. typedef struct { long obj_id; - live_object_data object_data; + // Has ownership of this, needs to clean-it-up if not transferred. 
+ live_object_data *object_data; } partial_heap_recording; typedef struct { // Has ownership of this, needs to clean-it-up if not transferred. heap_record *heap_record; long obj_id; - live_object_data object_data; + // Has ownership of this, needs to clean-it-up if not transferred. + live_object_data *object_data; } uncommitted_sample; struct heap_recorder { @@ -141,9 +145,11 @@ static int st_object_record_entry_free(st_data_t, st_data_t, st_data_t); static int st_object_record_entry_free_if_invalid(st_data_t, st_data_t, st_data_t); static int st_object_records_iterate(st_data_t, st_data_t, st_data_t); static int update_object_record_entry(st_data_t*, st_data_t*, st_data_t, int); -static void commit_allocation(heap_recorder*, heap_record*, long, live_object_data); +static void commit_allocation(heap_recorder*, heap_record*, long, live_object_data*); static void flush_queue(heap_recorder*); -static bool enqueue_allocation(heap_recorder*, heap_record*, long, live_object_data); +static bool enqueue_allocation(heap_recorder*, heap_record*, long, live_object_data*); +static live_object_data* live_object_data_new(unsigned int, ddog_CharSlice, size_t); +static void live_object_data_free(live_object_data*); // ========================== // Heap Recorder External API @@ -164,7 +170,7 @@ heap_recorder* heap_recorder_new(void) { recorder->active_recording = (partial_heap_recording) { .obj_id = 0, // 0 is actually the obj_id of false, but we'll never track that one in heap so we use // it as invalid/unset value. 
- .object_data = {0}, + .object_data = NULL, }; recorder->queued_samples = ruby_xcalloc(MAX_QUEUE_LIMIT, sizeof(uncommitted_sample)); recorder->queued_samples_len = 0; @@ -177,12 +183,22 @@ void heap_recorder_free(heap_recorder *heap_recorder) { return; } + // Clean-up all object records st_foreach(heap_recorder->object_records, st_object_record_entry_free, 0); st_free_table(heap_recorder->object_records); + // Clean-up all heap records (this includes those only referred to by queued_samples) st_foreach(heap_recorder->heap_records, st_heap_record_entry_free, 0); st_free_table(heap_recorder->heap_records); + // Clean-up dangling object datas that are not part of object records yet + if (heap_recorder->active_recording.obj_id != 0) { + live_object_data_free(heap_recorder->active_recording.object_data); + } + for (size_t i = 0; i < heap_recorder->queued_samples_len; i++) { + live_object_data_free(heap_recorder->queued_samples[i].object_data); + } + pthread_mutex_destroy(&heap_recorder->records_mutex); ruby_xfree(heap_recorder->queued_samples); @@ -220,7 +236,7 @@ void heap_recorder_after_fork(heap_recorder *heap_recorder) { heap_recorder->records_mutex = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER; } -void start_heap_allocation_recording(heap_recorder *heap_recorder, VALUE new_obj, unsigned int weight) { +void start_heap_allocation_recording(heap_recorder* heap_recorder, VALUE new_obj, unsigned int weight, ddog_CharSlice alloc_class) { if (heap_recorder == NULL) { return; } @@ -232,9 +248,7 @@ void start_heap_allocation_recording(heap_recorder *heap_recorder, VALUE new_obj heap_recorder->active_recording = (partial_heap_recording) { .obj_id = FIX2LONG(ruby_obj_id), - .object_data = (live_object_data) { - .weight = weight, - }, + .object_data = live_object_data_new(weight, alloc_class, rb_gc_count()), }; } @@ -265,6 +279,8 @@ void end_heap_allocation_recording(struct heap_recorder *heap_recorder, ddog_pro // We weren't able to get a lock bool enqueued = 
enqueue_allocation(heap_recorder, heap_record, obj_id, active_recording->object_data); if (!enqueued) { + // Need to clean up pending stuff + live_object_data_free(active_recording->object_data); cleanup_heap_record_if_unused(heap_recorder, heap_record); } return; @@ -443,8 +459,9 @@ static int update_object_record_entry(DDTRACE_UNUSED st_data_t *key, st_data_t * return ST_CONTINUE; } +// Expects transfer of ownership of object_data // WARN: Expects records_mutex to be held -static void commit_allocation(heap_recorder *heap_recorder, heap_record *heap_record, long obj_id, live_object_data object_data) { +static void commit_allocation(heap_recorder *heap_recorder, heap_record *heap_record, long obj_id, live_object_data *object_data) { // Update object_records object_record_update_data update_data = (object_record_update_data) { .heap_recorder = heap_recorder, @@ -539,8 +556,9 @@ static void flush_queue(heap_recorder *heap_recorder) { heap_recorder->queued_samples_len = 0; } +// Expects transfer of ownership of object_data. // WARN: This can get called during Ruby GC. NO HEAP ALLOCATIONS OR EXCEPTIONS ARE ALLOWED. -static bool enqueue_allocation(heap_recorder *heap_recorder, heap_record *heap_record, long obj_id, live_object_data object_data) { +static bool enqueue_allocation(heap_recorder *heap_recorder, heap_record *heap_record, long obj_id, live_object_data *object_data) { if (heap_recorder->queued_samples_len >= MAX_QUEUE_LIMIT) { // TODO: Some telemetry here? 
    return false;
   }
@@ -555,6 +573,21 @@ static bool enqueue_allocation(heap_recorder *heap_recorder, heap_record *heap_r
   return true;
 }
 
+static live_object_data* live_object_data_new(unsigned int weight, ddog_CharSlice alloc_class, size_t alloc_gen) {
+  live_object_data* object_data = ruby_xmalloc(sizeof(live_object_data));
+  (*object_data) = (live_object_data) {
+    .weight = weight,
+    .alloc_class = string_from_char_slice(alloc_class),
+    .alloc_gen = alloc_gen,
+  };
+  return object_data;
+}
+
+static void live_object_data_free(live_object_data *object_data) {
+  ruby_xfree(object_data->alloc_class);
+  ruby_xfree(object_data); // also free the struct itself, it was allocated with ruby_xmalloc in live_object_data_new
+}
+
 // ===============
 // Heap Record API
 // ===============
@@ -574,7 +606,7 @@ void heap_record_free(heap_record *record) {
 // =================
 // Object Record API
 // =================
-object_record* object_record_new(long obj_id, heap_record *heap_record, live_object_data object_data) {
+object_record* object_record_new(long obj_id, heap_record *heap_record, live_object_data *object_data) {
   object_record *record = ruby_xcalloc(1, sizeof(object_record));
   record->obj_id = obj_id;
   record->heap_record = heap_record;
@@ -583,6 +615,7 @@ object_record* object_record_new(long obj_id, heap_record *heap_record, live_obj
 }
 
 void object_record_free(object_record *record) {
+  live_object_data_free(record->object_data);
   ruby_xfree(record);
 }
 
@@ -637,7 +670,6 @@ st_index_t ddog_location_hash(ddog_prof_Location location, st_index_t seed) {
   return hash;
 }
 
-
 // ==============
 // Heap Stack API
 // ==============
@@ -654,8 +686,8 @@ heap_stack* heap_stack_new(ddog_prof_Slice_Location locations) {
   for (uint16_t i = 0; i < stack->frames_len; i++) {
     const ddog_prof_Location *location = &locations.ptr[i];
     stack->frames[i] = (heap_frame) {
-      .name = ruby_strndup(location->function.name.ptr, location->function.name.len),
-      .filename = ruby_strndup(location->function.filename.ptr, location->function.filename.len),
+      .name = string_from_char_slice(location->function.name),
+      .filename =
string_from_char_slice(location->function.filename), // ddog_prof_Location is a int64_t. We don't expect to have to profile files with more than // 2M lines so this cast should be fairly safe? .line = (int32_t) location->line, diff --git a/ext/ddtrace_profiling_native_extension/heap_recorder.h b/ext/ddtrace_profiling_native_extension/heap_recorder.h index 6705abf602c..99b0b2e05b8 100644 --- a/ext/ddtrace_profiling_native_extension/heap_recorder.h +++ b/ext/ddtrace_profiling_native_extension/heap_recorder.h @@ -26,13 +26,21 @@ typedef struct live_object_data { // Example: If we were sampling every 50 objects, then each sampled object // could be seen as being representative of 50 objects. unsigned int weight; + + // The class of the object that we're tracking. + char* alloc_class; + + // The GC allocation gen in which we saw this object being allocated. + // + // This enables us to calculate the age of this object in terms of GC executions. + size_t alloc_gen; } live_object_data; // Data that is made available to iterators of heap recorder data for each live object // tracked therein. typedef struct { ddog_prof_Slice_Location locations; - live_object_data object_data; + const live_object_data *object_data; } heap_recorder_iteration_data; // Initialize a new heap recorder. @@ -56,7 +64,7 @@ void heap_recorder_after_fork(heap_recorder *heap_recorder); // The sampling weight of this object. // // WARN: It needs to be paired with a ::end_heap_allocation_recording call. -void start_heap_allocation_recording(heap_recorder *heap_recorder, VALUE new_obj, unsigned int weight); +void start_heap_allocation_recording(heap_recorder *heap_recorder, VALUE new_obj, unsigned int weight, ddog_CharSlice alloc_class); // End a previously started heap allocation recording on the heap recorder. 
// diff --git a/ext/ddtrace_profiling_native_extension/libdatadog_helpers.c b/ext/ddtrace_profiling_native_extension/libdatadog_helpers.c index f6f8b69b1b9..02777f33cb0 100644 --- a/ext/ddtrace_profiling_native_extension/libdatadog_helpers.c +++ b/ext/ddtrace_profiling_native_extension/libdatadog_helpers.c @@ -60,3 +60,7 @@ size_t read_ddogerr_string_and_drop(ddog_Error *error, char *string, size_t capa ddog_Error_drop(error); return error_msg_size; } + +char* string_from_char_slice(ddog_CharSlice slice) { + return ruby_strndup(slice.ptr, slice.len); +} diff --git a/ext/ddtrace_profiling_native_extension/libdatadog_helpers.h b/ext/ddtrace_profiling_native_extension/libdatadog_helpers.h index 7d4593fa550..fa86184189f 100644 --- a/ext/ddtrace_profiling_native_extension/libdatadog_helpers.h +++ b/ext/ddtrace_profiling_native_extension/libdatadog_helpers.h @@ -34,3 +34,7 @@ size_t read_ddogerr_string_and_drop(ddog_Error *error, char *string, size_t capa // ruby_value_type that Ruby uses so that we can also use this for debugging. const char *ruby_value_type_to_string(enum ruby_value_type type); ddog_CharSlice ruby_value_type_to_char_slice(enum ruby_value_type type); + +// Returns a dynamically allocated string from the provided char slice. +// WARN: The returned string must be explicitly freed with ruby_xfree. 
+char* string_from_char_slice(ddog_CharSlice slice); diff --git a/ext/ddtrace_profiling_native_extension/stack_recorder.c b/ext/ddtrace_profiling_native_extension/stack_recorder.c index c241afece90..fee6d406903 100644 --- a/ext/ddtrace_profiling_native_extension/stack_recorder.c +++ b/ext/ddtrace_profiling_native_extension/stack_recorder.c @@ -219,7 +219,7 @@ static VALUE _native_reset_after_fork(DDTRACE_UNUSED VALUE self, VALUE recorder_ static void serializer_set_start_timestamp_for_next_profile(struct stack_recorder_state *state, ddog_Timespec start_time); static VALUE _native_record_endpoint(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance, VALUE local_root_span_id, VALUE endpoint); static void reset_profile(ddog_prof_Profile *profile, ddog_Timespec *start_time /* Can be null */); -static VALUE _native_track_object(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance, VALUE new_obj, VALUE weight); +static VALUE _native_track_object(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance, VALUE new_obj, VALUE weight, VALUE alloc_class); static VALUE _native_check_heap_hashes(DDTRACE_UNUSED VALUE _self, VALUE locations); static VALUE _native_start_fake_slow_heap_serialization(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance); static VALUE _native_end_fake_slow_heap_serialization(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance); @@ -247,7 +247,7 @@ void stack_recorder_init(VALUE profiling_module) { rb_define_singleton_method(testing_module, "_native_slot_one_mutex_locked?", _native_is_slot_one_mutex_locked, 1); rb_define_singleton_method(testing_module, "_native_slot_two_mutex_locked?", _native_is_slot_two_mutex_locked, 1); rb_define_singleton_method(testing_module, "_native_record_endpoint", _native_record_endpoint, 3); - rb_define_singleton_method(testing_module, "_native_track_object", _native_track_object, 3); + rb_define_singleton_method(testing_module, "_native_track_object", _native_track_object, 4); rb_define_singleton_method(testing_module, 
"_native_check_heap_hashes", _native_check_heap_hashes, 1); rb_define_singleton_method(testing_module, "_native_start_fake_slow_heap_serialization", _native_start_fake_slow_heap_serialization, 1); @@ -510,13 +510,13 @@ void record_sample(VALUE recorder_instance, ddog_prof_Slice_Location locations, } } -void track_object(VALUE recorder_instance, VALUE new_object, unsigned int sample_weight) { +void track_object(VALUE recorder_instance, VALUE new_object, unsigned int sample_weight, ddog_CharSlice alloc_class) { struct stack_recorder_state *state; TypedData_Get_Struct(recorder_instance, struct stack_recorder_state, &stack_recorder_typed_data, state); // FIXME: Heap sampling currently has to be done in 2 parts because the construction of locations is happening // very late in the allocation-sampling path (which is shared with the cpu sampling path). This can // be fixed with some refactoring but for now this leads to a less impactful change. - start_heap_allocation_recording(state->heap_recorder, new_object, sample_weight); + start_heap_allocation_recording(state->heap_recorder, new_object, sample_weight, alloc_class); } void record_endpoint(VALUE recorder_instance, uint64_t local_root_span_id, ddog_CharSlice endpoint) { @@ -541,8 +541,11 @@ void record_endpoint(VALUE recorder_instance, uint64_t local_root_span_id, ddog_ typedef struct heap_recorder_iteration_context { struct stack_recorder_state *state; ddog_prof_Profile *profile; + bool error; char error_msg[MAX_LEN_HEAP_ITERATION_ERROR_MSG]; + + size_t profile_gen; } heap_recorder_iteration_context; static bool add_heap_sample_to_active_profile_without_gvl(heap_recorder_iteration_data iteration_data, void *extra_arg) { @@ -551,14 +554,33 @@ static bool add_heap_sample_to_active_profile_without_gvl(heap_recorder_iteratio int64_t metric_values[ALL_VALUE_TYPES_COUNT] = {0}; uint8_t *position_for = context->state->position_for; - metric_values[position_for[HEAP_SAMPLES_VALUE_ID]] = iteration_data.object_data.weight; + 
metric_values[position_for[HEAP_SAMPLES_VALUE_ID]] = iteration_data.object_data->weight; + + const live_object_data *object_data = iteration_data.object_data; + ddog_prof_Label labels[2]; + size_t num_labels = 2; + + labels[0] = (ddog_prof_Label ){ + .key = DDOG_CHARSLICE_C("allocation class"), + .str = (ddog_CharSlice) { + .ptr = object_data->alloc_class, + .len = strlen(object_data->alloc_class), + }, + }; + labels[1] = (ddog_prof_Label ){ + .key = DDOG_CHARSLICE_C("gc gen age"), + .num = context->profile_gen - object_data->alloc_gen, + }; ddog_prof_Profile_Result result = ddog_prof_Profile_add( context->profile, (ddog_prof_Sample) { .locations = iteration_data.locations, .values = (ddog_Slice_I64) {.ptr = metric_values, .len = context->state->enabled_values_count}, - .labels = {0}, + .labels = (ddog_prof_Slice_Label) { + .ptr = labels, + .len = num_labels, + } }, 0 ); @@ -580,6 +602,7 @@ static void build_heap_profile_without_gvl(struct stack_recorder_state *state, d .profile = profile, .error = false, .error_msg = {0}, + .profile_gen = rb_gc_count(), }; heap_recorder_for_each_live_object(state->heap_recorder, add_heap_sample_to_active_profile_without_gvl, (void*) &iteration_context, false); if (iteration_context.error) { @@ -741,9 +764,9 @@ static VALUE _native_record_endpoint(DDTRACE_UNUSED VALUE _self, VALUE recorder_ return Qtrue; } -static VALUE _native_track_object(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance, VALUE new_obj, VALUE weight) { +static VALUE _native_track_object(DDTRACE_UNUSED VALUE _self, VALUE recorder_instance, VALUE new_obj, VALUE weight, VALUE alloc_class) { ENFORCE_TYPE(weight, T_FIXNUM); - track_object(recorder_instance, new_obj, NUM2UINT(weight)); + track_object(recorder_instance, new_obj, NUM2UINT(weight), char_slice_from_ruby_string(alloc_class)); return Qtrue; } diff --git a/ext/ddtrace_profiling_native_extension/stack_recorder.h b/ext/ddtrace_profiling_native_extension/stack_recorder.h index 9190cc89a25..a18fd42d199 100644 
--- a/ext/ddtrace_profiling_native_extension/stack_recorder.h +++ b/ext/ddtrace_profiling_native_extension/stack_recorder.h @@ -22,5 +22,5 @@ typedef struct sample_labels { void record_sample(VALUE recorder_instance, ddog_prof_Slice_Location locations, sample_values values, sample_labels labels); void record_endpoint(VALUE recorder_instance, uint64_t local_root_span_id, ddog_CharSlice endpoint); -void track_object(VALUE recorder_instance, VALUE new_object, unsigned int sample_weight); +void track_object(VALUE recorder_instance, VALUE new_object, unsigned int sample_weight, ddog_CharSlice alloc_class); VALUE enforce_recorder_instance(VALUE object); diff --git a/spec/datadog/profiling/collectors/cpu_and_wall_time_worker_spec.rb b/spec/datadog/profiling/collectors/cpu_and_wall_time_worker_spec.rb index ce8a855dd9f..e0b0e054b66 100644 --- a/spec/datadog/profiling/collectors/cpu_and_wall_time_worker_spec.rb +++ b/spec/datadog/profiling/collectors/cpu_and_wall_time_worker_spec.rb @@ -572,10 +572,6 @@ it 'records live heap objects' do stub_const('CpuAndWallTimeWorkerSpec::TestStruct', Struct.new(:foo)) - # Warm this up to remove VM-related allocations - # TODO: Remove this when we can match on allocation class - CpuAndWallTimeWorkerSpec::TestStruct.new - start live_objects = Array.new(test_num_allocated_object) @@ -587,7 +583,10 @@ test_struct_heap_sample = lambda { |sample| first_frame = sample.locations.first - first_frame.lineno == allocation_line && first_frame.path == __FILE__ && first_frame.base_label == 'new' && + first_frame.lineno == allocation_line && + first_frame.path == __FILE__ && + first_frame.base_label == 'new' && + sample.labels[:'allocation class'] == 'CpuAndWallTimeWorkerSpec::TestStruct' && (sample.values[:'heap-live-samples'] || 0) > 0 } diff --git a/spec/datadog/profiling/stack_recorder_spec.rb b/spec/datadog/profiling/stack_recorder_spec.rb index 564f32dabea..b6a87c2251c 100644 --- a/spec/datadog/profiling/stack_recorder_spec.rb +++ 
b/spec/datadog/profiling/stack_recorder_spec.rb @@ -337,6 +337,8 @@ def sample_types_from(decoded_profile) end describe 'heap samples' do + attr_reader(:num_allocations) + let(:sample_rate) { 50 } let(:metric_values) { { 'cpu-time' => 101, 'cpu-samples' => 1, 'wall-time' => 789, 'alloc-samples' => sample_rate } } let(:labels) { { 'label_a' => 'value_a', 'label_b' => 'value_b', 'state' => 'unknown' }.to_a } @@ -349,7 +351,7 @@ def sample_types_from(decoded_profile) def sample_allocation(obj, _unused = 0) # Heap sampling currently requires this 2-step process to first pass data about the allocated object... - described_class::Testing._native_track_object(stack_recorder, obj, sample_rate) + described_class::Testing._native_track_object(stack_recorder, obj, sample_rate, obj.class.name) Datadog::Profiling::Collectors::Stack::Testing ._native_sample(Thread.current, stack_recorder, metric_values, labels, numeric_labels, 400, false) end @@ -357,19 +359,21 @@ def sample_allocation(obj, _unused = 0) before do allocations = [a_string, an_array, 'a fearsome string', [-10..-1], a_hash, { 'z' => -1, 'y' => '-2', 'x' => false }] @num_allocations = 0 + @before_start_gc_gen = GC.count allocations.each_with_index do |obj, i| # Sample allocations with 2 distinct stacktraces - # (the 2nd parameter is there only to avoid a rubocop issue with identical branches) if i.even? 
sample_allocation(obj, 1) else sample_allocation(obj, 2) end @num_allocations += 1 + GC.start # Force each allocation to be done in its own GC epoch for interesting GC age labels end allocations.clear # The literals in the previous array are now dangling GC.start # And this will clear them, leaving only the non-literals which are still pointed to by the lets + @before_end_gc_gen = GC.count end context 'when disabled' do @@ -393,13 +397,67 @@ def sample_allocation(obj, _unused = 0) samples.select { |s| s.values[:'heap-live-samples'] == 0 } end - it 'include the stack and sample counts for the objects still left alive' do - # We sample from 2 distinct locations - expect(heap_samples.size).to eq(2) + matcher :approximate_age_based_on_allocation_order do |allocation_order| + # To prevent flakiness if other GCs other than the requested ones occur + # allow some margin in the matching. We do not expect this test to be + # particularly memory heavy so at most we expect one unexpected GC. + flaky_margin = 1 + + match do |gc_gen_age| + expected_age = num_allocations - allocation_order + gc_gen_age.between?(expected_age, expected_age + flaky_margin) + end + description do + expected_age = num_allocations - allocation_order + "be between #{expected_age} and #{expected_age + flaky_margin}" + end + end + + it 'include the stack, sample counts and labels for the objects still left alive' do + # There should be 3 different allocation class labels so we expect 3 different heap samples + expect(heap_samples.size).to eq(3) + + string_sample = heap_samples.find { |s| s.labels[:'allocation class'] == 'String' } + expect(string_sample).not_to be nil + expect(string_sample.values).to match( + hash_including( + :"heap-live-samples" => sample_rate, + ) + ) + expect(string_sample.labels).to match( + { + :'allocation class' => 'String', + :'gc gen age' => approximate_age_based_on_allocation_order(0) + } + ) - sum_heap_samples = 0 - heap_samples.each { |s| sum_heap_samples += 
s.values[:'heap-live-samples'] }
-        expect(sum_heap_samples).to eq([a_string, an_array, a_hash].size * sample_rate)
+        array_sample = heap_samples.find { |s| s.labels[:'allocation class'] == 'Array' }
+        expect(array_sample).not_to be nil
+        expect(array_sample.values).to match(
+          hash_including(
+            :"heap-live-samples" => sample_rate,
+          )
+        )
+        expect(array_sample.labels).to match(
+          {
+            :'allocation class' => 'Array',
+            :'gc gen age' => approximate_age_based_on_allocation_order(1)
+          }
+        )
+
+        hash_sample = heap_samples.find { |s| s.labels[:'allocation class'] == 'Hash' }
+        expect(hash_sample).not_to be nil
+        expect(hash_sample.values).to match(
+          hash_including(
+            :"heap-live-samples" => sample_rate,
+          )
+        )
+        expect(hash_sample.labels).to match(
+          {
+            :'allocation class' => 'Hash',
+            :'gc gen age' => approximate_age_based_on_allocation_order(4)
+          }
+        )
       end
 
       it 'keeps on reporting accurate samples for other profile types' do