diff --git a/test/libscap/test_suites/engines/bpf/bpf.cpp b/test/libscap/test_suites/engines/bpf/bpf.cpp
index e68befc6b9..6c450e0e2b 100644
--- a/test/libscap/test_suites/engines/bpf/bpf.cpp
+++ b/test/libscap/test_suites/engines/bpf/bpf.cpp
@@ -138,8 +138,8 @@ TEST(bpf, metrics_v2_check_per_CPU_stats)
 	ssize_t num_possible_CPUs = num_possible_cpus();
 
-	// We want to check our CPUs counters
-	uint32_t flags = METRICS_V2_KERNEL_COUNTERS;
+	// Enabling `METRICS_V2_KERNEL_COUNTERS_PER_CPU` also enables `METRICS_V2_KERNEL_COUNTERS`
+	uint32_t flags = METRICS_V2_KERNEL_COUNTERS_PER_CPU;
 	uint32_t nstats = 0;
 	int32_t rc = 0;
 	const metrics_v2* stats_v2 = scap_get_stats_v2(h, flags, &nstats, &rc);
@@ -151,9 +151,18 @@ TEST(bpf, metrics_v2_check_per_CPU_stats)
 	ssize_t found = 0;
 	char expected_name[METRIC_NAME_MAX] = "";
 	snprintf(expected_name, METRIC_NAME_MAX, N_EVENTS_PER_CPU_PREFIX"%ld", found);
+	bool check_general_kernel_counters_presence = false;
 
 	while(i < nstats)
 	{
+		// We check if `METRICS_V2_KERNEL_COUNTERS` is enabled as well
+		if(strncmp(stats_v2[i].name, N_EVENTS_PREFIX, sizeof(N_EVENTS_PREFIX)) == 0)
+		{
+			check_general_kernel_counters_presence = true;
+			i++;
+			continue;
+		}
+
 		// `sizeof(N_EVENTS_PER_CPU_PREFIX)-1` because we need to exclude the `\0`
 		if(strncmp(stats_v2[i].name, N_EVENTS_PER_CPU_PREFIX, sizeof(N_EVENTS_PER_CPU_PREFIX)-1) == 0)
 		{
@@ -176,6 +185,8 @@ TEST(bpf, metrics_v2_check_per_CPU_stats)
 		}
 	}
 
+	ASSERT_TRUE(check_general_kernel_counters_presence) << "per-CPU counters are enabled but general kernel counters are not";
+
 	// This test could fail in case of rare race conditions in which the number of available CPUs changes
 	// between the scap_open and the `num_possible_cpus` function. In CI we shouldn't have hot plugs so probably we
 	// can live with this.
@@ -220,6 +231,16 @@ TEST(bpf, metrics_v2_check_results)
 			FAIL() << "unable to find stat '" << stat_name << "' into the array";
 		}
 	}
+
+	// Check per-CPU stats are not enabled since we didn't provide the flag.
+	for(i = 0; i < nstats; i++)
+	{
+		if(strncmp(stats_v2[i].name, N_EVENTS_PER_CPU_PREFIX, sizeof(N_EVENTS_PER_CPU_PREFIX)-1) == 0)
+		{
+			FAIL() << "per-CPU counters are enabled but we didn't provide the flag!";
+		}
+	}
+
 	scap_close(h);
 }
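For reference, the pattern the test exercises is what any consumer of the new flag would do. The sketch below is not part of the patch; it assumes an already-opened `scap_t* h` and the usual libscap headers, and relies only on symbols introduced or used by this diff (`scap_get_stats_v2`, `metrics_v2`, `N_EVENTS_PER_CPU_PREFIX`, `SCAP_SUCCESS`):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <scap.h> /* or the in-tree libscap headers, which also pull in metrics_v2.h */

/* Minimal consumer sketch: requesting only the per-CPU flag is enough, since
 * scap_get_stats_v2() also turns on METRICS_V2_KERNEL_COUNTERS, so both the
 * aggregate counters and the per-CPU entries show up in the returned array. */
static void dump_per_cpu_counters(scap_t *h)
{
	uint32_t nstats = 0;
	int32_t rc = 0;
	const metrics_v2 *stats = scap_get_stats_v2(h, METRICS_V2_KERNEL_COUNTERS_PER_CPU, &nstats, &rc);
	if(!stats || rc != SCAP_SUCCESS)
	{
		return;
	}
	for(uint32_t i = 0; i < nstats; i++)
	{
		/* `sizeof(...) - 1` makes this a prefix match, as in the test above. */
		if(strncmp(stats[i].name, N_EVENTS_PER_CPU_PREFIX, sizeof(N_EVENTS_PER_CPU_PREFIX) - 1) == 0)
		{
			printf("%s = %lu\n", stats[i].name, (unsigned long)stats[i].value.u64);
		}
	}
}
```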
diff --git a/test/libscap/test_suites/engines/kmod/kmod.cpp b/test/libscap/test_suites/engines/kmod/kmod.cpp
index e7e1933371..6a1cc4b420 100644
--- a/test/libscap/test_suites/engines/kmod/kmod.cpp
+++ b/test/libscap/test_suites/engines/kmod/kmod.cpp
@@ -193,8 +193,8 @@ TEST(kmod, metrics_v2_check_per_CPU_stats)
 	ssize_t num_online_CPUs = sysconf(_SC_NPROCESSORS_ONLN);
 
-	// We want to check our CPUs counters
-	uint32_t flags = METRICS_V2_KERNEL_COUNTERS;
+	// Enabling `METRICS_V2_KERNEL_COUNTERS_PER_CPU` also enables `METRICS_V2_KERNEL_COUNTERS`
+	uint32_t flags = METRICS_V2_KERNEL_COUNTERS_PER_CPU;
 	uint32_t nstats = 0;
 	int32_t rc = 0;
 	const metrics_v2* stats_v2 = scap_get_stats_v2(h, flags, &nstats, &rc);
@@ -206,9 +206,18 @@ TEST(kmod, metrics_v2_check_per_CPU_stats)
 	ssize_t found = 0;
 	char expected_name[METRIC_NAME_MAX] = "";
 	snprintf(expected_name, METRIC_NAME_MAX, N_EVENTS_PER_DEVICE_PREFIX"%ld", found);
+	bool check_general_kernel_counters_presence = false;
 
 	while(i < nstats)
 	{
+		// We check if `METRICS_V2_KERNEL_COUNTERS` is enabled as well
+		if(strncmp(stats_v2[i].name, N_EVENTS_PREFIX, sizeof(N_EVENTS_PREFIX)) == 0)
+		{
+			check_general_kernel_counters_presence = true;
+			i++;
+			continue;
+		}
+
 		// `sizeof(N_EVENTS_PER_DEVICE_PREFIX)-1` because we need to exclude the `\0`
 		if(strncmp(stats_v2[i].name, N_EVENTS_PER_DEVICE_PREFIX, sizeof(N_EVENTS_PER_DEVICE_PREFIX)-1) == 0)
 		{
@@ -231,6 +240,8 @@ TEST(kmod, metrics_v2_check_per_CPU_stats)
 		}
 	}
 
+	ASSERT_TRUE(check_general_kernel_counters_presence) << "per-CPU counters are enabled but general kernel counters are not";
+
 	// This test could fail in case of rare race conditions in which the number of online CPUs changes
 	// between the scap_open and the `sysconf(_SC_NPROCESSORS_ONLN)` function. In CI we shouldn't have hot plugs so probably we
 	// can live with this.
@@ -271,6 +282,16 @@ TEST(kmod, metrics_v2_check_results)
 			FAIL() << "unable to find stat '" << stat_name << "' into the array";
 		}
 	}
+
+	// Check per-CPU stats are not enabled since we didn't provide the flag.
+	for(i = 0; i < nstats; i++)
+	{
+		if(strncmp(stats_v2[i].name, N_EVENTS_PER_DEVICE_PREFIX, sizeof(N_EVENTS_PER_DEVICE_PREFIX)-1) == 0)
+		{
+			FAIL() << "per-CPU counters are enabled but we didn't provide the flag!";
+		}
+	}
+
 	scap_close(h);
 }
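A subtlety worth spelling out: the loops above use two different `strncmp` bounds on purpose. With `sizeof(N_EVENTS_PREFIX)` the terminating `\0` participates in the comparison, so the check behaves like an exact match and cannot accidentally swallow the per-CPU entries, which merely start with `n_evts`; with `sizeof(PREFIX) - 1` it is a pure prefix match. A self-contained illustration (the per-CPU name `"n_evts_cpu_0"` is assumed here purely for demonstration):

```c
#include <stdio.h>
#include <string.h>

#define N_EVENTS_PREFIX "n_evts"

int main(void)
{
	/* sizeof includes the '\0', so the NUL must match too: effectively an exact match. */
	printf("%d\n", strncmp("n_evts", N_EVENTS_PREFIX, sizeof(N_EVENTS_PREFIX)) == 0);        /* 1 */
	printf("%d\n", strncmp("n_evts_cpu_0", N_EVENTS_PREFIX, sizeof(N_EVENTS_PREFIX)) == 0);  /* 0 */

	/* sizeof - 1 stops before the '\0': a pure prefix match. */
	printf("%d\n", strncmp("n_evts_cpu_0", N_EVENTS_PREFIX, sizeof(N_EVENTS_PREFIX) - 1) == 0); /* 1 */
	return 0;
}
```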
diff --git a/test/libscap/test_suites/engines/modern_bpf/modern_bpf.cpp b/test/libscap/test_suites/engines/modern_bpf/modern_bpf.cpp
index 267f0b7a0f..3863b17a15 100644
--- a/test/libscap/test_suites/engines/modern_bpf/modern_bpf.cpp
+++ b/test/libscap/test_suites/engines/modern_bpf/modern_bpf.cpp
@@ -257,8 +257,8 @@ TEST(modern_bpf, metrics_v2_check_per_CPU_stats)
 	ssize_t num_possible_CPUs = num_possible_cpus();
 
-	// We want to check our CPUs counters
-	uint32_t flags = METRICS_V2_KERNEL_COUNTERS;
+	// Enabling `METRICS_V2_KERNEL_COUNTERS_PER_CPU` also enables `METRICS_V2_KERNEL_COUNTERS`
+	uint32_t flags = METRICS_V2_KERNEL_COUNTERS_PER_CPU;
 	uint32_t nstats = 0;
 	int32_t rc = 0;
 	const metrics_v2* stats_v2 = scap_get_stats_v2(h, flags, &nstats, &rc);
@@ -270,9 +270,18 @@ TEST(modern_bpf, metrics_v2_check_per_CPU_stats)
 	ssize_t found = 0;
 	char expected_name[METRIC_NAME_MAX] = "";
 	snprintf(expected_name, METRIC_NAME_MAX, N_EVENTS_PER_CPU_PREFIX"%ld", found);
+	bool check_general_kernel_counters_presence = false;
 
 	while(i < nstats)
 	{
+		// We check if `METRICS_V2_KERNEL_COUNTERS` is enabled as well
+		if(strncmp(stats_v2[i].name, N_EVENTS_PREFIX, sizeof(N_EVENTS_PREFIX)) == 0)
+		{
+			check_general_kernel_counters_presence = true;
+			i++;
+			continue;
+		}
+
 		// `sizeof(N_EVENTS_PER_CPU_PREFIX)-1` because we need to exclude the `\0`
 		if(strncmp(stats_v2[i].name, N_EVENTS_PER_CPU_PREFIX, sizeof(N_EVENTS_PER_CPU_PREFIX)-1) == 0)
 		{
@@ -295,6 +304,8 @@ TEST(modern_bpf, metrics_v2_check_per_CPU_stats)
 		}
 	}
 
+	ASSERT_TRUE(check_general_kernel_counters_presence) << "per-CPU counters are enabled but general kernel counters are not";
+
 	// This test could fail in case of rare race conditions in which the number of available CPUs changes
 	// between the scap_open and the `num_possible_cpus` function. In CI we shouldn't have hot plugs so probably we
 	// can live with this.
@@ -340,6 +351,16 @@ TEST(modern_bpf, metrics_v2_check_results)
 			FAIL() << "unable to find stat '" << stat_name << "' into the array";
 		}
 	}
+
+	// Check per-CPU stats are not enabled since we didn't provide the flag.
+	for(i = 0; i < nstats; i++)
+	{
+		if(strncmp(stats_v2[i].name, N_EVENTS_PER_CPU_PREFIX, sizeof(N_EVENTS_PER_CPU_PREFIX)-1) == 0)
+		{
+			FAIL() << "per-CPU counters are enabled but we didn't provide the flag!";
+		}
+	}
+
 	scap_close(h);
 }
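Note the CPU-count asymmetry across the three tests: the bpf and modern_bpf tests size their expectations with `num_possible_cpus()` (eBPF per-CPU maps have a slot for every possible CPU), while the kmod test uses `sysconf(_SC_NPROCESSORS_ONLN)` (the kernel module allocates one ring buffer per online CPU). The two numbers can legitimately differ, which is exactly the hot-plug race the comments mention. A quick standalone probe of the two system views (`_SC_NPROCESSORS_CONF` is used here as an approximation of "possible"; the tests' `num_possible_cpus()` helper parses the kernel's view instead):

```c
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* CPUs configured on the system: an upper bound, close to "possible". */
	long conf = sysconf(_SC_NPROCESSORS_CONF);
	/* CPUs currently online: can be lower, e.g. after offlining a core. */
	long onln = sysconf(_SC_NPROCESSORS_ONLN);
	printf("configured: %ld, online: %ld\n", conf, onln);
	return 0;
}
```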
diff --git a/userspace/libpman/src/stats.c b/userspace/libpman/src/stats.c
index 43766cc15f..9d4bd16001 100644
--- a/userspace/libpman/src/stats.c
+++ b/userspace/libpman/src/stats.c
@@ -53,7 +53,7 @@ typedef enum modern_bpf_libbpf_stats
 } modern_bpf_libbpf_stats;
 
 const char *const modern_bpf_kernel_counters_stats_names[] = {
-	[MODERN_BPF_N_EVTS] = "n_evts",
+	[MODERN_BPF_N_EVTS] = N_EVENTS_PREFIX,
 	[MODERN_BPF_N_DROPS_BUFFER_TOTAL] = "n_drops_buffer_total",
 	[MODERN_BPF_N_DROPS_BUFFER_CLONE_FORK_ENTER] = "n_drops_buffer_clone_fork_enter",
 	[MODERN_BPF_N_DROPS_BUFFER_CLONE_FORK_EXIT] = "n_drops_buffer_clone_fork_exit",
@@ -140,10 +140,10 @@ int pman_get_scap_stats(struct scap_stats *stats)
 	return errno;
 }
 
-static void set_u64_monotonic_kernel_counter(uint32_t pos, uint64_t val)
+static void set_u64_monotonic_kernel_counter(uint32_t pos, uint64_t val, uint32_t metric_flag)
 {
 	g_state.stats[pos].type = METRIC_VALUE_TYPE_U64;
-	g_state.stats[pos].flags = METRICS_V2_KERNEL_COUNTERS;
+	g_state.stats[pos].flags = metric_flag;
 	g_state.stats[pos].unit = METRIC_VALUE_UNIT_COUNT;
 	g_state.stats[pos].metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC;
 	g_state.stats[pos].value.u64 = val;
@@ -166,11 +166,15 @@ struct metrics_v2 *pman_get_metrics_v2(uint32_t flags, uint32_t *nstats, int32_t
 		}
 	}
 
-	// At the moment for each available CPU we want:
-	// - the number of events.
-	// - the number of drops.
-	uint32_t per_cpu_stats = g_state.n_possible_cpus* 2;
-
+	uint32_t per_cpu_stats = 0;
+	if(flags & METRICS_V2_KERNEL_COUNTERS_PER_CPU)
+	{
+		// At the moment for each available CPU we want:
+		// - the number of events.
+		// - the number of drops.
+		per_cpu_stats = g_state.n_possible_cpus * 2;
+	}
+
 	g_state.nstats = MODERN_BPF_MAX_KERNEL_COUNTERS_STATS + per_cpu_stats + (nprogs_attached * MODERN_BPF_MAX_LIBBPF_STATS);
 	g_state.stats = (metrics_v2 *)calloc(g_state.nstats, sizeof(metrics_v2));
 	if(!g_state.stats)
@@ -197,7 +201,7 @@ struct metrics_v2 *pman_get_metrics_v2(uint32_t flags, uint32_t *nstats, int32_t
 
 	for(uint32_t stat = 0; stat < MODERN_BPF_MAX_KERNEL_COUNTERS_STATS; stat++)
 	{
-		set_u64_monotonic_kernel_counter(stat, 0);
+		set_u64_monotonic_kernel_counter(stat, 0, METRICS_V2_KERNEL_COUNTERS);
 		strlcpy(g_state.stats[stat].name, (char*)modern_bpf_kernel_counters_stats_names[stat], METRIC_NAME_MAX);
 	}
 
@@ -234,15 +238,18 @@ struct metrics_v2 *pman_get_metrics_v2(uint32_t flags, uint32_t *nstats, int32_t
 		g_state.stats[MODERN_BPF_N_DROPS_SCRATCH_MAP].value.u64 += cnt_map.n_drops_max_event_size;
 		g_state.stats[MODERN_BPF_N_DROPS].value.u64 += (cnt_map.n_drops_buffer + cnt_map.n_drops_max_event_size);
 
-		// We set the num events for that CPU.
-		set_u64_monotonic_kernel_counter(pos, cnt_map.n_evts);
-		snprintf(g_state.stats[pos].name, METRIC_NAME_MAX, N_EVENTS_PER_CPU_PREFIX"%d", index);
-		pos++;
-
-		// We set the drops for that CPU.
-		set_u64_monotonic_kernel_counter(pos, cnt_map.n_drops_buffer + cnt_map.n_drops_max_event_size);
-		snprintf(g_state.stats[pos].name, METRIC_NAME_MAX, N_DROPS_PER_CPU_PREFIX"%d", index);
-		pos++;
+		if((flags & METRICS_V2_KERNEL_COUNTERS_PER_CPU))
+		{
+			// We set the num events for that CPU.
+			set_u64_monotonic_kernel_counter(pos, cnt_map.n_evts, METRICS_V2_KERNEL_COUNTERS_PER_CPU);
+			snprintf(g_state.stats[pos].name, METRIC_NAME_MAX, N_EVENTS_PER_CPU_PREFIX"%d", index);
+			pos++;
+
+			// We set the drops for that CPU.
+			set_u64_monotonic_kernel_counter(pos, cnt_map.n_drops_buffer + cnt_map.n_drops_max_event_size, METRICS_V2_KERNEL_COUNTERS_PER_CPU);
+			snprintf(g_state.stats[pos].name, METRIC_NAME_MAX, N_DROPS_PER_CPU_PREFIX"%d", index);
+			pos++;
+		}
 	}
 	offset = pos;
 }
@@ -285,25 +292,22 @@ struct metrics_v2 *pman_get_metrics_v2(uint32_t flags, uint32_t *nstats, int32_t
 			g_state.stats[offset].type = METRIC_VALUE_TYPE_U64;
 			g_state.stats[offset].flags = METRICS_V2_LIBBPF_STATS;
 			strlcpy(g_state.stats[offset].name, info.name, METRIC_NAME_MAX);
+			strlcat(g_state.stats[offset].name, modern_bpf_libbpf_stats_names[stat], sizeof(g_state.stats[offset].name));
 			switch(stat)
 			{
 			case RUN_CNT:
-				strlcat(g_state.stats[offset].name, modern_bpf_libbpf_stats_names[RUN_CNT], sizeof(g_state.stats[offset].name));
-				g_state.stats[stat].flags = METRICS_V2_KERNEL_COUNTERS;
-				g_state.stats[stat].unit = METRIC_VALUE_UNIT_COUNT;
-				g_state.stats[stat].metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC;
+				g_state.stats[offset].unit = METRIC_VALUE_UNIT_COUNT;
+				g_state.stats[offset].metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC;
 				g_state.stats[offset].value.u64 = info.run_cnt;
 				break;
 			case RUN_TIME_NS:
-				strlcat(g_state.stats[offset].name, modern_bpf_libbpf_stats_names[RUN_TIME_NS], sizeof(g_state.stats[offset].name));
-				g_state.stats[stat].unit = METRIC_VALUE_UNIT_TIME_NS_COUNT;
-				g_state.stats[stat].metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC;
+				g_state.stats[offset].unit = METRIC_VALUE_UNIT_TIME_NS_COUNT;
+				g_state.stats[offset].metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC;
 				g_state.stats[offset].value.u64 = info.run_time_ns;
 				break;
 			case AVG_TIME_NS:
-				strlcat(g_state.stats[offset].name, modern_bpf_libbpf_stats_names[AVG_TIME_NS], sizeof(g_state.stats[offset].name));
-				g_state.stats[stat].unit = METRIC_VALUE_UNIT_TIME_NS;
-				g_state.stats[stat].metric_type = METRIC_VALUE_METRIC_TYPE_NON_MONOTONIC_CURRENT;
+				g_state.stats[offset].unit = METRIC_VALUE_UNIT_TIME_NS;
+				g_state.stats[offset].metric_type = METRIC_VALUE_METRIC_TYPE_NON_MONOTONIC_CURRENT;
 				g_state.stats[offset].value.u64 = 0;
 				if(info.run_cnt > 0)
 				{
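Two things happen in this file beyond the flag plumbing: the libbpf hunk fixes entries that wrote to `g_state.stats[stat]` instead of `g_state.stats[offset]` (which clobbered the kernel-counter slots), and each entry now carries the flag it was produced under in its `flags` field. The latter is what lets a consumer partition the returned array by metrics class. A hypothetical helper, not from the patch, sketching that filtering (it assumes only the `metrics_v2` layout shown in this diff):

```c
#include <stdint.h>

/* Count how many returned entries belong to a given metrics class, e.g.
 * METRICS_V2_KERNEL_COUNTERS vs METRICS_V2_KERNEL_COUNTERS_PER_CPU vs
 * METRICS_V2_LIBBPF_STATS. Before this patch, per-CPU entries were tagged
 * METRICS_V2_KERNEL_COUNTERS too, so this distinction was impossible. */
static uint32_t count_metrics_with_flag(const metrics_v2 *stats, uint32_t nstats, uint32_t flag)
{
	uint32_t n = 0;
	for(uint32_t i = 0; i < nstats; i++)
	{
		if(stats[i].flags & flag)
		{
			n++;
		}
	}
	return n;
}
```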
diff --git a/userspace/libscap/engine/bpf/scap_bpf.c b/userspace/libscap/engine/bpf/scap_bpf.c
index 7728408c49..f814072f57 100644
--- a/userspace/libscap/engine/bpf/scap_bpf.c
+++ b/userspace/libscap/engine/bpf/scap_bpf.c
@@ -50,7 +50,7 @@ limitations under the License.
 #include
 
 static const char * const bpf_kernel_counters_stats_names[] = {
-	[BPF_N_EVTS] = "n_evts",
+	[BPF_N_EVTS] = N_EVENTS_PREFIX,
 	[BPF_N_DROPS_BUFFER_TOTAL] = "n_drops_buffer_total",
 	[BPF_N_DROPS_BUFFER_CLONE_FORK_ENTER] = "n_drops_buffer_clone_fork_enter",
 	[BPF_N_DROPS_BUFFER_CLONE_FORK_EXIT] = "n_drops_buffer_clone_fork_exit",
@@ -1688,10 +1688,10 @@ int32_t scap_bpf_get_stats(struct scap_engine_handle engine, scap_stats* stats)
 	return SCAP_SUCCESS;
 }
 
-static void set_u64_monotonic_kernel_counter(struct metrics_v2* m, uint64_t val)
+static void set_u64_monotonic_kernel_counter(struct metrics_v2* m, uint64_t val, uint32_t metric_flag)
 {
 	m->type = METRIC_VALUE_TYPE_U64;
-	m->flags = METRICS_V2_KERNEL_COUNTERS;
+	m->flags = metric_flag;
 	m->unit = METRIC_VALUE_UNIT_COUNT;
 	m->metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC;
 	m->value.u64 = val;
@@ -1722,11 +1722,15 @@ const struct metrics_v2* scap_bpf_get_stats_v2(struct scap_engine_handle engine,
 		}
 	}
 
-	// At the moment for each available CPU we want:
-	// - the number of events.
-	// - the number of drops.
-	uint32_t per_cpu_stats = handle->m_ncpus* 2;
-
+	uint32_t per_cpu_stats = 0;
+	if(flags & METRICS_V2_KERNEL_COUNTERS_PER_CPU)
+	{
+		// At the moment for each available CPU we want:
+		// - the number of events.
+		// - the number of drops.
+		per_cpu_stats = handle->m_ncpus * 2;
+	}
+
 	handle->m_nstats = BPF_MAX_KERNEL_COUNTERS_STATS + per_cpu_stats + (nprogs_attached * BPF_MAX_LIBBPF_STATS);
 	handle->m_stats = (metrics_v2*)calloc(handle->m_nstats, sizeof(metrics_v2));
 	if(!handle->m_stats)
@@ -1746,7 +1750,7 @@ const struct metrics_v2* scap_bpf_get_stats_v2(struct scap_engine_handle engine,
 	{
 		for(uint32_t stat = 0; stat < BPF_MAX_KERNEL_COUNTERS_STATS; stat++)
 		{
-			set_u64_monotonic_kernel_counter(&(stats[stat]), 0);
+			set_u64_monotonic_kernel_counter(&(stats[stat]), 0, METRICS_V2_KERNEL_COUNTERS);
 			strlcpy(stats[stat].name, (char*)bpf_kernel_counters_stats_names[stat], METRIC_NAME_MAX);
 		}
 
@@ -1783,15 +1787,18 @@ const struct metrics_v2* scap_bpf_get_stats_v2(struct scap_engine_handle engine,
 				v.n_drops_pf + \
 				v.n_drops_bug;
 
-			// We set the num events for that CPU.
-			set_u64_monotonic_kernel_counter(&(stats[pos]), v.n_evts);
-			snprintf(stats[pos].name, METRIC_NAME_MAX, N_EVENTS_PER_CPU_PREFIX"%d", cpu);
-			pos++;
-
-			// We set the drops for that CPU.
-			set_u64_monotonic_kernel_counter(&(stats[pos]), v.n_drops_buffer + v.n_drops_scratch_map + v.n_drops_pf + v.n_drops_bug);
-			snprintf(stats[pos].name, METRIC_NAME_MAX, N_DROPS_PER_CPU_PREFIX"%d", cpu);
-			pos++;
+			if((flags & METRICS_V2_KERNEL_COUNTERS_PER_CPU))
+			{
+				// We set the num events for that CPU.
+				set_u64_monotonic_kernel_counter(&(stats[pos]), v.n_evts, METRICS_V2_KERNEL_COUNTERS_PER_CPU);
+				snprintf(stats[pos].name, METRIC_NAME_MAX, N_EVENTS_PER_CPU_PREFIX"%d", cpu);
+				pos++;
+
+				// We set the drops for that CPU.
+				set_u64_monotonic_kernel_counter(&(stats[pos]), v.n_drops_buffer + v.n_drops_scratch_map + v.n_drops_pf + v.n_drops_bug, METRICS_V2_KERNEL_COUNTERS_PER_CPU);
+				snprintf(stats[pos].name, METRIC_NAME_MAX, N_DROPS_PER_CPU_PREFIX"%d", cpu);
+				pos++;
+			}
 		}
 		offset = pos;
 	}
@@ -1849,22 +1856,20 @@ const struct metrics_v2* scap_bpf_get_stats_v2(struct scap_engine_handle engine,
 			{
 				strlcpy(stats[offset].name, info.name, METRIC_NAME_MAX);
 			}
+			strlcat(stats[offset].name, bpf_libbpf_stats_names[stat], sizeof(stats[offset].name));
 			switch(stat)
 			{
 			case RUN_CNT:
-				strlcat(stats[offset].name, bpf_libbpf_stats_names[RUN_CNT], sizeof(stats[offset].name));
 				stats[offset].value.u64 = info.run_cnt;
 				stats[offset].unit = METRIC_VALUE_UNIT_COUNT;
 				stats[offset].metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC;
 				break;
 			case RUN_TIME_NS:
-				strlcat(stats[offset].name, bpf_libbpf_stats_names[RUN_TIME_NS], sizeof(stats[offset].name));
 				stats[offset].value.u64 = info.run_time_ns;
 				stats[offset].unit = METRIC_VALUE_UNIT_TIME_NS_COUNT;
 				stats[offset].metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC;
 				break;
 			case AVG_TIME_NS:
-				strlcat(stats[offset].name, bpf_libbpf_stats_names[AVG_TIME_NS], sizeof(stats[offset].name));
 				stats[offset].value.u64 = 0;
 				stats[offset].unit = METRIC_VALUE_UNIT_TIME_NS;
 				stats[offset].metric_type = METRIC_VALUE_METRIC_TYPE_NON_MONOTONIC_CURRENT;
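The `info.run_cnt`/`info.run_time_ns` values consumed in the libbpf hunks above come from the kernel's per-program BPF statistics, fetched through libbpf's object-info call; they stay at zero unless BPF stats collection is active (e.g. `sysctl kernel.bpf_stats_enabled=1` or the `BPF_ENABLE_STATS` command). A standalone sketch of that read path, independent of this patch:

```c
#include <stdio.h>
#include <bpf/bpf.h>        /* bpf_obj_get_info_by_fd() */
#include <linux/bpf.h>      /* struct bpf_prog_info */

/* Print the kernel-side stats for one attached BPF program, given its fd.
 * Assumes kernel BPF stats are enabled, otherwise the counters read 0. */
static void print_prog_stats(int prog_fd)
{
	struct bpf_prog_info info = {0};
	__u32 len = sizeof(info);
	if(bpf_obj_get_info_by_fd(prog_fd, &info, &len) == 0)
	{
		printf("%s: run_cnt=%llu run_time_ns=%llu avg_ns=%llu\n",
		       info.name,
		       (unsigned long long)info.run_cnt,
		       (unsigned long long)info.run_time_ns,
		       info.run_cnt > 0 ? (unsigned long long)(info.run_time_ns / info.run_cnt) : 0ULL);
	}
}
```

This mirrors the patch's `AVG_TIME_NS` handling: the average is derived from the two monotonic counters and guarded against division by zero.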
diff --git a/userspace/libscap/engine/kmod/scap_kmod.c b/userspace/libscap/engine/kmod/scap_kmod.c
index 80d3dc6859..3622781987 100644
--- a/userspace/libscap/engine/kmod/scap_kmod.c
+++ b/userspace/libscap/engine/kmod/scap_kmod.c
@@ -40,7 +40,7 @@ limitations under the License.
 #include
 
 static const char * const kmod_kernel_counters_stats_names[] = {
-	[KMOD_N_EVTS] = "n_evts",
+	[KMOD_N_EVTS] = N_EVENTS_PREFIX,
 	[KMOD_N_DROPS_BUFFER_TOTAL] = "n_drops_buffer_total",
 	[KMOD_N_DROPS_BUFFER_CLONE_FORK_ENTER] = "n_drops_buffer_clone_fork_enter",
 	[KMOD_N_DROPS_BUFFER_CLONE_FORK_EXIT] = "n_drops_buffer_clone_fork_exit",
@@ -586,10 +586,10 @@ int32_t scap_kmod_get_stats(struct scap_engine_handle engine, scap_stats* stats)
 	return SCAP_SUCCESS;
 }
 
-static void set_u64_monotonic_kernel_counter(struct metrics_v2* m, uint64_t val)
+static void set_u64_monotonic_kernel_counter(struct metrics_v2* m, uint64_t val, uint32_t metric_flag)
 {
 	m->type = METRIC_VALUE_TYPE_U64;
-	m->flags = METRICS_V2_KERNEL_COUNTERS;
+	m->flags = metric_flag;
 	m->unit = METRIC_VALUE_UNIT_COUNT;
 	m->metric_type = METRIC_VALUE_METRIC_TYPE_MONOTONIC;
 	m->value.u64 = val;
@@ -606,11 +606,17 @@ const struct metrics_v2* scap_kmod_get_stats_v2(struct scap_engine_handle engine
 	// If it is the first time we call this function, we allocate the stats
 	if(handle->m_stats == NULL)
 	{
-		// The difference with other drivers is that here we consider only ONLINE CPUs and not the AVILABLE ones.
-		// At the moment for each ONLINE CPU we want:
-		// - the number of events.
-		// - the number of drops.
-		uint32_t per_dev_stats = devset->m_ndevs* 2;
+		// We don't allocate space for per-CPU stats if we don't enable them at init time.
+		// At the moment we don't support dynamic metrics selection at runtime.
+		uint32_t per_dev_stats = 0;
+		if(flags & METRICS_V2_KERNEL_COUNTERS_PER_CPU)
+		{
+			// The difference with other drivers is that here we consider only ONLINE CPUs and not the AVAILABLE ones.
+			// At the moment for each ONLINE CPU we want:
+			// - the number of events.
+			// - the number of drops.
+			per_dev_stats = devset->m_ndevs * 2;
+		}
 
 		handle->m_nstats = KMOD_MAX_KERNEL_COUNTERS_STATS + per_dev_stats;
 		handle->m_stats = (metrics_v2*)calloc(handle->m_nstats, sizeof(metrics_v2));
@@ -631,7 +637,7 @@ const struct metrics_v2* scap_kmod_get_stats_v2(struct scap_engine_handle engine
 	{
 		for(uint32_t stat = 0; stat < KMOD_MAX_KERNEL_COUNTERS_STATS; stat++)
 		{
-			set_u64_monotonic_kernel_counter(&(stats[stat]), 0);
+			set_u64_monotonic_kernel_counter(&(stats[stat]), 0, METRICS_V2_KERNEL_COUNTERS);
 			strlcpy(stats[stat].name, (char*)kmod_kernel_counters_stats_names[stat], METRIC_NAME_MAX);
 		}
 
@@ -660,15 +666,18 @@ const struct metrics_v2* scap_kmod_get_stats_v2(struct scap_engine_handle engine
 				dev->m_bufinfo->n_drops_pf;
 			stats[KMOD_N_PREEMPTIONS].value.u64 += dev->m_bufinfo->n_preemptions;
 
-			// We set the num events for that CPU.
-			set_u64_monotonic_kernel_counter(&(stats[pos]), dev->m_bufinfo->n_evts);
-			snprintf(stats[pos].name, METRIC_NAME_MAX, N_EVENTS_PER_DEVICE_PREFIX"%d", j);
-			pos++;
-
-			// We set the drops for that CPU.
-			set_u64_monotonic_kernel_counter(&(stats[pos]), dev->m_bufinfo->n_drops_buffer + dev->m_bufinfo->n_drops_pf);
-			snprintf(stats[pos].name, METRIC_NAME_MAX, N_DROPS_PER_DEVICE_PREFIX"%d", j);
-			pos++;
+			if((flags & METRICS_V2_KERNEL_COUNTERS_PER_CPU))
+			{
+				// We set the num events for that CPU.
+				set_u64_monotonic_kernel_counter(&(stats[pos]), dev->m_bufinfo->n_evts, METRICS_V2_KERNEL_COUNTERS_PER_CPU);
+				snprintf(stats[pos].name, METRIC_NAME_MAX, N_EVENTS_PER_DEVICE_PREFIX"%d", j);
+				pos++;
+
+				// We set the drops for that CPU.
+				set_u64_monotonic_kernel_counter(&(stats[pos]), dev->m_bufinfo->n_drops_buffer + dev->m_bufinfo->n_drops_pf, METRICS_V2_KERNEL_COUNTERS_PER_CPU);
+				snprintf(stats[pos].name, METRIC_NAME_MAX, N_DROPS_PER_DEVICE_PREFIX"%d", j);
+				pos++;
+			}
 		}
 		offset = pos;
 	}
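The shape of the kmod loop is worth isolating: the aggregate counters are always accumulated across all devices, while the per-device entries are emitted only when the flag was requested. A miniature, self-contained rendering of that pattern (the `n_evts_dev_` name and the `emit` helper are illustrative stand-ins, not the patch's actual `N_EVENTS_PER_DEVICE_PREFIX` expansion):

```c
#include <stdint.h>
#include <stdio.h>

static void emit(uint32_t idx, uint64_t val)
{
	printf("n_evts_dev_%u = %lu\n", idx, (unsigned long)val);
}

/* `dev_evts[j]` stands in for dev->m_bufinfo->n_evts of device j. */
static uint64_t collect(const uint64_t *dev_evts, uint32_t ndevs, int per_dev_requested)
{
	uint64_t total = 0;
	for(uint32_t j = 0; j < ndevs; j++)
	{
		total += dev_evts[j];        /* aggregate counter: always kept */
		if(per_dev_requested)
		{
			emit(j, dev_evts[j]); /* per-device entry: only with the flag */
		}
	}
	return total;
}
```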
diff --git a/userspace/libscap/examples/01-open/scap_open.c b/userspace/libscap/examples/01-open/scap_open.c
index 43dc6ddedf..c38f228420 100644
--- a/userspace/libscap/examples/01-open/scap_open.c
+++ b/userspace/libscap/examples/01-open/scap_open.c
@@ -866,7 +866,7 @@ void print_stats()
 {
 	gettimeofday(&tval_end, NULL);
 	timersub(&tval_end, &tval_start, &tval_result);
-	uint32_t flags = METRICS_V2_KERNEL_COUNTERS | METRICS_V2_LIBBPF_STATS;
+	uint32_t flags = METRICS_V2_KERNEL_COUNTERS | METRICS_V2_LIBBPF_STATS | METRICS_V2_KERNEL_COUNTERS_PER_CPU;
 	uint32_t nstats;
 	int32_t rc;
 	const metrics_v2* stats_v2;
diff --git a/userspace/libscap/metrics_v2.h b/userspace/libscap/metrics_v2.h
index 31d611c621..896ed16709 100644
--- a/userspace/libscap/metrics_v2.h
+++ b/userspace/libscap/metrics_v2.h
@@ -30,6 +30,11 @@ extern "C" {
 //
 #define METRIC_NAME_MAX 512
 
+//
+// Prefix name for the n_evts metric (used by all drivers)
+//
+#define N_EVENTS_PREFIX "n_evts"
+
 //
 // Prefix names for per-CPU metrics (Used by legacy ebpf and modern ebpf)
 //
@@ -52,6 +57,7 @@ extern "C" {
 #define METRICS_V2_RULE_COUNTERS (1 << 4)
 #define METRICS_V2_MISC (1 << 5)
 #define METRICS_V2_PLUGINS (1 << 6)
+#define METRICS_V2_KERNEL_COUNTERS_PER_CPU (1 << 7) // Requesting this also silently enables METRICS_V2_KERNEL_COUNTERS
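Since each `METRICS_V2_*` flag occupies its own bit, requesting the new per-CPU class does not, by itself, set the aggregate-counters bit in the flags value; the implication is applied explicitly at the entry point, as the `scap.c` hunk that follows shows. A self-contained demonstration (the macros are redefined here only to keep the snippet compilable; the aggregate flag's actual bit value lives in `metrics_v2.h` and is assumed to be a low bit):

```c
#include <assert.h>
#include <stdint.h>

#define METRICS_V2_KERNEL_COUNTERS         (1 << 0) /* assumed bit, for illustration */
#define METRICS_V2_KERNEL_COUNTERS_PER_CPU (1 << 7)

int main(void)
{
	uint32_t flags = METRICS_V2_KERNEL_COUNTERS_PER_CPU;

	/* The bits are independent: the per-CPU request alone leaves the
	 * aggregate-counters bit clear... */
	assert(!(flags & METRICS_V2_KERNEL_COUNTERS));

	/* ...so scap_get_stats_v2() ORs it in explicitly, as in the next hunk. */
	if(flags & METRICS_V2_KERNEL_COUNTERS_PER_CPU)
	{
		flags |= METRICS_V2_KERNEL_COUNTERS;
	}
	assert(flags & METRICS_V2_KERNEL_COUNTERS);
	return 0;
}
```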
diff --git a/userspace/libscap/scap.c b/userspace/libscap/scap.c
index 6a143dd8d1..fb5e05500d 100644
--- a/userspace/libscap/scap.c
+++ b/userspace/libscap/scap.c
@@ -302,6 +302,12 @@ int32_t scap_get_stats(scap_t* handle, scap_stats* stats)
 //
 const struct metrics_v2* scap_get_stats_v2(scap_t* handle, uint32_t flags, uint32_t* nstats, int32_t* rc)
 {
+	// If we enable per-CPU counters, we also enable kernel global counters by default.
+	if(flags & METRICS_V2_KERNEL_COUNTERS_PER_CPU)
+	{
+		flags |= METRICS_V2_KERNEL_COUNTERS;
+	}
+
 	if(handle && handle->m_vtable)
 	{
 		return handle->m_vtable->get_stats_v2(handle->m_engine, flags, nstats, rc);
diff --git a/userspace/libsinsp/metrics_collector.cpp b/userspace/libsinsp/metrics_collector.cpp
index 164dbc5011..1ff68279d6 100644
--- a/userspace/libsinsp/metrics_collector.cpp
+++ b/userspace/libsinsp/metrics_collector.cpp
@@ -696,7 +696,7 @@ void libs_metrics_collector::snapshot()
 	 * libscap metrics
 	 */
 
-	if((m_metrics_flags & METRICS_V2_KERNEL_COUNTERS) || (m_metrics_flags & METRICS_V2_LIBBPF_STATS))
+	if((m_metrics_flags & METRICS_V2_KERNEL_COUNTERS) || (m_metrics_flags & METRICS_V2_LIBBPF_STATS) || (m_metrics_flags & METRICS_V2_KERNEL_COUNTERS_PER_CPU))
 	{
 		uint32_t nstats = 0;
 		int32_t rc = 0;
diff --git a/userspace/libsinsp/metrics_collector.h b/userspace/libsinsp/metrics_collector.h
index 4852cad40c..654081336b 100644
--- a/userspace/libsinsp/metrics_collector.h
+++ b/userspace/libsinsp/metrics_collector.h
@@ -339,7 +339,7 @@ class libs_metrics_collector
 private:
 	sinsp* m_inspector;
 	std::shared_ptr m_sinsp_stats_v2;
-	uint32_t m_metrics_flags = METRICS_V2_KERNEL_COUNTERS | METRICS_V2_LIBBPF_STATS | METRICS_V2_RESOURCE_UTILIZATION | METRICS_V2_STATE_COUNTERS | METRICS_V2_PLUGINS;
+	uint32_t m_metrics_flags = METRICS_V2_KERNEL_COUNTERS | METRICS_V2_LIBBPF_STATS | METRICS_V2_RESOURCE_UTILIZATION | METRICS_V2_STATE_COUNTERS | METRICS_V2_PLUGINS | METRICS_V2_KERNEL_COUNTERS_PER_CPU;
 	std::vector m_metrics;
 };