|
13 | 13 | #include <linux/perf_event.h> |
14 | 14 | #include <linux/ring_buffer.h> |
15 | 15 | #include "test_ringbuf.lskel.h" |
| 16 | +#include "test_ringbuf_n.lskel.h" |
16 | 17 | #include "test_ringbuf_map_key.lskel.h" |
17 | 18 |
|
18 | 19 | #define EDONE 7777 |
@@ -326,6 +327,68 @@ static void ringbuf_subtest(void) |
326 | 327 | test_ringbuf_lskel__destroy(skel); |
327 | 328 | } |
328 | 329 |
|
/*
 * Test ring_buffer__consume_n() by producing N_TOT_SAMPLES samples in the ring
 * buffer, via getpgid(), and consuming them in chunks of N_SAMPLES.
 */
| 334 | +#define N_TOT_SAMPLES 32 |
| 335 | +#define N_SAMPLES 4 |
| 336 | + |
| 337 | +/* Sample value to verify the callback validity */ |
| 338 | +#define SAMPLE_VALUE 42L |
| 339 | + |
| 340 | +static int process_n_sample(void *ctx, void *data, size_t len) |
| 341 | +{ |
| 342 | + struct sample *s = data; |
| 343 | + |
| 344 | + ASSERT_EQ(s->value, SAMPLE_VALUE, "sample_value"); |
| 345 | + |
| 346 | + return 0; |
| 347 | +} |
| 348 | + |
/*
 * Subtest for ring_buffer__consume_n(): produce a known number of samples,
 * then verify they can be drained in fixed-size batches.
 */
static void ringbuf_n_subtest(void)
{
	struct test_ringbuf_n_lskel *skel_n;
	int err, i;

	skel_n = test_ringbuf_n_lskel__open();
	if (!ASSERT_OK_PTR(skel_n, "test_ringbuf_n_lskel__open"))
		return;

	/* Size the ring buffer to one page before load; record our pid
	 * (presumably the BPF side filters samples on it so concurrent
	 * getpgid() callers don't pollute the count — confirm in
	 * test_ringbuf_n.c).
	 */
	skel_n->maps.ringbuf.max_entries = getpagesize();
	skel_n->bss->pid = getpid();

	err = test_ringbuf_n_lskel__load(skel_n);
	if (!ASSERT_OK(err, "test_ringbuf_n_lskel__load"))
		goto cleanup;

	/* Note: assigns the file-scope 'ringbuf' shared with the other
	 * subtests; subtests run sequentially, so no conflict.
	 */
	ringbuf = ring_buffer__new(skel_n->maps.ringbuf.map_fd,
				   process_n_sample, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
		goto cleanup;

	err = test_ringbuf_n_lskel__attach(skel_n);
	if (!ASSERT_OK(err, "test_ringbuf_n_lskel__attach"))
		goto cleanup_ringbuf;

	/* Produce N_TOT_SAMPLES samples in the ring buffer by calling getpgid() */
	skel_n->bss->value = SAMPLE_VALUE;
	for (i = 0; i < N_TOT_SAMPLES; i++)
		syscall(__NR_getpgid);

	/* Consume all samples from the ring buffer in batches of N_SAMPLES;
	 * each batch must return exactly N_SAMPLES (32 is a multiple of 4),
	 * otherwise either production or the batched consume is broken.
	 */
	for (i = 0; i < N_TOT_SAMPLES; i += err) {
		err = ring_buffer__consume_n(ringbuf, N_SAMPLES);
		if (!ASSERT_EQ(err, N_SAMPLES, "rb_consume"))
			goto cleanup_ringbuf;
	}

cleanup_ringbuf:
	ring_buffer__free(ringbuf);
cleanup:
	test_ringbuf_n_lskel__destroy(skel_n);
}
| 391 | + |
329 | 392 | static int process_map_key_sample(void *ctx, void *data, size_t len) |
330 | 393 | { |
331 | 394 | struct sample *s; |
@@ -384,6 +447,8 @@ void test_ringbuf(void) |
384 | 447 | { |
385 | 448 | if (test__start_subtest("ringbuf")) |
386 | 449 | ringbuf_subtest(); |
| 450 | + if (test__start_subtest("ringbuf_n")) |
| 451 | + ringbuf_n_subtest(); |
387 | 452 | if (test__start_subtest("ringbuf_map_key")) |
388 | 453 | ringbuf_map_key_subtest(); |
389 | 454 | } |
0 commit comments