Convert existing examples to increment_exp2_histogram #305

Merged: 1 commit, Oct 18, 2023
23 changes: 6 additions & 17 deletions examples/accept-latency.bpf.c
@@ -13,7 +13,7 @@
 
 struct socket_latency_key_t {
 	u16 port;
-	u64 slot;
+	u64 bucket;
 };
 
 struct {
@@ -41,32 +41,21 @@ int BPF_KPROBE(kprobe__inet_csk_reqsk_queue_hash_add, struct sock *sk, struct re
 SEC("kprobe/inet_csk_accept")
 int BPF_KPROBE(kprobe__inet_csk_accept, struct sock *sk)
 {
-	u64 *tsp, delta_us, latency_slot;
+	u64 *tsp, delta_us, ts = bpf_ktime_get_ns();
 	struct inet_connection_sock *icsk = (struct inet_connection_sock *) sk;
 	struct request_sock *req = BPF_CORE_READ(icsk, icsk_accept_queue).rskq_accept_head;
-	struct socket_latency_key_t latency_key = {};
+	struct socket_latency_key_t key = {};
 
 	tsp = bpf_map_lookup_elem(&start, &req);
 	if (!tsp) {
 		return 0;
 	}
 
-	delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
+	delta_us = (ts - *tsp) / 1000;
 
-	// Latency histogram key
-	latency_slot = log2l(delta_us);
+	key.port = BPF_CORE_READ(sk, __sk_common).skc_num;
 
-	// Cap latency bucket at max value
-	if (latency_slot > MAX_LATENCY_SLOT) {
-		latency_slot = MAX_LATENCY_SLOT;
-	}
-
-	latency_key.port = BPF_CORE_READ(sk, __sk_common).skc_num;
-	latency_key.slot = latency_slot;
-	increment_map(&accept_latency_seconds, &latency_key, 1);
-
-	latency_key.slot = MAX_LATENCY_SLOT + 1;
-	increment_map(&accept_latency_seconds, &latency_key, delta_us);
+	increment_exp2_histogram(&accept_latency_seconds, key, delta_us, MAX_LATENCY_SLOT);
 
 	bpf_map_delete_elem(&start, &req);
 
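For reference, the helper adopted here bundles the manual bucketing sequence removed above. A minimal sketch of what increment_exp2_histogram presumably expands to, reconstructed from that deleted code (the real definition presumably lives in the project's shared BPF headers, so the exact macro body below is an assumption; log2l() and increment_map() are the helpers already used by the old examples):

/*
 * Hypothetical reconstruction, not the project's actual definition.
 * Assumes the key struct exposes a `bucket` member, which is what the
 * slot -> bucket renames in this PR suggest.
 */
#define increment_exp2_histogram(map, key, increment, max_bucket)           \
	do {                                                                 \
		/* count bucket: power-of-two slot, capped at max_bucket */ \
		key.bucket = log2l(increment);                               \
		if (key.bucket > max_bucket) {                               \
			key.bucket = max_bucket;                             \
		}                                                            \
		increment_map(map, &key, 1);                                 \
		/* sum bucket: one slot past the cap holds the total */     \
		key.bucket = max_bucket + 1;                                 \
		increment_map(map, &key, increment);                         \
	} while (0)

Under that reading, each call still produces the same two updates the old code made inline: a count in the log2 bucket of the microsecond latency, and a running sum in the extra bucket at MAX_LATENCY_SLOT + 1.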
27 changes: 7 additions & 20 deletions examples/biolatency.bpf.c
@@ -18,7 +18,7 @@
 struct disk_latency_key_t {
 	u32 dev;
 	u8 op;
-	u64 slot;
+	u64 bucket;
 };
 
 extern int LINUX_KERNEL_VERSION __kconfig;
@@ -105,38 +105,25 @@ int block_rq_issue(struct bpf_raw_tracepoint_args *ctx)
 SEC("raw_tp/block_rq_complete")
 int block_rq_complete(struct bpf_raw_tracepoint_args *ctx)
 {
-	u64 *tsp, flags, delta_us, latency_slot;
+	u64 *tsp, flags, delta_us, ts = bpf_ktime_get_ns();
 	struct gendisk *disk;
 	struct request *rq = (struct request *) ctx->args[0];
-	struct disk_latency_key_t latency_key = {};
+	struct disk_latency_key_t key = {};
 
 	tsp = bpf_map_lookup_elem(&start, &rq);
 	if (!tsp) {
 		return 0;
 	}
 
 	// Delta in microseconds
-	delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
-
-	// Latency histogram key
-	latency_slot = log2l(delta_us);
-
-	// Cap latency bucket at max value
-	if (latency_slot > MAX_LATENCY_SLOT) {
-		latency_slot = MAX_LATENCY_SLOT;
-	}
+	delta_us = (ts - *tsp) / 1000;
 
 	disk = get_disk(rq);
 	flags = BPF_CORE_READ(rq, cmd_flags);
 
-	latency_key.slot = latency_slot;
-	latency_key.dev = disk ? MKDEV(BPF_CORE_READ(disk, major), BPF_CORE_READ(disk, first_minor)) : 0;
-	latency_key.op = flags & REQ_OP_MASK;
-
-	increment_map(&bio_latency_seconds, &latency_key, 1);
+	key.dev = disk ? MKDEV(BPF_CORE_READ(disk, major), BPF_CORE_READ(disk, first_minor)) : 0;
+	key.op = flags & REQ_OP_MASK;
 
-	latency_key.slot = MAX_LATENCY_SLOT + 1;
-	increment_map(&bio_latency_seconds, &latency_key, delta_us);
+	increment_exp2_histogram(&bio_latency_seconds, key, delta_us, MAX_LATENCY_SLOT);
 
 	bpf_map_delete_elem(&start, &rq);
 
25 changes: 9 additions & 16 deletions examples/shrinklat.bpf.c
@@ -6,6 +6,10 @@
 // 27 buckets for latency, max range is 33.6s .. 67.1s
 #define MAX_LATENCY_SLOT 26
 
+struct key_t {
+	u32 bucket;
+};
+
 struct {
 	__uint(type, BPF_MAP_TYPE_HASH);
 	__uint(max_entries, 10240);
@@ -16,7 +20,7 @@ struct {
 struct {
 	__uint(type, BPF_MAP_TYPE_ARRAY);
 	__uint(max_entries, MAX_LATENCY_SLOT + 1);
-	__type(key, u32);
+	__type(key, struct key_t);
 	__type(value, u64);
 } shrink_node_latency_seconds SEC(".maps");
 
@@ -32,29 +36,18 @@ int shrink_node_enter(struct pt_regs *ctx)
 SEC("kretprobe/shrink_node")
 int shrink_node_exit(struct pt_regs *ctx)
 {
+	u64 *tsp, delta_us, ts = bpf_ktime_get_ns();
 	u32 pid = bpf_get_current_pid_tgid();
-	u64 *tsp, latency_us, latency_slot;
+	struct key_t key = {};
 
 	tsp = bpf_map_lookup_elem(&start, &pid);
 	if (!tsp) {
 		return 0;
 	}
 
-	// Latency in microseconds
-	latency_us = (bpf_ktime_get_ns() - *tsp) / 1000;
-
-	// Latency histogram key
-	latency_slot = log2l(latency_us);
-
-	// Cap latency bucket at max value
-	if (latency_slot > MAX_LATENCY_SLOT) {
-		latency_slot = MAX_LATENCY_SLOT;
-	}
-
-	increment_map(&shrink_node_latency_seconds, &latency_slot, 1);
+	delta_us = (ts - *tsp) / 1000;
 
-	latency_slot = MAX_LATENCY_SLOT + 1;
-	increment_map(&shrink_node_latency_seconds, &latency_slot, latency_us);
+	increment_exp2_histogram(&shrink_node_latency_seconds, key, delta_us, MAX_LATENCY_SLOT);
 
 	bpf_map_delete_elem(&start, &pid);
 
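As a quick check on the shrinklat comment above ("27 buckets for latency, max range is 33.6s .. 67.1s"): with microsecond deltas and power-of-two buckets, the boundaries around the top regular slot (MAX_LATENCY_SLOT = 26) work out to:

2^25 us = 33,554,432 us ≈ 33.6 s
2^26 us = 67,108,864 us ≈ 67.1 s

Slower observations are presumably clamped into slot 26 by the helper, as the removed capping code did, and slot 27 (MAX_LATENCY_SLOT + 1) carries the accumulated sum.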
2 changes: 1 addition & 1 deletion examples/shrinklat.yaml
@@ -8,6 +8,6 @@ metrics:
     bucket_multiplier: 0.000001 # microseconds to seconds
     labels:
       - name: bucket
-        size: 4
+        size: 8
         decoders:
           - name: uint