diff --git a/examples/accept-latency.bpf.c b/examples/accept-latency.bpf.c
index a0e4cc07..67473705 100644
--- a/examples/accept-latency.bpf.c
+++ b/examples/accept-latency.bpf.c
@@ -13,7 +13,7 @@ struct socket_latency_key_t {
     u16 port;
-    u64 slot;
+    u64 bucket;
 };
 
 struct {
@@ -41,32 +41,21 @@ int BPF_KPROBE(kprobe__inet_csk_reqsk_queue_hash_add, struct sock *sk, struct re
 SEC("kprobe/inet_csk_accept")
 int BPF_KPROBE(kprobe__inet_csk_accept, struct sock *sk)
 {
-    u64 *tsp, delta_us, latency_slot;
+    u64 *tsp, delta_us, ts = bpf_ktime_get_ns();
     struct inet_connection_sock *icsk = (struct inet_connection_sock *) sk;
     struct request_sock *req = BPF_CORE_READ(icsk, icsk_accept_queue).rskq_accept_head;
-    struct socket_latency_key_t latency_key = {};
+    struct socket_latency_key_t key = {};
 
     tsp = bpf_map_lookup_elem(&start, &req);
     if (!tsp) {
         return 0;
     }
 
-    delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
+    delta_us = (ts - *tsp) / 1000;
 
-    // Latency histogram key
-    latency_slot = log2l(delta_us);
+    key.port = BPF_CORE_READ(sk, __sk_common).skc_num;
 
-    // Cap latency bucket at max value
-    if (latency_slot > MAX_LATENCY_SLOT) {
-        latency_slot = MAX_LATENCY_SLOT;
-    }
-
-    latency_key.port = BPF_CORE_READ(sk, __sk_common).skc_num;
-    latency_key.slot = latency_slot;
-    increment_map(&accept_latency_seconds, &latency_key, 1);
-
-    latency_key.slot = MAX_LATENCY_SLOT + 1;
-    increment_map(&accept_latency_seconds, &latency_key, delta_us);
+    increment_exp2_histogram(&accept_latency_seconds, key, delta_us, MAX_LATENCY_SLOT);
 
     bpf_map_delete_elem(&start, &req);
diff --git a/examples/biolatency.bpf.c b/examples/biolatency.bpf.c
index 8136d401..ca89060d 100644
--- a/examples/biolatency.bpf.c
+++ b/examples/biolatency.bpf.c
@@ -18,7 +18,7 @@ struct disk_latency_key_t {
     u32 dev;
     u8 op;
-    u64 slot;
+    u64 bucket;
 };
 
 extern int LINUX_KERNEL_VERSION __kconfig;
@@ -105,38 +105,25 @@ int block_rq_issue(struct bpf_raw_tracepoint_args *ctx)
 SEC("raw_tp/block_rq_complete")
 int block_rq_complete(struct bpf_raw_tracepoint_args *ctx)
 {
-    u64 *tsp, flags, delta_us, latency_slot;
+    u64 *tsp, flags, delta_us, ts = bpf_ktime_get_ns();
     struct gendisk *disk;
     struct request *rq = (struct request *) ctx->args[0];
-    struct disk_latency_key_t latency_key = {};
+    struct disk_latency_key_t key = {};
 
     tsp = bpf_map_lookup_elem(&start, &rq);
     if (!tsp) {
         return 0;
     }
 
-    // Delta in microseconds
-    delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
-
-    // Latency histogram key
-    latency_slot = log2l(delta_us);
-
-    // Cap latency bucket at max value
-    if (latency_slot > MAX_LATENCY_SLOT) {
-        latency_slot = MAX_LATENCY_SLOT;
-    }
+    delta_us = (ts - *tsp) / 1000;
 
     disk = get_disk(rq);
     flags = BPF_CORE_READ(rq, cmd_flags);
 
-    latency_key.slot = latency_slot;
-    latency_key.dev = disk ? MKDEV(BPF_CORE_READ(disk, major), BPF_CORE_READ(disk, first_minor)) : 0;
-    latency_key.op = flags & REQ_OP_MASK;
-
-    increment_map(&bio_latency_seconds, &latency_key, 1);
+    key.dev = disk ? MKDEV(BPF_CORE_READ(disk, major), BPF_CORE_READ(disk, first_minor)) : 0;
+    key.op = flags & REQ_OP_MASK;
 
-    latency_key.slot = MAX_LATENCY_SLOT + 1;
-    increment_map(&bio_latency_seconds, &latency_key, delta_us);
+    increment_exp2_histogram(&bio_latency_seconds, key, delta_us, MAX_LATENCY_SLOT);
 
     bpf_map_delete_elem(&start, &rq);
diff --git a/examples/shrinklat.bpf.c b/examples/shrinklat.bpf.c
index 0a0f08cc..e63cf37e 100644
--- a/examples/shrinklat.bpf.c
+++ b/examples/shrinklat.bpf.c
@@ -6,6 +6,10 @@
 // 27 buckets for latency, max range is 33.6s .. 67.1s
 #define MAX_LATENCY_SLOT 26
 
+struct key_t {
+    u32 bucket;
+};
+
 struct {
     __uint(type, BPF_MAP_TYPE_HASH);
     __uint(max_entries, 10240);
@@ -16,7 +20,7 @@ struct {
     __uint(type, BPF_MAP_TYPE_ARRAY);
     __uint(max_entries, MAX_LATENCY_SLOT + 1);
-    __type(key, u32);
+    __type(key, struct key_t);
     __type(value, u64);
 } shrink_node_latency_seconds SEC(".maps");
@@ -32,29 +36,18 @@ int shrink_node_enter(struct pt_regs *ctx)
 SEC("kretprobe/shrink_node")
 int shrink_node_exit(struct pt_regs *ctx)
 {
+    u64 *tsp, delta_us, ts = bpf_ktime_get_ns();
     u32 pid = bpf_get_current_pid_tgid();
-    u64 *tsp, latency_us, latency_slot;
+    struct key_t key = {};
 
     tsp = bpf_map_lookup_elem(&start, &pid);
     if (!tsp) {
         return 0;
     }
 
-    // Latency in microseconds
-    latency_us = (bpf_ktime_get_ns() - *tsp) / 1000;
-
-    // Latency histogram key
-    latency_slot = log2l(latency_us);
-
-    // Cap latency bucket at max value
-    if (latency_slot > MAX_LATENCY_SLOT) {
-        latency_slot = MAX_LATENCY_SLOT;
-    }
-
-    increment_map(&shrink_node_latency_seconds, &latency_slot, 1);
+    delta_us = (ts - *tsp) / 1000;
 
-    latency_slot = MAX_LATENCY_SLOT + 1;
-    increment_map(&shrink_node_latency_seconds, &latency_slot, latency_us);
+    increment_exp2_histogram(&shrink_node_latency_seconds, key, delta_us, MAX_LATENCY_SLOT);
 
     bpf_map_delete_elem(&start, &pid);
diff --git a/examples/shrinklat.yaml b/examples/shrinklat.yaml
index 12e29bff..154d28e3 100644
--- a/examples/shrinklat.yaml
+++ b/examples/shrinklat.yaml
@@ -8,6 +8,6 @@ metrics:
       bucket_multiplier: 0.000001 # microseconds to seconds
       labels:
         - name: bucket
-          size: 4
+          size: 8
          decoders:
            - name: uint
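
Note: the increment_exp2_histogram helper itself is not part of this diff. Below is a minimal sketch of what it presumably expands to, reconstructed from the inline bucketing code removed above; the real definition ships with the exporter's shared BPF headers and may differ in details. It is written as a macro (an assumption) so it can set the bucket member of whatever per-example key struct is passed in, and it reuses the increment_map and log2l helpers already present in the old code.

    // Sketch only: reconstructed from the removed inline code, not the canonical definition.
    #define increment_exp2_histogram(map, key, increment, max_bucket)      \
        {                                                                   \
            /* pick the log2 bucket and cap it at the configured maximum */ \
            key.bucket = log2l(increment);                                  \
                                                                            \
            if (key.bucket > max_bucket) {                                  \
                key.bucket = max_bucket;                                    \
            }                                                               \
                                                                            \
            /* count one observation in that bucket */                      \
            increment_map(map, &key, 1);                                    \
                                                                            \
            /* the slot past the last bucket accumulates the running sum */ \
            key.bucket = max_bucket + 1;                                    \
            increment_map(map, &key, increment);                            \
        }

With a helper along these lines, each call site only fills in its label fields (port, dev, op) and hands the raw microsecond value to the helper, which maintains the per-bucket counts and the sum slot exactly as the removed code did.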