From 07652344213bd4a552098ec1aec9c8b0ae619b94 Mon Sep 17 00:00:00 2001
From: Matthew Ahrens
Date: Thu, 27 Apr 2023 10:02:24 -0700
Subject: [PATCH] `zpool iostat -ow` object store latency buckets off by one (#823)

In `zpool iostat -ow`, the `object_store` latencies are being counted in
one higher bucket than they should be (e.g. 512us instead of 256us),
making it hard to compare them to the `object_agent` latencies. The
bucket index is somewhat confusing due to the kernel's definition of
`L_HISTO()`. This commit fixes the issue.
---
 cmd/zfs_object_agent/zettaobject/src/access_stats.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/zfs_object_agent/zettaobject/src/access_stats.rs b/cmd/zfs_object_agent/zettaobject/src/access_stats.rs
index 4604f7a86b40..446a23b96f24 100644
--- a/cmd/zfs_object_agent/zettaobject/src/access_stats.rs
+++ b/cmd/zfs_object_agent/zettaobject/src/access_stats.rs
@@ -122,7 +122,7 @@ impl<'a> OpInProgress<'a> {
         // This bucket mapping is equivalent to L_HISTO() macro in zfs.h
         let latency_bucket = std::cmp::min(
-            latency.next_power_of_two().trailing_zeros(),
+            latency.next_power_of_two().trailing_zeros() - 1,
             LATENCY_HISTOGRAM_BUCKETS - 1,
         ) as usize;
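
For reference, below is a minimal standalone sketch (not part of the patch) contrasting the old and new bucket computations. The constant name `LATENCY_HISTOGRAM_BUCKETS` is taken from the diff above, but its value here, the sample latencies, and the assumption that `latency` is measured in microseconds are illustrative only.

```rust
// Illustrative sketch of the off-by-one fixed by this commit.
// Assumed value for illustration; the real constant lives in access_stats.rs.
const LATENCY_HISTOGRAM_BUCKETS: u32 = 37;

// Old mapping: uses the index of the *next* power of two, so a 300us latency
// maps to 512 -> trailing_zeros() == 9, i.e. one bucket too high.
fn bucket_old(latency: u64) -> usize {
    std::cmp::min(
        latency.next_power_of_two().trailing_zeros(),
        LATENCY_HISTOGRAM_BUCKETS - 1,
    ) as usize
}

// Fixed mapping: subtract one, so 300us lands in bucket 8 (the 256us row in
// the commit message's example), matching the kernel's L_HISTO() bucketing
// as stated in the code comment.
fn bucket_new(latency: u64) -> usize {
    std::cmp::min(
        latency.next_power_of_two().trailing_zeros() - 1,
        LATENCY_HISTOGRAM_BUCKETS - 1,
    ) as usize
}

fn main() {
    // Sample (non-power-of-two) latencies in microseconds, chosen for illustration.
    for latency in [3u64, 300, 5_000] {
        println!(
            "latency {:>5}us: old bucket {:>2}, new bucket {:>2}",
            latency,
            bucket_old(latency),
            bucket_new(latency)
        );
    }
}
```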