author    Jens Axboe <axboe@kernel.dk>  2020-07-13 15:48:16 -0600
committer Jens Axboe <axboe@kernel.dk>  2020-07-15 09:23:35 -0600
commit    9054650fac24b784df8500aba2869ebf240d069a (patch)
tree      46ae23f1a8065541a9a0f3649fc38e57c8ba1c07 /block/blk-timeout.c
parent    blk-mq: remove redundant validation in __blk_mq_end_request() (diff)
block: relax jiffies rounding for timeouts
In doing high IOPS testing, blk-mq is generally pretty well optimized. There are a few things that stuck out as using more CPU than what is really warranted, and one thing is the round_jiffies_up() that we do twice for each request. That accounts for about 0.8% of the CPU in my testing.

We can make this cheaper by avoiding an integer division, by just adding a rough HZ mask that we can AND with instead. The timeouts are only on a second granularity already, we don't have to be that accurate here and this patch barely changes that. All we care about is nice grouping.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
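As a minimal userspace sketch of the trade-off described above (this is not the kernel code; the HZ value and the helper names roundup_pow_of_two_ul() and round_to_next_second() are assumptions made purely for illustration): rounding a jiffies value up to the next whole second needs an integer division on every call, while the patched path precomputes roundup_pow_of_two(HZ) - 1 once at init time and then only adds on the per-request path.

/*
 * Minimal userspace sketch of the idea above; not the kernel code.
 * HZ and the helper names are assumptions for illustration only.
 */
#include <stdio.h>

#define HZ 250	/* assumed tick rate for this example */

/* userspace stand-in for the kernel's roundup_pow_of_two() */
static unsigned long roundup_pow_of_two_ul(unsigned long v)
{
	unsigned long p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* roughly what a whole-second round-up has to do: a division per call */
static unsigned long round_to_next_second(unsigned long j)
{
	return ((j + HZ - 1) / HZ) * HZ;
}

int main(void)
{
	/* computed once, as the patch does from a late_initcall() */
	unsigned long timeout_mask = roundup_pow_of_two_ul(HZ) - 1;
	unsigned long j = 12345;

	/* per-request path after the patch: additions only, no division */
	unsigned long rough = (j + timeout_mask) + 1;

	printf("division-based rounding: %lu\n", round_to_next_second(j));
	printf("mask-based rough value:  %lu\n", rough);
	return 0;
}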
Diffstat (limited to 'block/blk-timeout.c')
-rw-r--r--  block/blk-timeout.c  22
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 3a1ac6434758..8ab8a82825cd 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -88,11 +88,29 @@ void blk_abort_request(struct request *req)
 }
 EXPORT_SYMBOL_GPL(blk_abort_request);
 
+static unsigned long blk_timeout_mask __read_mostly;
+
+int __init blk_timeout_init(void)
+{
+	blk_timeout_mask = roundup_pow_of_two(HZ) - 1;
+	return 0;
+}
+
+late_initcall(blk_timeout_init);
+
+/*
+ * Just a rough estimate, we don't care about specific values for timeouts.
+ */
+static inline unsigned long blk_round_jiffies(unsigned long j)
+{
+	return (j + blk_timeout_mask) + 1;
+}
+
 unsigned long blk_rq_timeout(unsigned long timeout)
 {
 	unsigned long maxt;
 
-	maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
+	maxt = blk_round_jiffies(jiffies + BLK_MAX_TIMEOUT);
 	if (time_after(timeout, maxt))
 		timeout = maxt;
 
@@ -129,7 +147,7 @@ void blk_add_timer(struct request *req)
	 * than an existing one, modify the timer. Round up to next nearest
	 * second.
	 */
-	expiry = blk_rq_timeout(round_jiffies_up(expiry));
+	expiry = blk_rq_timeout(blk_round_jiffies(expiry));
 
	if (!timer_pending(&q->timeout) ||
	    time_before(expiry, q->timeout.expires)) {
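As a worked example of the initcall in the hunk above (the CONFIG_HZ values below are just common choices, not anything the patch mandates), this small standalone sketch prints the mask blk_timeout_init() would compute and what mask + 1 jiffies amounts to in seconds, which is where the commit message's "second granularity" argument comes from.

/*
 * Hedged worked example (not kernel code): the mask blk_timeout_init()
 * would compute for a few common CONFIG_HZ choices, and how many
 * seconds mask + 1 jiffies corresponds to.
 */
#include <stdio.h>

/* userspace stand-in for the kernel's roundup_pow_of_two() */
static unsigned long roundup_pow_of_two_ul(unsigned long v)
{
	unsigned long p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	const unsigned long hz_values[] = { 100, 250, 300, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
		unsigned long hz = hz_values[i];
		unsigned long mask = roundup_pow_of_two_ul(hz) - 1;

		printf("HZ=%4lu  blk_timeout_mask=%4lu  mask+1 = %.3f s\n",
		       hz, mask, (double)(mask + 1) / hz);
	}
	return 0;
}

At HZ=1000, for instance, the mask is 1023, so blk_round_jiffies() moves an expiry forward by 1024 jiffies, roughly 1.02 seconds, rather than rounding to an exact second boundary as round_jiffies_up() did.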