path: root/block/blk.h
author		Chengming Zhou <zhouchengming@bytedance.com>	2023-07-17 12:00:57 +0800
committer	Jens Axboe <axboe@kernel.dk>	2023-07-17 08:18:21 -0600
commit		b175c86739d38e41044d3136065f092a6d95aee6 (patch)
tree		db2a72218bc869280bb959bf45f69eea64be17a4 /block/blk.h
parent		blk-flush: fix rq->flush.seq for post-flush requests (diff)
download	wireguard-linux-b175c86739d38e41044d3136065f092a6d95aee6.tar.xz
		wireguard-linux-b175c86739d38e41044d3136065f092a6d95aee6.zip
blk-flush: count inflight flush_data requests
The flush state machine uses a doubly linked list to link all inflight flush_data requests, so that it can avoid issuing separate post-flushes for flush_data requests that share a PREFLUSH. Because of that we can't reuse rq->queuelist, which is why rq->flush.list is needed.

In preparation for the next patch, which reuses rq->queuelist for the flush state machine, we change the doubly linked list to an unsigned long counter that counts all inflight flush_data requests. This is fine since we only need to know whether there is any inflight flush_data request at all, so a counter is sufficient.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230717040058.3993930-4-chengming.zhou@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
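To make the counter idea concrete, here is a minimal userspace sketch (the names flush_queue_model, issue_data_rq, complete_data_rq and data_rq_in_flight are made up for illustration and are not kernel code): the flush machinery only ever asks whether any flush_data request is still in flight, never which one, so a bare counter carries all the information the list did.

/*
 * Standalone model of the counter-based tracking described in the
 * commit message.  Only the idea -- a counter in place of a list --
 * mirrors the patch; everything else here is hypothetical.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct flush_queue_model {
	unsigned long flush_data_in_flight;	/* stands in for the removed list */
};

/* Called when a flush_data request is dispatched. */
static void issue_data_rq(struct flush_queue_model *fq)
{
	fq->flush_data_in_flight++;
}

/* Called when a flush_data request completes. */
static void complete_data_rq(struct flush_queue_model *fq)
{
	assert(fq->flush_data_in_flight > 0);
	fq->flush_data_in_flight--;
}

/* The only question the state machine ever needs answered. */
static bool data_rq_in_flight(const struct flush_queue_model *fq)
{
	return fq->flush_data_in_flight != 0;
}

int main(void)
{
	struct flush_queue_model fq = { 0 };

	issue_data_rq(&fq);
	issue_data_rq(&fq);
	complete_data_rq(&fq);
	printf("in flight: %s\n", data_rq_in_flight(&fq) ? "yes" : "no");
	complete_data_rq(&fq);
	printf("in flight: %s\n", data_rq_in_flight(&fq) ? "yes" : "no");
	return 0;
}

In the kernel code itself, the "is anything in flight?" test then presumably becomes a plain non-zero check on fq->flush_data_in_flight instead of a list-emptiness check; the hunk below only changes the struct definition.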
Diffstat (limited to 'block/blk.h')
-rw-r--r--	block/blk.h	5
1 file changed, 2 insertions, 3 deletions
diff --git a/block/blk.h b/block/blk.h
index 608c5dcc516b..686712e13835 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -15,15 +15,14 @@ struct elevator_type;
 extern struct dentry *blk_debugfs_root;
 
 struct blk_flush_queue {
+	spinlock_t		mq_flush_lock;
 	unsigned int		flush_pending_idx:1;
 	unsigned int		flush_running_idx:1;
 	blk_status_t		rq_status;
 	unsigned long		flush_pending_since;
 	struct list_head	flush_queue[2];
-	struct list_head	flush_data_in_flight;
+	unsigned long		flush_data_in_flight;
 	struct request		*flush_rq;
-
-	spinlock_t		mq_flush_lock;
 };
 
 bool is_flush_rq(struct request *req);