author	Tejun Heo <tj@kernel.org>	2012-03-05 13:14:58 -0800
committer	Jens Axboe <axboe@kernel.dk>	2012-03-06 21:27:21 +0100
commit	d732580b4eb31553c63744a47d590f770cafb8f0 (patch)
tree	ea4e8e21df1b639603693e6f5fdfc5a620cd8737 /block
parent	elevator: make elevator_init_fn() return 0/-errno (diff)
block: implement blk_queue_bypass_start/end()
Rename and extend elv_quiesce_start/end() to blk_queue_bypass_start/end(), which are exported and support nesting via @q->bypass_depth. Also add blk_queue_bypass() to test the bypass state. This will be further extended and used for blkio_group management.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
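The nesting contract is easiest to see as a call sequence. The sketch below is illustrative only, not part of the patch, and the call site is hypothetical: QUEUE_FLAG_BYPASS is set on the first start and cleared only when the depth returns to zero.

	/* Hypothetical caller: paired start/end calls may overlap. */
	blk_queue_bypass_start(q);	/* depth 0 -> 1, flag set, queue drained */
	blk_queue_bypass_start(q);	/* depth 1 -> 2, flag already set, drains again */
	blk_queue_bypass_end(q);	/* depth 2 -> 1, flag stays set */
	blk_queue_bypass_end(q);	/* depth 1 -> 0, flag cleared */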
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	39
-rw-r--r--	block/blk.h	6
-rw-r--r--	block/elevator.c	25
3 files changed, 42 insertions(+), 28 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index fccb25021121..98ddef430093 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -410,6 +410,42 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
}
/**
+ * blk_queue_bypass_start - enter queue bypass mode
+ * @q: queue of interest
+ *
+ * In bypass mode, only the dispatch FIFO queue of @q is used. This
+ * function makes @q enter bypass mode and drains all requests which were
+ * issued before. On return, it's guaranteed that no request has ELVPRIV
+ * set.
+ */
+void blk_queue_bypass_start(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ q->bypass_depth++;
+ queue_flag_set(QUEUE_FLAG_BYPASS, q);
+ spin_unlock_irq(q->queue_lock);
+
+ blk_drain_queue(q, false);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
+
+/**
+ * blk_queue_bypass_end - leave queue bypass mode
+ * @q: queue of interest
+ *
+ * Leave bypass mode and restore the normal queueing behavior.
+ */
+void blk_queue_bypass_end(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ if (!--q->bypass_depth)
+ queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+ WARN_ON_ONCE(q->bypass_depth < 0);
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
+
+/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
@@ -862,8 +898,7 @@ retry:
* Also, lookup icq while holding queue_lock. If it doesn't exist,
* it will be created after releasing queue_lock.
*/
- if (blk_rq_should_init_elevator(bio) &&
- !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+ if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
rw_flags |= REQ_ELVPRIV;
rl->elvpriv++;
if (et->icq_cache && ioc)
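The blk_queue_bypass() test used in the hunk above is added outside this diffstat (which is limited to 'block'); given the existing blk_queue_* helper style in include/linux/blkdev.h, it would presumably read along these lines (a sketch, not the literal hunk):

	/* Presumed shape, following the existing queue-flag helper macros. */
	#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)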
diff --git a/block/blk.h b/block/blk.h
index 9c12f80882b0..7422f3133c5d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -23,7 +23,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio);
-void blk_drain_queue(struct request_queue *q, bool drain_all);
+void blk_queue_bypass_start(struct request_queue *q);
+void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
@@ -144,9 +145,6 @@ void blk_queue_congestion_threshold(struct request_queue *q);
int blk_dev_init(void);
-void elv_quiesce_start(struct request_queue *q);
-void elv_quiesce_end(struct request_queue *q);
-
/*
* Return the threshold (number of used requests) at which the queue is
diff --git a/block/elevator.c b/block/elevator.c
index f81c061dad15..0bdea0ed03a3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -553,25 +553,6 @@ void elv_drain_elevator(struct request_queue *q)
}
}
-void elv_quiesce_start(struct request_queue *q)
-{
- if (!q->elevator)
- return;
-
- spin_lock_irq(q->queue_lock);
- queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
- spin_unlock_irq(q->queue_lock);
-
- blk_drain_queue(q, false);
-}
-
-void elv_quiesce_end(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
- spin_unlock_irq(q->queue_lock);
-}
-
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
trace_block_rq_insert(q, rq);
@@ -903,7 +884,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
* using INSERT_BACK. All requests have SOFTBARRIER set and no
* merge happens either.
*/
- elv_quiesce_start(q);
+ blk_queue_bypass_start(q);
/* unregister and clear all auxiliary data of the old elevator */
if (registered)
@@ -933,7 +914,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
/* done, kill the old one and finish */
elevator_exit(old);
- elv_quiesce_end(q);
+ blk_queue_bypass_end(q);
blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
@@ -945,7 +926,7 @@ fail_init:
/* switch failed, restore and re-register old elevator */
q->elevator = old;
elv_register_queue(q);
- elv_quiesce_end(q);
+ blk_queue_bypass_end(q);
return err;
}
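Taken together, the elevator.c hunks reduce the switch path to a bypass bracket. A condensed sketch, where swap_elevators() is a hypothetical stand-in for the unregister/init/register steps; note that both the success path and the fail_init path call blk_queue_bypass_end(), so the bypass depth is balanced either way:

	blk_queue_bypass_start(q);	/* drain; new requests go to the dispatch FIFO */
	err = swap_elevators(q, new_e);	/* hypothetical condensation of the real steps */
	blk_queue_bypass_end(q);	/* restore normal queueing on success or failure */
	return err;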