Diffstat (limited to 'drivers/block/loop.c')
-rw-r--r--	drivers/block/loop.c	67
1 file changed, 30 insertions, 37 deletions
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 075377eee0c0..c9f2107f7095 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -510,14 +510,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 	return 0;
 }
-
-static inline int lo_rw_simple(struct loop_device *lo,
-		struct request *rq, loff_t pos, bool rw)
+static int do_req_filebacked(struct loop_device *lo, struct request *rq)
 {
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
-
-	if (cmd->use_aio)
-		return lo_rw_aio(lo, cmd, pos, rw);
+	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
 
 	/*
 	 * lo_write_simple and lo_read_simple should have been covered
@@ -528,37 +524,30 @@ static inline int lo_rw_simple(struct loop_device *lo,
 	 * of the req at one time. And direct read IO doesn't need to
 	 * run flush_dcache_page().
 	 */
-	if (rw == WRITE)
-		return lo_write_simple(lo, rq, pos);
-	else
-		return lo_read_simple(lo, rq, pos);
-}
-
-static int do_req_filebacked(struct loop_device *lo, struct request *rq)
-{
-	loff_t pos;
-	int ret;
-
-	pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
-
-	if (op_is_write(req_op(rq))) {
-		if (req_op(rq) == REQ_OP_FLUSH)
-			ret = lo_req_flush(lo, rq);
-		else if (req_op(rq) == REQ_OP_DISCARD)
-			ret = lo_discard(lo, rq, pos);
-		else if (lo->transfer)
-			ret = lo_write_transfer(lo, rq, pos);
+	switch (req_op(rq)) {
+	case REQ_OP_FLUSH:
+		return lo_req_flush(lo, rq);
+	case REQ_OP_DISCARD:
+		return lo_discard(lo, rq, pos);
+	case REQ_OP_WRITE:
+		if (lo->transfer)
+			return lo_write_transfer(lo, rq, pos);
+		else if (cmd->use_aio)
+			return lo_rw_aio(lo, cmd, pos, WRITE);
 		else
-			ret = lo_rw_simple(lo, rq, pos, WRITE);
-
-	} else {
+			return lo_write_simple(lo, rq, pos);
+	case REQ_OP_READ:
 		if (lo->transfer)
-			ret = lo_read_transfer(lo, rq, pos);
+			return lo_read_transfer(lo, rq, pos);
+		else if (cmd->use_aio)
+			return lo_rw_aio(lo, cmd, pos, READ);
 		else
-			ret = lo_rw_simple(lo, rq, pos, READ);
+			return lo_read_simple(lo, rq, pos);
+	default:
+		WARN_ON_ONCE(1);
+		return -EIO;
+		break;
 	}
-
-	return ret;
 }
 
 struct switch_request {
@@ -1659,11 +1648,15 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (lo->lo_state != Lo_bound)
 		return -EIO;
 
-	if (lo->use_dio && (req_op(cmd->rq) != REQ_OP_FLUSH ||
-	    req_op(cmd->rq) == REQ_OP_DISCARD))
-		cmd->use_aio = true;
-	else
+	switch (req_op(cmd->rq)) {
+	case REQ_OP_FLUSH:
+	case REQ_OP_DISCARD:
 		cmd->use_aio = false;
+		break;
+	default:
+		cmd->use_aio = lo->use_dio;
+		break;
+	}
 
 	queue_kthread_work(&lo->worker, &cmd->work);