Diffstat (limited to 'fs/direct-io.c')
-rw-r--r-- | fs/direct-io.c | 198
1 file changed, 70 insertions(+), 128 deletions(-)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 00b4d15bb811..03d381377ae1 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -117,9 +117,7 @@ struct dio_submit {
 /* dio_state communicated between submission path and end_io */
 struct dio {
         int flags;                      /* doesn't change */
-        int op;
-        int op_flags;
-        blk_qc_t bio_cookie;
+        blk_opf_t opf;                  /* request operation type and flags */
         struct gendisk *bio_disk;
         struct inode *inode;
         loff_t i_size;                  /* i_size when submitted */
@@ -168,12 +166,13 @@ static inline unsigned dio_pages_present(struct dio_submit *sdio)
  */
 static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
 {
+        const enum req_op dio_op = dio->opf & REQ_OP_MASK;
         ssize_t ret;
 
-        ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
+        ret = iov_iter_get_pages2(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
                                 &sdio->from);
 
-        if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
+        if (ret < 0 && sdio->blocks_available && dio_op == REQ_OP_WRITE) {
                 struct page *page = ZERO_PAGE(0);
                 /*
                  * A memory fault, but the filesystem has some outstanding
@@ -192,7 +191,6 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
         }
 
         if (ret >= 0) {
-                iov_iter_advance(sdio->iter, ret);
                 ret += sdio->from;
                 sdio->head = 0;
                 sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
@@ -235,6 +233,7 @@ static inline struct page *dio_get_page(struct dio *dio,
  */
 static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
 {
+        const enum req_op dio_op = dio->opf & REQ_OP_MASK;
         loff_t offset = dio->iocb->ki_pos;
         ssize_t transferred = 0;
         int err;
@@ -252,7 +251,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
                 transferred = dio->result;
                 /* Check for short read case */
-                if ((dio->op == REQ_OP_READ) &&
+                if (dio_op == REQ_OP_READ &&
                     ((offset + transferred) > dio->i_size))
                         transferred = dio->i_size - offset;
                 /* ignore EFAULT if some IO has been done */
@@ -287,7 +286,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
          * zeros from unwritten extents.
          */
         if (flags & DIO_COMPLETE_INVALIDATE &&
-            ret > 0 && dio->op == REQ_OP_WRITE &&
+            ret > 0 && dio_op == REQ_OP_WRITE &&
             dio->inode->i_mapping->nrpages) {
                 err = invalidate_inode_pages2_range(dio->inode->i_mapping,
                                         offset >> PAGE_SHIFT,
@@ -306,9 +305,9 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
                  */
                 dio->iocb->ki_pos += transferred;
 
-                if (ret > 0 && dio->op == REQ_OP_WRITE)
+                if (ret > 0 && dio_op == REQ_OP_WRITE)
                         ret = generic_write_sync(dio->iocb, ret);
-                dio->iocb->ki_complete(dio->iocb, ret, 0);
+                dio->iocb->ki_complete(dio->iocb, ret);
         }
 
         kmem_cache_free(dio_cache, dio);
@@ -330,6 +329,7 @@ static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
 static void dio_bio_end_aio(struct bio *bio)
 {
         struct dio *dio = bio->bi_private;
+        const enum req_op dio_op = dio->opf & REQ_OP_MASK;
         unsigned long remaining;
         unsigned long flags;
         bool defer_completion = false;
@@ -354,7 +354,7 @@ static void dio_bio_end_aio(struct bio *bio)
                  */
                 if (dio->result)
                         defer_completion = dio->defer_completion ||
-                                           (dio->op == REQ_OP_WRITE &&
+                                           (dio_op == REQ_OP_WRITE &&
                                             dio->inode->i_mapping->nrpages);
                 if (defer_completion) {
                         INIT_WORK(&dio->complete_work, dio_aio_complete_work);
@@ -386,25 +386,6 @@ static void dio_bio_end_io(struct bio *bio)
         spin_unlock_irqrestore(&dio->bio_lock, flags);
 }
 
-/**
- * dio_end_io - handle the end io action for the given bio
- * @bio: The direct io bio thats being completed
- *
- * This is meant to be called by any filesystem that uses their own dio_submit_t
- * so that the DIO specific endio actions are dealt with after the filesystem
- * has done it's completion work.
- */
-void dio_end_io(struct bio *bio)
-{
-        struct dio *dio = bio->bi_private;
-
-        if (dio->is_async)
-                dio_bio_end_aio(bio);
-        else
-                dio_bio_end_io(bio);
-}
-EXPORT_SYMBOL_GPL(dio_end_io);
-
 static inline void
 dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
               struct block_device *bdev,
@@ -416,18 +397,12 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
          * bio_alloc() is guaranteed to return a bio when allowed to sleep and
          * we request a valid number of vectors.
          */
-        bio = bio_alloc(GFP_KERNEL, nr_vecs);
-
-        bio_set_dev(bio, bdev);
+        bio = bio_alloc(bdev, nr_vecs, dio->opf, GFP_KERNEL);
         bio->bi_iter.bi_sector = first_sector;
-        bio_set_op_attrs(bio, dio->op, dio->op_flags);
         if (dio->is_async)
                 bio->bi_end_io = dio_bio_end_aio;
         else
                 bio->bi_end_io = dio_bio_end_io;
-
-        bio->bi_write_hint = dio->iocb->ki_hint;
-
         sdio->bio = bio;
         sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
 }
@@ -441,6 +416,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
  */
 static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
 {
+        const enum req_op dio_op = dio->opf & REQ_OP_MASK;
         struct bio *bio = sdio->bio;
         unsigned long flags;
 
@@ -450,16 +426,15 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
         dio->refcount++;
         spin_unlock_irqrestore(&dio->bio_lock, flags);
 
-        if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
+        if (dio->is_async && dio_op == REQ_OP_READ && dio->should_dirty)
                 bio_set_pages_dirty(bio);
 
-        dio->bio_disk = bio->bi_disk;
+        dio->bio_disk = bio->bi_bdev->bd_disk;
 
-        if (sdio->submit_io) {
+        if (sdio->submit_io)
                 sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
-                dio->bio_cookie = BLK_QC_T_NONE;
-        } else
-                dio->bio_cookie = submit_bio(bio);
+        else
+                submit_bio(bio);
 
         sdio->bio = NULL;
         sdio->boundary = 0;
@@ -479,7 +454,7 @@ static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
  * Wait for the next BIO to complete. Remove it and return it. NULL is
  * returned once all BIOs have been completed. This must only be called once
  * all bios have been issued so that dio->refcount can only decrease. This
- * requires that that the caller hold a reference on the dio.
+ * requires that the caller hold a reference on the dio.
  */
 static struct bio *dio_await_one(struct dio *dio)
 {
@@ -498,9 +473,7 @@ static struct bio *dio_await_one(struct dio *dio)
                 __set_current_state(TASK_UNINTERRUPTIBLE);
                 dio->waiter = current;
                 spin_unlock_irqrestore(&dio->bio_lock, flags);
-                if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
-                    !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true))
-                        io_schedule();
+                blk_io_schedule();
                 /* wake up sets us TASK_RUNNING */
                 spin_lock_irqsave(&dio->bio_lock, flags);
                 dio->waiter = NULL;
@@ -519,7 +492,8 @@ static struct bio *dio_await_one(struct dio *dio)
 static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
 {
         blk_status_t err = bio->bi_status;
-        bool should_dirty = dio->op == REQ_OP_READ && dio->should_dirty;
+        const enum req_op dio_op = dio->opf & REQ_OP_MASK;
+        bool should_dirty = dio_op == REQ_OP_READ && dio->should_dirty;
 
         if (err) {
                 if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
@@ -646,6 +620,7 @@ static int dio_set_defer_completion(struct dio *dio)
 static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
                            struct buffer_head *map_bh)
 {
+        const enum req_op dio_op = dio->opf & REQ_OP_MASK;
         int ret;
         sector_t fs_startblk;   /* Into file, in filesystem-sized blocks */
         sector_t fs_endblk;     /* Into file, in filesystem-sized blocks */
@@ -680,7 +655,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
          * which may decide to handle it or also return an unmapped
          * buffer head.
          */
-        create = dio->op == REQ_OP_WRITE;
+        create = dio_op == REQ_OP_WRITE;
         if (dio->flags & DIO_SKIP_HOLES) {
                 i_size = i_size_read(dio->inode);
                 if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
@@ -712,7 +687,7 @@ static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
         if (ret)
                 goto out;
         sector = start_sector << (sdio->blkbits - 9);
-        nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
+        nr_pages = bio_max_segs(sdio->pages_in_io);
         BUG_ON(nr_pages <= 0);
         dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
         sdio->boundary = 0;
@@ -828,9 +803,11 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
                     unsigned offset, unsigned len, sector_t blocknr,
                     struct buffer_head *map_bh)
 {
+        const enum req_op dio_op = dio->opf & REQ_OP_MASK;
         int ret = 0;
+        int boundary = sdio->boundary;  /* dio_send_cur_page may clear it */
 
-        if (dio->op == REQ_OP_WRITE) {
+        if (dio_op == REQ_OP_WRITE) {
                 /*
                  * Read accounting is performed in submit_bio()
                  */
@@ -867,10 +844,10 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
         sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
 out:
         /*
-         * If sdio->boundary then we want to schedule the IO now to
+         * If boundary then we want to schedule the IO now to
          * avoid metadata seeks.
          */
-        if (sdio->boundary) {
+        if (boundary) {
                 ret = dio_send_cur_page(dio, sdio, map_bh);
                 if (sdio->bio)
                         dio_bio_submit(dio, sdio);
@@ -943,6 +920,7 @@ static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
 static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
                         struct buffer_head *map_bh)
 {
+        const enum req_op dio_op = dio->opf & REQ_OP_MASK;
         const unsigned blkbits = sdio->blkbits;
         const unsigned i_blkbits = blkbits + sdio->blkfactor;
         int ret = 0;
@@ -1018,7 +996,7 @@ do_holes:
                                 loff_t i_size_aligned;
 
                                 /* AKPM: eargh, -ENOTBLK is a hack */
-                                if (dio->op == REQ_OP_WRITE) {
+                                if (dio_op == REQ_OP_WRITE) {
                                         put_page(page);
                                         return -ENOTBLK;
                                 }
@@ -1141,11 +1119,10 @@ static inline int drop_refcount(struct dio *dio)
  * individual fields and will generate much worse code. This is important
  * for the whole file.
  */
-static inline ssize_t
-do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
-                      struct block_device *bdev, struct iov_iter *iter,
-                      get_block_t get_block, dio_iodone_t end_io,
-                      dio_submit_t submit_io, int flags)
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+                struct block_device *bdev, struct iov_iter *iter,
+                get_block_t get_block, dio_iodone_t end_io,
+                dio_submit_t submit_io, int flags)
 {
         unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
         unsigned blkbits = i_blkbits;
@@ -1165,22 +1142,13 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
          * the early prefetch in the caller enough time.
          */
 
-        if (align & blocksize_mask) {
-                if (bdev)
-                        blkbits = blksize_bits(bdev_logical_block_size(bdev));
-                blocksize_mask = (1 << blkbits) - 1;
-                if (align & blocksize_mask)
-                        goto out;
-        }
-
         /* watch out for a 0 len io from a tricksy fs */
         if (iov_iter_rw(iter) == READ && !count)
                 return 0;
 
         dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
-        retval = -ENOMEM;
         if (!dio)
-                goto out;
+                return -ENOMEM;
         /*
          * Believe it or not, zeroing out the page array caused a .5%
          * performance regression in a database benchmark.  So, we take
@@ -1189,32 +1157,32 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
         memset(dio, 0, offsetof(struct dio, pages));
 
         dio->flags = flags;
-        if (dio->flags & DIO_LOCKING) {
-                if (iov_iter_rw(iter) == READ) {
-                        struct address_space *mapping =
-                                        iocb->ki_filp->f_mapping;
-
-                        /* will be released by direct_io_worker */
-                        inode_lock(inode);
-
-                        retval = filemap_write_and_wait_range(mapping, offset,
-                                                              end - 1);
-                        if (retval) {
-                                inode_unlock(inode);
-                                kmem_cache_free(dio_cache, dio);
-                                goto out;
-                        }
-                }
+        if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
+                /* will be released by direct_io_worker */
+                inode_lock(inode);
         }
 
         /* Once we sampled i_size check for reads beyond EOF */
         dio->i_size = i_size_read(inode);
         if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
-                if (dio->flags & DIO_LOCKING)
-                        inode_unlock(inode);
-                kmem_cache_free(dio_cache, dio);
                 retval = 0;
-                goto out;
+                goto fail_dio;
+        }
+
+        if (align & blocksize_mask) {
+                if (bdev)
+                        blkbits = blksize_bits(bdev_logical_block_size(bdev));
+                blocksize_mask = (1 << blkbits) - 1;
+                if (align & blocksize_mask)
+                        goto fail_dio;
+        }
+
+        if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
+                struct address_space *mapping = iocb->ki_filp->f_mapping;
+
+                retval = filemap_write_and_wait_range(mapping, offset, end - 1);
+                if (retval)
+                        goto fail_dio;
         }
 
         /*
@@ -1232,15 +1200,12 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 
         dio->inode = inode;
         if (iov_iter_rw(iter) == WRITE) {
-                dio->op = REQ_OP_WRITE;
-                dio->op_flags = REQ_SYNC | REQ_IDLE;
+                dio->opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
                 if (iocb->ki_flags & IOCB_NOWAIT)
-                        dio->op_flags |= REQ_NOWAIT;
+                        dio->opf |= REQ_NOWAIT;
         } else {
-                dio->op = REQ_OP_READ;
+                dio->opf = REQ_OP_READ;
         }
-        if (iocb->ki_flags & IOCB_HIPRI)
-                dio->op_flags |= REQ_HIPRI;
 
         /*
          * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
@@ -1248,7 +1213,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
          */
         if (dio->is_async && iov_iter_rw(iter) == WRITE) {
                 retval = 0;
-                if (iocb->ki_flags & IOCB_DSYNC)
+                if (iocb_is_dsync(iocb))
                         retval = dio_set_defer_completion(dio);
                 else if (!dio->inode->i_sb->s_dio_done_wq) {
                         /*
@@ -1258,14 +1223,8 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
                          */
                         retval = sb_init_dio_done_wq(dio->inode->i_sb);
                 }
-                if (retval) {
-                        /*
-                         * We grab i_mutex only for reads so we don't have
-                         * to release it here
-                         */
-                        kmem_cache_free(dio_cache, dio);
-                        goto out;
-                }
+                if (retval)
+                        goto fail_dio;
         }
 
         /*
@@ -1289,7 +1248,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
         spin_lock_init(&dio->bio_lock);
         dio->refcount = 1;
 
-        dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
+        dio->should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ;
         sdio.iter = iter;
         sdio.final_block_in_request = end >> blkbits;
 
@@ -1311,7 +1270,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
                 if (retval == -ENOTBLK) {
                         /*
                          * The remaining part of the request will be
-                         * be handled by buffered I/O when we return
+                         * handled by buffered I/O when we return
                          */
                         retval = 0;
                 }
@@ -1368,32 +1327,15 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
         } else
                 BUG_ON(retval != -EIOCBQUEUED);
 
-out:
         return retval;
-}
 
-ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
-                             struct block_device *bdev, struct iov_iter *iter,
-                             get_block_t get_block,
-                             dio_iodone_t end_io, dio_submit_t submit_io,
-                             int flags)
-{
-        /*
-         * The block device state is needed in the end to finally
-         * submit everything.  Since it's likely to be cache cold
-         * prefetch it here as first thing to hide some of the
-         * latency.
-         *
-         * Attempt to prefetch the pieces we likely need later.
-         */
-        prefetch(&bdev->bd_disk->part_tbl);
-        prefetch(bdev->bd_queue);
-        prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
+fail_dio:
+        if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ)
+                inode_unlock(inode);
 
-        return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block,
-                        end_io, submit_io, flags);
+        kmem_cache_free(dio_cache, dio);
+        return retval;
 }
-
 EXPORT_SYMBOL(__blockdev_direct_IO);
 
 static __init int dio_init(void)
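
The conversion pattern running through the hunks above: the separate "int op" / "int op_flags" members (and the blk_qc_t cookie) collapse into a single blk_opf_t, the bare operation is recovered with REQ_OP_MASK wherever it is compared, and bio_alloc() now receives the block device and the opf up front instead of a later bio_set_dev()/bio_set_op_attrs() pair. Below is a minimal sketch of that post-conversion usage; it is illustrative only, not code from this patch, and dio_submit_sketch() is a made-up name.

/* Illustrative sketch, not part of the patch above. */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void dio_submit_sketch(struct block_device *bdev, sector_t sector,
                              unsigned short nr_vecs, bool is_write)
{
        blk_opf_t opf;          /* operation and flags live in one value */
        enum req_op op;
        struct bio *bio;

        if (is_write)
                opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
        else
                opf = REQ_OP_READ;

        /* the bare operation is recovered with REQ_OP_MASK where needed */
        op = opf & REQ_OP_MASK;

        /* bio_alloc() now takes the target bdev and the opf directly */
        bio = bio_alloc(bdev, nr_vecs, opf, GFP_KERNEL);
        bio->bi_iter.bi_sector = sector;

        /* pages would be attached with bio_add_page() before submission */
        if (op == REQ_OP_READ)
                pr_debug("submitting a direct read bio\n");

        submit_bio(bio);
}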