Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/DAC960.c               |   2
-rw-r--r--  drivers/block/amiflop.c              |  10
-rw-r--r--  drivers/block/aoe/aoeblk.c           |   1
-rw-r--r--  drivers/block/aoe/aoecmd.c           |  12
-rw-r--r--  drivers/block/aoe/aoedev.c           |   2
-rw-r--r--  drivers/block/ataflop.c              |  16
-rw-r--r--  drivers/block/brd.c                  |   1
-rw-r--r--  drivers/block/cciss.c                |   4
-rw-r--r--  drivers/block/drbd/drbd_actlog.c     |   2
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c     |   6
-rw-r--r--  drivers/block/drbd/drbd_int.h        |   5
-rw-r--r--  drivers/block/drbd/drbd_main.c       |  17
-rw-r--r--  drivers/block/drbd/drbd_nl.c         |   2
-rw-r--r--  drivers/block/drbd/drbd_receiver.c   |   6
-rw-r--r--  drivers/block/drbd/drbd_req.c        |  35
-rw-r--r--  drivers/block/drbd/drbd_req.h        |   2
-rw-r--r--  drivers/block/drbd/drbd_worker.c     |  16
-rw-r--r--  drivers/block/floppy.c               |   9
-rw-r--r--  drivers/block/loop.c                 |  73
-rw-r--r--  drivers/block/loop.h                 |   1
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c    |  54
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.h    |   2
-rw-r--r--  drivers/block/nbd.c                  |  59
-rw-r--r--  drivers/block/null_blk.c             | 125
-rw-r--r--  drivers/block/paride/pcd.c           |   9
-rw-r--r--  drivers/block/paride/pd.c            |   3
-rw-r--r--  drivers/block/paride/pf.c            |  19
-rw-r--r--  drivers/block/pktcdvd.c              |  75
-rw-r--r--  drivers/block/ps3disk.c              |  11
-rw-r--r--  drivers/block/ps3vram.c              |  16
-rw-r--r--  drivers/block/rbd.c                  |  30
-rw-r--r--  drivers/block/rsxx/dev.c             |  17
-rw-r--r--  drivers/block/rsxx/dma.c             |  13
-rw-r--r--  drivers/block/rsxx/rsxx_priv.h       |   2
-rw-r--r--  drivers/block/skd_main.c             |  32
-rw-r--r--  drivers/block/sunvdc.c               |   4
-rw-r--r--  drivers/block/swim.c                 |   8
-rw-r--r--  drivers/block/swim3.c                |  29
-rw-r--r--  drivers/block/sx8.c                  |  20
-rw-r--r--  drivers/block/umem.c                 |   4
-rw-r--r--  drivers/block/virtio_blk.c           |  23
-rw-r--r--  drivers/block/xen-blkback/blkback.c  |  45
-rw-r--r--  drivers/block/xen-blkback/common.h   |  26
-rw-r--r--  drivers/block/xen-blkback/xenbus.c   |  23
-rw-r--r--  drivers/block/xen-blkfront.c         |  81
-rw-r--r--  drivers/block/xsysace.c              |   9
-rw-r--r--  drivers/block/z2ram.c                |   4
-rw-r--r--  drivers/block/zram/zram_drv.c        |  26
48 files changed, 516 insertions(+), 475 deletions(-)
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 26a51be77227..245a879b036e 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3464,7 +3464,7 @@ static inline bool DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
bool SuccessfulIO)
{
struct request *Request = Command->Request;
- int Error = SuccessfulIO ? 0 : -EIO;
+ blk_status_t Error = SuccessfulIO ? BLK_STS_OK : BLK_STS_IOERR;
pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
Command->SegmentCount, Command->DmaDirection);
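
The DAC960 hunk shows the core pattern of this series: request completion paths stop passing negative errnos and pass a blk_status_t instead. A minimal sketch of the converted completion style, using a hypothetical my_complete() helper rather than anything from this patch:

    #include <linux/blkdev.h>

    /* Hypothetical helper: map a driver-level success flag to the
     * blk_status_t that the completion functions now expect. */
    static void my_complete(struct request *req, bool success)
    {
            __blk_end_request_all(req, success ? BLK_STS_OK : BLK_STS_IOERR);
    }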
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index a328f673adfe..49908c74bfcb 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1378,7 +1378,7 @@ static void redo_fd_request(void)
struct amiga_floppy_struct *floppy;
char *data;
unsigned long flags;
- int err;
+ blk_status_t err;
next_req:
rq = set_next_request();
@@ -1392,7 +1392,7 @@ next_req:
next_segment:
/* Here someone could investigate to be more efficient */
- for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
+ for (cnt = 0, err = BLK_STS_OK; cnt < blk_rq_cur_sectors(rq); cnt++) {
#ifdef DEBUG
printk("fd: sector %ld + %d requested for %s\n",
blk_rq_pos(rq), cnt,
@@ -1400,7 +1400,7 @@ next_segment:
#endif
block = blk_rq_pos(rq) + cnt;
if ((int)block > floppy->blocks) {
- err = -EIO;
+ err = BLK_STS_IOERR;
break;
}
@@ -1413,7 +1413,7 @@ next_segment:
#endif
if (get_track(drive, track) == -1) {
- err = -EIO;
+ err = BLK_STS_IOERR;
break;
}
@@ -1424,7 +1424,7 @@ next_segment:
/* keep the drive spinning while writes are scheduled */
if (!fd_motor_on(drive)) {
- err = -EIO;
+ err = BLK_STS_IOERR;
break;
}
/*
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 027b876370bc..6797e6c23c8a 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -388,6 +388,7 @@ aoeblk_gdalloc(void *vp)
d->aoemajor, d->aoeminor);
goto err_mempool;
}
+ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
spin_lock_irqsave(&d->lock, flags);
WARN_ON(!(d->flags & DEVFL_GD_NOW));
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 3c606c09fd5a..dc43254e05a4 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1070,8 +1070,8 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
d->ip.rq = NULL;
do {
bio = rq->bio;
- bok = !fastfail && !bio->bi_error;
- } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
+ bok = !fastfail && !bio->bi_status;
+ } while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
/* cf. http://lkml.org/lkml/2006/10/31/28 */
if (!fastfail)
@@ -1131,7 +1131,7 @@ ktiocomplete(struct frame *f)
ahout->cmdstat, ahin->cmdstat,
d->aoemajor, d->aoeminor);
noskb: if (buf)
- buf->bio->bi_error = -EIO;
+ buf->bio->bi_status = BLK_STS_IOERR;
goto out;
}
@@ -1144,7 +1144,7 @@ noskb: if (buf)
"aoe: runt data size in read from",
(long) d->aoemajor, d->aoeminor,
skb->len, n);
- buf->bio->bi_error = -EIO;
+ buf->bio->bi_status = BLK_STS_IOERR;
break;
}
if (n > f->iter.bi_size) {
@@ -1152,7 +1152,7 @@ noskb: if (buf)
"aoe: too-large data size in read from",
(long) d->aoemajor, d->aoeminor,
n, f->iter.bi_size);
- buf->bio->bi_error = -EIO;
+ buf->bio->bi_status = BLK_STS_IOERR;
break;
}
bvcpy(skb, f->buf->bio, f->iter, n);
@@ -1654,7 +1654,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
if (buf == NULL)
return;
buf->iter.bi_size = 0;
- buf->bio->bi_error = -EIO;
+ buf->bio->bi_status = BLK_STS_IOERR;
if (buf->nframesout == 0)
aoe_end_buf(d, buf);
}
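
Throughout the aoe changes, per-bio error reporting moves from the signed bio->bi_error field to bio->bi_status, which holds a blk_status_t. Sketch of the new failure idiom for any bio-completing driver (my_fail_bio is invented for illustration):

    #include <linux/bio.h>

    static void my_fail_bio(struct bio *bio)
    {
            bio->bi_status = BLK_STS_IOERR; /* was: bio->bi_error = -EIO; */
            bio_endio(bio);
    }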
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index ffd1947500c6..b28fefb90391 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -170,7 +170,7 @@ aoe_failip(struct aoedev *d)
if (rq == NULL)
return;
while ((bio = d->ip.nxbio)) {
- bio->bi_error = -EIO;
+ bio->bi_status = BLK_STS_IOERR;
d->ip.nxbio = bio->bi_next;
n = (unsigned long) rq->special;
rq->special = (void *) --n;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index fa69ecd52cb5..92da886180aa 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -378,7 +378,7 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
static DEFINE_TIMER(fd_timer, check_change, 0, 0);
-static void fd_end_request_cur(int err)
+static void fd_end_request_cur(blk_status_t err)
{
if (!__blk_end_request_cur(fd_request, err))
fd_request = NULL;
@@ -620,7 +620,7 @@ static void fd_error( void )
fd_request->error_count++;
if (fd_request->error_count >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
- fd_end_request_cur(-EIO);
+ fd_end_request_cur(BLK_STS_IOERR);
}
else if (fd_request->error_count == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
@@ -739,7 +739,7 @@ static void do_fd_action( int drive )
}
else {
/* all sectors finished */
- fd_end_request_cur(0);
+ fd_end_request_cur(BLK_STS_OK);
redo_fd_request();
return;
}
@@ -1144,7 +1144,7 @@ static void fd_rwsec_done1(int status)
}
else {
/* all sectors finished */
- fd_end_request_cur(0);
+ fd_end_request_cur(BLK_STS_OK);
redo_fd_request();
}
return;
@@ -1445,7 +1445,7 @@ repeat:
if (!UD.connected) {
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
- fd_end_request_cur(-EIO);
+ fd_end_request_cur(BLK_STS_IOERR);
goto repeat;
}
@@ -1461,12 +1461,12 @@ repeat:
/* user supplied disk type */
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
- fd_end_request_cur(-EIO);
+ fd_end_request_cur(BLK_STS_IOERR);
goto repeat;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
- fd_end_request_cur(-EIO);
+ fd_end_request_cur(BLK_STS_IOERR);
goto repeat;
}
type = minor2disktype[type].index;
@@ -1476,7 +1476,7 @@ repeat:
}
if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
- fd_end_request_cur(-EIO);
+ fd_end_request_cur(BLK_STS_IOERR);
goto repeat;
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 57b574f2f66a..6112e99bedf7 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -418,7 +418,6 @@ static struct brd_device *brd_alloc(int i)
blk_queue_make_request(brd->brd_queue, brd_make_request);
blk_queue_max_hw_sectors(brd->brd_queue, 1024);
- blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
/* This is so fdisk will align partitions on 4k, because of
* direct_access API needing 4k alignment, returning a PFN
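
brd drops its blk_queue_bounce_limit(BLK_BOUNCE_ANY) call while aoeblk, floppy, pcd, pd and pf gain BLK_BOUNCE_HIGH calls: the queue default flips to no bouncing, so only drivers that cannot reach highmem now opt in. A sketch, assuming a driver whose hardware can only DMA to low memory:

    #include <linux/blkdev.h>

    static void my_setup_queue(struct request_queue *q)
    {
            /* BLK_BOUNCE_ANY is the default now; request bouncing only
             * if the device cannot address highmem pages directly. */
            blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
    }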
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index cd375503f7b0..02a611993bb4 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1864,7 +1864,8 @@ static void cciss_softirq_done(struct request *rq)
/* set the residual count for pc requests */
if (blk_rq_is_passthrough(rq))
scsi_req(rq)->resid_len = c->err_info->ResidualCnt;
- blk_end_request_all(rq, scsi_req(rq)->result ? -EIO : 0);
+ blk_end_request_all(rq, scsi_req(rq)->result ?
+ BLK_STS_IOERR : BLK_STS_OK);
spin_lock_irqsave(&h->lock, flags);
cmd_free(h, c);
@@ -1956,6 +1957,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
disk->queue->cmd_size = sizeof(struct scsi_request);
disk->queue->request_fn = do_cciss_request;
disk->queue->queue_lock = &h->lock;
+ queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, disk->queue);
if (blk_init_allocated_queue(disk->queue) < 0)
goto cleanup_queue;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 8d7bcfa49c12..e02c45cd3c5a 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -178,7 +178,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
else
submit_bio(bio);
wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
- if (!bio->bi_error)
+ if (!bio->bi_status)
err = device->md_io.error;
out:
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index a804a4107fbc..809fd245c3dc 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -959,16 +959,16 @@ static void drbd_bm_endio(struct bio *bio)
!bm_test_page_unchanged(b->bm_pages[idx]))
drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
- if (bio->bi_error) {
+ if (bio->bi_status) {
/* ctx error will hold the completed-last non-zero error code,
* in case error codes differ. */
- ctx->error = bio->bi_error;
+ ctx->error = blk_status_to_errno(bio->bi_status);
bm_set_page_io_err(b->bm_pages[idx]);
/* Not identical to on disk version of it.
* Is BM_PAGE_IO_ERROR enough? */
if (__ratelimit(&drbd_ratelimit_state))
drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
- bio->bi_error, idx);
+ bio->bi_status, idx);
} else {
bm_clear_page_io_err(b->bm_pages[idx]);
dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
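
Where drbd still stores an errno internally, the new blk_status_to_errno() helper converts at the boundary instead of copying bi_status raw. A sketch of that boundary conversion (my_endio and the int error slot are stand-ins for the ctx->error usage above):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void my_endio(struct bio *bio, int *errp)
    {
            if (bio->bi_status)
                    *errp = blk_status_to_errno(bio->bi_status);
    }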
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index d5da45bb03a6..d17b6e6393c7 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1441,6 +1441,9 @@ extern struct bio_set *drbd_md_io_bio_set;
/* to allocate from that set */
extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
+/* And a bio_set for cloning */
+extern struct bio_set *drbd_io_bio_set;
+
extern struct mutex resources_mutex;
extern int conn_lowest_minor(struct drbd_connection *connection);
@@ -1627,7 +1630,7 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
__release(local);
if (!bio->bi_bdev) {
drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
- bio->bi_error = -ENODEV;
+ bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return;
}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 84455c365f57..e2ed28d45ce1 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -128,6 +128,7 @@ mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;
+struct bio_set *drbd_io_bio_set;
/* I do not use a standard mempool, because:
1) I want to hand out the pre-allocated objects first.
@@ -1550,7 +1551,6 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
int offset, size_t size, unsigned msg_flags)
{
struct socket *socket = peer_device->connection->data.socket;
- mm_segment_t oldfs = get_fs();
int len = size;
int err = -EIO;
@@ -1565,7 +1565,6 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
msg_flags |= MSG_NOSIGNAL;
drbd_update_congested(peer_device->connection);
- set_fs(KERNEL_DS);
do {
int sent;
@@ -1585,7 +1584,6 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
len -= sent;
offset += sent;
} while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
- set_fs(oldfs);
clear_bit(NET_CONGESTED, &peer_device->connection->flags);
if (len == 0) {
@@ -2098,6 +2096,8 @@ static void drbd_destroy_mempools(void)
/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
+ if (drbd_io_bio_set)
+ bioset_free(drbd_io_bio_set);
if (drbd_md_io_bio_set)
bioset_free(drbd_md_io_bio_set);
if (drbd_md_io_page_pool)
@@ -2115,6 +2115,7 @@ static void drbd_destroy_mempools(void)
if (drbd_al_ext_cache)
kmem_cache_destroy(drbd_al_ext_cache);
+ drbd_io_bio_set = NULL;
drbd_md_io_bio_set = NULL;
drbd_md_io_page_pool = NULL;
drbd_ee_mempool = NULL;
@@ -2142,6 +2143,7 @@ static int drbd_create_mempools(void)
drbd_pp_pool = NULL;
drbd_md_io_page_pool = NULL;
drbd_md_io_bio_set = NULL;
+ drbd_io_bio_set = NULL;
/* caches */
drbd_request_cache = kmem_cache_create(
@@ -2165,7 +2167,13 @@ static int drbd_create_mempools(void)
goto Enomem;
/* mempools */
- drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
+ drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_RESCUER);
+ if (drbd_io_bio_set == NULL)
+ goto Enomem;
+
+ drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0,
+ BIOSET_NEED_BVECS |
+ BIOSET_NEED_RESCUER);
if (drbd_md_io_bio_set == NULL)
goto Enomem;
@@ -2839,7 +2847,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
/* Setting the max_hw_sectors to an odd value of 8kibyte here
This triggers a max_bio_size message upon first attach or connect */
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
- blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
q->queue_lock = &resource->req_lock;
device->md_io.page = alloc_page(GFP_KERNEL);
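
bioset_create() grows a flags argument here: BIOSET_NEED_BVECS for sets that allocate bios with their own bvec tables, BIOSET_NEED_RESCUER for sets used to clone or split inside a make_request function. A sketch of the new call, with an arbitrary pool size:

    #include <linux/bio.h>

    static struct bio_set *my_bio_set;

    static int my_init(void)
    {
            my_bio_set = bioset_create(BIO_POOL_SIZE, 0,
                                       BIOSET_NEED_BVECS | BIOSET_NEED_RESCUER);
            return my_bio_set ? 0 : -ENOMEM;
    }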
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 02255a0d68b9..ad0fcb43e45c 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2294,7 +2294,7 @@ _check_net_options(struct drbd_connection *connection, struct net_conf *old_net_
static enum drbd_ret_code
check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
{
- static enum drbd_ret_code rv;
+ enum drbd_ret_code rv;
struct drbd_peer_device *peer_device;
int i;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 1b0a2be24f39..c7e95e6380fb 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1229,9 +1229,9 @@ void one_flush_endio(struct bio *bio)
struct drbd_device *device = octx->device;
struct issue_flush_context *ctx = octx->ctx;
- if (bio->bi_error) {
- ctx->error = bio->bi_error;
- drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_error);
+ if (bio->bi_status) {
+ ctx->error = blk_status_to_errno(bio->bi_status);
+ drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
}
kfree(octx);
bio_put(bio);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index b5730e17b455..f6e865b2d543 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -203,7 +203,7 @@ void start_new_tl_epoch(struct drbd_connection *connection)
void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m)
{
- m->bio->bi_error = m->error;
+ m->bio->bi_status = errno_to_blk_status(m->error);
bio_endio(m->bio);
dec_ap_bio(device);
}
@@ -315,24 +315,32 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
}
/* still holds resource->req_lock */
-static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
+static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
struct drbd_device *device = req->device;
D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
+ if (!put)
+ return;
+
if (!atomic_sub_and_test(put, &req->completion_ref))
- return 0;
+ return;
drbd_req_complete(req, m);
+ /* local completion may still come in later,
+ * we need to keep the req object around. */
+ if (req->rq_state & RQ_LOCAL_ABORTED)
+ return;
+
if (req->rq_state & RQ_POSTPONED) {
/* don't destroy the req object just yet,
* but queue it for retry */
drbd_restart_request(req);
- return 0;
+ return;
}
- return 1;
+ kref_put(&req->kref, drbd_req_destroy);
}
static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
@@ -519,12 +527,8 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
if (req->i.waiting)
wake_up(&device->misc_wait);
- if (c_put) {
- if (drbd_req_put_completion_ref(req, m, c_put))
- kref_put(&req->kref, drbd_req_destroy);
- } else {
- kref_put(&req->kref, drbd_req_destroy);
- }
+ drbd_req_put_completion_ref(req, m, c_put);
+ kref_put(&req->kref, drbd_req_destroy);
}
static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
@@ -1153,7 +1157,7 @@ static void drbd_process_discard_req(struct drbd_request *req)
if (blkdev_issue_zeroout(bdev, req->i.sector, req->i.size >> 9,
GFP_NOIO, 0))
- req->private_bio->bi_error = -EIO;
+ req->private_bio->bi_status = BLK_STS_IOERR;
bio_endio(req->private_bio);
}
@@ -1221,7 +1225,7 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
/* only pass the error to the upper layers.
* if user cannot handle io errors, that's not our business. */
drbd_err(device, "could not kmalloc() req\n");
- bio->bi_error = -ENOMEM;
+ bio->bi_status = BLK_STS_RESOURCE;
bio_endio(bio);
return ERR_PTR(-ENOMEM);
}
@@ -1366,8 +1370,7 @@ nodata:
}
out:
- if (drbd_req_put_completion_ref(req, &m, 1))
- kref_put(&req->kref, drbd_req_destroy);
+ drbd_req_put_completion_ref(req, &m, 1);
spin_unlock_irq(&resource->req_lock);
/* Even though above is a kref_put(), this is safe.
@@ -1557,7 +1560,7 @@ blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
struct drbd_device *device = (struct drbd_device *) q->queuedata;
unsigned long start_jif;
- blk_queue_split(q, &bio, q->bio_split);
+ blk_queue_split(q, &bio);
start_jif = jiffies;
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index eb49e7f2da91..9e1866ab238f 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -263,7 +263,7 @@ enum drbd_req_state_bits {
static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
{
struct bio *bio;
- bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */
+ bio = bio_clone_fast(bio_src, GFP_NOIO, drbd_io_bio_set);
req->private_bio = bio;
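
bio_clone() is replaced with bio_clone_fast() plus the dedicated drbd_io_bio_set added in drbd_main.c; the fast variant shares the source's bvec table instead of copying it, so it needs no bvec pool of its own. Sketch, assuming a bio_set created as in the previous hunk:

    #include <linux/bio.h>

    extern struct bio_set *my_bio_set; /* allocated once at init time */

    static struct bio *my_clone(struct bio *src)
    {
            /* shares src's bvec table; src must outlive the clone */
            return bio_clone_fast(src, GFP_NOIO, my_bio_set);
    }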
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 1afcb4e02d8d..1d8726a8df34 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -63,7 +63,7 @@ void drbd_md_endio(struct bio *bio)
struct drbd_device *device;
device = bio->bi_private;
- device->md_io.error = bio->bi_error;
+ device->md_io.error = blk_status_to_errno(bio->bi_status);
/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
* to timeout on the lower level device, and eventually detach from it.
@@ -177,13 +177,13 @@ void drbd_peer_request_endio(struct bio *bio)
bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
bio_op(bio) == REQ_OP_DISCARD;
- if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
+ if (bio->bi_status && __ratelimit(&drbd_ratelimit_state))
drbd_warn(device, "%s: error=%d s=%llus\n",
is_write ? (is_discard ? "discard" : "write")
- : "read", bio->bi_error,
+ : "read", bio->bi_status,
(unsigned long long)peer_req->i.sector);
- if (bio->bi_error)
+ if (bio->bi_status)
set_bit(__EE_WAS_ERROR, &peer_req->flags);
bio_put(bio); /* no need for the bio anymore */
@@ -243,16 +243,16 @@ void drbd_request_endio(struct bio *bio)
if (__ratelimit(&drbd_ratelimit_state))
drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
- if (!bio->bi_error)
+ if (!bio->bi_status)
drbd_panic_after_delayed_completion_of_aborted_request(device);
}
/* to avoid recursion in __req_mod */
- if (unlikely(bio->bi_error)) {
+ if (unlikely(bio->bi_status)) {
switch (bio_op(bio)) {
case REQ_OP_WRITE_ZEROES:
case REQ_OP_DISCARD:
- if (bio->bi_error == -EOPNOTSUPP)
+ if (bio->bi_status == BLK_STS_NOTSUPP)
what = DISCARD_COMPLETED_NOTSUPP;
else
what = DISCARD_COMPLETED_WITH_ERROR;
@@ -272,7 +272,7 @@ void drbd_request_endio(struct bio *bio)
}
bio_put(req->private_bio);
- req->private_bio = ERR_PTR(bio->bi_error);
+ req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
/* not req_mod(), we need irqsave here! */
spin_lock_irqsave(&device->resource->req_lock, flags);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 60d4c7653178..ce823647a9c4 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2202,7 +2202,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
* =============================
*/
-static void floppy_end_request(struct request *req, int error)
+static void floppy_end_request(struct request *req, blk_status_t error)
{
unsigned int nr_sectors = current_count_sectors;
unsigned int drive = (unsigned long)req->rq_disk->private_data;
@@ -2263,7 +2263,7 @@ static void request_done(int uptodate)
DRWE->last_error_generation = DRS->generation;
}
spin_lock_irqsave(q->queue_lock, flags);
- floppy_end_request(req, -EIO);
+ floppy_end_request(req, BLK_STS_IOERR);
spin_unlock_irqrestore(q->queue_lock, flags);
}
}
@@ -3780,9 +3780,9 @@ static void floppy_rb0_cb(struct bio *bio)
struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
int drive = cbdata->drive;
- if (bio->bi_error) {
+ if (bio->bi_status) {
pr_info("floppy: error %d while reading block 0\n",
- bio->bi_error);
+ bio->bi_status);
set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
}
complete(&cbdata->complete);
@@ -4203,6 +4203,7 @@ static int __init do_floppy_init(void)
goto out_put_disk;
}
+ blk_queue_bounce_limit(disks[drive]->queue, BLK_BOUNCE_HIGH);
blk_queue_max_hw_sectors(disks[drive]->queue, 64);
disks[drive]->major = FLOPPY_MAJOR;
disks[drive]->first_minor = TOMINOR(drive);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 28d932906f24..ef8334949b42 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -221,7 +221,8 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
}
static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
+ loff_t logical_blocksize)
{
loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
sector_t x = (sector_t)size;
@@ -233,6 +234,12 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
lo->lo_offset = offset;
if (lo->lo_sizelimit != sizelimit)
lo->lo_sizelimit = sizelimit;
+ if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
+ lo->lo_logical_blocksize = logical_blocksize;
+ blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
+ blk_queue_logical_block_size(lo->lo_queue,
+ lo->lo_logical_blocksize);
+ }
set_capacity(lo->lo_disk, x);
bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
/* let user-space know about the new size */
@@ -266,7 +273,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
file_start_write(file);
- bw = vfs_iter_write(file, &i, ppos);
+ bw = vfs_iter_write(file, &i, ppos, 0);
file_end_write(file);
if (likely(bw == bvec->bv_len))
@@ -342,7 +349,7 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
rq_for_each_segment(bvec, rq, iter) {
iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
- len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
+ len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0)
return len;
@@ -383,7 +390,7 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq,
b.bv_len = bvec.bv_len;
iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
- len = vfs_iter_read(lo->lo_backing_file, &i, &pos);
+ len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0) {
ret = len;
goto out_free_page;
@@ -457,7 +464,7 @@ static void lo_complete_rq(struct request *rq)
zero_fill_bio(bio);
}
- blk_mq_end_request(rq, cmd->ret < 0 ? -EIO : 0);
+ blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
}
static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -608,6 +615,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
*/
static int loop_flush(struct loop_device *lo)
{
+ /* loop not yet configured, no running thread, nothing to flush */
+ if (lo->lo_state != Lo_bound)
+ return 0;
return loop_switch(lo, NULL);
}
@@ -810,6 +820,7 @@ static void loop_config_discard(struct loop_device *lo)
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct request_queue *q = lo->lo_queue;
+ int lo_bits = 9;
/*
* We use punch hole to reclaim the free space used by the
@@ -829,8 +840,11 @@ static void loop_config_discard(struct loop_device *lo)
q->limits.discard_granularity = inode->i_sb->s_blocksize;
q->limits.discard_alignment = 0;
- blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
- blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+ if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
+ lo_bits = blksize_bits(lo->lo_logical_blocksize);
+
+ blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
+ blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}
@@ -840,10 +854,16 @@ static void loop_unprepare_queue(struct loop_device *lo)
kthread_stop(lo->worker_task);
}
+static int loop_kthread_worker_fn(void *worker_ptr)
+{
+ current->flags |= PF_LESS_THROTTLE;
+ return kthread_worker_fn(worker_ptr);
+}
+
static int loop_prepare_queue(struct loop_device *lo)
{
kthread_init_worker(&lo->worker);
- lo->worker_task = kthread_run(kthread_worker_fn,
+ lo->worker_task = kthread_run(loop_kthread_worker_fn,
&lo->worker, "loop%d", lo->lo_number);
if (IS_ERR(lo->worker_task))
return -ENOMEM;
@@ -918,6 +938,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->use_dio = false;
lo->lo_blocksize = lo_blocksize;
+ lo->lo_logical_blocksize = 512;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
@@ -1083,6 +1104,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
int err;
struct loop_func_table *xfer;
kuid_t uid = current_uid();
+ int lo_flags = lo->lo_flags;
if (lo->lo_encrypt_key_size &&
!uid_eq(lo->lo_key_owner, uid) &&
@@ -1115,12 +1137,30 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (err)
goto exit;
+ if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
+ if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
+ lo->lo_logical_blocksize = 512;
+ lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
+ if (LO_INFO_BLOCKSIZE(info) != 512 &&
+ LO_INFO_BLOCKSIZE(info) != 1024 &&
+ LO_INFO_BLOCKSIZE(info) != 2048 &&
+ LO_INFO_BLOCKSIZE(info) != 4096)
+ return -EINVAL;
+ if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
+ return -EINVAL;
+ }
+
if (lo->lo_offset != info->lo_offset ||
- lo->lo_sizelimit != info->lo_sizelimit)
- if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
+ lo->lo_sizelimit != info->lo_sizelimit ||
+ lo->lo_flags != lo_flags ||
+ ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
+ lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
+ if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
+ LO_INFO_BLOCKSIZE(info))) {
err = -EFBIG;
goto exit;
}
+ }
loop_config_discard(lo);
@@ -1303,12 +1343,13 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
return err;
}
-static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
+static int loop_set_capacity(struct loop_device *lo)
{
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
- return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
+ return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
+ lo->lo_logical_blocksize);
}
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
@@ -1366,7 +1407,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_SET_CAPACITY:
err = -EPERM;
if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
- err = loop_set_capacity(lo, bdev);
+ err = loop_set_capacity(lo);
break;
case LOOP_SET_DIRECT_IO:
err = -EPERM;
@@ -1642,7 +1683,7 @@ int loop_unregister_transfer(int number)
EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);
-static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -1651,7 +1692,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(bd->rq);
if (lo->lo_state != Lo_bound)
- return BLK_MQ_RQ_QUEUE_ERROR;
+ return BLK_STS_IOERR;
switch (req_op(cmd->rq)) {
case REQ_OP_FLUSH:
@@ -1666,7 +1707,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
kthread_queue_work(&lo->worker, &cmd->work);
- return BLK_MQ_RQ_QUEUE_OK;
+ return BLK_STS_OK;
}
static void loop_handle_cmd(struct loop_cmd *cmd)
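
loop's ->queue_rq() now returns blk_status_t directly, retiring the BLK_MQ_RQ_QUEUE_* constants; the same conversion repeats in mtip32xx, nbd and null_blk below. A minimal sketch of the new contract (my_queue_rq and my_busy are hypothetical):

    #include <linux/blk-mq.h>

    static bool my_busy(void) { return false; } /* hardware-full check */

    static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
    {
            if (my_busy())
                    return BLK_STS_RESOURCE; /* was BLK_MQ_RQ_QUEUE_BUSY */

            blk_mq_start_request(bd->rq);
            /* ... issue bd->rq to the backend ... */
            return BLK_STS_OK; /* was BLK_MQ_RQ_QUEUE_OK */
    }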
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index fecd3f97ef8c..2c096b9a17b8 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -49,6 +49,7 @@ struct loop_device {
struct file * lo_backing_file;
struct block_device *lo_device;
unsigned lo_blocksize;
+ unsigned lo_logical_blocksize;
void *key_data;
gfp_t old_gfp_mask;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 3a779a4f5653..61b046f256ca 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -532,7 +532,7 @@ static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
struct smart_attr *attrib);
-static void mtip_complete_command(struct mtip_cmd *cmd, int status)
+static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
@@ -568,7 +568,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
- mtip_complete_command(cmd, -EIO);
+ mtip_complete_command(cmd, BLK_STS_IOERR);
return;
}
@@ -667,7 +667,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
tag,
fail_reason != NULL ?
fail_reason : "unknown");
- mtip_complete_command(cmd, -ENODATA);
+ mtip_complete_command(cmd, BLK_STS_MEDIUM);
continue;
}
}
@@ -690,7 +690,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
dev_warn(&port->dd->pdev->dev,
"retiring tag %d\n", tag);
- mtip_complete_command(cmd, -EIO);
+ mtip_complete_command(cmd, BLK_STS_IOERR);
}
}
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
@@ -1063,23 +1063,10 @@ static int mtip_exec_internal_command(struct mtip_port *port,
/* insert request and run queue */
blk_execute_rq(rq->q, NULL, rq, true);
- rv = int_cmd->status;
- if (rv < 0) {
- if (rv == -ERESTARTSYS) { /* interrupted */
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] was interrupted after %u ms\n",
- fis->command,
- jiffies_to_msecs(jiffies - start));
- rv = -EINTR;
- goto exec_ic_exit;
- } else if (rv == 0) /* timeout */
- dev_err(&dd->pdev->dev,
- "Internal command did not complete [%02X] within timeout of %lu ms\n",
- fis->command, timeout);
- else
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
- fis->command, rv, timeout);
+ if (int_cmd->status) {
+ dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
+ fis->command, int_cmd->status);
+ rv = -EIO;
if (mtip_check_surprise_removal(dd->pdev) ||
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
@@ -2753,7 +2740,7 @@ static void mtip_abort_cmd(struct request *req, void *data,
dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
clear_bit(req->tag, dd->port->cmds_to_issue);
- cmd->status = -EIO;
+ cmd->status = BLK_STS_IOERR;
mtip_softirq_done_fn(req);
}
@@ -3597,7 +3584,7 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
int err;
err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
- blk_mq_end_request(rq, err);
+ blk_mq_end_request(rq, err ? BLK_STS_IOERR : BLK_STS_OK);
return 0;
}
@@ -3633,8 +3620,8 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
return false;
}
-static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
+static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
{
struct driver_data *dd = hctx->queue->queuedata;
struct mtip_int_cmd *icmd = rq->special;
@@ -3642,7 +3629,7 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
struct mtip_cmd_sg *command_sg;
if (mtip_commands_active(dd->port))
- return BLK_MQ_RQ_QUEUE_BUSY;
+ return BLK_STS_RESOURCE;
/* Populate the SG list */
cmd->command_header->opts =
@@ -3666,10 +3653,10 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
mtip_issue_non_ncq_command(dd->port, rq->tag);
- return BLK_MQ_RQ_QUEUE_OK;
+ return 0;
}
-static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
@@ -3681,15 +3668,14 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
return mtip_issue_reserved_cmd(hctx, rq);
if (unlikely(mtip_check_unal_depth(hctx, rq)))
- return BLK_MQ_RQ_QUEUE_BUSY;
+ return BLK_STS_RESOURCE;
blk_mq_start_request(rq);
ret = mtip_submit_request(hctx, rq);
if (likely(!ret))
- return BLK_MQ_RQ_QUEUE_OK;
-
- return BLK_MQ_RQ_QUEUE_ERROR;
+ return BLK_STS_OK;
+ return BLK_STS_IOERR;
}
static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
@@ -3730,7 +3716,7 @@ static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
if (reserved) {
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
- cmd->status = -ETIME;
+ cmd->status = BLK_STS_TIMEOUT;
return BLK_EH_HANDLED;
}
@@ -3961,7 +3947,7 @@ static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
{
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
- cmd->status = -ENODEV;
+ cmd->status = BLK_STS_IOERR;
blk_mq_complete_request(rq);
}
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 37b8e3e0bb78..e8286af50e16 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -342,7 +342,7 @@ struct mtip_cmd {
int retries; /* The number of retries left for this command. */
int direction; /* Data transfer direction */
- int status;
+ blk_status_t status;
};
/* Structure used to describe a port. */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9a7bb2c29447..977ec960dd2f 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -116,7 +116,7 @@ struct nbd_cmd {
int index;
int cookie;
struct completion send_complete;
- int status;
+ blk_status_t status;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -286,7 +286,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
struct nbd_config *config;
if (!refcount_inc_not_zero(&nbd->config_refs)) {
- cmd->status = -EIO;
+ cmd->status = BLK_STS_TIMEOUT;
return BLK_EH_HANDLED;
}
@@ -331,7 +331,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
"Connection timed out\n");
}
set_bit(NBD_TIMEDOUT, &config->runtime_flags);
- cmd->status = -EIO;
+ cmd->status = BLK_STS_IOERR;
sock_shutdown(nbd);
nbd_config_put(nbd);
@@ -400,6 +400,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
unsigned long size = blk_rq_bytes(req);
struct bio *bio;
u32 type;
+ u32 nbd_cmd_flags = 0;
u32 tag = blk_mq_unique_tag(req);
int sent = nsock->sent, skip = 0;
@@ -429,6 +430,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
return -EIO;
}
+ if (req->cmd_flags & REQ_FUA)
+ nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
+
/* We did a partial send previously, and we at least sent the whole
* request struct, so just go and send the rest of the pages in the
* request.
@@ -442,7 +446,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
}
cmd->index = index;
cmd->cookie = nsock->cookie;
- request.type = htonl(type);
+ request.type = htonl(type | nbd_cmd_flags);
if (type != NBD_CMD_FLUSH) {
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
@@ -465,7 +469,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
nsock->pending = req;
nsock->sent = sent;
}
- return BLK_MQ_RQ_QUEUE_BUSY;
+ return BLK_STS_RESOURCE;
}
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
@@ -506,7 +510,7 @@ send_pages:
*/
nsock->pending = req;
nsock->sent = sent;
- return BLK_MQ_RQ_QUEUE_BUSY;
+ return BLK_STS_RESOURCE;
}
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
@@ -574,7 +578,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
- cmd->status = -EIO;
+ cmd->status = BLK_STS_IOERR;
return cmd;
}
@@ -599,7 +603,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
*/
if (nbd_disconnected(config) ||
config->num_connections <= 1) {
- cmd->status = -EIO;
+ cmd->status = BLK_STS_IOERR;
return cmd;
}
return ERR_PTR(-EIO);
@@ -651,7 +655,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
if (!blk_mq_request_started(req))
return;
cmd = blk_mq_rq_to_pdu(req);
- cmd->status = -EIO;
+ cmd->status = BLK_STS_IOERR;
blk_mq_complete_request(req);
}
@@ -740,7 +744,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
nbd_config_put(nbd);
return -EINVAL;
}
- cmd->status = 0;
+ cmd->status = BLK_STS_OK;
again:
nsock = config->socks[index];
mutex_lock(&nsock->tx_lock);
@@ -794,7 +798,7 @@ out:
return ret;
}
-static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -818,13 +822,9 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* appropriate.
*/
ret = nbd_handle_cmd(cmd, hctx->queue_num);
- if (ret < 0)
- ret = BLK_MQ_RQ_QUEUE_ERROR;
- if (!ret)
- ret = BLK_MQ_RQ_QUEUE_OK;
complete(&cmd->send_complete);
- return ret;
+ return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
}
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
@@ -910,6 +910,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
continue;
}
sk_set_memalloc(sock->sk);
+ sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
old = nsock->sock;
@@ -937,14 +938,6 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
return -ENOSPC;
}
-/* Reset all properties of an NBD device */
-static void nbd_reset(struct nbd_device *nbd)
-{
- nbd->config = NULL;
- nbd->tag_set.timeout = 0;
- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-}
-
static void nbd_bdev_reset(struct block_device *bdev)
{
if (bdev->bd_openers > 1)
@@ -965,8 +958,12 @@ static void nbd_parse_flags(struct nbd_device *nbd)
set_disk_ro(nbd->disk, false);
if (config->flags & NBD_FLAG_SEND_TRIM)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
- if (config->flags & NBD_FLAG_SEND_FLUSH)
- blk_queue_write_cache(nbd->disk->queue, true, false);
+ if (config->flags & NBD_FLAG_SEND_FLUSH) {
+ if (config->flags & NBD_FLAG_SEND_FUA)
+ blk_queue_write_cache(nbd->disk->queue, true, true);
+ else
+ blk_queue_write_cache(nbd->disk->queue, true, false);
+ }
else
blk_queue_write_cache(nbd->disk->queue, false, false);
}
@@ -1029,7 +1026,11 @@ static void nbd_config_put(struct nbd_device *nbd)
}
kfree(config->socks);
}
- nbd_reset(nbd);
+ kfree(nbd->config);
+ nbd->config = NULL;
+
+ nbd->tag_set.timeout = 0;
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
@@ -1075,6 +1076,7 @@ static int nbd_start_device(struct nbd_device *nbd)
return -ENOMEM;
}
sk_set_memalloc(config->socks[i]->sock->sk);
+ config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
atomic_inc(&config->recv_threads);
refcount_inc(&nbd->config_refs);
INIT_WORK(&args->work, recv_work);
@@ -1309,6 +1311,8 @@ static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
seq_puts(s, "NBD_FLAG_READ_ONLY\n");
if (flags & NBD_FLAG_SEND_FLUSH)
seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
+ if (flags & NBD_FLAG_SEND_FUA)
+ seq_puts(s, "NBD_FLAG_SEND_FUA\n");
if (flags & NBD_FLAG_SEND_TRIM)
seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
@@ -1483,7 +1487,6 @@ static int nbd_dev_add(int index)
disk->fops = &nbd_fops;
disk->private_data = nbd;
sprintf(disk->disk_name, "nbd%d", index);
- nbd_reset(nbd);
add_disk(disk);
nbd_total_devices++;
return index;
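
The nbd changes advertise FUA by setting NBD_CMD_FLAG_FUA on writes and by passing fua=true to blk_queue_write_cache() when the server reports NBD_FLAG_SEND_FUA. The queue-side wiring, sketched for any driver (names invented):

    #include <linux/blkdev.h>

    static void my_set_cache(struct request_queue *q, bool wc, bool fua)
    {
            /* second bool: volatile write cache; third: FUA supported */
            blk_queue_write_cache(q, wc, wc && fua);
    }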
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index d946e1eeac8e..71f4422eba81 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -35,7 +35,8 @@ struct nullb {
struct request_queue *q;
struct gendisk *disk;
struct nvm_dev *ndev;
- struct blk_mq_tag_set tag_set;
+ struct blk_mq_tag_set *tag_set;
+ struct blk_mq_tag_set __tag_set;
struct hrtimer timer;
unsigned int queue_depth;
spinlock_t lock;
@@ -50,6 +51,7 @@ static struct mutex lock;
static int null_major;
static int nullb_indexes;
static struct kmem_cache *ppa_cache;
+static struct blk_mq_tag_set tag_set;
enum {
NULL_IRQ_NONE = 0,
@@ -109,7 +111,7 @@ static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");
-static int nr_devices = 2;
+static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
@@ -121,6 +123,10 @@ static bool blocking;
module_param(blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
+static bool shared_tags;
+module_param(shared_tags, bool, S_IRUGO);
+MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
+
static int irqmode = NULL_IRQ_SOFTIRQ;
static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -229,11 +235,11 @@ static void end_cmd(struct nullb_cmd *cmd)
switch (queue_mode) {
case NULL_Q_MQ:
- blk_mq_end_request(cmd->rq, 0);
+ blk_mq_end_request(cmd->rq, BLK_STS_OK);
return;
case NULL_Q_RQ:
INIT_LIST_HEAD(&cmd->rq->queuelist);
- blk_end_request_all(cmd->rq, 0);
+ blk_end_request_all(cmd->rq, BLK_STS_OK);
break;
case NULL_Q_BIO:
bio_endio(cmd->bio);
@@ -356,7 +362,7 @@ static void null_request_fn(struct request_queue *q)
}
}
-static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -373,34 +379,11 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(bd->rq);
null_handle_cmd(cmd);
- return BLK_MQ_RQ_QUEUE_OK;
-}
-
-static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
-{
- BUG_ON(!nullb);
- BUG_ON(!nq);
-
- init_waitqueue_head(&nq->wait);
- nq->queue_depth = nullb->queue_depth;
-}
-
-static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int index)
-{
- struct nullb *nullb = data;
- struct nullb_queue *nq = &nullb->queues[index];
-
- hctx->driver_data = nq;
- null_init_queue(nullb, nq);
- nullb->nr_queues++;
-
- return 0;
+ return BLK_STS_OK;
}
static const struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
- .init_hctx = null_init_hctx,
.complete = null_softirq_done_fn,
};
@@ -422,11 +405,12 @@ static void cleanup_queues(struct nullb *nullb)
#ifdef CONFIG_NVM
-static void null_lnvm_end_io(struct request *rq, int error)
+static void null_lnvm_end_io(struct request *rq, blk_status_t status)
{
struct nvm_rq *rqd = rq->end_io_data;
- rqd->error = error;
+ /* XXX: lightnvm core seems to expect NVM_RSP_* values here.. */
+ rqd->error = status ? -EIO : 0;
nvm_end_io(rqd);
blk_put_request(rq);
@@ -591,8 +575,8 @@ static void null_del_dev(struct nullb *nullb)
else
del_gendisk(nullb->disk);
blk_cleanup_queue(nullb->q);
- if (queue_mode == NULL_Q_MQ)
- blk_mq_free_tag_set(&nullb->tag_set);
+ if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+ blk_mq_free_tag_set(nullb->tag_set);
if (!use_lightnvm)
put_disk(nullb->disk);
cleanup_queues(nullb);
@@ -614,6 +598,32 @@ static const struct block_device_operations null_fops = {
.release = null_release,
};
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+ BUG_ON(!nullb);
+ BUG_ON(!nq);
+
+ init_waitqueue_head(&nq->wait);
+ nq->queue_depth = nullb->queue_depth;
+}
+
+static void null_init_queues(struct nullb *nullb)
+{
+ struct request_queue *q = nullb->q;
+ struct blk_mq_hw_ctx *hctx;
+ struct nullb_queue *nq;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (!hctx->nr_ctx || !hctx->tags)
+ continue;
+ nq = &nullb->queues[i];
+ hctx->driver_data = nq;
+ null_init_queue(nullb, nq);
+ nullb->nr_queues++;
+ }
+}
+
static int setup_commands(struct nullb_queue *nq)
{
struct nullb_cmd *cmd;
@@ -694,6 +704,22 @@ static int null_gendisk_register(struct nullb *nullb)
return 0;
}
+static int null_init_tag_set(struct blk_mq_tag_set *set)
+{
+ set->ops = &null_mq_ops;
+ set->nr_hw_queues = submit_queues;
+ set->queue_depth = hw_queue_depth;
+ set->numa_node = home_node;
+ set->cmd_size = sizeof(struct nullb_cmd);
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ set->driver_data = NULL;
+
+ if (blocking)
+ set->flags |= BLK_MQ_F_BLOCKING;
+
+ return blk_mq_alloc_tag_set(set);
+}
+
static int null_add_dev(void)
{
struct nullb *nullb;
@@ -715,26 +741,23 @@ static int null_add_dev(void)
goto out_free_nullb;
if (queue_mode == NULL_Q_MQ) {
- nullb->tag_set.ops = &null_mq_ops;
- nullb->tag_set.nr_hw_queues = submit_queues;
- nullb->tag_set.queue_depth = hw_queue_depth;
- nullb->tag_set.numa_node = home_node;
- nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
- nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- nullb->tag_set.driver_data = nullb;
-
- if (blocking)
- nullb->tag_set.flags |= BLK_MQ_F_BLOCKING;
-
- rv = blk_mq_alloc_tag_set(&nullb->tag_set);
+ if (shared_tags) {
+ nullb->tag_set = &tag_set;
+ rv = 0;
+ } else {
+ nullb->tag_set = &nullb->__tag_set;
+ rv = null_init_tag_set(nullb->tag_set);
+ }
+
if (rv)
goto out_cleanup_queues;
- nullb->q = blk_mq_init_queue(&nullb->tag_set);
+ nullb->q = blk_mq_init_queue(nullb->tag_set);
if (IS_ERR(nullb->q)) {
rv = -ENOMEM;
goto out_cleanup_tags;
}
+ null_init_queues(nullb);
} else if (queue_mode == NULL_Q_BIO) {
nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
if (!nullb->q) {
@@ -787,8 +810,8 @@ static int null_add_dev(void)
out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q);
out_cleanup_tags:
- if (queue_mode == NULL_Q_MQ)
- blk_mq_free_tag_set(&nullb->tag_set);
+ if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+ blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
cleanup_queues(nullb);
out_free_nullb:
@@ -821,6 +844,9 @@ static int __init null_init(void)
queue_mode = NULL_Q_MQ;
}
+ if (queue_mode == NULL_Q_MQ && shared_tags)
+ null_init_tag_set(&tag_set);
+
if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
if (submit_queues < nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.",
@@ -881,6 +907,9 @@ static void __exit null_exit(void)
}
mutex_unlock(&lock);
+ if (queue_mode == NULL_Q_MQ && shared_tags)
+ blk_mq_free_tag_set(&tag_set);
+
kmem_cache_destroy(ppa_cache);
}
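
null_blk's new shared_tags parameter keeps one module-wide blk_mq_tag_set and points every device's nullb->tag_set at it, so all queues draw from a common tag space; per-device sets remain the default. Sketch of the sharing pattern (assumes blk_mq_alloc_tag_set() already succeeded on the set):

    #include <linux/blk-mq.h>

    static struct blk_mq_tag_set shared_set; /* filled once at module init */

    static struct request_queue *my_alloc_queue(void)
    {
            /* every queue built from the same set shares its tags */
            return blk_mq_init_queue(&shared_set);
    }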
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index b1267ef34d5a..7b8c6368beb7 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -305,6 +305,7 @@ static void pcd_init_units(void)
put_disk(disk);
continue;
}
+ blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
cd->disk = disk;
cd->pi = &cd->pia;
cd->present = 0;
@@ -783,7 +784,7 @@ static void pcd_request(void)
ps_set_intr(do_pcd_read, NULL, 0, nice);
return;
} else {
- __blk_end_request_all(pcd_req, -EIO);
+ __blk_end_request_all(pcd_req, BLK_STS_IOERR);
pcd_req = NULL;
}
}
@@ -794,7 +795,7 @@ static void do_pcd_request(struct request_queue *q)
pcd_request();
}
-static inline void next_request(int err)
+static inline void next_request(blk_status_t err)
{
unsigned long saved_flags;
@@ -837,7 +838,7 @@ static void pcd_start(void)
if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
pcd_bufblk = -1;
- next_request(-EIO);
+ next_request(BLK_STS_IOERR);
return;
}
@@ -871,7 +872,7 @@ static void do_pcd_read_drq(void)
return;
}
pcd_bufblk = -1;
- next_request(-EIO);
+ next_request(BLK_STS_IOERR);
return;
}
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 7d2402f90978..27a44b97393a 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -438,7 +438,7 @@ static void run_fsm(void)
phase = NULL;
spin_lock_irqsave(&pd_lock, saved_flags);
if (!__blk_end_request_cur(pd_req,
- res == Ok ? 0 : -EIO)) {
+ res == Ok ? 0 : BLK_STS_IOERR)) {
if (!set_next_request())
stop = 1;
}
@@ -863,6 +863,7 @@ static void pd_probe_drive(struct pd_unit *disk)
return;
}
blk_queue_max_hw_sectors(p->queue, cluster);
+ blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
if (disk->drive == -1) {
for (disk->drive = 0; disk->drive <= 1; disk->drive++)
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index f24ca7315ddc..eef7a91f667d 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -293,6 +293,7 @@ static void __init pf_init_units(void)
return;
}
blk_queue_max_segments(disk->queue, cluster);
+ blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
pf->disk = disk;
pf->pi = &pf->pia;
pf->media_status = PF_NM;
@@ -801,7 +802,7 @@ static int set_next_request(void)
return pf_req != NULL;
}
-static void pf_end_request(int err)
+static void pf_end_request(blk_status_t err)
{
if (pf_req && !__blk_end_request_cur(pf_req, err))
pf_req = NULL;
@@ -821,7 +822,7 @@ repeat:
pf_count = blk_rq_cur_sectors(pf_req);
if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
- pf_end_request(-EIO);
+ pf_end_request(BLK_STS_IOERR);
goto repeat;
}
@@ -836,7 +837,7 @@ repeat:
pi_do_claimed(pf_current->pi, do_pf_write);
else {
pf_busy = 0;
- pf_end_request(-EIO);
+ pf_end_request(BLK_STS_IOERR);
goto repeat;
}
}
@@ -868,7 +869,7 @@ static int pf_next_buf(void)
return 0;
}
-static inline void next_request(int err)
+static inline void next_request(blk_status_t err)
{
unsigned long saved_flags;
@@ -896,7 +897,7 @@ static void do_pf_read_start(void)
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
- next_request(-EIO);
+ next_request(BLK_STS_IOERR);
return;
}
pf_mask = STAT_DRQ;
@@ -915,7 +916,7 @@ static void do_pf_read_drq(void)
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
- next_request(-EIO);
+ next_request(BLK_STS_IOERR);
return;
}
pi_read_block(pf_current->pi, pf_buf, 512);
@@ -942,7 +943,7 @@ static void do_pf_write_start(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(-EIO);
+ next_request(BLK_STS_IOERR);
return;
}
@@ -955,7 +956,7 @@ static void do_pf_write_start(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(-EIO);
+ next_request(BLK_STS_IOERR);
return;
}
pi_write_block(pf_current->pi, pf_buf, 512);
@@ -975,7 +976,7 @@ static void do_pf_write_done(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
- next_request(-EIO);
+ next_request(BLK_STS_IOERR);
return;
}
pi_disconnect(pf_current->pi);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 205b865ebeb9..6b8b097abbb9 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -98,6 +98,7 @@ static int write_congestion_on = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;
+static struct bio_set *pkt_bio_set;
static struct class *class_pktcdvd = NULL; /* /sys/class/pktcdvd */
static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
@@ -348,9 +349,9 @@ static void class_pktcdvd_release(struct class *cls)
{
kfree(cls);
}
-static ssize_t class_pktcdvd_show_map(struct class *c,
- struct class_attribute *attr,
- char *data)
+
+static ssize_t device_map_show(struct class *c, struct class_attribute *attr,
+ char *data)
{
int n = 0;
int idx;
@@ -368,11 +369,10 @@ static ssize_t class_pktcdvd_show_map(struct class *c,
mutex_unlock(&ctl_mutex);
return n;
}
+static CLASS_ATTR_RO(device_map);
-static ssize_t class_pktcdvd_store_add(struct class *c,
- struct class_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t add_store(struct class *c, struct class_attribute *attr,
+ const char *buf, size_t count)
{
unsigned int major, minor;
@@ -390,11 +390,10 @@ static ssize_t class_pktcdvd_store_add(struct class *c,
return -EINVAL;
}
+static CLASS_ATTR_WO(add);
-static ssize_t class_pktcdvd_store_remove(struct class *c,
- struct class_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t remove_store(struct class *c, struct class_attribute *attr,
+ const char *buf, size_t count)
{
unsigned int major, minor;
if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
@@ -403,14 +402,15 @@ static ssize_t class_pktcdvd_store_remove(struct class *c,
}
return -EINVAL;
}
+static CLASS_ATTR_WO(remove);
-static struct class_attribute class_pktcdvd_attrs[] = {
- __ATTR(add, 0200, NULL, class_pktcdvd_store_add),
- __ATTR(remove, 0200, NULL, class_pktcdvd_store_remove),
- __ATTR(device_map, 0444, class_pktcdvd_show_map, NULL),
- __ATTR_NULL
+static struct attribute *class_pktcdvd_attrs[] = {
+ &class_attr_add.attr,
+ &class_attr_remove.attr,
+ &class_attr_device_map.attr,
+ NULL,
};
-
+ATTRIBUTE_GROUPS(class_pktcdvd);
static int pkt_sysfs_init(void)
{
@@ -426,7 +426,7 @@ static int pkt_sysfs_init(void)
class_pktcdvd->name = DRIVER_NAME;
class_pktcdvd->owner = THIS_MODULE;
class_pktcdvd->class_release = class_pktcdvd_release;
- class_pktcdvd->class_attrs = class_pktcdvd_attrs;
+ class_pktcdvd->class_groups = class_pktcdvd_groups;
ret = class_register(class_pktcdvd);
if (ret) {
kfree(class_pktcdvd);
@@ -707,7 +707,6 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
if (IS_ERR(rq))
return PTR_ERR(rq);
- scsi_req_init(rq);
if (cgc->buflen) {
ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
@@ -952,9 +951,9 @@ static void pkt_end_io_read(struct bio *bio)
pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
bio, (unsigned long long)pkt->sector,
- (unsigned long long)bio->bi_iter.bi_sector, bio->bi_error);
+ (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);
- if (bio->bi_error)
+ if (bio->bi_status)
atomic_inc(&pkt->io_errors);
if (atomic_dec_and_test(&pkt->io_wait)) {
atomic_inc(&pkt->run_sm);
@@ -969,7 +968,7 @@ static void pkt_end_io_packet_write(struct bio *bio)
struct pktcdvd_device *pd = pkt->pd;
BUG_ON(!pd);
- pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error);
+ pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);
pd->stats.pkt_ended++;
@@ -1305,16 +1304,16 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt_queue_bio(pd, pkt->w_bio);
}
-static void pkt_finish_packet(struct packet_data *pkt, int error)
+static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
{
struct bio *bio;
- if (error)
+ if (status)
pkt->cache_valid = 0;
/* Finish all bios corresponding to this packet */
while ((bio = bio_list_pop(&pkt->orig_bios))) {
- bio->bi_error = error;
+ bio->bi_status = status;
bio_endio(bio);
}
}
@@ -1349,7 +1348,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
if (atomic_read(&pkt->io_wait) > 0)
return;
- if (!pkt->w_bio->bi_error) {
+ if (!pkt->w_bio->bi_status) {
pkt_set_state(pkt, PACKET_FINISHED_STATE);
} else {
pkt_set_state(pkt, PACKET_RECOVERY_STATE);
@@ -1366,7 +1365,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
break;
case PACKET_FINISHED_STATE:
- pkt_finish_packet(pkt, pkt->w_bio->bi_error);
+ pkt_finish_packet(pkt, pkt->w_bio->bi_status);
return;
default:
@@ -2301,7 +2300,7 @@ static void pkt_end_io_read_cloned(struct bio *bio)
struct packet_stacked_data *psd = bio->bi_private;
struct pktcdvd_device *pd = psd->pd;
- psd->bio->bi_error = bio->bi_error;
+ psd->bio->bi_status = bio->bi_status;
bio_put(bio);
bio_endio(psd->bio);
mempool_free(psd, psd_pool);
@@ -2310,7 +2309,7 @@ static void pkt_end_io_read_cloned(struct bio *bio)
static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
- struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
+ struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, pkt_bio_set);
struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
psd->pd = pd;
@@ -2412,9 +2411,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
char b[BDEVNAME_SIZE];
struct bio *split;
- blk_queue_bounce(q, &bio);
-
- blk_queue_split(q, &bio, q->bio_split);
+ blk_queue_split(q, &bio);
pd = q->queuedata;
if (!pd) {
@@ -2455,7 +2452,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
split = bio_split(bio, last_zone -
bio->bi_iter.bi_sector,
- GFP_NOIO, fs_bio_set);
+ GFP_NOIO, pkt_bio_set);
bio_chain(split, bio);
} else {
split = bio;
@@ -2583,6 +2580,11 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
bdev = bdget(dev);
if (!bdev)
return -ENOMEM;
+ if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
+ WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
+ bdput(bdev);
+ return -EINVAL;
+ }
ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
if (ret)
return ret;
@@ -2919,6 +2921,11 @@ static int __init pkt_init(void)
sizeof(struct packet_stacked_data));
if (!psd_pool)
return -ENOMEM;
+ pkt_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
+ if (!pkt_bio_set) {
+ mempool_destroy(psd_pool);
+ return -ENOMEM;
+ }
ret = register_blkdev(pktdev_major, DRIVER_NAME);
if (ret < 0) {
@@ -2951,6 +2958,7 @@ out:
unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
mempool_destroy(psd_pool);
+ bioset_free(pkt_bio_set);
return ret;
}
@@ -2964,6 +2972,7 @@ static void __exit pkt_exit(void)
unregister_blkdev(pktdev_major, DRIVER_NAME);
mempool_destroy(psd_pool);
+ bioset_free(pkt_bio_set);
}
MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index a809e3e9feb8..075662f2cf46 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -158,7 +158,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
__LINE__, op, res);
- __blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, BLK_STS_IOERR);
return 0;
}
@@ -180,7 +180,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
__func__, __LINE__, res);
- __blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, BLK_STS_IOERR);
return 0;
}
@@ -208,7 +208,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
break;
default:
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
- __blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, BLK_STS_IOERR);
}
}
}
@@ -231,7 +231,8 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
struct ps3_storage_device *dev = data;
struct ps3disk_private *priv;
struct request *req;
- int res, read, error;
+ int res, read;
+ blk_status_t error;
u64 tag, status;
const char *op;
@@ -269,7 +270,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
if (status) {
dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
__LINE__, op, status);
- error = -EIO;
+ error = BLK_STS_IOERR;
} else {
dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__,
__LINE__, op);
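ps3disk still sits on the legacy request_fn path, so its conversion is purely a type change: completions take a blk_status_t (BLK_STS_OK/BLK_STS_IOERR) instead of 0/-EIO. Since blk_status_t is a distinct __bitwise type, sparse now flags any leftover errno handed to a completion helper. A one-function sketch of the converted shape (not from this driver):

/* Sketch: legacy (non-blk-mq) completion with blk_status_t. */
static void example_complete(struct request *req, bool ok)
{
	__blk_end_request_all(req, ok ? BLK_STS_OK : BLK_STS_IOERR);
}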
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 456b4fe21559..e0e81cacd781 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -428,7 +428,7 @@ static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev)
kfree(priv->cache.tags);
}
-static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
+static blk_status_t ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -438,7 +438,7 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
(unsigned int)from, len);
if (from >= priv->size)
- return -EIO;
+ return BLK_STS_IOERR;
if (len > priv->size - from)
len = priv->size - from;
@@ -472,14 +472,14 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
return 0;
}
-static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
+static blk_status_t ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
size_t len, size_t *retlen, const u_char *buf)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
unsigned int cached, count;
if (to >= priv->size)
- return -EIO;
+ return BLK_STS_IOERR;
if (len > priv->size - to)
len = priv->size - to;
@@ -554,7 +554,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
int write = bio_data_dir(bio) == WRITE;
const char *op = write ? "write" : "read";
loff_t offset = bio->bi_iter.bi_sector << 9;
- int error = 0;
+ blk_status_t error = 0;
struct bio_vec bvec;
struct bvec_iter iter;
struct bio *next;
@@ -578,7 +578,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
if (retlen != len) {
dev_err(&dev->core, "Short %s\n", op);
- error = -EIO;
+ error = BLK_STS_IOERR;
goto out;
}
@@ -593,7 +593,7 @@ out:
next = bio_list_peek(&priv->list);
spin_unlock_irq(&priv->lock);
- bio->bi_error = error;
+ bio->bi_status = error;
bio_endio(bio);
return next;
}
@@ -606,7 +606,7 @@ static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio)
dev_dbg(&dev->core, "%s\n", __func__);
- blk_queue_split(q, &bio, q->bio_split);
+ blk_queue_split(q, &bio);
spin_lock_irq(&priv->lock);
busy = !bio_list_empty(&priv->list);
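The two-argument blk_queue_split() seen here (and in the pktcdvd, rsxx and umem hunks) reflects that the split bio_set moved into the request_queue itself, so make_request functions no longer pass q->bio_split explicitly. A sketch of the resulting shape of a bio-based driver entry point, assuming a trivial device:

/* Sketch: make_request_fn after the blk_queue_split() signature change. */
static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
{
	blk_queue_split(q, &bio);	/* splits via q->bio_split internally */

	/* ... service the (possibly shortened) bio ... */

	bio->bi_status = BLK_STS_OK;	/* bi_error is gone; bi_status is blk_status_t */
	bio_endio(bio);
	return BLK_QC_T_NONE;
}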
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 454bf9c34882..b008b6a98098 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -442,6 +442,8 @@ static DEFINE_SPINLOCK(rbd_client_list_lock);
static struct kmem_cache *rbd_img_request_cache;
static struct kmem_cache *rbd_obj_request_cache;
+static struct bio_set *rbd_bio_clone;
+
static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);
@@ -1363,7 +1365,7 @@ static struct bio *bio_clone_range(struct bio *bio_src,
{
struct bio *bio;
- bio = bio_clone(bio_src, gfpmask);
+ bio = bio_clone_fast(bio_src, gfpmask, rbd_bio_clone);
if (!bio)
return NULL; /* ENOMEM */
@@ -2293,11 +2295,13 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
rbd_assert(img_request->obj_request != NULL);
more = obj_request->which < img_request->obj_request_count - 1;
} else {
+ blk_status_t status = errno_to_blk_status(result);
+
rbd_assert(img_request->rq != NULL);
- more = blk_update_request(img_request->rq, result, xferred);
+ more = blk_update_request(img_request->rq, status, xferred);
if (!more)
- __blk_mq_end_request(img_request->rq, result);
+ __blk_mq_end_request(img_request->rq, status);
}
return more;
@@ -4023,6 +4027,7 @@ static void rbd_queue_workfn(struct work_struct *work)
switch (req_op(rq)) {
case REQ_OP_DISCARD:
+ case REQ_OP_WRITE_ZEROES:
op_type = OBJ_OP_DISCARD;
break;
case REQ_OP_WRITE:
@@ -4149,17 +4154,17 @@ err_rq:
obj_op_name(op_type), length, offset, result);
ceph_put_snap_context(snapc);
err:
- blk_mq_end_request(rq, result);
+ blk_mq_end_request(rq, errno_to_blk_status(result));
}
-static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
struct work_struct *work = blk_mq_rq_to_pdu(rq);
queue_work(rbd_wq, work);
- return BLK_MQ_RQ_QUEUE_OK;
+ return BLK_STS_OK;
}
static void rbd_free_disk(struct rbd_device *rbd_dev)
@@ -4420,6 +4425,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q->limits.discard_granularity = segment_size;
q->limits.discard_alignment = segment_size;
blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
+ blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
@@ -6412,8 +6418,16 @@ static int rbd_slab_init(void)
if (!rbd_obj_request_cache)
goto out_err;
+ rbd_assert(!rbd_bio_clone);
+ rbd_bio_clone = bioset_create(BIO_POOL_SIZE, 0, 0);
+ if (!rbd_bio_clone)
+ goto out_err_clone;
+
return 0;
+out_err_clone:
+ kmem_cache_destroy(rbd_obj_request_cache);
+ rbd_obj_request_cache = NULL;
out_err:
kmem_cache_destroy(rbd_img_request_cache);
rbd_img_request_cache = NULL;
@@ -6429,6 +6443,10 @@ static void rbd_slab_exit(void)
rbd_assert(rbd_img_request_cache);
kmem_cache_destroy(rbd_img_request_cache);
rbd_img_request_cache = NULL;
+
+ rbd_assert(rbd_bio_clone);
+ bioset_free(rbd_bio_clone);
+ rbd_bio_clone = NULL;
}
static int __init rbd_init(void)
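As in pktcdvd, rbd stops using bio_clone(), which cloned through the global fs_bio_set and copied the whole bvec table. Each driver now owns a private bio_set and clones with bio_clone_fast(), which shares the source bio's bvecs instead of copying them. The lifecycle, reduced to a sketch (the example_* names are hypothetical):

static struct bio_set *example_bio_set;

static int __init example_init(void)
{
	/* pool size, front_pad, flags; matching the hunks above */
	example_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
	if (!example_bio_set)
		return -ENOMEM;
	return 0;
}

static struct bio *example_clone(struct bio *src)
{
	/* Shares src's bvec table rather than copying it. */
	return bio_clone_fast(src, GFP_NOIO, example_bio_set);
}

static void __exit example_exit(void)
{
	bioset_free(example_bio_set);
}

The rbd hunk also unwinds the new bioset on slab-init failure, which is why out_err_clone tears down rbd_obj_request_cache before falling through to out_err.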
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 9c566364ac9c..7f4acebf4657 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -149,9 +149,9 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
{
struct rsxx_cardinfo *card = q->queuedata;
struct rsxx_bio_meta *bio_meta;
- int st = -EINVAL;
+ blk_status_t st = BLK_STS_IOERR;
- blk_queue_split(q, &bio, q->bio_split);
+ blk_queue_split(q, &bio);
might_sleep();
@@ -161,15 +161,11 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
if (bio_end_sector(bio) > get_capacity(card->gendisk))
goto req_err;
- if (unlikely(card->halt)) {
- st = -EFAULT;
+ if (unlikely(card->halt))
goto req_err;
- }
- if (unlikely(card->dma_fault)) {
- st = (-EFAULT);
+ if (unlikely(card->dma_fault))
goto req_err;
- }
if (bio->bi_iter.bi_size == 0) {
dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
@@ -178,7 +174,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
if (!bio_meta) {
- st = -ENOMEM;
+ st = BLK_STS_RESOURCE;
goto req_err;
}
@@ -205,7 +201,7 @@ queue_err:
kmem_cache_free(bio_meta_pool, bio_meta);
req_err:
if (st)
- bio->bi_error = st;
+ bio->bi_status = st;
bio_endio(bio);
return BLK_QC_T_NONE;
}
@@ -288,7 +284,6 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
}
blk_queue_make_request(card->queue, rsxx_make_request);
- blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 5a20385f87d0..6a1b2177951c 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -611,7 +611,7 @@ static void rsxx_schedule_done(struct work_struct *work)
mutex_unlock(&ctrl->work_lock);
}
-static int rsxx_queue_discard(struct rsxx_cardinfo *card,
+static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card,
struct list_head *q,
unsigned int laddr,
rsxx_dma_cb cb,
@@ -621,7 +621,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card,
dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
if (!dma)
- return -ENOMEM;
+ return BLK_STS_RESOURCE;
dma->cmd = HW_CMD_BLK_DISCARD;
dma->laddr = laddr;
@@ -640,7 +640,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card,
return 0;
}
-static int rsxx_queue_dma(struct rsxx_cardinfo *card,
+static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card,
struct list_head *q,
int dir,
unsigned int dma_off,
@@ -655,7 +655,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
if (!dma)
- return -ENOMEM;
+ return BLK_STS_RESOURCE;
dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
dma->laddr = laddr;
@@ -677,7 +677,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
return 0;
}
-int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
struct bio *bio,
atomic_t *n_dmas,
rsxx_dma_cb cb,
@@ -694,7 +694,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
unsigned int dma_len;
int dma_cnt[RSXX_MAX_TARGETS];
int tgt;
- int st;
+ blk_status_t st;
int i;
addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
@@ -769,7 +769,6 @@ bvec_err:
for (i = 0; i < card->n_targets; i++)
rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
FREE_DMA);
-
return st;
}
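rsxx's queueing helpers now return blk_status_t directly, and the interesting part is the mapping: kmem_cache_alloc() failure becomes BLK_STS_RESOURCE, the retryable "out of resources" status, rather than the terminal BLK_STS_IOERR. BLK_STS_OK is numerically 0, so existing "if (st)" checks keep working. A sketch, with a hypothetical example_dma descriptor type:

/* Sketch: mapping internal failures onto blk_status_t. */
static blk_status_t example_queue_unit(struct kmem_cache *pool)
{
	struct example_dma *dma = kmem_cache_alloc(pool, GFP_KERNEL);

	if (!dma)
		return BLK_STS_RESOURCE;	/* transient; caller may retry */

	/* ... fill in and queue the descriptor ... */
	return BLK_STS_OK;			/* 0, so "if (st)" still works */
}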
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index 6bbc64d0f690..277f27e673a2 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -391,7 +391,7 @@ int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
void rsxx_dma_cleanup(void);
void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
int rsxx_dma_configure(struct rsxx_cardinfo *card);
-int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
struct bio *bio,
atomic_t *n_dmas,
rsxx_dma_cb cb,
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 27833e4dae2a..d0368682bd43 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -451,8 +451,8 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev,
- struct skd_request_context *skreq, int error);
-static int skd_preop_sg_list(struct skd_device *skdev,
+ struct skd_request_context *skreq, blk_status_t status);
+static bool skd_preop_sg_list(struct skd_device *skdev,
struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
struct skd_request_context *skreq);
@@ -491,7 +491,7 @@ static void skd_fail_all_pending(struct skd_device *skdev)
if (req == NULL)
break;
blk_start_request(req);
- __blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, BLK_STS_IOERR);
}
}
@@ -545,7 +545,6 @@ static void skd_request_fn(struct request_queue *q)
struct request *req = NULL;
struct skd_scsi_request *scsi_req;
unsigned long io_flags;
- int error;
u32 lba;
u32 count;
int data_dir;
@@ -716,9 +715,7 @@ static void skd_request_fn(struct request_queue *q)
if (!req->bio)
goto skip_sg;
- error = skd_preop_sg_list(skdev, skreq);
-
- if (error != 0) {
+ if (!skd_preop_sg_list(skdev, skreq)) {
/*
* Complete the native request with error.
* Note that the request context is still at the
@@ -730,7 +727,7 @@ static void skd_request_fn(struct request_queue *q)
*/
pr_debug("%s:%s:%d error Out\n",
skdev->name, __func__, __LINE__);
- skd_end_request(skdev, skreq, error);
+ skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
continue;
}
@@ -805,7 +802,7 @@ skip_sg:
}
static void skd_end_request(struct skd_device *skdev,
- struct skd_request_context *skreq, int error)
+ struct skd_request_context *skreq, blk_status_t error)
{
if (unlikely(error)) {
struct request *req = skreq->req;
@@ -822,7 +819,7 @@ static void skd_end_request(struct skd_device *skdev,
__blk_end_request_all(skreq->req, error);
}
-static int skd_preop_sg_list(struct skd_device *skdev,
+static bool skd_preop_sg_list(struct skd_device *skdev,
struct skd_request_context *skreq)
{
struct request *req = skreq->req;
@@ -839,7 +836,7 @@ static int skd_preop_sg_list(struct skd_device *skdev,
n_sg = blk_rq_map_sg(skdev->queue, req, sg);
if (n_sg <= 0)
- return -EINVAL;
+ return false;
/*
* Map scatterlist to PCI bus addresses.
@@ -847,7 +844,7 @@ static int skd_preop_sg_list(struct skd_device *skdev,
*/
n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
if (n_sg <= 0)
- return -EINVAL;
+ return false;
SKD_ASSERT(n_sg <= skdev->sgs_per_request);
@@ -882,7 +879,7 @@ static int skd_preop_sg_list(struct skd_device *skdev,
}
}
- return 0;
+ return true;
}
static void skd_postop_sg_list(struct skd_device *skdev,
@@ -2333,7 +2330,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
case SKD_CHECK_STATUS_REPORT_GOOD:
case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
- skd_end_request(skdev, skreq, 0);
+ skd_end_request(skdev, skreq, BLK_STS_OK);
break;
case SKD_CHECK_STATUS_BUSY_IMMINENT:
@@ -2355,7 +2352,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
case SKD_CHECK_STATUS_REPORT_ERROR:
default:
- skd_end_request(skdev, skreq, -EIO);
+ skd_end_request(skdev, skreq, BLK_STS_IOERR);
break;
}
}
@@ -2748,7 +2745,7 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
* native request.
*/
if (likely(cmp_status == SAM_STAT_GOOD))
- skd_end_request(skdev, skreq, 0);
+ skd_end_request(skdev, skreq, BLK_STS_OK);
else
skd_resolve_req_exception(skdev, skreq);
}
@@ -3190,7 +3187,7 @@ static void skd_recover_requests(struct skd_device *skdev, int requeue)
SKD_MAX_RETRIES)
blk_requeue_request(skdev->queue, skreq->req);
else
- skd_end_request(skdev, skreq, -EIO);
+ skd_end_request(skdev, skreq, BLK_STS_IOERR);
skreq->req = NULL;
@@ -4276,6 +4273,7 @@ static int skd_cons_disk(struct skd_device *skdev)
rc = -ENOMEM;
goto err_out;
}
+ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
skdev->queue = q;
disk->queue = q;
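The blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH) calls added to skd (and to swim, swim3 and xsysace below) are the flip side of the deletions in rsxx, virtio_blk and xen-blkfront: the block core no longer installs a bounce limit by default, so drivers whose hardware cannot reach highmem must opt in, while the old BLK_BOUNCE_ANY calls became no-ops to delete. Separately, skd_preop_sg_list() now returns bool, and its failure is reported as BLK_STS_RESOURCE, treating an sg-mapping failure as transient rather than as a media error. A sketch of the opt-in during queue setup (example_request_fn and example_lock are placeholders):

/* Sketch: bouncing is now opt-in for drivers that cannot DMA to highmem. */
q = blk_init_queue(example_request_fn, &example_lock);
if (!q)
	return -ENOMEM;
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);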
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 3f3a3ab3d50a..6b16ead1da58 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -316,7 +316,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
rqe->req = NULL;
- __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
+ __blk_end_request(req, (desc->status ? BLK_STS_IOERR : 0), desc->size);
vdc_blk_queue_start(port);
}
@@ -1023,7 +1023,7 @@ static void vdc_queue_drain(struct vdc_port *port)
struct request *req;
while ((req = blk_fetch_request(port->disk->queue)) != NULL)
- __blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, BLK_STS_IOERR);
}
static void vdc_ldc_reset_timer(unsigned long _arg)
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 3064be6cf375..84434d3ea19b 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -493,7 +493,7 @@ static inline int swim_read_sector(struct floppy_state *fs,
return ret;
}
-static int floppy_read_sectors(struct floppy_state *fs,
+static blk_status_t floppy_read_sectors(struct floppy_state *fs,
int req_sector, int sectors_nb,
unsigned char *buffer)
{
@@ -516,7 +516,7 @@ static int floppy_read_sectors(struct floppy_state *fs,
ret = swim_read_sector(fs, side, track, sector,
buffer);
if (try-- == 0)
- return -EIO;
+ return BLK_STS_IOERR;
} while (ret != 512);
buffer += ret;
@@ -553,7 +553,7 @@ static void do_fd_request(struct request_queue *q)
req = swim_next_request(swd);
while (req) {
- int err = -EIO;
+ blk_status_t err = BLK_STS_IOERR;
fs = req->rq_disk->private_data;
if (blk_rq_pos(req) >= fs->total_secs)
@@ -864,6 +864,8 @@ static int swim_floppy_init(struct swim_priv *swd)
put_disk(swd->unit[drive].disk);
goto exit_put_disks;
}
+ blk_queue_bounce_limit(swd->unit[drive].disk->queue,
+ BLK_BOUNCE_HIGH);
swd->unit[drive].disk->queue->queuedata = swd;
swd->unit[drive].swd = swd;
}
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ba4809c9bdba..9f931f8f6b4c 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -257,7 +257,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
-static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)
{
struct request *req = fs->cur_req;
int rc;
@@ -334,7 +334,7 @@ static void start_request(struct floppy_state *fs)
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD) {
swim3_dbg("%s", " media bay absent, dropping req\n");
- swim3_end_request(fs, -ENODEV, 0);
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
continue;
}
@@ -350,12 +350,12 @@ static void start_request(struct floppy_state *fs)
if (blk_rq_pos(req) >= fs->total_secs) {
swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
(long)blk_rq_pos(req), (long)fs->total_secs);
- swim3_end_request(fs, -EIO, 0);
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
continue;
}
if (fs->ejected) {
swim3_dbg("%s", " disk ejected\n");
- swim3_end_request(fs, -EIO, 0);
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
continue;
}
@@ -364,7 +364,7 @@ static void start_request(struct floppy_state *fs)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
swim3_dbg("%s", " try to write, disk write protected\n");
- swim3_end_request(fs, -EIO, 0);
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
continue;
}
}
@@ -548,7 +548,7 @@ static void act(struct floppy_state *fs)
if (fs->retries > 5) {
swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
fs->req_cyl, fs->cur_cyl);
- swim3_end_request(fs, -EIO, 0);
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
return;
}
@@ -584,7 +584,7 @@ static void scan_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request(fs, -EIO, 0);
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
} else {
@@ -608,7 +608,7 @@ static void seek_timeout(unsigned long data)
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
swim3_err("%s", "Seek timeout\n");
- swim3_end_request(fs, -EIO, 0);
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
@@ -637,7 +637,7 @@ static void settle_timeout(unsigned long data)
goto unlock;
}
swim3_err("%s", "Seek settle timeout\n");
- swim3_end_request(fs, -EIO, 0);
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
unlock:
@@ -666,7 +666,7 @@ static void xfer_timeout(unsigned long data)
swim3_err("Timeout %sing sector %ld\n",
(rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
(long)blk_rq_pos(fs->cur_req));
- swim3_end_request(fs, -EIO, 0);
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
@@ -703,7 +703,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
swim3_err("%s", "Seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request(fs, -EIO, 0);
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
} else {
@@ -786,7 +786,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
swim3_err("Error %sing block %ld (err=%x)\n",
rq_data_dir(req) == WRITE? "writ": "read",
(long)blk_rq_pos(req), err);
- swim3_end_request(fs, -EIO, 0);
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
}
} else {
@@ -795,7 +795,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(req), intr, err);
- swim3_end_request(fs, -EIO, 0);
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
start_request(fs);
break;
@@ -1223,6 +1223,7 @@ static int swim3_attach(struct macio_dev *mdev,
put_disk(disk);
return -ENOMEM;
}
+ blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
disk->queue->queuedata = &floppy_states[index];
if (index == 0) {
@@ -1245,7 +1246,7 @@ static int swim3_attach(struct macio_dev *mdev,
return 0;
}
-static struct of_device_id swim3_match[] =
+static const struct of_device_id swim3_match[] =
{
{
.name = "swim3",
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index c8e072caf56f..08586dc14e85 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -745,7 +745,7 @@ static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
static inline void carm_end_request_queued(struct carm_host *host,
struct carm_request *crq,
- int error)
+ blk_status_t error)
{
struct request *req = crq->rq;
int rc;
@@ -791,7 +791,7 @@ static inline void carm_round_robin(struct carm_host *host)
}
static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
- int error)
+ blk_status_t error)
{
carm_end_request_queued(host, crq, error);
if (max_queue == 1)
@@ -869,14 +869,14 @@ queue_one_request:
sg = &crq->sg[0];
n_elem = blk_rq_map_sg(q, rq, sg);
if (n_elem <= 0) {
- carm_end_rq(host, crq, -EIO);
+ carm_end_rq(host, crq, BLK_STS_IOERR);
return; /* request with no s/g entries? */
}
/* map scatterlist to PCI bus addresses */
n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
if (n_elem <= 0) {
- carm_end_rq(host, crq, -EIO);
+ carm_end_rq(host, crq, BLK_STS_IOERR);
return; /* request with no s/g entries? */
}
crq->n_elem = n_elem;
@@ -937,7 +937,7 @@ queue_one_request:
static void carm_handle_array_info(struct carm_host *host,
struct carm_request *crq, u8 *mem,
- int error)
+ blk_status_t error)
{
struct carm_port *port;
u8 *msg_data = mem + sizeof(struct carm_array_info);
@@ -997,7 +997,7 @@ out:
static void carm_handle_scan_chan(struct carm_host *host,
struct carm_request *crq, u8 *mem,
- int error)
+ blk_status_t error)
{
u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
unsigned int i, dev_count = 0;
@@ -1029,7 +1029,7 @@ out:
}
static void carm_handle_generic(struct carm_host *host,
- struct carm_request *crq, int error,
+ struct carm_request *crq, blk_status_t error,
int cur_state, int next_state)
{
DPRINTK("ENTER\n");
@@ -1045,7 +1045,7 @@ static void carm_handle_generic(struct carm_host *host,
}
static inline void carm_handle_rw(struct carm_host *host,
- struct carm_request *crq, int error)
+ struct carm_request *crq, blk_status_t error)
{
int pci_dir;
@@ -1067,7 +1067,7 @@ static inline void carm_handle_resp(struct carm_host *host,
u32 handle = le32_to_cpu(ret_handle_le);
unsigned int msg_idx;
struct carm_request *crq;
- int error = (status == RMSG_OK) ? 0 : -EIO;
+ blk_status_t error = (status == RMSG_OK) ? 0 : BLK_STS_IOERR;
u8 *mem;
VPRINTK("ENTER, handle == 0x%x\n", handle);
@@ -1155,7 +1155,7 @@ static inline void carm_handle_resp(struct carm_host *host,
err_out:
printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
- carm_end_rq(host, crq, -EIO);
+ carm_end_rq(host, crq, BLK_STS_IOERR);
}
static inline void carm_handle_responses(struct carm_host *host)
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index c141cc3be22b..0677d2514665 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -454,7 +454,7 @@ static void process_page(unsigned long data)
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
if (control & DMASCR_HARD_ERROR) {
/* error */
- bio->bi_error = -EIO;
+ bio->bi_status = BLK_STS_IOERR;
dev_printk(KERN_WARNING, &card->dev->dev,
"I/O error on sector %d/%d\n",
le32_to_cpu(desc->local_addr)>>9,
@@ -529,7 +529,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
(unsigned long long)bio->bi_iter.bi_sector,
bio->bi_iter.bi_size);
- blk_queue_split(q, &bio, q->bio_split);
+ blk_queue_split(q, &bio);
spin_lock_irq(&card->lock);
*card->biotail = bio;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 553cc4c542b4..0297ad7c1452 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -64,15 +64,15 @@ struct virtblk_req {
struct scatterlist sg[];
};
-static inline int virtblk_result(struct virtblk_req *vbr)
+static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
switch (vbr->status) {
case VIRTIO_BLK_S_OK:
- return 0;
+ return BLK_STS_OK;
case VIRTIO_BLK_S_UNSUPP:
- return -ENOTTY;
+ return BLK_STS_NOTSUPP;
default:
- return -EIO;
+ return BLK_STS_IOERR;
}
}
@@ -214,7 +214,7 @@ static void virtblk_done(struct virtqueue *vq)
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
-static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct virtio_blk *vblk = hctx->queue->queuedata;
@@ -246,7 +246,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
break;
default:
WARN_ON_ONCE(1);
- return BLK_MQ_RQ_QUEUE_ERROR;
+ return BLK_STS_IOERR;
}
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
@@ -276,8 +276,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
/* Out of mem doesn't actually happen, since we fall back
* to direct descriptors */
if (err == -ENOMEM || err == -ENOSPC)
- return BLK_MQ_RQ_QUEUE_BUSY;
- return BLK_MQ_RQ_QUEUE_ERROR;
+ return BLK_STS_RESOURCE;
+ return BLK_STS_IOERR;
}
if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
@@ -286,7 +286,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
if (notify)
virtqueue_notify(vblk->vqs[qid].vq);
- return BLK_MQ_RQ_QUEUE_OK;
+ return BLK_STS_OK;
}
/* return id (s/n) string for *disk to *id_str
@@ -307,7 +307,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
goto out;
blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
- err = virtblk_result(blk_mq_rq_to_pdu(req));
+ err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
blk_put_request(req);
return err;
@@ -720,9 +720,6 @@ static int virtblk_probe(struct virtio_device *vdev)
/* We can handle whatever the host told us to handle. */
blk_queue_max_segments(q, vblk->sg_elems-2);
- /* No need to bounce any requests */
- blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
-
/* No real sector limit. */
blk_queue_max_hw_sectors(q, -1U);
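virtio_blk shows the blk-mq half of the conversion: ->queue_rq() now returns blk_status_t, collapsing the old BLK_MQ_RQ_QUEUE_{OK,BUSY,ERROR} trio into BLK_STS_{OK,RESOURCE,IOERR} so that submission and completion share one status namespace. At boundaries that still need an errno (here virtblk_get_id()), blk_status_to_errno() translates back. A sketch of the converted dispatch shape, with a hypothetical example_submit() standing in for the virtqueue work:

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	int err = example_submit(bd->rq);	/* e.g. virtqueue_add_sgs() */

	if (err == -ENOMEM || err == -ENOSPC)
		return BLK_STS_RESOURCE;	/* ring full: core requeues */
	if (err)
		return BLK_STS_IOERR;
	return BLK_STS_OK;
}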
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 726c32e35db9..fe7cd58c43d0 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
unsigned long timeout;
int ret;
- xen_blkif_get(blkif);
-
set_freezable();
while (!kthread_should_stop()) {
if (try_to_freeze())
@@ -665,7 +663,6 @@ purge_gnt_list:
print_stats(ring);
ring->xenblkd = NULL;
- xen_blkif_put(blkif);
return 0;
}
@@ -1069,20 +1066,17 @@ static void xen_blk_drain_io(struct xen_blkif_ring *ring)
atomic_set(&blkif->drain, 0);
}
-/*
- * Completion callback on the bio's. Called as bh->b_end_io()
- */
-
-static void __end_block_io_op(struct pending_req *pending_req, int error)
+static void __end_block_io_op(struct pending_req *pending_req,
+ blk_status_t error)
{
/* An error fails the entire request. */
- if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
- (error == -EOPNOTSUPP)) {
+ if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
+ error == BLK_STS_NOTSUPP) {
pr_debug("flush diskcache op failed, not supported\n");
xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
pending_req->status = BLKIF_RSP_EOPNOTSUPP;
- } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
- (error == -EOPNOTSUPP)) {
+ } else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
+ error == BLK_STS_NOTSUPP) {
pr_debug("write barrier op failed, not supported\n");
xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
pending_req->status = BLKIF_RSP_EOPNOTSUPP;
@@ -1106,7 +1100,7 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
*/
static void end_block_io_op(struct bio *bio)
{
- __end_block_io_op(bio->bi_private, bio->bi_error);
+ __end_block_io_op(bio->bi_private, bio->bi_status);
bio_put(bio);
}
@@ -1423,7 +1417,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
for (i = 0; i < nbio; i++)
bio_put(biolist[i]);
atomic_set(&pending_req->pendcnt, 1);
- __end_block_io_op(pending_req, -EINVAL);
+ __end_block_io_op(pending_req, BLK_STS_RESOURCE);
msleep(1); /* back off a bit */
return -EIO;
}
@@ -1436,34 +1430,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
static void make_response(struct xen_blkif_ring *ring, u64 id,
unsigned short op, int st)
{
- struct blkif_response resp;
+ struct blkif_response *resp;
unsigned long flags;
union blkif_back_rings *blk_rings;
int notify;
- resp.id = id;
- resp.operation = op;
- resp.status = st;
-
spin_lock_irqsave(&ring->blk_ring_lock, flags);
blk_rings = &ring->blk_rings;
/* Place on the response ring for the relevant domain. */
switch (ring->blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
- memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->native,
+ blk_rings->native.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_32:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_32,
+ blk_rings->x86_32.rsp_prod_pvt);
break;
case BLKIF_PROTOCOL_X86_64:
- memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
- &resp, sizeof(resp));
+ resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+ blk_rings->x86_64.rsp_prod_pvt);
break;
default:
BUG();
}
+
+ resp->id = id;
+ resp->operation = op;
+ resp->status = st;
+
blk_rings->common.rsp_prod_pvt++;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dea61f6ab8cb..ecb35fe8ca8d 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
struct blkif_common_request {
char dummy;
};
-struct blkif_common_response {
- char dummy;
-};
+
+/* i386 protocol version */
struct blkif_x86_32_request_rw {
uint8_t nr_segments; /* number of segments */
@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
} u;
} __attribute__((__packed__));
-/* i386 protocol version */
-#pragma pack(push, 4)
-struct blkif_x86_32_response {
- uint64_t id; /* copied from request */
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
-#pragma pack(pop)
/* x86_64 protocol version */
struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
} u;
} __attribute__((__packed__));
-struct blkif_x86_64_response {
- uint64_t __attribute__((__aligned__(8))) id;
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
-
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
- struct blkif_common_response);
+ struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
- struct blkif_x86_32_response);
+ struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
- struct blkif_x86_64_response);
+ struct blkif_response);
union blkif_back_rings {
struct blkif_back_ring native;
@@ -281,6 +266,7 @@ struct xen_blkif_ring {
wait_queue_head_t wq;
atomic_t inflight;
+ bool active;
/* One thread per blkif ring. */
struct task_struct *xenblkd;
unsigned int waiting_reqs;
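The common.h hunks delete the three per-ABI response structs because they had become field-for-field identical; all rings can share one canonical layout (reconstructed here from the deleted definitions):

struct blkif_response {
	uint64_t id;		/* copied from request */
	uint8_t  operation;	/* copied from request */
	int16_t  status;	/* BLKIF_RSP_??? */
};

The one ABI wrinkle survives as the __packed in the x86_32 ring definition: the i386 protocol packed this struct to 4-byte alignment (the old #pragma pack(push, 4)), so the 64-bit id must not gain padding there. This unification is also what lets make_response() in blkback.c fill the ring slot in place instead of memcpy()ing a per-protocol struct onto the ring.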
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 8fe61b5dc5a6..792da683e70d 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
init_waitqueue_head(&ring->shutdown_wq);
ring->blkif = blkif;
ring->st_print = jiffies;
- xen_blkif_get(blkif);
+ ring->active = true;
}
return 0;
@@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
struct xen_blkif_ring *ring = &blkif->rings[r];
unsigned int i = 0;
+ if (!ring->active)
+ continue;
+
if (ring->xenblkd) {
kthread_stop(ring->xenblkd);
wake_up(&ring->shutdown_wq);
- ring->xenblkd = NULL;
}
/* The above kthread_stop() guarantees that at this point we
@@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
BUG_ON(ring->free_pages_num != 0);
BUG_ON(ring->persistent_gnt_c != 0);
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
- xen_blkif_put(blkif);
+ ring->active = false;
}
blkif->nr_ring_pages = 0;
/*
@@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
static void xen_blkif_free(struct xen_blkif *blkif)
{
-
- xen_blkif_disconnect(blkif);
+ WARN_ON(xen_blkif_disconnect(blkif));
xen_vbd_free(&blkif->vbd);
+ kfree(blkif->be->mode);
+ kfree(blkif->be);
/* Make sure everything is drained before shutting down */
kmem_cache_free(xen_blkif_cachep, blkif);
@@ -504,13 +507,13 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
dev_set_drvdata(&dev->dev, NULL);
- if (be->blkif)
+ if (be->blkif) {
xen_blkif_disconnect(be->blkif);
- /* Put the reference we set in xen_blkif_alloc(). */
- xen_blkif_put(be->blkif);
- kfree(be->mode);
- kfree(be);
+ /* Put the reference we set in xen_blkif_alloc(). */
+ xen_blkif_put(be->blkif);
+ }
+
return 0;
}
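xen-blkback previously took one blkif reference per ring (and one per running xenblkd thread) and dropped them during disconnect, which made teardown ordering fragile. The rework replaces those references with a per-ring active flag: rings are marked active in xen_blkif_alloc_rings(), skipped on disconnect if they never came up, and marked inactive once drained; xen_blkif_free() now also takes over freeing be->mode and be. The disconnect loop reduces to this pattern:

/* Sketch: per-ring teardown guarded by ring->active. */
for (r = 0; r < blkif->nr_rings; r++) {
	struct xen_blkif_ring *ring = &blkif->rings[r];

	if (!ring->active)
		continue;		/* ring never initialized */

	if (ring->xenblkd)
		kthread_stop(ring->xenblkd);

	/* ... unmap grants, drain free_pages, check counters ... */
	ring->active = false;
}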
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 39459631667c..c852ed3c01d5 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -110,11 +110,6 @@ struct blk_shadow {
unsigned long associated_id;
};
-struct split_bio {
- struct bio *bio;
- atomic_t pending;
-};
-
struct blkif_req {
int error;
(by the end of this series the field is a blk_status_t, matching the
BLK_STS_* values assigned to blkif_req(req)->error in the hunks below)
};
@@ -881,7 +876,7 @@ static inline bool blkif_request_flush_invalid(struct request *req,
!info->feature_fua));
}
-static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *qd)
{
unsigned long flags;
@@ -904,16 +899,16 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
flush_requests(rinfo);
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
- return BLK_MQ_RQ_QUEUE_OK;
+ return BLK_STS_OK;
out_err:
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
- return BLK_MQ_RQ_QUEUE_ERROR;
+ return BLK_STS_IOERR;
out_busy:
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
blk_mq_stop_hw_queue(hctx);
- return BLK_MQ_RQ_QUEUE_BUSY;
+ return BLK_STS_RESOURCE;
}
static void blkif_complete_rq(struct request *rq)
@@ -958,9 +953,6 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
/* Make sure buffer addresses are sector-aligned. */
blk_queue_dma_alignment(rq, 511);
-
- /* Make sure we don't use bounce buffers. */
- blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
}
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -1601,14 +1593,18 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
continue;
}
- blkif_req(req)->error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+ if (bret->status == BLKIF_RSP_OKAY)
+ blkif_req(req)->error = BLK_STS_OK;
+ else
+ blkif_req(req)->error = BLK_STS_IOERR;
+
switch (bret->operation) {
case BLKIF_OP_DISCARD:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- blkif_req(req)->error = -EOPNOTSUPP;
+ blkif_req(req)->error = BLK_STS_NOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
@@ -1626,11 +1622,11 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- blkif_req(req)->error = -EOPNOTSUPP;
+ blkif_req(req)->error = BLK_STS_NOTSUPP;
}
if (unlikely(blkif_req(req)->error)) {
- if (blkif_req(req)->error == -EOPNOTSUPP)
- blkif_req(req)->error = 0;
+ if (blkif_req(req)->error == BLK_STS_NOTSUPP)
+ blkif_req(req)->error = BLK_STS_OK;
info->feature_fua = 0;
info->feature_flush = 0;
xlvbd_flush(info);
@@ -1996,28 +1992,13 @@ static int blkfront_probe(struct xenbus_device *dev,
return 0;
}
-static void split_bio_end(struct bio *bio)
-{
- struct split_bio *split_bio = bio->bi_private;
-
- if (atomic_dec_and_test(&split_bio->pending)) {
- split_bio->bio->bi_phys_segments = 0;
- split_bio->bio->bi_error = bio->bi_error;
- bio_endio(split_bio->bio);
- kfree(split_bio);
- }
- bio_put(bio);
-}
-
static int blkif_recover(struct blkfront_info *info)
{
- unsigned int i, r_index;
+ unsigned int r_index;
struct request *req, *n;
int rc;
- struct bio *bio, *cloned_bio;
- unsigned int segs, offset;
- int pending, size;
- struct split_bio *split_bio;
+ struct bio *bio;
+ unsigned int segs;
blkfront_gather_backend_features(info);
/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
@@ -2056,34 +2037,6 @@ static int blkif_recover(struct blkfront_info *info)
while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
/* Traverse the list of pending bios and re-queue them */
- if (bio_segments(bio) > segs) {
- /*
- * This bio has more segments than what we can
- * handle, we have to split it.
- */
- pending = (bio_segments(bio) + segs - 1) / segs;
- split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
- BUG_ON(split_bio == NULL);
- atomic_set(&split_bio->pending, pending);
- split_bio->bio = bio;
- for (i = 0; i < pending; i++) {
- offset = (i * segs * XEN_PAGE_SIZE) >> 9;
- size = min((unsigned int)(segs * XEN_PAGE_SIZE) >> 9,
- (unsigned int)bio_sectors(bio) - offset);
- cloned_bio = bio_clone(bio, GFP_NOIO);
- BUG_ON(cloned_bio == NULL);
- bio_trim(cloned_bio, offset, size);
- cloned_bio->bi_private = split_bio;
- cloned_bio->bi_end_io = split_bio_end;
- submit_bio(cloned_bio);
- }
- /*
- * Now we have to wait for all those smaller bios to
- * end, so we can also end the "parent" bio.
- */
- continue;
- }
- /* We don't need to split this bio */
submit_bio(bio);
}
@@ -2137,7 +2090,7 @@ static int blkfront_resume(struct xenbus_device *dev)
merge_bio.tail = shadow[j].request->biotail;
bio_list_merge(&info->bio_list, &merge_bio);
shadow[j].request->bio = NULL;
- blk_mq_end_request(shadow[j].request, 0);
+ blk_mq_end_request(shadow[j].request, BLK_STS_OK);
}
}
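blkif_recover() used to split any requeued bio that exceeded the (possibly shrunken) segment count by hand, with cloned bios, bio_trim() and an atomic pending counter in struct split_bio. That is exactly the job blk_queue_split() already does for every bio entering the queue, so resubmitting through submit_bio() is sufficient and the whole split_bio machinery, including split_bio_end(), can go. What remains is a plain requeue loop:

/* Sketch: recovery requeue after the split_bio removal. */
while ((bio = bio_list_pop(&info->bio_list)) != NULL)
	submit_bio(bio);	/* the core splits to the new limits if needed */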
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 757dce2147e0..14459d66ef0c 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -471,7 +471,7 @@ static struct request *ace_get_next_request(struct request_queue *q)
if (!blk_rq_is_passthrough(req))
break;
blk_start_request(req);
- __blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, BLK_STS_IOERR);
}
return req;
}
@@ -499,11 +499,11 @@ static void ace_fsm_dostate(struct ace_device *ace)
/* Drop all in-flight and pending requests */
if (ace->req) {
- __blk_end_request_all(ace->req, -EIO);
+ __blk_end_request_all(ace->req, BLK_STS_IOERR);
ace->req = NULL;
}
while ((req = blk_fetch_request(ace->queue)) != NULL)
- __blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, BLK_STS_IOERR);
/* Drop back to IDLE state and notify waiters */
ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -728,7 +728,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
}
/* bio finished; is there another one? */
- if (__blk_end_request_cur(ace->req, 0)) {
+ if (__blk_end_request_cur(ace->req, BLK_STS_OK)) {
/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
* blk_rq_sectors(ace->req),
* blk_rq_cur_sectors(ace->req));
@@ -993,6 +993,7 @@ static int ace_setup(struct ace_device *ace)
if (ace->queue == NULL)
goto err_blk_initq;
blk_queue_logical_block_size(ace->queue, 512);
+ blk_queue_bounce_limit(ace->queue, BLK_BOUNCE_HIGH);
/*
* Allocate and initialize GD structure
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 968f9e52effa..41c95c9b2ab4 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -74,14 +74,14 @@ static void do_z2_request(struct request_queue *q)
while (req) {
unsigned long start = blk_rq_pos(req) << 9;
unsigned long len = blk_rq_cur_bytes(req);
- int err = 0;
+ blk_status_t err = BLK_STS_OK;
if (start + len > z2ram_size) {
pr_err(DEVICE_NAME ": bad access: block=%llu, "
"count=%u\n",
(unsigned long long)blk_rq_pos(req),
blk_rq_cur_sectors(req));
- err = -EIO;
+ err = BLK_STS_IOERR;
goto done;
}
while (len) {
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index debee952dcc1..558e245fab0c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1272,6 +1272,13 @@ static int zram_remove(struct zram *zram)
}
/* zram-control sysfs attributes */
+
+/*
+ * NOTE: hot_add attribute is not the usual read-only sysfs attribute. In a
+ * sense that reading from this file does alter the state of your system -- it
+ * creates a new un-initialized zram device and returns back this device's
+ * device_id (or an error code if it fails to create a new device).
+ */
static ssize_t hot_add_show(struct class *class,
struct class_attribute *attr,
char *buf)
@@ -1286,6 +1293,7 @@ static ssize_t hot_add_show(struct class *class,
return ret;
return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
+static CLASS_ATTR_RO(hot_add);
static ssize_t hot_remove_store(struct class *class,
struct class_attribute *attr,
@@ -1316,23 +1324,19 @@ static ssize_t hot_remove_store(struct class *class,
mutex_unlock(&zram_index_mutex);
return ret ? ret : count;
}
+static CLASS_ATTR_WO(hot_remove);
-/*
- * NOTE: hot_add attribute is not the usual read-only sysfs attribute. In a
- * sense that reading from this file does alter the state of your system -- it
- * creates a new un-initialized zram device and returns back this device's
- * device_id (or an error code if it fails to create a new device).
- */
-static struct class_attribute zram_control_class_attrs[] = {
- __ATTR(hot_add, 0400, hot_add_show, NULL),
- __ATTR_WO(hot_remove),
- __ATTR_NULL,
+static struct attribute *zram_control_class_attrs[] = {
+ &class_attr_hot_add.attr,
+ &class_attr_hot_remove.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(zram_control_class);
static struct class zram_control_class = {
.name = "zram-control",
.owner = THIS_MODULE,
- .class_attrs = zram_control_class_attrs,
+ .class_groups = zram_control_class_groups,
};
static int zram_remove_cb(int id, void *ptr, void *data)
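zram's conversion mirrors pktcdvd's, with one side effect worth spelling out: the old array entry declared hot_add as __ATTR(hot_add, 0400, hot_add_show, NULL), readable by root only, while CLASS_ATTR_RO() hardcodes mode 0444. Roughly, the macro expands to:

/* CLASS_ATTR_RO(hot_add) expands (via __ATTR_RO) to roughly: */
struct class_attribute class_attr_hot_add = {
	.attr = { .name = "hot_add", .mode = 0444 },	/* was 0400 in the array */
	.show = hot_add_show,
};

Given the NOTE now sitting above hot_add_show() (reading the file allocates a new zram device), the wider mode also widens who can trigger device creation, a point worth keeping in mind when reviewing this conversion.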