author     Peter Zijlstra <peterz@infradead.org>  2020-11-26 13:16:55 +0100
committer  Peter Zijlstra <peterz@infradead.org>  2020-11-26 13:16:55 +0100
commit     20c7775aecea04d8ca322039969d49dcf568e0e9 (patch)
tree       138c057839197c9021043353e994815c0250e669 /drivers/block
parent     perf/x86/intel: Add event constraint for CYCLE_ACTIVITY.STALLS_MEM_ANY (diff)
parent     Merge tag 'media/v5.10-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media (diff)
download   linux-dev-20c7775aecea04d8ca322039969d49dcf568e0e9.tar.xz
           linux-dev-20c7775aecea04d8ca322039969d49dcf568e0e9.zip
Merge remote-tracking branch 'origin/master' into perf/core
Further perf/core patches will depend on: d3f7b1bb2040 ("mm/gup: fix gup_fast with dynamic page table folding") which is already in Linus' tree.
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/amiflop.c              |   2
-rw-r--r--  drivers/block/aoe/aoeblk.c           |   3
-rw-r--r--  drivers/block/aoe/aoecmd.c           |   6
-rw-r--r--  drivers/block/ataflop.c              |   9
-rw-r--r--  drivers/block/brd.c                  |   1
-rw-r--r--  drivers/block/drbd/drbd_actlog.c     |   2
-rw-r--r--  drivers/block/drbd/drbd_int.h        |   3
-rw-r--r--  drivers/block/drbd/drbd_main.c       |  35
-rw-r--r--  drivers/block/drbd/drbd_nl.c         |  28
-rw-r--r--  drivers/block/drbd/drbd_receiver.c   |  24
-rw-r--r--  drivers/block/drbd/drbd_req.c        |   6
-rw-r--r--  drivers/block/drbd/drbd_worker.c     |   6
-rw-r--r--  drivers/block/floppy.c               |  12
-rw-r--r--  drivers/block/loop.c                 |  48
-rw-r--r--  drivers/block/nbd.c                  |  35
-rw-r--r--  drivers/block/null_blk.h             |   7
-rw-r--r--  drivers/block/null_blk_main.c        |  24
-rw-r--r--  drivers/block/null_blk_zoned.c       | 454
-rw-r--r--  drivers/block/paride/pcd.c           |   2
-rw-r--r--  drivers/block/paride/pd.c            |   4
-rw-r--r--  drivers/block/pktcdvd.c              |  96
-rw-r--r--  drivers/block/rbd.c                  |  32
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c        |  31
-rw-r--r--  drivers/block/rnbd/rnbd-srv.c        |   3
-rw-r--r--  drivers/block/rsxx/core.c            |   4
-rw-r--r--  drivers/block/skd_main.c             |   3
-rw-r--r--  drivers/block/swim.c                 |  22
-rw-r--r--  drivers/block/swim3.c                |   4
-rw-r--r--  drivers/block/virtio_blk.c           |  35
-rw-r--r--  drivers/block/xen-blkback/blkback.c  |  24
-rw-r--r--  drivers/block/xen-blkback/xenbus.c   |  29
-rw-r--r--  drivers/block/xen-blkfront.c         |  25
-rw-r--r--  drivers/block/xsysace.c              |  75
-rw-r--r--  drivers/block/zram/zram_drv.c        |  40
34 files changed, 714 insertions, 420 deletions
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 226219da3da6..71c2b1564558 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1670,7 +1670,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
}
if (mode & (FMODE_READ|FMODE_WRITE)) {
- check_disk_change(bdev);
+ bdev_check_media_change(bdev);
if (mode & FMODE_WRITE) {
int wrprot;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 5ca7216e9e01..c34e71b0c4a9 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -347,7 +347,6 @@ aoeblk_gdalloc(void *vp)
mempool_t *mp;
struct request_queue *q;
struct blk_mq_tag_set *set;
- enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
ulong flags;
int late = 0;
int err;
@@ -407,7 +406,7 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
- q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
+ blk_queue_io_opt(q, SZ_2M);
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 3cf9bc5d8d95..313f0b946fe2 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -900,9 +900,7 @@ aoecmd_sleepwork(struct work_struct *work)
ssize = get_capacity(d->gd);
bd = bdget_disk(d->gd, 0);
if (bd) {
- inode_lock(bd->bd_inode);
- i_size_write(bd->bd_inode, (loff_t)ssize<<9);
- inode_unlock(bd->bd_inode);
+ bd_set_nr_sectors(bd, ssize);
bdput(bd);
}
spin_lock_irq(&d->lock);
@@ -1135,7 +1133,7 @@ noskb: if (buf)
break;
}
bvcpy(skb, f->buf->bio, f->iter, n);
- /* fall through */
+ fallthrough;
case ATA_CMD_PIO_WRITE:
case ATA_CMD_PIO_WRITE_EXT:
spin_lock_irq(&d->lock);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 1553d41f0b91..3e881fdb06e0 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1726,13 +1726,14 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* MSch: invalidate default_params */
default_params[drive].blocks = 0;
set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
- /* Fall through */
+ fallthrough;
case FDFMTEND:
case FDFLUSH:
/* invalidate the buffer track to force a reread */
BufferDrive = -1;
set_bit(drive, &fake_change);
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev))
+ floppy_revalidate(bdev->bd_disk);
return 0;
default:
return -EINVAL;
@@ -1909,7 +1910,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return 0;
if (mode & (FMODE_READ|FMODE_WRITE)) {
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev))
+ floppy_revalidate(bdev->bd_disk);
if (mode & FMODE_WRITE) {
if (p->wpstat) {
if (p->ref < 0)
@@ -1953,7 +1955,6 @@ static const struct block_device_operations floppy_fops = {
.release = floppy_release,
.ioctl = fd_ioctl,
.check_events = floppy_check_events,
- .revalidate_disk= floppy_revalidate,
};
static const struct blk_mq_ops ataflop_mq_ops = {
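For context on the conversion recurring in these floppy-style drivers: check_disk_change() is replaced by bdev_check_media_change(), which only reports whether the media changed and leaves revalidation to the caller, so the ->revalidate_disk method is dropped from the block_device_operations. A minimal sketch of the resulting open() pattern follows; it is not part of this patch, and my_floppy_open()/my_revalidate() are hypothetical names standing in for a driver's own routines.

/*
 * Minimal sketch (not from this patch): an open() path after the
 * check_disk_change() -> bdev_check_media_change() conversion.
 * my_revalidate() stands in for the driver's revalidation routine.
 */
static int my_floppy_open(struct block_device *bdev, fmode_t mode)
{
	if (mode & (FMODE_READ | FMODE_WRITE)) {
		/* returns true if the media changed since the last check */
		if (bdev_check_media_change(bdev))
			my_revalidate(bdev->bd_disk);
	}
	return 0;
}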
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 2723a70eb855..cc49a921339f 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -403,7 +403,6 @@ static struct brd_device *brd_alloc(int i)
disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "ram%d", i);
set_capacity(disk, rd_size * 2);
- brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
/* Tell the block layer that this is not a rotational device */
blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index b41897dceb2b..7227fc7ab8ed 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -865,7 +865,7 @@ int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
if (!get_ldev(device))
return 0; /* no disk, no metadata, no bitmap to manipulate bits in */
- nr_sectors = drbd_get_capacity(device->this_bdev);
+ nr_sectors = get_capacity(device->vdisk);
esector = sector + (size >> 9) - 1;
if (!expect(sector < nr_sectors))
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index fe6cb99eb917..8f879e5c2f67 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -841,7 +841,6 @@ struct drbd_device {
sector_t p_size; /* partner's disk size */
struct request_queue *rq_queue;
- struct block_device *this_bdev;
struct gendisk *vdisk;
unsigned long last_reattach_jif;
@@ -1733,7 +1732,7 @@ static inline void __drbd_chk_io_error_(struct drbd_device *device,
_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
break;
}
- /* fall through - for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
+ fallthrough; /* for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
case EP_DETACH:
case EP_CALL_HELPER:
/* Remember whether we saw a READ or WRITE error.
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index cb687ccdbd96..65b95aef8dbc 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -430,7 +430,7 @@ int drbd_thread_start(struct drbd_thread *thi)
thi->t_state = RESTARTING;
drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
thi->name, current->comm, current->pid);
- /* fall through */
+ fallthrough;
case RUNNING:
case RESTARTING:
default:
@@ -984,7 +984,10 @@ int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enu
p->d_size = cpu_to_be64(d_size);
p->u_size = cpu_to_be64(u_size);
- p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
+ if (trigger_reply)
+ p->c_size = 0;
+ else
+ p->c_size = cpu_to_be64(get_capacity(device->vdisk));
p->max_bio_size = cpu_to_be32(max_bio_size);
p->queue_order_type = cpu_to_be16(q_order_type);
p->dds_flags = cpu_to_be16(flags);
@@ -1553,7 +1556,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
* put_page(); and would cause either a VM_BUG directly, or
* __page_cache_release a page that would actually still be referenced
* by someone, leading to some obscure delayed Oops somewhere else. */
- if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
+ if (drbd_disable_sendpage || !sendpage_ok(page))
return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
msg_flags |= MSG_NOSIGNAL;
@@ -2029,17 +2032,13 @@ void drbd_init_set_defaults(struct drbd_device *device)
device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
}
-static void _drbd_set_my_capacity(struct drbd_device *device, sector_t size)
-{
- /* set_capacity(device->this_bdev->bd_disk, size); */
- set_capacity(device->vdisk, size);
- device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
-}
-
void drbd_set_my_capacity(struct drbd_device *device, sector_t size)
{
char ppb[10];
- _drbd_set_my_capacity(device, size);
+
+ set_capacity(device->vdisk, size);
+ revalidate_disk_size(device->vdisk, false);
+
drbd_info(device, "size = %s (%llu KB)\n",
ppsize(ppb, size>>1), (unsigned long long)size>>1);
}
@@ -2069,7 +2068,8 @@ void drbd_device_cleanup(struct drbd_device *device)
}
D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
- _drbd_set_my_capacity(device, 0);
+ set_capacity(device->vdisk, 0);
+ revalidate_disk_size(device->vdisk, false);
if (device->bitmap) {
/* maybe never allocated. */
drbd_bm_resize(device, 0, 1);
@@ -2236,9 +2236,6 @@ void drbd_destroy_device(struct kref *kref)
/* cleanup stuff that may have been allocated during
* device (re-)configuration or state changes */
- if (device->this_bdev)
- bdput(device->this_bdev);
-
drbd_backing_dev_free(device, device->ldev);
device->ldev = NULL;
@@ -2765,10 +2762,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
sprintf(disk->disk_name, "drbd%d", minor);
disk->private_data = device;
- device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
- /* we have no partitions. we contain only ourselves. */
- device->this_bdev->bd_contains = device->this_bdev;
-
blk_queue_write_cache(q, true, true);
/* Setting the max_hw_sectors to an odd value of 8kibyte here
This triggers a max_bio_size message upon first attach or connect */
@@ -3044,7 +3037,7 @@ void drbd_md_write(struct drbd_device *device, void *b)
memset(buffer, 0, sizeof(*buffer));
- buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
+ buffer->la_size_sect = cpu_to_be64(get_capacity(device->vdisk));
for (i = UI_CURRENT; i < UI_SIZE; i++)
buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
buffer->flags = cpu_to_be32(device->ldev->md.flags);
@@ -3102,7 +3095,7 @@ void drbd_md_sync(struct drbd_device *device)
/* Update device->ldev->md.la_size_sect,
* since we updated it on metadata. */
- device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
+ device->ldev->md.la_size_sect = get_capacity(device->vdisk);
drbd_md_put_buffer(device);
out:
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 28eb078f8b75..bf7de4c7b96c 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -996,7 +996,7 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
goto err_out;
}
- if (drbd_get_capacity(device->this_bdev) != size ||
+ if (get_capacity(device->vdisk) != size ||
drbd_bm_capacity(device) != size) {
int err;
err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
@@ -1362,15 +1362,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
if (b) {
blk_stack_limits(&q->limits, &b->limits, 0);
-
- if (q->backing_dev_info->ra_pages !=
- b->backing_dev_info->ra_pages) {
- drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
- q->backing_dev_info->ra_pages,
- b->backing_dev_info->ra_pages);
- q->backing_dev_info->ra_pages =
- b->backing_dev_info->ra_pages;
- }
+ blk_queue_update_readahead(q);
}
fixup_discard_if_not_supported(q);
fixup_write_zeroes(device, q);
@@ -1941,8 +1933,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
/* Make sure the new disk is big enough
* (we may currently be R_PRIMARY with no local disk...) */
- if (drbd_get_max_capacity(nbc) <
- drbd_get_capacity(device->this_bdev)) {
+ if (drbd_get_max_capacity(nbc) < get_capacity(device->vdisk)) {
retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
@@ -3370,7 +3361,6 @@ static void device_to_statistics(struct device_statistics *s,
if (get_ldev(device)) {
struct drbd_md *md = &device->ldev->md;
u64 *history_uuids = (u64 *)s->history_uuids;
- struct request_queue *q;
int n;
spin_lock_irq(&md->uuid_lock);
@@ -3384,14 +3374,9 @@ static void device_to_statistics(struct device_statistics *s,
spin_unlock_irq(&md->uuid_lock);
s->dev_disk_flags = md->flags;
- q = bdev_get_queue(device->ldev->backing_bdev);
- s->dev_lower_blocked =
- bdi_congested(q->backing_dev_info,
- (1 << WB_async_congested) |
- (1 << WB_sync_congested));
put_ldev(device);
}
- s->dev_size = drbd_get_capacity(device->this_bdev);
+ s->dev_size = get_capacity(device->vdisk);
s->dev_read = device->read_cnt;
s->dev_write = device->writ_cnt;
s->dev_al_writes = device->al_writ_cnt;
@@ -3831,8 +3816,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
nla_put_u32(skb, T_current_state, device->state.i) ||
nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
- nla_put_u64_0pad(skb, T_capacity,
- drbd_get_capacity(device->this_bdev)) ||
+ nla_put_u64_0pad(skb, T_capacity, get_capacity(device->vdisk)) ||
nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
@@ -3883,7 +3867,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
if (nla_put_u32(skb, T_helper_exit_code,
sib->helper_exit_code))
goto nla_put_failure;
- /* fall through */
+ fallthrough;
case SIB_HELPER_PRE:
if (nla_put_string(skb, T_helper, sib->helper_name))
goto nla_put_failure;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 1d17593f5d2b..dc333dbe5232 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1797,7 +1797,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
break;
else
drbd_warn(connection, "Allocation of an epoch failed, slowing down\n");
- /* Fall through */
+ fallthrough;
case WO_BDEV_FLUSH:
case WO_DRAIN_IO:
@@ -1860,7 +1860,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
struct packet_info *pi) __must_hold(local)
{
struct drbd_device *device = peer_device->device;
- const sector_t capacity = drbd_get_capacity(device->this_bdev);
+ const sector_t capacity = get_capacity(device->vdisk);
struct drbd_peer_request *peer_req;
struct page *page;
int digest_size, err;
@@ -2789,7 +2789,7 @@ bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
{
- struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
+ struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
unsigned long db, dt, dbdt;
unsigned int c_min_rate;
int curr_events;
@@ -2849,7 +2849,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
if (!peer_device)
return -EIO;
device = peer_device->device;
- capacity = drbd_get_capacity(device->this_bdev);
+ capacity = get_capacity(device->vdisk);
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
@@ -2917,7 +2917,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
then we would do something smarter here than reading
the block... */
peer_req->flags |= EE_RS_THIN_REQ;
- /* fall through */
+ fallthrough;
case P_RS_DATA_REQUEST:
peer_req->w.cb = w_e_end_rsdata_req;
fault_type = DRBD_FAULT_RS_RD;
@@ -3083,7 +3083,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
rv = 1;
break;
}
- /* Else fall through - to one of the other strategies... */
+ fallthrough; /* to one of the other strategies */
case ASB_DISCARD_OLDER_PRI:
if (self == 0 && peer == 1) {
rv = 1;
@@ -3096,7 +3096,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
/* Else fall through to one of the other strategies... */
drbd_warn(device, "Discard younger/older primary did not find a decision\n"
"Using discard-least-changes instead\n");
- /* fall through */
+ fallthrough;
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags)
@@ -3108,7 +3108,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
}
if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
- /* else, fall through */
+ fallthrough;
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
rv = -1;
@@ -3608,7 +3608,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
switch (rr_conflict) {
case ASB_CALL_HELPER:
drbd_khelper(device, "pri-lost");
- /* fall through */
+ fallthrough;
case ASB_DISCONNECT:
drbd_err(device, "I shall become SyncTarget, but I am primary!\n");
return C_MASK;
@@ -4117,7 +4117,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
if (!peer_device)
return config_unknown_volume(connection, pi);
device = peer_device->device;
- cur_size = drbd_get_capacity(device->this_bdev);
+ cur_size = get_capacity(device->vdisk);
p_size = be64_to_cpu(p->d_size);
p_usize = be64_to_cpu(p->u_size);
@@ -4252,8 +4252,8 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
}
if (device->state.conn > C_WF_REPORT_PARAMS) {
- if (be64_to_cpu(p->c_size) !=
- drbd_get_capacity(device->this_bdev) || ldsc) {
+ if (be64_to_cpu(p->c_size) != get_capacity(device->vdisk) ||
+ ldsc) {
/* we have different sizes, probably peer
* needs to know my new size... */
drbd_send_sizes(peer_device, 0, ddsf);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 674be09b2da9..330f851cb8f0 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -611,7 +611,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
drbd_set_out_of_sync(device, req->i.sector, req->i.size);
drbd_report_io_error(device, req);
__drbd_chk_io_error(device, DRBD_READ_ERROR);
- /* fall through. */
+ fallthrough;
case READ_AHEAD_COMPLETED_WITH_ERROR:
/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
@@ -836,7 +836,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
} /* else: FIXME can this happen? */
break;
}
- /* else, fall through - to BARRIER_ACKED */
+ fallthrough; /* to BARRIER_ACKED */
case BARRIER_ACKED:
/* barrier ack for READ requests does not make sense */
@@ -888,7 +888,7 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector,
if (device->state.disk != D_INCONSISTENT)
return false;
esector = sector + (size >> 9) - 1;
- nr_sectors = drbd_get_capacity(device->this_bdev);
+ nr_sectors = get_capacity(device->vdisk);
D_ASSERT(device, sector < nr_sectors);
D_ASSERT(device, esector < nr_sectors);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 7c903de5c4e1..ba56f3f05312 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -591,7 +591,7 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
unsigned long bit;
sector_t sector;
- const sector_t capacity = drbd_get_capacity(device->this_bdev);
+ const sector_t capacity = get_capacity(device->vdisk);
int max_bio_size;
int number, rollback_i, size;
int align, requeue = 0;
@@ -769,7 +769,7 @@ static int make_ov_request(struct drbd_device *device, int cancel)
{
int number, i, size;
sector_t sector;
- const sector_t capacity = drbd_get_capacity(device->this_bdev);
+ const sector_t capacity = get_capacity(device->vdisk);
bool stop_sector_reached = false;
if (unlikely(cancel))
@@ -1672,7 +1672,7 @@ void drbd_resync_after_changed(struct drbd_device *device)
void drbd_rs_controller_reset(struct drbd_device *device)
{
- struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
+ struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
struct fifo_buffer *plan;
atomic_set(&device->rs_sect_in, 0);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 09079aee8dc4..7df79ae6b0a1 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -561,6 +561,7 @@ static void floppy_release_irq_and_dma(void);
* output_byte is automatically disabled when reset is set.
*/
static void reset_fdc(void);
+static int floppy_revalidate(struct gendisk *disk);
/*
* These are global variables, as that's the easiest way to give
@@ -1680,7 +1681,7 @@ static void recal_interrupt(void)
clear_bit(FD_DISK_NEWCHANGE_BIT,
&drive_state[current_drive].flags);
drive_state[current_drive].select_date = jiffies;
- /* fall through */
+ fallthrough;
default:
debugt(__func__, "default");
/* Recalibrate moves the head by at
@@ -3275,7 +3276,8 @@ static int invalidate_drive(struct block_device *bdev)
/* invalidate the buffer track to force a reread */
set_bit((long)bdev->bd_disk->private_data, &fake_change);
process_fd_request();
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev))
+ floppy_revalidate(bdev->bd_disk);
return 0;
}
@@ -3592,7 +3594,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
process_fd_request();
- /* fall through */
+ fallthrough;
case FDGETDRVSTAT:
outparam = &drive_state[drive];
break;
@@ -4123,7 +4125,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
drive_state[drive].last_checked = 0;
clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
&drive_state[drive].flags);
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev))
+ floppy_revalidate(bdev->bd_disk);
if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
goto out;
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
@@ -4291,7 +4294,6 @@ static const struct block_device_operations floppy_fops = {
.ioctl = fd_ioctl,
.getgeo = fd_getgeo,
.check_events = floppy_check_events,
- .revalidate_disk = floppy_revalidate,
#ifdef CONFIG_COMPAT
.compat_ioctl = fd_compat_ioctl,
#endif
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 2f137d6ce169..a58084c2ed7c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -253,9 +253,10 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
{
struct block_device *bdev = lo->lo_device;
- bd_set_size(bdev, size << SECTOR_SHIFT);
+ bd_set_nr_sectors(bdev, size);
- set_capacity_revalidate_and_notify(lo->lo_disk, size, false);
+ if (!set_capacity_revalidate_and_notify(lo->lo_disk, size, false))
+ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
}
static inline int
@@ -878,6 +879,7 @@ static void loop_config_discard(struct loop_device *lo)
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct request_queue *q = lo->lo_queue;
+ u32 granularity, max_discard_sectors;
/*
* If the backing device is a block device, mirror its zeroing
@@ -890,11 +892,10 @@ static void loop_config_discard(struct loop_device *lo)
struct request_queue *backingq;
backingq = bdev_get_queue(inode->i_bdev);
- blk_queue_max_discard_sectors(q,
- backingq->limits.max_write_zeroes_sectors);
- blk_queue_max_write_zeroes_sectors(q,
- backingq->limits.max_write_zeroes_sectors);
+ max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
+ granularity = backingq->limits.discard_granularity ?:
+ queue_physical_block_size(backingq);
/*
* We use punch hole to reclaim the free space used by the
@@ -903,23 +904,26 @@ static void loop_config_discard(struct loop_device *lo)
* useful information.
*/
} else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
- q->limits.discard_granularity = 0;
- q->limits.discard_alignment = 0;
- blk_queue_max_discard_sectors(q, 0);
- blk_queue_max_write_zeroes_sectors(q, 0);
+ max_discard_sectors = 0;
+ granularity = 0;
} else {
- q->limits.discard_granularity = inode->i_sb->s_blocksize;
- q->limits.discard_alignment = 0;
-
- blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
- blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+ max_discard_sectors = UINT_MAX >> 9;
+ granularity = inode->i_sb->s_blocksize;
}
- if (q->limits.max_write_zeroes_sectors)
+ if (max_discard_sectors) {
+ q->limits.discard_granularity = granularity;
+ blk_queue_max_discard_sectors(q, max_discard_sectors);
+ blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
- else
+ } else {
+ q->limits.discard_granularity = 0;
+ blk_queue_max_discard_sectors(q, 0);
+ blk_queue_max_write_zeroes_sectors(q, 0);
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
+ }
+ q->limits.discard_alignment = 0;
}
static void loop_unprepare_queue(struct loop_device *lo)
@@ -1111,8 +1115,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
mapping = file->f_mapping;
inode = mapping->host;
- size = get_loop_size(lo, file);
-
if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
error = -EINVAL;
goto out_unlock;
@@ -1162,6 +1164,8 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
loop_update_rotational(lo);
loop_update_dio(lo);
loop_sysfs_init(lo);
+
+ size = get_loop_size(lo, file);
loop_set_size(lo, size);
set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
@@ -1248,7 +1252,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
set_capacity(lo->lo_disk, 0);
loop_sysfs_exit(lo);
if (bdev) {
- bd_set_size(bdev, 0);
+ bd_set_nr_sectors(bdev, 0);
/* let user-space know about this change */
kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
}
@@ -1719,7 +1723,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_SET_BLOCK_SIZE:
if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
return -EPERM;
- /* Fall through */
+ fallthrough;
default:
err = lo_simple_ioctl(lo, cmd, arg);
break;
@@ -1867,7 +1871,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
case LOOP_SET_STATUS64:
case LOOP_CONFIGURE:
arg = (unsigned long) compat_ptr(arg);
- /* fall through */
+ fallthrough;
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
case LOOP_SET_BLOCK_SIZE:
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3ff4054d6834..aaae9220f3a0 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -296,10 +296,11 @@ static void nbd_size_clear(struct nbd_device *nbd)
}
}
-static void nbd_size_update(struct nbd_device *nbd)
+static void nbd_size_update(struct nbd_device *nbd, bool start)
{
struct nbd_config *config = nbd->config;
struct block_device *bdev = bdget_disk(nbd->disk, 0);
+ sector_t nr_sectors = config->bytesize >> 9;
if (config->flags & NBD_FLAG_SEND_TRIM) {
nbd->disk->queue->limits.discard_granularity = config->blksize;
@@ -308,13 +309,14 @@ static void nbd_size_update(struct nbd_device *nbd)
}
blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
- set_capacity(nbd->disk, config->bytesize >> 9);
+ set_capacity(nbd->disk, nr_sectors);
if (bdev) {
if (bdev->bd_disk) {
- bd_set_size(bdev, config->bytesize);
- set_blocksize(bdev, config->blksize);
+ bd_set_nr_sectors(bdev, nr_sectors);
+ if (start)
+ set_blocksize(bdev, config->blksize);
} else
- bdev->bd_invalidated = 1;
+ set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
bdput(bdev);
}
kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
@@ -327,7 +329,7 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
config->blksize = blocksize;
config->bytesize = blocksize * nr_blocks;
if (nbd->task_recv != NULL)
- nbd_size_update(nbd);
+ nbd_size_update(nbd, false);
}
static void nbd_complete_rq(struct request *req)
@@ -801,9 +803,9 @@ static void recv_work(struct work_struct *work)
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
}
+ nbd_config_put(nbd);
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
- nbd_config_put(nbd);
kfree(args);
}
@@ -1138,7 +1140,7 @@ static void nbd_bdev_reset(struct block_device *bdev)
{
if (bdev->bd_openers > 1)
return;
- bd_set_size(bdev, 0);
+ bd_set_nr_sectors(bdev, 0);
}
static void nbd_parse_flags(struct nbd_device *nbd)
@@ -1307,7 +1309,7 @@ static int nbd_start_device(struct nbd_device *nbd)
args->index = i;
queue_work(nbd->recv_workq, &args->work);
}
- nbd_size_update(nbd);
+ nbd_size_update(nbd, true);
return error;
}
@@ -1321,7 +1323,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
return ret;
if (max_part)
- bdev->bd_invalidated = 1;
+ set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
mutex_unlock(&nbd->config_lock);
ret = wait_event_interruptible(config->recv_wq,
atomic_read(&config->recv_threads) == 0);
@@ -1363,6 +1365,8 @@ static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
nbd->tag_set.timeout = timeout * HZ;
if (timeout)
blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
+ else
+ blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
}
/* Must be called with config_lock held */
@@ -1497,9 +1501,9 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
- bdev->bd_invalidated = 1;
+ set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
} else if (nbd_disconnected(nbd->config)) {
- bdev->bd_invalidated = 1;
+ set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
}
out:
mutex_unlock(&nbd_index_mutex);
@@ -1514,6 +1518,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
bdev->bd_openers == 0)
nbd_disconnect_and_put(nbd);
+ bdput(bdev);
nbd_config_put(nbd);
nbd_put(nbd);
@@ -2181,7 +2186,7 @@ out:
return ret;
}
-static const struct genl_ops nbd_connect_genl_ops[] = {
+static const struct genl_small_ops nbd_connect_genl_ops[] = {
{
.cmd = NBD_CMD_CONNECT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -2213,8 +2218,8 @@ static struct genl_family nbd_genl_family __ro_after_init = {
.name = NBD_GENL_FAMILY_NAME,
.version = NBD_GENL_VERSION,
.module = THIS_MODULE,
- .ops = nbd_connect_genl_ops,
- .n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
+ .small_ops = nbd_connect_genl_ops,
+ .n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
.maxattr = NBD_ATTR_MAX,
.policy = nbd_attr_policy,
.mcgrps = nbd_mcast_grps,
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index daed4a9c3436..c24d9b5ad81a 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -42,8 +42,13 @@ struct nullb_device {
struct badblocks badblocks;
unsigned int nr_zones;
+ unsigned int nr_zones_imp_open;
+ unsigned int nr_zones_exp_open;
+ unsigned int nr_zones_closed;
struct blk_zone *zones;
sector_t zone_size_sects;
+ spinlock_t zone_lock;
+ unsigned long *zone_locks;
unsigned long size; /* device size in MB */
unsigned long completion_nsec; /* time in ns to complete a request */
@@ -51,6 +56,8 @@ struct nullb_device {
unsigned long zone_size; /* zone size in MB if device is zoned */
unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
unsigned int zone_nr_conv; /* number of conventional zones */
+ unsigned int zone_max_open; /* max number of open zones */
+ unsigned int zone_max_active; /* max number of active zones */
unsigned int submit_queues; /* number of submission queues */
unsigned int home_node; /* home node for the device */
unsigned int queue_mode; /* block interface */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 47a9dad880af..4685ea401d5b 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -164,6 +164,10 @@ static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
+static bool g_shared_tag_bitmap;
+module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444);
+MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");
+
static int g_irqmode = NULL_IRQ_SOFTIRQ;
static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -208,6 +212,14 @@ static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");
+static unsigned int g_zone_max_open;
+module_param_named(zone_max_open, g_zone_max_open, uint, 0444);
+MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)");
+
+static unsigned int g_zone_max_active;
+module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
+MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -347,6 +359,8 @@ NULLB_DEVICE_ATTR(zoned, bool, NULL);
NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
+NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
+NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -464,6 +478,8 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_zone_size,
&nullb_device_attr_zone_capacity,
&nullb_device_attr_zone_nr_conv,
+ &nullb_device_attr_zone_max_open,
+ &nullb_device_attr_zone_max_active,
NULL,
};
@@ -517,7 +533,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
return snprintf(page, PAGE_SIZE,
- "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv\n");
+ "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -580,6 +596,8 @@ static struct nullb_device *null_alloc_dev(void)
dev->zone_size = g_zone_size;
dev->zone_capacity = g_zone_capacity;
dev->zone_nr_conv = g_zone_nr_conv;
+ dev->zone_max_open = g_zone_max_open;
+ dev->zone_max_active = g_zone_max_active;
return dev;
}
@@ -1147,7 +1165,7 @@ static int null_handle_rq(struct nullb_cmd *cmd)
len = bvec.bv_len;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
op_is_write(req_op(rq)), sector,
- req_op(rq) & REQ_FUA);
+ rq->cmd_flags & REQ_FUA);
if (err) {
spin_unlock_irq(&nullb->lock);
return err;
@@ -1692,6 +1710,8 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
set->flags = BLK_MQ_F_SHOULD_MERGE;
if (g_no_sched)
set->flags |= BLK_MQ_F_NO_SCHED;
+ if (g_shared_tag_bitmap)
+ set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
set->driver_data = NULL;
if ((nullb && nullb->dev->blocking) || g_blocking)
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 3d25c9ad2383..beb34b4f76b0 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
#include "null_blk.h"
#define CREATE_TRACE_POINTS
@@ -45,12 +46,44 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
if (!dev->zones)
return -ENOMEM;
+ /*
+ * With memory backing, the zone_lock spinlock needs to be temporarily
+ * released to avoid scheduling in atomic context. To guarantee zone
+ * information protection, use a bitmap to lock zones with
+ * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
+ * implies that the queue is marked with BLK_MQ_F_BLOCKING.
+ */
+ spin_lock_init(&dev->zone_lock);
+ if (dev->memory_backed) {
+ dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
+ if (!dev->zone_locks) {
+ kvfree(dev->zones);
+ return -ENOMEM;
+ }
+ }
+
if (dev->zone_nr_conv >= dev->nr_zones) {
dev->zone_nr_conv = dev->nr_zones - 1;
pr_info("changed the number of conventional zones to %u",
dev->zone_nr_conv);
}
+ /* Max active zones has to be < nbr of seq zones in order to be enforceable */
+ if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
+ dev->zone_max_active = 0;
+ pr_info("zone_max_active limit disabled, limit >= zone count\n");
+ }
+
+ /* Max open zones has to be <= max active zones */
+ if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
+ dev->zone_max_open = dev->zone_max_active;
+ pr_info("changed the maximum number of open zones to %u\n",
+ dev->nr_zones);
+ } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
+ dev->zone_max_open = 0;
+ pr_info("zone_max_open limit disabled, limit >= zone count\n");
+ }
+
for (i = 0; i < dev->zone_nr_conv; i++) {
struct blk_zone *zone = &dev->zones[i];
@@ -99,21 +132,39 @@ int null_register_zoned_dev(struct nullb *nullb)
}
blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
+ blk_queue_max_open_zones(q, dev->zone_max_open);
+ blk_queue_max_active_zones(q, dev->zone_max_active);
return 0;
}
void null_free_zoned_dev(struct nullb_device *dev)
{
+ bitmap_free(dev->zone_locks);
kvfree(dev->zones);
}
+static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
+{
+ if (dev->memory_backed)
+ wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+ spin_lock_irq(&dev->zone_lock);
+}
+
+static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
+{
+ spin_unlock_irq(&dev->zone_lock);
+
+ if (dev->memory_backed)
+ clear_and_wake_up_bit(zno, dev->zone_locks);
+}
+
int null_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct nullb *nullb = disk->private_data;
struct nullb_device *dev = nullb->dev;
- unsigned int first_zone, i;
+ unsigned int first_zone, i, zno;
struct blk_zone zone;
int error;
@@ -124,15 +175,18 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
nr_zones = min(nr_zones, dev->nr_zones - first_zone);
trace_nullb_report_zones(nullb, nr_zones);
- for (i = 0; i < nr_zones; i++) {
+ zno = first_zone;
+ for (i = 0; i < nr_zones; i++, zno++) {
/*
* Stacked DM target drivers will remap the zone information by
* modifying the zone information passed to the report callback.
* So use a local copy to avoid corruption of the device zone
* array.
*/
- memcpy(&zone, &dev->zones[first_zone + i],
- sizeof(struct blk_zone));
+ null_lock_zone(dev, zno);
+ memcpy(&zone, &dev->zones[zno], sizeof(struct blk_zone));
+ null_unlock_zone(dev, zno);
+
error = cb(&zone, i, data);
if (error)
return error;
@@ -141,6 +195,10 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
return nr_zones;
}
+/*
+ * This is called in the case of memory backing from null_process_cmd()
+ * with the target zone already locked.
+ */
size_t null_zone_valid_read_len(struct nullb *nullb,
sector_t sector, unsigned int len)
{
@@ -159,6 +217,111 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
return (zone->wp - sector) << SECTOR_SHIFT;
}
+static blk_status_t null_close_zone(struct nullb_device *dev, struct blk_zone *zone)
+{
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_CLOSED:
+ /* close operation on closed is not an error */
+ return BLK_STS_OK;
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_EXP_OPEN:
+ dev->nr_zones_exp_open--;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_FULL:
+ default:
+ return BLK_STS_IOERR;
+ }
+
+ if (zone->wp == zone->start) {
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ } else {
+ zone->cond = BLK_ZONE_COND_CLOSED;
+ dev->nr_zones_closed++;
+ }
+
+ return BLK_STS_OK;
+}
+
+static void null_close_first_imp_zone(struct nullb_device *dev)
+{
+ unsigned int i;
+
+ for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
+ if (dev->zones[i].cond == BLK_ZONE_COND_IMP_OPEN) {
+ null_close_zone(dev, &dev->zones[i]);
+ return;
+ }
+ }
+}
+
+static blk_status_t null_check_active(struct nullb_device *dev)
+{
+ if (!dev->zone_max_active)
+ return BLK_STS_OK;
+
+ if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
+ dev->nr_zones_closed < dev->zone_max_active)
+ return BLK_STS_OK;
+
+ return BLK_STS_ZONE_ACTIVE_RESOURCE;
+}
+
+static blk_status_t null_check_open(struct nullb_device *dev)
+{
+ if (!dev->zone_max_open)
+ return BLK_STS_OK;
+
+ if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
+ return BLK_STS_OK;
+
+ if (dev->nr_zones_imp_open) {
+ if (null_check_active(dev) == BLK_STS_OK) {
+ null_close_first_imp_zone(dev);
+ return BLK_STS_OK;
+ }
+ }
+
+ return BLK_STS_ZONE_OPEN_RESOURCE;
+}
+
+/*
+ * This function matches the manage open zone resources function in the ZBC standard,
+ * with the addition of max active zones support (added in the ZNS standard).
+ *
+ * The function determines if a zone can transition to implicit open or explicit open,
+ * while maintaining the max open zone (and max active zone) limit(s). It may close an
+ * implicit open zone in order to make additional zone resources available.
+ *
+ * ZBC states that an implicit open zone shall be closed only if there is not
+ * room within the open limit. However, with the addition of an active limit,
+ * it is not certain that closing an implicit open zone will allow a new zone
+ * to be opened, since we might already be at the active limit capacity.
+ */
+static blk_status_t null_check_zone_resources(struct nullb_device *dev, struct blk_zone *zone)
+{
+ blk_status_t ret;
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_EMPTY:
+ ret = null_check_active(dev);
+ if (ret != BLK_STS_OK)
+ return ret;
+ fallthrough;
+ case BLK_ZONE_COND_CLOSED:
+ return null_check_open(dev);
+ default:
+ /* Should never be called for other states */
+ WARN_ON(1);
+ return BLK_STS_IOERR;
+ }
+}
+
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors, bool append)
{
@@ -172,123 +335,272 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ null_lock_zone(dev, zno);
+
switch (zone->cond) {
case BLK_ZONE_COND_FULL:
/* Cannot write to a full zone */
- return BLK_STS_IOERR;
+ ret = BLK_STS_IOERR;
+ goto unlock;
case BLK_ZONE_COND_EMPTY:
+ case BLK_ZONE_COND_CLOSED:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK)
+ goto unlock;
+ break;
case BLK_ZONE_COND_IMP_OPEN:
case BLK_ZONE_COND_EXP_OPEN:
+ break;
+ default:
+ /* Invalid zone condition */
+ ret = BLK_STS_IOERR;
+ goto unlock;
+ }
+
+ /*
+ * Regular writes must be at the write pointer position.
+ * Zone append writes are automatically issued at the write
+ * pointer and the position returned using the request or BIO
+ * sector.
+ */
+ if (append) {
+ sector = zone->wp;
+ if (cmd->bio)
+ cmd->bio->bi_iter.bi_sector = sector;
+ else
+ cmd->rq->__sector = sector;
+ } else if (sector != zone->wp) {
+ ret = BLK_STS_IOERR;
+ goto unlock;
+ }
+
+ if (zone->wp + nr_sectors > zone->start + zone->capacity) {
+ ret = BLK_STS_IOERR;
+ goto unlock;
+ }
+
+ if (zone->cond == BLK_ZONE_COND_CLOSED) {
+ dev->nr_zones_closed--;
+ dev->nr_zones_imp_open++;
+ } else if (zone->cond == BLK_ZONE_COND_EMPTY) {
+ dev->nr_zones_imp_open++;
+ }
+ if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
+ zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+ /*
+ * Memory backing allocation may sleep: release the zone_lock spinlock
+ * to avoid scheduling in atomic context. Zone operation atomicity is
+ * still guaranteed through the zone_locks bitmap.
+ */
+ if (dev->memory_backed)
+ spin_unlock_irq(&dev->zone_lock);
+ ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ if (dev->memory_backed)
+ spin_lock_irq(&dev->zone_lock);
+
+ if (ret != BLK_STS_OK)
+ goto unlock;
+
+ zone->wp += nr_sectors;
+ if (zone->wp == zone->start + zone->capacity) {
+ if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
+ dev->nr_zones_exp_open--;
+ else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
+ dev->nr_zones_imp_open--;
+ zone->cond = BLK_ZONE_COND_FULL;
+ }
+ ret = BLK_STS_OK;
+
+unlock:
+ null_unlock_zone(dev, zno);
+
+ return ret;
+}
+
+static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zone)
+{
+ blk_status_t ret;
+
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_EXP_OPEN:
+ /* open operation on exp open is not an error */
+ return BLK_STS_OK;
+ case BLK_ZONE_COND_EMPTY:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK)
+ return ret;
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
case BLK_ZONE_COND_CLOSED:
- /*
- * Regular writes must be at the write pointer position.
- * Zone append writes are automatically issued at the write
- * pointer and the position returned using the request or BIO
- * sector.
- */
- if (append) {
- sector = zone->wp;
- if (cmd->bio)
- cmd->bio->bi_iter.bi_sector = sector;
- else
- cmd->rq->__sector = sector;
- } else if (sector != zone->wp) {
- return BLK_STS_IOERR;
- }
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK)
+ return ret;
+ dev->nr_zones_closed--;
+ break;
+ case BLK_ZONE_COND_FULL:
+ default:
+ return BLK_STS_IOERR;
+ }
+
+ zone->cond = BLK_ZONE_COND_EXP_OPEN;
+ dev->nr_zones_exp_open++;
- if (zone->wp + nr_sectors > zone->start + zone->capacity)
- return BLK_STS_IOERR;
+ return BLK_STS_OK;
+}
- if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
- zone->cond = BLK_ZONE_COND_IMP_OPEN;
+static blk_status_t null_finish_zone(struct nullb_device *dev, struct blk_zone *zone)
+{
+ blk_status_t ret;
+
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
- ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+ switch (zone->cond) {
+ case BLK_ZONE_COND_FULL:
+ /* finish operation on full is not an error */
+ return BLK_STS_OK;
+ case BLK_ZONE_COND_EMPTY:
+ ret = null_check_zone_resources(dev, zone);
if (ret != BLK_STS_OK)
return ret;
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_EXP_OPEN:
+ dev->nr_zones_exp_open--;
+ break;
+ case BLK_ZONE_COND_CLOSED:
+ ret = null_check_zone_resources(dev, zone);
+ if (ret != BLK_STS_OK)
+ return ret;
+ dev->nr_zones_closed--;
+ break;
+ default:
+ return BLK_STS_IOERR;
+ }
+
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = zone->start + zone->len;
- zone->wp += nr_sectors;
- if (zone->wp == zone->start + zone->capacity)
- zone->cond = BLK_ZONE_COND_FULL;
+ return BLK_STS_OK;
+}
+
+static blk_status_t null_reset_zone(struct nullb_device *dev, struct blk_zone *zone)
+{
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ return BLK_STS_IOERR;
+
+ switch (zone->cond) {
+ case BLK_ZONE_COND_EMPTY:
+ /* reset operation on empty is not an error */
return BLK_STS_OK;
+ case BLK_ZONE_COND_IMP_OPEN:
+ dev->nr_zones_imp_open--;
+ break;
+ case BLK_ZONE_COND_EXP_OPEN:
+ dev->nr_zones_exp_open--;
+ break;
+ case BLK_ZONE_COND_CLOSED:
+ dev->nr_zones_closed--;
+ break;
+ case BLK_ZONE_COND_FULL:
+ break;
default:
- /* Invalid zone condition */
return BLK_STS_IOERR;
}
+
+ zone->cond = BLK_ZONE_COND_EMPTY;
+ zone->wp = zone->start;
+
+ return BLK_STS_OK;
}
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
- unsigned int zone_no = null_zone_no(dev, sector);
- struct blk_zone *zone = &dev->zones[zone_no];
+ unsigned int zone_no;
+ struct blk_zone *zone;
+ blk_status_t ret;
size_t i;
- switch (op) {
- case REQ_OP_ZONE_RESET_ALL:
- for (i = 0; i < dev->nr_zones; i++) {
- if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
- continue;
- zone[i].cond = BLK_ZONE_COND_EMPTY;
- zone[i].wp = zone[i].start;
+ if (op == REQ_OP_ZONE_RESET_ALL) {
+ for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
+ null_lock_zone(dev, i);
+ zone = &dev->zones[i];
+ if (zone->cond != BLK_ZONE_COND_EMPTY) {
+ null_reset_zone(dev, zone);
+ trace_nullb_zone_op(cmd, i, zone->cond);
+ }
+ null_unlock_zone(dev, i);
}
- break;
- case REQ_OP_ZONE_RESET:
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
- return BLK_STS_IOERR;
+ return BLK_STS_OK;
+ }
- zone->cond = BLK_ZONE_COND_EMPTY;
- zone->wp = zone->start;
+ zone_no = null_zone_no(dev, sector);
+ zone = &dev->zones[zone_no];
+
+ null_lock_zone(dev, zone_no);
+
+ switch (op) {
+ case REQ_OP_ZONE_RESET:
+ ret = null_reset_zone(dev, zone);
break;
case REQ_OP_ZONE_OPEN:
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
- return BLK_STS_IOERR;
- if (zone->cond == BLK_ZONE_COND_FULL)
- return BLK_STS_IOERR;
-
- zone->cond = BLK_ZONE_COND_EXP_OPEN;
+ ret = null_open_zone(dev, zone);
break;
case REQ_OP_ZONE_CLOSE:
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
- return BLK_STS_IOERR;
- if (zone->cond == BLK_ZONE_COND_FULL)
- return BLK_STS_IOERR;
-
- if (zone->wp == zone->start)
- zone->cond = BLK_ZONE_COND_EMPTY;
- else
- zone->cond = BLK_ZONE_COND_CLOSED;
+ ret = null_close_zone(dev, zone);
break;
case REQ_OP_ZONE_FINISH:
- if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
- return BLK_STS_IOERR;
-
- zone->cond = BLK_ZONE_COND_FULL;
- zone->wp = zone->start + zone->len;
+ ret = null_finish_zone(dev, zone);
break;
default:
- return BLK_STS_NOTSUPP;
+ ret = BLK_STS_NOTSUPP;
+ break;
}
- trace_nullb_zone_op(cmd, zone_no, zone->cond);
- return BLK_STS_OK;
+ if (ret == BLK_STS_OK)
+ trace_nullb_zone_op(cmd, zone_no, zone->cond);
+
+ null_unlock_zone(dev, zone_no);
+
+ return ret;
}
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
sector_t sector, sector_t nr_sectors)
{
+ struct nullb_device *dev = cmd->nq->dev;
+ unsigned int zno = null_zone_no(dev, sector);
+ blk_status_t sts;
+
switch (op) {
case REQ_OP_WRITE:
- return null_zone_write(cmd, sector, nr_sectors, false);
+ sts = null_zone_write(cmd, sector, nr_sectors, false);
+ break;
case REQ_OP_ZONE_APPEND:
- return null_zone_write(cmd, sector, nr_sectors, true);
+ sts = null_zone_write(cmd, sector, nr_sectors, true);
+ break;
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
- return null_zone_mgmt(cmd, op, sector);
+ sts = null_zone_mgmt(cmd, op, sector);
+ break;
default:
- return null_process_cmd(cmd, op, sector, nr_sectors);
+ null_lock_zone(dev, zno);
+ sts = null_process_cmd(cmd, op, sector, nr_sectors);
+ null_unlock_zone(dev, zno);
}
+
+ return sts;
}
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 5124eca90e83..70da8b86ce58 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -233,7 +233,7 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
struct pcd_unit *cd = bdev->bd_disk->private_data;
int ret;
- check_disk_change(bdev);
+ bdev_check_media_change(bdev);
mutex_lock(&pcd_mutex);
ret = cdrom_open(&cd->info, bdev, mode);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index c0967507d085..a7af4f27b7c3 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -440,7 +440,7 @@ static void run_fsm(void)
pd_claimed = 1;
if (!pi_schedule_claimed(pi_current, run_fsm))
return;
- /* fall through */
+ fallthrough;
case 1:
pd_claimed = 2;
pi_current->proto->connect(pi_current);
@@ -465,7 +465,7 @@ static void run_fsm(void)
if (stop)
return;
}
- /* fall through */
+ fallthrough;
case Hold:
schedule_fsm();
return;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 4becc1efe775..467dbd06b7cd 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1082,65 +1082,6 @@ static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *p
}
}
-/*
- * recover a failed write, query for relocation if possible
- *
- * returns 1 if recovery is possible, or 0 if not
- *
- */
-static int pkt_start_recovery(struct packet_data *pkt)
-{
- /*
- * FIXME. We need help from the file system to implement
- * recovery handling.
- */
- return 0;
-#if 0
- struct request *rq = pkt->rq;
- struct pktcdvd_device *pd = rq->rq_disk->private_data;
- struct block_device *pkt_bdev;
- struct super_block *sb = NULL;
- unsigned long old_block, new_block;
- sector_t new_sector;
-
- pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
- if (pkt_bdev) {
- sb = get_super(pkt_bdev);
- bdput(pkt_bdev);
- }
-
- if (!sb)
- return 0;
-
- if (!sb->s_op->relocate_blocks)
- goto out;
-
- old_block = pkt->sector / (CD_FRAMESIZE >> 9);
- if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
- goto out;
-
- new_sector = new_block * (CD_FRAMESIZE >> 9);
- pkt->sector = new_sector;
-
- bio_reset(pkt->bio);
- bio_set_dev(pkt->bio, pd->bdev);
- bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
- pkt->bio->bi_iter.bi_sector = new_sector;
- pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
- pkt->bio->bi_vcnt = pkt->frames;
-
- pkt->bio->bi_end_io = pkt_end_io_packet_write;
- pkt->bio->bi_private = pkt;
-
- drop_super(sb);
- return 1;
-
-out:
- drop_super(sb);
- return 0;
-#endif
-}
-
static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
@@ -1357,12 +1298,8 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
break;
case PACKET_RECOVERY_STATE:
- if (pkt_start_recovery(pkt)) {
- pkt_start_write(pd, pkt);
- } else {
- pkt_dbg(2, pd, "No recovery possible\n");
- pkt_set_state(pkt, PACKET_FINISHED_STATE);
- }
+ pkt_dbg(2, pd, "No recovery possible\n");
+ pkt_set_state(pkt, PACKET_FINISHED_STATE);
break;
case PACKET_FINISHED_STATE:
@@ -2173,16 +2110,18 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
int ret;
long lba;
struct request_queue *q;
+ struct block_device *bdev;
/*
* We need to re-open the cdrom device without O_NONBLOCK to be able
* to read/write from/to it. It is already opened in O_NONBLOCK mode
- * so bdget() can't fail.
+ * so open should not fail.
*/
- bdget(pd->bdev->bd_dev);
- ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd);
- if (ret)
+ bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
+ if (IS_ERR(bdev)) {
+ ret = PTR_ERR(bdev);
goto out;
+ }
ret = pkt_get_last_written(pd, &lba);
if (ret) {
@@ -2192,7 +2131,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
set_capacity(pd->disk, lba << 2);
set_capacity(pd->bdev->bd_disk, lba << 2);
- bd_set_size(pd->bdev, (loff_t)lba << 11);
+ bd_set_nr_sectors(pd->bdev, lba << 2);
q = bdev_get_queue(pd->bdev);
if (write) {
@@ -2226,7 +2165,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
return 0;
out_putdev:
- blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
+ blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
return ret;
}
@@ -2563,7 +2502,6 @@ static int pkt_seq_show(struct seq_file *m, void *p)
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
int i;
- int ret = 0;
char b[BDEVNAME_SIZE];
struct block_device *bdev;
@@ -2586,12 +2524,9 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
}
- bdev = bdget(dev);
- if (!bdev)
- return -ENOMEM;
- ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
- if (ret)
- return ret;
+ bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
return -EINVAL;
@@ -2609,7 +2544,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
if (IS_ERR(pd->cdrw.thread)) {
pkt_err(pd, "can't start kernel thread\n");
- ret = -ENOMEM;
goto out_mem;
}
@@ -2621,7 +2555,7 @@ out_mem:
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
- return ret;
+ return -ENOMEM;
}
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
@@ -2641,7 +2575,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
*/
if (pd->refcnt == 1)
pkt_lock_door(pd, 0);
- /* fall through */
+ fallthrough;
/*
* forward selected CDROM ioctls to CD-ROM, for UDF
*/
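Context for the two pktcdvd hunks above that drop the bdget()/blkdev_get() pair: blkdev_get_by_dev() takes a dev_t directly and returns either a block_device pointer or an ERR_PTR, so the separate allocation step and its -ENOMEM failure path go away. A minimal sketch of the resulting open/close pattern, illustrative only and not part of the patch:

	struct block_device *bdev;

	/* Open by device number; on failure an ERR_PTR encodes the errno. */
	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/* ... use bdev ... */

	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);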
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d9c0e7d154f9..f84128abade3 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3293,7 +3293,7 @@ again:
case __RBD_OBJ_COPYUP_OBJECT_MAPS:
if (!pending_result_dec(&obj_req->pending, result))
return false;
- /* fall through */
+ fallthrough;
case RBD_OBJ_COPYUP_OBJECT_MAPS:
if (*result) {
rbd_warn(rbd_dev, "snap object map update failed: %d",
@@ -3312,7 +3312,7 @@ again:
case __RBD_OBJ_COPYUP_WRITE_OBJECT:
if (!pending_result_dec(&obj_req->pending, result))
return false;
- /* fall through */
+ fallthrough;
case RBD_OBJ_COPYUP_WRITE_OBJECT:
return true;
default:
@@ -3399,7 +3399,7 @@ again:
case __RBD_OBJ_WRITE_COPYUP:
if (!rbd_obj_advance_copyup(obj_req, result))
return false;
- /* fall through */
+ fallthrough;
case RBD_OBJ_WRITE_COPYUP:
if (*result) {
rbd_warn(rbd_dev, "copyup failed: %d", *result);
@@ -3592,7 +3592,7 @@ again:
case __RBD_IMG_OBJECT_REQUESTS:
if (!pending_result_dec(&img_req->pending, result))
return false;
- /* fall through */
+ fallthrough;
case RBD_IMG_OBJECT_REQUESTS:
return true;
default:
@@ -4010,10 +4010,10 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
ENTITY_NAME(lockers[0].id.name));
- ret = ceph_monc_blacklist_add(&client->monc,
+ ret = ceph_monc_blocklist_add(&client->monc,
&lockers[0].info.addr);
if (ret) {
- rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
+ rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d",
ENTITY_NAME(lockers[0].id.name), ret);
goto out;
}
@@ -4077,7 +4077,7 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
ret = rbd_try_lock(rbd_dev);
if (ret < 0) {
rbd_warn(rbd_dev, "failed to lock header: %d", ret);
- if (ret == -EBLACKLISTED)
+ if (ret == -EBLOCKLISTED)
goto out;
ret = 1; /* request lock anyway */
@@ -4613,7 +4613,7 @@ static void rbd_reregister_watch(struct work_struct *work)
ret = __rbd_register_watch(rbd_dev);
if (ret) {
rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
- if (ret != -EBLACKLISTED && ret != -ENOENT) {
+ if (ret != -EBLOCKLISTED && ret != -ENOENT) {
queue_delayed_work(rbd_dev->task_wq,
&rbd_dev->watch_dwork,
RBD_RETRY_DELAY);
@@ -4921,7 +4921,7 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
dout("setting size to %llu sectors", (unsigned long long)size);
set_capacity(rbd_dev->disk, size);
- revalidate_disk(rbd_dev->disk);
+ revalidate_disk_size(rbd_dev->disk, true);
}
}
@@ -5022,7 +5022,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
}
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
- q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
/*
* disk_release() expects a queue ref from add_disk() and will
@@ -5120,6 +5120,9 @@ static ssize_t rbd_config_info_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
return sprintf(buf, "%s\n", rbd_dev->config_info);
}
@@ -5231,6 +5234,9 @@ static ssize_t rbd_image_refresh(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
ret = rbd_dev_refresh(rbd_dev);
if (ret)
return ret;
@@ -7059,6 +7065,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
struct rbd_client *rbdc;
int rc;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (!try_module_get(THIS_MODULE))
return -ENODEV;
@@ -7209,6 +7218,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
bool force = false;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
dev_id = -1;
opt_buf[0] = '\0';
sscanf(buf, "%d %5s", &dev_id, opt_buf);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index cc6a4e2587ae..8b2411ccbda9 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -91,29 +91,18 @@ static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
dev->max_segments = BMAX_SEGMENTS;
- dev->max_hw_sectors = min_t(u32, dev->max_hw_sectors,
- le32_to_cpu(rsp->max_hw_sectors));
- dev->max_segments = min_t(u16, dev->max_segments,
- le16_to_cpu(rsp->max_segments));
-
return 0;
}
static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
size_t new_nsectors)
{
- int err = 0;
-
rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
dev->nsectors, new_nsectors);
dev->nsectors = new_nsectors;
set_capacity(dev->gd, dev->nsectors);
- err = revalidate_disk(dev->gd);
- if (err)
- rnbd_clt_err(dev,
- "Failed to change device size from %zu to %zu, err: %d\n",
- dev->nsectors, new_nsectors, err);
- return err;
+ revalidate_disk_size(dev->gd, true);
+ return 0;
}
static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
@@ -433,7 +422,7 @@ enum wait_type {
};
static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
- struct rnbd_iu *iu, struct kvec *vec, size_t nr,
+ struct rnbd_iu *iu, struct kvec *vec,
size_t len, struct scatterlist *sg, unsigned int sg_len,
void (*conf)(struct work_struct *work),
int *errno, enum wait_type wait)
@@ -447,7 +436,7 @@ static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
.conf_fn = msg_conf,
};
err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit,
- vec, nr, len, sg, sg_len);
+ vec, 1, len, sg, sg_len);
if (!err && wait) {
wait_event(iu->comp.wait, iu->comp.errno != INT_MAX);
*errno = iu->comp.errno;
@@ -492,7 +481,7 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
msg.device_id = cpu_to_le32(device_id);
WARN_ON(!rnbd_clt_get_dev(dev));
- err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 1, 0, NULL, 0,
+ err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 0, NULL, 0,
msg_close_conf, &errno, wait);
if (err) {
rnbd_clt_put_dev(dev);
@@ -581,7 +570,7 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
WARN_ON(!rnbd_clt_get_dev(dev));
err = send_usr_msg(sess->rtrs, READ, iu,
- &vec, 1, sizeof(*rsp), iu->sglist, 1,
+ &vec, sizeof(*rsp), iu->sglist, 1,
msg_open_conf, &errno, wait);
if (err) {
rnbd_clt_put_dev(dev);
@@ -635,7 +624,7 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
goto put_iu;
}
err = send_usr_msg(sess->rtrs, READ, iu,
- &vec, 1, sizeof(*rsp), iu->sglist, 1,
+ &vec, sizeof(*rsp), iu->sglist, 1,
msg_sess_info_conf, &errno, wait);
if (err) {
rnbd_clt_put_sess(sess);
@@ -1180,7 +1169,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
tag_set->queue_depth = sess->queue_depth;
tag_set->numa_node = NUMA_NO_NODE;
tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
- BLK_MQ_F_TAG_SHARED;
+ BLK_MQ_F_TAG_QUEUE_SHARED;
tag_set->cmd_size = sizeof(struct rnbd_iu);
tag_set->nr_hw_queues = num_online_cpus();
@@ -1520,7 +1509,7 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
"map_device: Failed to configure device, err: %d\n",
ret);
mutex_unlock(&dev->lock);
- goto del_dev;
+ goto send_close;
}
rnbd_clt_info(dev,
@@ -1539,6 +1528,8 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
return dev;
+send_close:
+ send_msg_close(dev, dev->device_id, WAIT);
del_dev:
delete_dev(dev);
put_dev:
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 0fb94843a495..e1bc8b4cd592 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -148,7 +148,8 @@ static int process_rdma(struct rtrs_srv *sess,
/* Generate bio with pages pointing to the rdma buffer */
bio = rnbd_bio_map_kern(data, sess_dev->rnbd_dev->ibd_bio_set, datalen, GFP_KERNEL);
if (IS_ERR(bio)) {
- rnbd_srv_err(sess_dev, "Failed to generate bio, err: %ld\n", PTR_ERR(bio));
+ err = PTR_ERR(bio);
+ rnbd_srv_err(sess_dev, "Failed to generate bio, err: %d\n", err);
goto sess_dev_put;
}
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 7e261224ff10..63f549889f87 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -425,7 +425,7 @@ static void card_state_change(struct rsxx_cardinfo *card,
* Fall through so the DMA devices can be attached and
* the user can attempt to pull off their data.
*/
- /* fall through */
+ fallthrough;
case CARD_STATE_GOOD:
st = rsxx_get_card_size8(card, &card->size8);
if (st)
@@ -439,7 +439,7 @@ static void card_state_change(struct rsxx_cardinfo *card,
case CARD_STATE_FAULT:
dev_crit(CARD_TO_DEV(card),
"Hardware Fault reported!\n");
- /* Fall through. */
+ fallthrough;
/* Everything else, detach DMA interface if it's attached. */
case CARD_STATE_SHUTDOWN:
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 3a476dc1d14f..a962b4551bed 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -25,7 +25,6 @@
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
-#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
@@ -1436,7 +1435,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
blk_mq_requeue_request(req, true);
break;
}
- /* fall through */
+ fallthrough;
case SKD_CHECK_STATUS_REPORT_ERROR:
default:
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index dd34504382e5..52dd1efa00f9 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -638,7 +638,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
return 0;
if (mode & (FMODE_READ|FMODE_WRITE)) {
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev) && fs->disk_in)
+ fs->ejected = 0;
if ((mode & FMODE_WRITE) && fs->write_protected) {
err = -EROFS;
goto out;
@@ -735,24 +736,6 @@ static unsigned int floppy_check_events(struct gendisk *disk,
return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
}
-static int floppy_revalidate(struct gendisk *disk)
-{
- struct floppy_state *fs = disk->private_data;
- struct swim __iomem *base = fs->swd->base;
-
- swim_drive(base, fs->location);
-
- if (fs->ejected)
- setup_medium(fs);
-
- if (!fs->disk_in)
- swim_motor(base, OFF);
- else
- fs->ejected = 0;
-
- return !fs->disk_in;
-}
-
static const struct block_device_operations floppy_fops = {
.owner = THIS_MODULE,
.open = floppy_unlocked_open,
@@ -760,7 +743,6 @@ static const struct block_device_operations floppy_fops = {
.ioctl = floppy_ioctl,
.getgeo = floppy_getgeo,
.check_events = floppy_check_events,
- .revalidate_disk = floppy_revalidate,
};
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
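As in the other floppy-style drivers touched by this series, swim stops relying on check_disk_change() plus a ->revalidate_disk hook: bdev_check_media_change() only reports whether a media-change event was pending, and the opener performs any revalidation itself. A hedged sketch of the convention these hunks follow, condensed from the open-path change above:

	if (mode & (FMODE_READ | FMODE_WRITE)) {
		/* Returns true when a media-change event was pending. */
		if (bdev_check_media_change(bdev) && fs->disk_in)
			fs->ejected = 0;	/* driver-specific revalidation */
	}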
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index aa77eb5fb7de..c2d922d125e2 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -945,7 +945,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (err == 0 && (mode & FMODE_NDELAY) == 0
&& (mode & (FMODE_READ|FMODE_WRITE))) {
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev))
+ floppy_revalidate(bdev->bd_disk);
if (fs->ejected)
err = -ENXIO;
}
@@ -1055,7 +1056,6 @@ static const struct block_device_operations floppy_fops = {
.release = floppy_release,
.ioctl = floppy_ioctl,
.check_events = floppy_check_events,
- .revalidate_disk= floppy_revalidate,
};
static const struct blk_mq_ops swim3_mq_ops = {
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 63b213e00b37..a314b9382442 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -126,16 +126,31 @@ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
if (!range)
return -ENOMEM;
- __rq_for_each_bio(bio, req) {
- u64 sector = bio->bi_iter.bi_sector;
- u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
-
- range[n].flags = cpu_to_le32(flags);
- range[n].num_sectors = cpu_to_le32(num_sectors);
- range[n].sector = cpu_to_le64(sector);
- n++;
+ /*
+ * Single max discard segment means multi-range discard isn't
+ * supported, and block layer only runs contiguity merge like
+ * normal RW request. So we can't rely on bio for retrieving
+ * each range info.
+ */
+ if (queue_max_discard_segments(req->q) == 1) {
+ range[0].flags = cpu_to_le32(flags);
+ range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
+ range[0].sector = cpu_to_le64(blk_rq_pos(req));
+ n = 1;
+ } else {
+ __rq_for_each_bio(bio, req) {
+ u64 sector = bio->bi_iter.bi_sector;
+ u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
+
+ range[n].flags = cpu_to_le32(flags);
+ range[n].num_sectors = cpu_to_le32(num_sectors);
+ range[n].sector = cpu_to_le64(sector);
+ n++;
+ }
}
+ WARN_ON_ONCE(n != segments);
+
req->special_vec.bv_page = virt_to_page(range);
req->special_vec.bv_offset = offset_in_page(range);
req->special_vec.bv_len = sizeof(*range) * segments;
@@ -583,7 +598,7 @@ static void virtblk_update_cache_mode(struct virtio_device *vdev)
struct virtio_blk *vblk = vdev->priv;
blk_queue_write_cache(vblk->disk->queue, writeback, false);
- revalidate_disk(vblk->disk);
+ revalidate_disk_size(vblk->disk, true);
}
static const char *const virtblk_cache_types[] = {
@@ -631,7 +646,7 @@ static struct attribute *virtblk_attrs[] = {
static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
struct virtio_device *vdev = vblk->vdev;
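The discard rework earlier in this file keys off queue_max_discard_segments(): when a device advertises only one discard segment, the block layer may have merged contiguous discard bios into a single request, so the request must be described with blk_rq_pos()/blk_rq_sectors() rather than by iterating the bios. A sketch of where that limit would come from in a driver's usual queue setup (illustrative, not taken from this patch):

	/* A device that accepts a single discard range per request says so
	 * here; the block layer then merges discard bios by contiguity only.
	 */
	blk_queue_max_discard_segments(q, 1);
	blk_queue_max_discard_sectors(q, UINT_MAX);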
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index c2f71265af4b..501e9dacfff9 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -201,7 +201,7 @@ static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
-static int do_block_io_op(struct xen_blkif_ring *ring);
+static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
struct blkif_request *req,
struct pending_req *pending_req);
@@ -612,6 +612,8 @@ int xen_blkif_schedule(void *arg)
struct xen_vbd *vbd = &blkif->vbd;
unsigned long timeout;
int ret;
+ bool do_eoi;
+ unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
set_freezable();
while (!kthread_should_stop()) {
@@ -636,16 +638,23 @@ int xen_blkif_schedule(void *arg)
if (timeout == 0)
goto purge_gnt_list;
+ do_eoi = ring->waiting_reqs;
+
ring->waiting_reqs = 0;
smp_mb(); /* clear flag *before* checking for work */
- ret = do_block_io_op(ring);
+ ret = do_block_io_op(ring, &eoi_flags);
if (ret > 0)
ring->waiting_reqs = 1;
if (ret == -EACCES)
wait_event_interruptible(ring->shutdown_wq,
kthread_should_stop());
+ if (do_eoi && !ring->waiting_reqs) {
+ xen_irq_lateeoi(ring->irq, eoi_flags);
+ eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
+ }
+
purge_gnt_list:
if (blkif->vbd.feature_gnt_persistent &&
time_after(jiffies, ring->next_lru)) {
@@ -1121,7 +1130,7 @@ static void end_block_io_op(struct bio *bio)
* and transmute it to the block API to hand it over to the proper block disk.
*/
static int
-__do_block_io_op(struct xen_blkif_ring *ring)
+__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
union blkif_back_rings *blk_rings = &ring->blk_rings;
struct blkif_request req;
@@ -1144,6 +1153,9 @@ __do_block_io_op(struct xen_blkif_ring *ring)
if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
break;
+ /* We've seen a request, so clear spurious eoi flag. */
+ *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
+
if (kthread_should_stop()) {
more_to_do = 1;
break;
@@ -1202,13 +1214,13 @@ done:
}
static int
-do_block_io_op(struct xen_blkif_ring *ring)
+do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
union blkif_back_rings *blk_rings = &ring->blk_rings;
int more_to_do;
do {
- more_to_do = __do_block_io_op(ring);
+ more_to_do = __do_block_io_op(ring, eoi_flags);
if (more_to_do)
break;
@@ -1260,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
- /* fall through */
+ fallthrough;
case BLKIF_OP_FLUSH_DISKCACHE:
ring->st_f_req++;
operation = REQ_OP_WRITE;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 42944d41aea0..f5705569e2a7 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -246,9 +246,8 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
if (req_prod - rsp_prod > size)
goto fail;
- err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
- xen_blkif_be_int, 0,
- "blkif-backend", ring);
+ err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid,
+ evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
if (err < 0)
goto fail;
ring->irq = err;
@@ -474,6 +473,12 @@ static void xen_vbd_free(struct xen_vbd *vbd)
vbd->bdev = NULL;
}
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent,
+ "Enables the persistent grants feature");
+
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
unsigned major, unsigned minor, int readonly,
int cdrom)
@@ -519,6 +524,8 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (q && blk_queue_secure_erase(q))
vbd->discard_secure = true;
+ vbd->feature_gnt_persistent = feature_persistent;
+
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
@@ -843,7 +850,7 @@ static void frontend_changed(struct xenbus_device *dev,
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
- /* fall through */
+ fallthrough;
/* if not online */
case XenbusStateUnknown:
/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
@@ -906,7 +913,8 @@ again:
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
- err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u", 1);
+ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
+ be->blkif->vbd.feature_gnt_persistent);
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
dev->nodename);
@@ -1067,7 +1075,6 @@ static int connect_ring(struct backend_info *be)
{
struct xenbus_device *dev = be->dev;
struct xen_blkif *blkif = be->blkif;
- unsigned int pers_grants;
char protocol[64] = "";
int err, i;
char *xspath;
@@ -1093,9 +1100,11 @@ static int connect_ring(struct backend_info *be)
xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
return -ENOSYS;
}
- pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
- 0);
- blkif->vbd.feature_gnt_persistent = pers_grants;
+ if (blkif->vbd.feature_gnt_persistent)
+ blkif->vbd.feature_gnt_persistent =
+ xenbus_read_unsigned(dev->otherend,
+ "feature-persistent", 0);
+
blkif->vbd.overflow_max_grants = 0;
/*
@@ -1118,7 +1127,7 @@ static int connect_ring(struct backend_info *be)
pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
blkif->nr_rings, blkif->blk_protocol, protocol,
- pers_grants ? "persistent grants" : "");
+ blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
ring_page_order = xenbus_read_unsigned(dev->otherend,
"ring-page-order", 0);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3bb3dd8da9b0..48629d3433b4 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1403,7 +1403,6 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp)
case BLKIF_RSP_EOPNOTSUPP:
return REQ_EOPNOTSUPP;
case BLKIF_RSP_ERROR:
- /* Fallthrough. */
default:
return REQ_ERROR;
}
@@ -1643,7 +1642,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
info->feature_flush = 0;
xlvbd_flush(info);
}
- /* fall through */
+ fallthrough;
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
if (unlikely(bret->status != BLKIF_RSP_OKAY))
@@ -1867,8 +1866,8 @@ again:
message = "writing protocol";
goto abort_transaction;
}
- err = xenbus_printf(xbt, dev->nodename,
- "feature-persistent", "%u", 1);
+ err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
+ info->feature_persistent);
if (err)
dev_warn(&dev->dev,
"writing persistent grants feature to xenbus");
@@ -1942,6 +1941,13 @@ static int negotiate_mq(struct blkfront_info *info)
}
return 0;
}
+
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent,
+ "Enables the persistent grants feature");
+
/**
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffer for communication with the backend, and
@@ -2008,6 +2014,8 @@ static int blkfront_probe(struct xenbus_device *dev,
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
+ info->feature_persistent = feature_persistent;
+
/* Front end dir is a number, which is used as the id. */
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
dev_set_drvdata(&dev->dev, info);
@@ -2317,9 +2325,10 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
- info->feature_persistent =
- !!xenbus_read_unsigned(info->xbdev->otherend,
- "feature-persistent", 0);
+ if (info->feature_persistent)
+ info->feature_persistent =
+ !!xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-persistent", 0);
indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
"feature-max-indirect-segments", 0);
@@ -2484,7 +2493,7 @@ static void blkback_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through */
+ fallthrough;
case XenbusStateClosing:
if (info)
blkfront_closing(info);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 5d8e0ab3f054..eb8ef65778c3 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -443,22 +443,27 @@ static void ace_fix_driveid(u16 *id)
#define ACE_FSM_NUM_STATES 11
/* Set flag to exit FSM loop and reschedule tasklet */
-static inline void ace_fsm_yield(struct ace_device *ace)
+static inline void ace_fsm_yieldpoll(struct ace_device *ace)
{
- dev_dbg(ace->dev, "ace_fsm_yield()\n");
tasklet_schedule(&ace->fsm_tasklet);
ace->fsm_continue_flag = 0;
}
+static inline void ace_fsm_yield(struct ace_device *ace)
+{
+ dev_dbg(ace->dev, "%s()\n", __func__);
+ ace_fsm_yieldpoll(ace);
+}
+
/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
static inline void ace_fsm_yieldirq(struct ace_device *ace)
{
dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
- if (!ace->irq)
- /* No IRQ assigned, so need to poll */
- tasklet_schedule(&ace->fsm_tasklet);
- ace->fsm_continue_flag = 0;
+ if (ace->irq > 0)
+ ace->fsm_continue_flag = 0;
+ else
+ ace_fsm_yieldpoll(ace);
}
static bool ace_has_next_request(struct request_queue *q)
@@ -888,26 +893,20 @@ static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing)
return ace->media_change ? DISK_EVENT_MEDIA_CHANGE : 0;
}
-static int ace_revalidate_disk(struct gendisk *gd)
+static void ace_media_changed(struct ace_device *ace)
{
- struct ace_device *ace = gd->private_data;
unsigned long flags;
- dev_dbg(ace->dev, "ace_revalidate_disk()\n");
+ dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");
- if (ace->media_change) {
- dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");
-
- spin_lock_irqsave(&ace->lock, flags);
- ace->id_req_count++;
- spin_unlock_irqrestore(&ace->lock, flags);
+ spin_lock_irqsave(&ace->lock, flags);
+ ace->id_req_count++;
+ spin_unlock_irqrestore(&ace->lock, flags);
- tasklet_schedule(&ace->fsm_tasklet);
- wait_for_completion(&ace->id_completion);
- }
+ tasklet_schedule(&ace->fsm_tasklet);
+ wait_for_completion(&ace->id_completion);
dev_dbg(ace->dev, "revalidate complete\n");
- return ace->id_result;
}
static int ace_open(struct block_device *bdev, fmode_t mode)
@@ -922,7 +921,8 @@ static int ace_open(struct block_device *bdev, fmode_t mode)
ace->users++;
spin_unlock_irqrestore(&ace->lock, flags);
- check_disk_change(bdev);
+ if (bdev_check_media_change(bdev) && ace->media_change)
+ ace_media_changed(ace);
mutex_unlock(&xsysace_mutex);
return 0;
@@ -966,7 +966,6 @@ static const struct block_device_operations ace_fops = {
.open = ace_open,
.release = ace_release,
.check_events = ace_check_events,
- .revalidate_disk = ace_revalidate_disk,
.getgeo = ace_getgeo,
};
@@ -1059,12 +1058,12 @@ static int ace_setup(struct ace_device *ace)
ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
/* Now we can hook up the irq handler */
- if (ace->irq) {
+ if (ace->irq > 0) {
rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
if (rc) {
/* Failure - fall back to polled mode */
dev_err(ace->dev, "request_irq failed\n");
- ace->irq = 0;
+ ace->irq = rc;
}
}
@@ -1080,7 +1079,7 @@ static int ace_setup(struct ace_device *ace)
(unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);
ace->media_change = 1;
- ace_revalidate_disk(ace->gd);
+ ace_media_changed(ace);
/* Make the sysace device 'live' */
add_disk(ace->gd);
@@ -1116,7 +1115,7 @@ static void ace_teardown(struct ace_device *ace)
tasklet_kill(&ace->fsm_tasklet);
- if (ace->irq)
+ if (ace->irq > 0)
free_irq(ace->irq, ace);
iounmap(ace->baseaddr);
@@ -1129,11 +1128,6 @@ static int ace_alloc(struct device *dev, int id, resource_size_t physaddr,
int rc;
dev_dbg(dev, "ace_alloc(%p)\n", dev);
- if (!physaddr) {
- rc = -ENODEV;
- goto err_noreg;
- }
-
/* Allocate and initialize the ace device structure */
ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
if (!ace) {
@@ -1159,7 +1153,6 @@ err_setup:
dev_set_drvdata(dev, NULL);
kfree(ace);
err_alloc:
-err_noreg:
dev_err(dev, "could not initialize device, err=%i\n", rc);
return rc;
}
@@ -1182,10 +1175,11 @@ static void ace_free(struct device *dev)
static int ace_probe(struct platform_device *dev)
{
- resource_size_t physaddr = 0;
int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
+ resource_size_t physaddr;
+ struct resource *res;
u32 id = dev->id;
- int irq = 0;
+ int irq;
int i;
dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
@@ -1196,12 +1190,15 @@ static int ace_probe(struct platform_device *dev)
if (of_find_property(dev->dev.of_node, "8-bit", NULL))
bus_width = ACE_BUS_WIDTH_8;
- for (i = 0; i < dev->num_resources; i++) {
- if (dev->resource[i].flags & IORESOURCE_MEM)
- physaddr = dev->resource[i].start;
- if (dev->resource[i].flags & IORESOURCE_IRQ)
- irq = dev->resource[i].start;
- }
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ physaddr = res->start;
+ if (!physaddr)
+ return -ENODEV;
+
+ irq = platform_get_irq_optional(dev, 0);
/* Call the bus-independent setup code */
return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9100ac36670a..1b697208d661 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -52,6 +52,9 @@ static unsigned int num_devices = 1;
*/
static size_t huge_class_size;
+static const struct block_device_operations zram_devops;
+static const struct block_device_operations zram_wb_devops;
+
static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
u32 index, int offset, struct bio *bio);
@@ -408,8 +411,7 @@ static void reset_bdev(struct zram *zram)
zram->backing_dev = NULL;
zram->old_block_size = 0;
zram->bdev = NULL;
- zram->disk->queue->backing_dev_info->capabilities |=
- BDI_CAP_SYNCHRONOUS_IO;
+ zram->disk->fops = &zram_devops;
kvfree(zram->bitmap);
zram->bitmap = NULL;
}
@@ -491,9 +493,10 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- bdev = bdgrab(I_BDEV(inode));
- err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
- if (err < 0) {
+ bdev = blkdev_get_by_dev(inode->i_rdev,
+ FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
+ if (IS_ERR(bdev)) {
+ err = PTR_ERR(bdev);
bdev = NULL;
goto out;
}
@@ -528,8 +531,7 @@ static ssize_t backing_dev_store(struct device *dev,
* freely but in fact, IO is going on so finally could cause
* use-after-free when the IO is really done.
*/
- zram->disk->queue->backing_dev_info->capabilities &=
- ~BDI_CAP_SYNCHRONOUS_IO;
+ zram->disk->fops = &zram_wb_devops;
up_write(&zram->init_lock);
pr_info("setup backing device %s\n", file_name);
@@ -1216,10 +1218,11 @@ out:
static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
struct bio *bio, bool partial_io)
{
- int ret;
+ struct zcomp_strm *zstrm;
unsigned long handle;
unsigned int size;
void *src, *dst;
+ int ret;
zram_slot_lock(zram, index);
if (zram_test_flag(zram, index, ZRAM_WB)) {
@@ -1250,6 +1253,9 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
size = zram_get_obj_size(zram, index);
+ if (size != PAGE_SIZE)
+ zstrm = zcomp_stream_get(zram->comp);
+
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
dst = kmap_atomic(page);
@@ -1257,8 +1263,6 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
kunmap_atomic(dst);
ret = 0;
} else {
- struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
-
dst = kmap_atomic(page);
ret = zcomp_decompress(zstrm, src, size, dst);
kunmap_atomic(dst);
@@ -1268,7 +1272,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
zram_slot_unlock(zram, index);
/* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret))
+ if (WARN_ON(ret))
pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
return ret;
@@ -1739,7 +1743,7 @@ static ssize_t disksize_store(struct device *dev,
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- revalidate_disk(zram->disk);
+ revalidate_disk_size(zram->disk, true);
up_write(&zram->init_lock);
return len;
@@ -1786,7 +1790,7 @@ static ssize_t reset_store(struct device *dev,
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
zram_reset_device(zram);
- revalidate_disk(zram->disk);
+ revalidate_disk_size(zram->disk, true);
bdput(bdev);
mutex_lock(&bdev->bd_mutex);
@@ -1819,6 +1823,13 @@ static const struct block_device_operations zram_devops = {
.owner = THIS_MODULE
};
+static const struct block_device_operations zram_wb_devops = {
+ .open = zram_open,
+ .submit_bio = zram_submit_bio,
+ .swap_slot_free_notify = zram_slot_free_notify,
+ .owner = THIS_MODULE
+};
+
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
@@ -1946,8 +1957,7 @@ static int zram_add(void)
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
- zram->disk->queue->backing_dev_info->capabilities |=
- (BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));