aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorStefan Hajnoczi <stefanha@redhat.com>2024-01-19 08:57:46 -0500
committerKevin Wolf <kwolf@redhat.com>2024-01-26 11:16:58 +0100
commit71ee0cdd14cc01a8b51aa4e9577dd0a1bb2f8e19 (patch)
tree2a27b4df1f2b3ed1ed2d52ec46f106ec498e523e
parentvirtio-blk: rename dataplane to ioeventfd (diff)
downloadqemu-71ee0cdd14cc01a8b51aa4e9577dd0a1bb2f8e19.tar.xz
qemu-71ee0cdd14cc01a8b51aa4e9577dd0a1bb2f8e19.zip
virtio-blk: restart s->rq reqs in vq AioContexts
A virtio-blk device with the iothread-vq-mapping parameter has per-virtqueue AioContexts. It is not thread-safe to process s->rq requests in the BlockBackend AioContext since that may be different from the virtqueue's AioContext to which this request belongs. The code currently races and could crash. Adapt virtio_blk_dma_restart_cb() to first split s->rq into per-vq lists and then schedule a BH in each vq's AioContext as necessary. This way requests are safely processed in their vq's AioContext. Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> Message-ID: <20240119135748.270944-5-stefanha@redhat.com> Reviewed-by: Kevin Wolf <kwolf@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
-rw-r--r--hw/block/virtio-blk.c44
1 files changed, 33 insertions, 11 deletions
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index e342cb2cfb..4525988d92 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -1156,16 +1156,11 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
static void virtio_blk_dma_restart_bh(void *opaque)
{
- VirtIOBlock *s = opaque;
+ VirtIOBlockReq *req = opaque;
+ VirtIOBlock *s = req->dev; /* we're called with at least one request */
- VirtIOBlockReq *req;
MultiReqBuffer mrb = {};
- WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
- req = s->rq;
- s->rq = NULL;
- }
-
while (req) {
VirtIOBlockReq *next = req->next;
if (virtio_blk_handle_request(req, &mrb)) {
@@ -1195,16 +1190,43 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running,
RunState state)
{
VirtIOBlock *s = opaque;
+ uint16_t num_queues = s->conf.num_queues;
if (!running) {
return;
}
- /* Paired with dec in virtio_blk_dma_restart_bh() */
- blk_inc_in_flight(s->conf.conf.blk);
+ /* Split the device-wide s->rq request list into per-vq request lists */
+ g_autofree VirtIOBlockReq **vq_rq = g_new0(VirtIOBlockReq *, num_queues);
+ VirtIOBlockReq *rq;
+
+ WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
+ rq = s->rq;
+ s->rq = NULL;
+ }
+
+ while (rq) {
+ VirtIOBlockReq *next = rq->next;
+ uint16_t idx = virtio_get_queue_index(rq->vq);
+
+ rq->next = vq_rq[idx];
+ vq_rq[idx] = rq;
+ rq = next;
+ }
- aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.conf.blk),
- virtio_blk_dma_restart_bh, s);
+ /* Schedule a BH to submit the requests in each vq's AioContext */
+ for (uint16_t i = 0; i < num_queues; i++) {
+ if (!vq_rq[i]) {
+ continue;
+ }
+
+ /* Paired with dec in virtio_blk_dma_restart_bh() */
+ blk_inc_in_flight(s->conf.conf.blk);
+
+ aio_bh_schedule_oneshot(s->vq_aio_context[i],
+ virtio_blk_dma_restart_bh,
+ vq_rq[i]);
+ }
}
static void virtio_blk_reset(VirtIODevice *vdev)