Diffstat (limited to 'drivers/virtio/virtio_ring.c')
-rw-r--r--  drivers/virtio/virtio_ring.c | 778
1 file changed, 567 insertions(+), 211 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 643ca779fcc6..4620e9d79dde 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -85,6 +85,71 @@ struct vring_desc_extra {
u16 next; /* The next desc state in a list. */
};
+struct vring_virtqueue_split {
+ /* Actual memory layout for this queue. */
+ struct vring vring;
+
+ /* Last written value to avail->flags */
+ u16 avail_flags_shadow;
+
+ /*
+ * Last written value to avail->idx in
+ * guest byte order.
+ */
+ u16 avail_idx_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_split *desc_state;
+ struct vring_desc_extra *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t queue_dma_addr;
+ size_t queue_size_in_bytes;
+
+ /*
+ * Creation-time parameters, kept so a new vring of the same shape
+ * can be created later (e.g. by virtqueue_resize()).
+ */
+ u32 vring_align;
+ bool may_reduce_num;
+};
+
+struct vring_virtqueue_packed {
+ /* Actual memory layout for this queue. */
+ struct {
+ unsigned int num;
+ struct vring_packed_desc *desc;
+ struct vring_packed_desc_event *driver;
+ struct vring_packed_desc_event *device;
+ } vring;
+
+ /* Driver ring wrap counter. */
+ bool avail_wrap_counter;
+
+ /* Avail used flags. */
+ u16 avail_used_flags;
+
+ /* Index of the next avail descriptor. */
+ u16 next_avail_idx;
+
+ /*
+ * Last written value to driver->flags in
+ * guest byte order.
+ */
+ u16 event_flags_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_packed *desc_state;
+ struct vring_desc_extra *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t ring_dma_addr;
+ dma_addr_t driver_event_dma_addr;
+ dma_addr_t device_event_dma_addr;
+ size_t ring_size_in_bytes;
+ size_t event_size_in_bytes;
+};
+
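Factoring the split and packed bookkeeping out into named structs is what enables the resize paths added later in this patch: a complete replacement ring can be staged in a stack variable and committed with a single struct assignment. A minimal sketch of the pattern, using the helpers this patch introduces below:

    struct vring_virtqueue_split vring_split = {};

    /* Stage the new ring off to the side; the live vq is untouched. */
    if (vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
                                may_reduce_num))
            return NULL;

    /* Publish it with one struct copy (see virtqueue_vring_attach_split()). */
    vq->split = vring_split;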
struct vring_virtqueue {
struct virtqueue vq;
@@ -124,64 +189,10 @@ struct vring_virtqueue {
union {
/* Available for split ring */
- struct {
- /* Actual memory layout for this queue. */
- struct vring vring;
-
- /* Last written value to avail->flags */
- u16 avail_flags_shadow;
-
- /*
- * Last written value to avail->idx in
- * guest byte order.
- */
- u16 avail_idx_shadow;
-
- /* Per-descriptor state. */
- struct vring_desc_state_split *desc_state;
- struct vring_desc_extra *desc_extra;
-
- /* DMA address and size information */
- dma_addr_t queue_dma_addr;
- size_t queue_size_in_bytes;
- } split;
+ struct vring_virtqueue_split split;
/* Available for packed ring */
- struct {
- /* Actual memory layout for this queue. */
- struct {
- unsigned int num;
- struct vring_packed_desc *desc;
- struct vring_packed_desc_event *driver;
- struct vring_packed_desc_event *device;
- } vring;
-
- /* Driver ring wrap counter. */
- bool avail_wrap_counter;
-
- /* Avail used flags. */
- u16 avail_used_flags;
-
- /* Index of the next avail descriptor. */
- u16 next_avail_idx;
-
- /*
- * Last written value to driver->flags in
- * guest byte order.
- */
- u16 event_flags_shadow;
-
- /* Per-descriptor state. */
- struct vring_desc_state_packed *desc_state;
- struct vring_desc_extra *desc_extra;
-
- /* DMA address and size information */
- dma_addr_t ring_dma_addr;
- dma_addr_t driver_event_dma_addr;
- dma_addr_t device_event_dma_addr;
- size_t ring_size_in_bytes;
- size_t event_size_in_bytes;
- } packed;
+ struct vring_virtqueue_packed packed;
};
/* How to notify other side. FIXME: commonalize hcalls! */
@@ -200,6 +211,16 @@ struct vring_virtqueue {
#endif
};
+static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name);
+static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
+static void vring_free(struct virtqueue *_vq);
/*
* Helpers.
@@ -364,6 +385,24 @@ static int vring_mapping_error(const struct vring_virtqueue *vq,
return dma_mapping_error(vring_dma_dev(vq), addr);
}
+static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
+{
+ vq->vq.num_free = num;
+
+ if (vq->packed_ring)
+ vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
+ else
+ vq->last_used_idx = 0;
+
+ vq->event_triggered = false;
+ vq->num_added = 0;
+
+#ifdef DEBUG
+ vq->in_use = false;
+ vq->last_add_time_valid = false;
+#endif
+}
+
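For the packed ring, last_used_idx folds the used-side wrap counter into bit VRING_PACKED_EVENT_F_WRAP_CTR (bit 15 in the uapi header), leaving the low bits for the index itself. A sketch of how the two halves decode (helper names here are illustrative, not part of this patch):

    static inline u16 packed_last_used(u16 last_used_idx)
    {
            return last_used_idx & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
    }

    static inline bool packed_used_wrap_counter(u16 last_used_idx)
    {
            return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
    }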
/*
* Split ring specific functions - *_split().
@@ -907,28 +946,107 @@ static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
return NULL;
}
-static struct virtqueue *vring_create_virtqueue_split(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
+ struct vring_virtqueue *vq)
+{
+ struct virtio_device *vdev;
+
+ vdev = vq->vq.vdev;
+
+ vring_split->avail_flags_shadow = 0;
+ vring_split->avail_idx_shadow = 0;
+
+ /* No callback? Tell other side not to bother us. */
+ if (!vq->vq.callback) {
+ vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+ if (!vq->event)
+ vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
+ vring_split->avail_flags_shadow);
+ }
+}
+
+static void virtqueue_reinit_split(struct vring_virtqueue *vq)
+{
+ int num;
+
+ num = vq->split.vring.num;
+
+ vq->split.vring.avail->flags = 0;
+ vq->split.vring.avail->idx = 0;
+
+ /* reset avail event */
+ vq->split.vring.avail->ring[num] = 0;
+
+ vq->split.vring.used->flags = 0;
+ vq->split.vring.used->idx = 0;
+
+ /* reset used event */
+ *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;
+
+ virtqueue_init(vq, num);
+
+ virtqueue_vring_init_split(&vq->split, vq);
+}
+
+static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
+ struct vring_virtqueue_split *vring_split)
+{
+ vq->split = *vring_split;
+
+ /* Put everything in free lists. */
+ vq->free_head = 0;
+}
+
+static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
+{
+ struct vring_desc_state_split *state;
+ struct vring_desc_extra *extra;
+ u32 num = vring_split->vring.num;
+
+ state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL);
+ if (!state)
+ goto err_state;
+
+ extra = vring_alloc_desc_extra(num);
+ if (!extra)
+ goto err_extra;
+
+ memset(state, 0, num * sizeof(struct vring_desc_state_split));
+
+ vring_split->desc_state = state;
+ vring_split->desc_extra = extra;
+ return 0;
+
+err_extra:
+ kfree(state);
+err_state:
+ return -ENOMEM;
+}
+
+static void vring_free_split(struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev)
+{
+ vring_free_queue(vdev, vring_split->queue_size_in_bytes,
+ vring_split->vring.desc,
+ vring_split->queue_dma_addr);
+
+ kfree(vring_split->desc_state);
+ kfree(vring_split->desc_extra);
+}
+
+static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ u32 num,
+ unsigned int vring_align,
+ bool may_reduce_num)
{
- struct virtqueue *vq;
void *queue = NULL;
dma_addr_t dma_addr;
- size_t queue_size_in_bytes;
- struct vring vring;
/* We assume num is a power of 2. */
if (num & (num - 1)) {
dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
- return NULL;
+ return -EINVAL;
}
/* TODO: allocate each queue chunk individually */
@@ -939,11 +1057,11 @@ static struct virtqueue *vring_create_virtqueue_split(
if (queue)
break;
if (!may_reduce_num)
- return NULL;
+ return -ENOMEM;
}
if (!num)
- return NULL;
+ return -ENOMEM;
if (!queue) {
/* Try to get a single page. You are my only hope! */
@@ -951,26 +1069,85 @@ static struct virtqueue *vring_create_virtqueue_split(
&dma_addr, GFP_KERNEL|__GFP_ZERO);
}
if (!queue)
- return NULL;
+ return -ENOMEM;
+
+ vring_init(&vring_split->vring, num, queue, vring_align);
+
+ vring_split->queue_dma_addr = dma_addr;
+ vring_split->queue_size_in_bytes = vring_size(num, vring_align);
+
+ vring_split->vring_align = vring_align;
+ vring_split->may_reduce_num = may_reduce_num;
+
+ return 0;
+}
+
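The hunk above elides the allocation retry loop between the power-of-2 check and the single-page fallback; reconstructed from the surrounding context lines, its shape is roughly the following (treat it as a sketch):

    for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
            queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
                                      &dma_addr,
                                      GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
            if (queue)
                    break;
            if (!may_reduce_num)
                    return -ENOMEM;
    }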
+static struct virtqueue *vring_create_virtqueue_split(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct vring_virtqueue_split vring_split = {};
+ struct virtqueue *vq;
+ int err;
- queue_size_in_bytes = vring_size(num, vring_align);
- vring_init(&vring, num, queue, vring_align);
+ err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
+ may_reduce_num);
+ if (err)
+ return NULL;
- vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
+ vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name);
if (!vq) {
- vring_free_queue(vdev, queue_size_in_bytes, queue,
- dma_addr);
+ vring_free_split(&vring_split, vdev);
return NULL;
}
- to_vvq(vq)->split.queue_dma_addr = dma_addr;
- to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
to_vvq(vq)->we_own_ring = true;
return vq;
}
+static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
+{
+ struct vring_virtqueue_split vring_split = {};
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+ int err;
+
+ err = vring_alloc_queue_split(&vring_split, vdev, num,
+ vq->split.vring_align,
+ vq->split.may_reduce_num);
+ if (err)
+ goto err;
+
+ err = vring_alloc_state_extra_split(&vring_split);
+ if (err)
+ goto err_state_extra;
+
+ vring_free(&vq->vq);
+
+ virtqueue_vring_init_split(&vring_split, vq);
+
+ virtqueue_init(vq, vring_split.vring.num);
+ virtqueue_vring_attach_split(vq, &vring_split);
+
+ return 0;
+
+err_state_extra:
+ vring_free_split(&vring_split, vdev);
+err:
+ virtqueue_reinit_split(vq);
+ return -ENOMEM;
+}
+
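The ordering above is what makes a failed resize safe: the old ring is not freed until both the new ring and its state arrays exist. Summarized as a sketch:

    /*
     * 1. vring_alloc_queue_split(new)        on failure: old ring untouched
     * 2. vring_alloc_state_extra_split(new)  on failure: free new, keep old
     * 3. vring_free(old)                     point of no return
     * 4. init + attach the new ring
     * err: virtqueue_reinit_split(vq)        old ring, freshly reset
     */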
/*
* Packed ring specific functions - *_packed().
@@ -1637,8 +1814,7 @@ static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
return NULL;
}
-static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
- unsigned int num)
+static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
{
struct vring_desc_extra *desc_extra;
unsigned int i;
@@ -1656,19 +1832,32 @@ static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *v
return desc_extra;
}
-static struct virtqueue *vring_create_virtqueue_packed(
- unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool may_reduce_num,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev)
+{
+ if (vring_packed->vring.desc)
+ vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
+ vring_packed->vring.desc,
+ vring_packed->ring_dma_addr);
+
+ if (vring_packed->vring.driver)
+ vring_free_queue(vdev, vring_packed->event_size_in_bytes,
+ vring_packed->vring.driver,
+ vring_packed->driver_event_dma_addr);
+
+ if (vring_packed->vring.device)
+ vring_free_queue(vdev, vring_packed->event_size_in_bytes,
+ vring_packed->vring.device,
+ vring_packed->device_event_dma_addr);
+
+ kfree(vring_packed->desc_state);
+ kfree(vring_packed->desc_extra);
+}
+
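Because each buffer is checked before being freed, vring_free_packed() doubles as the unwind path for a partially completed allocation; the precondition is simply that callers start from a zeroed descriptor, as the functions below do:

    /*
     * Any member vring_alloc_queue_packed() never reached stays NULL,
     * so vring_free_packed() skips it on the error path.
     */
    struct vring_virtqueue_packed vring_packed = {};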
+static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
+ struct virtio_device *vdev,
+ u32 num)
{
- struct vring_virtqueue *vq;
struct vring_packed_desc *ring;
struct vring_packed_desc_event *driver, *device;
dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
@@ -1680,7 +1869,11 @@ static struct virtqueue *vring_create_virtqueue_packed(
&ring_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!ring)
- goto err_ring;
+ goto err;
+
+ vring_packed->vring.desc = ring;
+ vring_packed->ring_dma_addr = ring_dma_addr;
+ vring_packed->ring_size_in_bytes = ring_size_in_bytes;
event_size_in_bytes = sizeof(struct vring_packed_desc_event);
@@ -1688,13 +1881,112 @@ static struct virtqueue *vring_create_virtqueue_packed(
&driver_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!driver)
- goto err_driver;
+ goto err;
+
+ vring_packed->vring.driver = driver;
+ vring_packed->event_size_in_bytes = event_size_in_bytes;
+ vring_packed->driver_event_dma_addr = driver_event_dma_addr;
device = vring_alloc_queue(vdev, event_size_in_bytes,
&device_event_dma_addr,
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (!device)
- goto err_device;
+ goto err;
+
+ vring_packed->vring.device = device;
+ vring_packed->device_event_dma_addr = device_event_dma_addr;
+
+ vring_packed->vring.num = num;
+
+ return 0;
+
+err:
+ vring_free_packed(vring_packed, vdev);
+ return -ENOMEM;
+}
+
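The size computations themselves fall in the elided context lines; for reference, they follow directly from the uapi structs (a sketch matching the variables used above):

    ring_size_in_bytes  = num * sizeof(struct vring_packed_desc);
    event_size_in_bytes = sizeof(struct vring_packed_desc_event);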
+static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed)
+{
+ struct vring_desc_state_packed *state;
+ struct vring_desc_extra *extra;
+ u32 num = vring_packed->vring.num;
+
+ state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL);
+ if (!state)
+ goto err_desc_state;
+
+ memset(state, 0, num * sizeof(struct vring_desc_state_packed));
+
+ extra = vring_alloc_desc_extra(num);
+ if (!extra)
+ goto err_desc_extra;
+
+ vring_packed->desc_state = state;
+ vring_packed->desc_extra = extra;
+
+ return 0;
+
+err_desc_extra:
+ kfree(state);
+err_desc_state:
+ return -ENOMEM;
+}
+
+static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed,
+ bool callback)
+{
+ vring_packed->next_avail_idx = 0;
+ vring_packed->avail_wrap_counter = 1;
+ vring_packed->event_flags_shadow = 0;
+ vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
+
+ /* No callback? Tell other side not to bother us. */
+ if (!callback) {
+ vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+ vring_packed->vring.driver->flags =
+ cpu_to_le16(vring_packed->event_flags_shadow);
+ }
+}
+
+static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq,
+ struct vring_virtqueue_packed *vring_packed)
+{
+ vq->packed = *vring_packed;
+
+ /* Put everything in free lists. */
+ vq->free_head = 0;
+}
+
+static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
+{
+ memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes);
+ memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes);
+
+ /* we need to reset the desc.flags. For more, see is_used_desc_packed() */
+ memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes);
+
+ virtqueue_init(vq, vq->packed.vring.num);
+ virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
+}
+
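Zeroing desc.flags works because a packed descriptor only reads as used when its AVAIL and USED bits agree with the current used wrap counter, which reinit sets back to 1. A sketch mirroring the test in is_used_desc_packed():

    static bool is_used(u16 flags, bool used_wrap_counter)
    {
            bool avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
            bool used  = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

            /* All-zero flags with the counter at 1 fail this test. */
            return avail == used && used == used_wrap_counter;
    }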
+static struct virtqueue *vring_create_virtqueue_packed(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct vring_virtqueue *vq;
+ int err;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num))
+ goto err_ring;
vq = kmalloc(sizeof(*vq), GFP_KERNEL);
if (!vq)
@@ -1703,8 +1995,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
- vq->vq.num_free = num;
vq->vq.index = index;
+ vq->vq.reset = false;
vq->we_own_ring = true;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -1713,15 +2005,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
#else
vq->broken = false;
#endif
- vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
- vq->event_triggered = false;
- vq->num_added = 0;
vq->packed_ring = true;
vq->use_dma_api = vring_use_dma_api(vdev);
-#ifdef DEBUG
- vq->in_use = false;
- vq->last_add_time_valid = false;
-#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -1730,65 +2015,58 @@ static struct virtqueue *vring_create_virtqueue_packed(
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- vq->packed.ring_dma_addr = ring_dma_addr;
- vq->packed.driver_event_dma_addr = driver_event_dma_addr;
- vq->packed.device_event_dma_addr = device_event_dma_addr;
+ err = vring_alloc_state_extra_packed(&vring_packed);
+ if (err)
+ goto err_state_extra;
- vq->packed.ring_size_in_bytes = ring_size_in_bytes;
- vq->packed.event_size_in_bytes = event_size_in_bytes;
+ virtqueue_vring_init_packed(&vring_packed, !!callback);
- vq->packed.vring.num = num;
- vq->packed.vring.desc = ring;
- vq->packed.vring.driver = driver;
- vq->packed.vring.device = device;
-
- vq->packed.next_avail_idx = 0;
- vq->packed.avail_wrap_counter = 1;
- vq->packed.event_flags_shadow = 0;
- vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
-
- vq->packed.desc_state = kmalloc_array(num,
- sizeof(struct vring_desc_state_packed),
- GFP_KERNEL);
- if (!vq->packed.desc_state)
- goto err_desc_state;
-
- memset(vq->packed.desc_state, 0,
- num * sizeof(struct vring_desc_state_packed));
-
- /* Put everything in free lists. */
- vq->free_head = 0;
-
- vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
- if (!vq->packed.desc_extra)
- goto err_desc_extra;
-
- /* No callback? Tell other side not to bother us. */
- if (!callback) {
- vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
- vq->packed.vring.driver->flags =
- cpu_to_le16(vq->packed.event_flags_shadow);
- }
+ virtqueue_init(vq, num);
+ virtqueue_vring_attach_packed(vq, &vring_packed);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-err_desc_extra:
- kfree(vq->packed.desc_state);
-err_desc_state:
+err_state_extra:
kfree(vq);
err_vq:
- vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
-err_device:
- vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
-err_driver:
- vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
+ vring_free_packed(&vring_packed, vdev);
err_ring:
return NULL;
}
+static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
+{
+ struct vring_virtqueue_packed vring_packed = {};
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+ int err;
+
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num))
+ goto err_ring;
+
+ err = vring_alloc_state_extra_packed(&vring_packed);
+ if (err)
+ goto err_state_extra;
+
+ vring_free(&vq->vq);
+
+ virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback);
+
+ virtqueue_init(vq, vring_packed.vring.num);
+ virtqueue_vring_attach_packed(vq, &vring_packed);
+
+ return 0;
+
+err_state_extra:
+ vring_free_packed(&vring_packed, vdev);
+err_ring:
+ virtqueue_reinit_packed(vq);
+ return -ENOMEM;
+}
+
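Whichever way the resize ends, the driver-side wrap state comes out coherent: virtqueue_init() seeds the used side and virtqueue_vring_init_packed() the avail side. The invariants, read off the helpers above:

    /*
     * vq->last_used_idx               == 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR)
     * vring_packed.next_avail_idx     == 0
     * vring_packed.avail_wrap_counter == 1
     * vring_packed.avail_used_flags   == 1 << VRING_PACKED_DESC_F_AVAIL
     */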
/*
* Generic functions and exported symbols.
@@ -2131,8 +2409,8 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
* @_vq: the struct virtqueue we're talking about.
*
* Returns NULL or the "data" token handed to virtqueue_add_*().
- * This is not valid on an active queue; it is useful only for device
- * shutdown.
+ * This is not valid on an active queue; it is useful for device
+ * shutdown or when resetting the queue.
*/
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
@@ -2148,6 +2426,14 @@ static inline bool more_used(const struct vring_virtqueue *vq)
return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}
+/**
+ * vring_interrupt - notify a virtqueue on an interrupt
+ * @irq: the IRQ number (ignored)
+ * @_vq: the struct virtqueue to notify
+ *
+ * Calls the callback function of @_vq to process the virtqueue
+ * notification.
+ */
irqreturn_t vring_interrupt(int irq, void *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -2180,16 +2466,17 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
EXPORT_SYMBOL_GPL(vring_interrupt);
/* Only available for split ring */
-struct virtqueue *__vring_new_virtqueue(unsigned int index,
- struct vring vring,
- struct virtio_device *vdev,
- bool weak_barriers,
- bool context,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+static struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring_virtqueue_split *vring_split,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
{
struct vring_virtqueue *vq;
+ int err;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
@@ -2202,8 +2489,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
- vq->vq.num_free = vring.num;
vq->vq.index = index;
+ vq->vq.reset = false;
vq->we_own_ring = false;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -2212,14 +2499,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
#else
vq->broken = false;
#endif
- vq->last_used_idx = 0;
- vq->event_triggered = false;
- vq->num_added = 0;
vq->use_dma_api = vring_use_dma_api(vdev);
-#ifdef DEBUG
- vq->in_use = false;
- vq->last_add_time_valid = false;
-#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2228,47 +2508,22 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
vq->weak_barriers = false;
- vq->split.queue_dma_addr = 0;
- vq->split.queue_size_in_bytes = 0;
-
- vq->split.vring = vring;
- vq->split.avail_flags_shadow = 0;
- vq->split.avail_idx_shadow = 0;
-
- /* No callback? Tell other side not to bother us. */
- if (!callback) {
- vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- if (!vq->event)
- vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
- vq->split.avail_flags_shadow);
+ err = vring_alloc_state_extra_split(vring_split);
+ if (err) {
+ kfree(vq);
+ return NULL;
}
- vq->split.desc_state = kmalloc_array(vring.num,
- sizeof(struct vring_desc_state_split), GFP_KERNEL);
- if (!vq->split.desc_state)
- goto err_state;
-
- vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
- if (!vq->split.desc_extra)
- goto err_extra;
+ virtqueue_vring_init_split(vring_split, vq);
- /* Put everything in free lists. */
- vq->free_head = 0;
- memset(vq->split.desc_state, 0, vring.num *
- sizeof(struct vring_desc_state_split));
+ virtqueue_init(vq, vring_split->vring.num);
+ virtqueue_vring_attach_split(vq, vring_split);
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
-
-err_extra:
- kfree(vq->split.desc_state);
-err_state:
- kfree(vq);
- return NULL;
}
-EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
struct virtqueue *vring_create_virtqueue(
unsigned int index,
@@ -2294,6 +2549,75 @@ struct virtqueue *vring_create_virtqueue(
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
+/**
+ * virtqueue_resize - resize the vring of vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @num: new ring num
+ * @recycle: callback to recycle buffers that are no longer used
+ *
+ * When a new vring really is needed, this puts the current vq into the
+ * reset state, then calls the passed callback to recycle each buffer that
+ * is no longer used. The old vring is released only after the new vring
+ * has been created successfully.
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -ENOMEM: Failed to allocate a new ring; falls back to the original ring
+ *  size, and the vq can still work normally.
+ * -EBUSY: Failed to sync with the device; the vq may not work properly.
+ * -ENOENT: Resize is not supported by the transport or device.
+ * -E2BIG/-EINVAL: num is invalid.
+ * -EPERM: Operation not permitted.
+ */
+int virtqueue_resize(struct virtqueue *_vq, u32 num,
+ void (*recycle)(struct virtqueue *vq, void *buf))
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = vq->vq.vdev;
+ void *buf;
+ int err;
+
+ if (!vq->we_own_ring)
+ return -EPERM;
+
+ if (num > vq->vq.num_max)
+ return -E2BIG;
+
+ if (!num)
+ return -EINVAL;
+
+ if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
+ return 0;
+
+ if (!vdev->config->disable_vq_and_reset)
+ return -ENOENT;
+
+ if (!vdev->config->enable_vq_after_reset)
+ return -ENOENT;
+
+ err = vdev->config->disable_vq_and_reset(_vq);
+ if (err)
+ return err;
+
+ while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
+ recycle(_vq, buf);
+
+ if (vq->packed_ring)
+ err = virtqueue_resize_packed(_vq, num);
+ else
+ err = virtqueue_resize_split(_vq, num);
+
+ if (vdev->config->enable_vq_after_reset(_vq))
+ return -EBUSY;
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(virtqueue_resize);
+
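A hedged driver-side sketch of the intended calling pattern; my_recycle and the kfree() are illustrative only, since a real driver must undo whatever mapping and allocation it performed when queueing the buffers:

    /* Hypothetical caller: shrink a ring to 128 entries. */
    static void my_recycle(struct virtqueue *vq, void *buf)
    {
            kfree(buf);     /* assumes the driver kmalloc'ed its buffers */
    }

    static int my_shrink_ring(struct virtqueue *vq)
    {
            int err = virtqueue_resize(vq, 128, my_recycle);

            if (err == -ENOMEM)
                    return 0;       /* old ring kept; vq still usable */
            return err;
    }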
/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
@@ -2306,25 +2630,21 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
void (*callback)(struct virtqueue *vq),
const char *name)
{
- struct vring vring;
+ struct vring_virtqueue_split vring_split = {};
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return NULL;
- vring_init(&vring, num, pages, vring_align);
- return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
+ vring_init(&vring_split.vring, num, pages, vring_align);
+ return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+ context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
-void vring_del_virtqueue(struct virtqueue *_vq)
+static void vring_free(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- spin_lock(&vq->vq.vdev->vqs_list_lock);
- list_del(&_vq->list);
- spin_unlock(&vq->vq.vdev->vqs_list_lock);
-
if (vq->we_own_ring) {
if (vq->packed_ring) {
vring_free_queue(vq->vq.vdev,
@@ -2355,6 +2675,18 @@ void vring_del_virtqueue(struct virtqueue *_vq)
kfree(vq->split.desc_state);
kfree(vq->split.desc_extra);
}
+}
+
+void vring_del_virtqueue(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ spin_lock(&vq->vq.vdev->vqs_list_lock);
+ list_del(&_vq->list);
+ spin_unlock(&vq->vq.vdev->vqs_list_lock);
+
+ vring_free(_vq);
+
kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -2402,6 +2734,30 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
+/*
+ * This function should only be called by the core, not directly by the driver.
+ */
+void __virtqueue_break(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, true);
+}
+EXPORT_SYMBOL_GPL(__virtqueue_break);
+
+/*
+ * This function should only be called by the core, not directly by the driver.
+ */
+void __virtqueue_unbreak(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
+ WRITE_ONCE(vq->broken, false);
+}
+EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
+
bool virtqueue_is_broken(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);