author     Linus Torvalds <torvalds@linux-foundation.org>  2014-12-11 12:20:31 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-11 12:20:31 -0800
commit     6b9e2cea428cf7af93a84bcb865e478d8bf1c165 (patch)
tree       11be387e37129fce0c4c111803df1a2e56637b60 /drivers/vhost
parent     Merge branch 'mailbox-devel' of git://git.linaro.org/landing-teams/working/fujitsu/integration (diff)
parent     virtio_ccw: finalize_features error handling (diff)
download   linux-dev-6b9e2cea428cf7af93a84bcb865e478d8bf1c165.tar.xz
           linux-dev-6b9e2cea428cf7af93a84bcb865e478d8bf1c165.zip
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:
 "virtio: virtio 1.0 support, misc patches

  This adds a lot of infrastructure for virtio 1.0 support. Notable
  missing pieces: virtio pci, virtio balloon (needs spec extension),
  vhost scsi.

  Plus, there are some minor fixes in a couple of places.

  Note: some net drivers are affected by these patches. David said he's
  fine with merging these patches through my tree.

  Rusty's on vacation, he acked using my tree for these, too"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (70 commits)
  virtio_ccw: finalize_features error handling
  virtio_ccw: future-proof finalize_features
  virtio_pci: rename virtio_pci -> virtio_pci_common
  virtio_pci: update file descriptions and copyright
  virtio_pci: split out legacy device support
  virtio_pci: setup config vector indirectly
  virtio_pci: setup vqs indirectly
  virtio_pci: delete vqs indirectly
  virtio_pci: use priv for vq notification
  virtio_pci: free up vq->priv
  virtio_pci: fix coding style for structs
  virtio_pci: add isr field
  virtio: drop legacy_only driver flag
  virtio_balloon: drop legacy_only driver flag
  virtio_ccw: rev 1 devices set VIRTIO_F_VERSION_1
  virtio: allow finalize_features to fail
  virtio_ccw: legacy: don't negotiate rev 1/features
  virtio: add API to detect legacy devices
  virtio_console: fix sparse warnings
  vhost: remove unnecessary forward declarations in vhost.h
  ...
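Most of the churn in this diff is endianness handling: legacy virtio rings use guest-native byte order, while virtio 1.0 (VIRTIO_F_VERSION_1) mandates little-endian. The vhost16/32/64 accessors added to vhost.h below wrap the generic virtio byteorder helpers introduced in the same series; as a minimal sketch (modeled on include/linux/virtio_byteorder.h), the 16-bit pair behaves roughly like this:

    /* Sketch: with VIRTIO_F_VERSION_1 negotiated, ring fields are
     * little-endian; for legacy devices they are guest-native, which
     * from the host side means no conversion at all.
     */
    static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
    {
            if (little_endian)
                    return le16_to_cpu((__force __le16)val);
            else
                    return (__force u16)val;
    }

    static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
    {
            if (little_endian)
                    return (__force __virtio16)cpu_to_le16(val);
            else
                    return (__force __virtio16)val;
    }

The __force casts matter because __virtio16/32/64 are sparse bitwise types: mixing them with plain integers without an explicit conversion gets flagged, which makes any ring access that skips the new accessors easy to spot.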
Diffstat (limited to 'drivers/vhost')
-rw-r--r--  drivers/vhost/net.c    31
-rw-r--r--  drivers/vhost/scsi.c   22
-rw-r--r--  drivers/vhost/vhost.c  93
-rw-r--r--  drivers/vhost/vhost.h  41
4 files changed, 121 insertions(+), 66 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 8dae2f724a35..a935c254749e 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -48,20 +48,21 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
* status internally; used for zerocopy tx only.
*/
/* Lower device DMA failed */
-#define VHOST_DMA_FAILED_LEN 3
+#define VHOST_DMA_FAILED_LEN ((__force __virtio32)3)
/* Lower device DMA done */
-#define VHOST_DMA_DONE_LEN 2
+#define VHOST_DMA_DONE_LEN ((__force __virtio32)2)
/* Lower device DMA in progress */
-#define VHOST_DMA_IN_PROGRESS 1
+#define VHOST_DMA_IN_PROGRESS ((__force __virtio32)1)
/* Buffer unused */
-#define VHOST_DMA_CLEAR_LEN 0
+#define VHOST_DMA_CLEAR_LEN ((__force __virtio32)0)
-#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
+#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
enum {
VHOST_NET_FEATURES = VHOST_FEATURES |
(1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
- (1ULL << VIRTIO_NET_F_MRG_RXBUF),
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_F_VERSION_1),
};
enum {
@@ -416,7 +417,7 @@ static void handle_tx(struct vhost_net *net)
struct ubuf_info *ubuf;
ubuf = nvq->ubuf_info + nvq->upend_idx;
- vq->heads[nvq->upend_idx].id = head;
+ vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
ubuf->callback = vhost_zerocopy_callback;
ubuf->ctx = nvq->ubufs;
@@ -500,6 +501,10 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
int headcount = 0;
unsigned d;
int r, nlogs = 0;
+ /* len is always initialized before use since we are always called with
+ * datalen > 0.
+ */
+ u32 uninitialized_var(len);
while (datalen > 0 && headcount < quota) {
if (unlikely(seg >= UIO_MAXIOV)) {
@@ -527,13 +532,14 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
nlogs += *log_num;
log += *log_num;
}
- heads[headcount].id = d;
- heads[headcount].len = iov_length(vq->iov + seg, in);
- datalen -= heads[headcount].len;
+ heads[headcount].id = cpu_to_vhost32(vq, d);
+ len = iov_length(vq->iov + seg, in);
+ heads[headcount].len = cpu_to_vhost32(vq, len);
+ datalen -= len;
++headcount;
seg += in;
}
- heads[headcount - 1].len += datalen;
+ heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen);
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
@@ -1025,7 +1031,8 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features)
size_t vhost_hlen, sock_hlen, hdr_len;
int i;
- hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
+ hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_F_VERSION_1))) ?
sizeof(struct virtio_net_hdr_mrg_rxbuf) :
sizeof(struct virtio_net_hdr);
if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index a17f11850669..01c01cb3933f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -168,6 +168,7 @@ enum {
VHOST_SCSI_VQ_IO = 2,
};
+/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
(1ULL << VIRTIO_SCSI_F_T10_PI)
@@ -577,8 +578,8 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
return NULL;
}
- evt->event.event = event;
- evt->event.reason = reason;
+ evt->event.event = cpu_to_vhost32(vq, event);
+ evt->event.reason = cpu_to_vhost32(vq, reason);
vs->vs_events_nr++;
return evt;
@@ -636,7 +637,7 @@ again:
}
if (vs->vs_events_missed) {
- event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
+ event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
vs->vs_events_missed = false;
}
@@ -695,12 +696,13 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
cmd, se_cmd->residual_count, se_cmd->scsi_status);
memset(&v_rsp, 0, sizeof(v_rsp));
- v_rsp.resid = se_cmd->residual_count;
+ v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
/* TODO is status_qualifier field needed? */
v_rsp.status = se_cmd->scsi_status;
- v_rsp.sense_len = se_cmd->scsi_sense_length;
+ v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
+ se_cmd->scsi_sense_length);
memcpy(v_rsp.sense, cmd->tvc_sense_buf,
- v_rsp.sense_len);
+ se_cmd->scsi_sense_length);
ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
if (likely(ret == 0)) {
struct vhost_scsi_virtqueue *q;
@@ -1095,14 +1097,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
", but wrong data_direction\n");
goto err_cmd;
}
- prot_bytes = v_req_pi.pi_bytesout;
+ prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
} else if (v_req_pi.pi_bytesin) {
if (data_direction != DMA_FROM_DEVICE) {
vq_err(vq, "Received non zero di_pi_niov"
", but wrong data_direction\n");
goto err_cmd;
}
- prot_bytes = v_req_pi.pi_bytesin;
+ prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
}
if (prot_bytes) {
int tmp = 0;
@@ -1117,12 +1119,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
data_first += prot_niov;
data_niov = data_num - prot_niov;
}
- tag = v_req_pi.tag;
+ tag = vhost64_to_cpu(vq, v_req_pi.tag);
task_attr = v_req_pi.task_attr;
cdb = &v_req_pi.cdb[0];
lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
} else {
- tag = v_req.tag;
+ tag = vhost64_to_cpu(vq, v_req.tag);
task_attr = v_req.task_attr;
cdb = &v_req.cdb[0];
lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c90f4374442a..ed71b5347a76 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -33,8 +33,8 @@ enum {
VHOST_MEMORY_F_LOG = 0x1,
};
-#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
-#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
+#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
+#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
@@ -1001,7 +1001,7 @@ EXPORT_SYMBOL_GPL(vhost_log_write);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
void __user *used;
- if (__put_user(vq->used_flags, &vq->used->flags) < 0)
+ if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0)
return -EFAULT;
if (unlikely(vq->log_used)) {
/* Make sure the flag is seen before log. */
@@ -1019,7 +1019,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
- if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
+ if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq)))
return -EFAULT;
if (unlikely(vq->log_used)) {
void __user *used;
@@ -1038,6 +1038,7 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
int vhost_init_used(struct vhost_virtqueue *vq)
{
+ __virtio16 last_used_idx;
int r;
if (!vq->private_data)
return 0;
@@ -1046,7 +1047,13 @@ int vhost_init_used(struct vhost_virtqueue *vq)
if (r)
return r;
vq->signalled_used_valid = false;
- return get_user(vq->last_used_idx, &vq->used->idx);
+ if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx))
+ return -EFAULT;
+ r = __get_user(last_used_idx, &vq->used->idx);
+ if (r)
+ return r;
+ vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
+ return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_used);
@@ -1087,16 +1094,16 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
/* Each buffer in the virtqueues is actually a chain of descriptors. This
* function returns the next descriptor in the chain,
* or -1U if we're at the end. */
-static unsigned next_desc(struct vring_desc *desc)
+static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
unsigned int next;
/* If this descriptor says it doesn't chain, we're done. */
- if (!(desc->flags & VRING_DESC_F_NEXT))
+ if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
return -1U;
/* Check they're not leading us off end of descriptors. */
- next = desc->next;
+ next = vhost16_to_cpu(vq, desc->next);
/* Make sure compiler knows to grab that: we don't want it changing! */
/* We will use the result as an index in an array, so most
* architectures only need a compiler barrier here. */
@@ -1113,18 +1120,19 @@ static int get_indirect(struct vhost_virtqueue *vq,
{
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
+ u32 len = vhost32_to_cpu(vq, indirect->len);
int ret;
/* Sanity check */
- if (unlikely(indirect->len % sizeof desc)) {
+ if (unlikely(len % sizeof desc)) {
vq_err(vq, "Invalid length in indirect descriptor: "
"len 0x%llx not multiple of 0x%zx\n",
- (unsigned long long)indirect->len,
+ (unsigned long long)len,
sizeof desc);
return -EINVAL;
}
- ret = translate_desc(vq, indirect->addr, indirect->len, vq->indirect,
+ ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
UIO_MAXIOV);
if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d in indirect.\n", ret);
@@ -1135,7 +1143,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
* architectures only need a compiler barrier here. */
read_barrier_depends();
- count = indirect->len / sizeof desc;
+ count = len / sizeof desc;
/* Buffers are chained via a 16 bit next field, so
* we can have at most 2^16 of these. */
if (unlikely(count > USHRT_MAX + 1)) {
@@ -1155,16 +1163,17 @@ static int get_indirect(struct vhost_virtqueue *vq,
if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
vq->indirect, sizeof desc))) {
vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
- i, (size_t)indirect->addr + i * sizeof desc);
+ i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
}
- if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
+ if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
- i, (size_t)indirect->addr + i * sizeof desc);
+ i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
}
- ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count,
+ ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
+ vhost32_to_cpu(vq, desc.len), iov + iov_count,
iov_size - iov_count);
if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d indirect idx %d\n",
@@ -1172,11 +1181,11 @@ static int get_indirect(struct vhost_virtqueue *vq,
return ret;
}
/* If this is an input descriptor, increment that count. */
- if (desc.flags & VRING_DESC_F_WRITE) {
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
*in_num += ret;
if (unlikely(log)) {
- log[*log_num].addr = desc.addr;
- log[*log_num].len = desc.len;
+ log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+ log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
}
} else {
@@ -1189,7 +1198,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
}
*out_num += ret;
}
- } while ((i = next_desc(&desc)) != -1);
+ } while ((i = next_desc(vq, &desc)) != -1);
return 0;
}
@@ -1209,15 +1218,18 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
struct vring_desc desc;
unsigned int i, head, found = 0;
u16 last_avail_idx;
+ __virtio16 avail_idx;
+ __virtio16 ring_head;
int ret;
/* Check it isn't doing very strange things with descriptor numbers. */
last_avail_idx = vq->last_avail_idx;
- if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
+ if (unlikely(__get_user(avail_idx, &vq->avail->idx))) {
vq_err(vq, "Failed to access avail idx at %p\n",
&vq->avail->idx);
return -EFAULT;
}
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
vq_err(vq, "Guest moved used index from %u to %u",
@@ -1234,7 +1246,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
/* Grab the next descriptor number they're advertising, and increment
* the index we've seen. */
- if (unlikely(__get_user(head,
+ if (unlikely(__get_user(ring_head,
&vq->avail->ring[last_avail_idx % vq->num]))) {
vq_err(vq, "Failed to read head: idx %d address %p\n",
last_avail_idx,
@@ -1242,6 +1254,8 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
return -EFAULT;
}
+ head = vhost16_to_cpu(vq, ring_head);
+
/* If their number is silly, that's an error. */
if (unlikely(head >= vq->num)) {
vq_err(vq, "Guest says index %u > %u is available",
@@ -1274,7 +1288,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
i, vq->desc + i);
return -EFAULT;
}
- if (desc.flags & VRING_DESC_F_INDIRECT) {
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
ret = get_indirect(vq, iov, iov_size,
out_num, in_num,
log, log_num, &desc);
@@ -1286,20 +1300,21 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
continue;
}
- ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count,
+ ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
+ vhost32_to_cpu(vq, desc.len), iov + iov_count,
iov_size - iov_count);
if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d descriptor idx %d\n",
ret, i);
return ret;
}
- if (desc.flags & VRING_DESC_F_WRITE) {
+ if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
/* If this is an input descriptor,
* increment that count. */
*in_num += ret;
if (unlikely(log)) {
- log[*log_num].addr = desc.addr;
- log[*log_num].len = desc.len;
+ log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+ log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
}
} else {
@@ -1312,7 +1327,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
}
*out_num += ret;
}
- } while ((i = next_desc(&desc)) != -1);
+ } while ((i = next_desc(vq, &desc)) != -1);
/* On success, increment avail index. */
vq->last_avail_idx++;
@@ -1335,7 +1350,10 @@ EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
* want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
- struct vring_used_elem heads = { head, len };
+ struct vring_used_elem heads = {
+ cpu_to_vhost32(vq, head),
+ cpu_to_vhost32(vq, len)
+ };
return vhost_add_used_n(vq, &heads, 1);
}
@@ -1404,7 +1422,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
/* Make sure buffer is written before we update index. */
smp_wmb();
- if (put_user(vq->last_used_idx, &vq->used->idx)) {
+ if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) {
vq_err(vq, "Failed to increment used idx");
return -EFAULT;
}
@@ -1422,7 +1440,8 @@ EXPORT_SYMBOL_GPL(vhost_add_used_n);
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
- __u16 old, new, event;
+ __u16 old, new;
+ __virtio16 event;
bool v;
/* Flush out used index updates. This is paired
* with the barrier that the Guest executes when enabling
@@ -1434,12 +1453,12 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
return true;
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
- __u16 flags;
+ __virtio16 flags;
if (__get_user(flags, &vq->avail->flags)) {
vq_err(vq, "Failed to get flags");
return true;
}
- return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
+ return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
}
old = vq->signalled_used;
v = vq->signalled_used_valid;
@@ -1449,11 +1468,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (unlikely(!v))
return true;
- if (get_user(event, vhost_used_event(vq))) {
+ if (__get_user(event, vhost_used_event(vq))) {
vq_err(vq, "Failed to get used event idx");
return true;
}
- return vring_need_event(event, new, old);
+ return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}
/* This actually signals the guest, using eventfd. */
@@ -1488,7 +1507,7 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
- u16 avail_idx;
+ __virtio16 avail_idx;
int r;
if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
@@ -1519,7 +1538,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
return false;
}
- return avail_idx != vq->avail_idx;
+ return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 3eda654b8f5a..8c1c792900ba 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -12,8 +12,6 @@
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
-struct vhost_device;
-
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -54,8 +52,6 @@ struct vhost_log {
u64 len;
};
-struct vhost_virtqueue;
-
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
struct vhost_dev *dev;
@@ -106,7 +102,7 @@ struct vhost_virtqueue {
/* Protected by virtqueue mutex. */
struct vhost_memory *memory;
void *private_data;
- unsigned acked_features;
+ u64 acked_features;
/* Log write descriptors */
void __user *log_base;
struct vhost_log *log;
@@ -172,8 +168,39 @@ enum {
(1ULL << VHOST_F_LOG_ALL),
};
-static inline int vhost_has_feature(struct vhost_virtqueue *vq, int bit)
+static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
+{
+ return vq->acked_features & (1ULL << bit);
+}
+
+/* Memory accessors */
+static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
+{
+ return __virtio16_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
+{
+ return __cpu_to_virtio16(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
+{
+ return __virtio32_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
+{
+ return __cpu_to_virtio32(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
+{
+ return __virtio64_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
- return vq->acked_features & (1 << bit);
+ return __cpu_to_virtio64(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
}
#endif
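As a usage sketch (hypothetical snippet, not part of this diff), every ring-format field now goes through these accessors before the host touches it:

    struct vring_desc desc;
    /* desc fields are in guest/ring format (__virtio64/32/16); convert
     * to CPU byte order before using them as addresses or lengths. */
    u64 addr = vhost64_to_cpu(vq, desc.addr);
    u32 len  = vhost32_to_cpu(vq, desc.len);
    u16 next = vhost16_to_cpu(vq, desc.next);

On a little-endian host both branches of the helpers are byte-identical, so the VIRTIO_F_VERSION_1 check effectively compiles away and the conversions cost nothing in the common case.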