Diffstat (limited to 'drivers/vdpa')
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.c      |  4
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_base.h      | 14
-rw-r--r--  drivers/vdpa/ifcvf/ifcvf_main.c      | 43
-rw-r--r--  drivers/vdpa/mlx5/core/mlx5_vdpa.h   |  2
-rw-r--r--  drivers/vdpa/mlx5/core/mr.c          | 95
-rw-r--r--  drivers/vdpa/mlx5/core/resources.c   |  7
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c    | 67
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim.c     |  4
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim_blk.c |  1
-rw-r--r--  drivers/vdpa/virtio_pci/vp_vdpa.c    | 43
10 files changed, 211 insertions(+), 69 deletions(-)
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 1a661ab45af5..6e197fe0fcf9 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -133,6 +133,8 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
 				      &hw->notify_off_multiplier);
 		hw->notify_bar = cap.bar;
 		hw->notify_base = get_cap_addr(hw, &cap);
+		hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
+				le32_to_cpu(cap.offset);
 		IFCVF_DBG(pdev, "hw->notify_base = %p\n",
 			  hw->notify_base);
 		break;
@@ -161,6 +163,8 @@ next:
 		notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
 		hw->vring[i].notify_addr = hw->notify_base +
 			notify_off * hw->notify_off_multiplier;
+		hw->vring[i].notify_pa = hw->notify_base_pa +
+			notify_off * hw->notify_off_multiplier;
 	}
 
 	hw->lm_cfg = hw->base[IFCVF_LM_BAR];
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 0111bfdeb342..2996db0da490 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -19,21 +19,9 @@
 #include <uapi/linux/virtio_config.h>
 #include <uapi/linux/virtio_pci.h>
 
-#define N3000_VENDOR_ID			0x1AF4
 #define N3000_DEVICE_ID			0x1041
-#define N3000_SUBSYS_VENDOR_ID		0x8086
 #define N3000_SUBSYS_DEVICE_ID		0x001A
 
-#define C5000X_PL_VENDOR_ID		0x1AF4
-#define C5000X_PL_DEVICE_ID		0x1000
-#define C5000X_PL_SUBSYS_VENDOR_ID	0x8086
-#define C5000X_PL_SUBSYS_DEVICE_ID	0x0001
-
-#define C5000X_PL_BLK_VENDOR_ID		0x1AF4
-#define C5000X_PL_BLK_DEVICE_ID		0x1001
-#define C5000X_PL_BLK_SUBSYS_VENDOR_ID	0x8086
-#define C5000X_PL_BLK_SUBSYS_DEVICE_ID	0x0002
-
 #define IFCVF_NET_SUPPORTED_FEATURES \
 		((1ULL << VIRTIO_NET_F_MAC)			| \
 		 (1ULL << VIRTIO_F_ANY_LAYOUT)			| \
@@ -73,6 +61,7 @@ struct vring_info {
 	u16 last_avail_idx;
 	bool ready;
 	void __iomem *notify_addr;
+	phys_addr_t notify_pa;
 	u32 irq;
 	struct vdpa_callback cb;
 	char msix_name[256];
@@ -87,6 +76,7 @@ struct ifcvf_hw {
 	u8 notify_bar;
 	/* Notificaiton bar address */
 	void __iomem *notify_base;
+	phys_addr_t notify_base_pa;
 	u32 notify_off_multiplier;
 	u64 req_features;
 	u64 hw_features;
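The two ifcvf_base hunks above record the physical address of the notify region alongside the existing __iomem mapping. A minimal sketch of the arithmetic they implement, with a hypothetical helper name (the driver open-codes this inside ifcvf_init_hw()):

static phys_addr_t queue_notify_pa(phys_addr_t bar_start, u32 cap_offset,
				   u16 queue_notify_off, u32 multiplier)
{
	/* Per the virtio-pci notify capability: a queue's doorbell lives
	 * at BAR start + capability offset + per-queue notify offset
	 * scaled by notify_off_multiplier.
	 */
	return bar_start + cap_offset +
	       (phys_addr_t)queue_notify_off * multiplier;
}

Keeping notify_pa in step with notify_addr is what lets the new get_vq_notification callback in ifcvf_main.c below hand userspace a physical page it can mmap() for direct doorbell writes.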
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index ab0ab5cf0f6e..21b78f1cd521 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -264,7 +264,7 @@ static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	state->avail_index = ifcvf_get_vq_state(vf, qid);
+	state->split.avail_index = ifcvf_get_vq_state(vf, qid);
 	return 0;
 }
 
@@ -273,7 +273,7 @@ static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
 {
 	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
 
-	return ifcvf_set_vq_state(vf, qid, state->avail_index);
+	return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
 }
 
 static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
@@ -413,6 +413,21 @@ static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
 	return vf->vring[qid].irq;
 }
 
+static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
+							       u16 idx)
+{
+	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+	struct vdpa_notification_area area;
+
+	area.addr = vf->vring[idx].notify_pa;
+	if (!vf->notify_off_multiplier)
+		area.size = PAGE_SIZE;
+	else
+		area.size = vf->notify_off_multiplier;
+
+	return area;
+}
+
 /*
  * IFCVF currently does't have on-chip IOMMU, so not
  * implemented set_map()/dma_map()/dma_unmap()
@@ -440,6 +455,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
 	.get_config	= ifcvf_vdpa_get_config,
 	.set_config	= ifcvf_vdpa_set_config,
 	.set_config_cb  = ifcvf_vdpa_set_config_cb,
+	.get_vq_notification = ifcvf_get_vq_notification,
 };
 
 static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -536,18 +552,21 @@ static void ifcvf_remove(struct pci_dev *pdev)
 }
 
 static struct pci_device_id ifcvf_pci_ids[] = {
-	{ PCI_DEVICE_SUB(N3000_VENDOR_ID,
+	/* N3000 network device */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
 			 N3000_DEVICE_ID,
-			 N3000_SUBSYS_VENDOR_ID,
+			 PCI_VENDOR_ID_INTEL,
 			 N3000_SUBSYS_DEVICE_ID) },
-	{ PCI_DEVICE_SUB(C5000X_PL_VENDOR_ID,
-			 C5000X_PL_DEVICE_ID,
-			 C5000X_PL_SUBSYS_VENDOR_ID,
-			 C5000X_PL_SUBSYS_DEVICE_ID) },
-	{ PCI_DEVICE_SUB(C5000X_PL_BLK_VENDOR_ID,
-			 C5000X_PL_BLK_DEVICE_ID,
-			 C5000X_PL_BLK_SUBSYS_VENDOR_ID,
-			 C5000X_PL_BLK_SUBSYS_DEVICE_ID) },
+	/* C5000X-PL network device */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
+			 VIRTIO_TRANS_ID_NET,
+			 PCI_VENDOR_ID_INTEL,
+			 VIRTIO_ID_NET) },
+	/* C5000X-PL block device */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
+			 VIRTIO_TRANS_ID_BLOCK,
+			 PCI_VENDOR_ID_INTEL,
+			 VIRTIO_ID_BLOCK) },
 
 	{ 0 },
 };
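The repeated s/avail_index/split.avail_index/ change in this and the following drivers tracks a reworked struct vdpa_vq_state in include/linux/vdpa.h: the state is now a union so that packed virtqueues can be described as well. The layout below is reconstructed from the accessors used in this diff (state->split.avail_index, state->packed.last_avail_counter and friends); treat the exact field widths as an assumption:

struct vdpa_vq_state_split {
	u16 avail_index;		/* split ring: next available index */
};

struct vdpa_vq_state_packed {
	u16 last_avail_counter:1;	/* driver ring wrap counter */
	u16 last_avail_idx:15;
	u16 last_used_counter:1;	/* device ring wrap counter */
	u16 last_used_idx:15;
};

struct vdpa_vq_state {
	union {
		struct vdpa_vq_state_split split;
		struct vdpa_vq_state_packed packed;
	};
};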
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index b6cc53ba980c..0002b2136b48 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -35,12 +35,14 @@ struct mlx5_vdpa_mr {
 	/* serialize mkey creation and destruction */
 	struct mutex mkey_mtx;
+	bool user_mr;
 };
 
 struct mlx5_vdpa_resources {
 	u32 pdn;
 	struct mlx5_uars_page *uar;
 	void __iomem *kick_addr;
+	u64 phys_kick_addr;
 	u16 uid;
 	u32 null_mkey;
 	bool valid;
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index 800cfd1967ad..dcee6039e966 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -219,11 +219,6 @@ static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_m
 	mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
 }
 
-static struct device *get_dma_device(struct mlx5_vdpa_dev *mvdev)
-{
-	return &mvdev->mdev->pdev->dev;
-}
-
 static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
 			 struct vhost_iotlb *iotlb)
 {
@@ -239,7 +234,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
 	u64 pa;
 	u64 paend;
 	struct scatterlist *sg;
-	struct device *dma = get_dma_device(mvdev);
+	struct device *dma = mvdev->vdev.dma_dev;
 
 	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
 	     map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
@@ -298,7 +293,7 @@ err_map:
 
 static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
 {
-	struct device *dma = get_dma_device(mvdev);
+	struct device *dma = mvdev->vdev.dma_dev;
 
 	destroy_direct_mr(mvdev, mr);
 	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
@@ -360,7 +355,7 @@ err_alloc:
  * indirect memory key that provides access to the enitre address space given
  * by iotlb.
  */
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
 {
 	struct mlx5_vdpa_mr *mr = &mvdev->mr;
 	struct mlx5_vdpa_direct_mr *dmr;
@@ -374,9 +369,6 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
 	int err = 0;
 	int nnuls;
 
-	if (mr->initialized)
-		return 0;
-
 	INIT_LIST_HEAD(&mr->head);
 	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
 	     map = vhost_iotlb_itree_next(map, start, last)) {
@@ -414,7 +406,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
 	if (err)
 		goto err_chain;
 
-	mr->initialized = true;
+	mr->user_mr = true;
 	return 0;
 
 err_chain:
@@ -426,33 +418,94 @@ err_chain:
 	return err;
 }
 
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int create_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+	void *mkc;
+	u32 *in;
+	int err;
+
+	in = kzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+	MLX5_SET(mkc, mkc, length64, 1);
+	MLX5_SET(mkc, mkc, lw, 1);
+	MLX5_SET(mkc, mkc, lr, 1);
+	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+	err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
+	if (!err)
+		mr->user_mr = false;
+
+	kfree(in);
+	return err;
+}
+
+static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
+{
+	mlx5_vdpa_destroy_mkey(mvdev, &mr->mkey);
+}
+
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
 {
 	struct mlx5_vdpa_mr *mr = &mvdev->mr;
 	int err;
 
-	mutex_lock(&mr->mkey_mtx);
+	if (mr->initialized)
+		return 0;
+
+	if (iotlb)
+		err = create_user_mr(mvdev, iotlb);
+	else
+		err = create_dma_mr(mvdev, mr);
+
+	if (!err)
+		mr->initialized = true;
+
+	return err;
+}
+
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+	int err;
+
+	mutex_lock(&mvdev->mr.mkey_mtx);
 	err = _mlx5_vdpa_create_mr(mvdev, iotlb);
-	mutex_unlock(&mr->mkey_mtx);
+	mutex_unlock(&mvdev->mr.mkey_mtx);
 	return err;
 }
 
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
 {
-	struct mlx5_vdpa_mr *mr = &mvdev->mr;
 	struct mlx5_vdpa_direct_mr *dmr;
 	struct mlx5_vdpa_direct_mr *n;
 
-	mutex_lock(&mr->mkey_mtx);
-	if (!mr->initialized)
-		goto out;
-
 	destroy_indirect_key(mvdev, mr);
 	list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
 		list_del_init(&dmr->list);
 		unmap_direct_mr(mvdev, dmr);
 		kfree(dmr);
 	}
+}
+
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+{
+	struct mlx5_vdpa_mr *mr = &mvdev->mr;
+
+	mutex_lock(&mr->mkey_mtx);
+	if (!mr->initialized)
+		goto out;
+
+	if (mr->user_mr)
+		destroy_user_mr(mvdev, mr);
+	else
+		destroy_dma_mr(mvdev, mr);
+
 	memset(mr, 0, sizeof(*mr));
 	mr->initialized = false;
 out:
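create_dma_mr() handles the case where no vhost iotlb is supplied, i.e. the device is driven by the in-kernel virtio_vdpa bus rather than a userspace vhost client: a single mkey in PA access mode with length64 set spans the whole physical address space, so no chain of direct mkeys is needed. The new user_mr flag then selects the matching teardown path. Condensed into a sketch (an illustration of the dispatch above, locking omitted):

static int create_mr_locked(struct mlx5_vdpa_dev *mvdev,
			    struct vhost_iotlb *iotlb)
{
	if (mvdev->mr.initialized)
		return 0;

	/* user iotlb -> chain of direct mkeys over the mapped ranges;
	 * no iotlb  -> one PA-mode mkey covering all of memory
	 */
	return iotlb ? create_user_mr(mvdev, iotlb) :
		       create_dma_mr(mvdev, &mvdev->mr);
}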
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index 6521cbd0f5c2..d4606213f88a 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -54,6 +54,9 @@ static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
 	void *in;
 	int err;
 
+	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0))
+		return 0;
+
 	/* 0 means not supported */
 	if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
 		return -EOPNOTSUPP;
@@ -79,6 +82,9 @@ static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
 	u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
 	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
 
+	if (!uid)
+		return;
+
 	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
 	MLX5_SET(destroy_uctx_in, in, uid, uid);
 
@@ -247,6 +253,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
 		goto err_key;
 
 	kick_addr = mdev->bar_addr + offset;
+	res->phys_kick_addr = kick_addr;
 
 	res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
 	if (!res->kick_addr) {
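Both uctx hunks implement one convention: on firmware that advertises the umem_uid_0 capability, resources may be created with uid == 0, so no UCTX object is required and uid 0 doubles as the "nothing to free" marker in destroy_uctx(). A hedged sketch of that convention, where create_uctx_cmd() is a hypothetical stand-in for the firmware command sequence:

static int get_uctx_uid(struct mlx5_core_dev *mdev, u16 *uid)
{
	if (MLX5_CAP_GEN(mdev, umem_uid_0)) {
		*uid = 0;	/* firmware accepts uid 0 for umems */
		return 0;
	}
	return create_uctx_cmd(mdev, uid);	/* hypothetical wrapper */
}

The third hunk records the physical kick address so that mlx5_vnet.c can later expose it as a notification area.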
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index dda5dc6f7737..2a31467f7ac5 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -611,8 +611,8 @@ static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
 	mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
 }
 
-static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
-		     struct mlx5_vdpa_umem **umemp)
+static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
+			  struct mlx5_vdpa_umem **umemp)
 {
 	struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
 	int p_a;
@@ -635,7 +635,7 @@ static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq
 		*umemp = &mvq->umem3;
 		break;
 	}
-	return p_a * mvq->num_ent + p_b;
+	(*umemp)->size = p_a * mvq->num_ent + p_b;
 }
 
 static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem)
@@ -651,15 +651,10 @@ static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
 	void *in;
 	int err;
 	__be64 *pas;
-	int size;
 	struct mlx5_vdpa_umem *umem;
 
-	size = umem_size(ndev, mvq, num, &umem);
-	if (size < 0)
-		return size;
-
-	umem->size = size;
-	err = umem_frag_buf_alloc(ndev, umem, size);
+	set_umem_size(ndev, mvq, num, &umem);
+	err = umem_frag_buf_alloc(ndev, umem, umem->size);
 	if (err)
 		return err;
 
@@ -829,9 +824,9 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
 	MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
 	MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
 	MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
-	MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem1.size);
+	MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size);
 	MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
-	MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem1.size);
+	MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
 	MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
 	if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
 		MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);
@@ -1428,8 +1423,8 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
 		return -EINVAL;
 	}
 
-	mvq->used_idx = state->avail_index;
-	mvq->avail_idx = state->avail_index;
+	mvq->used_idx = state->split.avail_index;
+	mvq->avail_idx = state->split.avail_index;
 	return 0;
 }
 
@@ -1450,7 +1445,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
 		 * Since both values should be identical, we take the value of
 		 * used_idx which is reported correctly.
 		 */
-		state->avail_index = mvq->used_idx;
+		state->split.avail_index = mvq->used_idx;
 		return 0;
 	}
 
@@ -1459,7 +1454,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
 		mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
 		return err;
 	}
-	state->avail_index = attr.used_index;
+	state->split.avail_index = attr.used_index;
 	return 0;
 }
 
@@ -1772,6 +1767,14 @@ out:
 	mutex_unlock(&ndev->reslock);
 }
 
+static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
+{
+	int i;
+
+	for (i = 0; i < ndev->mvdev.max_vqs; i++)
+		ndev->vqs[i].ready = false;
+}
+
 static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 {
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -1782,10 +1785,15 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 	if (!status) {
 		mlx5_vdpa_info(mvdev, "performing device reset\n");
 		teardown_driver(ndev);
+		clear_vqs_ready(ndev);
 		mlx5_vdpa_destroy_mr(&ndev->mvdev);
 		ndev->mvdev.status = 0;
 		ndev->mvdev.mlx_features = 0;
 		++mvdev->generation;
+		if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+			if (mlx5_vdpa_create_mr(mvdev, NULL))
+				mlx5_vdpa_warn(mvdev, "create MR failed\n");
+		}
 		return;
 	}
 
@@ -1866,6 +1874,7 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
 	ndev = to_mlx5_vdpa_ndev(mvdev);
 
 	free_resources(ndev);
+	mlx5_vdpa_destroy_mr(mvdev);
 	if (!is_zero_ether_addr(ndev->config.mac)) {
 		pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
 		mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
@@ -1876,8 +1885,22 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
 
 static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
 {
+	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
 	struct vdpa_notification_area ret = {};
+	struct mlx5_vdpa_net *ndev;
+	phys_addr_t addr;
 
+	/* If SF BAR size is smaller than PAGE_SIZE, do not use direct
+	 * notification to avoid the risk of mapping pages that contain BAR of more
+	 * than one SF
+	 */
+	if (MLX5_CAP_GEN(mvdev->mdev, log_min_sf_size) + 12 < PAGE_SHIFT)
+		return ret;
+
+	ndev = to_mlx5_vdpa_ndev(mvdev);
+	addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr;
+	ret.addr = addr;
+	ret.size = PAGE_SIZE;
 	return ret;
 }
 
@@ -2037,14 +2060,20 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
 		goto err_mtu;
 	}
 
-	mvdev->vdev.dma_dev = mdev->device;
+	mvdev->vdev.dma_dev = &mdev->pdev->dev;
 	err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
 	if (err)
 		goto err_mpfs;
 
+	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+		err = mlx5_vdpa_create_mr(mvdev, NULL);
+		if (err)
+			goto err_res;
+	}
+
 	err = alloc_resources(ndev);
 	if (err)
-		goto err_res;
+		goto err_mr;
 
 	mvdev->vdev.mdev = &mgtdev->mgtdev;
 	err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
@@ -2056,6 +2085,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
 
 err_reg:
 	free_resources(ndev);
+err_mr:
+	mlx5_vdpa_destroy_mr(mvdev);
 err_res:
 	mlx5_vdpa_free_resources(&ndev->mvdev);
 err_mpfs:
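mlx5_get_vq_notification() only exposes the doorbell page when a sub-function BAR cannot share a CPU page with a neighbouring SF. The minimum SF BAR size is 2^(log_min_sf_size + 12) bytes, so the guard reduces to the comparison below; for example, log_min_sf_size == 1 gives 8 KiB granularity, which is safe with 4 KiB pages (PAGE_SHIFT == 12) but not with 64 KiB pages (PAGE_SHIFT == 16). A minimal restatement of the check:

static bool sf_kick_page_mappable(u8 log_min_sf_size, unsigned int page_shift)
{
	/* Refuse direct notification if one mapped CPU page could span
	 * the BARs of more than one SF.
	 */
	return log_min_sf_size + 12 >= page_shift;
}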
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 98f793bc9376..14e024de5cbf 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -374,7 +374,7 @@ static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
 	struct vringh *vrh = &vq->vring;
 
 	spin_lock(&vdpasim->lock);
-	vrh->last_avail_idx = state->avail_index;
+	vrh->last_avail_idx = state->split.avail_index;
 	spin_unlock(&vdpasim->lock);
 
 	return 0;
@@ -387,7 +387,7 @@ static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
 	struct vringh *vrh = &vq->vring;
 
-	state->avail_index = vrh->last_avail_idx;
+	state->split.avail_index = vrh->last_avail_idx;
 	return 0;
 }
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index 5bfe1c281645..a790903f243e 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -15,7 +15,6 @@
 #include <linux/blkdev.h>
 #include <linux/vringh.h>
 #include <linux/vdpa.h>
-#include <linux/blkdev.h>
 #include <uapi/linux/virtio_blk.h>
 
 #include "vdpa_sim.h"
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index c76ebb531212..7b4a6396c553 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -210,13 +210,49 @@ static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
 	return -EOPNOTSUPP;
 }
 
+static int vp_vdpa_set_vq_state_split(struct vdpa_device *vdpa,
+				      const struct vdpa_vq_state *state)
+{
+	const struct vdpa_vq_state_split *split = &state->split;
+
+	if (split->avail_index == 0)
+		return 0;
+
+	return -EOPNOTSUPP;
+}
+
+static int vp_vdpa_set_vq_state_packed(struct vdpa_device *vdpa,
+				       const struct vdpa_vq_state *state)
+{
+	const struct vdpa_vq_state_packed *packed = &state->packed;
+
+	if (packed->last_avail_counter == 1 &&
+	    packed->last_avail_idx == 0 &&
+	    packed->last_used_counter == 1 &&
+	    packed->last_used_idx == 0)
+		return 0;
+
+	return -EOPNOTSUPP;
+}
+
 static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
 				const struct vdpa_vq_state *state)
 {
-	/* Note that this is not supported by virtio specification, so
-	 * we return -ENOPOTSUPP here. This means we can't support live
-	 * migration, vhost device start/stop.
+	struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+	/* Note that this is not supported by virtio specification.
+	 * But if the state is by chance equal to the device initial
+	 * state, we can let it go.
 	 */
+	if ((vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK) &&
+	    !vp_modern_get_queue_enable(mdev, qid)) {
+		if (vp_modern_get_driver_features(mdev) &
+		    BIT_ULL(VIRTIO_F_RING_PACKED))
+			return vp_vdpa_set_vq_state_packed(vdpa, state);
+		else
+			return vp_vdpa_set_vq_state_split(vdpa, state);
+	}
+
 	return -EOPNOTSUPP;
 }
 
@@ -442,6 +478,7 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 			vp_modern_map_vq_notify(mdev, i,
 						&vp_vdpa->vring[i].notify_pa);
 		if (!vp_vdpa->vring[i].notify) {
+			ret = -EINVAL;
 			dev_warn(&pdev->dev, "Fail to map vq notify %d\n", i);
 			goto err;
 		}
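vp_vdpa still cannot program arbitrary ring state (the virtio specification defines no such operation), but it now accepts the one state it can guarantee: the device-initial one, and only in the window after FEATURES_OK and before the queue is enabled. A caller-side sketch, assuming the standard vdpa_config_ops set_vq_state signature:

static int restore_initial_packed_state(struct vdpa_device *vdpa, u16 qid,
					const struct vdpa_config_ops *ops)
{
	/* A packed ring starts with both wrap counters at 1 and both
	 * indexes at 0; anything else is refused with -EOPNOTSUPP.
	 */
	const struct vdpa_vq_state state = {
		.packed = {
			.last_avail_counter = 1,
			.last_avail_idx = 0,
			.last_used_counter = 1,
			.last_used_idx = 0,
		},
	};

	return ops->set_vq_state(vdpa, qid, &state);
}

This is what makes vhost device start/stop workable on vp_vdpa despite the specification gap.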