From 9b0d6b7e28a9bbbf4cee0727a299c2107047b1a5 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 5 Aug 2021 22:19:08 -0300 Subject: vfio/ap,ccw: Fix open/close when multiple device FDs are open The user can open multiple device FDs if it likes; however, these open() functions call vfio_register_notifier() on some device global state. Calling vfio_register_notifier() twice will trigger a WARN_ON from notifier_chain_register() and the first close will wrongly delete the notifier and more. Since these really want the new open/close_device() semantics, just change the functions over. Reviewed-by: Cornelia Huck Signed-off-by: Jason Gunthorpe Link: https://lore.kernel.org/r/12-v4-9ea22c5e6afb+1adf-vfio_reflck_jgg@nvidia.com Signed-off-by: Alex Williamson --- drivers/s390/crypto/vfio_ap_ops.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/s390/crypto') diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 122c85c22469..cee5626fe0a4 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -1315,7 +1315,7 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) return rc; } -static int vfio_ap_mdev_open(struct mdev_device *mdev) +static int vfio_ap_mdev_open_device(struct mdev_device *mdev) { struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); unsigned long events; @@ -1348,7 +1348,7 @@ static int vfio_ap_mdev_open(struct mdev_device *mdev) return ret; } -static void vfio_ap_mdev_release(struct mdev_device *mdev) +static void vfio_ap_mdev_close_device(struct mdev_device *mdev) { struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); @@ -1427,8 +1427,8 @@ static const struct mdev_parent_ops vfio_ap_matrix_ops = { .mdev_attr_groups = vfio_ap_mdev_attr_groups, .create = vfio_ap_mdev_create, .remove = vfio_ap_mdev_remove, - .open = vfio_ap_mdev_open, - .release = vfio_ap_mdev_release, + .open_device = vfio_ap_mdev_open_device, + .close_device = vfio_ap_mdev_close_device, .ioctl = vfio_ap_mdev_ioctl, }; -- cgit v1.2.3-59-g8ed1b From 1e753732bda6dcf888ea0b90b2a91ac1c1a0bae9 Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Mon, 23 Aug 2021 17:20:46 -0400 Subject: s390/vfio-ap: r/w lock for PQAP interception handler function pointer The function pointer to the interception handler for the PQAP instruction can get changed during the interception process. Let's add a read/write semaphore to struct kvm_s390_crypto to control read/write access to the function pointer contained therein. The semaphore must be locked for write access by the vfio_ap device driver when notified that the KVM pointer has been set or cleared. It must be locked for read access by the interception framework when the PQAP instruction is intercepted.
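A condensed sketch of the locking pattern this adds, distilled from the diff below (illustration only; every identifier is taken from the patch):

    /* Interception side (handle_pqap() in priv.c): hold the rwsem for read. */
    down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
    if (vcpu->kvm->arch.crypto.pqap_hook) {
            crypto_hook pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;

            ret = pqap_hook(vcpu);
    }
    up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);

    /* vfio_ap driver side: hold it for write while setting or clearing the hook. */
    down_write(&kvm->arch.crypto.pqap_hook_rwsem);
    kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;   /* or NULL on unset */
    up_write(&kvm->arch.crypto.pqap_hook_rwsem);
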
Signed-off-by: Tony Krowiak Reviewed-by: Jason Gunthorpe Reviewed-by: Christian Borntraeger Link: https://lore.kernel.org/r/20210823212047.1476436-2-akrowiak@linux.ibm.com Signed-off-by: Alex Williamson --- arch/s390/include/asm/kvm_host.h | 8 +++----- arch/s390/kvm/kvm-s390.c | 1 + arch/s390/kvm/priv.c | 15 +++++++++------ drivers/s390/crypto/vfio_ap_ops.c | 23 +++++++++++++++++------ drivers/s390/crypto/vfio_ap_private.h | 2 +- 5 files changed, 31 insertions(+), 18 deletions(-) (limited to 'drivers/s390/crypto') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 161a9e12bfb8..d681ae462350 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -798,14 +798,12 @@ struct kvm_s390_cpu_model { unsigned short ibc; }; -struct kvm_s390_module_hook { - int (*hook)(struct kvm_vcpu *vcpu); - struct module *owner; -}; +typedef int (*crypto_hook)(struct kvm_vcpu *vcpu); struct kvm_s390_crypto { struct kvm_s390_crypto_cb *crycb; - struct kvm_s390_module_hook *pqap_hook; + struct rw_semaphore pqap_hook_rwsem; + crypto_hook *pqap_hook; __u32 crycbd; __u8 aes_kw; __u8 dea_kw; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 4527ac7b5961..5c4f559bcd60 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2630,6 +2630,7 @@ static void kvm_s390_crypto_init(struct kvm *kvm) { kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; kvm_s390_set_crycb_format(kvm); + init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); if (!test_kvm_facility(kvm, 76)) return; diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 9928f785c677..53da4ceb16a3 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -610,6 +610,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu) static int handle_pqap(struct kvm_vcpu *vcpu) { struct ap_queue_status status = {}; + crypto_hook pqap_hook; unsigned long reg0; int ret; uint8_t fc; @@ -654,18 +655,20 @@ static int handle_pqap(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* - * Verify that the hook callback is registered, lock the owner - * and call the hook. + * If the hook callback is registered, there will be a pointer to the + * hook function pointer in the kvm_s390_crypto structure. Lock the + * owner, retrieve the hook function pointer and call the hook. */ + down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem); if (vcpu->kvm->arch.crypto.pqap_hook) { - if (!try_module_get(vcpu->kvm->arch.crypto.pqap_hook->owner)) - return -EOPNOTSUPP; - ret = vcpu->kvm->arch.crypto.pqap_hook->hook(vcpu); - module_put(vcpu->kvm->arch.crypto.pqap_hook->owner); + pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook; + ret = pqap_hook(vcpu); if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000) kvm_s390_set_psw_cc(vcpu, 3); + up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem); return ret; } + up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem); /* * A vfio_driver must register a hook. * No hook means no driver to enable the SIE CRYCB and no queues. 
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index cee5626fe0a4..439ca7768eb7 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -352,8 +352,7 @@ static int vfio_ap_mdev_create(struct mdev_device *mdev) vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix); init_waitqueue_head(&matrix_mdev->wait_for_kvm); mdev_set_drvdata(mdev, matrix_mdev); - matrix_mdev->pqap_hook.hook = handle_pqap; - matrix_mdev->pqap_hook.owner = THIS_MODULE; + matrix_mdev->pqap_hook = handle_pqap; mutex_lock(&matrix_dev->lock); list_add(&matrix_mdev->node, &matrix_dev->mdev_list); mutex_unlock(&matrix_dev->lock); @@ -1115,15 +1114,20 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev, } kvm_get_kvm(kvm); + matrix_mdev->kvm = kvm; matrix_mdev->kvm_busy = true; mutex_unlock(&matrix_dev->lock); + + down_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem); + kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; + up_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem); + kvm_arch_crypto_set_masks(kvm, matrix_mdev->matrix.apm, matrix_mdev->matrix.aqm, matrix_mdev->matrix.adm); + mutex_lock(&matrix_dev->lock); - kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; - matrix_mdev->kvm = kvm; matrix_mdev->kvm_busy = false; wake_up_all(&matrix_mdev->wait_for_kvm); } @@ -1189,10 +1193,17 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev) if (matrix_mdev->kvm) { matrix_mdev->kvm_busy = true; mutex_unlock(&matrix_dev->lock); - kvm_arch_crypto_clear_masks(matrix_mdev->kvm); + + if (matrix_mdev->kvm->arch.crypto.crycbd) { + down_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem); + matrix_mdev->kvm->arch.crypto.pqap_hook = NULL; + up_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem); + + kvm_arch_crypto_clear_masks(matrix_mdev->kvm); + } + mutex_lock(&matrix_dev->lock); vfio_ap_mdev_reset_queues(matrix_mdev->mdev); - matrix_mdev->kvm->arch.crypto.pqap_hook = NULL; kvm_put_kvm(matrix_mdev->kvm); matrix_mdev->kvm = NULL; matrix_mdev->kvm_busy = false; diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index f82a6396acae..e12218e5a629 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -86,7 +86,7 @@ struct ap_matrix_mdev { bool kvm_busy; wait_queue_head_t wait_for_kvm; struct kvm *kvm; - struct kvm_s390_module_hook pqap_hook; + crypto_hook pqap_hook; struct mdev_device *mdev; }; -- cgit v1.2.3-59-g8ed1b From 86956e70761b3292156d668e87126844334dd71b Mon Sep 17 00:00:00 2001 From: Tony Krowiak Date: Mon, 23 Aug 2021 17:20:47 -0400 Subject: s390/vfio-ap: replace open coded locks for VFIO_GROUP_NOTIFY_SET_KVM notification It was pointed out during an unrelated patch review that locks should not be open coded - i.e., writing the algorithm of a standard lock in a function instead of using a lock from the standard library. The setting and testing of a busy flag and sleeping on a wait_event is the same thing a lock does. The open coded locks are invisible to lockdep, so potential locking problems are not detected. This patch removes the open coded locks used during VFIO_GROUP_NOTIFY_SET_KVM notification. The busy flag and wait queue were introduced to resolve a possible circular locking dependency reported by lockdep when starting a secure execution guest configured with AP adapters and domains. 
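For context, the open-coded pattern being removed looks like this (assembled from the lines deleted below); it behaves like a lock but is invisible to lockdep:

    /* "Acquire" side: sleep until the busy flag clears, dropping the real mutex while waiting. */
    wait_event_cmd(matrix_mdev->wait_for_kvm,
                   !matrix_mdev->kvm_busy,
                   mutex_unlock(&matrix_dev->lock),
                   mutex_lock(&matrix_dev->lock));

    /* "Owner" side: mark busy, do the work with the mutex dropped, then wake the waiters. */
    matrix_mdev->kvm_busy = true;
    mutex_unlock(&matrix_dev->lock);
    /* ... set up or tear down the KVM association ... */
    mutex_lock(&matrix_dev->lock);
    matrix_mdev->kvm_busy = false;
    wake_up_all(&matrix_mdev->wait_for_kvm);
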
Reversing the order in which the kvm->lock mutex and matrix_dev->lock mutex are locked resolves the issue reported by lockdep, thus enabling the removal of the open coded locks. Signed-off-by: Tony Krowiak Acked-by: Halil Pasic Link: https://lore.kernel.org/r/20210823212047.1476436-3-akrowiak@linux.ibm.com Signed-off-by: Alex Williamson --- arch/s390/kvm/kvm-s390.c | 31 ++++++-- drivers/s390/crypto/vfio_ap_ops.c | 132 +++++++++++----------------------- drivers/s390/crypto/vfio_ap_private.h | 2 - 3 files changed, 67 insertions(+), 98 deletions(-) (limited to 'drivers/s390/crypto') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 5c4f559bcd60..efda0615741f 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2559,12 +2559,26 @@ static void kvm_s390_set_crycb_format(struct kvm *kvm) kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; } +/* + * kvm_arch_crypto_set_masks + * + * @kvm: pointer to the target guest's KVM struct containing the crypto masks + * to be set. + * @apm: the mask identifying the accessible AP adapters + * @aqm: the mask identifying the accessible AP domains + * @adm: the mask identifying the accessible AP control domains + * + * Set the masks that identify the adapters, domains and control domains to + * which the KVM guest is granted access. + * + * Note: The kvm->lock mutex must be locked by the caller before invoking this + * function. + */ void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, unsigned long *aqm, unsigned long *adm) { struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; - mutex_lock(&kvm->lock); kvm_s390_vcpu_block_all(kvm); switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { @@ -2595,13 +2609,23 @@ void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, /* recreate the shadow crycb for each vcpu */ kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); kvm_s390_vcpu_unblock_all(kvm); - mutex_unlock(&kvm->lock); } EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks); +/* + * kvm_arch_crypto_clear_masks + * + * @kvm: pointer to the target guest's KVM struct containing the crypto masks + * to be cleared. + * + * Clear the masks that identify the adapters, domains and control domains to + * which the KVM guest is granted access. + * + * Note: The kvm->lock mutex must be locked by the caller before invoking this + * function. + */ void kvm_arch_crypto_clear_masks(struct kvm *kvm) { - mutex_lock(&kvm->lock); kvm_s390_vcpu_block_all(kvm); memset(&kvm->arch.crypto.crycb->apcb0, 0, @@ -2613,7 +2637,6 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm) /* recreate the shadow crycb for each vcpu */ kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); kvm_s390_vcpu_unblock_all(kvm); - mutex_unlock(&kvm->lock); } EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks); diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 439ca7768eb7..c46937de5758 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -294,15 +294,6 @@ static int handle_pqap(struct kvm_vcpu *vcpu) matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook, struct ap_matrix_mdev, pqap_hook); - /* - * If the KVM pointer is in the process of being set, wait until the - * process has completed. 
- */ - wait_event_cmd(matrix_mdev->wait_for_kvm, - !matrix_mdev->kvm_busy, - mutex_unlock(&matrix_dev->lock), - mutex_lock(&matrix_dev->lock)); - /* If the there is no guest using the mdev, there is nothing to do */ if (!matrix_mdev->kvm) goto out_unlock; @@ -350,7 +341,6 @@ static int vfio_ap_mdev_create(struct mdev_device *mdev) matrix_mdev->mdev = mdev; vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix); - init_waitqueue_head(&matrix_mdev->wait_for_kvm); mdev_set_drvdata(mdev, matrix_mdev); matrix_mdev->pqap_hook = handle_pqap; mutex_lock(&matrix_dev->lock); @@ -619,11 +609,8 @@ static ssize_t assign_adapter_store(struct device *dev, mutex_lock(&matrix_dev->lock); - /* - * If the KVM pointer is in flux or the guest is running, disallow - * un-assignment of adapter - */ - if (matrix_mdev->kvm_busy || matrix_mdev->kvm) { + /* If the KVM guest is running, disallow assignment of adapter */ + if (matrix_mdev->kvm) { ret = -EBUSY; goto done; } @@ -692,11 +679,8 @@ static ssize_t unassign_adapter_store(struct device *dev, mutex_lock(&matrix_dev->lock); - /* - * If the KVM pointer is in flux or the guest is running, disallow - * un-assignment of adapter - */ - if (matrix_mdev->kvm_busy || matrix_mdev->kvm) { + /* If the KVM guest is running, disallow unassignment of adapter */ + if (matrix_mdev->kvm) { ret = -EBUSY; goto done; } @@ -782,11 +766,8 @@ static ssize_t assign_domain_store(struct device *dev, mutex_lock(&matrix_dev->lock); - /* - * If the KVM pointer is in flux or the guest is running, disallow - * assignment of domain - */ - if (matrix_mdev->kvm_busy || matrix_mdev->kvm) { + /* If the KVM guest is running, disallow assignment of domain */ + if (matrix_mdev->kvm) { ret = -EBUSY; goto done; } @@ -850,11 +831,8 @@ static ssize_t unassign_domain_store(struct device *dev, mutex_lock(&matrix_dev->lock); - /* - * If the KVM pointer is in flux or the guest is running, disallow - * un-assignment of domain - */ - if (matrix_mdev->kvm_busy || matrix_mdev->kvm) { + /* If the KVM guest is running, disallow unassignment of domain */ + if (matrix_mdev->kvm) { ret = -EBUSY; goto done; } @@ -904,11 +882,8 @@ static ssize_t assign_control_domain_store(struct device *dev, mutex_lock(&matrix_dev->lock); - /* - * If the KVM pointer is in flux or the guest is running, disallow - * assignment of control domain. - */ - if (matrix_mdev->kvm_busy || matrix_mdev->kvm) { + /* If the KVM guest is running, disallow assignment of control domain */ + if (matrix_mdev->kvm) { ret = -EBUSY; goto done; } @@ -963,11 +938,8 @@ static ssize_t unassign_control_domain_store(struct device *dev, mutex_lock(&matrix_dev->lock); - /* - * If the KVM pointer is in flux or the guest is running, disallow - * un-assignment of control domain. 
- */ - if (matrix_mdev->kvm_busy || matrix_mdev->kvm) { + /* If a KVM guest is running, disallow unassignment of control domain */ + if (matrix_mdev->kvm) { ret = -EBUSY; goto done; } @@ -1108,28 +1080,30 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev, struct ap_matrix_mdev *m; if (kvm->arch.crypto.crycbd) { + down_write(&kvm->arch.crypto.pqap_hook_rwsem); + kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; + up_write(&kvm->arch.crypto.pqap_hook_rwsem); + + mutex_lock(&kvm->lock); + mutex_lock(&matrix_dev->lock); + list_for_each_entry(m, &matrix_dev->mdev_list, node) { - if (m != matrix_mdev && m->kvm == kvm) + if (m != matrix_mdev && m->kvm == kvm) { + mutex_unlock(&kvm->lock); + mutex_unlock(&matrix_dev->lock); return -EPERM; + } } kvm_get_kvm(kvm); matrix_mdev->kvm = kvm; - matrix_mdev->kvm_busy = true; - mutex_unlock(&matrix_dev->lock); - - down_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem); - kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; - up_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem); - kvm_arch_crypto_set_masks(kvm, matrix_mdev->matrix.apm, matrix_mdev->matrix.aqm, matrix_mdev->matrix.adm); - mutex_lock(&matrix_dev->lock); - matrix_mdev->kvm_busy = false; - wake_up_all(&matrix_mdev->wait_for_kvm); + mutex_unlock(&kvm->lock); + mutex_unlock(&matrix_dev->lock); } return 0; @@ -1179,35 +1153,24 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb, * done under the @matrix_mdev->lock. * */ -static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev) +static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev, + struct kvm *kvm) { - /* - * If the KVM pointer is in the process of being set, wait until the - * process has completed. - */ - wait_event_cmd(matrix_mdev->wait_for_kvm, - !matrix_mdev->kvm_busy, - mutex_unlock(&matrix_dev->lock), - mutex_lock(&matrix_dev->lock)); - - if (matrix_mdev->kvm) { - matrix_mdev->kvm_busy = true; - mutex_unlock(&matrix_dev->lock); - - if (matrix_mdev->kvm->arch.crypto.crycbd) { - down_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem); - matrix_mdev->kvm->arch.crypto.pqap_hook = NULL; - up_write(&matrix_mdev->kvm->arch.crypto.pqap_hook_rwsem); - - kvm_arch_crypto_clear_masks(matrix_mdev->kvm); - } + if (kvm && kvm->arch.crypto.crycbd) { + down_write(&kvm->arch.crypto.pqap_hook_rwsem); + kvm->arch.crypto.pqap_hook = NULL; + up_write(&kvm->arch.crypto.pqap_hook_rwsem); + mutex_lock(&kvm->lock); mutex_lock(&matrix_dev->lock); + + kvm_arch_crypto_clear_masks(kvm); vfio_ap_mdev_reset_queues(matrix_mdev->mdev); - kvm_put_kvm(matrix_mdev->kvm); + kvm_put_kvm(kvm); matrix_mdev->kvm = NULL; - matrix_mdev->kvm_busy = false; - wake_up_all(&matrix_mdev->wait_for_kvm); + + mutex_unlock(&kvm->lock); + mutex_unlock(&matrix_dev->lock); } } @@ -1220,16 +1183,13 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb, if (action != VFIO_GROUP_NOTIFY_SET_KVM) return NOTIFY_OK; - mutex_lock(&matrix_dev->lock); matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier); if (!data) - vfio_ap_mdev_unset_kvm(matrix_mdev); + vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm); else if (vfio_ap_mdev_set_kvm(matrix_mdev, data)) notify_rc = NOTIFY_DONE; - mutex_unlock(&matrix_dev->lock); - return notify_rc; } @@ -1363,14 +1323,11 @@ static void vfio_ap_mdev_close_device(struct mdev_device *mdev) { struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); - mutex_lock(&matrix_dev->lock); - vfio_ap_mdev_unset_kvm(matrix_mdev); - mutex_unlock(&matrix_dev->lock); - 
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &matrix_mdev->iommu_notifier); vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &matrix_mdev->group_notifier); + vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm); module_put(THIS_MODULE); } @@ -1412,15 +1369,6 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev, break; } - /* - * If the KVM pointer is in the process of being set, wait until - * the process has completed. - */ - wait_event_cmd(matrix_mdev->wait_for_kvm, - !matrix_mdev->kvm_busy, - mutex_unlock(&matrix_dev->lock), - mutex_lock(&matrix_dev->lock)); - ret = vfio_ap_mdev_reset_queues(mdev); break; default: diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index e12218e5a629..22d2e0ca3ae5 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -83,8 +83,6 @@ struct ap_matrix_mdev { struct ap_matrix matrix; struct notifier_block group_notifier; struct notifier_block iommu_notifier; - bool kvm_busy; - wait_queue_head_t wait_for_kvm; struct kvm *kvm; crypto_hook pqap_hook; struct mdev_device *mdev; }; -- cgit v1.2.3-59-g8ed1b From eb0feefd4c025b2697464d141f7ff178095f34df Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 23 Aug 2021 11:42:04 -0300 Subject: vfio/ap_ops: Convert to use vfio_register_group_dev() This is a straightforward conversion: the ap_matrix_mdev is actually serving as the vfio_device, and we can replace all the mdev_get_drvdata() calls with a simple container_of(), or with dev_get_drvdata() for sysfs paths. Cc: Alex Williamson Cc: Cornelia Huck Cc: kvm@vger.kernel.org Cc: Christoph Hellwig Reviewed-by: Tony Krowiak Reviewed-by: Christoph Hellwig Signed-off-by: Jason Gunthorpe Link: https://lore.kernel.org/r/0-v4-0203a4ab0596+f7-vfio_ap_jgg@nvidia.com Signed-off-by: Alex Williamson --- drivers/s390/crypto/vfio_ap_ops.c | 155 +++++++++++++++++++--------------- drivers/s390/crypto/vfio_ap_private.h | 2 + 2 files changed, 91 insertions(+), 66 deletions(-) (limited to 'drivers/s390/crypto') diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index c46937de5758..2347808fa3e4 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -24,8 +24,9 @@ #define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough" #define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device" -static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev); +static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev); static struct vfio_ap_queue *vfio_ap_find_queue(int apqn); +static const struct vfio_device_ops vfio_ap_matrix_dev_ops; static int match_apqn(struct device *dev, const void *data) { @@ -326,43 +327,57 @@ static void vfio_ap_matrix_init(struct ap_config_info *info, matrix->adm_max = info->apxa ?
info->Nd : 15; } -static int vfio_ap_mdev_create(struct mdev_device *mdev) +static int vfio_ap_mdev_probe(struct mdev_device *mdev) { struct ap_matrix_mdev *matrix_mdev; + int ret; if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0)) return -EPERM; matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL); if (!matrix_mdev) { - atomic_inc(&matrix_dev->available_instances); - return -ENOMEM; + ret = -ENOMEM; + goto err_dec_available; } + vfio_init_group_dev(&matrix_mdev->vdev, &mdev->dev, + &vfio_ap_matrix_dev_ops); matrix_mdev->mdev = mdev; vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix); - mdev_set_drvdata(mdev, matrix_mdev); matrix_mdev->pqap_hook = handle_pqap; mutex_lock(&matrix_dev->lock); list_add(&matrix_mdev->node, &matrix_dev->mdev_list); mutex_unlock(&matrix_dev->lock); + ret = vfio_register_group_dev(&matrix_mdev->vdev); + if (ret) + goto err_list; + dev_set_drvdata(&mdev->dev, matrix_mdev); return 0; + +err_list: + mutex_lock(&matrix_dev->lock); + list_del(&matrix_mdev->node); + mutex_unlock(&matrix_dev->lock); + kfree(matrix_mdev); +err_dec_available: + atomic_inc(&matrix_dev->available_instances); + return ret; } -static int vfio_ap_mdev_remove(struct mdev_device *mdev) +static void vfio_ap_mdev_remove(struct mdev_device *mdev) { - struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev); + + vfio_unregister_group_dev(&matrix_mdev->vdev); mutex_lock(&matrix_dev->lock); - vfio_ap_mdev_reset_queues(mdev); + vfio_ap_mdev_reset_queues(matrix_mdev); list_del(&matrix_mdev->node); kfree(matrix_mdev); - mdev_set_drvdata(mdev, NULL); atomic_inc(&matrix_dev->available_instances); mutex_unlock(&matrix_dev->lock); - - return 0; } static ssize_t name_show(struct mdev_type *mtype, @@ -604,8 +619,7 @@ static ssize_t assign_adapter_store(struct device *dev, { int ret; unsigned long apid; - struct mdev_device *mdev = mdev_from_dev(dev); - struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); mutex_lock(&matrix_dev->lock); @@ -674,8 +688,7 @@ static ssize_t unassign_adapter_store(struct device *dev, { int ret; unsigned long apid; - struct mdev_device *mdev = mdev_from_dev(dev); - struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); mutex_lock(&matrix_dev->lock); @@ -760,8 +773,7 @@ static ssize_t assign_domain_store(struct device *dev, { int ret; unsigned long apqi; - struct mdev_device *mdev = mdev_from_dev(dev); - struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); unsigned long max_apqi = matrix_mdev->matrix.aqm_max; mutex_lock(&matrix_dev->lock); @@ -826,8 +838,7 @@ static ssize_t unassign_domain_store(struct device *dev, { int ret; unsigned long apqi; - struct mdev_device *mdev = mdev_from_dev(dev); - struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); mutex_lock(&matrix_dev->lock); @@ -877,8 +888,7 @@ static ssize_t assign_control_domain_store(struct device *dev, { int ret; unsigned long id; - struct mdev_device *mdev = mdev_from_dev(dev); - struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); mutex_lock(&matrix_dev->lock); @@ -932,8 +942,7 @@ static ssize_t unassign_control_domain_store(struct device *dev, { int ret; unsigned long domid; - struct 
mdev_device *mdev = mdev_from_dev(dev); - struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); unsigned long max_domid = matrix_mdev->matrix.adm_max; mutex_lock(&matrix_dev->lock); @@ -968,8 +977,7 @@ static ssize_t control_domains_show(struct device *dev, int nchars = 0; int n; char *bufpos = buf; - struct mdev_device *mdev = mdev_from_dev(dev); - struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); unsigned long max_domid = matrix_mdev->matrix.adm_max; mutex_lock(&matrix_dev->lock); @@ -987,8 +995,7 @@ static DEVICE_ATTR_RO(control_domains); static ssize_t matrix_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct mdev_device *mdev = mdev_from_dev(dev); - struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); char *bufpos = buf; unsigned long apid; unsigned long apqi; @@ -1165,7 +1172,7 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev, mutex_lock(&matrix_dev->lock); kvm_arch_crypto_clear_masks(kvm); - vfio_ap_mdev_reset_queues(matrix_mdev->mdev); + vfio_ap_mdev_reset_queues(matrix_mdev); kvm_put_kvm(kvm); matrix_mdev->kvm = NULL; @@ -1259,13 +1266,12 @@ free_resources: return ret; } -static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) +static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev) { int ret; int rc = 0; unsigned long apid, apqi; struct vfio_ap_queue *q; - struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, matrix_mdev->matrix.apm_max + 1) { @@ -1286,49 +1292,45 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) return rc; } -static int vfio_ap_mdev_open_device(struct mdev_device *mdev) +static int vfio_ap_mdev_open_device(struct vfio_device *vdev) { - struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + struct ap_matrix_mdev *matrix_mdev = + container_of(vdev, struct ap_matrix_mdev, vdev); unsigned long events; int ret; - - if (!try_module_get(THIS_MODULE)) - return -ENODEV; - matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier; events = VFIO_GROUP_NOTIFY_SET_KVM; - ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, + ret = vfio_register_notifier(vdev->dev, VFIO_GROUP_NOTIFY, &events, &matrix_mdev->group_notifier); - if (ret) { - module_put(THIS_MODULE); + if (ret) return ret; - } matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier; events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; - ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, + ret = vfio_register_notifier(vdev->dev, VFIO_IOMMU_NOTIFY, &events, &matrix_mdev->iommu_notifier); - if (!ret) - return ret; + if (ret) + goto out_unregister_group; + return 0; - vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, +out_unregister_group: + vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY, &matrix_mdev->group_notifier); - module_put(THIS_MODULE); return ret; } -static void vfio_ap_mdev_close_device(struct mdev_device *mdev) +static void vfio_ap_mdev_close_device(struct vfio_device *vdev) { - struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); + struct ap_matrix_mdev *matrix_mdev = + container_of(vdev, struct ap_matrix_mdev, vdev); - vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, + vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY, &matrix_mdev->iommu_notifier); - 
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, + vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY, &matrix_mdev->group_notifier); vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm); - module_put(THIS_MODULE); } static int vfio_ap_mdev_get_device_info(unsigned long arg) @@ -1351,11 +1353,12 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg) return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; } -static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev, +static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev, unsigned int cmd, unsigned long arg) { + struct ap_matrix_mdev *matrix_mdev = + container_of(vdev, struct ap_matrix_mdev, vdev); int ret; - struct ap_matrix_mdev *matrix_mdev; mutex_lock(&matrix_dev->lock); switch (cmd) { @@ -1363,13 +1366,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev, ret = vfio_ap_mdev_get_device_info(arg); break; case VFIO_DEVICE_RESET: - matrix_mdev = mdev_get_drvdata(mdev); - if (WARN(!matrix_mdev, "Driver data missing from mdev!!")) { - ret = -EINVAL; - break; - } - - ret = vfio_ap_mdev_reset_queues(mdev); + ret = vfio_ap_mdev_reset_queues(matrix_mdev); break; default: ret = -EOPNOTSUPP; @@ -1380,25 +1377,51 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev, return ret; } +static const struct vfio_device_ops vfio_ap_matrix_dev_ops = { + .open_device = vfio_ap_mdev_open_device, + .close_device = vfio_ap_mdev_close_device, + .ioctl = vfio_ap_mdev_ioctl, +}; + +static struct mdev_driver vfio_ap_matrix_driver = { + .driver = { + .name = "vfio_ap_mdev", + .owner = THIS_MODULE, + .mod_name = KBUILD_MODNAME, + .dev_groups = vfio_ap_mdev_attr_groups, + }, + .probe = vfio_ap_mdev_probe, + .remove = vfio_ap_mdev_remove, +}; + static const struct mdev_parent_ops vfio_ap_matrix_ops = { .owner = THIS_MODULE, + .device_driver = &vfio_ap_matrix_driver, .supported_type_groups = vfio_ap_mdev_type_groups, - .mdev_attr_groups = vfio_ap_mdev_attr_groups, - .create = vfio_ap_mdev_create, - .remove = vfio_ap_mdev_remove, - .open_device = vfio_ap_mdev_open_device, - .close_device = vfio_ap_mdev_close_device, - .ioctl = vfio_ap_mdev_ioctl, }; int vfio_ap_mdev_register(void) { + int ret; + atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT); - return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops); + ret = mdev_register_driver(&vfio_ap_matrix_driver); + if (ret) + return ret; + + ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops); + if (ret) + goto err_driver; + return 0; + +err_driver: + mdev_unregister_driver(&vfio_ap_matrix_driver); + return ret; } void vfio_ap_mdev_unregister(void) { mdev_unregister_device(&matrix_dev->device); + mdev_unregister_driver(&vfio_ap_matrix_driver); } diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index 22d2e0ca3ae5..77760e2b546f 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -18,6 +18,7 @@ #include #include #include +#include #include "ap_bus.h" @@ -79,6 +80,7 @@ struct ap_matrix { * @kvm: the struct holding guest's state */ struct ap_matrix_mdev { + struct vfio_device vdev; struct list_head node; struct ap_matrix matrix; struct notifier_block group_notifier; -- cgit v1.2.3-59-g8ed1b
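For reference, the core of the conversion in the final patch above reduces to this embed-and-container_of pattern (a condensed sketch assembled from the diff, not a complete driver; all identifiers come from the patch):

    /* The per-mdev state embeds a struct vfio_device ... */
    struct ap_matrix_mdev {
            struct vfio_device vdev;
            /* ... */
    };

    /* ... which vfio_ap_mdev_probe() initializes and registers ... */
    vfio_init_group_dev(&matrix_mdev->vdev, &mdev->dev, &vfio_ap_matrix_dev_ops);
    ret = vfio_register_group_dev(&matrix_mdev->vdev);

    /* ... and every vfio_device_ops callback recovers it with container_of(). */
    struct ap_matrix_mdev *matrix_mdev =
            container_of(vdev, struct ap_matrix_mdev, vdev);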