Diffstat (limited to 'drivers/crypto/qat/qat_common/adf_sriov.c')
-rw-r--r--  drivers/crypto/qat/qat_common/adf_sriov.c | 43
1 file changed, 25 insertions, 18 deletions
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 90ec057f9183..b2db1d70d71f 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -1,12 +1,14 @@
 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
-/* Copyright(c) 2015 - 2020 Intel Corporation */
+/* Copyright(c) 2015 - 2021 Intel Corporation */
 #include <linux/workqueue.h>
 #include <linux/pci.h>
 #include <linux/device.h>
-#include <linux/iommu.h>
 #include "adf_common_drv.h"
 #include "adf_cfg.h"
-#include "adf_pf2vf_msg.h"
+#include "adf_pfvf_pf_msg.h"
+
+#define ADF_VF2PF_RATELIMIT_INTERVAL	8
+#define ADF_VF2PF_RATELIMIT_BURST	130
 
 static struct workqueue_struct *pf2vf_resp_wq;
 
@@ -19,8 +21,16 @@ static void adf_iov_send_resp(struct work_struct *work)
 {
 	struct adf_pf2vf_resp *pf2vf_resp =
 		container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+	struct adf_accel_vf_info *vf_info = pf2vf_resp->vf_info;
+	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+	u32 vf_nr = vf_info->vf_nr;
+	bool ret;
+
+	ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr);
+	if (ret)
+		/* re-enable interrupt on PF from this VF */
+		adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr);
 
-	adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
 	kfree(pf2vf_resp);
 }
 
@@ -50,11 +60,12 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
 		/* This ptr will be populated when VFs will be created */
 		vf_info->accel_dev = accel_dev;
 		vf_info->vf_nr = i;
+		vf_info->vf_compat_ver = 0;
 
 		mutex_init(&vf_info->pf2vf_lock);
 		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
-				     DEFAULT_RATELIMIT_INTERVAL,
-				     DEFAULT_RATELIMIT_BURST);
+				     ADF_VF2PF_RATELIMIT_INTERVAL,
+				     ADF_VF2PF_RATELIMIT_BURST);
 	}
 
 	/* Set Valid bits in AE Thread to PCIe Function Mapping */
@@ -62,8 +73,7 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
 		hw_data->configure_iov_threads(accel_dev, true);
 
 	/* Enable VF to PF interrupts for all VFs */
-	if (hw_data->get_pf2vf_offset)
-		adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);
+	adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1);
 
 	/*
 	 * Due to the hardware design, when SR-IOV and the ring arbiter
@@ -92,22 +102,18 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 	if (!accel_dev->pf.vf_info)
 		return;
 
-	if (hw_data->get_pf2vf_offset)
-		adf_pf2vf_notify_restarting(accel_dev);
-
+	adf_pf2vf_notify_restarting(accel_dev);
 	pci_disable_sriov(accel_to_pci_dev(accel_dev));
 
 	/* Disable VF to PF interrupts */
-	if (hw_data->get_pf2vf_offset)
-		adf_disable_vf2pf_interrupts(accel_dev, GENMASK(31, 0));
+	adf_disable_all_vf2pf_interrupts(accel_dev);
 
 	/* Clear Valid bits in AE Thread to PCIe Function Mapping */
 	if (hw_data->configure_iov_threads)
 		hw_data->configure_iov_threads(accel_dev, false);
 
-	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
+	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++)
 		mutex_destroy(&vf->pf2vf_lock);
-	}
 
 	kfree(accel_dev->pf.vf_info);
 	accel_dev->pf.vf_info = NULL;
@@ -138,7 +144,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 		return -EFAULT;
 	}
 
-	if (!iommu_present(&pci_bus_type))
+	if (!device_iommu_mapped(&pdev->dev))
 		dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
 
 	if (accel_dev->pf.vf_info) {
@@ -153,8 +159,9 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 			return -EBUSY;
 		}
 
-		adf_dev_stop(accel_dev);
-		adf_dev_shutdown(accel_dev);
+		ret = adf_dev_shutdown_cache_cfg(accel_dev);
+		if (ret)
+			return ret;
 	}
 
 	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
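
Two of the API changes in this patch are generic kernel patterns rather than QAT-specific ones: the bus-wide iommu_present(&pci_bus_type) test is replaced by device_iommu_mapped(&pdev->dev), which reports whether this particular device sits behind an IOMMU, and the generic DEFAULT_RATELIMIT_INTERVAL/DEFAULT_RATELIMIT_BURST values are swapped for driver-chosen limits passed to ratelimit_state_init(). The sketch below is illustrative only, not part of the driver; the example_* names are hypothetical, and it assumes the interval argument is in jiffies, as with the ratelimit defaults.

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>

/* Hypothetical values mirroring the patch's macros; interval is in jiffies. */
#define EXAMPLE_RATELIMIT_INTERVAL	8
#define EXAMPLE_RATELIMIT_BURST		130

static struct ratelimit_state example_rs;

static void example_setup(struct pci_dev *pdev)
{
	/* Per-device IOMMU check: true only if this device is attached to
	 * an IOMMU, unlike the old global iommu_present(&pci_bus_type) test.
	 */
	if (!device_iommu_mapped(&pdev->dev))
		dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV\n");

	/* Same init call the patch uses, but with driver-chosen limits
	 * instead of DEFAULT_RATELIMIT_INTERVAL/DEFAULT_RATELIMIT_BURST.
	 */
	ratelimit_state_init(&example_rs, EXAMPLE_RATELIMIT_INTERVAL,
			     EXAMPLE_RATELIMIT_BURST);
}

static bool example_allow_msg(void)
{
	/* __ratelimit() returns nonzero while the burst budget for the
	 * current interval has not been exhausted.
	 */
	return __ratelimit(&example_rs);
}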