Diffstat (limited to 'drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c')
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 126
1 file changed, 71 insertions(+), 55 deletions(-)
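
A minimal standalone sketch of the capability derivation that the first hunk below adds to get_accel_cap(): a set bit in the LEGFUSES register means the corresponding slice is fused off in this SKU, so the code starts from the full capability set and clears the matching bits. The mask values here are hypothetical stand-ins for the ICP_ACCEL_MASK_*_SLICE and ICP_ACCEL_CAPABILITIES_* definitions in icp_qat_hw.h, not the real register layout.

    /* Illustrative sketch only; bit positions are made up for the example. */
    #include <stdint.h>
    #include <stdio.h>

    #define MASK_CIPHER_SLICE    0x1u  /* hypothetical legfuse bits */
    #define MASK_AUTH_SLICE      0x2u
    #define MASK_PKE_SLICE       0x4u
    #define MASK_COMPRESS_SLICE  0x8u

    #define CAP_SYM      0x01u         /* hypothetical capability bits */
    #define CAP_ASYM     0x02u
    #define CAP_AUTH     0x04u
    #define CAP_CIPHER   0x08u
    #define CAP_COMP     0x10u

    static uint32_t caps_from_legfuses(uint32_t legfuses)
    {
    	/* Start from everything supported, then strip what the fuses turn off */
    	uint32_t caps = CAP_SYM | CAP_ASYM | CAP_AUTH | CAP_CIPHER | CAP_COMP;

    	if (legfuses & MASK_CIPHER_SLICE)
    		caps &= ~(CAP_SYM | CAP_CIPHER);
    	if (legfuses & MASK_PKE_SLICE)
    		caps &= ~CAP_ASYM;
    	if (legfuses & MASK_AUTH_SLICE)
    		caps &= ~(CAP_AUTH | CAP_CIPHER);
    	if (legfuses & MASK_COMPRESS_SLICE)
    		caps &= ~CAP_COMP;

    	return caps;
    }

    int main(void)
    {
    	/* Example: SKU with the compression slice fused off */
    	printf("caps = 0x%02x\n", caps_from_legfuses(MASK_COMPRESS_SLICE));
    	return 0;
    }
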
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 09599fe4d2f3..cb3bdd3618fb 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -7,6 +7,8 @@
#include "adf_dh895xcc_hw_data.h"
#include "icp_qat_hw.h"
+#define ADF_DH895XCC_VF_MSK 0xFFFFFFFF
+
/* Worker thread to service arbiter mappings */
static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = {
0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
@@ -58,17 +60,24 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
- ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_COMPRESSION;
/* Read accelerator capabilities mask */
pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
- if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE)
+ /* A set bit in legfuses means the feature is OFF in this SKU */
+ if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
- if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
+ if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
@@ -100,43 +109,6 @@ static const u32 *adf_get_arbiter_mapping(void)
return thrd_to_arb_map;
}
-static void adf_enable_ints(struct adf_accel_dev *accel_dev)
-{
- void __iomem *addr;
-
- addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
-
- /* Enable bundle and misc interrupts */
- ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
- accel_dev->pf.vf_info ? 0 :
- BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1);
- ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
- ADF_DH895XCC_SMIA1_MASK);
-}
-
-static u32 get_vf2pf_sources(void __iomem *pmisc_bar)
-{
- u32 errsou3, errmsk3, errsou5, errmsk5, vf_int_mask;
-
- /* Get the interrupt sources triggered by VFs */
- errsou3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU3);
- vf_int_mask = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3);
-
- /* To avoid adding duplicate entries to work queue, clear
- * vf_int_mask_sets bits that are already masked in ERRMSK register.
- */
- errmsk3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK3);
- vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3);
-
- /* Do the same for ERRSOU5 */
- errsou5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU5);
- errmsk5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK5);
- vf_int_mask |= ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
- vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
-
- return vf_int_mask;
-}
-
static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
/* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
@@ -150,27 +122,71 @@ static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
if (vf_mask >> 16) {
u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
& ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
-
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
}
}
-static void disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+static void disable_all_vf2pf_interrupts(void __iomem *pmisc_addr)
{
+ u32 val;
+
/* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
- if (vf_mask & 0xFFFF) {
- u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
- | ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
- }
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ | ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
/* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
- if (vf_mask >> 16) {
- u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
- | ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
+ val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+ | ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+}
- ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
- }
+static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
+{
+ u32 sources, pending, disabled;
+ u32 errsou3, errmsk3;
+ u32 errsou5, errmsk5;
+
+ /* Get the interrupt sources triggered by VFs */
+ errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+ errsou5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU5);
+ sources = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3)
+ | ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
+
+ if (!sources)
+ return 0;
+
+ /* Get the already disabled interrupts */
+ errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+ errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5);
+ disabled = ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3)
+ | ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
+
+ pending = sources & ~disabled;
+ if (!pending)
+ return 0;
+
+ /* Due to HW limitations, when disabling the interrupts, we can't
+ * just disable the pending sources, as this would lead to missed
+ * interrupts if the sources change just before writing to ERRMSK3
+ * and ERRMSK5.
+ * To work around it, disable all and re-enable only the sources that
+ * are neither pending nor already disabled. Re-enabling will trigger
+ * a new interrupt for the sources that have changed in the meantime,
+ * if any.
+ */
+ errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
+ errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+ errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
+ errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
+
+ /* Return the sources of the (new) interrupt(s) */
+ return pending;
}
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
@@ -215,14 +231,14 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
- hw_data->enable_ints = adf_enable_ints;
+ hw_data->enable_ints = adf_gen2_enable_ints;
hw_data->reset_device = adf_reset_sbr;
hw_data->disable_iov = adf_disable_sriov;
adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
- hw_data->pfvf_ops.get_vf2pf_sources = get_vf2pf_sources;
hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
- hw_data->pfvf_ops.disable_vf2pf_interrupts = disable_vf2pf_interrupts;
+ hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts;
+ hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts;
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
}
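
For reference, a minimal standalone sketch of the mask algebra used by disable_pending_vf2pf_interrupts() above: read the pending sources, mask everything, then re-enable only the sources that were neither pending nor already disabled, so a source that fires between the reads and the writes is not silently lost. The model collapses the split ERRSOU3/ERRSOU5 and ERRMSK3/ERRMSK5 register pairs into single 32-bit variables; errsou, errmsk and the helper name are hypothetical, not driver API.

    /* Illustrative sketch only; register pairs collapsed into one 32-bit mask. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t errsou;  /* pending VF2PF sources (1 = interrupt asserted) */
    static uint32_t errmsk;  /* interrupt mask        (1 = source disabled)    */

    static uint32_t disable_pending_vf2pf(void)
    {
    	uint32_t sources = errsou;
    	uint32_t disabled = errmsk;
    	uint32_t pending = sources & ~disabled;

    	if (!pending)
    		return 0;

    	/* Mirror the driver's two-step update: first mask all sources ... */
    	errmsk = 0xFFFFFFFFu;

    	/* ... then re-enable only what was neither pending nor already
    	 * disabled; a source that fired in the meantime re-triggers.
    	 */
    	errmsk &= sources | disabled;

    	return pending;
    }

    int main(void)
    {
    	errsou = 0x00000005u;  /* VF0 and VF2 raised VF2PF interrupts */
    	errmsk = 0x00000004u;  /* VF2 was already masked              */

    	printf("newly disabled: 0x%08x\n", disable_pending_vf2pf()); /* 0x00000001 */
    	printf("errmsk now:     0x%08x\n", errmsk);                  /* 0x00000005 */
    	return 0;
    }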