Diffstat (limited to 'drivers/crypto/qat/qat_common')
 drivers/crypto/qat/qat_common/Makefile            |  14
 drivers/crypto/qat/qat_common/adf_accel_devices.h |   1
 drivers/crypto/qat/qat_common/adf_admin.c         |   2
 drivers/crypto/qat/qat_common/adf_aer.c           |  49
 drivers/crypto/qat/qat_common/adf_cfg_strings.h   |   2
 drivers/crypto/qat/qat_common/adf_common_drv.h    |  30
 drivers/crypto/qat/qat_common/adf_ctl_drv.c       |  40
 drivers/crypto/qat/qat_common/adf_init.c          |  15
 drivers/crypto/qat/qat_common/adf_isr.c           |   4
 drivers/crypto/qat/qat_common/adf_sriov.c         |  10
 drivers/crypto/qat/qat_common/adf_vf2pf_msg.c     |  92
 drivers/crypto/qat/qat_common/adf_vf_isr.c        |  61
 drivers/crypto/qat/qat_common/qat_algs.c          |  12
 drivers/crypto/qat/qat_common/qat_asym_algs.c     | 876
 drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1 |  11
 drivers/crypto/qat/qat_common/qat_rsapubkey.asn1  |   4
 16 files changed, 1004 insertions(+), 219 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 29c7c53d2845..92fb6ffdc062 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -1,15 +1,6 @@
-$(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
- $(obj)/qat_rsapubkey-asn1.h
-$(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
- $(obj)/qat_rsaprivkey-asn1.h
-
-clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
-clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
-
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
intel_qat-objs := adf_cfg.o \
adf_isr.o \
- adf_vf_isr.o \
adf_ctl_drv.o \
adf_dev_mgr.o \
adf_init.o \
@@ -20,11 +11,10 @@ intel_qat-objs := adf_cfg.o \
adf_hw_arbiter.o \
qat_crypto.o \
qat_algs.o \
- qat_rsapubkey-asn1.o \
- qat_rsaprivkey-asn1.o \
qat_asym_algs.o \
qat_uclo.o \
qat_hal.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
-intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o \
+ adf_vf2pf_msg.o adf_vf_isr.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 5a07208ce778..e8822536530b 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -176,6 +176,7 @@ struct adf_hw_device_data {
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
+ void (*reset_device)(struct adf_accel_dev *accel_dev);
const char *fw_name;
const char *fw_mmp_name;
uint32_t fuses;
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index eb557f69e367..ce7c4626c983 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -61,7 +61,7 @@
#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
#define ADF_ADMINMSG_LEN 32
-static const u8 const_tab[1024] = {
+static const u8 const_tab[1024] __aligned(1024) = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
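The only functional change above is the new alignment constraint on the constants table. A minimal sketch of what __aligned() buys (generic compiler behaviour; that the admin firmware's DMA of this table requires a 1024-byte boundary is an assumption):

/* In the kernel, __aligned(x) expands to __attribute__((aligned(x))). */
#define __aligned(x) __attribute__((aligned(x)))

static const unsigned char tbl[1024] __aligned(1024);
/* sizeof(tbl) is unchanged; only the placement is constrained, so
 * ((unsigned long)tbl % 1024) == 0 is guaranteed. */
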
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index b40d9c8dad96..2839fccdd84b 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -82,18 +82,12 @@ struct adf_reset_dev_data {
struct work_struct reset_work;
};
-void adf_dev_restore(struct adf_accel_dev *accel_dev)
+void adf_reset_sbr(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
struct pci_dev *parent = pdev->bus->self;
uint16_t bridge_ctl = 0;
- if (accel_dev->is_vf)
- return;
-
- dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
- accel_dev->accel_id);
-
if (!parent)
parent = pdev;
@@ -101,6 +95,8 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
dev_info(&GET_DEV(accel_dev),
"Transaction still in progress. Proceeding\n");
+ dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n");
+
pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
@@ -108,8 +104,40 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
msleep(100);
- pci_restore_state(pdev);
- pci_save_state(pdev);
+}
+EXPORT_SYMBOL_GPL(adf_reset_sbr);
+
+void adf_reset_flr(struct adf_accel_dev *accel_dev)
+{
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+ u16 control = 0;
+ int pos = 0;
+
+ dev_info(&GET_DEV(accel_dev), "Function level reset\n");
+ pos = pci_pcie_cap(pdev);
+ if (!pos) {
+ dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+ return;
+ }
+ pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &control);
+ control |= PCI_EXP_DEVCTL_BCR_FLR;
+ pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, control);
+ msleep(100);
+}
+EXPORT_SYMBOL_GPL(adf_reset_flr);
+
+void adf_dev_restore(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+ if (hw_device->reset_device) {
+ dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
+ accel_dev->accel_id);
+ hw_device->reset_device(accel_dev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ }
}
static void adf_device_reset_worker(struct work_struct *work)
@@ -243,7 +271,8 @@ EXPORT_SYMBOL_GPL(adf_disable_aer);
int adf_init_aer(void)
{
- device_reset_wq = create_workqueue("qat_device_reset_wq");
+ device_reset_wq = alloc_workqueue("qat_device_reset_wq",
+ WQ_MEM_RECLAIM, 0);
return !device_reset_wq ? -EFAULT : 0;
}
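adf_dev_restore() is now just a dispatcher: the reset policy lives in the new reset_device hook added to adf_hw_device_data earlier in this patch. A hedged sketch of the wiring a device-specific driver would do (the init function shown is illustrative, not part of this diff):

static void example_init_hw_data(struct adf_hw_device_data *hw_data)
{
	/* A PF that tolerates resetting the whole secondary bus: */
	hw_data->reset_device = adf_reset_sbr;

	/* A VF, or a part that must not disturb bus siblings, would use
	 * adf_reset_flr instead; leaving the hook NULL makes
	 * adf_dev_restore() a no-op for that device. */
}
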
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
index 13575111382c..7632ed0f25c5 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -57,10 +57,8 @@
#define ADF_RING_DC_SIZE "NumConcurrentRequests"
#define ADF_RING_ASYM_TX "RingAsymTx"
#define ADF_RING_SYM_TX "RingSymTx"
-#define ADF_RING_RND_TX "RingNrbgTx"
#define ADF_RING_ASYM_RX "RingAsymRx"
#define ADF_RING_SYM_RX "RingSymRx"
-#define ADF_RING_RND_RX "RingNrbgRx"
#define ADF_RING_DC_TX "RingTx"
#define ADF_RING_DC_RX "RingRx"
#define ADF_ETRMGR_BANK "Bank"
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 976b01e58afb..980e07475012 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -67,7 +67,7 @@
#define ADF_STATUS_AE_INITIALISED 4
#define ADF_STATUS_AE_UCODE_LOADED 5
#define ADF_STATUS_AE_STARTED 6
-#define ADF_STATUS_ORPHAN_TH_RUNNING 7
+#define ADF_STATUS_PF_RUNNING 7
#define ADF_STATUS_IRQ_ALLOCATED 8
enum adf_dev_reset_mode {
@@ -103,7 +103,7 @@ int adf_service_unregister(struct service_hndl *service);
int adf_dev_init(struct adf_accel_dev *accel_dev);
int adf_dev_start(struct adf_accel_dev *accel_dev);
-int adf_dev_stop(struct adf_accel_dev *accel_dev);
+void adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
@@ -141,6 +141,8 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev);
int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
void adf_disable_aer(struct adf_accel_dev *accel_dev);
+void adf_reset_sbr(struct adf_accel_dev *accel_dev);
+void adf_reset_flr(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
@@ -236,8 +238,13 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
uint32_t vf_mask);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
int adf_init_pf_wq(void);
void adf_exit_pf_wq(void);
+int adf_init_vf_wq(void);
+void adf_exit_vf_wq(void);
#else
static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
@@ -256,6 +263,15 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
}
+static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+
static inline int adf_init_pf_wq(void)
{
return 0;
@@ -264,5 +280,15 @@ static inline int adf_init_pf_wq(void)
static inline void adf_exit_pf_wq(void)
{
}
+
+static inline int adf_init_vf_wq(void)
+{
+ return 0;
+}
+
+static inline void adf_exit_vf_wq(void)
+{
+}
+
#endif
#endif
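The empty static-inline stubs keep call sites free of #ifdef clutter: adf_ctl_drv.c below calls adf_init_vf_wq() unconditionally, and with CONFIG_PCI_IOV off the stub returns 0 and compiles away entirely. The idiom in isolation (a condensed sketch of the header above):

#ifdef CONFIG_PCI_IOV
int adf_init_vf_wq(void);	/* real implementation in adf_vf_isr.c */
#else
static inline int adf_init_vf_wq(void) { return 0; }
#endif

/* A caller needs no conditional compilation of its own: */
static int example_module_init(void)
{
	return adf_init_vf_wq();
}
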
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 3c3f948290ca..abc7a7f64d64 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -270,26 +270,33 @@ static int adf_ctl_is_device_in_use(int id)
return 0;
}
-static int adf_ctl_stop_devices(uint32_t id)
+static void adf_ctl_stop_devices(uint32_t id)
{
struct adf_accel_dev *accel_dev;
- int ret = 0;
- list_for_each_entry_reverse(accel_dev, adf_devmgr_get_head(), list) {
+ list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
if (!adf_dev_started(accel_dev))
continue;
- if (adf_dev_stop(accel_dev)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to stop qat_dev%d\n", id);
- ret = -EFAULT;
- } else {
- adf_dev_shutdown(accel_dev);
- }
+ /* First stop all VFs */
+ if (!accel_dev->is_vf)
+ continue;
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+ }
+ }
+
+ list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+ if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+ if (!adf_dev_started(accel_dev))
+ continue;
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
}
}
- return ret;
}
static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
@@ -318,9 +325,8 @@ static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
ctl_data->device_id);
- ret = adf_ctl_stop_devices(ctl_data->device_id);
- if (ret)
- pr_err("QAT: failed to stop device.\n");
+ adf_ctl_stop_devices(ctl_data->device_id);
+
out:
kfree(ctl_data);
return ret;
@@ -465,12 +471,17 @@ static int __init adf_register_ctl_device_driver(void)
if (adf_init_pf_wq())
goto err_pf_wq;
+ if (adf_init_vf_wq())
+ goto err_vf_wq;
+
if (qat_crypto_register())
goto err_crypto_register;
return 0;
err_crypto_register:
+ adf_exit_vf_wq();
+err_vf_wq:
adf_exit_pf_wq();
err_pf_wq:
adf_exit_aer();
@@ -485,6 +496,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
{
adf_chr_drv_destroy();
adf_exit_aer();
+ adf_exit_vf_wq();
adf_exit_pf_wq();
qat_crypto_unregister();
adf_clean_vf_map(false);
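The rewrite turns one error-propagating loop into two infallible passes: virtual functions are stopped first because a VF shuts down through the PF2VF channel its parent PF provides, so the PF must still be alive when the VF goes down. The pattern, condensed (stop_and_shutdown() is a hypothetical helper standing in for adf_dev_stop() plus adf_dev_shutdown()):

static void stop_and_shutdown(struct adf_accel_dev *dev);

static void example_stop_all(struct list_head *head)
{
	struct adf_accel_dev *dev;

	list_for_each_entry(dev, head, list)	/* pass 1: VFs only */
		if (dev->is_vf && adf_dev_started(dev))
			stop_and_shutdown(dev);

	list_for_each_entry(dev, head, list)	/* pass 2: the PFs */
		if (adf_dev_started(dev))
			stop_and_shutdown(dev);
}
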
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index ef5575e4a215..888c6675e7e5 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -236,9 +236,9 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
* is shutting down.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code otherwise.
+ * Return: void
*/
-int adf_dev_stop(struct adf_accel_dev *accel_dev)
+void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
struct list_head *list_itr;
@@ -246,9 +246,9 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
int ret;
if (!adf_dev_started(accel_dev) &&
- !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
- return 0;
- }
+ !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
+ return;
+
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
@@ -279,8 +279,6 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
else
clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
}
-
- return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_stop);
@@ -329,6 +327,8 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
clear_bit(accel_dev->accel_id, &service->init_status);
}
+ hw_data->disable_iov(accel_dev);
+
if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
hw_data->free_irq(accel_dev);
clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
@@ -344,7 +344,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
if (hw_data->exit_admin_comms)
hw_data->exit_admin_comms(accel_dev);
- hw_data->disable_iov(accel_dev);
adf_cleanup_etr_data(accel_dev);
adf_dev_restore(accel_dev);
}
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index b81f79acc4ea..06d49017a52b 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -302,7 +302,7 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
}
/**
- * adf_vf_isr_resource_free() - Free IRQ for acceleration device
+ * adf_isr_resource_free() - Free IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function frees interrupts for acceleration device.
@@ -317,7 +317,7 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
EXPORT_SYMBOL_GPL(adf_isr_resource_free);
/**
- * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function allocates interrupts for acceleration device.
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 38a0415e767d..9320ae1d005b 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -249,13 +249,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
return -EBUSY;
}
- if (adf_dev_stop(accel_dev)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to stop qat_dev%d\n",
- accel_dev->accel_id);
- return -EFAULT;
- }
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
}
@@ -298,7 +292,7 @@ EXPORT_SYMBOL_GPL(adf_sriov_configure);
int __init adf_init_pf_wq(void)
{
/* Workqueue for PF2VF responses */
- pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+ pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
return !pf2vf_resp_wq ? -ENOMEM : 0;
}
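Both workqueue conversions in this patch follow the same recipe: create_workqueue() is the legacy interface, while alloc_workqueue() with WQ_MEM_RECLAIM creates a concurrency-managed queue backed by a dedicated rescuer thread, so queued work can still make progress under memory pressure. The replacement pattern as a standalone sketch:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_wq_init(void)
{
	/* args: name, flags, max_active (0 selects the default) */
	example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
	return example_wq ? 0 : -ENOMEM;
}

static void example_wq_exit(void)
{
	if (example_wq)
		destroy_workqueue(example_wq);
}
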
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
new file mode 100644
index 000000000000..cd5f37dffe8a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -0,0 +1,92 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+/**
+ * adf_vf2pf_init() - send init msg to PF
+ * @accel_dev: Pointer to acceleration VF device.
+ *
+ * Function sends an init message from the VF to the PF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+ u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+ (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+ if (adf_iov_putmsg(accel_dev, msg, 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Init event to PF\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_init);
+
+/**
+ * adf_vf2pf_shutdown() - send shutdown msg to PF
+ * @accel_dev: Pointer to acceleration VF device.
+ *
+ * Function sends a shutdown message from the VF to the PF
+ *
+ * Return: void
+ */
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+ u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+ (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+ if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
+ if (adf_iov_putmsg(accel_dev, msg, 0))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Shutdown event to PF\n");
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
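Each VF-to-PF notification is a single 32-bit message: an origin flag OR'd with a type shifted into the message-type field, delivered through adf_iov_putmsg() (a VF always addresses mailbox 0). A hedged sketch of how a VF driver's start/stop paths might use the pair (the surrounding function is assumed, not taken from this patch):

static int example_vf_dev_start(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* Announce the VF; on success ADF_STATUS_PF_RUNNING gets set. */
	ret = adf_vf2pf_init(accel_dev);
	if (ret)
		return ret;

	/* ... bring up transport rings and crypto instances ... */
	return 0;
}

/* The stop path calls adf_vf2pf_shutdown(accel_dev), which only signals
 * the PF while ADF_STATUS_PF_RUNNING is still set. */
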
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 09427b3d4d55..bf99e11a3403 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -51,6 +51,7 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
@@ -64,6 +65,13 @@
#define ADF_VINTSOU_BUN BIT(0)
#define ADF_VINTSOU_PF2VF BIT(1)
+static struct workqueue_struct *adf_vf_stop_wq;
+
+struct adf_vf_stop_data {
+ struct adf_accel_dev *accel_dev;
+ struct work_struct work;
+};
+
static int adf_enable_msi(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -90,6 +98,20 @@ static void adf_disable_msi(struct adf_accel_dev *accel_dev)
pci_disable_msi(pdev);
}
+static void adf_dev_stop_async(struct work_struct *work)
+{
+ struct adf_vf_stop_data *stop_data =
+ container_of(work, struct adf_vf_stop_data, work);
+ struct adf_accel_dev *accel_dev = stop_data->accel_dev;
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+
+ /* Re-enable PF2VF interrupts */
+ adf_enable_pf2vf_interrupts(accel_dev);
+ kfree(stop_data);
+}
+
static void adf_pf2vf_bh_handler(void *data)
{
struct adf_accel_dev *accel_dev = data;
@@ -107,11 +129,29 @@ static void adf_pf2vf_bh_handler(void *data)
goto err;
switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
- case ADF_PF2VF_MSGTYPE_RESTARTING:
+ case ADF_PF2VF_MSGTYPE_RESTARTING: {
+ struct adf_vf_stop_data *stop_data;
+
dev_dbg(&GET_DEV(accel_dev),
"Restarting msg received from PF 0x%x\n", msg);
- adf_dev_stop(accel_dev);
- break;
+
+ clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
+ stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
+ if (!stop_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "Couldn't schedule stop for vf_%d\n",
+ accel_dev->accel_id);
+ return;
+ }
+ stop_data->accel_dev = accel_dev;
+ INIT_WORK(&stop_data->work, adf_dev_stop_async);
+ queue_work(adf_vf_stop_wq, &stop_data->work);
+ /* To ack, clear the PF2VFINT bit */
+ msg &= ~BIT(0);
+ ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
+ return;
+ }
case ADF_PF2VF_MSGTYPE_VERSION_RESP:
dev_dbg(&GET_DEV(accel_dev),
"Version resp received from PF 0x%x\n", msg);
@@ -278,3 +318,18 @@ err_out:
return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
+
+int __init adf_init_vf_wq(void)
+{
+ adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
+
+ return !adf_vf_stop_wq ? -EFAULT : 0;
+}
+
+void adf_exit_vf_wq(void)
+{
+ if (adf_vf_stop_wq)
+ destroy_workqueue(adf_vf_stop_wq);
+
+ adf_vf_stop_wq = NULL;
+}
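adf_pf2vf_bh_handler() runs as a tasklet, i.e. in atomic context, while adf_dev_stop() can sleep; hence the RESTARTING case only allocates a small carrier with GFP_ATOMIC and defers the real teardown to adf_vf_stop_wq. The defer-from-atomic pattern in isolation (all names here are illustrative, not from the driver):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_dev;
void example_dev_teardown(struct example_dev *dev);	/* may sleep */

static struct workqueue_struct *example_wq;

struct defer_ctx {
	struct example_dev *dev;
	struct work_struct work;
};

static void deferred_stop(struct work_struct *work)
{
	struct defer_ctx *ctx = container_of(work, struct defer_ctx, work);

	example_dev_teardown(ctx->dev);	/* process context: may sleep */
	kfree(ctx);
}

static void example_bh_handler(struct example_dev *dev)
{
	struct defer_ctx *ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);

	if (!ctx)
		return;	/* can't sleep here, so we can only bail out */
	ctx->dev = dev;
	INIT_WORK(&ctx->work, deferred_stop);
	queue_work(example_wq, &ctx->work);
}
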
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 1e8852a8a057..20f35df8a01f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -947,13 +947,13 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
return 0;
out_free_all:
- memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd));
- dma_free_coherent(dev, sizeof(*ctx->enc_cd),
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ dma_free_coherent(dev, sizeof(*ctx->dec_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
ctx->dec_cd = NULL;
out_free_enc:
- memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd));
- dma_free_coherent(dev, sizeof(*ctx->dec_cd),
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ dma_free_coherent(dev, sizeof(*ctx->enc_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
ctx->enc_cd = NULL;
return -ENOMEM;
@@ -1260,8 +1260,8 @@ static struct crypto_alg qat_algs[] = { {
.setkey = qat_alg_ablkcipher_xts_setkey,
.decrypt = qat_alg_ablkcipher_decrypt,
.encrypt = qat_alg_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
},
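The key-size fix reflects the XTS key layout: one buffer carries two independent AES keys, the data-unit key and the tweak key, so an XTS tfm must accept 32 or 64 bytes rather than 16 or 32. The conventional split, sketched (generic XTS convention, not driver code):

#include <linux/types.h>

/* An XTS-AES-256 key is 64 bytes: two concatenated AES-256 keys. */
static void xts_split_key(const u8 *key, unsigned int keylen,
			  const u8 **crypt_key, const u8 **tweak_key)
{
	*crypt_key = key;		/* first half encrypts data units */
	*tweak_key = key + keylen / 2;	/* second half encrypts the tweak */
}
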
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index e5c0727d4876..0d35dca2e925 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -49,11 +49,12 @@
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
-#include "qat_rsapubkey-asn1.h"
-#include "qat_rsaprivkey-asn1.h"
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "adf_transport.h"
@@ -75,6 +76,14 @@ struct qat_rsa_input_params {
dma_addr_t d;
dma_addr_t n;
} dec;
+ struct {
+ dma_addr_t c;
+ dma_addr_t p;
+ dma_addr_t q;
+ dma_addr_t dp;
+ dma_addr_t dq;
+ dma_addr_t qinv;
+ } dec_crt;
u64 in_tab[8];
};
} __packed __aligned(64);
@@ -95,71 +104,480 @@ struct qat_rsa_ctx {
char *n;
char *e;
char *d;
+ char *p;
+ char *q;
+ char *dp;
+ char *dq;
+ char *qinv;
dma_addr_t dma_n;
dma_addr_t dma_e;
dma_addr_t dma_d;
+ dma_addr_t dma_p;
+ dma_addr_t dma_q;
+ dma_addr_t dma_dp;
+ dma_addr_t dma_dq;
+ dma_addr_t dma_qinv;
unsigned int key_sz;
+ bool crt_mode;
+ struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_dh_input_params {
+ union {
+ struct {
+ dma_addr_t b;
+ dma_addr_t xa;
+ dma_addr_t p;
+ } in;
+ struct {
+ dma_addr_t xa;
+ dma_addr_t p;
+ } in_g2;
+ u64 in_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_dh_output_params {
+ union {
+ dma_addr_t r;
+ u64 out_tab[8];
+ };
+} __packed __aligned(64);
+
+struct qat_dh_ctx {
+ char *g;
+ char *xa;
+ char *p;
+ dma_addr_t dma_g;
+ dma_addr_t dma_xa;
+ dma_addr_t dma_p;
+ unsigned int p_size;
+ bool g2;
struct qat_crypto_instance *inst;
} __packed __aligned(64);
-struct qat_rsa_request {
- struct qat_rsa_input_params in;
- struct qat_rsa_output_params out;
+struct qat_asym_request {
+ union {
+ struct qat_rsa_input_params rsa;
+ struct qat_dh_input_params dh;
+ } in;
+ union {
+ struct qat_rsa_output_params rsa;
+ struct qat_dh_output_params dh;
+ } out;
dma_addr_t phy_in;
dma_addr_t phy_out;
char *src_align;
char *dst_align;
struct icp_qat_fw_pke_request req;
- struct qat_rsa_ctx *ctx;
+ union {
+ struct qat_rsa_ctx *rsa;
+ struct qat_dh_ctx *dh;
+ } ctx;
+ union {
+ struct akcipher_request *rsa;
+ struct kpp_request *dh;
+ } areq;
int err;
+ void (*cb)(struct icp_qat_fw_pke_resp *resp);
} __aligned(64);
-static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
- struct akcipher_request *areq = (void *)(__force long)resp->opaque;
- struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
- struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
+ struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+ struct kpp_request *areq = req->areq.dh;
+ struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
resp->pke_resp_hdr.comn_resp_flags);
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
- if (req->src_align)
- dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
- req->in.enc.m);
- else
- dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
- DMA_TO_DEVICE);
+ if (areq->src) {
+ if (req->src_align)
+ dma_free_coherent(dev, req->ctx.dh->p_size,
+ req->src_align, req->in.dh.in.b);
+ else
+ dma_unmap_single(dev, req->in.dh.in.b,
+ req->ctx.dh->p_size, DMA_TO_DEVICE);
+ }
- areq->dst_len = req->ctx->key_sz;
+ areq->dst_len = req->ctx.dh->p_size;
if (req->dst_align) {
- char *ptr = req->dst_align;
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ areq->dst_len, 1);
- while (!(*ptr) && areq->dst_len) {
- areq->dst_len--;
- ptr++;
- }
+ dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
+ req->out.dh.r);
+ } else {
+ dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
+ DMA_FROM_DEVICE);
+ }
- if (areq->dst_len != req->ctx->key_sz)
- memmove(req->dst_align, ptr, areq->dst_len);
+ dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, req->phy_out,
+ sizeof(struct qat_dh_output_params),
+ DMA_TO_DEVICE);
- scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
- areq->dst_len, 1);
+ kpp_request_complete(areq, err);
+}
+
+#define PKE_DH_1536 0x390c1a49
+#define PKE_DH_G2_1536 0x2e0b1a3e
+#define PKE_DH_2048 0x4d0c1a60
+#define PKE_DH_G2_2048 0x3e0b1a55
+#define PKE_DH_3072 0x510c1a77
+#define PKE_DH_G2_3072 0x3a0b1a6c
+#define PKE_DH_4096 0x690c1a8e
+#define PKE_DH_G2_4096 0x4a0b1a83
- dma_free_coherent(dev, req->ctx->key_sz, req->dst_align,
- req->out.enc.c);
+static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 1536:
+ return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
+ case 2048:
+ return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
+ case 3072:
+ return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
+ case 4096:
+ return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
+ default:
+ return 0;
+ };
+}
+
+static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
+{
+ return kpp_tfm_ctx(tfm);
+}
+
+static int qat_dh_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ struct qat_asym_request *qat_req =
+ PTR_ALIGN(kpp_request_ctx(req), 64);
+ struct icp_qat_fw_pke_request *msg = &qat_req->req;
+ int ret, ctr = 0;
+ int n_input_params = 0;
+
+ if (unlikely(!ctx->xa))
+ return -EINVAL;
+
+ if (req->dst_len < ctx->p_size) {
+ req->dst_len = ctx->p_size;
+ return -EOVERFLOW;
+ }
+ memset(msg, '\0', sizeof(*msg));
+ ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+ ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+ msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
+ !req->src && ctx->g2);
+ if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+ return -EINVAL;
+
+ qat_req->cb = qat_dh_cb;
+ qat_req->ctx.dh = ctx;
+ qat_req->areq.dh = req;
+ msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+ msg->pke_hdr.comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+ QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+ /*
+ * If no source is provided use g as base
+ */
+ if (req->src) {
+ qat_req->in.dh.in.xa = ctx->dma_xa;
+ qat_req->in.dh.in.p = ctx->dma_p;
+ n_input_params = 3;
} else {
- char *ptr = sg_virt(areq->dst);
+ if (ctx->g2) {
+ qat_req->in.dh.in_g2.xa = ctx->dma_xa;
+ qat_req->in.dh.in_g2.p = ctx->dma_p;
+ n_input_params = 2;
+ } else {
+ qat_req->in.dh.in.b = ctx->dma_g;
+ qat_req->in.dh.in.xa = ctx->dma_xa;
+ qat_req->in.dh.in.p = ctx->dma_p;
+ n_input_params = 3;
+ }
+ }
- while (!(*ptr) && areq->dst_len) {
- areq->dst_len--;
- ptr++;
+ ret = -ENOMEM;
+ if (req->src) {
+ /*
+	 * src can be of any size within the valid range, but the HW
+	 * expects it to be the same size as the modulus p, so if the
+	 * sizes differ we need to allocate a new buffer and copy the
+	 * src data into it. Otherwise we just map the user-provided
+	 * buffer. The buffer also needs to be physically contiguous.
+ */
+ if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
+ qat_req->src_align = NULL;
+ qat_req->in.dh.in.b = dma_map_single(dev,
+ sg_virt(req->src),
+ req->src_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev,
+ qat_req->in.dh.in.b)))
+ return ret;
+
+ } else {
+ int shift = ctx->p_size - req->src_len;
+
+ qat_req->src_align = dma_zalloc_coherent(dev,
+ ctx->p_size,
+ &qat_req->in.dh.in.b,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->src_align))
+ return ret;
+
+ scatterwalk_map_and_copy(qat_req->src_align + shift,
+ req->src, 0, req->src_len, 0);
}
+ }
+ /*
+ * dst can be of any size within the valid range, but the HW expects
+ * it to be the same size as the modulus p, so if the sizes differ we
+ * need to allocate a new buffer and copy the result through it.
+ * Otherwise we just map the user-provided buffer.
+ * The buffer also needs to be physically contiguous.
+ */
+ if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
+ qat_req->dst_align = NULL;
+ qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
+ req->dst_len,
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
+ goto unmap_src;
+
+ } else {
+ qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
+ &qat_req->out.dh.r,
+ GFP_KERNEL);
+ if (unlikely(!qat_req->dst_align))
+ goto unmap_src;
+ }
+
+ qat_req->in.dh.in_tab[n_input_params] = 0;
+ qat_req->out.dh.out_tab[1] = 0;
+ /* Mapping in.in.b or in.in_g2.xa is the same */
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
+ sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
+ goto unmap_dst;
+
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
+ sizeof(struct qat_dh_output_params),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
+ goto unmap_in_params;
+
+ msg->pke_mid.src_data_addr = qat_req->phy_in;
+ msg->pke_mid.dest_data_addr = qat_req->phy_out;
+ msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+ msg->input_param_count = n_input_params;
+ msg->output_param_count = 1;
+
+ do {
+ ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+ } while (ret == -EBUSY && ctr++ < 100);
+
+ if (!ret)
+ return -EINPROGRESS;
+
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_dh_output_params),
+ DMA_TO_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_dh_input_params),
+ DMA_TO_DEVICE);
+unmap_dst:
+ if (qat_req->dst_align)
+ dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
+ qat_req->out.dh.r);
+ else
+ if (!dma_mapping_error(dev, qat_req->out.dh.r))
+ dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
+ DMA_FROM_DEVICE);
+unmap_src:
+ if (req->src) {
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
+ qat_req->in.dh.in.b);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
+ dma_unmap_single(dev, qat_req->in.dh.in.b,
+ ctx->p_size,
+ DMA_TO_DEVICE);
+ }
+ return ret;
+}
+
+static int qat_dh_check_params_length(unsigned int p_len)
+{
+ switch (p_len) {
+ case 1536:
+ case 2048:
+ case 3072:
+ case 4096:
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
+{
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+
+ if (unlikely(!params->p || !params->g))
+ return -EINVAL;
+
+ if (qat_dh_check_params_length(params->p_size << 3))
+ return -EINVAL;
+
+ ctx->p_size = params->p_size;
+ ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
+ if (!ctx->p)
+ return -ENOMEM;
+ memcpy(ctx->p, params->p, ctx->p_size);
+
+ /* If g equals 2 don't copy it */
+ if (params->g_size == 1 && *(char *)params->g == 0x02) {
+ ctx->g2 = true;
+ return 0;
+ }
+
+ ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
+ if (!ctx->g) {
+ dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+ ctx->p = NULL;
+ return -ENOMEM;
+ }
+ memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
+ params->g_size);
+
+ return 0;
+}
+
+static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
+{
+ if (ctx->g) {
+ dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
+ ctx->g = NULL;
+ }
+ if (ctx->xa) {
+ dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
+ ctx->xa = NULL;
+ }
+ if (ctx->p) {
+ dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
+ ctx->p = NULL;
+ }
+ ctx->p_size = 0;
+ ctx->g2 = false;
+}
+
+static int qat_dh_set_secret(struct crypto_kpp *tfm, void *buf,
+ unsigned int len)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ struct dh params;
+ int ret;
+
+ if (crypto_dh_decode_key(buf, len, &params) < 0)
+ return -EINVAL;
+
+ /* Free old secret if any */
+ qat_dh_clear_ctx(dev, ctx);
+
+ ret = qat_dh_set_params(ctx, &params);
+ if (ret < 0)
+ return ret;
+
+ ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
+ GFP_KERNEL);
+ if (!ctx->xa) {
+ qat_dh_clear_ctx(dev, ctx);
+ return -ENOMEM;
+ }
+ memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
+ params.key_size);
+
+ return 0;
+}
+
+static int qat_dh_max_size(struct crypto_kpp *tfm)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ return ctx->p ? ctx->p_size : -EINVAL;
+}
+
+static int qat_dh_init_tfm(struct crypto_kpp *tfm)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst =
+ qat_crypto_get_instance_node(get_current_node());
+
+ if (!inst)
+ return -EINVAL;
+
+ ctx->p_size = 0;
+ ctx->g2 = false;
+ ctx->inst = inst;
+ return 0;
+}
+
+static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+ qat_dh_clear_ctx(dev, ctx);
+ qat_crypto_put_instance(ctx->inst);
+}
+
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+ struct qat_asym_request *req = (void *)(__force long)resp->opaque;
+ struct akcipher_request *areq = req->areq.rsa;
+ struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
+ int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+ resp->pke_resp_hdr.comn_resp_flags);
+
+ err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
- if (sg_virt(areq->dst) != ptr && areq->dst_len)
- memmove(sg_virt(areq->dst), ptr, areq->dst_len);
+ if (req->src_align)
+ dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
+ req->in.rsa.enc.m);
+ else
+ dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+ DMA_TO_DEVICE);
- dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
+ areq->dst_len = req->ctx.rsa->key_sz;
+ if (req->dst_align) {
+ scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
+ areq->dst_len, 1);
+
+ dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
+ req->out.rsa.enc.c);
+ } else {
+ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
DMA_FROM_DEVICE);
}
@@ -175,8 +593,9 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
void qat_alg_asym_callback(void *_resp)
{
struct icp_qat_fw_pke_resp *resp = _resp;
+ struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
- qat_rsa_cb(resp);
+ areq->cb(resp);
}
#define PKE_RSA_EP_512 0x1c161b21
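Note the change of ownership in the opaque field throughout this file: the submit paths now stash the qat_asym_request itself rather than the crypto-API request, so the single response handler can dispatch to the right completion without knowing whether the job was RSA or DH. The round trip, condensed from the code above and below:

/* submit side: record the completion hook, then hide the request
 * pointer in the firmware message */
qat_req->cb = qat_dh_cb;	/* or qat_rsa_cb */
msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;

/* response side: recover the pointer and jump through it */
struct qat_asym_request *req = (void *)(__force long)resp->opaque;
req->cb(resp);
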
@@ -237,13 +656,42 @@ static unsigned long qat_rsa_dec_fn_id(unsigned int len)
};
}
+#define PKE_RSA_DP2_512 0x1c131b57
+#define PKE_RSA_DP2_1024 0x26131c2d
+#define PKE_RSA_DP2_1536 0x45111d12
+#define PKE_RSA_DP2_2048 0x59121dfa
+#define PKE_RSA_DP2_3072 0x81121ed9
+#define PKE_RSA_DP2_4096 0xb1111fb2
+
+static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
+{
+ unsigned int bitslen = len << 3;
+
+ switch (bitslen) {
+ case 512:
+ return PKE_RSA_DP2_512;
+ case 1024:
+ return PKE_RSA_DP2_1024;
+ case 1536:
+ return PKE_RSA_DP2_1536;
+ case 2048:
+ return PKE_RSA_DP2_2048;
+ case 3072:
+ return PKE_RSA_DP2_3072;
+ case 4096:
+ return PKE_RSA_DP2_4096;
+ default:
+ return 0;
+ };
+}
+
static int qat_rsa_enc(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
- struct qat_rsa_request *qat_req =
+ struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
int ret, ctr = 0;
@@ -262,14 +710,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
if (unlikely(!msg->pke_hdr.cd_pars.func_id))
return -EINVAL;
- qat_req->ctx = ctx;
+ qat_req->cb = qat_rsa_cb;
+ qat_req->ctx.rsa = ctx;
+ qat_req->areq.rsa = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
- qat_req->in.enc.e = ctx->dma_e;
- qat_req->in.enc.n = ctx->dma_n;
+ qat_req->in.rsa.enc.e = ctx->dma_e;
+ qat_req->in.rsa.enc.n = ctx->dma_n;
ret = -ENOMEM;
/*
@@ -281,16 +731,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
- qat_req->in.enc.m = dma_map_single(dev, sg_virt(req->src),
+ qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
req->src_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->in.enc.m)))
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
return ret;
} else {
int shift = ctx->key_sz - req->src_len;
qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->in.enc.m,
+ &qat_req->in.rsa.enc.m,
GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
@@ -300,30 +750,30 @@ static int qat_rsa_enc(struct akcipher_request *req)
}
if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
qat_req->dst_align = NULL;
- qat_req->out.enc.c = dma_map_single(dev, sg_virt(req->dst),
- req->dst_len,
- DMA_FROM_DEVICE);
+ qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
+ req->dst_len,
+ DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.enc.c)))
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
goto unmap_src;
} else {
qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->out.enc.c,
+ &qat_req->out.rsa.enc.c,
GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
}
- qat_req->in.in_tab[3] = 0;
- qat_req->out.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
+ qat_req->in.rsa.in_tab[3] = 0;
+ qat_req->out.rsa.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
@@ -331,7 +781,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
- msg->pke_mid.opaque = (uint64_t)(__force long)req;
+ msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
msg->input_param_count = 3;
msg->output_param_count = 1;
do {
@@ -353,19 +803,19 @@ unmap_in_params:
unmap_dst:
if (qat_req->dst_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
- qat_req->out.enc.c);
+ qat_req->out.rsa.enc.c);
else
- if (!dma_mapping_error(dev, qat_req->out.enc.c))
- dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
- DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+ dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+ ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
if (qat_req->src_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.enc.m);
+ qat_req->in.rsa.enc.m);
else
- if (!dma_mapping_error(dev, qat_req->in.enc.m))
- dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
- DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+ dma_unmap_single(dev, qat_req->in.rsa.enc.m,
+ ctx->key_sz, DMA_TO_DEVICE);
return ret;
}
@@ -375,7 +825,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
- struct qat_rsa_request *qat_req =
+ struct qat_asym_request *qat_req =
PTR_ALIGN(akcipher_request_ctx(req), 64);
struct icp_qat_fw_pke_request *msg = &qat_req->req;
int ret, ctr = 0;
@@ -390,18 +840,30 @@ static int qat_rsa_dec(struct akcipher_request *req)
memset(msg, '\0', sizeof(*msg));
ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
ICP_QAT_FW_COMN_REQ_FLAG_SET);
- msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
+ msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
+ qat_rsa_dec_fn_id_crt(ctx->key_sz) :
+ qat_rsa_dec_fn_id(ctx->key_sz);
if (unlikely(!msg->pke_hdr.cd_pars.func_id))
return -EINVAL;
- qat_req->ctx = ctx;
+ qat_req->cb = qat_rsa_cb;
+ qat_req->ctx.rsa = ctx;
+ qat_req->areq.rsa = req;
msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
msg->pke_hdr.comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
- qat_req->in.dec.d = ctx->dma_d;
- qat_req->in.dec.n = ctx->dma_n;
+ if (ctx->crt_mode) {
+ qat_req->in.rsa.dec_crt.p = ctx->dma_p;
+ qat_req->in.rsa.dec_crt.q = ctx->dma_q;
+ qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
+ qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
+ qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
+ } else {
+ qat_req->in.rsa.dec.d = ctx->dma_d;
+ qat_req->in.rsa.dec.n = ctx->dma_n;
+ }
ret = -ENOMEM;
/*
@@ -413,16 +875,16 @@ static int qat_rsa_dec(struct akcipher_request *req)
*/
if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
qat_req->src_align = NULL;
- qat_req->in.dec.c = dma_map_single(dev, sg_virt(req->src),
+ qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
req->dst_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->in.dec.c)))
+ if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
return ret;
} else {
int shift = ctx->key_sz - req->src_len;
qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->in.dec.c,
+ &qat_req->in.rsa.dec.c,
GFP_KERNEL);
if (unlikely(!qat_req->src_align))
return ret;
@@ -432,31 +894,34 @@ static int qat_rsa_dec(struct akcipher_request *req)
}
if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
qat_req->dst_align = NULL;
- qat_req->out.dec.m = dma_map_single(dev, sg_virt(req->dst),
+ qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
req->dst_len,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.dec.m)))
+ if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
goto unmap_src;
} else {
qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
- &qat_req->out.dec.m,
+ &qat_req->out.rsa.dec.m,
GFP_KERNEL);
if (unlikely(!qat_req->dst_align))
goto unmap_src;
}
- qat_req->in.in_tab[3] = 0;
- qat_req->out.out_tab[1] = 0;
- qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
+ if (ctx->crt_mode)
+ qat_req->in.rsa.in_tab[6] = 0;
+ else
+ qat_req->in.rsa.in_tab[3] = 0;
+ qat_req->out.rsa.out_tab[1] = 0;
+ qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
sizeof(struct qat_rsa_input_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
goto unmap_dst;
- qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
+ qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
sizeof(struct qat_rsa_output_params),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
@@ -464,8 +929,12 @@ static int qat_rsa_dec(struct akcipher_request *req)
msg->pke_mid.src_data_addr = qat_req->phy_in;
msg->pke_mid.dest_data_addr = qat_req->phy_out;
- msg->pke_mid.opaque = (uint64_t)(__force long)req;
- msg->input_param_count = 3;
+ msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
+ if (ctx->crt_mode)
+ msg->input_param_count = 6;
+ else
+ msg->input_param_count = 3;
+
msg->output_param_count = 1;
do {
ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
@@ -486,26 +955,24 @@ unmap_in_params:
unmap_dst:
if (qat_req->dst_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
- qat_req->out.dec.m);
+ qat_req->out.rsa.dec.m);
else
- if (!dma_mapping_error(dev, qat_req->out.dec.m))
- dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
- DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+ dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+ ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
if (qat_req->src_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.dec.c);
+ qat_req->in.rsa.dec.c);
else
- if (!dma_mapping_error(dev, qat_req->in.dec.c))
- dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
- DMA_TO_DEVICE);
+ if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+ dma_unmap_single(dev, qat_req->in.rsa.dec.c,
+ ctx->key_sz, DMA_TO_DEVICE);
return ret;
}
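The DP2 function IDs used above select the firmware's CRT decrypt path, which takes the five private-key components instead of (d, n). The underlying math is the textbook CRT recombination (standard RSA, not specific to this patch):

/* m1 = c^dP mod p
 * m2 = c^dQ mod q
 * h  = qInv * (m1 - m2) mod p
 * m  = m2 + h * q
 *
 * Ciphertext plus the five key parts (p, q, dP, dQ, qInv) make the six
 * input parameters seen above (in_tab[6] = 0, input_param_count = 6);
 * the two half-length exponentiations are roughly four times cheaper
 * than a single full-length c^d mod n.
 */
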
-int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen)
+int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
- struct qat_rsa_ctx *ctx = context;
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
const char *ptr = value;
@@ -518,11 +985,6 @@ int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
ctx->key_sz = vlen;
ret = -EINVAL;
- /* In FIPS mode only allow key size 2K & 3K */
- if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
- pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
- goto err;
- }
/* invalid key size provided */
if (!qat_rsa_enc_fn_id(ctx->key_sz))
goto err;
@@ -540,10 +1002,8 @@ err:
return ret;
}
-int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen)
+int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
- struct qat_rsa_ctx *ctx = context;
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
const char *ptr = value;
@@ -559,18 +1019,15 @@ int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
}
ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
- if (!ctx->e) {
- ctx->e = NULL;
+ if (!ctx->e)
return -ENOMEM;
- }
+
memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
return 0;
}
-int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
- const void *value, size_t vlen)
+int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
- struct qat_rsa_ctx *ctx = context;
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev = &GET_DEV(inst->accel_dev);
const char *ptr = value;
@@ -585,15 +1042,9 @@ int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
goto err;
- /* In FIPS mode only allow key size 2K & 3K */
- if (fips_enabled && (vlen != 256 && vlen != 384)) {
- pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
- goto err;
- }
-
ret = -ENOMEM;
ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
- if (!ctx->n)
+ if (!ctx->d)
goto err;
memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
@@ -603,12 +1054,106 @@ err:
return ret;
}
-static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
- unsigned int keylen, bool private)
+static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
- struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
- struct device *dev = &GET_DEV(ctx->inst->accel_dev);
- int ret;
+ while (!**ptr && *len) {
+ (*ptr)++;
+ (*len)--;
+ }
+}
+
+static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
+{
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev = &GET_DEV(inst->accel_dev);
+ const char *ptr;
+ unsigned int len;
+ unsigned int half_key_sz = ctx->key_sz / 2;
+
+ /* p */
+ ptr = rsa_key->p;
+ len = rsa_key->p_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto err;
+ ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
+ if (!ctx->p)
+ goto err;
+ memcpy(ctx->p + (half_key_sz - len), ptr, len);
+
+ /* q */
+ ptr = rsa_key->q;
+ len = rsa_key->q_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_p;
+ ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
+ if (!ctx->q)
+ goto free_p;
+ memcpy(ctx->q + (half_key_sz - len), ptr, len);
+
+ /* dp */
+ ptr = rsa_key->dp;
+ len = rsa_key->dp_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_q;
+ ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
+ GFP_KERNEL);
+ if (!ctx->dp)
+ goto free_q;
+ memcpy(ctx->dp + (half_key_sz - len), ptr, len);
+
+ /* dq */
+ ptr = rsa_key->dq;
+ len = rsa_key->dq_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_dp;
+ ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
+ GFP_KERNEL);
+ if (!ctx->dq)
+ goto free_dp;
+ memcpy(ctx->dq + (half_key_sz - len), ptr, len);
+
+ /* qinv */
+ ptr = rsa_key->qinv;
+ len = rsa_key->qinv_sz;
+ qat_rsa_drop_leading_zeros(&ptr, &len);
+ if (!len)
+ goto free_dq;
+ ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
+ GFP_KERNEL);
+ if (!ctx->qinv)
+ goto free_dq;
+ memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
+
+ ctx->crt_mode = true;
+ return;
+
+free_dq:
+ memset(ctx->dq, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+ ctx->dq = NULL;
+free_dp:
+ memset(ctx->dp, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+ ctx->dp = NULL;
+free_q:
+ memset(ctx->q, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+ ctx->q = NULL;
+free_p:
+ memset(ctx->p, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+ ctx->p = NULL;
+err:
+ ctx->crt_mode = false;
+}
+
+static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
+{
+ unsigned int half_key_sz = ctx->key_sz / 2;
/* Free the old key if any */
if (ctx->n)
@@ -619,19 +1164,68 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
memset(ctx->d, '\0', ctx->key_sz);
dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
}
+ if (ctx->p) {
+ memset(ctx->p, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
+ }
+ if (ctx->q) {
+ memset(ctx->q, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
+ }
+ if (ctx->dp) {
+ memset(ctx->dp, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
+ }
+ if (ctx->dq) {
+ memset(ctx->dq, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
+ }
+ if (ctx->qinv) {
+ memset(ctx->qinv, '\0', half_key_sz);
+ dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
+ }
ctx->n = NULL;
ctx->e = NULL;
ctx->d = NULL;
+ ctx->p = NULL;
+ ctx->q = NULL;
+ ctx->dp = NULL;
+ ctx->dq = NULL;
+ ctx->qinv = NULL;
+ ctx->crt_mode = false;
+ ctx->key_sz = 0;
+}
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen, bool private)
+{
+ struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+ struct rsa_key rsa_key;
+ int ret;
+
+ qat_rsa_clear_ctx(dev, ctx);
if (private)
- ret = asn1_ber_decoder(&qat_rsaprivkey_decoder, ctx, key,
- keylen);
+ ret = rsa_parse_priv_key(&rsa_key, key, keylen);
else
- ret = asn1_ber_decoder(&qat_rsapubkey_decoder, ctx, key,
- keylen);
+ ret = rsa_parse_pub_key(&rsa_key, key, keylen);
+ if (ret < 0)
+ goto free;
+
+ ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
+ if (ret < 0)
+ goto free;
+ ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
if (ret < 0)
goto free;
+ if (private) {
+ ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
+ if (ret < 0)
+ goto free;
+ qat_rsa_setkey_crt(ctx, &rsa_key);
+ }
if (!ctx->n || !ctx->e) {
/* invalid key provided */
@@ -646,20 +1240,7 @@ static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
return 0;
free:
- if (ctx->d) {
- memset(ctx->d, '\0', ctx->key_sz);
- dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
- ctx->d = NULL;
- }
- if (ctx->e) {
- dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
- ctx->e = NULL;
- }
- if (ctx->n) {
- dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
- ctx->n = NULL;
- ctx->key_sz = 0;
- }
+ qat_rsa_clear_ctx(dev, ctx);
return ret;
}
@@ -711,7 +1292,7 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
}
qat_crypto_put_instance(ctx->inst);
ctx->n = NULL;
- ctx->d = NULL;
+ ctx->e = NULL;
ctx->d = NULL;
}
@@ -725,7 +1306,7 @@ static struct akcipher_alg rsa = {
.max_size = qat_rsa_max_size,
.init = qat_rsa_init_tfm,
.exit = qat_rsa_exit_tfm,
- .reqsize = sizeof(struct qat_rsa_request) + 64,
+ .reqsize = sizeof(struct qat_asym_request) + 64,
.base = {
.cra_name = "rsa",
.cra_driver_name = "qat-rsa",
@@ -735,6 +1316,23 @@ static struct akcipher_alg rsa = {
},
};
+static struct kpp_alg dh = {
+ .set_secret = qat_dh_set_secret,
+ .generate_public_key = qat_dh_compute_value,
+ .compute_shared_secret = qat_dh_compute_value,
+ .max_size = qat_dh_max_size,
+ .init = qat_dh_init_tfm,
+ .exit = qat_dh_exit_tfm,
+ .reqsize = sizeof(struct qat_asym_request) + 64,
+ .base = {
+ .cra_name = "dh",
+ .cra_driver_name = "qat-dh",
+ .cra_priority = 1000,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct qat_dh_ctx),
+ },
+};
+
int qat_asym_algs_register(void)
{
int ret = 0;
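With "qat-dh" registered at cra_priority 1000, it is preferred over a generic software implementation of "dh". A minimal consumer sketch using the generic kpp API (standard kernel crypto calls of this era, not part of this patch):

#include <linux/err.h>
#include <crypto/kpp.h>

static int example_use_dh(void *packed_dh_key, unsigned int len)
{
	struct crypto_kpp *tfm;
	int ret;

	tfm = crypto_alloc_kpp("dh", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_kpp_set_secret(tfm, packed_dh_key, len);
	/* then kpp_request_alloc() + crypto_kpp_generate_public_key(),
	 * and crypto_kpp_compute_shared_secret() with the peer's value */
	crypto_free_kpp(tfm);
	return ret;
}
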
@@ -743,7 +1341,11 @@ int qat_asym_algs_register(void)
if (++active_devs == 1) {
rsa.base.cra_flags = 0;
ret = crypto_register_akcipher(&rsa);
+ if (ret)
+ goto unlock;
+ ret = crypto_register_kpp(&dh);
}
+unlock:
mutex_unlock(&algs_lock);
return ret;
}
@@ -751,7 +1353,9 @@ int qat_asym_algs_register(void)
void qat_asym_algs_unregister(void)
{
mutex_lock(&algs_lock);
- if (--active_devs == 0)
+ if (--active_devs == 0) {
crypto_unregister_akcipher(&rsa);
+ crypto_unregister_kpp(&dh);
+ }
mutex_unlock(&algs_lock);
}
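The driver-private ASN.1 grammars below can go because the common RSA helpers now do the BER parsing: rsa_parse_priv_key()/rsa_parse_pub_key() fill a struct rsa_key whose fields point into the caller's key blob. Condensed usage sketch (the helper calls mirror qat_rsa_setkey() above):

#include <crypto/internal/rsa.h>

static int example_parse(struct qat_rsa_ctx *ctx,
			 const void *key, unsigned int keylen)
{
	struct rsa_key rsa_key;
	int ret;

	ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	/* rsa_key.n/.e/.d (and .p/.q/.dp/.dq/.qinv for CRT) now point
	 * into 'key', so copy them out before the blob goes away. */
	return qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
}
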
diff --git a/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1 b/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1
deleted file mode 100644
index f0066adb79b8..000000000000
--- a/drivers/crypto/qat/qat_common/qat_rsaprivkey.asn1
+++ /dev/null
@@ -1,11 +0,0 @@
-RsaPrivKey ::= SEQUENCE {
- version INTEGER,
- n INTEGER ({ qat_rsa_get_n }),
- e INTEGER ({ qat_rsa_get_e }),
- d INTEGER ({ qat_rsa_get_d }),
- prime1 INTEGER,
- prime2 INTEGER,
- exponent1 INTEGER,
- exponent2 INTEGER,
- coefficient INTEGER
-}
diff --git a/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1 b/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1
deleted file mode 100644
index bd667b31a21a..000000000000
--- a/drivers/crypto/qat/qat_common/qat_rsapubkey.asn1
+++ /dev/null
@@ -1,4 +0,0 @@
-RsaPubKey ::= SEQUENCE {
- n INTEGER ({ qat_rsa_get_n }),
- e INTEGER ({ qat_rsa_get_e })
-}