From ffb57daad3a345a90543ffa1bb5e03f76b6453fb Mon Sep 17 00:00:00 2001 From: Hadar Gat Date: Fri, 27 Mar 2020 09:10:21 +0300 Subject: dt-bindings: add device tree binding for Arm CryptoCell trng engine The Arm CryptoCell is a hardware security engine. This patch adds DT bindings for its TRNG (True Random Number Generator) engine. Signed-off-by: Hadar Gat Reviewed-by: Rob Herring Signed-off-by: Herbert Xu --- .../devicetree/bindings/rng/arm-cctrng.yaml | 54 ++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 Documentation/devicetree/bindings/rng/arm-cctrng.yaml diff --git a/Documentation/devicetree/bindings/rng/arm-cctrng.yaml b/Documentation/devicetree/bindings/rng/arm-cctrng.yaml new file mode 100644 index 000000000000..ca6aad19b6ba --- /dev/null +++ b/Documentation/devicetree/bindings/rng/arm-cctrng.yaml @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rng/arm-cctrng.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Arm TrustZone CryptoCell TRNG engine + +maintainers: + - Hadar Gat + +description: |+ + Arm TrustZone CryptoCell TRNG (True Random Number Generator) engine. + +properties: + compatible: + enum: + - arm,cryptocell-713-trng + - arm,cryptocell-703-trng + + interrupts: + maxItems: 1 + + reg: + maxItems: 1 + + arm,rosc-ratio: + description: + Arm TrustZone CryptoCell TRNG engine has 4 ring oscillators. + Sampling ratio values for these 4 ring oscillators. (from calibration) + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32-array + - items: + maxItems: 4 + + clocks: + maxItems: 1 + +required: + - compatible + - interrupts + - reg + - arm,rosc-ratio + +additionalProperties: false + +examples: + - | + arm_cctrng: rng@60000000 { + compatible = "arm,cryptocell-713-trng"; + interrupts = <0 29 4>; + reg = <0x60000000 0x10000>; + arm,rosc-ratio = <5000 1000 500 0>; + }; -- cgit v1.2.3-59-g8ed1b From a583ed310bb6b514e717c11a30b5a7bc3a65d1b1 Mon Sep 17 00:00:00 2001 From: Hadar Gat Date: Fri, 27 Mar 2020 09:10:22 +0300 Subject: hwrng: cctrng - introduce Arm CryptoCell driver Introduce low level Arm CryptoCell TRNG HW support. Signed-off-by: Hadar Gat Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 12 + drivers/char/hw_random/Makefile | 1 + drivers/char/hw_random/cctrng.c | 736 ++++++++++++++++++++++++++++++++++++++++ drivers/char/hw_random/cctrng.h | 72 ++++ 4 files changed, 821 insertions(+) create mode 100644 drivers/char/hw_random/cctrng.c create mode 100644 drivers/char/hw_random/cctrng.h diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 9bc46da8d77a..848f26f78dc1 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -474,6 +474,18 @@ config HW_RANDOM_KEYSTONE help This option enables Keystone's hardware random generator. +config HW_RANDOM_CCTRNG + tristate "Arm CryptoCell True Random Number Generator support" + default HW_RANDOM + help + This driver provides support for the True Random Number + Generator available in Arm TrustZone CryptoCell. + + To compile this driver as a module, choose M here: the module + will be called cctrng. + + If unsure, say Y. 
+ endif # HW_RANDOM config UML_RANDOM diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index a7801b49ce6c..2c6724735345 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -41,3 +41,4 @@ obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o +obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c new file mode 100644 index 000000000000..bdcd56243104 --- /dev/null +++ b/drivers/char/hw_random/cctrng.c @@ -0,0 +1,736 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2019-2020 ARM Limited or its affiliates. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cctrng.h" + +#define CC_REG_LOW(name) (name ## _BIT_SHIFT) +#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1) +#define CC_GENMASK(name) GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name)) + +#define CC_REG_FLD_GET(reg_name, fld_name, reg_val) \ + (FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val)) + +#define CC_HW_RESET_LOOP_COUNT 10 +#define CC_TRNG_SUSPEND_TIMEOUT 3000 + +/* data circular buffer in words must be: + * - of a power-of-2 size (limitation of circ_buf.h macros) + * - at least 6, the size generated in the EHR according to HW implementation + */ +#define CCTRNG_DATA_BUF_WORDS 32 + +/* The timeout for the TRNG operation should be calculated with the formula: + * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE + * while: + * - SAMPLE_CNT is input value from the characterisation process + * - all the rest are constants + */ +#define EHR_NUM 1 +#define VN_COEFF 4 +#define EHR_LENGTH CC_TRNG_EHR_IN_BITS +#define SCALE_VALUE 2 +#define CCTRNG_TIMEOUT(smpl_cnt) \ + (EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE) + +struct cctrng_drvdata { + struct platform_device *pdev; + void __iomem *cc_base; + struct clk *clk; + struct hwrng rng; + u32 active_rosc; + /* Sampling interval for each ring oscillator: + * count of ring oscillator cycles between consecutive bits sampling. + * Value of 0 indicates non-valid rosc + */ + u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS]; + + u32 data_buf[CCTRNG_DATA_BUF_WORDS]; + struct circ_buf circ; + struct work_struct compwork; + struct work_struct startwork; + + /* pending_hw - 1 when HW is pending, 0 when it is idle */ + atomic_t pending_hw; + + /* protects against multiple concurrent consumers of data_buf */ + spinlock_t read_lock; +}; + + +/* functions for write/read CC registers */ +static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val) +{ + iowrite32(val, (drvdata->cc_base + reg)); +} +static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg) +{ + return ioread32(drvdata->cc_base + reg); +} + + +static int cc_trng_pm_get(struct device *dev) +{ + int rc = 0; + + rc = pm_runtime_get_sync(dev); + + /* pm_runtime_get_sync() can return 1 as a valid return code */ + return (rc == 1 ? 
0 : rc); +} + +static void cc_trng_pm_put_suspend(struct device *dev) +{ + int rc = 0; + + pm_runtime_mark_last_busy(dev); + rc = pm_runtime_put_autosuspend(dev); + if (rc) + dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", rc); +} + +static int cc_trng_pm_init(struct cctrng_drvdata *drvdata) +{ + struct device *dev = &(drvdata->pdev->dev); + + /* must be before the enabling to avoid redundant suspending */ + pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT); + pm_runtime_use_autosuspend(dev); + /* set us as active - note we won't do PM ops until cc_trng_pm_go()! */ + return pm_runtime_set_active(dev); +} + +static void cc_trng_pm_go(struct cctrng_drvdata *drvdata) +{ + struct device *dev = &(drvdata->pdev->dev); + + /* enable the PM module*/ + pm_runtime_enable(dev); +} + +static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata) +{ + struct device *dev = &(drvdata->pdev->dev); + + pm_runtime_disable(dev); +} + + +static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata) +{ + struct device *dev = &(drvdata->pdev->dev); + struct device_node *np = drvdata->pdev->dev.of_node; + int rc; + int i; + /* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */ + int ret = -EINVAL; + + rc = of_property_read_u32_array(np, "arm,rosc-ratio", + drvdata->smpl_ratio, + CC_TRNG_NUM_OF_ROSCS); + if (rc) { + /* arm,rosc-ratio was not found in device tree */ + return rc; + } + + /* verify that at least one rosc has (sampling ratio > 0) */ + for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) { + dev_dbg(dev, "rosc %d sampling ratio %u", + i, drvdata->smpl_ratio[i]); + + if (drvdata->smpl_ratio[i] > 0) + ret = 0; + } + + return ret; +} + +static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata) +{ + struct device *dev = &(drvdata->pdev->dev); + + dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc); + drvdata->active_rosc += 1; + + while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) { + if (drvdata->smpl_ratio[drvdata->active_rosc] > 0) + return 0; + + drvdata->active_rosc += 1; + } + return -EINVAL; +} + + +static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata) +{ + u32 max_cycles; + + /* Set watchdog threshold to maximal allowed time (in CPU cycles) */ + max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]); + cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles); + + /* enable the RND source */ + cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1); + + /* unmask RNG interrupts */ + cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK); +} + + +/* increase circular data buffer index (head/tail) */ +static inline void circ_idx_inc(int *idx, int bytes) +{ + *idx += (bytes + 3) >> 2; + *idx &= (CCTRNG_DATA_BUF_WORDS - 1); +} + +static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata) +{ + return CIRC_SPACE(drvdata->circ.head, + drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS); + +} + +static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + /* current implementation ignores "wait" */ + + struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv; + struct device *dev = &(drvdata->pdev->dev); + u32 *buf = (u32 *)drvdata->circ.buf; + size_t copied = 0; + size_t cnt_w; + size_t size; + size_t left; + + if (!spin_trylock(&drvdata->read_lock)) { + /* concurrent consumers from data_buf cannot be served */ + dev_dbg_ratelimited(dev, "unable to hold lock\n"); + return 0; + } + + /* copy till end of data buffer (without wrap back) */ + cnt_w = 
CIRC_CNT_TO_END(drvdata->circ.head, + drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS); + size = min((cnt_w<<2), max); + memcpy(data, &(buf[drvdata->circ.tail]), size); + copied = size; + circ_idx_inc(&drvdata->circ.tail, size); + /* copy rest of data in data buffer */ + left = max - copied; + if (left > 0) { + cnt_w = CIRC_CNT(drvdata->circ.head, + drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS); + size = min((cnt_w<<2), left); + memcpy(data, &(buf[drvdata->circ.tail]), size); + copied += size; + circ_idx_inc(&drvdata->circ.tail, size); + } + + spin_unlock(&drvdata->read_lock); + + if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) { + if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) { + /* re-check space in buffer to avoid potential race */ + if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) { + /* increment device's usage counter */ + int rc = cc_trng_pm_get(dev); + + if (rc) { + dev_err(dev, + "cc_trng_pm_get returned %x\n", + rc); + return rc; + } + + /* schedule execution of deferred work handler + * for filling of data buffer + */ + schedule_work(&drvdata->startwork); + } else { + atomic_set(&drvdata->pending_hw, 0); + } + } + } + + return copied; +} + +static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata) +{ + u32 tmp_smpl_cnt = 0; + struct device *dev = &(drvdata->pdev->dev); + + dev_dbg(dev, "cctrng hw trigger.\n"); + + /* enable the HW RND clock */ + cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1); + + /* do software reset */ + cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1); + /* in order to verify that the reset has completed, + * the sample count need to be verified + */ + do { + /* enable the HW RND clock */ + cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1); + + /* set sampling ratio (rng_clocks) between consecutive bits */ + cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET, + drvdata->smpl_ratio[drvdata->active_rosc]); + + /* read the sampling ratio */ + tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET); + + } while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]); + + /* disable the RND source for setting new parameters in HW */ + cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0); + + cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF); + + cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc); + + /* Debug Control register: set to 0 - no bypasses */ + cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0); + + cc_trng_enable_rnd_source(drvdata); +} + +void cc_trng_compwork_handler(struct work_struct *w) +{ + u32 isr = 0; + u32 ehr_valid = 0; + struct cctrng_drvdata *drvdata = + container_of(w, struct cctrng_drvdata, compwork); + struct device *dev = &(drvdata->pdev->dev); + int i; + + /* stop DMA and the RNG source */ + cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0); + cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0); + + /* read RNG_ISR and check for errors */ + isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET); + ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr); + dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid); + +#ifdef CONFIG_CRYPTO_FIPS + if (CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr) && fips_enabled) { + fips_fail_notify(); + /* FIPS error is fatal */ + panic("Got HW CRNGT error while fips is enabled!\n"); + } +#endif + + /* Clear all pending RNG interrupts */ + cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr); + + + if (!ehr_valid) { + /* in case of AUTOCORR/TIMEOUT error, try the next ROSC */ + if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) || + 
CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) { + dev_dbg(dev, "cctrng autocorr/timeout error.\n"); + goto next_rosc; + } + + /* in case of VN error, ignore it */ + } + + /* read EHR data from registers */ + for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) { + /* calc word ptr in data_buf */ + u32 *buf = (u32 *)drvdata->circ.buf; + + buf[drvdata->circ.head] = cc_ioread(drvdata, + CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32))); + + /* EHR_DATA registers are cleared on read. In case 0 value was + * returned, restart the entropy collection. + */ + if (buf[drvdata->circ.head] == 0) { + dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n", + drvdata->active_rosc); + goto next_rosc; + } + + circ_idx_inc(&drvdata->circ.head, 1<<2); + } + + atomic_set(&drvdata->pending_hw, 0); + + /* continue to fill data buffer if needed */ + if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) { + if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) { + /* Re-enable rnd source */ + cc_trng_enable_rnd_source(drvdata); + return; + } + } + + cc_trng_pm_put_suspend(dev); + + dev_dbg(dev, "compwork handler done\n"); + return; + +next_rosc: + if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) && + (cc_trng_change_rosc(drvdata) == 0)) { + /* trigger trng hw with next rosc */ + cc_trng_hw_trigger(drvdata); + } else { + atomic_set(&drvdata->pending_hw, 0); + cc_trng_pm_put_suspend(dev); + } +} + +static irqreturn_t cc_isr(int irq, void *dev_id) +{ + struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id; + struct device *dev = &(drvdata->pdev->dev); + u32 irr; + + /* if driver suspended return, probably shared interrupt */ + if (pm_runtime_suspended(dev)) + return IRQ_NONE; + + /* read the interrupt status */ + irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET); + dev_dbg(dev, "Got IRR=0x%08X\n", irr); + + if (irr == 0) /* Probably shared interrupt line */ + return IRQ_NONE; + + /* clear interrupt - must be before processing events */ + cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr); + + /* RNG interrupt - most probable */ + if (irr & CC_HOST_RNG_IRQ_MASK) { + /* Mask RNG interrupts - will be unmasked in deferred work */ + cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF); + + /* We clear RNG interrupt here, + * to avoid it from firing as we'll unmask RNG interrupts. 
+ */ + cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, + CC_HOST_RNG_IRQ_MASK); + + irr &= ~CC_HOST_RNG_IRQ_MASK; + + /* schedule execution of deferred work handler */ + schedule_work(&drvdata->compwork); + } + + if (irr) { + dev_dbg_ratelimited(dev, + "IRR includes unknown cause bits (0x%08X)\n", + irr); + /* Just warning */ + } + + return IRQ_HANDLED; +} + +void cc_trng_startwork_handler(struct work_struct *w) +{ + struct cctrng_drvdata *drvdata = + container_of(w, struct cctrng_drvdata, startwork); + + drvdata->active_rosc = 0; + cc_trng_hw_trigger(drvdata); +} + + +static int cc_trng_clk_init(struct cctrng_drvdata *drvdata) +{ + struct clk *clk; + struct device *dev = &(drvdata->pdev->dev); + int rc = 0; + + clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) != -EPROBE_DEFER) + dev_err(dev, "Error getting clock: %pe\n", clk); + return PTR_ERR(clk); + } + drvdata->clk = clk; + + rc = clk_prepare_enable(drvdata->clk); + if (rc) { + dev_err(dev, "Failed to enable clock\n"); + return rc; + } + + return 0; +} + +static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata) +{ + clk_disable_unprepare(drvdata->clk); +} + + +static int cctrng_probe(struct platform_device *pdev) +{ + struct resource *req_mem_cc_regs = NULL; + struct cctrng_drvdata *drvdata; + struct device *dev = &pdev->dev; + int rc = 0; + u32 val; + int irq; + + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); + if (!drvdata->rng.name) + return -ENOMEM; + + drvdata->rng.read = cctrng_read; + drvdata->rng.priv = (unsigned long)drvdata; + drvdata->rng.quality = CC_TRNG_QUALITY; + + platform_set_drvdata(pdev, drvdata); + drvdata->pdev = pdev; + + drvdata->circ.buf = (char *)drvdata->data_buf; + + /* Get device resources */ + /* First CC registers space */ + req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + /* Map registers space */ + drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs); + if (IS_ERR(drvdata->cc_base)) { + dev_err(dev, "Failed to ioremap registers"); + return PTR_ERR(drvdata->cc_base); + } + + dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name, + req_mem_cc_regs); + dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n", + &req_mem_cc_regs->start, drvdata->cc_base); + + /* Then IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "Failed getting IRQ resource\n"); + return irq; + } + + /* parse sampling rate from device tree */ + rc = cc_trng_parse_sampling_ratio(drvdata); + if (rc) { + dev_err(dev, "Failed to get legal sampling ratio for rosc\n"); + return rc; + } + + rc = cc_trng_clk_init(drvdata); + if (rc) { + dev_err(dev, "cc_trng_clk_init failed\n"); + return rc; + } + + INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler); + INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler); + spin_lock_init(&drvdata->read_lock); + + /* register the driver isr function */ + rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata); + if (rc) { + dev_err(dev, "Could not register to interrupt %d\n", irq); + goto post_clk_err; + } + dev_dbg(dev, "Registered to IRQ: %d\n", irq); + + /* Clear all pending interrupts */ + val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET); + dev_dbg(dev, "IRR=0x%08X\n", val); + cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val); + + /* unmask HOST RNG interrupt */ + cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET, + cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) & + 
~CC_HOST_RNG_IRQ_MASK); + + /* init PM */ + rc = cc_trng_pm_init(drvdata); + if (rc) { + dev_err(dev, "cc_trng_pm_init failed\n"); + goto post_clk_err; + } + + /* increment device's usage counter */ + rc = cc_trng_pm_get(dev); + if (rc) { + dev_err(dev, "cc_trng_pm_get returned %x\n", rc); + goto post_pm_err; + } + + /* set pending_hw to verify that HW won't be triggered from read */ + atomic_set(&drvdata->pending_hw, 1); + + /* registration of the hwrng device */ + rc = hwrng_register(&drvdata->rng); + if (rc) { + dev_err(dev, "Could not register hwrng device.\n"); + goto post_pm_err; + } + + /* trigger HW to start generate data */ + drvdata->active_rosc = 0; + cc_trng_hw_trigger(drvdata); + + /* All set, we can allow auto-suspend */ + cc_trng_pm_go(drvdata); + + dev_info(dev, "ARM cctrng device initialized\n"); + + return 0; + +post_pm_err: + cc_trng_pm_fini(drvdata); + +post_clk_err: + cc_trng_clk_fini(drvdata); + + return rc; +} + +static int cctrng_remove(struct platform_device *pdev) +{ + struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + + dev_dbg(dev, "Releasing cctrng resources...\n"); + + hwrng_unregister(&drvdata->rng); + + cc_trng_pm_fini(drvdata); + + cc_trng_clk_fini(drvdata); + + dev_info(dev, "ARM cctrng device terminated\n"); + + return 0; +} + +static int __maybe_unused cctrng_suspend(struct device *dev) +{ + struct cctrng_drvdata *drvdata = dev_get_drvdata(dev); + + dev_dbg(dev, "set HOST_POWER_DOWN_EN\n"); + cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET, + POWER_DOWN_ENABLE); + + clk_disable_unprepare(drvdata->clk); + + return 0; +} + +static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata) +{ + unsigned int val; + unsigned int i; + + for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) { + /* in cc7x3 NVM_IS_IDLE indicates that CC reset is + * completed and device is fully functional + */ + val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET); + if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) { + /* hw indicate reset completed */ + return true; + } + /* allow scheduling other process on the processor */ + schedule(); + } + /* reset not completed */ + return false; +} + +static int __maybe_unused cctrng_resume(struct device *dev) +{ + struct cctrng_drvdata *drvdata = dev_get_drvdata(dev); + int rc; + + dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n"); + /* Enables the device source clk */ + rc = clk_prepare_enable(drvdata->clk); + if (rc) { + dev_err(dev, "failed getting clock back on. 
We're toast.\n"); + return rc; + } + + /* wait for Cryptocell reset completion */ + if (!cctrng_wait_for_reset_completion(drvdata)) { + dev_err(dev, "Cryptocell reset not completed"); + return -EBUSY; + } + + /* unmask HOST RNG interrupt */ + cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET, + cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) & + ~CC_HOST_RNG_IRQ_MASK); + + cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET, + POWER_DOWN_DISABLE); + + return 0; +} + +static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL); + +static const struct of_device_id arm_cctrng_dt_match[] = { + { .compatible = "arm,cryptocell-713-trng", }, + { .compatible = "arm,cryptocell-703-trng", }, + {}, +}; +MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match); + +static struct platform_driver cctrng_driver = { + .driver = { + .name = "cctrng", + .of_match_table = arm_cctrng_dt_match, + .pm = &cctrng_pm, + }, + .probe = cctrng_probe, + .remove = cctrng_remove, +}; + +static int __init cctrng_mod_init(void) +{ + /* Compile time assertion checks */ + BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6); + BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0); + + return platform_driver_register(&cctrng_driver); +} +module_init(cctrng_mod_init); + +static void __exit cctrng_mod_exit(void) +{ + platform_driver_unregister(&cctrng_driver); +} +module_exit(cctrng_mod_exit); + +/* Module description */ +MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver"); +MODULE_AUTHOR("ARM"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/cctrng.h b/drivers/char/hw_random/cctrng.h new file mode 100644 index 000000000000..1f2fde95adcb --- /dev/null +++ b/drivers/char/hw_random/cctrng.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2019-2020 ARM Limited or its affiliates. 
*/ + +#include + +#define POWER_DOWN_ENABLE 0x01 +#define POWER_DOWN_DISABLE 0x00 + +/* hwrng quality: bits of true entropy per 1024 bits of input */ +#define CC_TRNG_QUALITY 1024 + +/* CryptoCell TRNG HW definitions */ +#define CC_TRNG_NUM_OF_ROSCS 4 +/* The number of words generated in the entropy holding register (EHR) + * 6 words (192 bit) according to HW implementation + */ +#define CC_TRNG_EHR_IN_WORDS 6 +#define CC_TRNG_EHR_IN_BITS (CC_TRNG_EHR_IN_WORDS * BITS_PER_TYPE(u32)) + +#define CC_HOST_RNG_IRQ_MASK BIT(CC_HOST_RGF_IRR_RNG_INT_BIT_SHIFT) + +/* RNG interrupt mask */ +#define CC_RNG_INT_MASK (BIT(CC_RNG_IMR_EHR_VALID_INT_MASK_BIT_SHIFT) | \ + BIT(CC_RNG_IMR_AUTOCORR_ERR_INT_MASK_BIT_SHIFT) | \ + BIT(CC_RNG_IMR_CRNGT_ERR_INT_MASK_BIT_SHIFT) | \ + BIT(CC_RNG_IMR_VN_ERR_INT_MASK_BIT_SHIFT) | \ + BIT(CC_RNG_IMR_WATCHDOG_INT_MASK_BIT_SHIFT)) + +// -------------------------------------- +// BLOCK: RNG +// -------------------------------------- +#define CC_RNG_IMR_REG_OFFSET 0x0100UL +#define CC_RNG_IMR_EHR_VALID_INT_MASK_BIT_SHIFT 0x0UL +#define CC_RNG_IMR_AUTOCORR_ERR_INT_MASK_BIT_SHIFT 0x1UL +#define CC_RNG_IMR_CRNGT_ERR_INT_MASK_BIT_SHIFT 0x2UL +#define CC_RNG_IMR_VN_ERR_INT_MASK_BIT_SHIFT 0x3UL +#define CC_RNG_IMR_WATCHDOG_INT_MASK_BIT_SHIFT 0x4UL +#define CC_RNG_ISR_REG_OFFSET 0x0104UL +#define CC_RNG_ISR_EHR_VALID_BIT_SHIFT 0x0UL +#define CC_RNG_ISR_EHR_VALID_BIT_SIZE 0x1UL +#define CC_RNG_ISR_AUTOCORR_ERR_BIT_SHIFT 0x1UL +#define CC_RNG_ISR_AUTOCORR_ERR_BIT_SIZE 0x1UL +#define CC_RNG_ISR_CRNGT_ERR_BIT_SHIFT 0x2UL +#define CC_RNG_ISR_CRNGT_ERR_BIT_SIZE 0x1UL +#define CC_RNG_ISR_WATCHDOG_BIT_SHIFT 0x4UL +#define CC_RNG_ISR_WATCHDOG_BIT_SIZE 0x1UL +#define CC_RNG_ICR_REG_OFFSET 0x0108UL +#define CC_TRNG_CONFIG_REG_OFFSET 0x010CUL +#define CC_EHR_DATA_0_REG_OFFSET 0x0114UL +#define CC_RND_SOURCE_ENABLE_REG_OFFSET 0x012CUL +#define CC_SAMPLE_CNT1_REG_OFFSET 0x0130UL +#define CC_TRNG_DEBUG_CONTROL_REG_OFFSET 0x0138UL +#define CC_RNG_SW_RESET_REG_OFFSET 0x0140UL +#define CC_RNG_CLK_ENABLE_REG_OFFSET 0x01C4UL +#define CC_RNG_DMA_ENABLE_REG_OFFSET 0x01C8UL +#define CC_RNG_WATCHDOG_VAL_REG_OFFSET 0x01D8UL +// -------------------------------------- +// BLOCK: SEC_HOST_RGF +// -------------------------------------- +#define CC_HOST_RGF_IRR_REG_OFFSET 0x0A00UL +#define CC_HOST_RGF_IRR_RNG_INT_BIT_SHIFT 0xAUL +#define CC_HOST_RGF_IMR_REG_OFFSET 0x0A04UL +#define CC_HOST_RGF_ICR_REG_OFFSET 0x0A08UL + +#define CC_HOST_POWER_DOWN_EN_REG_OFFSET 0x0A78UL + +// -------------------------------------- +// BLOCK: NVM +// -------------------------------------- +#define CC_NVM_IS_IDLE_REG_OFFSET 0x0F10UL +#define CC_NVM_IS_IDLE_VALUE_BIT_SHIFT 0x0UL +#define CC_NVM_IS_IDLE_VALUE_BIT_SIZE 0x1UL -- cgit v1.2.3-59-g8ed1b From 3c8e0bd105037c4451adb4594fcfa33aef58d286 Mon Sep 17 00:00:00 2001 From: Hadar Gat Date: Fri, 27 Mar 2020 09:10:23 +0300 Subject: MAINTAINERS: add HG as cctrng maintainer I work for Arm on maintaining the TrustZone CryptoCell TRNG driver. 
Signed-off-by: Hadar Gat Signed-off-by: Herbert Xu --- MAINTAINERS | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index e64e5db31497..82a9d3d5c541 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3894,6 +3894,15 @@ S: Supported W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family F: drivers/crypto/ccree/ +CCTRNG ARM TRUSTZONE CRYPTOCELL TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER +M: Hadar Gat +L: linux-crypto@vger.kernel.org +S: Supported +F: drivers/char/hw_random/cctrng.c +F: drivers/char/hw_random/cctrng.h +F: Documentation/devicetree/bindings/rng/arm-cctrng.txt +W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family + CEC FRAMEWORK M: Hans Verkuil L: linux-media@vger.kernel.org -- cgit v1.2.3-59-g8ed1b From 619e464ae22a17e6a060527c8591ccf78eb368ba Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Thu, 2 Apr 2020 14:53:01 +0800 Subject: crypto: hisilicon - put vfs_num into struct hisi_qm We plan to move vfs_num related code into qm.c, put the param vfs_num into struct hisi_qm first. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre.h | 1 - drivers/crypto/hisilicon/hpre/hpre_main.c | 12 +++++------- drivers/crypto/hisilicon/qm.h | 1 + drivers/crypto/hisilicon/sec2/sec.h | 1 - drivers/crypto/hisilicon/sec2/sec_main.c | 17 ++++++++--------- drivers/crypto/hisilicon/zip/zip_main.c | 19 ++++++++----------- 6 files changed, 22 insertions(+), 29 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h index 03d512ec6336..0a8ba468e2be 100644 --- a/drivers/crypto/hisilicon/hpre/hpre.h +++ b/drivers/crypto/hisilicon/hpre/hpre.h @@ -47,7 +47,6 @@ struct hpre_debug { struct hpre { struct hisi_qm qm; struct hpre_debug debug; - u32 num_vfs; unsigned long status; }; diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 88be53bf4a38..5269e5b9be73 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -354,9 +354,7 @@ static u32 hpre_current_qm_read(struct hpre_debugfs_file *file) static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val) { struct hisi_qm *qm = hpre_file_to_qm(file); - struct hpre_debug *debug = file->debug; - struct hpre *hpre = container_of(debug, struct hpre, debug); - u32 num_vfs = hpre->num_vfs; + u32 num_vfs = qm->vfs_num; u32 vfq_num, tmp; @@ -827,7 +825,7 @@ static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs) static int hpre_clear_vft_config(struct hpre *hpre) { struct hisi_qm *qm = &hpre->qm; - u32 num_vfs = hpre->num_vfs; + u32 num_vfs = qm->vfs_num; int ret; u32 i; @@ -836,7 +834,7 @@ static int hpre_clear_vft_config(struct hpre *hpre) if (ret) return ret; } - hpre->num_vfs = 0; + qm->vfs_num = 0; return 0; } @@ -860,7 +858,7 @@ static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs) return ret; } - hpre->num_vfs = num_vfs; + hpre->qm.vfs_num = num_vfs; ret = pci_enable_sriov(pdev, num_vfs); if (ret) { @@ -903,7 +901,7 @@ static void hpre_remove(struct pci_dev *pdev) hpre_algs_unregister(); hisi_qm_del_from_list(qm, &hpre_devices); - if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0) { + if (qm->fun_type == QM_HW_PF && qm->vfs_num) { ret = hpre_sriov_disable(pdev); if (ret) { pci_err(pdev, "Disable SRIOV fail!\n"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 
ec5b6f48db6c..33c5a8edd2ae 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -161,6 +161,7 @@ struct hisi_qm { u32 qp_num; u32 qp_in_used; u32 ctrl_qp_num; + u32 vfs_num; struct list_head list; struct qm_dma qdma; diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 3598fa17beb2..2326634a1d71 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -172,7 +172,6 @@ struct sec_dev { struct sec_debug debug; u32 ctx_q_num; bool iommu_used; - u32 num_vfs; unsigned long status; }; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 1f54ebe164b6..ef26239ec360 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -424,23 +424,22 @@ static u32 sec_current_qm_read(struct sec_debug_file *file) static int sec_current_qm_write(struct sec_debug_file *file, u32 val) { struct hisi_qm *qm = file->qm; - struct sec_dev *sec = container_of(qm, struct sec_dev, qm); u32 vfq_num; u32 tmp; - if (val > sec->num_vfs) + if (val > qm->vfs_num) return -EINVAL; /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ if (!val) { qm->debug.curr_qm_qp_num = qm->qp_num; } else { - vfq_num = (qm->ctrl_qp_num - qm->qp_num) / sec->num_vfs; + vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num; - if (val == sec->num_vfs) + if (val == qm->vfs_num) qm->debug.curr_qm_qp_num = qm->ctrl_qp_num - qm->qp_num - - (sec->num_vfs - 1) * vfq_num; + (qm->vfs_num - 1) * vfq_num; else qm->debug.curr_qm_qp_num = vfq_num; } @@ -926,7 +925,7 @@ static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs) static int sec_clear_vft_config(struct sec_dev *sec) { struct hisi_qm *qm = &sec->qm; - u32 num_vfs = sec->num_vfs; + u32 num_vfs = qm->vfs_num; int ret; u32 i; @@ -936,7 +935,7 @@ static int sec_clear_vft_config(struct sec_dev *sec) return ret; } - sec->num_vfs = 0; + qm->vfs_num = 0; return 0; } @@ -962,7 +961,7 @@ static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs) return ret; } - sec->num_vfs = num_vfs; + sec->qm.vfs_num = num_vfs; ret = pci_enable_sriov(pdev, num_vfs); if (ret) { @@ -1006,7 +1005,7 @@ static void sec_remove(struct pci_dev *pdev) hisi_qm_del_from_list(qm, &sec_devices); - if (qm->fun_type == QM_HW_PF && sec->num_vfs) + if (qm->fun_type == QM_HW_PF && qm->vfs_num) (void)sec_sriov_disable(pdev); sec_debugfs_exit(sec); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index fcc85d2dbd07..f5ffa0297730 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -134,7 +134,6 @@ struct ctrl_debug_file { * Just relevant for PF. 
*/ struct hisi_zip_ctrl { - u32 num_vfs; struct hisi_zip *hisi_zip; struct dentry *debug_root; struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM]; @@ -342,21 +341,20 @@ static u32 current_qm_read(struct ctrl_debug_file *file) static int current_qm_write(struct ctrl_debug_file *file, u32 val) { struct hisi_qm *qm = file_to_qm(file); - struct hisi_zip_ctrl *ctrl = file->ctrl; u32 vfq_num; u32 tmp; - if (val > ctrl->num_vfs) + if (val > qm->vfs_num) return -EINVAL; /* Calculate curr_qm_qp_num and store */ if (val == 0) { qm->debug.curr_qm_qp_num = qm->qp_num; } else { - vfq_num = (qm->ctrl_qp_num - qm->qp_num) / ctrl->num_vfs; - if (val == ctrl->num_vfs) + vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num; + if (val == qm->vfs_num) qm->debug.curr_qm_qp_num = qm->ctrl_qp_num - - qm->qp_num - (ctrl->num_vfs - 1) * vfq_num; + qm->qp_num - (qm->vfs_num - 1) * vfq_num; else qm->debug.curr_qm_qp_num = vfq_num; } @@ -686,9 +684,8 @@ static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs) static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip) { - struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl; struct hisi_qm *qm = &hisi_zip->qm; - u32 i, num_vfs = ctrl->num_vfs; + u32 i, num_vfs = qm->vfs_num; int ret; for (i = 1; i <= num_vfs; i++) { @@ -697,7 +694,7 @@ static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip) return ret; } - ctrl->num_vfs = 0; + qm->vfs_num = 0; return 0; } @@ -723,7 +720,7 @@ static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs) return ret; } - hisi_zip->ctrl->num_vfs = num_vfs; + hisi_zip->qm.vfs_num = num_vfs; ret = pci_enable_sriov(pdev, num_vfs); if (ret) { @@ -852,7 +849,7 @@ static void hisi_zip_remove(struct pci_dev *pdev) struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); struct hisi_qm *qm = &hisi_zip->qm; - if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0) + if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_zip_sriov_disable(pdev); hisi_zip_debugfs_exit(hisi_zip); -- cgit v1.2.3-59-g8ed1b From cd1b7ae3435cc428579615241adeee36d217320c Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Thu, 2 Apr 2020 14:53:02 +0800 Subject: crypto: hisilicon - unify SR-IOV related codes into QM Clean the duplicate SR-IOV related codes, put all into qm.c. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 105 +---------------------- drivers/crypto/hisilicon/qm.c | 136 ++++++++++++++++++++++++++++-- drivers/crypto/hisilicon/qm.h | 4 +- drivers/crypto/hisilicon/sec2/sec_main.c | 108 +----------------------- drivers/crypto/hisilicon/zip/zip_main.c | 109 +----------------------- 5 files changed, 138 insertions(+), 324 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 5269e5b9be73..4e41d308f0a9 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -792,107 +792,6 @@ err_with_qm_init: return ret; } -static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs) -{ - struct hisi_qm *qm = &hpre->qm; - u32 qp_num = qm->qp_num; - int q_num, remain_q_num, i; - u32 q_base = qp_num; - int ret; - - if (!num_vfs) - return -EINVAL; - - remain_q_num = qm->ctrl_qp_num - qp_num; - - /* If remaining queues are not enough, return error. 
*/ - if (remain_q_num < num_vfs) - return -EINVAL; - - q_num = remain_q_num / num_vfs; - for (i = 1; i <= num_vfs; i++) { - if (i == num_vfs) - q_num += remain_q_num % num_vfs; - ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num); - if (ret) - return ret; - q_base += q_num; - } - - return 0; -} - -static int hpre_clear_vft_config(struct hpre *hpre) -{ - struct hisi_qm *qm = &hpre->qm; - u32 num_vfs = qm->vfs_num; - int ret; - u32 i; - - for (i = 1; i <= num_vfs; i++) { - ret = hisi_qm_set_vft(qm, i, 0, 0); - if (ret) - return ret; - } - qm->vfs_num = 0; - - return 0; -} - -static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs) -{ - struct hpre *hpre = pci_get_drvdata(pdev); - int pre_existing_vfs, num_vfs, ret; - - pre_existing_vfs = pci_num_vf(pdev); - if (pre_existing_vfs) { - pci_err(pdev, - "Can't enable VF. Please disable pre-enabled VFs!\n"); - return 0; - } - - num_vfs = min_t(int, max_vfs, HPRE_VF_NUM); - ret = hpre_vf_q_assign(hpre, num_vfs); - if (ret) { - pci_err(pdev, "Can't assign queues for VF!\n"); - return ret; - } - - hpre->qm.vfs_num = num_vfs; - - ret = pci_enable_sriov(pdev, num_vfs); - if (ret) { - pci_err(pdev, "Can't enable VF!\n"); - hpre_clear_vft_config(hpre); - return ret; - } - - return num_vfs; -} - -static int hpre_sriov_disable(struct pci_dev *pdev) -{ - struct hpre *hpre = pci_get_drvdata(pdev); - - if (pci_vfs_assigned(pdev)) { - pci_err(pdev, "Failed to disable VFs while VFs are assigned!\n"); - return -EPERM; - } - - /* remove in hpre_pci_driver will be called to free VF resources */ - pci_disable_sriov(pdev); - - return hpre_clear_vft_config(hpre); -} - -static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs) -{ - if (num_vfs) - return hpre_sriov_enable(pdev, num_vfs); - else - return hpre_sriov_disable(pdev); -} - static void hpre_remove(struct pci_dev *pdev) { struct hpre *hpre = pci_get_drvdata(pdev); @@ -902,7 +801,7 @@ static void hpre_remove(struct pci_dev *pdev) hpre_algs_unregister(); hisi_qm_del_from_list(qm, &hpre_devices); if (qm->fun_type == QM_HW_PF && qm->vfs_num) { - ret = hpre_sriov_disable(pdev); + ret = hisi_qm_sriov_disable(pdev); if (ret) { pci_err(pdev, "Disable SRIOV fail!\n"); return; @@ -929,7 +828,7 @@ static struct pci_driver hpre_pci_driver = { .id_table = hpre_dev_ids, .probe = hpre_probe, .remove = hpre_remove, - .sriov_configure = hpre_sriov_configure, + .sriov_configure = hisi_qm_sriov_configure, .err_handler = &hpre_err_handler, }; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index f795fb557630..7c2dedc12d13 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1781,12 +1781,6 @@ int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) EXPORT_SYMBOL_GPL(hisi_qm_get_vft); /** - * hisi_qm_set_vft() - Set "virtual function table" for a qm. - * @fun_num: Number of operated function. - * @qm: The qm in which to set vft, alway in a PF. - * @base: The base number of queue in vft. - * @number: The number of queues in vft. 0 means invalid vft. - * * This function is alway called in PF driver, it is used to assign queues * among PF and VFs. 
* @@ -1794,7 +1788,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_get_vft); * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1) * (VF function number 0x2) */ -int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, +static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number) { u32 max_q_num = qm->ctrl_qp_num; @@ -1805,7 +1799,6 @@ int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, return qm_set_sqc_cqc_vft(qm, fun_num, base, number); } -EXPORT_SYMBOL_GPL(hisi_qm_set_vft); static void qm_init_eq_aeq_status(struct hisi_qm *qm) { @@ -2299,6 +2292,133 @@ err: } EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); +static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) +{ + u32 remain_q_num, q_num, i, j; + u32 q_base = qm->qp_num; + int ret; + + if (!num_vfs) + return -EINVAL; + + remain_q_num = qm->ctrl_qp_num - qm->qp_num; + + /* If remain queues not enough, return error. */ + if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs) + return -EINVAL; + + q_num = remain_q_num / num_vfs; + for (i = 1; i <= num_vfs; i++) { + if (i == num_vfs) + q_num += remain_q_num % num_vfs; + ret = hisi_qm_set_vft(qm, i, q_base, q_num); + if (ret) { + for (j = i; j > 0; j--) + hisi_qm_set_vft(qm, j, 0, 0); + return ret; + } + q_base += q_num; + } + + return 0; +} + +static int qm_clear_vft_config(struct hisi_qm *qm) +{ + int ret; + u32 i; + + for (i = 1; i <= qm->vfs_num; i++) { + ret = hisi_qm_set_vft(qm, i, 0, 0); + if (ret) + return ret; + } + qm->vfs_num = 0; + + return 0; +} + +/** + * hisi_qm_sriov_enable() - enable virtual functions + * @pdev: the PCIe device + * @max_vfs: the number of virtual functions to enable + * + * Returns the number of enabled VFs. If there are VFs enabled already or + * max_vfs is more than the total number of device can be enabled, returns + * failure. + */ +int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + int pre_existing_vfs, num_vfs, total_vfs, ret; + + total_vfs = pci_sriov_get_totalvfs(pdev); + pre_existing_vfs = pci_num_vf(pdev); + if (pre_existing_vfs) { + pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", + pre_existing_vfs); + return 0; + } + + num_vfs = min_t(int, max_vfs, total_vfs); + ret = qm_vf_q_assign(qm, num_vfs); + if (ret) { + pci_err(pdev, "Can't assign queues for VF!\n"); + return ret; + } + + qm->vfs_num = num_vfs; + + ret = pci_enable_sriov(pdev, num_vfs); + if (ret) { + pci_err(pdev, "Can't enable VF!\n"); + qm_clear_vft_config(qm); + return ret; + } + + pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs); + + return num_vfs; +} +EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable); + +/** + * hisi_qm_sriov_disable - disable virtual functions + * @pdev: the PCI device + * + * Return failure if there are VFs assigned already. + */ +int hisi_qm_sriov_disable(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + + if (pci_vfs_assigned(pdev)) { + pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n"); + return -EPERM; + } + + /* remove in hpre_pci_driver will be called to free VF resources */ + pci_disable_sriov(pdev); + return qm_clear_vft_config(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable); + +/** + * hisi_qm_sriov_configure - configure the number of VFs + * @pdev: The PCI device + * @num_vfs: The number of VFs need enabled + * + * Enable SR-IOV according to num_vfs, 0 means disable. 
+ */ +int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + if (num_vfs == 0) + return hisi_qm_sriov_disable(pdev); + else + return hisi_qm_sriov_enable(pdev, num_vfs); +} +EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure); + static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm) { u32 err_sts; diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 33c5a8edd2ae..665e53d8d958 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -268,10 +268,12 @@ void hisi_qm_release_qp(struct hisi_qp *qp); int hisi_qp_send(struct hisi_qp *qp, const void *msg); int hisi_qm_get_free_qp_num(struct hisi_qm *qm); int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number); -int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number); int hisi_qm_debug_init(struct hisi_qm *qm); enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev); void hisi_qm_debug_regs_clear(struct hisi_qm *qm); +int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs); +int hisi_qm_sriov_disable(struct pci_dev *pdev); +int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs); void hisi_qm_dev_err_init(struct hisi_qm *qm); void hisi_qm_dev_err_uninit(struct hisi_qm *qm); pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index ef26239ec360..129648a49114 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -892,110 +892,6 @@ err_qm_uninit: return ret; } -/* now we only support equal assignment */ -static int sec_vf_q_assign(struct sec_dev *sec, u32 num_vfs) -{ - struct hisi_qm *qm = &sec->qm; - u32 qp_num = qm->qp_num; - u32 q_base = qp_num; - u32 q_num, remain_q_num; - int i, j, ret; - - if (!num_vfs) - return -EINVAL; - - remain_q_num = qm->ctrl_qp_num - qp_num; - q_num = remain_q_num / num_vfs; - - for (i = 1; i <= num_vfs; i++) { - if (i == num_vfs) - q_num += remain_q_num % num_vfs; - ret = hisi_qm_set_vft(qm, i, q_base, q_num); - if (ret) { - for (j = i; j > 0; j--) - hisi_qm_set_vft(qm, j, 0, 0); - return ret; - } - q_base += q_num; - } - - return 0; -} - -static int sec_clear_vft_config(struct sec_dev *sec) -{ - struct hisi_qm *qm = &sec->qm; - u32 num_vfs = qm->vfs_num; - int ret; - u32 i; - - for (i = 1; i <= num_vfs; i++) { - ret = hisi_qm_set_vft(qm, i, 0, 0); - if (ret) - return ret; - } - - qm->vfs_num = 0; - - return 0; -} - -static int sec_sriov_enable(struct pci_dev *pdev, int max_vfs) -{ - struct sec_dev *sec = pci_get_drvdata(pdev); - int pre_existing_vfs, ret; - u32 num_vfs; - - pre_existing_vfs = pci_num_vf(pdev); - - if (pre_existing_vfs) { - pci_err(pdev, "Can't enable VF. 
Please disable at first!\n"); - return 0; - } - - num_vfs = min_t(u32, max_vfs, SEC_VF_NUM); - - ret = sec_vf_q_assign(sec, num_vfs); - if (ret) { - pci_err(pdev, "Can't assign queues for VF!\n"); - return ret; - } - - sec->qm.vfs_num = num_vfs; - - ret = pci_enable_sriov(pdev, num_vfs); - if (ret) { - pci_err(pdev, "Can't enable VF!\n"); - sec_clear_vft_config(sec); - return ret; - } - - return num_vfs; -} - -static int sec_sriov_disable(struct pci_dev *pdev) -{ - struct sec_dev *sec = pci_get_drvdata(pdev); - - if (pci_vfs_assigned(pdev)) { - pci_err(pdev, "Can't disable VFs while VFs are assigned!\n"); - return -EPERM; - } - - /* remove in sec_pci_driver will be called to free VF resources */ - pci_disable_sriov(pdev); - - return sec_clear_vft_config(sec); -} - -static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs) -{ - if (num_vfs) - return sec_sriov_enable(pdev, num_vfs); - else - return sec_sriov_disable(pdev); -} - static void sec_remove(struct pci_dev *pdev) { struct sec_dev *sec = pci_get_drvdata(pdev); @@ -1006,7 +902,7 @@ static void sec_remove(struct pci_dev *pdev) hisi_qm_del_from_list(qm, &sec_devices); if (qm->fun_type == QM_HW_PF && qm->vfs_num) - (void)sec_sriov_disable(pdev); + hisi_qm_sriov_disable(pdev); sec_debugfs_exit(sec); @@ -1030,7 +926,7 @@ static struct pci_driver sec_pci_driver = { .probe = sec_probe, .remove = sec_remove, .err_handler = &sec_err_handler, - .sriov_configure = sec_sriov_configure, + .sriov_configure = hisi_qm_sriov_configure, }; static void sec_register_debugfs(void) diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index f5ffa0297730..5dcda7b9856c 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -653,101 +653,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) return 0; } -/* Currently we only support equal assignment */ -static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs) -{ - struct hisi_qm *qm = &hisi_zip->qm; - u32 qp_num = qm->qp_num; - u32 q_base = qp_num; - u32 q_num, remain_q_num, i; - int ret; - - if (!num_vfs) - return -EINVAL; - - remain_q_num = qm->ctrl_qp_num - qp_num; - if (remain_q_num < num_vfs) - return -EINVAL; - - q_num = remain_q_num / num_vfs; - for (i = 1; i <= num_vfs; i++) { - if (i == num_vfs) - q_num += remain_q_num % num_vfs; - ret = hisi_qm_set_vft(qm, i, q_base, q_num); - if (ret) - return ret; - q_base += q_num; - } - - return 0; -} - -static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip) -{ - struct hisi_qm *qm = &hisi_zip->qm; - u32 i, num_vfs = qm->vfs_num; - int ret; - - for (i = 1; i <= num_vfs; i++) { - ret = hisi_qm_set_vft(qm, i, 0, 0); - if (ret) - return ret; - } - - qm->vfs_num = 0; - - return 0; -} - -static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs) -{ - struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); - int pre_existing_vfs, num_vfs, ret; - - pre_existing_vfs = pci_num_vf(pdev); - - if (pre_existing_vfs) { - dev_err(&pdev->dev, - "Can't enable VF. 
Please disable pre-enabled VFs!\n"); - return 0; - } - - num_vfs = min_t(int, max_vfs, HZIP_VF_NUM); - - ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs); - if (ret) { - dev_err(&pdev->dev, "Can't assign queues for VF!\n"); - return ret; - } - - hisi_zip->qm.vfs_num = num_vfs; - - ret = pci_enable_sriov(pdev, num_vfs); - if (ret) { - dev_err(&pdev->dev, "Can't enable VF!\n"); - hisi_zip_clear_vft_config(hisi_zip); - return ret; - } - - return num_vfs; -} - -static int hisi_zip_sriov_disable(struct pci_dev *pdev) -{ - struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); - - if (pci_vfs_assigned(pdev)) { - dev_err(&pdev->dev, - "Can't disable VFs while VFs are assigned!\n"); - return -EPERM; - } - - /* remove in hisi_zip_pci_driver will be called to free VF resources */ - pci_disable_sriov(pdev); - - return hisi_zip_clear_vft_config(hisi_zip); -} - static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_zip *hisi_zip; @@ -820,7 +725,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) } if (qm->fun_type == QM_HW_PF && vfs_num > 0) { - ret = hisi_zip_sriov_enable(pdev, vfs_num); + ret = hisi_qm_sriov_enable(pdev, vfs_num); if (ret < 0) goto err_remove_from_list; } @@ -836,21 +741,13 @@ err_qm_uninit: return ret; } -static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs) -{ - if (num_vfs == 0) - return hisi_zip_sriov_disable(pdev); - else - return hisi_zip_sriov_enable(pdev, num_vfs); -} - static void hisi_zip_remove(struct pci_dev *pdev) { struct hisi_zip *hisi_zip = pci_get_drvdata(pdev); struct hisi_qm *qm = &hisi_zip->qm; if (qm->fun_type == QM_HW_PF && qm->vfs_num) - hisi_zip_sriov_disable(pdev); + hisi_qm_sriov_disable(pdev); hisi_zip_debugfs_exit(hisi_zip); hisi_qm_stop(qm); @@ -870,7 +767,7 @@ static struct pci_driver hisi_zip_pci_driver = { .probe = hisi_zip_probe, .remove = hisi_zip_remove, .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ? - hisi_zip_sriov_configure : NULL, + hisi_qm_sriov_configure : NULL, .err_handler = &hisi_zip_err_handler, }; -- cgit v1.2.3-59-g8ed1b From 35ee280fb1fb97ffa32a19953457becb4f45579b Mon Sep 17 00:00:00 2001 From: Hao Fang Date: Thu, 2 Apr 2020 14:53:03 +0800 Subject: crypto: hisilicon - add vfs_num module parameter for hpre/sec The vfs_num module parameter has been used in zip driver, this patch adds this for HPRE and SEC driver. 
Signed-off-by: Hao Fang Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 19 +++++++++++++++++++ drivers/crypto/hisilicon/qm.h | 20 ++++++++++++++++++++ drivers/crypto/hisilicon/sec2/sec_main.c | 18 ++++++++++++++++++ drivers/crypto/hisilicon/zip/zip_main.c | 9 +++++++-- 4 files changed, 64 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 4e41d308f0a9..9cff5c1b7c9b 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -195,6 +195,15 @@ static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM; module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444); MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of CS(1-1024)"); +static const struct kernel_param_ops vfs_num_ops = { + .set = vfs_num_set, + .get = param_get_int, +}; + +static u32 vfs_num; +module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); +MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); + struct hisi_qp *hpre_create_qp(void) { int node = cpu_to_node(smp_processor_id()); @@ -777,8 +786,18 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_err(pdev, "fail to register algs to crypto!\n"); goto err_with_qm_start; } + + if (qm->fun_type == QM_HW_PF && vfs_num) { + ret = hisi_qm_sriov_enable(pdev, vfs_num); + if (ret < 0) + goto err_with_crypto_register; + } + return 0; +err_with_crypto_register: + hpre_algs_unregister(); + err_with_qm_start: hisi_qm_del_from_list(qm, &hpre_devices); hisi_qm_stop(qm); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 665e53d8d958..1b5171b91141 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -8,6 +8,8 @@ #include #include +#define QM_MAX_VFS_NUM_V2 63 + /* qm user domain */ #define QM_ARUSER_M_CFG_1 0x100088 #define AXUSER_SNOOP_ENABLE BIT(30) @@ -235,6 +237,24 @@ struct hisi_qp { struct uacce_queue *uacce_q; }; +static inline int vfs_num_set(const char *val, const struct kernel_param *kp) +{ + u32 n; + int ret; + + if (!val) + return -EINVAL; + + ret = kstrtou32(val, 10, &n); + if (ret < 0) + return ret; + + if (n > QM_MAX_VFS_NUM_V2) + return -EINVAL; + + return param_set_int(val, kp); +} + static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list) { INIT_LIST_HEAD(&qm_list->list); diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 129648a49114..c76c49ed883c 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -207,6 +207,15 @@ static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF; module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444); MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)"); +static const struct kernel_param_ops vfs_num_ops = { + .set = vfs_num_set, + .get = param_get_int, +}; + +static u32 vfs_num; +module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); +MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); + void sec_destroy_qps(struct hisi_qp **qps, int qp_num) { hisi_qm_free_qps(qps, qp_num); @@ -876,8 +885,17 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_remove_from_list; } + if (qm->fun_type == QM_HW_PF && vfs_num) { + ret = hisi_qm_sriov_enable(pdev, vfs_num); + if (ret < 0) + goto err_crypto_unregister; + } + return 0; 
+err_crypto_unregister: + sec_unregister_from_crypto(); + err_remove_from_list: hisi_qm_del_from_list(qm, &sec_devices); sec_debugfs_exit(sec); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 5dcda7b9856c..fe9d6d29eed2 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -231,9 +231,14 @@ static u32 pf_q_num = HZIP_PF_DEF_Q_NUM; module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444); MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)"); +static const struct kernel_param_ops vfs_num_ops = { + .set = vfs_num_set, + .get = param_get_int, +}; + static u32 vfs_num; -module_param(vfs_num, uint, 0444); -MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63)"); +module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); +MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); static const struct pci_device_id hisi_zip_dev_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) }, -- cgit v1.2.3-59-g8ed1b From 6c6dd5802c2d6769fa589c0e8de54299def199a7 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Fri, 3 Apr 2020 16:16:38 +0800 Subject: crypto: hisilicon/qm - add controller reset interface Add the main implementation of the controller reset interface, which is roughly divided into three parts, stop, reset, and reinitialization. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 544 ++++++++++++++++++++++++++++++++++++++++++ drivers/crypto/hisilicon/qm.h | 16 ++ 2 files changed, 560 insertions(+) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 7c2dedc12d13..98e65c5b0c4a 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. 
*/ #include +#include +#include #include #include #include @@ -122,9 +124,11 @@ #define QM_DFX_CNT_CLR_CE 0x100118 #define QM_ABNORMAL_INT_SOURCE 0x100000 +#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(12, 0) #define QM_ABNORMAL_INT_MASK 0x100004 #define QM_ABNORMAL_INT_MASK_VALUE 0x1fff #define QM_ABNORMAL_INT_STATUS 0x100008 +#define QM_ABNORMAL_INT_SET 0x10000c #define QM_ABNORMAL_INF00 0x100010 #define QM_FIFO_OVERFLOW_TYPE 0xc0 #define QM_FIFO_OVERFLOW_TYPE_SHIFT 6 @@ -140,6 +144,25 @@ #define QM_RAS_CE_TIMES_PER_IRQ 1 #define QM_RAS_MSI_INT_SEL 0x1040f4 +#define QM_DEV_RESET_FLAG 0 +#define QM_RESET_WAIT_TIMEOUT 400 +#define QM_PEH_VENDOR_ID 0x1000d8 +#define ACC_VENDOR_ID_VALUE 0x5a5a +#define QM_PEH_DFX_INFO0 0x1000fc +#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3 +#define ACC_PEH_MSI_DISABLE GENMASK(31, 0) +#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1 +#define ACC_MASTER_TRANS_RETURN_RW 3 +#define ACC_MASTER_TRANS_RETURN 0x300150 +#define ACC_MASTER_GLOBAL_CTRL 0x300000 +#define ACC_AM_CFG_PORT_WR_EN 0x30001c +#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT +#define ACC_AM_ROB_ECC_INT_STS 0x300104 +#define ACC_ROB_ECC_ERR_MULTPL BIT(1) + +#define POLL_PERIOD 10 +#define POLL_TIMEOUT 1000 +#define MAX_WAIT_COUNTS 1000 #define QM_CACHE_WB_START 0x204 #define QM_CACHE_WB_DONE 0x208 @@ -1012,10 +1035,18 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, { u32 irq_enable = ce | nfe | fe | msi; u32 irq_unmask = ~irq_enable; + u32 error_status; qm->error_mask = ce | nfe | fe; qm->msi_mask = msi; + /* clear QM hw residual error source */ + error_status = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); + if (error_status) { + error_status &= qm->error_mask; + writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); + } + /* configure error type */ writel(ce, qm->io_base + QM_RAS_CE_ENABLE); writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); @@ -1080,6 +1111,9 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm) error_status = qm->error_mask & tmp; if (error_status) { + if (error_status & QM_ECC_MBIT) + qm->err_status.is_qm_ecc_mbit = true; + qm_log_hw_error(qm, error_status); /* clear err sts */ @@ -1971,6 +2005,52 @@ int hisi_qm_start(struct hisi_qm *qm) } EXPORT_SYMBOL_GPL(hisi_qm_start); +static int qm_restart(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + struct hisi_qp *qp; + int ret, i; + + ret = hisi_qm_start(qm); + if (ret < 0) + return ret; + + write_lock(&qm->qps_lock); + for (i = 0; i < qm->qp_num; i++) { + qp = qm->qp_array[i]; + if (qp) { + ret = hisi_qm_start_qp(qp, 0); + if (ret < 0) { + dev_err(dev, "Failed to start qp%d!\n", i); + + write_unlock(&qm->qps_lock); + return ret; + } + } + } + write_unlock(&qm->qps_lock); + + return 0; +} + +/** + * This function clears all queues memory in a qm. Reset of accelerator can + * use this to clear queues. + */ +static void qm_clear_queues(struct hisi_qm *qm) +{ + struct hisi_qp *qp; + int i; + + for (i = 0; i < qm->qp_num; i++) { + qp = qm->qp_array[i]; + if (qp) + memset(qp->qdma.va, 0, qp->qdma.size); + } + + memset(qm->qdma.va, 0, qm->qdma.size); +} + /** * hisi_qm_stop() - Stop a qm. * @qm: The qm which will be stopped. 
@@ -2014,6 +2094,8 @@ int hisi_qm_stop(struct hisi_qm *qm) dev_err(dev, "Failed to set vft!\n"); } + qm_clear_queues(qm); + return ret; } EXPORT_SYMBOL_GPL(hisi_qm_stop); @@ -2431,6 +2513,9 @@ static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm) /* get device hardware error status */ err_sts = qm->err_ini->get_dev_hw_err_status(qm); if (err_sts) { + if (err_sts & qm->err_ini->err_info.ecc_2bits_mask) + qm->err_status.is_dev_ecc_mbit = true; + if (!qm->err_ini->log_dev_hw_err) { dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n"); return PCI_ERS_RESULT_NEED_RESET; @@ -2481,6 +2566,465 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, } EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); +static int qm_check_req_recv(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + u32 val; + + writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); + ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, + (val == ACC_VENDOR_ID_VALUE), + POLL_PERIOD, POLL_TIMEOUT); + if (ret) { + dev_err(&pdev->dev, "Fails to read QM reg!\n"); + return ret; + } + + writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); + ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, + (val == PCI_VENDOR_ID_HUAWEI), + POLL_PERIOD, POLL_TIMEOUT); + if (ret) + dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); + + return ret; +} + +static int qm_set_pf_mse(struct hisi_qm *qm, bool set) +{ + struct pci_dev *pdev = qm->pdev; + u16 cmd; + int i; + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if (set) + cmd |= PCI_COMMAND_MEMORY; + else + cmd &= ~PCI_COMMAND_MEMORY; + + pci_write_config_word(pdev, PCI_COMMAND, cmd); + for (i = 0; i < MAX_WAIT_COUNTS; i++) { + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1)) + return 0; + + udelay(1); + } + + return -ETIMEDOUT; +} + +static int qm_set_vf_mse(struct hisi_qm *qm, bool set) +{ + struct pci_dev *pdev = qm->pdev; + u16 sriov_ctrl; + int pos; + int i; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); + if (set) + sriov_ctrl |= PCI_SRIOV_CTRL_MSE; + else + sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE; + pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl); + + for (i = 0; i < MAX_WAIT_COUNTS; i++) { + pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); + if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> + ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT) + return 0; + + udelay(1); + } + + return -ETIMEDOUT; +} + +static int qm_set_msi(struct hisi_qm *qm, bool set) +{ + struct pci_dev *pdev = qm->pdev; + + if (set) { + pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, + 0); + } else { + pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, + ACC_PEH_MSI_DISABLE); + if (qm->err_status.is_qm_ecc_mbit || + qm->err_status.is_dev_ecc_mbit) + return 0; + + mdelay(1); + if (readl(qm->io_base + QM_PEH_DFX_INFO0)) + return -EFAULT; + } + + return 0; +} + +static int qm_vf_reset_prepare(struct hisi_qm *qm) +{ + struct hisi_qm_list *qm_list = qm->qm_list; + struct pci_dev *pdev = qm->pdev; + struct pci_dev *virtfn; + struct hisi_qm *vf_qm; + int ret = 0; + + mutex_lock(&qm_list->lock); + list_for_each_entry(vf_qm, &qm_list->list, list) { + virtfn = vf_qm->pdev; + if (virtfn == pdev) + continue; + + if (pci_physfn(virtfn) == pdev) { + ret = hisi_qm_stop(vf_qm); + if (ret) + goto stop_fail; + } + } + +stop_fail: + mutex_unlock(&qm_list->lock); + return ret; +} + 
+static int qm_reset_prepare_ready(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); + int delay = 0; + + /* All reset requests need to be queued for processing */ + while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) { + msleep(++delay); + if (delay > QM_RESET_WAIT_TIMEOUT) + return -EBUSY; + } + + return 0; +} + +static int qm_controller_reset_prepare(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + + ret = qm_reset_prepare_ready(qm); + if (ret) { + pci_err(pdev, "Controller reset not ready!\n"); + return ret; + } + + if (qm->vfs_num) { + ret = qm_vf_reset_prepare(qm); + if (ret) { + pci_err(pdev, "Fails to stop VFs!\n"); + return ret; + } + } + + ret = hisi_qm_stop(qm); + if (ret) { + pci_err(pdev, "Fails to stop QM!\n"); + return ret; + } + + return 0; +} + +static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) +{ + u32 nfe_enb = 0; + + if (!qm->err_status.is_dev_ecc_mbit && + qm->err_status.is_qm_ecc_mbit && + qm->err_ini->close_axi_master_ooo) { + + qm->err_ini->close_axi_master_ooo(qm); + + } else if (qm->err_status.is_dev_ecc_mbit && + !qm->err_status.is_qm_ecc_mbit && + !qm->err_ini->close_axi_master_ooo) { + + nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); + writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, + qm->io_base + QM_RAS_NFE_ENABLE); + writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); + } +} + +static int qm_soft_reset(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + u32 val; + + /* Ensure all doorbells and mailboxes received by QM */ + ret = qm_check_req_recv(qm); + if (ret) + return ret; + + if (qm->vfs_num) { + ret = qm_set_vf_mse(qm, false); + if (ret) { + pci_err(pdev, "Fails to disable vf MSE bit.\n"); + return ret; + } + } + + ret = qm_set_msi(qm, false); + if (ret) { + pci_err(pdev, "Fails to disable PEH MSI bit.\n"); + return ret; + } + + qm_dev_ecc_mbit_handle(qm); + + /* OOO register set and check */ + writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, + qm->io_base + ACC_MASTER_GLOBAL_CTRL); + + /* If bus lock, reset chip */ + ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, + val, + (val == ACC_MASTER_TRANS_RETURN_RW), + POLL_PERIOD, POLL_TIMEOUT); + if (ret) { + pci_emerg(pdev, "Bus lock! 
Please reset system.\n"); + return ret; + } + + ret = qm_set_pf_mse(qm, false); + if (ret) { + pci_err(pdev, "Fails to disable pf MSE bit.\n"); + return ret; + } + + /* The reset related sub-control registers are not in PCI BAR */ + if (ACPI_HANDLE(&pdev->dev)) { + unsigned long long value = 0; + acpi_status s; + + s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), + qm->err_ini->err_info.acpi_rst, + NULL, &value); + if (ACPI_FAILURE(s)) { + pci_err(pdev, "NO controller reset method!\n"); + return -EIO; + } + + if (value) { + pci_err(pdev, "Reset step %llu failed!\n", value); + return -EIO; + } + } else { + pci_err(pdev, "No reset method!\n"); + return -EINVAL; + } + + return 0; +} + +static int qm_vf_reset_done(struct hisi_qm *qm) +{ + struct hisi_qm_list *qm_list = qm->qm_list; + struct pci_dev *pdev = qm->pdev; + struct pci_dev *virtfn; + struct hisi_qm *vf_qm; + int ret = 0; + + mutex_lock(&qm_list->lock); + list_for_each_entry(vf_qm, &qm_list->list, list) { + virtfn = vf_qm->pdev; + if (virtfn == pdev) + continue; + + if (pci_physfn(virtfn) == pdev) { + ret = qm_restart(vf_qm); + if (ret) + goto restart_fail; + } + } + +restart_fail: + mutex_unlock(&qm_list->lock); + return ret; +} + +static int qm_get_dev_err_status(struct hisi_qm *qm) +{ + + return(qm->err_ini->get_dev_hw_err_status(qm) & + qm->err_ini->err_info.ecc_2bits_mask); +} + +static int qm_dev_hw_init(struct hisi_qm *qm) +{ + return qm->err_ini->hw_init(qm); +} + +static void qm_restart_prepare(struct hisi_qm *qm) +{ + u32 value; + + if (!qm->err_status.is_qm_ecc_mbit && + !qm->err_status.is_dev_ecc_mbit) + return; + + /* temporarily close the OOO port used for PEH to write out MSI */ + value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); + writel(value & ~qm->err_ini->err_info.msi_wr_port, + qm->io_base + ACC_AM_CFG_PORT_WR_EN); + + /* clear dev ecc 2bit error source if having */ + value = qm_get_dev_err_status(qm); + if (value && qm->err_ini->clear_dev_hw_err_status) + qm->err_ini->clear_dev_hw_err_status(qm, value); + + /* clear QM ecc mbit error source */ + writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); + + /* clear AM Reorder Buffer ecc mbit source */ + writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); + + if (qm->err_ini->open_axi_master_ooo) + qm->err_ini->open_axi_master_ooo(qm); +} + +static void qm_restart_done(struct hisi_qm *qm) +{ + u32 value; + + if (!qm->err_status.is_qm_ecc_mbit && + !qm->err_status.is_dev_ecc_mbit) + return; + + /* open the OOO port for PEH to write out MSI */ + value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); + value |= qm->err_ini->err_info.msi_wr_port; + writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); + + qm->err_status.is_qm_ecc_mbit = false; + qm->err_status.is_dev_ecc_mbit = false; +} + +static int qm_controller_reset_done(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + + ret = qm_set_msi(qm, true); + if (ret) { + pci_err(pdev, "Fails to enable PEH MSI bit!\n"); + return ret; + } + + ret = qm_set_pf_mse(qm, true); + if (ret) { + pci_err(pdev, "Fails to enable pf MSE bit!\n"); + return ret; + } + + if (qm->vfs_num) { + ret = qm_set_vf_mse(qm, true); + if (ret) { + pci_err(pdev, "Fails to enable vf MSE bit!\n"); + return ret; + } + } + + ret = qm_dev_hw_init(qm); + if (ret) { + pci_err(pdev, "Failed to init device\n"); + return ret; + } + + qm_restart_prepare(qm); + + ret = qm_restart(qm); + if (ret) { + pci_err(pdev, "Failed to start QM!\n"); + return ret; + } + + if (qm->vfs_num) { + ret = qm_vf_q_assign(qm, qm->vfs_num); + 
if (ret) { + pci_err(pdev, "Failed to assign queue!\n"); + return ret; + } + } + + ret = qm_vf_reset_done(qm); + if (ret) { + pci_err(pdev, "Failed to start VFs!\n"); + return -EPERM; + } + + hisi_qm_dev_err_init(qm); + qm_restart_done(qm); + + clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag); + + return 0; +} + +int qm_controller_reset(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + + pci_info(pdev, "Controller resetting...\n"); + + ret = qm_controller_reset_prepare(qm); + if (ret) + return ret; + + ret = qm_soft_reset(qm); + if (ret) { + pci_err(pdev, "Controller reset failed (%d)\n", ret); + return ret; + } + + ret = qm_controller_reset_done(qm); + if (ret) + return ret; + + pci_info(pdev, "Controller reset complete\n"); + + return 0; +} + +/** + * hisi_qm_dev_slot_reset() - slot reset + * @pdev: the PCIe device + * + * This function offers QM relate PCIe device reset interface. Drivers which + * use QM can use this function as slot_reset in its struct pci_error_handlers. + */ +pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev) +{ + struct hisi_qm *qm = pci_get_drvdata(pdev); + int ret; + + if (pdev->is_virtfn) + return PCI_ERS_RESULT_RECOVERED; + + pci_aer_clear_nonfatal_status(pdev); + + /* reset pcie device controller */ + ret = qm_controller_reset(qm); + if (ret) { + pci_err(pdev, "Controller reset failed (%d)\n", ret); + return PCI_ERS_RESULT_DISCONNECT; + } + + return PCI_ERS_RESULT_RECOVERED; +} +EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset); + MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zhou Wang "); MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 1b5171b91141..9d17167840f8 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -133,16 +133,28 @@ struct hisi_qm_status { struct hisi_qm; struct hisi_qm_err_info { + char *acpi_rst; + u32 msi_wr_port; + u32 ecc_2bits_mask; u32 ce; u32 nfe; u32 fe; u32 msi; }; +struct hisi_qm_err_status { + u32 is_qm_ecc_mbit; + u32 is_dev_ecc_mbit; +}; + struct hisi_qm_err_ini { + int (*hw_init)(struct hisi_qm *qm); void (*hw_err_enable)(struct hisi_qm *qm); void (*hw_err_disable)(struct hisi_qm *qm); u32 (*get_dev_hw_err_status)(struct hisi_qm *qm); + void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts); + void (*open_axi_master_ooo)(struct hisi_qm *qm); + void (*close_axi_master_ooo)(struct hisi_qm *qm); void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); struct hisi_qm_err_info err_info; }; @@ -165,6 +177,7 @@ struct hisi_qm { u32 ctrl_qp_num; u32 vfs_num; struct list_head list; + struct hisi_qm_list *qm_list; struct qm_dma qdma; struct qm_sqc *sqc; @@ -178,6 +191,8 @@ struct hisi_qm { struct hisi_qm_status status; const struct hisi_qm_err_ini *err_ini; + struct hisi_qm_err_status err_status; + unsigned long reset_flag; rwlock_t qps_lock; unsigned long *qp_bitmap; @@ -298,6 +313,7 @@ void hisi_qm_dev_err_init(struct hisi_qm *qm); void hisi_qm_dev_err_uninit(struct hisi_qm *qm); pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, pci_channel_state_t state); +pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev); struct hisi_acc_sgl_pool; struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, -- cgit v1.2.3-59-g8ed1b From 84c9b7802b02a0b13de1db262122e0c59f4abd77 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Fri, 3 Apr 2020 16:16:39 +0800 Subject: crypto: hisilicon/zip - add controller reset support for zip Register controller reset handle with 
PCIe AER. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip_main.c | 57 +++++++++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index fe9d6d29eed2..37db11f96fab 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -62,6 +62,7 @@ #define HZIP_CORE_INT_SOURCE 0x3010A0 #define HZIP_CORE_INT_MASK_REG 0x3010A4 +#define HZIP_CORE_INT_SET 0x3010A8 #define HZIP_CORE_INT_STATUS 0x3010AC #define HZIP_CORE_INT_STATUS_M_ECC BIT(1) #define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148 @@ -83,6 +84,9 @@ #define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000 #define SOFT_CTRL_CNT_CLR_CE_BIT BIT(0) +#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C +#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14) +#define HZIP_WR_PORT BIT(11) #define HZIP_BUF_SIZE 22 @@ -254,9 +258,9 @@ int zip_create_qps(struct hisi_qp **qps, int qp_num) return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps); } -static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip) +static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) { - void __iomem *base = hisi_zip->qm.io_base; + void __iomem *base = qm->io_base; /* qm user domain */ writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1); @@ -283,7 +287,7 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip) writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63); writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63); - if (hisi_zip->qm.use_sva) { + if (qm->use_sva) { writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63); writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63); } else { @@ -299,6 +303,8 @@ static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip) writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL); + + return 0; } static void hisi_zip_hw_error_enable(struct hisi_qm *qm) @@ -601,8 +607,6 @@ static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts) } err++; } - - writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE); } static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm) @@ -610,17 +614,56 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm) return readl(qm->io_base + HZIP_CORE_INT_STATUS); } +static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE); +} + +static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm) +{ + u32 val; + + val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); + + writel(val & ~HZIP_AXI_SHUTDOWN_ENABLE, + qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); + + writel(val | HZIP_AXI_SHUTDOWN_ENABLE, + qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); +} + +static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm) +{ + u32 nfe_enb; + + /* Disable ECC Mbit error report. */ + nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC, + qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); + + /* Inject zip ECC Mbit error to block master ooo. 
*/ + writel(HZIP_CORE_INT_STATUS_M_ECC, + qm->io_base + HZIP_CORE_INT_SET); +} + static const struct hisi_qm_err_ini hisi_zip_err_ini = { + .hw_init = hisi_zip_set_user_domain_and_cache, .hw_err_enable = hisi_zip_hw_error_enable, .hw_err_disable = hisi_zip_hw_error_disable, .get_dev_hw_err_status = hisi_zip_get_hw_err_status, + .clear_dev_hw_err_status = hisi_zip_clear_hw_err_status, .log_dev_hw_err = hisi_zip_log_hw_error, + .open_axi_master_ooo = hisi_zip_open_axi_master_ooo, + .close_axi_master_ooo = hisi_zip_close_axi_master_ooo, .err_info = { .ce = QM_BASE_CE, .nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, .fe = 0, .msi = QM_DB_RANDOM_INVALID, + .ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC, + .msi_wr_port = HZIP_WR_PORT, + .acpi_rst = "ZRST", } }; @@ -651,7 +694,7 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) qm->err_ini = &hisi_zip_err_ini; - hisi_zip_set_user_domain_and_cache(hisi_zip); + hisi_zip_set_user_domain_and_cache(qm); hisi_qm_dev_err_init(qm); hisi_zip_debug_regs_clear(hisi_zip); @@ -697,6 +740,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) qm->qp_base = HZIP_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; + qm->qm_list = &zip_devices; } else if (qm->fun_type == QM_HW_VF) { /* * have no way to get qm configure in VM in v1 hardware, @@ -764,6 +808,7 @@ static void hisi_zip_remove(struct pci_dev *pdev) static const struct pci_error_handlers hisi_zip_err_handler = { .error_detected = hisi_qm_dev_err_detected, + .slot_reset = hisi_qm_dev_slot_reset, }; static struct pci_driver hisi_zip_pci_driver = { -- cgit v1.2.3-59-g8ed1b From 1f5c9f34f0cc78e058088090b9d2abca45690e6b Mon Sep 17 00:00:00 2001 From: Hui Tang Date: Fri, 3 Apr 2020 16:16:40 +0800 Subject: crypto: hisilicon/hpre - add controller reset support for HPRE Add support for the controller reset in HPRE driver. 
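For orientation, the HPRE, zip and SEC patches in this series all follow the same recipe: implement the hisi_qm_err_ini callbacks, describe the device in err_info, and point the PCI error handlers at the QM helpers so that the three reset phases (stop, soft reset, reinitialization) are driven by the QM core. A condensed sketch of that recipe follows; the mydrv_* callbacks and MYDRV_ECC_MASK are placeholders standing in for a driver's own code, not functions taken from this series.

#include <linux/pci.h>
#include "qm.h"

/* Sketch only: mydrv_* symbols are hypothetical driver implementations. */
static const struct hisi_qm_err_ini mydrv_err_ini = {
        .hw_init                 = mydrv_set_user_domain_and_cache,
        .hw_err_enable           = mydrv_hw_error_enable,
        .hw_err_disable          = mydrv_hw_error_disable,
        .get_dev_hw_err_status   = mydrv_get_hw_err_status,
        .clear_dev_hw_err_status = mydrv_clear_hw_err_status,
        .log_dev_hw_err          = mydrv_log_hw_error,
        .open_axi_master_ooo     = mydrv_open_axi_master_ooo,
        .err_info = {
                .ce             = QM_BASE_CE,
                .nfe            = QM_BASE_NFE,
                .fe             = 0,
                .msi            = QM_DB_RANDOM_INVALID,
                .ecc_2bits_mask = MYDRV_ECC_MASK,       /* placeholder mask */
                .msi_wr_port    = BIT(0),
                .acpi_rst       = "XRST",               /* placeholder ACPI method */
        },
};

static const struct pci_error_handlers mydrv_err_handler = {
        .error_detected = hisi_qm_dev_err_detected,     /* provided by the QM core */
        .slot_reset     = hisi_qm_dev_slot_reset,       /* provided by the QM core */
};

The driver also sets qm->err_ini and qm->qm_list during PF probe, as the zip patch above does, which is what lets the QM reset path walk the VFs and call back into the device-specific hooks.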
Signed-off-by: Hui Tang Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 46 +++++++++++++++++++++++++------ 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 9cff5c1b7c9b..0d63666ba373 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -59,10 +59,6 @@ #define HPRE_HAC_ECC2_CNT 0x301a08 #define HPRE_HAC_INT_STATUS 0x301800 #define HPRE_HAC_SOURCE_INT 0x301600 -#define MASTER_GLOBAL_CTRL_SHUTDOWN 1 -#define MASTER_TRANS_RETURN_RW 3 -#define HPRE_MASTER_TRANS_RETURN 0x300150 -#define HPRE_MASTER_GLOBAL_CTRL 0x300000 #define HPRE_CLSTR_ADDR_INTRVL 0x1000 #define HPRE_CLUSTER_INQURY 0x100 #define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104 @@ -80,6 +76,13 @@ #define HPRE_BD_USR_MASK 0x3 #define HPRE_CLUSTER_CORE_MASK 0xf +#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044 +#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0) +#define HPRE_WR_MSI_PORT BIT(2) + +#define HPRE_CORE_ECC_2BIT_ERR BIT(1) +#define HPRE_OOO_ECC_2BIT_ERR BIT(5) + #define HPRE_VIA_MSI_DSM 1 static struct hisi_qm_list hpre_devices; @@ -241,9 +244,8 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm) return 0; } -static int hpre_set_user_domain_and_cache(struct hpre *hpre) +static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) { - struct hisi_qm *qm = &hpre->qm; struct device *dev = &qm->pdev->dev; unsigned long offset; int ret, i; @@ -339,6 +341,9 @@ static void hpre_hw_error_disable(struct hisi_qm *qm) static void hpre_hw_error_enable(struct hisi_qm *qm) { + /* clear HPRE hw error source if having */ + writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT); + /* enable hpre hw error interrupts */ writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB); @@ -700,8 +705,6 @@ static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) err->msg, err->int_msk); err++; } - - writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); } static u32 hpre_get_hw_err_status(struct hisi_qm *qm) @@ -709,16 +712,39 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm) return readl(qm->io_base + HPRE_HAC_INT_STATUS); } +static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); +} + +static void hpre_open_axi_master_ooo(struct hisi_qm *qm) +{ + u32 value; + + value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); + writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE, + HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); + writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE, + HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); +} + static const struct hisi_qm_err_ini hpre_err_ini = { + .hw_init = hpre_set_user_domain_and_cache, .hw_err_enable = hpre_hw_error_enable, .hw_err_disable = hpre_hw_error_disable, .get_dev_hw_err_status = hpre_get_hw_err_status, + .clear_dev_hw_err_status = hpre_clear_hw_err_status, .log_dev_hw_err = hpre_log_hw_error, + .open_axi_master_ooo = hpre_open_axi_master_ooo, .err_info = { .ce = QM_BASE_CE, .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, .fe = 0, .msi = QM_DB_RANDOM_INVALID, + .ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | + HPRE_OOO_ECC_2BIT_ERR, + .msi_wr_port = HPRE_WR_MSI_PORT, + .acpi_rst = "HRST", } }; @@ -729,10 +755,11 @@ static int hpre_pf_probe_init(struct hpre *hpre) qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2; - ret = hpre_set_user_domain_and_cache(hpre); + ret = 
hpre_set_user_domain_and_cache(qm); if (ret) return ret; + qm->qm_list = &hpre_devices; qm->err_ini = &hpre_err_ini; hisi_qm_dev_err_init(qm); @@ -840,6 +867,7 @@ static void hpre_remove(struct pci_dev *pdev) static const struct pci_error_handlers hpre_err_handler = { .error_detected = hisi_qm_dev_err_detected, + .slot_reset = hisi_qm_dev_slot_reset, }; static struct pci_driver hpre_pci_driver = { -- cgit v1.2.3-59-g8ed1b From 141876c252a461818c39d45ca30ef1cb7c71953a Mon Sep 17 00:00:00 2001 From: Yang Shen Date: Fri, 3 Apr 2020 16:16:41 +0800 Subject: crypto: hisilicon/sec2 - add controller reset support for SEC2 Add support for controller reset in SEC driver. Signed-off-by: Yang Shen Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_main.c | 40 ++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index c76c49ed883c..07a5f4eb96ff 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -249,9 +249,8 @@ static const struct pci_device_id sec_dev_ids[] = { }; MODULE_DEVICE_TABLE(pci, sec_dev_ids); -static u8 sec_get_endian(struct sec_dev *sec) +static u8 sec_get_endian(struct hisi_qm *qm) { - struct hisi_qm *qm = &sec->qm; u32 reg; /* @@ -279,9 +278,8 @@ static u8 sec_get_endian(struct sec_dev *sec) return SEC_64BE; } -static int sec_engine_init(struct sec_dev *sec) +static int sec_engine_init(struct hisi_qm *qm) { - struct hisi_qm *qm = &sec->qm; int ret; u32 reg; @@ -324,7 +322,7 @@ static int sec_engine_init(struct sec_dev *sec) /* config endian */ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); - reg |= sec_get_endian(sec); + reg |= sec_get_endian(qm); writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); /* Enable sm4 xts mode multiple iv */ @@ -334,10 +332,8 @@ static int sec_engine_init(struct sec_dev *sec) return 0; } -static int sec_set_user_domain_and_cache(struct sec_dev *sec) +static int sec_set_user_domain_and_cache(struct hisi_qm *qm) { - struct hisi_qm *qm = &sec->qm; - /* qm user domain */ writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1); writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE); @@ -358,7 +354,7 @@ static int sec_set_user_domain_and_cache(struct sec_dev *sec) CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL); - return sec_engine_init(sec); + return sec_engine_init(qm); } /* sec_debug_regs_clear() - clear the sec debug regs */ @@ -683,8 +679,6 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) } errs++; } - - writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); } static u32 sec_get_hw_err_status(struct hisi_qm *qm) @@ -692,17 +686,37 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm) return readl(qm->io_base + SEC_CORE_INT_STATUS); } +static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) +{ + writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); +} + +static void sec_open_axi_master_ooo(struct hisi_qm *qm) +{ + u32 val; + + val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); + writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG)); +} + static const struct hisi_qm_err_ini sec_err_ini = { + .hw_init = sec_set_user_domain_and_cache, .hw_err_enable = sec_hw_error_enable, .hw_err_disable = sec_hw_error_disable, 
.get_dev_hw_err_status = sec_get_hw_err_status, + .clear_dev_hw_err_status = sec_clear_hw_err_status, .log_dev_hw_err = sec_log_hw_error, + .open_axi_master_ooo = sec_open_axi_master_ooo, .err_info = { .ce = QM_BASE_CE, .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT | QM_ACC_WB_NOT_READY_TIMEOUT, .fe = 0, .msi = QM_DB_RANDOM_INVALID, + .ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC, + .msi_wr_port = BIT(0), + .acpi_rst = "SRST", } }; @@ -726,7 +740,7 @@ static int sec_pf_probe_init(struct sec_dev *sec) qm->err_ini = &sec_err_ini; - ret = sec_set_user_domain_and_cache(sec); + ret = sec_set_user_domain_and_cache(qm); if (ret) return ret; @@ -783,6 +797,7 @@ static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec) qm->qp_base = SEC_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; + qm->qm_list = &sec_devices; ret = sec_pf_probe_init(sec); if (ret) @@ -936,6 +951,7 @@ static void sec_remove(struct pci_dev *pdev) static const struct pci_error_handlers sec_err_handler = { .error_detected = hisi_qm_dev_err_detected, + .slot_reset = hisi_qm_dev_slot_reset, }; static struct pci_driver sec_pci_driver = { -- cgit v1.2.3-59-g8ed1b From f037fc5f93f4e3c973f8c3b7ebd6ccd123f0944b Mon Sep 17 00:00:00 2001 From: Yang Shen Date: Fri, 3 Apr 2020 16:16:42 +0800 Subject: crypto: hisilicon/qm - stop qp by judging sq and cq tail It is not working well to determine whether the queue is empty based on whether the used count is 0. It is more stable to get if the queue is stopping by checking if the tail pointer of the send queue and the completion queue are equal. Signed-off-by: Yang Shen Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Reviewed-by: Zaibo Xu Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 123 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 114 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 98e65c5b0c4a..80c552523ac4 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -55,6 +55,7 @@ #define QM_SQ_TYPE_SHIFT 8 #define QM_SQ_TYPE_MASK GENMASK(3, 0) +#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1) /* cqc shift */ #define QM_CQ_HOP_NUM_SHIFT 0 @@ -66,6 +67,7 @@ #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1) #define QM_QC_CQE_SIZE 4 +#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1) /* eqc shift */ #define QM_EQE_AEQE_SIZE (2UL << 12) @@ -162,6 +164,8 @@ #define POLL_PERIOD 10 #define POLL_TIMEOUT 1000 +#define WAIT_PERIOD_US_MAX 200 +#define WAIT_PERIOD_US_MIN 100 #define MAX_WAIT_COUNTS 1000 #define QM_CACHE_WB_START 0x204 #define QM_CACHE_WB_DONE 0x208 @@ -1362,6 +1366,107 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) } EXPORT_SYMBOL_GPL(hisi_qm_start_qp); +static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, + dma_addr_t *dma_addr) +{ + struct device *dev = &qm->pdev->dev; + void *ctx_addr; + + ctx_addr = kzalloc(ctx_size, GFP_KERNEL); + if (!ctx_addr) + return ERR_PTR(-ENOMEM); + + *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, *dma_addr)) { + dev_err(dev, "DMA mapping error!\n"); + kfree(ctx_addr); + return ERR_PTR(-ENOMEM); + } + + return ctx_addr; +} + +static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, + const void *ctx_addr, dma_addr_t *dma_addr) +{ + struct device *dev = &qm->pdev->dev; + + dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE); + kfree(ctx_addr); +} + +static int qm_dump_sqc_raw(struct hisi_qm 
*qm, dma_addr_t dma_addr, u16 qp_id) +{ + return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); +} + +static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) +{ + return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); +} + +/** + * Determine whether the queue is cleared by judging the tail pointers of + * sq and cq. + */ +static int qm_drain_qp(struct hisi_qp *qp) +{ + size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc); + struct hisi_qm *qm = qp->qm; + struct device *dev = &qm->pdev->dev; + struct qm_sqc *sqc; + struct qm_cqc *cqc; + dma_addr_t dma_addr; + int ret = 0, i = 0; + void *addr; + + /* + * No need to judge if ECC multi-bit error occurs because the + * master OOO will be blocked. + */ + if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit) + return 0; + + addr = qm_ctx_alloc(qm, size, &dma_addr); + if (IS_ERR(addr)) { + dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n"); + return -ENOMEM; + } + + while (++i) { + ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); + if (ret) { + dev_err_ratelimited(dev, "Failed to dump sqc!\n"); + break; + } + sqc = addr; + + ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)), + qp->qp_id); + if (ret) { + dev_err_ratelimited(dev, "Failed to dump cqc!\n"); + break; + } + cqc = addr + sizeof(struct qm_sqc); + + if ((sqc->tail == cqc->tail) && + (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))) + break; + + if (i == MAX_WAIT_COUNTS) { + dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); + ret = -EBUSY; + break; + } + + usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); + } + + qm_ctx_free(qm, size, addr, &dma_addr); + + return ret; +} + /** * hisi_qm_stop_qp() - Stop a qp in qm. * @qp: The qp we want to stop. @@ -1371,20 +1476,20 @@ EXPORT_SYMBOL_GPL(hisi_qm_start_qp); int hisi_qm_stop_qp(struct hisi_qp *qp) { struct device *dev = &qp->qm->pdev->dev; - int i = 0; + int ret; /* it is stopped */ if (test_bit(QP_STOP, &qp->qp_status.flags)) return 0; - while (atomic_read(&qp->qp_status.used)) { - i++; - msleep(20); - if (i == 10) { - dev_err(dev, "Cannot drain out data for stopping, Force to stop!\n"); - return 0; - } - } + ret = qm_drain_qp(qp); + if (ret) + dev_err(dev, "Failed to drain out data for stopping!\n"); + + if (qp->qm->wq) + flush_workqueue(qp->qm->wq); + else + flush_work(&qp->qm->work); set_bit(QP_STOP, &qp->qp_status.flags); -- cgit v1.2.3-59-g8ed1b From 56e0b6273ec8791ffe1c3cdc5d32fe5d001fd520 Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Sat, 4 Apr 2020 06:07:54 +0800 Subject: crypto: amlogic - Delete duplicate dev_err in meson_crypto_probe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When something goes wrong, platform_get_irq() already prints an error message, so remove the dev_err() here to avoid duplicate output.
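For reference, a minimal sketch of the idiom this and the following patches converge on (hypothetical example_* names, not the amlogic driver): the caller simply propagates the negative error code and leaves the reporting to platform_get_irq() itself.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
        int irq;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;     /* the core has already logged the failure */

        return devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
                                dev_name(&pdev->dev), NULL);
}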
Signed-off-by: Tang Bin Signed-off-by: Herbert Xu --- drivers/crypto/amlogic/amlogic-gxl-core.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index 9d4ead2f7ebb..411857fad8ba 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -253,10 +253,8 @@ static int meson_crypto_probe(struct platform_device *pdev) mc->irqs = devm_kcalloc(mc->dev, MAXFLOW, sizeof(int), GFP_KERNEL); for (i = 0; i < MAXFLOW; i++) { mc->irqs[i] = platform_get_irq(pdev, i); - if (mc->irqs[i] < 0) { - dev_err(mc->dev, "Cannot get IRQ for flow %d\n", i); + if (mc->irqs[i] < 0) return mc->irqs[i]; - } err = devm_request_irq(&pdev->dev, mc->irqs[i], meson_irq_handler, 0, "gxl-crypto", mc); -- cgit v1.2.3-59-g8ed1b From b111418a226f20d3e0b00dbe40bbc4e20b9c2ab5 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 4 Apr 2020 16:45:57 +0200 Subject: hwrng: omap - Delete an error message in of_get_omap_rng_device_details() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function “platform_get_irq” can log an error already. Thus omit a redundant message for the exception handling in the calling function. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Signed-off-by: Herbert Xu --- drivers/char/hw_random/omap-rng.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 65952393e1bb..7290c603fcb8 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -392,11 +392,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv, if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") || of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) { irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "%s: error getting IRQ resource - %d\n", - __func__, irq); + if (irq < 0) return irq; - } err = devm_request_irq(dev, irq, omap4_rng_irq, IRQF_TRIGGER_NONE, dev_name(dev), priv); -- cgit v1.2.3-59-g8ed1b From f72fed86a5ac80445c7723d3fc782325a23bc7db Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 4 Apr 2020 17:07:34 +0200 Subject: hwrng: xgene - Delete an error message in xgene_rng_probe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function “platform_get_irq” can log an error already. Thus omit a redundant message for the exception handling in the calling function. This issue was detected by using the Coccinelle software. 
Signed-off-by: Markus Elfring Signed-off-by: Herbert Xu --- drivers/char/hw_random/xgene-rng.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c index d7516a446987..008e6db9ce01 100644 --- a/drivers/char/hw_random/xgene-rng.c +++ b/drivers/char/hw_random/xgene-rng.c @@ -328,10 +328,8 @@ static int xgene_rng_probe(struct platform_device *pdev) return PTR_ERR(ctx->csr_base); rc = platform_get_irq(pdev, 0); - if (rc < 0) { - dev_err(&pdev->dev, "No IRQ resource\n"); + if (rc < 0) return rc; - } ctx->irq = rc; dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d", -- cgit v1.2.3-59-g8ed1b From 56b80bdee4a16cf330562801667a1e62fe7b9255 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 4 Apr 2020 17:34:53 +0200 Subject: crypto: sun8i-ss - Delete an error message in sun8i_ss_probe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The function “platform_get_irq” can log an error already. Thus omit a redundant message for the exception handling in the calling function. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Acked-by: Corentin Labbe Signed-off-by: Herbert Xu --- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 6b301afffd11..a1fb2fbdbe7b 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -537,10 +537,8 @@ static int sun8i_ss_probe(struct platform_device *pdev) return err; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(ss->dev, "Cannot get SecuritySystem IRQ\n"); + if (irq < 0) return irq; - } ss->reset = devm_reset_control_get(&pdev->dev, NULL); if (IS_ERR(ss->reset)) { -- cgit v1.2.3-59-g8ed1b From beeb460cd12ac9b91640b484b6a52dcba9d9fc8f Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 6 Apr 2020 23:02:40 -0700 Subject: crypto: algapi - Avoid spurious modprobe on LOADED Currently after any algorithm is registered and tested, there's an unnecessary request_module("cryptomgr") even if it's already loaded. Also, CRYPTO_MSG_ALG_LOADED is sent twice, and thus if the algorithm is "crct10dif", lib/crc-t10dif.c replaces the tfm twice rather than once. This occurs because CRYPTO_MSG_ALG_LOADED is sent using crypto_probing_notify(), which tries to load "cryptomgr" if the notification is not handled (NOTIFY_DONE). This doesn't make sense because "cryptomgr" doesn't handle this notification. Fix this by using crypto_notify() instead of crypto_probing_notify(). Fixes: dd8b083f9a5e ("crypto: api - Introduce notifier for new crypto algorithms") Cc: # v4.20+ Cc: Martin K. Petersen Signed-off-by: Eric Biggers Reviewed-by: Martin K. 
Petersen Signed-off-by: Herbert Xu --- crypto/algapi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index 69605e21af92..849254d7e627 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -403,7 +403,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval) err = wait_for_completion_killable(&larval->completion); WARN_ON(err); if (!err) - crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval); + crypto_notify(CRYPTO_MSG_ALG_LOADED, larval); out: crypto_larval_kill(&larval->alg); -- cgit v1.2.3-59-g8ed1b From eebac678556d6927f09a992872f4464cf3aecc76 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 8 Apr 2020 18:26:48 +0200 Subject: crypto: ccp -- don't "select" CONFIG_DMADEVICES DMADEVICES is the top-level option for the slave DMA subsystem, and should not be selected by device drivers, as this can cause circular dependencies such as: drivers/net/ethernet/freescale/Kconfig:6:error: recursive dependency detected! drivers/net/ethernet/freescale/Kconfig:6: symbol NET_VENDOR_FREESCALE depends on PPC_BESTCOMM drivers/dma/bestcomm/Kconfig:6: symbol PPC_BESTCOMM depends on DMADEVICES drivers/dma/Kconfig:6: symbol DMADEVICES is selected by CRYPTO_DEV_SP_CCP drivers/crypto/ccp/Kconfig:10: symbol CRYPTO_DEV_SP_CCP depends on CRYPTO crypto/Kconfig:16: symbol CRYPTO is selected by LIBCRC32C lib/Kconfig:222: symbol LIBCRC32C is selected by LIQUIDIO drivers/net/ethernet/cavium/Kconfig:65: symbol LIQUIDIO depends on PTP_1588_CLOCK drivers/ptp/Kconfig:8: symbol PTP_1588_CLOCK is implied by FEC drivers/net/ethernet/freescale/Kconfig:23: symbol FEC depends on NET_VENDOR_FREESCALE The LIQUIDIO driver causing this problem is addressed in a separate patch, but this change is needed to prevent it from happening again. Using "depends on DMADEVICES" is what we do for all other implementations of slave DMA controllers as well. Fixes: b3c2fee5d66b ("crypto: ccp - Ensure all dependencies are specified") Signed-off-by: Arnd Bergmann Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index e0a8bd15aa74..32268e239bf1 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -10,10 +10,9 @@ config CRYPTO_DEV_CCP_DD config CRYPTO_DEV_SP_CCP bool "Cryptographic Coprocessor device" default y - depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_CCP_DD && DMADEVICES select HW_RANDOM select DMA_ENGINE - select DMADEVICES select CRYPTO_SHA1 select CRYPTO_SHA256 help -- cgit v1.2.3-59-g8ed1b From 6603523bf5e432c7c8490fb500793bb15d4e5f61 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 10 Apr 2020 16:09:42 +1000 Subject: crypto: api - Fix use-after-free and race in crypto_spawn_alg There are two problems in crypto_spawn_alg. First of all it may return spawn->alg even if spawn->dead is set. This results in a double-free as detected by syzbot. Secondly the setting of the DYING flag is racy because we hold the read-lock instead of the write-lock. We should instead call crypto_shoot_alg in a safe manner by gaining a refcount, dropping the lock, and then releasing the refcount. This patch fixes both problems. 
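In outline, the locking pattern the fix adopts looks like the sketch below, written with made-up names (obj, try_get_ref(), get_stable_ref(), do_write_locked_fixup()) rather than the algapi code that follows: pin the object with a reference while the read lock is held, drop the read lock, and only then run the operation that needs the write lock.

        down_read(&sem);                        /* read side only */
        if (!obj->dead && !try_get_ref(obj)) {
                pinned = get_stable_ref(obj);   /* refcount keeps obj alive */
                need_fixup = true;
        }
        up_read(&sem);

        if (need_fixup) {
                do_write_locked_fixup(pinned);  /* takes the write lock itself */
                put_stable_ref(pinned);
        }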
Reported-by: syzbot+fc0674cde00b66844470@syzkaller.appspotmail.com Fixes: 4f87ee118d16 ("crypto: api - Do not zap spawn->alg") Fixes: 73669cc55646 ("crypto: api - Fix race condition in...") Cc: Signed-off-by: Herbert Xu --- crypto/algapi.c | 22 ++++++++++++++++------ crypto/api.c | 3 ++- crypto/internal.h | 1 + 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/crypto/algapi.c b/crypto/algapi.c index 849254d7e627..f1e6ccaff853 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -716,17 +716,27 @@ EXPORT_SYMBOL_GPL(crypto_drop_spawn); static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn) { - struct crypto_alg *alg; + struct crypto_alg *alg = ERR_PTR(-EAGAIN); + struct crypto_alg *target; + bool shoot = false; down_read(&crypto_alg_sem); - alg = spawn->alg; - if (!spawn->dead && !crypto_mod_get(alg)) { - alg->cra_flags |= CRYPTO_ALG_DYING; - alg = NULL; + if (!spawn->dead) { + alg = spawn->alg; + if (!crypto_mod_get(alg)) { + target = crypto_alg_get(alg); + shoot = true; + alg = ERR_PTR(-EAGAIN); + } } up_read(&crypto_alg_sem); - return alg ?: ERR_PTR(-EAGAIN); + if (shoot) { + crypto_shoot_alg(target); + crypto_alg_put(target); + } + + return alg; } struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, diff --git a/crypto/api.c b/crypto/api.c index 7d71a9b10e5f..edcf690800d4 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -333,12 +333,13 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) return len; } -static void crypto_shoot_alg(struct crypto_alg *alg) +void crypto_shoot_alg(struct crypto_alg *alg) { down_write(&crypto_alg_sem); alg->cra_flags |= CRYPTO_ALG_DYING; up_write(&crypto_alg_sem); } +EXPORT_SYMBOL_GPL(crypto_shoot_alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask) diff --git a/crypto/internal.h b/crypto/internal.h index d5ebc60c5143..ff06a3bd1ca1 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -65,6 +65,7 @@ void crypto_alg_tested(const char *name, int err); void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, struct crypto_alg *nalg); void crypto_remove_final(struct list_head *list); +void crypto_shoot_alg(struct crypto_alg *alg); struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); void *crypto_create_tfm(struct crypto_alg *alg, -- cgit v1.2.3-59-g8ed1b From 0a8f5989e03476cfb2a7756e33fa4d0163cb4375 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 11 Apr 2020 14:06:33 +0200 Subject: crypto: marvell/octeontx - Add missing '\n' in log messages Message logged by 'dev_xxx()' or 'pr_xxx()' should end with a '\n'. While at it, I've introduced a few pr_cont that looked logical to me. 
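A small illustration of the two conventions this patch enforces (generic example_* code, not taken from the octeontx driver): every dev_*()/pr_*() message ends with '\n', and pr_cont() is used only to extend the line that was just started.

#include <linux/device.h>
#include <linux/printk.h>

static void example_report_users(struct device *dev, const int *grps, int n)
{
        int i;

        dev_err(dev, "engine group is still in use by group(s):");
        for (i = 0; i < n; i++)
                pr_cont(" %d", grps[i]);        /* stay on the same log line */
        pr_cont("\n");                          /* terminate it explicitly */
}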
Fixes: 10b4f09491bf ("crypto: marvell - add the Virtual Function driver for CPT") Fixes: d9110b0b01ff ("crypto: marvell - add support for OCTEON TX CPT engine") Signed-off-by: Christophe JAILLET Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx/otx_cptpf_main.c | 4 +- drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c | 12 +-- drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 95 +++++++++++----------- drivers/crypto/marvell/octeontx/otx_cptvf_algs.c | 6 +- drivers/crypto/marvell/octeontx/otx_cptvf_main.c | 12 +-- drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c | 10 +-- 6 files changed, 70 insertions(+), 69 deletions(-) diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_main.c b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c index 200fb3303db0..34bb3063eb70 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_main.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_main.c @@ -79,13 +79,13 @@ static int otx_cpt_device_init(struct otx_cpt_device *cpt) /* Check BIST status */ bist = (u64)otx_cpt_check_bist_status(cpt); if (bist) { - dev_err(dev, "RAM BIST failed with code 0x%llx", bist); + dev_err(dev, "RAM BIST failed with code 0x%llx\n", bist); return -ENODEV; } bist = otx_cpt_check_exe_bist_status(cpt); if (bist) { - dev_err(dev, "Engine BIST failed with code 0x%llx", bist); + dev_err(dev, "Engine BIST failed with code 0x%llx\n", bist); return -ENODEV; } diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c index a6774232e9a3..a9e3de65875a 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_mbox.c @@ -63,11 +63,11 @@ static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id) hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8, raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false); if (vf_id >= 0) - pr_debug("MBOX opcode %s received from VF%d raw_data %s", + pr_debug("MBOX opcode %s received from VF%d raw_data %s\n", get_mbox_opcode_str(mbox_msg->msg), vf_id, raw_data_str); else - pr_debug("MBOX opcode %s received from PF raw_data %s", + pr_debug("MBOX opcode %s received from PF raw_data %s\n", get_mbox_opcode_str(mbox_msg->msg), raw_data_str); } @@ -140,20 +140,20 @@ static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp) struct otx_cpt_ucode *ucode; if (q >= cpt->max_vfs) { - dev_err(dev, "Requested queue %d is > than maximum avail %d", + dev_err(dev, "Requested queue %d is > than maximum avail %d\n", q, cpt->max_vfs); return -EINVAL; } if (grp >= OTX_CPT_MAX_ENGINE_GROUPS) { - dev_err(dev, "Requested group %d is > than maximum avail %d", + dev_err(dev, "Requested group %d is > than maximum avail %d\n", grp, OTX_CPT_MAX_ENGINE_GROUPS); return -EINVAL; } eng_grp = &cpt->eng_grps.grp[grp]; if (!eng_grp->is_enabled) { - dev_err(dev, "Requested engine group %d is disabled", grp); + dev_err(dev, "Requested engine group %d is disabled\n", grp); return -EINVAL; } @@ -212,7 +212,7 @@ static void otx_cpt_handle_mbox_intr(struct otx_cpt_device *cpt, int vf) vftype = otx_cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data); if ((vftype != OTX_CPT_AE_TYPES) && (vftype != OTX_CPT_SE_TYPES)) { - dev_err(dev, "VF%d binding to eng group %llu failed", + dev_err(dev, "VF%d binding to eng group %llu failed\n", vf, mbx.data); otx_cptpf_mbox_send_nack(cpt, vf, &mbx); } else { diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c index d04baa319592..fec8f3b9b112 100644 --- 
a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c @@ -62,7 +62,7 @@ static struct otx_cpt_bitmap get_cores_bmap(struct device *dev, int i; if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) { - dev_err(dev, "unsupported number of engines %d on octeontx", + dev_err(dev, "unsupported number of engines %d on octeontx\n", eng_grp->g->engs_num); return bmap; } @@ -78,7 +78,7 @@ static struct otx_cpt_bitmap get_cores_bmap(struct device *dev, } if (!found) - dev_err(dev, "No engines reserved for engine group %d", + dev_err(dev, "No engines reserved for engine group %d\n", eng_grp->idx); return bmap; } @@ -306,7 +306,7 @@ static int process_tar_file(struct device *dev, ucode_size = ntohl(ucode_hdr->code_length) * 2; if (!ucode_size || (size < round_up(ucode_size, 16) + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) { - dev_err(dev, "Ucode %s invalid size", filename); + dev_err(dev, "Ucode %s invalid size\n", filename); return -EINVAL; } @@ -379,18 +379,18 @@ static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch, { struct tar_ucode_info_t *curr; - pr_debug("Tar archive filename %s", tar_filename); - pr_debug("Tar archive pointer %p, size %ld", tar_arch->fw->data, + pr_debug("Tar archive filename %s\n", tar_filename); + pr_debug("Tar archive pointer %p, size %ld\n", tar_arch->fw->data, tar_arch->fw->size); list_for_each_entry(curr, &tar_arch->ucodes, list) { - pr_debug("Ucode filename %s", curr->ucode.filename); - pr_debug("Ucode version string %s", curr->ucode.ver_str); - pr_debug("Ucode version %d.%d.%d.%d", + pr_debug("Ucode filename %s\n", curr->ucode.filename); + pr_debug("Ucode version string %s\n", curr->ucode.ver_str); + pr_debug("Ucode version %d.%d.%d.%d\n", curr->ucode.ver_num.nn, curr->ucode.ver_num.xx, curr->ucode.ver_num.yy, curr->ucode.ver_num.zz); - pr_debug("Ucode type (%d) %s", curr->ucode.type, + pr_debug("Ucode type (%d) %s\n", curr->ucode.type, get_ucode_type_str(curr->ucode.type)); - pr_debug("Ucode size %d", curr->ucode.size); + pr_debug("Ucode size %d\n", curr->ucode.size); pr_debug("Ucode ptr %p\n", curr->ucode_ptr); } } @@ -417,14 +417,14 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev, goto release_tar_arch; if (tar_arch->fw->size < TAR_BLOCK_LEN) { - dev_err(dev, "Invalid tar archive %s ", tar_filename); + dev_err(dev, "Invalid tar archive %s\n", tar_filename); goto release_tar_arch; } tar_size = tar_arch->fw->size; tar_blk = (struct tar_blk_t *) tar_arch->fw->data; if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) { - dev_err(dev, "Unsupported format of tar archive %s", + dev_err(dev, "Unsupported format of tar archive %s\n", tar_filename); goto release_tar_arch; } @@ -437,7 +437,7 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev, if (tar_offs + cur_size > tar_size || tar_offs + 2*TAR_BLOCK_LEN > tar_size) { - dev_err(dev, "Invalid tar archive %s ", tar_filename); + dev_err(dev, "Invalid tar archive %s\n", tar_filename); goto release_tar_arch; } @@ -458,7 +458,7 @@ static struct tar_arch_info_t *load_tar_archive(struct device *dev, /* Check for the end of the archive */ if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) { - dev_err(dev, "Invalid tar archive %s ", tar_filename); + dev_err(dev, "Invalid tar archive %s\n", tar_filename); goto release_tar_arch; } @@ -563,13 +563,13 @@ static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp, static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode) { - pr_debug("Ucode info"); - 
pr_debug("Ucode version string %s", ucode->ver_str); - pr_debug("Ucode version %d.%d.%d.%d", ucode->ver_num.nn, + pr_debug("Ucode info\n"); + pr_debug("Ucode version string %s\n", ucode->ver_str); + pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn, ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz); - pr_debug("Ucode type %s", get_ucode_type_str(ucode->type)); - pr_debug("Ucode size %d", ucode->size); - pr_debug("Ucode virt address %16.16llx", (u64)ucode->align_va); + pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type)); + pr_debug("Ucode size %d\n", ucode->size); + pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va); pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma); } @@ -600,19 +600,19 @@ static void print_dbg_info(struct device *dev, u32 mask[4]; int i, j; - pr_debug("Engine groups global info"); - pr_debug("max SE %d, max AE %d", + pr_debug("Engine groups global info\n"); + pr_debug("max SE %d, max AE %d\n", eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt); - pr_debug("free SE %d", eng_grps->avail.se_cnt); - pr_debug("free AE %d", eng_grps->avail.ae_cnt); + pr_debug("free SE %d\n", eng_grps->avail.se_cnt); + pr_debug("free AE %d\n", eng_grps->avail.ae_cnt); for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) { grp = &eng_grps->grp[i]; - pr_debug("engine_group%d, state %s", i, grp->is_enabled ? + pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ? "enabled" : "disabled"); if (grp->is_enabled) { mirrored_grp = &eng_grps->grp[grp->mirror.idx]; - pr_debug("Ucode0 filename %s, version %s", + pr_debug("Ucode0 filename %s, version %s\n", grp->mirror.is_ena ? mirrored_grp->ucode[0].filename : grp->ucode[0].filename, @@ -626,18 +626,18 @@ static void print_dbg_info(struct device *dev, if (engs->type) { print_engs_info(grp, engs_info, 2*OTX_CPT_UCODE_NAME_LENGTH, j); - pr_debug("Slot%d: %s", j, engs_info); + pr_debug("Slot%d: %s\n", j, engs_info); bitmap_to_arr32(mask, engs->bmap, eng_grps->engs_num); - pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x", + pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n", mask[3], mask[2], mask[1], mask[0]); } else - pr_debug("Slot%d not used", j); + pr_debug("Slot%d not used\n", j); } if (grp->is_enabled) { cpt_print_engines_mask(grp, dev, engs_mask, OTX_CPT_UCODE_NAME_LENGTH); - pr_debug("Cmask: %s", engs_mask); + pr_debug("Cmask: %s\n", engs_mask); } } } @@ -766,7 +766,7 @@ static int check_engines_availability(struct device *dev, if (avail_cnt < req_eng->count) { dev_err(dev, - "Error available %s engines %d < than requested %d", + "Error available %s engines %d < than requested %d\n", get_eng_type_str(req_eng->type), avail_cnt, req_eng->count); return -EBUSY; @@ -867,7 +867,7 @@ static int copy_ucode_to_dma_mem(struct device *dev, OTX_CPT_UCODE_ALIGNMENT, &ucode->dma, GFP_KERNEL); if (!ucode->va) { - dev_err(dev, "Unable to allocate space for microcode"); + dev_err(dev, "Unable to allocate space for microcode\n"); return -ENOMEM; } ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT); @@ -905,15 +905,15 @@ static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode, ucode->size = ntohl(ucode_hdr->code_length) * 2; if (!ucode->size || (fw->size < round_up(ucode->size, 16) + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) { - dev_err(dev, "Ucode %s invalid size", ucode_filename); + dev_err(dev, "Ucode %s invalid size\n", ucode_filename); ret = -EINVAL; goto release_fw; } ret = get_ucode_type(ucode_hdr, &ucode->type); if (ret) { - dev_err(dev, "Microcode %s unknown type 0x%x", 
ucode->filename, - ucode->type); + dev_err(dev, "Microcode %s unknown type 0x%x\n", + ucode->filename, ucode->type); goto release_fw; } @@ -1083,7 +1083,7 @@ static int eng_grp_update_masks(struct device *dev, break; default: - dev_err(dev, "Invalid engine type %d", engs->type); + dev_err(dev, "Invalid engine type %d\n", engs->type); return -EINVAL; } @@ -1142,13 +1142,14 @@ static int delete_engine_group(struct device *dev, return -EINVAL; if (eng_grp->mirror.ref_count) { - dev_err(dev, "Can't delete engine_group%d as it is used by:", + dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):", eng_grp->idx); for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) { if (eng_grp->g->grp[i].mirror.is_ena && eng_grp->g->grp[i].mirror.idx == eng_grp->idx) - dev_err(dev, "engine_group%d", i); + pr_cont(" %d", i); } + pr_cont("\n"); return -EINVAL; } @@ -1182,7 +1183,7 @@ static int validate_1_ucode_scenario(struct device *dev, if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0], engs[i].type)) { dev_err(dev, - "Microcode %s does not support %s engines", + "Microcode %s does not support %s engines\n", eng_grp->ucode[0].filename, get_eng_type_str(engs[i].type)); return -EINVAL; @@ -1220,7 +1221,7 @@ static int create_engine_group(struct device *dev, /* Validate if requested engine types are supported by this device */ for (i = 0; i < engs_cnt; i++) if (!dev_supports_eng_type(eng_grps, engs[i].type)) { - dev_err(dev, "Device does not support %s engines", + dev_err(dev, "Device does not support %s engines\n", get_eng_type_str(engs[i].type)); return -EPERM; } @@ -1228,7 +1229,7 @@ static int create_engine_group(struct device *dev, /* Find engine group which is not used */ eng_grp = find_unused_eng_grp(eng_grps); if (!eng_grp) { - dev_err(dev, "Error all engine groups are being used"); + dev_err(dev, "Error all engine groups are being used\n"); return -ENOSPC; } @@ -1298,11 +1299,11 @@ static int create_engine_group(struct device *dev, eng_grp->is_enabled = true; if (eng_grp->mirror.is_ena) dev_info(dev, - "Engine_group%d: reuse microcode %s from group %d", + "Engine_group%d: reuse microcode %s from group %d\n", eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str, mirrored_eng_grp->idx); else - dev_info(dev, "Engine_group%d: microcode loaded %s", + dev_info(dev, "Engine_group%d: microcode loaded %s\n", eng_grp->idx, eng_grp->ucode[0].ver_str); return 0; @@ -1412,14 +1413,14 @@ static ssize_t ucode_load_store(struct device *dev, } else { if (del_grp_idx < 0 || del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) { - dev_err(dev, "Invalid engine group index %d", + dev_err(dev, "Invalid engine group index %d\n", del_grp_idx); ret = -EINVAL; return ret; } if (!eng_grps->grp[del_grp_idx].is_enabled) { - dev_err(dev, "Error engine_group%d is not configured", + dev_err(dev, "Error engine_group%d is not configured\n", del_grp_idx); ret = -EINVAL; return ret; @@ -1568,7 +1569,7 @@ void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt) udelay(CSR_DELAY); reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY); if (timeout--) { - dev_warn(&cpt->pdev->dev, "Cores still busy"); + dev_warn(&cpt->pdev->dev, "Cores still busy\n"); break; } } @@ -1626,7 +1627,7 @@ int otx_cpt_init_eng_grps(struct pci_dev *pdev, eng_grps->avail.max_ae_cnt; if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) { dev_err(&pdev->dev, - "Number of engines %d > than max supported %d", + "Number of engines %d > than max supported %d\n", eng_grps->engs_num, OTX_CPT_MAX_ENGINES); ret = -EINVAL; goto err; diff --git 
a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index 06202bcffb33..60e744f680d3 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -1660,7 +1660,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, case OTX_CPT_SE_TYPES: count = atomic_read(&se_devices.count); if (count >= CPT_MAX_VF_NUM) { - dev_err(&pdev->dev, "No space to add a new device"); + dev_err(&pdev->dev, "No space to add a new device\n"); ret = -ENOSPC; goto err; } @@ -1687,7 +1687,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod, case OTX_CPT_AE_TYPES: count = atomic_read(&ae_devices.count); if (count >= CPT_MAX_VF_NUM) { - dev_err(&pdev->dev, "No space to a add new device"); + dev_err(&pdev->dev, "No space to a add new device\n"); ret = -ENOSPC; goto err; } @@ -1728,7 +1728,7 @@ void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod, } if (!dev_found) { - dev_err(&pdev->dev, "%s device not found", __func__); + dev_err(&pdev->dev, "%s device not found\n", __func__); goto exit; } diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c index a91860b5dc77..ce3168327a39 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_main.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_main.c @@ -584,7 +584,7 @@ static irqreturn_t cptvf_done_intr_handler(int __always_unused irq, cptvf_write_vq_done_ack(cptvf, intr); wqe = get_cptvf_vq_wqe(cptvf, 0); if (unlikely(!wqe)) { - dev_err(&pdev->dev, "No work to schedule for VF (%d)", + dev_err(&pdev->dev, "No work to schedule for VF (%d)\n", cptvf->vfid); return IRQ_NONE; } @@ -602,7 +602,7 @@ static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec) if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec], GFP_KERNEL)) { dev_err(&pdev->dev, - "Allocation failed for affinity_mask for VF %d", + "Allocation failed for affinity_mask for VF %d\n", cptvf->vfid); return; } @@ -691,7 +691,7 @@ static ssize_t vf_engine_group_store(struct device *dev, return -EINVAL; if (val >= OTX_CPT_MAX_ENGINE_GROUPS) { - dev_err(dev, "Engine group >= than max available groups %d", + dev_err(dev, "Engine group >= than max available groups %d\n", OTX_CPT_MAX_ENGINE_GROUPS); return -EINVAL; } @@ -837,7 +837,7 @@ static int otx_cptvf_probe(struct pci_dev *pdev, cptvf_misc_intr_handler, 0, "CPT VF misc intr", cptvf); if (err) { - dev_err(dev, "Failed to request misc irq"); + dev_err(dev, "Failed to request misc irq\n"); goto free_vectors; } @@ -854,7 +854,7 @@ static int otx_cptvf_probe(struct pci_dev *pdev, cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE; err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF); if (err) { - dev_err(dev, "cptvf_sw_init() failed"); + dev_err(dev, "cptvf_sw_init() failed\n"); goto free_misc_irq; } /* Convey VQ LEN to PF */ @@ -946,7 +946,7 @@ static void otx_cptvf_remove(struct pci_dev *pdev) /* Convey DOWN to PF */ if (otx_cptvf_send_vf_down(cptvf)) { - dev_err(&pdev->dev, "PF not responding to DOWN msg"); + dev_err(&pdev->dev, "PF not responding to DOWN msg\n"); } else { sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group); otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype); diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c index df839b880354..239195cccf93 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c +++ 
b/drivers/crypto/marvell/octeontx/otx_cptvf_reqmgr.c @@ -314,7 +314,7 @@ static int process_request(struct pci_dev *pdev, struct otx_cpt_req_info *req, GFP_ATOMIC; ret = setup_sgio_list(pdev, &info, req, gfp); if (unlikely(ret)) { - dev_err(&pdev->dev, "Setting up SG list failed"); + dev_err(&pdev->dev, "Setting up SG list failed\n"); goto request_cleanup; } cpt_req->dlen = info->dlen; @@ -410,17 +410,17 @@ int otx_cpt_do_request(struct pci_dev *pdev, struct otx_cpt_req_info *req, struct otx_cptvf *cptvf = pci_get_drvdata(pdev); if (!otx_cpt_device_ready(cptvf)) { - dev_err(&pdev->dev, "CPT Device is not ready"); + dev_err(&pdev->dev, "CPT Device is not ready\n"); return -ENODEV; } if ((cptvf->vftype == OTX_CPT_SE_TYPES) && (!req->ctrl.s.se_req)) { - dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request", + dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request\n", cptvf->vfid); return -EINVAL; } else if ((cptvf->vftype == OTX_CPT_AE_TYPES) && (req->ctrl.s.se_req)) { - dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request", + dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request\n", cptvf->vfid); return -EINVAL; } @@ -461,7 +461,7 @@ static int cpt_process_ccode(struct pci_dev *pdev, /* check for timeout */ if (time_after_eq(jiffies, cpt_info->time_in + OTX_CPT_COMMAND_TIMEOUT * HZ)) - dev_warn(&pdev->dev, "Request timed out 0x%p", req); + dev_warn(&pdev->dev, "Request timed out 0x%p\n", req); else if (cpt_info->extra_time < OTX_CPT_TIME_IN_RESET_COUNT) { cpt_info->time_in = jiffies; cpt_info->extra_time++; -- cgit v1.2.3-59-g8ed1b From f88480e300ac13141aa84f0f70b745df2e11b203 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Fri, 17 Apr 2020 15:08:31 +0800 Subject: crypto: hisilicon/qm - fix build failure with ACPI off Add Kconfig dependency to fix kbuild warnings. Fixes: 6c6dd5802c2d ("crypto: hisilicon/qm - add controller reset interface") Reported-by: kbuild test robot Reported-by: Stephen Rothwell Signed-off-by: Shukun Tan Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index f09c6cf7823e..99e962e39f36 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -42,6 +42,7 @@ config CRYPTO_DEV_HISI_QM depends on ARM64 || COMPILE_TEST depends on PCI && PCI_MSI depends on UACCE || UACCE=n + depends on ACPI help HiSilicon accelerator engines use a common queue management interface. Specific engine driver may use this module. -- cgit v1.2.3-59-g8ed1b From 3357b61177a7f34267256098b29a7f4992af40f3 Mon Sep 17 00:00:00 2001 From: Hadar Gat Date: Fri, 17 Apr 2020 23:38:45 +0300 Subject: hwrng: cctrng - add missing include to linux/fips.h This fixes build failure when CONFIG_CRYPTO_FIPS is defined. 
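(For context on this fix: the fips_enabled flag, and the fips_fail_notify() helper this driver also uses, come from <linux/fips.h>. At this point in the series the reference still sits inside an #ifdef CONFIG_CRYPTO_FIPS block, so the missing declaration only surfaces as a build failure once that option is enabled.)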
Fixes: a583ed310bb6 ("hwrng: cctrng - introduce Arm CryptoCell driver") Signed-off-by: Hadar Gat Reported-by: Randy Dunlap Reported-by: kbuild test robot Acked-by: Randy Dunlap # build-tested Signed-off-by: Herbert Xu --- drivers/char/hw_random/cctrng.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index bdcd56243104..e82716c12c3a 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "cctrng.h" -- cgit v1.2.3-59-g8ed1b From c73d1871503713c7ee37da9fde155322dc50280f Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 21 Apr 2020 14:56:49 +1000 Subject: crypto: hisilicon/qm - add more ACPI dependencies due to the selects of CRYPTO_DEV_HISI_QM which now depends on ACPI Fixes: 6c6dd5802c2d ("crypto: hisilicon/qm - add controller reset...") Signed-off-by: Stephen Rothwell Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/Kconfig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 99e962e39f36..9c3b3ca815e6 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -29,6 +29,7 @@ config CRYPTO_DEV_HISI_SEC2 depends on PCI && PCI_MSI depends on UACCE || UACCE=n depends on ARM64 || (COMPILE_TEST && 64BIT) + depends on ACPI help Support for HiSilicon SEC Engine of version 2 in crypto subsystem. It provides AES, SM4, and 3DES algorithms with ECB @@ -53,6 +54,7 @@ config CRYPTO_DEV_HISI_ZIP depends on ARM64 || (COMPILE_TEST && 64BIT) depends on !CPU_BIG_ENDIAN || COMPILE_TEST depends on UACCE || UACCE=n + depends on ACPI select CRYPTO_DEV_HISI_QM help Support for HiSilicon ZIP Driver @@ -62,6 +64,7 @@ config CRYPTO_DEV_HISI_HPRE depends on PCI && PCI_MSI depends on UACCE || UACCE=n depends on ARM64 || (COMPILE_TEST && 64BIT) + depends on ACPI select CRYPTO_DEV_HISI_QM select CRYPTO_DH select CRYPTO_RSA -- cgit v1.2.3-59-g8ed1b From 3adbbd2295ad138ab95404d0ffc6bfc1b8f9f1e4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 21 Apr 2020 15:42:09 +1000 Subject: hwrng: cctrng - Add dependency on HAS_IOMEM The cctrng doesn't compile without HAS_IOMEM so we should depend on it. Reported-by: kbuild test robot Fixes: a583ed310bb6 ("hwrng: cctrng - introduce Arm CryptoCell driver") Signed-off-by: Herbert Xu Acked-by: Hadar Gat Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 848f26f78dc1..0c99735df694 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -476,6 +476,7 @@ config HW_RANDOM_KEYSTONE config HW_RANDOM_CCTRNG tristate "Arm CryptoCell True Random Number Generator support" + depends on HAS_IOMEM default HW_RANDOM help This driver provides support for the True Random Number -- cgit v1.2.3-59-g8ed1b From 3ca73b70a3a98da739f4ebcff381d13bb7cd339f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 14 Apr 2020 19:20:08 +0100 Subject: crypto: arm64 - Consistently enable extension Currently most of the crypto files enable the crypto extension using the .arch directive but crct10dif-ce-core.S uses .cpu instead. Move that over to .arch for consistency. 
Signed-off-by: Mark Brown Acked-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/arm64/crypto/crct10dif-ce-core.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S index 5a95c2628fbf..111d9c9abddd 100644 --- a/arch/arm64/crypto/crct10dif-ce-core.S +++ b/arch/arm64/crypto/crct10dif-ce-core.S @@ -66,7 +66,7 @@ #include .text - .cpu generic+crypto + .arch armv8-a+crypto init_crc .req w19 buf .req x20 -- cgit v1.2.3-59-g8ed1b From 8a656a48f75f193b901efe14326663505874c37a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 15 Apr 2020 23:49:47 +0100 Subject: crypto: chelsio - remove redundant assignment to variable error The variable error is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- drivers/crypto/chelsio/chcr_algo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index c29b80dd30d8..5d3000fdd5f4 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -1757,7 +1757,7 @@ static int chcr_ahash_final(struct ahash_request *req) struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm)); struct chcr_context *ctx = h_ctx(rtfm); u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); - int error = -EINVAL; + int error; unsigned int cpu; cpu = get_cpu(); -- cgit v1.2.3-59-g8ed1b From 63e05f3275172283fd0020dba678ef8eca869ef7 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 16 Apr 2020 00:03:58 +0100 Subject: crypto: algif_rng - remove redundant assignment to variable err The variable err is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Herbert Xu --- crypto/algif_rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c index 22df3799a17b..087c0ad09d38 100644 --- a/crypto/algif_rng.c +++ b/crypto/algif_rng.c @@ -61,7 +61,7 @@ static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct rng_ctx *ctx = ask->private; - int err = -EFAULT; + int err; int genlen = 0; u8 result[MAXSIZE]; -- cgit v1.2.3-59-g8ed1b From 764428fe99e82ce9a57ca22fb8adc3370922348d Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Fri, 17 Apr 2020 21:33:33 +0200 Subject: crypto: jitter - SP800-90B compliance SP800-90B specifies various requirements for the noise source(s) that may seed any DRNG including SP800-90A DRBGs. In November 2020, SP800-90B will be mandated for all noise sources that provide entropy to DRBGs as part of a FIPS 140-[2|3] validation or other evaluation types. Without SP800-90B compliance, a noise source is defined to always deliver zero bits of entropy. This patch ports the SP800-90B compliance from the user space Jitter RNG version 2.2.0. The following changes are applied: - addition of (an enhanced version of) the repetitive count test (RCT) from SP800-90B section 4.4.1 - the enhancement is due to the fact of using the stuck test as input to the RCT. 
- addition of the adaptive proportion test (APT) from SP800-90B section 4.4.2 - update of the power-on self test to perform a test measurement of 1024 noise samples compliant to SP800-90B section 4.3 - remove of the continuous random number generator test which is replaced by APT and RCT Health test failures due to the SP800-90B operation are only enforced in FIPS mode. If a runtime health test failure is detected, the Jitter RNG is reset. If more than 1024 resets in a row are performed, a permanent error is returned to the caller. Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/jitterentropy-kcapi.c | 27 +++ crypto/jitterentropy.c | 417 ++++++++++++++++++++++++++++++++----------- 2 files changed, 343 insertions(+), 101 deletions(-) diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c index a5ce8f96790f..b43684c0dade 100644 --- a/crypto/jitterentropy-kcapi.c +++ b/crypto/jitterentropy-kcapi.c @@ -108,6 +108,7 @@ void jent_get_nstime(__u64 *out) struct jitterentropy { spinlock_t jent_lock; struct rand_data *entropy_collector; + unsigned int reset_cnt; }; static int jent_kcapi_init(struct crypto_tfm *tfm) @@ -142,7 +143,33 @@ static int jent_kcapi_random(struct crypto_rng *tfm, int ret = 0; spin_lock(&rng->jent_lock); + + /* Return a permanent error in case we had too many resets in a row. */ + if (rng->reset_cnt > (1<<10)) { + ret = -EFAULT; + goto out; + } + ret = jent_read_entropy(rng->entropy_collector, rdata, dlen); + + /* Reset RNG in case of health failures */ + if (ret < -1) { + pr_warn_ratelimited("Reset Jitter RNG due to health test failure: %s failure\n", + (ret == -2) ? "Repetition Count Test" : + "Adaptive Proportion Test"); + + rng->reset_cnt++; + + ret = -EAGAIN; + } else { + rng->reset_cnt = 0; + + /* Convert the Jitter RNG error into a usable error code */ + if (ret == -1) + ret = -EINVAL; + } + +out: spin_unlock(&rng->jent_lock); return ret; diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 042157f0d28b..57f4a1ac738b 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -2,7 +2,7 @@ * Non-physical true random number generator based on timing jitter -- * Jitter RNG standalone code. * - * Copyright Stephan Mueller , 2015 - 2019 + * Copyright Stephan Mueller , 2015 - 2020 * * Design * ====== @@ -47,7 +47,7 @@ /* * This Jitterentropy RNG is based on the jitterentropy library - * version 2.1.2 provided at http://www.chronox.de/jent.html + * version 2.2.0 provided at http://www.chronox.de/jent.html */ #ifdef __OPTIMIZE__ @@ -83,6 +83,22 @@ struct rand_data { unsigned int memblocksize; /* Size of one memory block in bytes */ unsigned int memaccessloops; /* Number of memory accesses per random * bit generation */ + + /* Repetition Count Test */ + int rct_count; /* Number of stuck values */ + + /* Adaptive Proportion Test for a significance level of 2^-30 */ +#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */ +#define JENT_APT_WINDOW_SIZE 512 /* Data window size */ + /* LSB of time stamp to process */ +#define JENT_APT_LSB 16 +#define JENT_APT_WORD_MASK (JENT_APT_LSB - 1) + unsigned int apt_observations; /* Number of collected observations */ + unsigned int apt_count; /* APT counter */ + unsigned int apt_base; /* APT base reference */ + unsigned int apt_base_set:1; /* APT base reference set? 
*/ + + unsigned int health_failure:1; /* Permanent health failure */ }; /* Flags that can be used to initialize the RNG */ @@ -98,12 +114,201 @@ struct rand_data { * variations (2nd derivation of time is * zero). */ #define JENT_ESTUCK 8 /* Too many stuck results during init. */ +#define JENT_EHEALTH 9 /* Health test failed during initialization */ +#define JENT_ERCT 10 /* RCT failed during initialization */ + +#include "jitterentropy.h" /*************************************************************************** - * Helper functions + * Adaptive Proportion Test + * + * This test complies with SP800-90B section 4.4.2. ***************************************************************************/ -#include "jitterentropy.h" +/** + * Reset the APT counter + * + * @ec [in] Reference to entropy collector + */ +static void jent_apt_reset(struct rand_data *ec, unsigned int delta_masked) +{ + /* Reset APT counter */ + ec->apt_count = 0; + ec->apt_base = delta_masked; + ec->apt_observations = 0; +} + +/** + * Insert a new entropy event into APT + * + * @ec [in] Reference to entropy collector + * @delta_masked [in] Masked time delta to process + */ +static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked) +{ + /* Initialize the base reference */ + if (!ec->apt_base_set) { + ec->apt_base = delta_masked; + ec->apt_base_set = 1; + return; + } + + if (delta_masked == ec->apt_base) { + ec->apt_count++; + + if (ec->apt_count >= JENT_APT_CUTOFF) + ec->health_failure = 1; + } + + ec->apt_observations++; + + if (ec->apt_observations >= JENT_APT_WINDOW_SIZE) + jent_apt_reset(ec, delta_masked); +} + +/*************************************************************************** + * Stuck Test and its use as Repetition Count Test + * + * The Jitter RNG uses an enhanced version of the Repetition Count Test + * (RCT) specified in SP800-90B section 4.4.1. Instead of counting identical + * back-to-back values, the input to the RCT is the counting of the stuck + * values during the generation of one Jitter RNG output block. + * + * The RCT is applied with an alpha of 2^{-30} compliant to FIPS 140-2 IG 9.8. + * + * During the counting operation, the Jitter RNG always calculates the RCT + * cut-off value of C. If that value exceeds the allowed cut-off value, + * the Jitter RNG output block will be calculated completely but discarded at + * the end. The caller of the Jitter RNG is informed with an error code. + ***************************************************************************/ + +/** + * Repetition Count Test as defined in SP800-90B section 4.4.1 + * + * @ec [in] Reference to entropy collector + * @stuck [in] Indicator whether the value is stuck + */ +static void jent_rct_insert(struct rand_data *ec, int stuck) +{ + /* + * If we have a count less than zero, a previous RCT round identified + * a failure. We will not overwrite it. + */ + if (ec->rct_count < 0) + return; + + if (stuck) { + ec->rct_count++; + + /* + * The cutoff value is based on the following consideration: + * alpha = 2^-30 as recommended in FIPS 140-2 IG 9.8. + * In addition, we require an entropy value H of 1/OSR as this + * is the minimum entropy required to provide full entropy. + * Note, we collect 64 * OSR deltas for inserting them into + * the entropy pool which should then have (close to) 64 bits + * of entropy. + * + * Note, ec->rct_count (which equals to value B in the pseudo + * code of SP800-90B section 4.4.1) starts with zero. 
Hence + * we need to subtract one from the cutoff value as calculated + * following SP800-90B. + */ + if ((unsigned int)ec->rct_count >= (31 * ec->osr)) { + ec->rct_count = -1; + ec->health_failure = 1; + } + } else { + ec->rct_count = 0; + } +} + +/** + * Is there an RCT health test failure? + * + * @ec [in] Reference to entropy collector + * + * @return + * 0 No health test failure + * 1 Permanent health test failure + */ +static int jent_rct_failure(struct rand_data *ec) +{ + if (ec->rct_count < 0) + return 1; + return 0; +} + +static inline __u64 jent_delta(__u64 prev, __u64 next) +{ +#define JENT_UINT64_MAX (__u64)(~((__u64) 0)) + return (prev < next) ? (next - prev) : + (JENT_UINT64_MAX - prev + 1 + next); +} + +/** + * Stuck test by checking the: + * 1st derivative of the jitter measurement (time delta) + * 2nd derivative of the jitter measurement (delta of time deltas) + * 3rd derivative of the jitter measurement (delta of delta of time deltas) + * + * All values must always be non-zero. + * + * @ec [in] Reference to entropy collector + * @current_delta [in] Jitter time delta + * + * @return + * 0 jitter measurement not stuck (good bit) + * 1 jitter measurement stuck (reject bit) + */ +static int jent_stuck(struct rand_data *ec, __u64 current_delta) +{ + __u64 delta2 = jent_delta(ec->last_delta, current_delta); + __u64 delta3 = jent_delta(ec->last_delta2, delta2); + unsigned int delta_masked = current_delta & JENT_APT_WORD_MASK; + + ec->last_delta = current_delta; + ec->last_delta2 = delta2; + + /* + * Insert the result of the comparison of two back-to-back time + * deltas. + */ + jent_apt_insert(ec, delta_masked); + + if (!current_delta || !delta2 || !delta3) { + /* RCT with a stuck bit */ + jent_rct_insert(ec, 1); + return 1; + } + + /* RCT with a non-stuck bit */ + jent_rct_insert(ec, 0); + + return 0; +} + +/** + * Report any health test failures + * + * @ec [in] Reference to entropy collector + * + * @return + * 0 No health test failure + * 1 Permanent health test failure + */ +static int jent_health_failure(struct rand_data *ec) +{ + /* Test is only enabled in FIPS mode */ + if (!jent_fips_enabled()) + return 0; + + return ec->health_failure; +} + +/*************************************************************************** + * Noise sources + ***************************************************************************/ /** * Update of the loop count used for the next round of @@ -148,10 +353,6 @@ static __u64 jent_loop_shuffle(struct rand_data *ec, return (shuffle + (1<data * * @return Number of loops the folding operation is performed */ -static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt) +static void jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt, + int stuck) { unsigned int i; __u64 j = 0; @@ -220,9 +422,17 @@ static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt) new ^= tmp; } } - ec->data = new; - return fold_loop_cnt; + /* + * If the time stamp is stuck, do not finally insert the value into + * the entropy pool. Although this operation should not do any harm + * even when the time stamp has no entropy, SP800-90B requires that + * any conditioning operation (SP800-90B considers the LFSR to be a + * conditioning operation) to have an identical amount of input + * data according to section 3.1.5. 
+ */ + if (!stuck) + ec->data = new; } /** @@ -243,16 +453,13 @@ static __u64 jent_lfsr_time(struct rand_data *ec, __u64 time, __u64 loop_cnt) * to reliably access either L3 or memory, the ec->mem memory must be quite * large which is usually not desirable. * - * Input: - * @ec Reference to the entropy collector with the memory access data -- if - * the reference to the memory block to be accessed is NULL, this noise - * source is disabled - * @loop_cnt if a value not equal to 0 is set, use the given value as number of - * loops to perform the folding - * - * @return Number of memory access operations + * @ec [in] Reference to the entropy collector with the memory access data -- if + * the reference to the memory block to be accessed is NULL, this noise + * source is disabled + * @loop_cnt [in] if a value not equal to 0 is set, use the given value + * number of loops to perform the LFSR */ -static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) +static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt) { unsigned int wrap = 0; __u64 i = 0; @@ -262,7 +469,7 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) jent_loop_shuffle(ec, MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT); if (NULL == ec || NULL == ec->mem) - return 0; + return; wrap = ec->memblocksize * ec->memblocks; /* @@ -288,43 +495,11 @@ static unsigned int jent_memaccess(struct rand_data *ec, __u64 loop_cnt) ec->memlocation = ec->memlocation + ec->memblocksize - 1; ec->memlocation = ec->memlocation % wrap; } - return i; } /*************************************************************************** * Start of entropy processing logic ***************************************************************************/ - -/** - * Stuck test by checking the: - * 1st derivation of the jitter measurement (time delta) - * 2nd derivation of the jitter measurement (delta of time deltas) - * 3rd derivation of the jitter measurement (delta of delta of time deltas) - * - * All values must always be non-zero. - * - * Input: - * @ec Reference to entropy collector - * @current_delta Jitter time delta - * - * @return - * 0 jitter measurement not stuck (good bit) - * 1 jitter measurement stuck (reject bit) - */ -static int jent_stuck(struct rand_data *ec, __u64 current_delta) -{ - __s64 delta2 = ec->last_delta - current_delta; - __s64 delta3 = delta2 - ec->last_delta2; - - ec->last_delta = current_delta; - ec->last_delta2 = delta2; - - if (!current_delta || !delta2 || !delta3) - return 1; - - return 0; -} - /** * This is the heart of the entropy generation: calculate time deltas and * use the CPU jitter in the time deltas. The jitter is injected into the @@ -334,8 +509,7 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) * of this function! This can be done by calling this function * and not using its result. * - * Input: - * @entropy_collector Reference to entropy collector + * @ec [in] Reference to entropy collector * * @return result of stuck test */ @@ -343,6 +517,7 @@ static int jent_measure_jitter(struct rand_data *ec) { __u64 time = 0; __u64 current_delta = 0; + int stuck; /* Invoke one noise source before time measurement to add variations */ jent_memaccess(ec, 0); @@ -352,22 +527,23 @@ static int jent_measure_jitter(struct rand_data *ec) * invocation to measure the timing variations */ jent_get_nstime(&time); - current_delta = time - ec->prev_time; + current_delta = jent_delta(ec->prev_time, time); ec->prev_time = time; + /* Check whether we have a stuck measurement. 
*/ + stuck = jent_stuck(ec, current_delta); + /* Now call the next noise sources which also injects the data */ - jent_lfsr_time(ec, current_delta, 0); + jent_lfsr_time(ec, current_delta, 0, stuck); - /* Check whether we have a stuck measurement. */ - return jent_stuck(ec, current_delta); + return stuck; } /** * Generator of one 64 bit random number * Function fills rand_data->data * - * Input: - * @ec Reference to entropy collector + * @ec [in] Reference to entropy collector */ static void jent_gen_entropy(struct rand_data *ec) { @@ -390,31 +566,6 @@ static void jent_gen_entropy(struct rand_data *ec) } } -/** - * The continuous test required by FIPS 140-2 -- the function automatically - * primes the test if needed. - * - * Return: - * returns normally if FIPS test passed - * panics the kernel if FIPS test failed - */ -static void jent_fips_test(struct rand_data *ec) -{ - if (!jent_fips_enabled()) - return; - - /* prime the FIPS test */ - if (!ec->old_data) { - ec->old_data = ec->data; - jent_gen_entropy(ec); - } - - if (ec->data == ec->old_data) - jent_panic("jitterentropy: Duplicate output detected\n"); - - ec->old_data = ec->data; -} - /** * Entry function: Obtain entropy for the caller. * @@ -425,17 +576,18 @@ static void jent_fips_test(struct rand_data *ec) * This function truncates the last 64 bit entropy value output to the exact * size specified by the caller. * - * Input: - * @ec Reference to entropy collector - * @data pointer to buffer for storing random data -- buffer must already - * exist - * @len size of the buffer, specifying also the requested number of random - * in bytes + * @ec [in] Reference to entropy collector + * @data [in] pointer to buffer for storing random data -- buffer must already + * exist + * @len [in] size of the buffer, specifying also the requested number of random + * in bytes * * @return 0 when request is fulfilled or an error * * The following error codes can occur: * -1 entropy_collector is NULL + * -2 RCT failed + * -3 APT test failed */ int jent_read_entropy(struct rand_data *ec, unsigned char *data, unsigned int len) @@ -449,7 +601,42 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, unsigned int tocopy; jent_gen_entropy(ec); - jent_fips_test(ec); + + if (jent_health_failure(ec)) { + int ret; + + if (jent_rct_failure(ec)) + ret = -2; + else + ret = -3; + + /* + * Re-initialize the noise source + * + * If the health test fails, the Jitter RNG remains + * in failure state and will return a health failure + * during next invocation. + */ + if (jent_entropy_init()) + return ret; + + /* Set APT to initial state */ + jent_apt_reset(ec, 0); + ec->apt_base_set = 0; + + /* Set RCT to initial state */ + ec->rct_count = 0; + + /* Re-enable Jitter RNG */ + ec->health_failure = 0; + + /* + * Return the health test failure status to the + * caller as the generated value is not appropriate. + */ + return ret; + } + if ((DATA_SIZE_BITS / 8) < len) tocopy = (DATA_SIZE_BITS / 8); else @@ -513,11 +700,15 @@ int jent_entropy_init(void) int i; __u64 delta_sum = 0; __u64 old_delta = 0; + unsigned int nonstuck = 0; int time_backwards = 0; int count_mod = 0; int count_stuck = 0; struct rand_data ec = { 0 }; + /* Required for RCT */ + ec.osr = 1; + /* We could perform statistical tests here, but the problem is * that we only have a few loop counts to do testing. These * loop counts may show some slight skew and we produce @@ -539,8 +730,10 @@ int jent_entropy_init(void) /* * TESTLOOPCOUNT needs some loops to identify edge systems. 
100 is * definitely too little. + * + * SP800-90B requires at least 1024 initial test cycles. */ -#define TESTLOOPCOUNT 300 +#define TESTLOOPCOUNT 1024 #define CLEARCACHE 100 for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) { __u64 time = 0; @@ -552,13 +745,13 @@ int jent_entropy_init(void) /* Invoke core entropy collection logic */ jent_get_nstime(&time); ec.prev_time = time; - jent_lfsr_time(&ec, time, 0); + jent_lfsr_time(&ec, time, 0, 0); jent_get_nstime(&time2); /* test whether timer works */ if (!time || !time2) return JENT_ENOTIME; - delta = time2 - time; + delta = jent_delta(time, time2); /* * test whether timer is fine grained enough to provide * delta even when called shortly after each other -- this @@ -581,6 +774,28 @@ int jent_entropy_init(void) if (stuck) count_stuck++; + else { + nonstuck++; + + /* + * Ensure that the APT succeeded. + * + * With the check below that count_stuck must be less + * than 10% of the overall generated raw entropy values + * it is guaranteed that the APT is invoked at + * floor((TESTLOOPCOUNT * 0.9) / 64) == 14 times. + */ + if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) { + jent_apt_reset(&ec, + delta & JENT_APT_WORD_MASK); + if (jent_health_failure(&ec)) + return JENT_EHEALTH; + } + } + + /* Validate RCT */ + if (jent_rct_failure(&ec)) + return JENT_ERCT; /* test whether we have an increasing timer */ if (!(time2 > time)) -- cgit v1.2.3-59-g8ed1b From 97f2650e504033376e8813691cb6eccf73151676 Mon Sep 17 00:00:00 2001 From: Stephan Müller Date: Fri, 17 Apr 2020 21:34:03 +0200 Subject: crypto: drbg - always seeded with SP800-90B compliant noise source As the Jitter RNG provides an SP800-90B compliant noise source, use this noise source always for the (re)seeding of the DRBG. To make sure the DRBG is always properly seeded, the reseed threshold is reduced to 1<<20 generate operations. The Jitter RNG may report health test failures. Such health test failures are treated as transient as follows. The DRBG will not reseed from the Jitter RNG (but from get_random_bytes) in case of a health test failure. Though, it produces the requested random number. The Jitter RNG has a failure counter where at most 1024 consecutive resets due to a health test failure are considered as a transient error. If more consecutive resets are required, the Jitter RNG will return a permanent error which is returned to the caller by the DRBG. With this approach, the worst case reseed threshold is significantly lower than mandated by SP800-90A in order to seed with an SP800-90B noise source: the DRBG has a reseed threshold of 2^20 * 1024 = 2^30 generate requests. Yet, in case of a transient Jitter RNG health test failure, the DRBG is seeded with the data obtained from get_random_bytes. However, if the Jitter RNG fails during the initial seeding operation even due to a health test error, the DRBG will send an error to the caller because at that time, the DRBG has received no seed that is SP800-90B compliant. 
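To keep the error handling introduced across this series straight, the layering is roughly as follows (a summary drawn from the jitterentropy and DRBG patches above, not literal code):
- jent_read_entropy() returns -2 for a Repetition Count Test failure and -3 for an Adaptive Proportion Test failure;
- the jitterentropy-kcapi wrapper resets the Jitter RNG and maps those to -EAGAIN, escalating to a permanent -EFAULT once more than 1024 consecutive resets have occurred;
- the DRBG tolerates -EAGAIN on a reseed (it falls back to get_random_bytes() for that round) but treats -EFAULT, or any failure of the very first seeding, as an error returned to its caller.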
Signed-off-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/drbg.c | 26 +++++++++++++++++++------- include/crypto/drbg.h | 6 +----- 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/crypto/drbg.c b/crypto/drbg.c index b6929eb5f565..e57901d8545b 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1087,10 +1087,6 @@ static void drbg_async_seed(struct work_struct *work) if (ret) goto unlock; - /* If nonblocking pool is initialized, deactivate Jitter RNG */ - crypto_free_rng(drbg->jent); - drbg->jent = NULL; - /* Set seeded to false so that if __drbg_seed fails the * next generate call will trigger a reseed. */ @@ -1168,7 +1164,23 @@ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, entropylen); if (ret) { pr_devel("DRBG: jent failed with %d\n", ret); - goto out; + + /* + * Do not treat the transient failure of the + * Jitter RNG as an error that needs to be + * reported. The combined number of the + * maximum reseed threshold times the maximum + * number of Jitter RNG transient errors is + * less than the reseed threshold required by + * SP800-90A allowing us to treat the + * transient errors as such. + * + * However, we mandate that at least the first + * seeding operation must succeed with the + * Jitter RNG. + */ + if (!reseed || ret != -EAGAIN) + goto out; } drbg_string_fill(&data1, entropy, entropylen * 2); @@ -1492,6 +1504,8 @@ static int drbg_prepare_hrng(struct drbg_state *drbg) if (list_empty(&drbg->test_data.list)) return 0; + drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0); + INIT_WORK(&drbg->seed_work, drbg_async_seed); drbg->random_ready.owner = THIS_MODULE; @@ -1512,8 +1526,6 @@ static int drbg_prepare_hrng(struct drbg_state *drbg) return err; } - drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0); - /* * Require frequent reseeds until the seed source is fully * initialized. diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index 8c9af21efce1..c4165126937e 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -184,11 +184,7 @@ static inline size_t drbg_max_addtl(struct drbg_state *drbg) static inline size_t drbg_max_requests(struct drbg_state *drbg) { /* SP800-90A requires 2**48 maximum requests before reseeding */ -#if (__BITS_PER_LONG == 32) - return SIZE_MAX; -#else - return (1UL<<48); -#endif + return (1<<20); } /* -- cgit v1.2.3-59-g8ed1b From 9c3d6497fbfa0911d4cd6f261762a0a7af29566a Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Sun, 19 Apr 2020 15:12:45 +0800 Subject: crypto: bcm - Delete redundant variable definition The variable "i" is redundant to be assigned a value of zero,because it's assigned in the for loop, so remove redundant one here. 
Signed-off-by: Shengju Zhang Signed-off-by: Tang Bin Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index c8b9408541a9..5db23c18c81f 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -4724,7 +4724,6 @@ static int spu_dt_read(struct platform_device *pdev) spu->spu_type = matched_spu_type->type; spu->spu_subtype = matched_spu_type->subtype; - i = 0; for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs = platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) { -- cgit v1.2.3-59-g8ed1b From b2d7e8bcaab7d1d931369984e2bae952247a1412 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Mon, 20 Apr 2020 18:28:19 +1000 Subject: hwrng: cctrng - Remove unnecessary FIPS ifdef This patch removes the unnecessary FIPS ifdef in cctrng. Signed-off-by: Herbert Xu Acked-by: Hadar Gat Signed-off-by: Herbert Xu --- drivers/char/hw_random/cctrng.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index e82716c12c3a..49fb65a221f3 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -331,13 +331,11 @@ void cc_trng_compwork_handler(struct work_struct *w) ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr); dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid); -#ifdef CONFIG_CRYPTO_FIPS - if (CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr) && fips_enabled) { + if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) { fips_fail_notify(); /* FIPS error is fatal */ panic("Got HW CRNGT error while fips is enabled!\n"); } -#endif /* Clear all pending RNG interrupts */ cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr); -- cgit v1.2.3-59-g8ed1b From 3c2214b6027ff37945799de717c417212e1a8c54 Mon Sep 17 00:00:00 2001 From: Daniel Jordan Date: Tue, 21 Apr 2020 12:34:55 -0400 Subject: padata: add separate cpuhp node for CPUHP_PADATA_DEAD Removing the pcrypt module triggers this: general protection fault, probably for non-canonical address 0xdead000000000122 CPU: 5 PID: 264 Comm: modprobe Not tainted 5.6.0+ #2 Hardware name: QEMU Standard PC RIP: 0010:__cpuhp_state_remove_instance+0xcc/0x120 Call Trace: padata_sysfs_release+0x74/0xce kobject_put+0x81/0xd0 padata_free+0x12/0x20 pcrypt_exit+0x43/0x8ee [pcrypt] padata instances wrongly use the same hlist node for the online and dead states, so __padata_free()'s second cpuhp remove call chokes on the node that the first poisoned. cpuhp multi-instance callbacks only walk forward in cpuhp_step->list and the same node is linked in both the online and dead lists, so the list corruption that results from padata_alloc() adding the node to a second list without removing it from the first doesn't cause problems as long as no instances are freed. Avoid the issue by giving each state its own node. 
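For illustration only, a minimal user-space C sketch (not kernel code and not part of the patch) of why a single intrusive list node must not be linked into two lists at once: the second insertion silently overwrites the linkage the first list depends on, and in the kernel case the first cpuhp removal then poisons that shared node, so the second removal dereferences the poison value quoted in the fault above.

	#include <stdio.h>

	struct node { struct node *next; };

	static void push(struct node **head, struct node *n)
	{
		n->next = *head;	/* clobbers whatever linkage 'n' had before */
		*head = n;
	}

	int main(void)
	{
		struct node other = { NULL };
		struct node shared = { NULL };
		struct node *online = NULL, *dead = NULL;

		push(&online, &other);	/* online list: other */
		push(&online, &shared);	/* online list: shared -> other */
		push(&dead, &shared);	/* dead list: shared; the link to 'other' is gone */

		/* The online list still starts at 'shared', but walking it can no
		 * longer reach 'other', because the second insertion overwrote
		 * shared.next.
		 */
		printf("shared.next=%p, &other=%p\n",
		       (void *)shared.next, (void *)&other);
		return 0;
	}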
Fixes: 894c9ef9780c ("padata: validate cpumask without removed CPU during offline") Signed-off-by: Daniel Jordan Cc: Herbert Xu Cc: Steffen Klassert Cc: linux-crypto@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: stable@vger.kernel.org # v5.4+ Signed-off-by: Herbert Xu --- include/linux/padata.h | 6 ++++-- kernel/padata.c | 14 ++++++++------ 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/include/linux/padata.h b/include/linux/padata.h index a0d8b41850b2..693cae9bfe66 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -139,7 +139,8 @@ struct padata_shell { /** * struct padata_instance - The overall control structure. * - * @node: Used by CPU hotplug. + * @cpu_online_node: Linkage for CPU online callback. + * @cpu_dead_node: Linkage for CPU offline callback. * @parallel_wq: The workqueue used for parallel work. * @serial_wq: The workqueue used for serial work. * @pslist: List of padata_shell objects attached to this instance. @@ -150,7 +151,8 @@ struct padata_shell { * @flags: padata flags. */ struct padata_instance { - struct hlist_node node; + struct hlist_node cpu_online_node; + struct hlist_node cpu_dead_node; struct workqueue_struct *parallel_wq; struct workqueue_struct *serial_wq; struct list_head pslist; diff --git a/kernel/padata.c b/kernel/padata.c index a6afa12fb75e..aae789896616 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -703,7 +703,7 @@ static int padata_cpu_online(unsigned int cpu, struct hlist_node *node) struct padata_instance *pinst; int ret; - pinst = hlist_entry_safe(node, struct padata_instance, node); + pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node); if (!pinst_has_cpu(pinst, cpu)) return 0; @@ -718,7 +718,7 @@ static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node) struct padata_instance *pinst; int ret; - pinst = hlist_entry_safe(node, struct padata_instance, node); + pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node); if (!pinst_has_cpu(pinst, cpu)) return 0; @@ -734,8 +734,9 @@ static enum cpuhp_state hp_online; static void __padata_free(struct padata_instance *pinst) { #ifdef CONFIG_HOTPLUG_CPU - cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->node); - cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node); + cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, + &pinst->cpu_dead_node); + cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node); #endif WARN_ON(!list_empty(&pinst->pslist)); @@ -939,9 +940,10 @@ static struct padata_instance *padata_alloc(const char *name, mutex_init(&pinst->lock); #ifdef CONFIG_HOTPLUG_CPU - cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node); + cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, + &pinst->cpu_online_node); cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD, - &pinst->node); + &pinst->cpu_dead_node); #endif put_online_cpus(); -- cgit v1.2.3-59-g8ed1b From 97f9ac3db6612f14ac0c509e1a63ce14fd4cc0eb Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Tue, 21 Apr 2020 12:44:49 -0500 Subject: crypto: ccp - Add support for SEV-ES to the PSP driver To provide support for SEV-ES, the hypervisor must provide an area of memory to the PSP. Once this Trusted Memory Region (TMR) is provided to the PSP, the contents of this area of memory are no longer available to the x86. Update the PSP driver to allocate a 1MB region for the TMR that is 1MB aligned and then provide it to the PSP through the SEV INIT command. 
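A short note on why the page allocator meets the 1MB-alignment requirement (assuming the common 4KB page size): get_order(1MB) = log2(1MB / 4KB) = log2(256) = 8, and an order-8 allocation from the buddy allocator is a contiguous block of 2^8 pages naturally aligned to its own size, i.e. 2^8 * 4KB = 1MB. So alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE)) yields a region that is both 1MB long and 1MB aligned, which is exactly the property the TMR comment in the patch below relies on.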
Signed-off-by: Tom Lendacky Reviewed-by: Brijesh Singh Reviewed-by: Joerg Roedel Signed-off-by: Herbert Xu --- drivers/crypto/ccp/sev-dev.c | 43 +++++++++++++++++++++++++++++++++++++++++++ include/linux/psp-sev.h | 2 ++ include/uapi/linux/psp-sev.h | 2 ++ 3 files changed, 47 insertions(+) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 896f190b9a50..439cd737076e 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -20,6 +20,7 @@ #include #include #include +#include #include @@ -44,6 +45,14 @@ MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during static bool psp_dead; static int psp_timeout; +/* Trusted Memory Region (TMR): + * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator + * to allocate the memory, which will return aligned memory for the specified + * allocation order. + */ +#define SEV_ES_TMR_SIZE (1024 * 1024) +static void *sev_es_tmr; + static inline bool sev_version_greater_or_equal(u8 maj, u8 min) { struct sev_device *sev = psp_master->sev_data; @@ -214,6 +223,20 @@ static int __sev_platform_init_locked(int *error) if (sev->state == SEV_STATE_INIT) return 0; + if (sev_es_tmr) { + u64 tmr_pa; + + /* + * Do not include the encryption mask on the physical + * address of the TMR (firmware should clear it anyway). + */ + tmr_pa = __pa(sev_es_tmr); + + sev->init_cmd_buf.flags |= SEV_INIT_FLAGS_SEV_ES; + sev->init_cmd_buf.tmr_address = tmr_pa; + sev->init_cmd_buf.tmr_len = SEV_ES_TMR_SIZE; + } + rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error); if (rc) return rc; @@ -1012,6 +1035,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; + struct page *tmr_page; int error, rc; if (!sev) @@ -1041,6 +1065,16 @@ void sev_pci_init(void) sev_update_firmware(sev->dev) == 0) sev_get_api_version(); + /* Obtain the TMR memory area for SEV-ES use */ + tmr_page = alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE)); + if (tmr_page) { + sev_es_tmr = page_address(tmr_page); + } else { + sev_es_tmr = NULL; + dev_warn(sev->dev, + "SEV: TMR allocation failed, SEV-ES support unavailable\n"); + } + /* Initialize the platform */ rc = sev_platform_init(&error); if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) { @@ -1075,4 +1109,13 @@ void sev_pci_exit(void) return; sev_platform_shutdown(NULL); + + if (sev_es_tmr) { + /* The TMR area was encrypted, flush it from the cache */ + wbinvd_on_all_cpus(); + + free_pages((unsigned long)sev_es_tmr, + get_order(SEV_ES_TMR_SIZE)); + sev_es_tmr = NULL; + } } diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 5167bf2bfc75..7fbc8679145c 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -100,6 +100,8 @@ struct sev_data_init { u32 tmr_len; /* In */ } __packed; +#define SEV_INIT_FLAGS_SEV_ES 0x01 + /** * struct sev_data_pek_csr - PEK_CSR command parameters * diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 0549a5c622bf..91b4c63d5cbf 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -83,6 +83,8 @@ struct sev_user_data_status { __u32 guest_count; /* Out */ } __packed; +#define SEV_STATUS_FLAGS_CONFIG_ES 0x0100 + /** * struct sev_user_data_pek_csr - PEK_CSR command parameters * -- cgit v1.2.3-59-g8ed1b From 18f1ca46858eac22437819937ae44aa9a8f9f2fa Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Tue, 21 Apr 2020 14:47:04 -0700 Subject: lib/mpi: Fix 64-bit MIPS build with Clang When 
building 64r6_defconfig with CONFIG_MIPS32_O32 disabled and CONFIG_CRYPTO_RSA enabled: lib/mpi/generic_mpih-mul1.c:37:24: error: invalid use of a cast in a inline asm context requiring an l-value: remove the cast or build with -fheinous-gnu-extensions umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb); ~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lib/mpi/longlong.h:664:22: note: expanded from macro 'umul_ppmm' : "=d" ((UDItype)(w0)) ~~~~~~~~~~^~~ lib/mpi/generic_mpih-mul1.c:37:13: error: invalid use of a cast in a inline asm context requiring an l-value: remove the cast or build with -fheinous-gnu-extensions umul_ppmm(prod_high, prod_low, s1_ptr[j], s2_limb); ~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lib/mpi/longlong.h:668:22: note: expanded from macro 'umul_ppmm' : "=d" ((UDItype)(w1)) ~~~~~~~~~~^~~ 2 errors generated. This special case for umul_ppmm for MIPS64r6 was added in commit bbc25bee37d2b ("lib/mpi: Fix umul_ppmm() for MIPS64r6"), due to GCC being inefficient and emitting a __multi3 intrinsic. There is no such issue with clang; with this patch applied, I can build this configuration without any problems and there are no link errors like mentioned in the commit above (which I can still reproduce with GCC 9.3.0 when that commit is reverted). Only use this definition when GCC is being used. This really should have been caught by commit b0c091ae04f67 ("lib/mpi: Eliminate unused umul_ppmm definitions for MIPS") when I was messing around in this area but I was not testing 64-bit MIPS at the time. Link: https://github.com/ClangBuiltLinux/linux/issues/885 Reported-by: Dmitry Golovin Signed-off-by: Nathan Chancellor Signed-off-by: Herbert Xu --- lib/mpi/longlong.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index 2dceaca27489..bfff2e398ffe 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -653,7 +653,7 @@ do { \ ************** MIPS/64 ************** ***************************************/ #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 -#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 +#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC) /* * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C * code below, so we special case MIPS64r6 until the compiler can do better. -- cgit v1.2.3-59-g8ed1b From 1c8414dadb8c5668ee4d20d7dfbb478db52d1ab0 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 22 Apr 2020 15:58:08 +0300 Subject: hwrng: optee - Use UUID API for exporting the UUID There is export_uuid() function which exports uuid_t to the u8 array. Use it instead of open coding variant. This allows to hide the uuid_t internals. 
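For reference, export_uuid() is a small helper from <linux/uuid.h>; to a close approximation (a sketch of its semantics, not a verbatim copy) it amounts to:

	static inline void export_uuid(u8 dst[UUID_SIZE], const uuid_t *src)
	{
		memcpy(dst, src, sizeof(*src));	/* UUID_SIZE is 16 bytes */
	}

so the driver copies the same 16 bytes as before, just without reaching into the uuid_t layout by hand.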
Signed-off-by: Andy Shevchenko Reviewed-by: Sumit Garg Signed-off-by: Herbert Xu --- drivers/char/hw_random/optee-rng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c index ddfbabaa5f8f..49b2e02537dd 100644 --- a/drivers/char/hw_random/optee-rng.c +++ b/drivers/char/hw_random/optee-rng.c @@ -226,7 +226,7 @@ static int optee_rng_probe(struct device *dev) return -ENODEV; /* Open session with hwrng Trusted App */ - memcpy(sess_arg.uuid, rng_device->id.uuid.b, TEE_IOCTL_UUID_LEN); + export_uuid(sess_arg.uuid, &rng_device->id.uuid); sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC; sess_arg.num_params = 0; -- cgit v1.2.3-59-g8ed1b From d0f6223c0dc5196ef684657f6b77d4fa5fca2020 Mon Sep 17 00:00:00 2001 From: Zou Wei Date: Thu, 23 Apr 2020 10:22:36 +0800 Subject: crypto: hisilicon/qm - Make qm_controller_reset() static Fix the following sparse warning: drivers/crypto/hisilicon/qm.c:3079:5: warning: symbol 'qm_controller_reset' was not declared. Should it be static? Reported-by: Hulk Robot Signed-off-by: Zou Wei Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 80c552523ac4..69d02cb40e4b 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3076,7 +3076,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm) return 0; } -int qm_controller_reset(struct hisi_qm *qm) +static int qm_controller_reset(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; -- cgit v1.2.3-59-g8ed1b From 43f3c2b4dde331d380e4f575d3e5798f27867b3e Mon Sep 17 00:00:00 2001 From: Zou Wei Date: Thu, 23 Apr 2020 20:21:21 +0800 Subject: hwrng: cctrng - Make some symbols static Fix the following sparse warnings: drivers/char/hw_random/cctrng.c:316:6: warning: symbol 'cc_trng_compwork_handler' was not declared. Should it be static? drivers/char/hw_random/cctrng.c:451:6: warning: symbol 'cc_trng_startwork_handler' was not declared. Should it be static? Reported-by: Hulk Robot Signed-off-by: Zou Wei Acked-by: Hadar Gat Signed-off-by: Herbert Xu --- drivers/char/hw_random/cctrng.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index 49fb65a221f3..619148fb2dc9 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -313,7 +313,7 @@ static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata) cc_trng_enable_rnd_source(drvdata); } -void cc_trng_compwork_handler(struct work_struct *w) +static void cc_trng_compwork_handler(struct work_struct *w) { u32 isr = 0; u32 ehr_valid = 0; @@ -446,7 +446,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id) return IRQ_HANDLED; } -void cc_trng_startwork_handler(struct work_struct *w) +static void cc_trng_startwork_handler(struct work_struct *w) { struct cctrng_drvdata *drvdata = container_of(w, struct cctrng_drvdata, startwork); -- cgit v1.2.3-59-g8ed1b From f23efcbcc523b09c2ee359a35eb3897dc1764fd3 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Fri, 24 Apr 2020 13:40:46 +0000 Subject: crypto: ctr - no longer needs CRYPTO_SEQIV As comment of the v2, Herbert said: "The SEQIV select from CTR is historical and no longer necessary." So let's get rid of it. 
Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu --- crypto/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index c24a47406f8f..64caec4c52b6 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -370,7 +370,6 @@ config CRYPTO_CFB config CRYPTO_CTR tristate "CTR support" select CRYPTO_SKCIPHER - select CRYPTO_SEQIV select CRYPTO_MANAGER help CTR: Counter mode -- cgit v1.2.3-59-g8ed1b From d6fc1a4592219b9ccb85854282d0e59d35fddebc Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Fri, 24 Apr 2020 13:40:47 +0000 Subject: crypto: drbg - should select CTR if CRYPTO_DRBG_CTR is builtin and CTR is module, allocating such algo will fail. DRBG: could not allocate CTR cipher TFM handle: ctr(aes) alg: drbg: Failed to reset rng alg: drbg: Test 0 failed for drbg_pr_ctr_aes128 DRBG: could not allocate CTR cipher TFM handle: ctr(aes) alg: drbg: Failed to reset rng alg: drbg: Test 0 failed for drbg_nopr_ctr_aes128 DRBG: could not allocate CTR cipher TFM handle: ctr(aes) alg: drbg: Failed to reset rng alg: drbg: Test 0 failed for drbg_nopr_ctr_aes192 DRBG: could not allocate CTR cipher TFM handle: ctr(aes) alg: drbg: Failed to reset rng alg: drbg: Test 0 failed for drbg_nopr_ctr_aes256 So let's select CTR instead of just depend on it. Signed-off-by: Corentin Labbe Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 64caec4c52b6..d5daf35431e3 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1819,7 +1819,7 @@ config CRYPTO_DRBG_HASH config CRYPTO_DRBG_CTR bool "Enable CTR DRBG" select CRYPTO_AES - depends on CRYPTO_CTR + select CRYPTO_CTR help Enable the CTR DRBG variant as defined in NIST SP800-90A. -- cgit v1.2.3-59-g8ed1b From 42a13ddbab00455504d50ef159360f7451d597e4 Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Sat, 25 Apr 2020 22:22:58 +0800 Subject: crypto: bcm - Remove the unnecessary cast for PTR_ERR(). It's not necessary to specify 'int' casting for PTR_ERR(). Signed-off-by: Zhang Shengju Signed-off-by: Tang Bin Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 5db23c18c81f..36a1f4e2aff9 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -4436,7 +4436,7 @@ static int spu_mb_init(struct device *dev) for (i = 0; i < iproc_priv.spu.num_chan; i++) { iproc_priv.mbox[i] = mbox_request_channel(mcl, i); if (IS_ERR(iproc_priv.mbox[i])) { - err = (int)PTR_ERR(iproc_priv.mbox[i]); + err = PTR_ERR(iproc_priv.mbox[i]); dev_err(dev, "Mbox channel %d request failed with err %d", i, err); -- cgit v1.2.3-59-g8ed1b From 12b3cf9093542d9f752a4968815ece836159013f Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Sat, 25 Apr 2020 22:24:14 +0800 Subject: crypto: bcm - Fix unused assignment Delete unused initialized value in cipher.c file. 
Signed-off-by: Zhang Shengju Signed-off-by: Tang Bin Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 36a1f4e2aff9..7bdecf813940 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -308,9 +308,9 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx) container_of(areq, struct skcipher_request, base); struct iproc_ctx_s *ctx = rctx->ctx; struct spu_cipher_parms cipher_parms; - int err = 0; - unsigned int chunksize = 0; /* Num bytes of request to submit */ - int remaining = 0; /* Bytes of request still to process */ + int err; + unsigned int chunksize; /* Num bytes of request to submit */ + int remaining; /* Bytes of request still to process */ int chunk_start; /* Beginning of data for current SPU msg */ /* IV or ctr value to use in this SPU msg */ @@ -698,7 +698,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx) /* number of bytes still to be hashed in this req */ unsigned int nbytes_to_hash = 0; - int err = 0; + int err; unsigned int chunksize = 0; /* length of hash carry + new data */ /* * length of new data, not from hash carry, to be submitted in @@ -1664,7 +1664,7 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg) struct spu_hw *spu = &iproc_priv.spu; struct brcm_message *mssg = msg; struct iproc_reqctx_s *rctx; - int err = 0; + int err; rctx = mssg->ctx; if (unlikely(!rctx)) { @@ -1967,7 +1967,7 @@ static int ahash_enqueue(struct ahash_request *req) struct iproc_reqctx_s *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); - int err = 0; + int err; const char *alg_name; flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes); @@ -2299,7 +2299,7 @@ ahash_finup_exit: static int ahash_digest(struct ahash_request *req) { - int err = 0; + int err; flow_log("ahash_digest() nbytes:%u\n", req->nbytes); @@ -4746,7 +4746,7 @@ static int bcm_spu_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct spu_hw *spu = &iproc_priv.spu; - int err = 0; + int err; iproc_priv.pdev = pdev; platform_set_drvdata(iproc_priv.pdev, -- cgit v1.2.3-59-g8ed1b From 43b05ce76733164e1c1b33ab16eda14130646c96 Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Mon, 27 Apr 2020 08:42:24 +0200 Subject: crypto: stm32/hash - defer probe for reset controller Change stm32 HASH driver to defer its probe operation when reset controller device is registered but has not been probed yet. 
Signed-off-by: Etienne Carriere Reviewed-by: Lionel DEBIEVE Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 167b80eec437..fad6190be088 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -1482,7 +1482,12 @@ static int stm32_hash_probe(struct platform_device *pdev) pm_runtime_enable(dev); hdev->rst = devm_reset_control_get(&pdev->dev, NULL); - if (!IS_ERR(hdev->rst)) { + if (IS_ERR(hdev->rst)) { + if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err_reset; + } + } else { reset_control_assert(hdev->rst); udelay(2); reset_control_deassert(hdev->rst); @@ -1535,7 +1540,7 @@ err_engine: if (hdev->dma_lch) dma_release_channel(hdev->dma_lch); - +err_reset: pm_runtime_disable(dev); pm_runtime_put_noidle(dev); -- cgit v1.2.3-59-g8ed1b From 45dafed6c5ecd01400766ce99a49b2d8ad351ce6 Mon Sep 17 00:00:00 2001 From: Etienne Carriere Date: Mon, 27 Apr 2020 08:42:25 +0200 Subject: crypto: stm32/hash - defer probe for dma device Change stm32 HASH driver to defer its probe operation when DMA channel device is registered but has not been probed yet. Signed-off-by: Etienne Carriere Reviewed-by: Lionel DEBIEVE Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index fad6190be088..0d592f55a271 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -507,6 +507,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev) static int stm32_hash_dma_init(struct stm32_hash_dev *hdev) { struct dma_slave_config dma_conf; + struct dma_chan *chan; int err; memset(&dma_conf, 0, sizeof(dma_conf)); @@ -518,11 +519,11 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev) dma_conf.dst_maxburst = hdev->dma_maxburst; dma_conf.device_fc = false; - hdev->dma_lch = dma_request_chan(hdev->dev, "in"); - if (IS_ERR(hdev->dma_lch)) { - dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n"); - return PTR_ERR(hdev->dma_lch); - } + chan = dma_request_chan(hdev->dev, "in"); + if (IS_ERR(chan)) + return PTR_ERR(chan); + + hdev->dma_lch = chan; err = dmaengine_slave_config(hdev->dma_lch, &dma_conf); if (err) { @@ -1498,8 +1499,15 @@ static int stm32_hash_probe(struct platform_device *pdev) platform_set_drvdata(pdev, hdev); ret = stm32_hash_dma_init(hdev); - if (ret) + switch (ret) { + case 0: + break; + case -ENOENT: dev_dbg(dev, "DMA mode not available\n"); + break; + default: + goto err_dma; + } spin_lock(&stm32_hash.lock); list_add_tail(&hdev->list, &stm32_hash.dev_list); @@ -1537,7 +1545,7 @@ err_engine: spin_lock(&stm32_hash.lock); list_del(&hdev->list); spin_unlock(&stm32_hash.lock); - +err_dma: if (hdev->dma_lch) dma_release_channel(hdev->dma_lch); err_reset: -- cgit v1.2.3-59-g8ed1b From 79cd691f609c3f5b9603063099502b70ab32ae87 Mon Sep 17 00:00:00 2001 From: Lionel Debieve Date: Mon, 27 Apr 2020 08:42:26 +0200 Subject: crypto: stm32/hash - don't print error on probe deferral Change driver to not print an error message when the device probe is deferred for a clock resource. 
Signed-off-by: Lionel Debieve Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-hash.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 0d592f55a271..03c5e6683805 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -1464,8 +1464,11 @@ static int stm32_hash_probe(struct platform_device *pdev) hdev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(hdev->clk)) { - dev_err(dev, "failed to get clock for hash (%lu)\n", - PTR_ERR(hdev->clk)); + if (PTR_ERR(hdev->clk) != -EPROBE_DEFER) { + dev_err(dev, "failed to get clock for hash (%lu)\n", + PTR_ERR(hdev->clk)); + } + return PTR_ERR(hdev->clk); } -- cgit v1.2.3-59-g8ed1b From 3f7819bd42153b6df04af40dfa3439ce71ce4ad1 Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Mon, 27 Apr 2020 16:22:18 +0800 Subject: crypto: bcm - Use the defined variable to clean code Use the defined variable "dev" to make the code cleaner. Signed-off-by: Zhang Shengju Signed-off-by: Tang Bin Signed-off-by: Herbert Xu --- drivers/crypto/bcm/cipher.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 7bdecf813940..a353217a0d33 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -4717,7 +4717,7 @@ static int spu_dt_read(struct platform_device *pdev) matched_spu_type = of_device_get_match_data(dev); if (!matched_spu_type) { - dev_err(&pdev->dev, "Failed to match device\n"); + dev_err(dev, "Failed to match device\n"); return -ENODEV; } @@ -4730,7 +4730,7 @@ static int spu_dt_read(struct platform_device *pdev) spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs); if (IS_ERR(spu->reg_vbase[i])) { err = PTR_ERR(spu->reg_vbase[i]); - dev_err(&pdev->dev, "Failed to map registers: %d\n", + dev_err(dev, "Failed to map registers: %d\n", err); spu->reg_vbase[i] = NULL; return err; @@ -4756,7 +4756,7 @@ static int bcm_spu_probe(struct platform_device *pdev) if (err < 0) goto failure; - err = spu_mb_init(&pdev->dev); + err = spu_mb_init(dev); if (err < 0) goto failure; @@ -4765,7 +4765,7 @@ static int bcm_spu_probe(struct platform_device *pdev) else if (spu->spu_type == SPU_TYPE_SPU2) iproc_priv.bcm_hdr_len = 0; - spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype); + spu_functions_register(dev, spu->spu_type, spu->spu_subtype); spu_counters_init(); -- cgit v1.2.3-59-g8ed1b From 34d47aab066544050089093a4179a95718be7002 Mon Sep 17 00:00:00 2001 From: Hadar Gat Date: Mon, 27 Apr 2020 14:36:02 +0300 Subject: hwrng: cctrng - Add dependency on OF The cctrng is unusable on non-DT systems so we should depend on it. 
Signed-off-by: Hadar Gat Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 0c99735df694..34047169bd06 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -476,7 +476,7 @@ config HW_RANDOM_KEYSTONE config HW_RANDOM_CCTRNG tristate "Arm CryptoCell True Random Number Generator support" - depends on HAS_IOMEM + depends on HAS_IOMEM && OF default HW_RANDOM help This driver provides support for the True Random Number -- cgit v1.2.3-59-g8ed1b From 55e8405680737492da83875ad0850010018854cd Mon Sep 17 00:00:00 2001 From: Hadar Gat Date: Mon, 27 Apr 2020 14:36:03 +0300 Subject: hwrng: cctrng - change default to n For many users, the Arm CryptoCell HW is not available, so the default for HW_RANDOM_CCTRNG should be 'n'. Remove the line to follow the convention: 'n' is the default anyway, so there is no need to state it explicitly. Signed-off-by: Hadar Gat Acked-by: Geert Uytterhoeven Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 34047169bd06..b98eb1cf8476 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -477,7 +477,6 @@ config HW_RANDOM_KEYSTONE config HW_RANDOM_CCTRNG tristate "Arm CryptoCell True Random Number Generator support" depends on HAS_IOMEM && OF - default HW_RANDOM help This driver provides support for the True Random Number Generator available in Arm TrustZone CryptoCell. -- cgit v1.2.3-59-g8ed1b From 3e37f04f2bf3dd86432785e15182793a8a8ef00e Mon Sep 17 00:00:00 2001 From: Hadar Gat Date: Mon, 27 Apr 2020 14:36:04 +0300 Subject: hwrng: cctrng - update help description Improve the HW_RANDOM_CCTRNG help description. Signed-off-by: Hadar Gat Signed-off-by: Herbert Xu --- drivers/char/hw_random/Kconfig | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index b98eb1cf8476..ac00d78ee9cc 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -478,13 +478,14 @@ config HW_RANDOM_CCTRNG tristate "Arm CryptoCell True Random Number Generator support" depends on HAS_IOMEM && OF help - This driver provides support for the True Random Number - Generator available in Arm TrustZone CryptoCell. - - To compile this driver as a module, choose M here: the module + Say 'Y' to enable the True Random Number Generator driver for the + Arm TrustZone CryptoCell family of processors. + Currently the CryptoCell 713 and 703 are supported. + The driver is supported only in SoC where Trusted Execution + Environment is not used. + Choose 'M' to compile this driver as a module. The module will be called cctrng. - - If unsure, say Y. + If unsure, say 'N'. endif # HW_RANDOM -- cgit v1.2.3-59-g8ed1b From ec6e2bf33b54cc3351bd702452e5d016b8f9d2f4 Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Tue, 28 Apr 2020 18:49:03 +0300 Subject: crypto: algapi - create function to add request in front of queue Add a crypto_enqueue_request_head function that enqueues a request at the front of the queue. This will be used by the crypto-engine on its error path: in case a request was not executed by the hardware, it is enqueued back at the front of the queue to keep the order of requests.
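For illustration only (an editorial sketch, not part of this patch), a caller-side use of the new helper might look as follows; the spinlock parameter stands in for whatever lock already protects the queue, which is an assumption made for the example, since crypto_enqueue_request_head(), like crypto_enqueue_request(), does no locking of its own.

#include <linux/spinlock.h>
#include <crypto/algapi.h>

/*
 * Push a request that the hardware could not take back to the head of
 * the software queue, so request ordering is preserved.
 */
static void example_requeue_at_head(struct crypto_queue *queue,
				    struct crypto_async_request *req,
				    spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	crypto_enqueue_request_head(queue, req);
	spin_unlock_irqrestore(lock, flags);
}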
Signed-off-by: Iuliana Prodan Signed-off-by: Herbert Xu --- crypto/algapi.c | 8 ++++++++ include/crypto/algapi.h | 2 ++ 2 files changed, 10 insertions(+) diff --git a/crypto/algapi.c b/crypto/algapi.c index f1e6ccaff853..92abdf675992 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -914,6 +914,14 @@ out: } EXPORT_SYMBOL_GPL(crypto_enqueue_request); +void crypto_enqueue_request_head(struct crypto_queue *queue, + struct crypto_async_request *request) +{ + queue->qlen++; + list_add(&request->list, &queue->list); +} +EXPORT_SYMBOL_GPL(crypto_enqueue_request_head); + struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) { struct list_head *request; diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index e115f9215ed5..00a9cf98debe 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -125,6 +125,8 @@ int crypto_inst_setname(struct crypto_instance *inst, const char *name, void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); int crypto_enqueue_request(struct crypto_queue *queue, struct crypto_async_request *request); +void crypto_enqueue_request_head(struct crypto_queue *queue, + struct crypto_async_request *request); struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); static inline unsigned int crypto_queue_len(struct crypto_queue *queue) { -- cgit v1.2.3-59-g8ed1b From 6a89f492f8e5097c09d4b0f4d713d354057a66ba Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Tue, 28 Apr 2020 18:49:04 +0300 Subject: crypto: engine - support for parallel requests based on retry mechanism Add support for executing multiple requests in parallel in the crypto-engine, based on a retry mechanism. If the hardware was unable to execute a backlog request, enqueue it back at the front of the crypto-engine queue to keep the order of requests. A new variable, retry_support, is added (to keep backward compatibility of crypto-engine); it keeps track of whether the hardware supports the retry mechanism and can therefore run multiple requests. If do_one_request() returns: >= 0: the hardware executed the request successfully; < 0: this is the old error path; if the hardware supports the retry mechanism, the request is put back at the front of the crypto-engine queue. For backwards compatibility, if retry support is not available, the crypto-engine works as before. If the hardware queue is full (-ENOSPC), the request is requeued regardless of the MAY_BACKLOG flag. If the hardware returns any other error code (such as -EIO, -EINVAL or -ENOMEM), only MAY_BACKLOG requests are enqueued back into the crypto-engine's queue, since the others can be dropped. The new crypto_engine_alloc_init_and_set function initializes the crypto-engine and sets the maximum size of the crypto-engine software queue (no longer hardcoded); the retry_support variable is set to false by default. In crypto_pump_requests(), as long as do_one_request() returns >= 0, new requests are sent to the hardware, until the hardware runs out of space and do_one_request() returns < 0. By default, retry_support is false and the crypto-engine works as before: it sends requests to the hardware one by one from crypto_pump_requests() and completes them in crypto_finalize_request(), and so on. To support multiple requests, each driver must set retry_support to true, and if do_one_request() returns an error the request must not be freed, since it will be enqueued back into the crypto-engine's queue. A driver-side sketch of this interface is shown below.
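As a hedged driver-side sketch of the interface described above (not taken from any real driver; every my_* name is hypothetical), using the four-argument crypto_engine_alloc_init_and_set() form introduced by this patch; note that a later patch in this series adds a batch-callback argument to this function.

#include <linux/device.h>
#include <linux/errno.h>
#include <crypto/engine.h>

#define MY_SW_QLEN	64	/* assumed software queue depth, for the example */

/* Hypothetical hardware submit helper: a real driver would return -ENOSPC
 * when its hardware queue is full, or another negative code on failure. */
static int my_hw_submit(void *areq)
{
	return 0;	/* pretend the hardware accepted the request */
}

static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	/*
	 * >= 0: the hardware accepted the request.
	 * -ENOSPC: the engine requeues the request at the head of its
	 *          queue and retries later, regardless of MAY_BACKLOG.
	 * other < 0: only MAY_BACKLOG requests are requeued.
	 */
	return my_hw_submit(areq);
}

/* Wiring the callback into the per-transform engine context. */
static struct crypto_engine_ctx my_engine_ctx = {
	.op.do_one_request = my_do_one_request,
};

static struct crypto_engine *my_engine_init(struct device *dev)
{
	/* retry_support = true, non-realtime pump thread, MY_SW_QLEN entries */
	return crypto_engine_alloc_init_and_set(dev, true, false, MY_SW_QLEN);
}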
When all drivers, that use crypto-engine now, will be updated for retry mechanism, the retry_support variable can be removed. Signed-off-by: Iuliana Prodan Signed-off-by: Herbert Xu --- crypto/crypto_engine.c | 146 ++++++++++++++++++++++++++++++++++++++---------- include/crypto/engine.h | 10 +++- 2 files changed, 124 insertions(+), 32 deletions(-) diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index eb029ff1e05a..ee192731a997 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -22,32 +22,36 @@ * @err: error number */ static void crypto_finalize_request(struct crypto_engine *engine, - struct crypto_async_request *req, int err) + struct crypto_async_request *req, int err) { unsigned long flags; - bool finalize_cur_req = false; + bool finalize_req = false; int ret; struct crypto_engine_ctx *enginectx; - spin_lock_irqsave(&engine->queue_lock, flags); - if (engine->cur_req == req) - finalize_cur_req = true; - spin_unlock_irqrestore(&engine->queue_lock, flags); + /* + * If hardware cannot enqueue more requests + * and retry mechanism is not supported + * make sure we are completing the current request + */ + if (!engine->retry_support) { + spin_lock_irqsave(&engine->queue_lock, flags); + if (engine->cur_req == req) { + finalize_req = true; + engine->cur_req = NULL; + } + spin_unlock_irqrestore(&engine->queue_lock, flags); + } - if (finalize_cur_req) { + if (finalize_req || engine->retry_support) { enginectx = crypto_tfm_ctx(req->tfm); - if (engine->cur_req_prepared && + if (enginectx->op.prepare_request && enginectx->op.unprepare_request) { ret = enginectx->op.unprepare_request(engine, req); if (ret) dev_err(engine->dev, "failed to unprepare request\n"); } - spin_lock_irqsave(&engine->queue_lock, flags); - engine->cur_req = NULL; - engine->cur_req_prepared = false; - spin_unlock_irqrestore(&engine->queue_lock, flags); } - req->complete(req, err); kthread_queue_work(engine->kworker, &engine->pump_requests); @@ -74,7 +78,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, spin_lock_irqsave(&engine->queue_lock, flags); /* Make sure we are not already running a request */ - if (engine->cur_req) + if (!engine->retry_support && engine->cur_req) goto out; /* If another context is idling then defer */ @@ -108,13 +112,21 @@ static void crypto_pump_requests(struct crypto_engine *engine, goto out; } +start_request: /* Get the fist request from the engine queue to handle */ backlog = crypto_get_backlog(&engine->queue); async_req = crypto_dequeue_request(&engine->queue); if (!async_req) goto out; - engine->cur_req = async_req; + /* + * If hardware doesn't support the retry mechanism, + * keep track of the request we are processing now. + * We'll need it on completion (crypto_finalize_request). 
+ */ + if (!engine->retry_support) + engine->cur_req = async_req; + if (backlog) backlog->complete(backlog, -EINPROGRESS); @@ -130,7 +142,7 @@ static void crypto_pump_requests(struct crypto_engine *engine, ret = engine->prepare_crypt_hardware(engine); if (ret) { dev_err(engine->dev, "failed to prepare crypt hardware\n"); - goto req_err; + goto req_err_2; } } @@ -141,28 +153,81 @@ static void crypto_pump_requests(struct crypto_engine *engine, if (ret) { dev_err(engine->dev, "failed to prepare request: %d\n", ret); - goto req_err; + goto req_err_2; } - engine->cur_req_prepared = true; } if (!enginectx->op.do_one_request) { dev_err(engine->dev, "failed to do request\n"); ret = -EINVAL; - goto req_err; + goto req_err_1; } + ret = enginectx->op.do_one_request(engine, async_req); - if (ret) { - dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret); - goto req_err; + + /* Request unsuccessfully executed by hardware */ + if (ret < 0) { + /* + * If hardware queue is full (-ENOSPC), requeue request + * regardless of backlog flag. + * If hardware throws any other error code, + * requeue only backlog requests. + * Otherwise, unprepare and complete the request. + */ + if (!engine->retry_support || + ((ret != -ENOSPC) && + !(async_req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) { + dev_err(engine->dev, + "Failed to do one request from queue: %d\n", + ret); + goto req_err_1; + } + /* + * If retry mechanism is supported, + * unprepare current request and + * enqueue it back into crypto-engine queue. + */ + if (enginectx->op.unprepare_request) { + ret = enginectx->op.unprepare_request(engine, + async_req); + if (ret) + dev_err(engine->dev, + "failed to unprepare request\n"); + } + spin_lock_irqsave(&engine->queue_lock, flags); + /* + * If hardware was unable to execute request, enqueue it + * back in front of crypto-engine queue, to keep the order + * of requests. + */ + crypto_enqueue_request_head(&engine->queue, async_req); + + kthread_queue_work(engine->kworker, &engine->pump_requests); + goto out; } - return; -req_err: - crypto_finalize_request(engine, async_req, ret); + goto retry; + +req_err_1: + if (enginectx->op.unprepare_request) { + ret = enginectx->op.unprepare_request(engine, async_req); + if (ret) + dev_err(engine->dev, "failed to unprepare request\n"); + } + +req_err_2: + async_req->complete(async_req, ret); + +retry: + /* If retry mechanism is supported, send new requests to engine */ + if (engine->retry_support) { + spin_lock_irqsave(&engine->queue_lock, flags); + goto start_request; + } return; out: spin_unlock_irqrestore(&engine->queue_lock, flags); + return; } static void crypto_pump_work(struct kthread_work *work) @@ -386,15 +451,20 @@ int crypto_engine_stop(struct crypto_engine *engine) EXPORT_SYMBOL_GPL(crypto_engine_stop); /** - * crypto_engine_alloc_init - allocate crypto hardware engine structure and - * initialize it. + * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure + * and initialize it by setting the maximum number of entries in the software + * crypto-engine queue. * @dev: the device attached with one hardware engine + * @retry_support: whether hardware has support for retry mechanism * @rt: whether this queue is set to run as a realtime task + * @qlen: maximum size of the crypto-engine queue * * This must be called from context that can sleep. * Return: the crypto engine structure on success, else NULL. 
*/ -struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) +struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, + bool retry_support, + bool rt, int qlen) { struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 }; struct crypto_engine *engine; @@ -411,12 +481,12 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) engine->running = false; engine->busy = false; engine->idling = false; - engine->cur_req_prepared = false; + engine->retry_support = retry_support; engine->priv_data = dev; snprintf(engine->name, sizeof(engine->name), "%s-engine", dev_name(dev)); - crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN); + crypto_init_queue(&engine->queue, qlen); spin_lock_init(&engine->queue_lock); engine->kworker = kthread_create_worker(0, "%s", engine->name); @@ -433,6 +503,22 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) return engine; } +EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set); + +/** + * crypto_engine_alloc_init - allocate crypto hardware engine structure and + * initialize it. + * @dev: the device attached with one hardware engine + * @rt: whether this queue is set to run as a realtime task + * + * This must be called from context that can sleep. + * Return: the crypto engine structure on success, else NULL. + */ +struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) +{ + return crypto_engine_alloc_init_and_set(dev, false, rt, + CRYPTO_ENGINE_MAX_QLEN); +} EXPORT_SYMBOL_GPL(crypto_engine_alloc_init); /** diff --git a/include/crypto/engine.h b/include/crypto/engine.h index e29cd67f93c7..b92d7ff1528a 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -24,7 +24,9 @@ * @idling: the engine is entering idle state * @busy: request pump is busy * @running: the engine is on working - * @cur_req_prepared: current request is prepared + * @retry_support: indication that the hardware allows re-execution + * of a failed backlog request + * crypto-engine, in head position to keep order * @list: link with the global crypto engine list * @queue_lock: spinlock to syncronise access to request queue * @queue: the crypto queue of the engine @@ -45,7 +47,8 @@ struct crypto_engine { bool idling; bool busy; bool running; - bool cur_req_prepared; + + bool retry_support; struct list_head list; spinlock_t queue_lock; @@ -102,6 +105,9 @@ void crypto_finalize_skcipher_request(struct crypto_engine *engine, int crypto_engine_start(struct crypto_engine *engine); int crypto_engine_stop(struct crypto_engine *engine); struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); +struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, + bool retry_support, + bool rt, int qlen); int crypto_engine_exit(struct crypto_engine *engine); #endif /* _CRYPTO_ENGINE_H */ -- cgit v1.2.3-59-g8ed1b From 8d90822643ad8405eeb2ceafb20afd6952c24606 Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Tue, 28 Apr 2020 18:49:05 +0300 Subject: crypto: engine - support for batch requests Added support for batch requests, per crypto engine. A new callback is added, do_batch_requests, which executes a batch of requests. This has the crypto_engine structure as argument (for cases when more than one crypto-engine is used). The crypto_engine_alloc_init_and_set function, initializes crypto-engine, but also, sets the do_batch_requests callback. On crypto_pump_requests, if do_batch_requests callback is implemented in a driver, this will be executed. 
The link between the requests will be done in driver, if possible. do_batch_requests is available only if the hardware has support for multiple request. Signed-off-by: Iuliana Prodan Signed-off-by: Herbert Xu --- crypto/crypto_engine.c | 27 ++++++++++++++++++++++++++- include/crypto/engine.h | 5 +++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index ee192731a997..412149e8e3dc 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -227,6 +227,18 @@ retry: out: spin_unlock_irqrestore(&engine->queue_lock, flags); + + /* + * Batch requests is possible only if + * hardware can enqueue multiple requests + */ + if (engine->do_batch_requests) { + ret = engine->do_batch_requests(engine); + if (ret) + dev_err(engine->dev, "failed to do batch requests: %d\n", + ret); + } + return; } @@ -456,6 +468,12 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop); * crypto-engine queue. * @dev: the device attached with one hardware engine * @retry_support: whether hardware has support for retry mechanism + * @cbk_do_batch: pointer to a callback function to be invoked when executing a + * a batch of requests. + * This has the form: + * callback(struct crypto_engine *engine) + * where: + * @engine: the crypto engine structure. * @rt: whether this queue is set to run as a realtime task * @qlen: maximum size of the crypto-engine queue * @@ -464,6 +482,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop); */ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, bool retry_support, + int (*cbk_do_batch)(struct crypto_engine *engine), bool rt, int qlen) { struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 }; @@ -483,6 +502,12 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, engine->idling = false; engine->retry_support = retry_support; engine->priv_data = dev; + /* + * Batch requests is possible only if + * hardware has support for retry mechanism. + */ + engine->do_batch_requests = retry_support ? cbk_do_batch : NULL; + snprintf(engine->name, sizeof(engine->name), "%s-engine", dev_name(dev)); @@ -516,7 +541,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set); */ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt) { - return crypto_engine_alloc_init_and_set(dev, false, rt, + return crypto_engine_alloc_init_and_set(dev, false, NULL, rt, CRYPTO_ENGINE_MAX_QLEN); } EXPORT_SYMBOL_GPL(crypto_engine_alloc_init); diff --git a/include/crypto/engine.h b/include/crypto/engine.h index b92d7ff1528a..3f06e40d063a 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -37,6 +37,8 @@ * @unprepare_crypt_hardware: there are currently no more requests on the * queue so the subsystem notifies the driver that it may relax the * hardware by issuing this call + * @do_batch_requests: execute a batch of requests. Depends on multiple + * requests support. 
* @kworker: kthread worker struct for request pump * @pump_requests: work struct for scheduling work to the request pump * @priv_data: the engine private data @@ -59,6 +61,8 @@ struct crypto_engine { int (*prepare_crypt_hardware)(struct crypto_engine *engine); int (*unprepare_crypt_hardware)(struct crypto_engine *engine); + int (*do_batch_requests)(struct crypto_engine *engine); + struct kthread_worker *kworker; struct kthread_work pump_requests; @@ -107,6 +111,7 @@ int crypto_engine_stop(struct crypto_engine *engine); struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, bool retry_support, + int (*cbk_do_batch)(struct crypto_engine *engine), bool rt, int qlen); int crypto_engine_exit(struct crypto_engine *engine); -- cgit v1.2.3-59-g8ed1b From c549226926ce9352dc6415931f4e96445759215d Mon Sep 17 00:00:00 2001 From: Barry Song Date: Thu, 30 Apr 2020 17:10:18 +1200 Subject: crypto: acomp - search acomp with scomp backend in crypto_has_acomp users may call crypto_has_acomp to confirm the existence of acomp before using crypto_acomp APIs. Right now, many acomp have scomp backend, for example, lz4, lzo, deflate etc. crypto_has_acomp will return false for them even though they support acomp APIs. Signed-off-by: Barry Song Signed-off-by: Herbert Xu --- include/crypto/acompress.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index d873f999b334..2b4d2b06ccbd 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -157,7 +157,7 @@ static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_ACOMPRESS; - mask |= CRYPTO_ALG_TYPE_MASK; + mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK; return crypto_has_alg(alg_name, type, mask); } -- cgit v1.2.3-59-g8ed1b From e0664ebcea6ac5e16da703409fb4bd61f8cd37d9 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 30 Apr 2020 08:13:53 +0000 Subject: crypto: drbg - fix error return code in drbg_alloc_state() Fix to return negative error code -ENOMEM from the kzalloc error handling case instead of 0, as done elsewhere in this function. 
Reported-by: Xiumei Mu Fixes: db07cd26ac6a ("crypto: drbg - add FIPS 140-2 CTRNG for noise source") Cc: Signed-off-by: Wei Yongjun Reviewed-by: Stephan Mueller Signed-off-by: Herbert Xu --- crypto/drbg.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crypto/drbg.c b/crypto/drbg.c index e57901d8545b..37526eb8c5d5 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1306,8 +1306,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags), GFP_KERNEL); - if (!drbg->prev) + if (!drbg->prev) { + ret = -ENOMEM; goto fini; + } drbg->fips_primed = false; } -- cgit v1.2.3-59-g8ed1b From d099ea6e6fde5f311bea5bcdadaa85fc3af79259 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 30 Apr 2020 23:30:43 +0200 Subject: crypto - Avoid free() namespace collision gcc-10 complains about using the name of a standard library function in the kernel, as we are not building with -ffreestanding: crypto/xts.c:325:13: error: conflicting types for built-in function 'free'; expected 'void(void *)' [-Werror=builtin-declaration-mismatch] 325 | static void free(struct skcipher_instance *inst) | ^~~~ crypto/lrw.c:290:13: error: conflicting types for built-in function 'free'; expected 'void(void *)' [-Werror=builtin-declaration-mismatch] 290 | static void free(struct skcipher_instance *inst) | ^~~~ crypto/lrw.c:27:1: note: 'free' is declared in header '' The xts and lrw cipher implementations run into this because they do not use the conventional namespaced function names. It might be better to rename all local functions in those files to help with things like 'ctags' and 'grep', but just renaming these two avoids the build issue. I picked the more verbose crypto_xts_free() and crypto_lrw_free() names for consistency with several other drivers that do use namespaced function names. 
Fixes: f1c131b45410 ("crypto: xts - Convert to skcipher") Fixes: 700cb3f5fe75 ("crypto: lrw - Convert to skcipher") Signed-off-by: Arnd Bergmann Signed-off-by: Herbert Xu --- crypto/lrw.c | 6 +++--- crypto/xts.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/crypto/lrw.c b/crypto/lrw.c index 376d7ed3f1f8..5b07a7c09296 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -287,7 +287,7 @@ static void exit_tfm(struct crypto_skcipher *tfm) crypto_free_skcipher(ctx->child); } -static void free(struct skcipher_instance *inst) +static void crypto_lrw_free(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); @@ -400,12 +400,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.encrypt = encrypt; inst->alg.decrypt = decrypt; - inst->free = free; + inst->free = crypto_lrw_free; err = skcipher_register_instance(tmpl, inst); if (err) { err_free_inst: - free(inst); + crypto_lrw_free(inst); } return err; } diff --git a/crypto/xts.c b/crypto/xts.c index dbdd8af629e6..3565f3b863a6 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -322,7 +322,7 @@ static void exit_tfm(struct crypto_skcipher *tfm) crypto_free_cipher(ctx->tweak); } -static void free(struct skcipher_instance *inst) +static void crypto_xts_free(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); @@ -434,12 +434,12 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) inst->alg.encrypt = encrypt; inst->alg.decrypt = decrypt; - inst->free = free; + inst->free = crypto_xts_free; err = skcipher_register_instance(tmpl, inst); if (err) { err_free_inst: - free(inst); + crypto_xts_free(inst); } return err; } -- cgit v1.2.3-59-g8ed1b From 13855fd8ce641e567c1b972048b5fd1451984e88 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 09:42:29 -0700 Subject: crypto: lib/sha256 - return void The SHA-256 / SHA-224 library functions can't fail, so remove the useless return value. Also long as the declarations are being changed anyway, also fix some parameter names in the declarations to match the definitions. Signed-off-by: Eric Biggers Reviewed-by: Jason A. 
Donenfeld Signed-off-by: Herbert Xu --- crypto/sha256_generic.c | 14 +++++++++----- include/crypto/sha.h | 18 ++++++------------ include/crypto/sha256_base.h | 6 ++++-- lib/crypto/sha256.c | 20 ++++++++------------ 4 files changed, 27 insertions(+), 31 deletions(-) diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index f2d7095d4f2d..88156e3e2a33 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -35,27 +35,31 @@ EXPORT_SYMBOL_GPL(sha256_zero_message_hash); static int crypto_sha256_init(struct shash_desc *desc) { - return sha256_init(shash_desc_ctx(desc)); + sha256_init(shash_desc_ctx(desc)); + return 0; } static int crypto_sha224_init(struct shash_desc *desc) { - return sha224_init(shash_desc_ctx(desc)); + sha224_init(shash_desc_ctx(desc)); + return 0; } int crypto_sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha256_update(shash_desc_ctx(desc), data, len); + sha256_update(shash_desc_ctx(desc), data, len); + return 0; } EXPORT_SYMBOL(crypto_sha256_update); static int crypto_sha256_final(struct shash_desc *desc, u8 *out) { if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE) - return sha224_final(shash_desc_ctx(desc), out); + sha224_final(shash_desc_ctx(desc), out); else - return sha256_final(shash_desc_ctx(desc), out); + sha256_final(shash_desc_ctx(desc), out); + return 0; } int crypto_sha256_finup(struct shash_desc *desc, const u8 *data, diff --git a/include/crypto/sha.h b/include/crypto/sha.h index 5c2132c71900..67aec7245cb7 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -123,7 +123,7 @@ extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, * For details see lib/crypto/sha256.c */ -static inline int sha256_init(struct sha256_state *sctx) +static inline void sha256_init(struct sha256_state *sctx) { sctx->state[0] = SHA256_H0; sctx->state[1] = SHA256_H1; @@ -134,14 +134,11 @@ static inline int sha256_init(struct sha256_state *sctx) sctx->state[6] = SHA256_H6; sctx->state[7] = SHA256_H7; sctx->count = 0; - - return 0; } -extern int sha256_update(struct sha256_state *sctx, const u8 *input, - unsigned int length); -extern int sha256_final(struct sha256_state *sctx, u8 *hash); +void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len); +void sha256_final(struct sha256_state *sctx, u8 *out); -static inline int sha224_init(struct sha256_state *sctx) +static inline void sha224_init(struct sha256_state *sctx) { sctx->state[0] = SHA224_H0; sctx->state[1] = SHA224_H1; @@ -152,11 +149,8 @@ static inline int sha224_init(struct sha256_state *sctx) sctx->state[6] = SHA224_H6; sctx->state[7] = SHA224_H7; sctx->count = 0; - - return 0; } -extern int sha224_update(struct sha256_state *sctx, const u8 *input, - unsigned int length); -extern int sha224_final(struct sha256_state *sctx, u8 *hash); +void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len); +void sha224_final(struct sha256_state *sctx, u8 *out); #endif diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h index cea60cff80bd..6ded110783ae 100644 --- a/include/crypto/sha256_base.h +++ b/include/crypto/sha256_base.h @@ -22,14 +22,16 @@ static inline int sha224_base_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); - return sha224_init(sctx); + sha224_init(sctx); + return 0; } static inline int sha256_base_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); - return sha256_init(sctx); + sha256_init(sctx); + return 
0; } static inline int sha256_base_do_update(struct shash_desc *desc, diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c index 66cb04b0cf4e..2e621697c5c3 100644 --- a/lib/crypto/sha256.c +++ b/lib/crypto/sha256.c @@ -206,7 +206,7 @@ static void sha256_transform(u32 *state, const u8 *input) memzero_explicit(W, 64 * sizeof(u32)); } -int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) +void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) { unsigned int partial, done; const u8 *src; @@ -232,18 +232,16 @@ int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) partial = 0; } memcpy(sctx->buf + partial, src, len - done); - - return 0; } EXPORT_SYMBOL(sha256_update); -int sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len) +void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len) { - return sha256_update(sctx, data, len); + sha256_update(sctx, data, len); } EXPORT_SYMBOL(sha224_update); -static int __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words) +static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words) { __be32 *dst = (__be32 *)out; __be64 bits; @@ -268,19 +266,17 @@ static int __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words) /* Zeroize sensitive information. */ memset(sctx, 0, sizeof(*sctx)); - - return 0; } -int sha256_final(struct sha256_state *sctx, u8 *out) +void sha256_final(struct sha256_state *sctx, u8 *out) { - return __sha256_final(sctx, out, 8); + __sha256_final(sctx, out, 8); } EXPORT_SYMBOL(sha256_final); -int sha224_final(struct sha256_state *sctx, u8 *out) +void sha224_final(struct sha256_state *sctx, u8 *out) { - return __sha256_final(sctx, out, 7); + __sha256_final(sctx, out, 7); } EXPORT_SYMBOL(sha224_final); -- cgit v1.2.3-59-g8ed1b From 822a98b862d5b511826765d64ddf18192fc5b694 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:03 -0700 Subject: crypto: hash - introduce crypto_shash_tfm_digest() Currently the simplest use of the shash API is to use crypto_shash_digest() to digest a whole buffer. However, this still requires allocating a hash descriptor (struct shash_desc). Many users don't really want to preallocate one and instead just use a one-off descriptor on the stack like the following: { SHASH_DESC_ON_STACK(desc, tfm); int err; desc->tfm = tfm; err = crypto_shash_digest(desc, data, len, out); shash_desc_zero(desc); } Wrap this in a new helper function crypto_shash_tfm_digest() that can be used instead of the above. 
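As a usage illustration (an editorial sketch, not part of this patch), a one-shot digest with the new helper can now be written as below; the "sha256" algorithm name and the wrapper function are chosen only for the example.

#include <linux/err.h>
#include <crypto/hash.h>
#include <crypto/sha.h>

/* One-shot SHA-256 of a buffer using the new helper. */
static int example_sha256_oneshot(const u8 *data, unsigned int len,
				  u8 out[SHA256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* replaces SHASH_DESC_ON_STACK() + desc setup + crypto_shash_digest() */
	err = crypto_shash_tfm_digest(tfm, data, len, out);

	crypto_free_shash(tfm);
	return err;
}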
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/shash.c | 16 ++++++++++++++++ include/crypto/hash.h | 19 +++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/crypto/shash.c b/crypto/shash.c index c075b26c2a1d..e6a4b5f39b8c 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -206,6 +206,22 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, } EXPORT_SYMBOL_GPL(crypto_shash_digest); +int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, + unsigned int len, u8 *out) +{ + SHASH_DESC_ON_STACK(desc, tfm); + int err; + + desc->tfm = tfm; + + err = crypto_shash_digest(desc, data, len, out); + + shash_desc_zero(desc); + + return err; +} +EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest); + static int shash_default_export(struct shash_desc *desc, void *out) { memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index cee446c59497..4829d2367eda 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -855,6 +855,25 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, int crypto_shash_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out); +/** + * crypto_shash_tfm_digest() - calculate message digest for buffer + * @tfm: hash transformation object + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This is a simplified version of crypto_shash_digest() for users who don't + * want to allocate their own hash descriptor (shash_desc). Instead, + * crypto_shash_tfm_digest() takes a hash transformation object (crypto_shash) + * directly, and it allocates a hash descriptor on the stack internally. + * Note that this stack allocation may be fairly large. + * + * Context: Any context. + * Return: 0 on success; < 0 if an error occurred. + */ +int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, + unsigned int len, u8 *out); + /** * crypto_shash_export() - extract operational state for message digest * @desc: reference to the operational state handle whose state is exported -- cgit v1.2.3-59-g8ed1b From a221b33b657b911e2cfefc47484b0ab7531b71be Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:04 -0700 Subject: crypto: arm64/aes-glue - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/arm64/crypto/aes-glue.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c index ed5409c6abf4..395bbf64b2ab 100644 --- a/arch/arm64/crypto/aes-glue.c +++ b/arch/arm64/crypto/aes-glue.c @@ -158,7 +158,6 @@ static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm, unsigned int key_len) { struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - SHASH_DESC_ON_STACK(desc, ctx->hash); u8 digest[SHA256_DIGEST_SIZE]; int ret; @@ -166,8 +165,7 @@ static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm, if (ret) return ret; - desc->tfm = ctx->hash; - crypto_shash_digest(desc, in_key, key_len, digest); + crypto_shash_tfm_digest(ctx->hash, in_key, key_len, digest); return aes_expandkey(&ctx->key2, digest, sizeof(digest)); } -- cgit v1.2.3-59-g8ed1b From 1306664fdeeffc92c31ef034e9f2f5516b0d5c97 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:05 -0700 Subject: crypto: essiv - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/essiv.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crypto/essiv.c b/crypto/essiv.c index 465a89c9d1ef..a7f45dbc4ee2 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -66,7 +66,6 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct essiv_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); - SHASH_DESC_ON_STACK(desc, tctx->hash); u8 salt[HASH_MAX_DIGESTSIZE]; int err; @@ -78,8 +77,7 @@ static int essiv_skcipher_setkey(struct crypto_skcipher *tfm, if (err) return err; - desc->tfm = tctx->hash; - err = crypto_shash_digest(desc, key, keylen, salt); + err = crypto_shash_tfm_digest(tctx->hash, key, keylen, salt); if (err) return err; -- cgit v1.2.3-59-g8ed1b From 7e3e48d86b7c96f12896dd7111e4e5ae51c1b7da Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:06 -0700 Subject: crypto: artpec6 - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
Cc: Jesper Nilsson Cc: Lars Persson Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/axis/artpec6_crypto.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index fcf1effc7661..62ba0325a618 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2239,16 +2239,12 @@ artpec6_crypto_hash_set_key(struct crypto_ahash *tfm, blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); if (keylen > blocksize) { - SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash); - - hdesc->tfm = tfm_ctx->child_hash; - tfm_ctx->hmac_key_length = blocksize; - ret = crypto_shash_digest(hdesc, key, keylen, - tfm_ctx->hmac_key); + + ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen, + tfm_ctx->hmac_key); if (ret) return ret; - } else { memcpy(tfm_ctx->hmac_key, key, keylen); tfm_ctx->hmac_key_length = keylen; -- cgit v1.2.3-59-g8ed1b From f32b6775c795b125361ea9181ca4fcfa261a91dd Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:07 -0700 Subject: crypto: ccp - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. Cc: Tom Lendacky Signed-off-by: Eric Biggers Acked-by: Tom Lendacky Signed-off-by: Herbert Xu --- drivers/crypto/ccp/ccp-crypto-sha.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 474e6f1a6a84..b0cc2bd73af8 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -272,9 +272,6 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, { struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); struct crypto_shash *shash = ctx->u.sha.hmac_tfm; - - SHASH_DESC_ON_STACK(sdesc, shash); - unsigned int block_size = crypto_shash_blocksize(shash); unsigned int digest_size = crypto_shash_digestsize(shash); int i, ret; @@ -289,10 +286,8 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, if (key_len > block_size) { /* Must hash the input key */ - sdesc->tfm = shash; - - ret = crypto_shash_digest(sdesc, key, key_len, - ctx->u.sha.key); + ret = crypto_shash_tfm_digest(shash, key, key_len, + ctx->u.sha.key); if (ret) return -EINVAL; -- cgit v1.2.3-59-g8ed1b From 8cbb809794b14703c960b5e2941c9e7fd97b765a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:08 -0700 Subject: crypto: ccree - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
Cc: Gilad Ben-Yossef Signed-off-by: Eric Biggers Acked-by: Gilad Ben-Yossef Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_cipher.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index a84335328f37..872ea3ff1c6b 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -427,12 +427,9 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, int key_len = keylen >> 1; int err; - SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm); - - desc->tfm = ctx_p->shash_tfm; - - err = crypto_shash_digest(desc, ctx_p->user.key, key_len, - ctx_p->user.key + key_len); + err = crypto_shash_tfm_digest(ctx_p->shash_tfm, + ctx_p->user.key, key_len, + ctx_p->user.key + key_len); if (err) { dev_err(dev, "Failed to hash ESSIV key.\n"); return err; -- cgit v1.2.3-59-g8ed1b From 61c38e3a94f26035104aec643ee17e80e29ff329 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:09 -0700 Subject: crypto: hisilicon/sec2 - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. Cc: Zaibo Xu Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_crypto.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 7f1c6a31b82f..848ab492d26e 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -832,7 +832,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, struct crypto_authenc_keys *keys) { struct crypto_shash *hash_tfm = ctx->hash_tfm; - SHASH_DESC_ON_STACK(shash, hash_tfm); int blocksize, ret; if (!keys->authkeylen) { @@ -842,8 +841,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, blocksize = crypto_shash_blocksize(hash_tfm); if (keys->authkeylen > blocksize) { - ret = crypto_shash_digest(shash, keys->authkey, - keys->authkeylen, ctx->a_key); + ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey, + keys->authkeylen, ctx->a_key); if (ret) { pr_err("hisi_sec2: aead auth digest error!\n"); return -EINVAL; -- cgit v1.2.3-59-g8ed1b From e0077ea8ee1774cb99cf9adf10dd4e6dcbf363b0 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:10 -0700 Subject: crypto: mediatek - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/mediatek/mtk-sha.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c index bd6309e57ab8..da3f0b8814aa 100644 --- a/drivers/crypto/mediatek/mtk-sha.c +++ b/drivers/crypto/mediatek/mtk-sha.c @@ -805,12 +805,9 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key, size_t ds = crypto_shash_digestsize(bctx->shash); int err, i; - SHASH_DESC_ON_STACK(shash, bctx->shash); - - shash->tfm = bctx->shash; - if (keylen > bs) { - err = crypto_shash_digest(shash, key, keylen, bctx->ipad); + err = crypto_shash_tfm_digest(bctx->shash, key, keylen, + bctx->ipad); if (err) return err; keylen = ds; -- cgit v1.2.3-59-g8ed1b From ce8e04888dd873179bc3979b0574cc7827071df5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:11 -0700 Subject: crypto: n2 - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/n2_core.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index f5c468f2cc82..6a828bbecea4 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -462,7 +462,6 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key, struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); struct crypto_shash *child_shash = ctx->child_shash; struct crypto_ahash *fallback_tfm; - SHASH_DESC_ON_STACK(shash, child_shash); int err, bs, ds; fallback_tfm = ctx->base.fallback_tfm; @@ -470,14 +469,12 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key, if (err) return err; - shash->tfm = child_shash; - bs = crypto_shash_blocksize(child_shash); ds = crypto_shash_digestsize(child_shash); BUG_ON(ds > N2_HASH_KEY_MAX); if (keylen > bs) { - err = crypto_shash_digest(shash, key, keylen, - ctx->hash_key); + err = crypto_shash_tfm_digest(child_shash, key, keylen, + ctx->hash_key); if (err) return err; keylen = ds; -- cgit v1.2.3-59-g8ed1b From e29ba412bdfe15233abff3b49a46763d4a6dd7d9 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:12 -0700 Subject: crypto: omap-sham - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index e4072cd38585..d600c5b3fdd3 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1245,16 +1245,6 @@ static int omap_sham_update(struct ahash_request *req) return omap_sham_enqueue(req, OP_UPDATE); } -static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags, - const u8 *data, unsigned int len, u8 *out) -{ - SHASH_DESC_ON_STACK(shash, tfm); - - shash->tfm = tfm; - - return crypto_shash_digest(shash, data, len, out); -} - static int omap_sham_final_shash(struct ahash_request *req) { struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); @@ -1270,9 +1260,8 @@ static int omap_sham_final_shash(struct ahash_request *req) !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags)) offset = get_block_size(ctx); - return omap_sham_shash_digest(tctx->fallback, req->base.flags, - ctx->buffer + offset, - ctx->bufcnt - offset, req->result); + return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset, + ctx->bufcnt - offset, req->result); } static int omap_sham_final(struct ahash_request *req) @@ -1351,9 +1340,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, return err; if (keylen > bs) { - err = omap_sham_shash_digest(bctx->shash, - crypto_shash_get_flags(bctx->shash), - key, keylen, bctx->ipad); + err = crypto_shash_tfm_digest(bctx->shash, key, keylen, + bctx->ipad); if (err) return err; keylen = ds; -- cgit v1.2.3-59-g8ed1b From ecca1ad60cdfc061c1242490637e9e806ff1d884 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:13 -0700 Subject: crypto: s5p-sss - use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
Cc: Krzysztof Kozlowski Cc: Vladimir Zapolskiy Cc: Kamil Konieczny Signed-off-by: Eric Biggers Acked-by: Krzysztof Kozlowski Signed-off-by: Herbert Xu --- drivers/crypto/s5p-sss.c | 39 ++++++--------------------------------- 1 file changed, 6 insertions(+), 33 deletions(-) diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 2a16800d2579..341433fbcc4a 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -1520,37 +1520,6 @@ static int s5p_hash_update(struct ahash_request *req) return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */ } -/** - * s5p_hash_shash_digest() - calculate shash digest - * @tfm: crypto transformation - * @flags: tfm flags - * @data: input data - * @len: length of data - * @out: output buffer - */ -static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags, - const u8 *data, unsigned int len, u8 *out) -{ - SHASH_DESC_ON_STACK(shash, tfm); - - shash->tfm = tfm; - - return crypto_shash_digest(shash, data, len, out); -} - -/** - * s5p_hash_final_shash() - calculate shash digest - * @req: AHASH request - */ -static int s5p_hash_final_shash(struct ahash_request *req) -{ - struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); - struct s5p_hash_reqctx *ctx = ahash_request_ctx(req); - - return s5p_hash_shash_digest(tctx->fallback, req->base.flags, - ctx->buffer, ctx->bufcnt, req->result); -} - /** * s5p_hash_final() - close up hash and calculate digest * @req: AHASH request @@ -1582,8 +1551,12 @@ static int s5p_hash_final(struct ahash_request *req) if (ctx->error) return -EINVAL; /* uncompleted hash is not needed */ - if (!ctx->digcnt && ctx->bufcnt < BUFLEN) - return s5p_hash_final_shash(req); + if (!ctx->digcnt && ctx->bufcnt < BUFLEN) { + struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm); + + return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer, + ctx->bufcnt, req->result); + } return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */ } -- cgit v1.2.3-59-g8ed1b From 96a5aa721df8e740f8d33fe7eae18acc157a000d Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:14 -0700 Subject: nfc: s3fwrn5: use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. Cc: Robert Baldyga Cc: Krzysztof Opasiak Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- drivers/nfc/s3fwrn5/firmware.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c index de613c623a2c..69857f080704 100644 --- a/drivers/nfc/s3fwrn5/firmware.c +++ b/drivers/nfc/s3fwrn5/firmware.c @@ -434,15 +434,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info) goto out; } - { - SHASH_DESC_ON_STACK(desc, tfm); - - desc->tfm = tfm; - - ret = crypto_shash_digest(desc, fw->image, image_size, - hash_data); - shash_desc_zero(desc); - } + ret = crypto_shash_tfm_digest(tfm, fw->image, image_size, hash_data); crypto_free_shash(tfm); if (ret) { -- cgit v1.2.3-59-g8ed1b From 3e185a56eb6968927049f2b3b8c95b3636d77ffd Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:15 -0700 Subject: fscrypt: use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- fs/crypto/fname.c | 7 +------ fs/crypto/hkdf.c | 6 +----- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 4c212442a8f7..5c9fb013e3f7 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -83,13 +83,8 @@ static int fscrypt_do_sha256(const u8 *data, unsigned int data_len, u8 *result) tfm = prev_tfm; } } - { - SHASH_DESC_ON_STACK(desc, tfm); - desc->tfm = tfm; - - return crypto_shash_digest(desc, data, data_len, result); - } + return crypto_shash_tfm_digest(tfm, data, data_len, result); } static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c index efb95bd19a89..0cba7928446d 100644 --- a/fs/crypto/hkdf.c +++ b/fs/crypto/hkdf.c @@ -44,17 +44,13 @@ static int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm, unsigned int ikmlen, u8 prk[HKDF_HASHLEN]) { static const u8 default_salt[HKDF_HASHLEN]; - SHASH_DESC_ON_STACK(desc, hmac_tfm); int err; err = crypto_shash_setkey(hmac_tfm, default_salt, HKDF_HASHLEN); if (err) return err; - desc->tfm = hmac_tfm; - err = crypto_shash_digest(desc, ikm, ikmlen, prk); - shash_desc_zero(desc); - return err; + return crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk); } /* -- cgit v1.2.3-59-g8ed1b From 1979811388050dc3c1533b5e3e43a6d50b7ad39b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:16 -0700 Subject: ecryptfs: use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. Cc: ecryptfs@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- fs/ecryptfs/crypto.c | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 2c449aed1b92..0681540c48d9 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -48,18 +48,6 @@ void ecryptfs_from_hex(char *dst, char *src, int dst_size) } } -static int ecryptfs_hash_digest(struct crypto_shash *tfm, - char *src, int len, char *dst) -{ - SHASH_DESC_ON_STACK(desc, tfm); - int err; - - desc->tfm = tfm; - err = crypto_shash_digest(desc, src, len, dst); - shash_desc_zero(desc); - return err; -} - /** * ecryptfs_calculate_md5 - calculates the md5 of @src * @dst: Pointer to 16 bytes of allocated memory @@ -74,11 +62,8 @@ static int ecryptfs_calculate_md5(char *dst, struct ecryptfs_crypt_stat *crypt_stat, char *src, int len) { - struct crypto_shash *tfm; - int rc = 0; + int rc = crypto_shash_tfm_digest(crypt_stat->hash_tfm, src, len, dst); - tfm = crypt_stat->hash_tfm; - rc = ecryptfs_hash_digest(tfm, src, len, dst); if (rc) { printk(KERN_ERR "%s: Error computing crypto hash; rc = [%d]\n", -- cgit v1.2.3-59-g8ed1b From ea794db2646a3660647dd2f2c8f6f2770acc3596 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:17 -0700 Subject: nfsd: use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. Cc: linux-nfs@vger.kernel.org Signed-off-by: Eric Biggers Acked-by: J. 
Bruce Fields Signed-off-by: Herbert Xu --- fs/nfsd/nfs4recover.c | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index a8fb18609146..9e40dfecf1b1 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -127,16 +127,8 @@ nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname) goto out; } - { - SHASH_DESC_ON_STACK(desc, tfm); - - desc->tfm = tfm; - - status = crypto_shash_digest(desc, clname->data, clname->len, - cksum.data); - shash_desc_zero(desc); - } - + status = crypto_shash_tfm_digest(tfm, clname->data, clname->len, + cksum.data); if (status) goto out; @@ -1148,7 +1140,6 @@ nfsd4_cld_create_v2(struct nfs4_client *clp) struct crypto_shash *tfm = cn->cn_tfm; struct xdr_netobj cksum; char *principal = NULL; - SHASH_DESC_ON_STACK(desc, tfm); /* Don't upcall if it's already stored */ if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags)) @@ -1170,16 +1161,14 @@ nfsd4_cld_create_v2(struct nfs4_client *clp) else if (clp->cl_cred.cr_principal) principal = clp->cl_cred.cr_principal; if (principal) { - desc->tfm = tfm; cksum.len = crypto_shash_digestsize(tfm); cksum.data = kmalloc(cksum.len, GFP_KERNEL); if (cksum.data == NULL) { ret = -ENOMEM; goto out; } - ret = crypto_shash_digest(desc, principal, strlen(principal), - cksum.data); - shash_desc_zero(desc); + ret = crypto_shash_tfm_digest(tfm, principal, strlen(principal), + cksum.data); if (ret) { kfree(cksum.data); goto out; @@ -1343,7 +1332,6 @@ nfsd4_cld_check_v2(struct nfs4_client *clp) struct crypto_shash *tfm = cn->cn_tfm; struct xdr_netobj cksum; char *principal = NULL; - SHASH_DESC_ON_STACK(desc, tfm); /* did we already find that this client is stable? */ if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags)) @@ -1381,14 +1369,12 @@ found: principal = clp->cl_cred.cr_principal; if (principal == NULL) return -ENOENT; - desc->tfm = tfm; cksum.len = crypto_shash_digestsize(tfm); cksum.data = kmalloc(cksum.len, GFP_KERNEL); if (cksum.data == NULL) return -ENOENT; - status = crypto_shash_digest(desc, principal, strlen(principal), - cksum.data); - shash_desc_zero(desc); + status = crypto_shash_tfm_digest(tfm, principal, + strlen(principal), cksum.data); if (status) { kfree(cksum.data); return -ENOENT; -- cgit v1.2.3-59-g8ed1b From f80df3851246ce5b9b7dd9625f3438e0f39383f1 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:18 -0700 Subject: ubifs: use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
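(The reason this pattern keeps showing up as separate little functions is stack usage: SHASH_DESC_ON_STACK() reserves a worst-case sized shash_desc on the stack, roughly as sketched below with the macro's name mangling omitted; exact details vary by kernel version. That is also why the replay.c hunk further down keeps its "split out for stack usage" comment.)

/* Roughly what SHASH_DESC_ON_STACK(shash, tfm) expands to: */
char __shash_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE]
        __aligned(__alignof__(struct shash_desc));
struct shash_desc *shash = (struct shash_desc *)__shash_desc;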
Cc: linux-mtd@lists.infradead.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- fs/ubifs/auth.c | 20 +++----------------- fs/ubifs/master.c | 9 +++------ fs/ubifs/replay.c | 14 +++----------- 3 files changed, 9 insertions(+), 34 deletions(-) diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c index 8cdbd53d780c..1e374baafc52 100644 --- a/fs/ubifs/auth.c +++ b/fs/ubifs/auth.c @@ -31,15 +31,9 @@ int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node, u8 *hash) { const struct ubifs_ch *ch = node; - SHASH_DESC_ON_STACK(shash, c->hash_tfm); - int err; - - shash->tfm = c->hash_tfm; - err = crypto_shash_digest(shash, node, le32_to_cpu(ch->len), hash); - if (err < 0) - return err; - return 0; + return crypto_shash_tfm_digest(c->hash_tfm, node, le32_to_cpu(ch->len), + hash); } /** @@ -53,15 +47,7 @@ int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node, static int ubifs_hash_calc_hmac(const struct ubifs_info *c, const u8 *hash, u8 *hmac) { - SHASH_DESC_ON_STACK(shash, c->hmac_tfm); - int err; - - shash->tfm = c->hmac_tfm; - - err = crypto_shash_digest(shash, hash, c->hash_len, hmac); - if (err < 0) - return err; - return 0; + return crypto_shash_tfm_digest(c->hmac_tfm, hash, c->hash_len, hmac); } /** diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c index 52a85c01397e..911d0555b9f2 100644 --- a/fs/ubifs/master.c +++ b/fs/ubifs/master.c @@ -68,12 +68,9 @@ static int mst_node_check_hash(const struct ubifs_info *c, u8 calc[UBIFS_MAX_HASH_LEN]; const void *node = mst; - SHASH_DESC_ON_STACK(shash, c->hash_tfm); - - shash->tfm = c->hash_tfm; - - crypto_shash_digest(shash, node + sizeof(struct ubifs_ch), - UBIFS_MST_NODE_SZ - sizeof(struct ubifs_ch), calc); + crypto_shash_tfm_digest(c->hash_tfm, node + sizeof(struct ubifs_ch), + UBIFS_MST_NODE_SZ - sizeof(struct ubifs_ch), + calc); if (ubifs_check_hash(c, expected, calc)) return -EPERM; diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index b28ac4dfb407..c4047a8f6410 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -558,7 +558,7 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud) return data == 0xFFFFFFFF; } -/* authenticate_sleb_hash and authenticate_sleb_hmac are split out for stack usage */ +/* authenticate_sleb_hash is split out for stack usage */ static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_hash, u8 *hash) { SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm); @@ -569,15 +569,6 @@ static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_h return crypto_shash_final(hash_desc, hash); } -static int authenticate_sleb_hmac(struct ubifs_info *c, u8 *hash, u8 *hmac) -{ - SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm); - - hmac_desc->tfm = c->hmac_tfm; - - return crypto_shash_digest(hmac_desc, hash, c->hash_len, hmac); -} - /** * authenticate_sleb - authenticate one scan LEB * @c: UBIFS file-system description object @@ -624,7 +615,8 @@ static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, if (err) goto out; - err = authenticate_sleb_hmac(c, hash, hmac); + err = crypto_shash_tfm_digest(c->hmac_tfm, hash, + c->hash_len, hmac); if (err) goto out; -- cgit v1.2.3-59-g8ed1b From ec0bf6edc4ad80b722ebec12a3c7eb392e28d2cb Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:19 -0700 Subject: Bluetooth: use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does 
this for us. Cc: linux-bluetooth@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- net/bluetooth/smp.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 1476a91ce935..d022f126eb02 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -170,7 +170,6 @@ static int aes_cmac(struct crypto_shash *tfm, const u8 k[16], const u8 *m, size_t len, u8 mac[16]) { uint8_t tmp[16], mac_msb[16], msg_msb[CMAC_MSG_MAX]; - SHASH_DESC_ON_STACK(desc, tfm); int err; if (len > CMAC_MSG_MAX) @@ -181,8 +180,6 @@ static int aes_cmac(struct crypto_shash *tfm, const u8 k[16], const u8 *m, return -EINVAL; } - desc->tfm = tfm; - /* Swap key and message from LSB to MSB */ swap_buf(k, tmp, 16); swap_buf(m, msg_msb, len); @@ -196,8 +193,7 @@ static int aes_cmac(struct crypto_shash *tfm, const u8 k[16], const u8 *m, return err; } - err = crypto_shash_digest(desc, msg_msb, len, mac_msb); - shash_desc_zero(desc); + err = crypto_shash_tfm_digest(tfm, msg_msb, len, mac_msb); if (err) { BT_ERR("Hash computation error %d", err); return err; -- cgit v1.2.3-59-g8ed1b From 75b93c635482dead3071fe60180b12191d3b3b0f Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:20 -0700 Subject: sctp: use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. Cc: linux-sctp@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- net/sctp/auth.c | 10 ++-------- net/sctp/sm_make_chunk.c | 23 ++++++++--------------- 2 files changed, 10 insertions(+), 23 deletions(-) diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 4278764d82b8..83e97e8892e0 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -741,14 +741,8 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc, if (crypto_shash_setkey(tfm, &asoc_key->data[0], asoc_key->len)) goto free; - { - SHASH_DESC_ON_STACK(desc, tfm); - - desc->tfm = tfm; - crypto_shash_digest(desc, (u8 *)auth, - end - (unsigned char *)auth, digest); - shash_desc_zero(desc); - } + crypto_shash_tfm_digest(tfm, (u8 *)auth, end - (unsigned char *)auth, + digest); free: if (free_key) diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 09050c1d5517..c786215ba69a 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1666,17 +1666,14 @@ static struct sctp_cookie_param *sctp_pack_cookie( ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len); if (sctp_sk(ep->base.sk)->hmac) { - SHASH_DESC_ON_STACK(desc, sctp_sk(ep->base.sk)->hmac); + struct crypto_shash *tfm = sctp_sk(ep->base.sk)->hmac; int err; /* Sign the message. */ - desc->tfm = sctp_sk(ep->base.sk)->hmac; - - err = crypto_shash_setkey(desc->tfm, ep->secret_key, + err = crypto_shash_setkey(tfm, ep->secret_key, sizeof(ep->secret_key)) ?: - crypto_shash_digest(desc, (u8 *)&cookie->c, bodysize, - cookie->signature); - shash_desc_zero(desc); + crypto_shash_tfm_digest(tfm, (u8 *)&cookie->c, bodysize, + cookie->signature); if (err) goto free_cookie; } @@ -1737,17 +1734,13 @@ struct sctp_association *sctp_unpack_cookie( /* Check the signature. 
*/ { - SHASH_DESC_ON_STACK(desc, sctp_sk(ep->base.sk)->hmac); + struct crypto_shash *tfm = sctp_sk(ep->base.sk)->hmac; int err; - desc->tfm = sctp_sk(ep->base.sk)->hmac; - - err = crypto_shash_setkey(desc->tfm, ep->secret_key, + err = crypto_shash_setkey(tfm, ep->secret_key, sizeof(ep->secret_key)) ?: - crypto_shash_digest(desc, (u8 *)bear_cookie, bodysize, - digest); - shash_desc_zero(desc); - + crypto_shash_tfm_digest(tfm, (u8 *)bear_cookie, bodysize, + digest); if (err) { *error = -SCTP_IERROR_NOMEM; goto fail; -- cgit v1.2.3-59-g8ed1b From bce395eea0f2da2b9ca2e78f101ade353686ee5c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:21 -0700 Subject: KEYS: encrypted: use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. Cc: keyrings@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- security/keys/encrypted-keys/encrypted.c | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index f6797ba44bf7..14cf81d1a30b 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c @@ -323,19 +323,6 @@ error: return ukey; } -static int calc_hash(struct crypto_shash *tfm, u8 *digest, - const u8 *buf, unsigned int buflen) -{ - SHASH_DESC_ON_STACK(desc, tfm); - int err; - - desc->tfm = tfm; - - err = crypto_shash_digest(desc, buf, buflen, digest); - shash_desc_zero(desc); - return err; -} - static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen, const u8 *buf, unsigned int buflen) { @@ -351,7 +338,7 @@ static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen, err = crypto_shash_setkey(tfm, key, keylen); if (!err) - err = calc_hash(tfm, digest, buf, buflen); + err = crypto_shash_tfm_digest(tfm, buf, buflen, digest); crypto_free_shash(tfm); return err; } @@ -381,7 +368,8 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type, memcpy(derived_buf + strlen(derived_buf) + 1, master_key, master_keylen); - ret = calc_hash(hash_tfm, derived_key, derived_buf, derived_buf_len); + ret = crypto_shash_tfm_digest(hash_tfm, derived_buf, derived_buf_len, + derived_key); kzfree(derived_buf); return ret; } -- cgit v1.2.3-59-g8ed1b From 85fc78b80f15d723db3aa8f368b414ee70a1937c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 1 May 2020 22:31:22 -0700 Subject: ASoC: cros_ec_codec: use crypto_shash_tfm_digest() Instead of manually allocating a 'struct shash_desc' on the stack and calling crypto_shash_digest(), switch to using the new helper function crypto_shash_tfm_digest() which does this for us. 
Cc: Cheng-Yi Chiang Cc: Enric Balletbo i Serra Cc: Guenter Roeck Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- sound/soc/codecs/cros_ec_codec.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c index d3dc42aa6825..bfdd852bdc0d 100644 --- a/sound/soc/codecs/cros_ec_codec.c +++ b/sound/soc/codecs/cros_ec_codec.c @@ -115,14 +115,7 @@ static int calculate_sha256(struct cros_ec_codec_priv *priv, return PTR_ERR(tfm); } - { - SHASH_DESC_ON_STACK(desc, tfm); - - desc->tfm = tfm; - - crypto_shash_digest(desc, buf, size, digest); - shash_desc_zero(desc); - } + crypto_shash_tfm_digest(tfm, buf, size, digest); crypto_free_shash(tfm); -- cgit v1.2.3-59-g8ed1b From ac0ad93df7136e27d2a9c5ec554547695f581d0e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 2 May 2020 11:24:21 -0700 Subject: mptcp: use SHA256_BLOCK_SIZE, not SHA_MESSAGE_BYTES In preparation for naming the SHA-1 stuff in <linux/cryptohash.h> properly and moving it to a more appropriate header, fix the HMAC-SHA256 code in mptcp_crypto_hmac_sha() to use SHA256_BLOCK_SIZE instead of "SHA_MESSAGE_BYTES" which is actually the SHA-1 block size. (Fortunately these are both 64 bytes, so this wasn't a "real" bug...) Cc: Paolo Abeni Cc: mptcp@lists.01.org Signed-off-by: Eric Biggers Reviewed-by: Matthieu Baerts Signed-off-by: Herbert Xu --- net/mptcp/crypto.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/mptcp/crypto.c b/net/mptcp/crypto.c index c151628bd416..81b06d875f92 100644 --- a/net/mptcp/crypto.c +++ b/net/mptcp/crypto.c @@ -61,7 +61,7 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac) put_unaligned_be64(key2, key2be); /* Generate key xored with ipad */ - memset(input, 0x36, SHA_MESSAGE_BYTES); + memset(input, 0x36, SHA256_BLOCK_SIZE); for (i = 0; i < 8; i++) input[i] ^= key1be[i]; for (i = 0; i < 8; i++) @@ -78,7 +78,7 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac) sha256_final(&state, &input[SHA256_BLOCK_SIZE]); /* Prepare second part of hmac */ - memset(input, 0x5C, SHA_MESSAGE_BYTES); + memset(input, 0x5C, SHA256_BLOCK_SIZE); for (i = 0; i < 8; i++) input[i] ^= key1be[i]; for (i = 0; i < 8; i++) -- cgit v1.2.3-59-g8ed1b From 1c4b3c409998a7d63ed9b590e3601cf7c4d102b5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 2 May 2020 11:24:22 -0700 Subject: crypto: powerpc/sha1 - remove unused temporary workspace The PowerPC implementation of SHA-1 doesn't actually use the 16-word temporary array that's passed to the assembly code. This was probably meant to correspond to the 'W' array that lib/sha1.c uses. However, in sha1-powerpc-asm.S these values are actually stored in GPRs 16-31. Referencing SHA_WORKSPACE_WORDS from this code also isn't appropriate, since it's an implementation detail of lib/sha1.c. Therefore, just remove this unneeded array.
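(At this point in the series the generic C code still takes a caller-supplied workspace while the PowerPC assembly never did; the two prototypes side by side, sketched from the hunks nearby:)

/* lib/sha1.c: the caller passes 16 words of scratch for the message schedule */
void sha_transform(__u32 *digest, const char *data, __u32 *W);

/* sha1-powerpc-asm.S keeps the schedule in GPRs 16-31, so no scratch argument is needed */
void powerpc_sha_transform(u32 *state, const u8 *src);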
Tested with: export ARCH=powerpc CROSS_COMPILE=powerpc-linux-gnu- make mpc85xx_defconfig cat >> .config << EOF # CONFIG_MODULES is not set # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_DEBUG_KERNEL=y CONFIG_CRYPTO_MANAGER_EXTRA_TESTS=y CONFIG_CRYPTO_SHA1_PPC=y EOF make olddefconfig make -j32 qemu-system-ppc -M mpc8544ds -cpu e500 -nographic \ -kernel arch/powerpc/boot/zImage \ -append "cryptomgr.fuzz_iterations=1000 cryptomgr.panic_on_fail=1" Cc: linuxppc-dev@lists.ozlabs.org Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Cc: Paul Mackerras Signed-off-by: Eric Biggers Acked-by: Michael Ellerman (powerpc) Signed-off-by: Herbert Xu --- arch/powerpc/crypto/sha1.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c index 7b43fc352089..db46b6130a96 100644 --- a/arch/powerpc/crypto/sha1.c +++ b/arch/powerpc/crypto/sha1.c @@ -16,12 +16,11 @@ #include #include #include -#include #include #include #include -extern void powerpc_sha_transform(u32 *state, const u8 *src, u32 *temp); +void powerpc_sha_transform(u32 *state, const u8 *src); static int sha1_init(struct shash_desc *desc) { @@ -47,7 +46,6 @@ static int sha1_update(struct shash_desc *desc, const u8 *data, src = data; if ((partial + len) > 63) { - u32 temp[SHA_WORKSPACE_WORDS]; if (partial) { done = -partial; @@ -56,12 +54,11 @@ static int sha1_update(struct shash_desc *desc, const u8 *data, } do { - powerpc_sha_transform(sctx->state, src, temp); + powerpc_sha_transform(sctx->state, src); done += 64; src = data + done; } while (done + 63 < len); - memzero_explicit(temp, sizeof(temp)); partial = 0; } memcpy(sctx->buffer + partial, src, len - done); -- cgit v1.2.3-59-g8ed1b From 23dc2a0dfc981f2bc6455a7cefccbb6d39e07314 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 2 May 2020 11:24:23 -0700 Subject: crypto: powerpc/sha1 - prefix the "sha1_" functions Prefix the PowerPC SHA-1 functions with "powerpc_sha1_" rather than "sha1_". This allows us to rename the library function sha_init() to sha1_init() without causing a naming collision. Cc: linuxppc-dev@lists.ozlabs.org Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Cc: Paul Mackerras Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/powerpc/crypto/sha1.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c index db46b6130a96..b40dc50a6908 100644 --- a/arch/powerpc/crypto/sha1.c +++ b/arch/powerpc/crypto/sha1.c @@ -22,7 +22,7 @@ void powerpc_sha_transform(u32 *state, const u8 *src); -static int sha1_init(struct shash_desc *desc) +static int powerpc_sha1_init(struct shash_desc *desc) { struct sha1_state *sctx = shash_desc_ctx(desc); @@ -33,8 +33,8 @@ static int sha1_init(struct shash_desc *desc) return 0; } -static int sha1_update(struct shash_desc *desc, const u8 *data, - unsigned int len) +static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data, + unsigned int len) { struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int partial, done; @@ -68,7 +68,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data, /* Add padding and return the message digest. 
*/ -static int sha1_final(struct shash_desc *desc, u8 *out) +static int powerpc_sha1_final(struct shash_desc *desc, u8 *out) { struct sha1_state *sctx = shash_desc_ctx(desc); __be32 *dst = (__be32 *)out; @@ -81,10 +81,10 @@ static int sha1_final(struct shash_desc *desc, u8 *out) /* Pad out to 56 mod 64 */ index = sctx->count & 0x3f; padlen = (index < 56) ? (56 - index) : ((64+56) - index); - sha1_update(desc, padding, padlen); + powerpc_sha1_update(desc, padding, padlen); /* Append length */ - sha1_update(desc, (const u8 *)&bits, sizeof(bits)); + powerpc_sha1_update(desc, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 5; i++) @@ -96,7 +96,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out) return 0; } -static int sha1_export(struct shash_desc *desc, void *out) +static int powerpc_sha1_export(struct shash_desc *desc, void *out) { struct sha1_state *sctx = shash_desc_ctx(desc); @@ -104,7 +104,7 @@ static int sha1_export(struct shash_desc *desc, void *out) return 0; } -static int sha1_import(struct shash_desc *desc, const void *in) +static int powerpc_sha1_import(struct shash_desc *desc, const void *in) { struct sha1_state *sctx = shash_desc_ctx(desc); @@ -114,11 +114,11 @@ static int sha1_import(struct shash_desc *desc, const void *in) static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, - .init = sha1_init, - .update = sha1_update, - .final = sha1_final, - .export = sha1_export, - .import = sha1_import, + .init = powerpc_sha1_init, + .update = powerpc_sha1_update, + .final = powerpc_sha1_final, + .export = powerpc_sha1_export, + .import = powerpc_sha1_import, .descsize = sizeof(struct sha1_state), .statesize = sizeof(struct sha1_state), .base = { -- cgit v1.2.3-59-g8ed1b From 4d21e594508c9dca0a51e41a684e16cb5c420c36 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 2 May 2020 11:24:24 -0700 Subject: crypto: s390/sha1 - prefix the "sha1_" functions Prefix the s390 SHA-1 functions with "s390_sha1_" rather than "sha1_". This allows us to rename the library function sha_init() to sha1_init() without causing a naming collision. 
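(Freeing up the plain sha1_* names matters because the bare library interface, once renamed by the following patch, is called directly by users such as kernel/bpf/core.c and net/ipv6/addrconf.c later in this series. A minimal usage sketch with the post-rename names; the wrapper function name is made up:)

static void sha1_one_block(const char *block64, u32 digest[SHA1_DIGEST_WORDS])
{
        u32 ws[SHA1_WORKSPACE_WORDS];           /* 16 words of scratch */

        sha1_init(digest);
        sha1_transform(digest, block64, ws);    /* compression only: no padding or length */
        memzero_explicit(ws, sizeof(ws));       /* wipe scratch if the input is sensitive */
}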
Cc: linux-s390@vger.kernel.org Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/s390/crypto/sha1_s390.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index 7c15542d3685..698b1e6d3c14 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -27,7 +27,7 @@ #include "sha.h" -static int sha1_init(struct shash_desc *desc) +static int s390_sha1_init(struct shash_desc *desc) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); @@ -42,7 +42,7 @@ static int sha1_init(struct shash_desc *desc) return 0; } -static int sha1_export(struct shash_desc *desc, void *out) +static int s390_sha1_export(struct shash_desc *desc, void *out) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); struct sha1_state *octx = out; @@ -53,7 +53,7 @@ static int sha1_export(struct shash_desc *desc, void *out) return 0; } -static int sha1_import(struct shash_desc *desc, const void *in) +static int s390_sha1_import(struct shash_desc *desc, const void *in) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); const struct sha1_state *ictx = in; @@ -67,11 +67,11 @@ static int sha1_import(struct shash_desc *desc, const void *in) static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, - .init = sha1_init, + .init = s390_sha1_init, .update = s390_sha_update, .final = s390_sha_final, - .export = sha1_export, - .import = sha1_import, + .export = s390_sha1_export, + .import = s390_sha1_import, .descsize = sizeof(struct s390_sha_ctx), .statesize = sizeof(struct sha1_state), .base = { -- cgit v1.2.3-59-g8ed1b From 6b0b0fa2bce61db790efc8070ae6e5696435b0a8 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 2 May 2020 11:24:25 -0700 Subject: crypto: lib/sha1 - rename "sha" to "sha1" The library implementation of the SHA-1 compression function is confusingly called just "sha_transform()". Alongside it are some "SHA_" constants and "sha_init()". Presumably these are left over from a time when SHA just meant SHA-1. But now there are also SHA-2 and SHA-3, and moreover SHA-1 is now considered insecure and thus shouldn't be used. Therefore, rename these functions and constants to make it very clear that they are for SHA-1. Also add a comment to make it clear that these shouldn't be used. For the extra-misleadingly named "SHA_MESSAGE_BYTES", rename it to SHA1_BLOCK_SIZE and define it to just '64' rather than '(512/8)' so that it matches the same definition in <crypto/sha.h>. This prepares for merging <linux/cryptohash.h> into <crypto/sha.h>. Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- Documentation/security/siphash.rst | 2 +- crypto/sha1_generic.c | 4 ++-- drivers/char/random.c | 6 +++--- include/linux/cryptohash.h | 16 ++++++++++------ include/linux/filter.h | 2 +- kernel/bpf/core.c | 18 +++++++++--------- lib/sha1.c | 22 ++++++++++++---------- net/ipv6/addrconf.c | 10 +++++----- 8 files changed, 43 insertions(+), 37 deletions(-) diff --git a/Documentation/security/siphash.rst b/Documentation/security/siphash.rst index 4eba68cdf0a1..bd9363025fcb 100644 --- a/Documentation/security/siphash.rst +++ b/Documentation/security/siphash.rst @@ -7,7 +7,7 @@ SipHash - a short input PRF SipHash is a cryptographically secure PRF -- a keyed hash function -- that performs very well for short inputs, hence the name. It was designed by cryptographers Daniel J. Bernstein and Jean-Philippe Aumasson.
It is intended -as a replacement for some uses of: `jhash`, `md5_transform`, `sha_transform`, +as a replacement for some uses of: `jhash`, `md5_transform`, `sha1_transform`, and so forth. SipHash takes a secret key filled with randomly generated numbers and either diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index 7c57b844c382..a16d9787dcd2 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c @@ -31,10 +31,10 @@ EXPORT_SYMBOL_GPL(sha1_zero_message_hash); static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src, int blocks) { - u32 temp[SHA_WORKSPACE_WORDS]; + u32 temp[SHA1_WORKSPACE_WORDS]; while (blocks--) { - sha_transform(sst->state, src, temp); + sha1_transform(sst->state, src, temp); src += SHA1_BLOCK_SIZE; } memzero_explicit(temp, sizeof(temp)); diff --git a/drivers/char/random.c b/drivers/char/random.c index 0d10e31fd342..a19a8984741b 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1397,14 +1397,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out) __u32 w[5]; unsigned long l[LONGS(20)]; } hash; - __u32 workspace[SHA_WORKSPACE_WORDS]; + __u32 workspace[SHA1_WORKSPACE_WORDS]; unsigned long flags; /* * If we have an architectural hardware random number * generator, use it for SHA's initial vector */ - sha_init(hash.w); + sha1_init(hash.w); for (i = 0; i < LONGS(20); i++) { unsigned long v; if (!arch_get_random_long(&v)) @@ -1415,7 +1415,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out) /* Generate a hash across the pool, 16 words (512 bits) at a time */ spin_lock_irqsave(&r->lock, flags); for (i = 0; i < r->poolinfo->poolwords; i += 16) - sha_transform(hash.w, (__u8 *)(r->pool + i), workspace); + sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace); /* * We mix the hash back into the pool to prevent backtracking diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h index f6ba4c3e60d7..c324ffca96e0 100644 --- a/include/linux/cryptohash.h +++ b/include/linux/cryptohash.h @@ -4,11 +4,15 @@ #include -#define SHA_DIGEST_WORDS 5 -#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8) -#define SHA_WORKSPACE_WORDS 16 - -void sha_init(__u32 *buf); -void sha_transform(__u32 *digest, const char *data, __u32 *W); +/* + * An implementation of SHA-1's compression function. Don't use in new code! + * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't + * the correct way to hash something with SHA-1 (use crypto_shash instead). 
+ */ +#define SHA1_DIGEST_WORDS 5 +#define SHA1_BLOCK_SIZE 64 +#define SHA1_WORKSPACE_WORDS 16 +void sha1_init(__u32 *buf); +void sha1_transform(__u32 *digest, const char *data, __u32 *W); #endif diff --git a/include/linux/filter.h b/include/linux/filter.h index 9b5aa5c483cc..f42662adffe4 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -746,7 +746,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog) static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog) { return round_up(bpf_prog_insn_size(prog) + - sizeof(__be64) + 1, SHA_MESSAGE_BYTES); + sizeof(__be64) + 1, SHA1_BLOCK_SIZE); } static inline unsigned int bpf_prog_size(unsigned int proglen) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 916f5132a984..14aa1f74dd10 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -262,10 +262,10 @@ void __bpf_prog_free(struct bpf_prog *fp) int bpf_prog_calc_tag(struct bpf_prog *fp) { - const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64); + const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64); u32 raw_size = bpf_prog_tag_scratch_size(fp); - u32 digest[SHA_DIGEST_WORDS]; - u32 ws[SHA_WORKSPACE_WORDS]; + u32 digest[SHA1_DIGEST_WORDS]; + u32 ws[SHA1_WORKSPACE_WORDS]; u32 i, bsize, psize, blocks; struct bpf_insn *dst; bool was_ld_map; @@ -277,7 +277,7 @@ int bpf_prog_calc_tag(struct bpf_prog *fp) if (!raw) return -ENOMEM; - sha_init(digest); + sha1_init(digest); memset(ws, 0, sizeof(ws)); /* We need to take out the map fd for the digest calculation @@ -308,8 +308,8 @@ int bpf_prog_calc_tag(struct bpf_prog *fp) memset(&raw[psize], 0, raw_size - psize); raw[psize++] = 0x80; - bsize = round_up(psize, SHA_MESSAGE_BYTES); - blocks = bsize / SHA_MESSAGE_BYTES; + bsize = round_up(psize, SHA1_BLOCK_SIZE); + blocks = bsize / SHA1_BLOCK_SIZE; todo = raw; if (bsize - psize >= sizeof(__be64)) { bits = (__be64 *)(todo + bsize - sizeof(__be64)); @@ -320,12 +320,12 @@ int bpf_prog_calc_tag(struct bpf_prog *fp) *bits = cpu_to_be64((psize - 1) << 3); while (blocks--) { - sha_transform(digest, todo, ws); - todo += SHA_MESSAGE_BYTES; + sha1_transform(digest, todo, ws); + todo += SHA1_BLOCK_SIZE; } result = (__force __be32 *)digest; - for (i = 0; i < SHA_DIGEST_WORDS; i++) + for (i = 0; i < SHA1_DIGEST_WORDS; i++) result[i] = cpu_to_be32(digest[i]); memcpy(fp->tag, result, sizeof(fp->tag)); diff --git a/lib/sha1.c b/lib/sha1.c index 1d96d2c02b82..b381e8cd4fe4 100644 --- a/lib/sha1.c +++ b/lib/sha1.c @@ -64,22 +64,24 @@ #define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E ) /** - * sha_transform - single block SHA1 transform + * sha1_transform - single block SHA1 transform (deprecated) * * @digest: 160 bit digest to update * @data: 512 bits of data to hash * @array: 16 words of workspace (see note) * - * This function generates a SHA1 digest for a single 512-bit block. - * Be warned, it does not handle padding and message digest, do not - * confuse it with the full FIPS 180-1 digest algorithm for variable - * length messages. + * This function executes SHA-1's internal compression function. It updates the + * 160-bit internal state (@digest) with a single 512-bit data block (@data). + * + * Don't use this function. SHA-1 is no longer considered secure. And even if + * you do have to use SHA-1, this isn't the correct way to hash something with + * SHA-1 as this doesn't handle padding and finalization. * * Note: If the hash is security sensitive, the caller should be sure * to clear the workspace. 
This is left to the caller to avoid * unnecessary clears between chained hashing operations. */ -void sha_transform(__u32 *digest, const char *data, __u32 *array) +void sha1_transform(__u32 *digest, const char *data, __u32 *array) { __u32 A, B, C, D, E; @@ -185,13 +187,13 @@ void sha_transform(__u32 *digest, const char *data, __u32 *array) digest[3] += D; digest[4] += E; } -EXPORT_SYMBOL(sha_transform); +EXPORT_SYMBOL(sha1_transform); /** - * sha_init - initialize the vectors for a SHA1 digest + * sha1_init - initialize the vectors for a SHA1 digest * @buf: vector to initialize */ -void sha_init(__u32 *buf) +void sha1_init(__u32 *buf) { buf[0] = 0x67452301; buf[1] = 0xefcdab89; @@ -199,4 +201,4 @@ void sha_init(__u32 *buf) buf[3] = 0x10325476; buf[4] = 0xc3d2e1f0; } -EXPORT_SYMBOL(sha_init); +EXPORT_SYMBOL(sha1_init); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 24e319dfb510..f131cedf5ba6 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3222,11 +3222,11 @@ static int ipv6_generate_stable_address(struct in6_addr *address, const struct inet6_dev *idev) { static DEFINE_SPINLOCK(lock); - static __u32 digest[SHA_DIGEST_WORDS]; - static __u32 workspace[SHA_WORKSPACE_WORDS]; + static __u32 digest[SHA1_DIGEST_WORDS]; + static __u32 workspace[SHA1_WORKSPACE_WORDS]; static union { - char __data[SHA_MESSAGE_BYTES]; + char __data[SHA1_BLOCK_SIZE]; struct { struct in6_addr secret; __be32 prefix[2]; @@ -3251,7 +3251,7 @@ static int ipv6_generate_stable_address(struct in6_addr *address, retry: spin_lock_bh(&lock); - sha_init(digest); + sha1_init(digest); memset(&data, 0, sizeof(data)); memset(workspace, 0, sizeof(workspace)); memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len); @@ -3260,7 +3260,7 @@ retry: data.secret = secret; data.dad_count = dad_count; - sha_transform(digest, data.__data, workspace); + sha1_transform(digest, data.__data, workspace); temp = *address; temp.s6_addr32[2] = (__force __be32)digest[0]; -- cgit v1.2.3-59-g8ed1b From 2aaba014b55be46affcae78edff356c5e3389081 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 2 May 2020 11:24:26 -0700 Subject: crypto: lib/sha1 - remove unnecessary includes of linux/cryptohash.h sounds very generic and important, like it's the header to include if you're doing cryptographic hashing in the kernel. But actually it only includes the library implementation of the SHA-1 compression function (not even the full SHA-1). This should basically never be used anymore; SHA-1 is no longer considered secure, and there are much better ways to do cryptographic hashing in the kernel. Most files that include this header don't actually need it. So in preparation for removing it, remove all these unneeded includes of it. 
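(A minimal sketch of the "much better way" referred to above, i.e. the crypto_shash API combined with the one-shot helper introduced earlier in this series; the algorithm name and wrapper are only illustrative, and out must have room for the chosen digest, 32 bytes for sha256:)

static int hash_buffer_example(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
        int err;

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        err = crypto_shash_tfm_digest(tfm, data, len, out);  /* one-shot digest */
        crypto_free_shash(tfm);
        return err;
}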
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- arch/arm/crypto/sha1_glue.c | 1 - arch/arm/crypto/sha1_neon_glue.c | 1 - arch/arm/crypto/sha256_glue.c | 1 - arch/arm/crypto/sha256_neon_glue.c | 1 - arch/arm/kernel/armksyms.c | 1 - arch/arm64/crypto/sha256-glue.c | 1 - arch/arm64/crypto/sha512-glue.c | 1 - arch/microblaze/kernel/microblaze_ksyms.c | 1 - arch/mips/cavium-octeon/crypto/octeon-md5.c | 1 - arch/powerpc/crypto/md5-glue.c | 1 - arch/powerpc/crypto/sha1-spe-glue.c | 1 - arch/powerpc/crypto/sha256-spe-glue.c | 1 - arch/sparc/crypto/md5_glue.c | 1 - arch/sparc/crypto/sha1_glue.c | 1 - arch/sparc/crypto/sha256_glue.c | 1 - arch/sparc/crypto/sha512_glue.c | 1 - arch/unicore32/kernel/ksyms.c | 1 - arch/x86/crypto/sha1_ssse3_glue.c | 1 - arch/x86/crypto/sha256_ssse3_glue.c | 1 - arch/x86/crypto/sha512_ssse3_glue.c | 1 - drivers/crypto/atmel-sha.c | 1 - drivers/crypto/chelsio/chcr_algo.c | 1 - drivers/crypto/chelsio/chcr_ipsec.c | 1 - drivers/crypto/omap-sham.c | 1 - fs/f2fs/hash.c | 1 - include/net/tcp.h | 1 - lib/crypto/chacha.c | 1 - net/core/secure_seq.c | 1 - net/ipv6/seg6_hmac.c | 1 - 29 files changed, 29 deletions(-) diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c index c80b0ebfd02f..4e954b3f7ecd 100644 --- a/arch/arm/crypto/sha1_glue.c +++ b/arch/arm/crypto/sha1_glue.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c index 2c3627334335..0071e5e4411a 100644 --- a/arch/arm/crypto/sha1_neon_glue.c +++ b/arch/arm/crypto/sha1_neon_glue.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c index 215497f011f2..b8a4f79020cf 100644 --- a/arch/arm/crypto/sha256_glue.c +++ b/arch/arm/crypto/sha256_glue.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c index 38645e415196..79820b9e2541 100644 --- a/arch/arm/crypto/sha256_neon_glue.c +++ b/arch/arm/crypto/sha256_neon_glue.c @@ -11,7 +11,6 @@ #include #include -#include #include #include #include diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 98bdea51089d..82e96ac83684 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c index ddf4a0d85c1c..77bc6e72abae 100644 --- a/arch/arm64/crypto/sha256-glue.c +++ b/arch/arm64/crypto/sha256-glue.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c index 78d3083de6b7..370ccb29602f 100644 --- a/arch/arm64/crypto/sha512-glue.c +++ b/arch/arm64/crypto/sha512-glue.c @@ -6,7 +6,6 @@ */ #include -#include #include #include #include diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c index 92e12c2c2ec1..51c43ee5e380 100644 --- a/arch/microblaze/kernel/microblaze_ksyms.c +++ b/arch/microblaze/kernel/microblaze_ksyms.c @@ -6,7 +6,6 @@ #include #include -#include #include #include #include diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c index d1ed066e1a17..8c8ea139653e 100644 --- a/arch/mips/cavium-octeon/crypto/octeon-md5.c +++ 
b/arch/mips/cavium-octeon/crypto/octeon-md5.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c index 7d1bf2fcf668..c24f605033bd 100644 --- a/arch/powerpc/crypto/md5-glue.c +++ b/arch/powerpc/crypto/md5-glue.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c index 6379990bd604..cb57be4ada61 100644 --- a/arch/powerpc/crypto/sha1-spe-glue.c +++ b/arch/powerpc/crypto/sha1-spe-glue.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c index 84939e563b81..ceb0b6c980b3 100644 --- a/arch/powerpc/crypto/sha256-spe-glue.c +++ b/arch/powerpc/crypto/sha256-spe-glue.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c index 14f6c15be6ae..111283fe837e 100644 --- a/arch/sparc/crypto/md5_glue.c +++ b/arch/sparc/crypto/md5_glue.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c index 7c1666304441..dc017782be52 100644 --- a/arch/sparc/crypto/sha1_glue.c +++ b/arch/sparc/crypto/sha1_glue.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c index f403ce9ba6e4..286bc8ecf15b 100644 --- a/arch/sparc/crypto/sha256_glue.c +++ b/arch/sparc/crypto/sha256_glue.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c index a3b532e43c07..3b2ca732ff7a 100644 --- a/arch/sparc/crypto/sha512_glue.c +++ b/arch/sparc/crypto/sha512_glue.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include diff --git a/arch/unicore32/kernel/ksyms.c b/arch/unicore32/kernel/ksyms.c index f4b84872d640..731445008932 100644 --- a/arch/unicore32/kernel/ksyms.c +++ b/arch/unicore32/kernel/ksyms.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index a801ffc10cbb..18200135603f 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c index 6394b5fe8db6..dd06249229e1 100644 --- a/arch/x86/crypto/sha256_ssse3_glue.c +++ b/arch/x86/crypto/sha256_ssse3_glue.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c index 82cc1b3ced1d..b0b05c93409e 100644 --- a/arch/x86/crypto/sha512_ssse3_glue.c +++ b/arch/x86/crypto/sha512_ssse3_glue.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index e536e2a6bbd8..75ccf41a7cb9 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 5d3000fdd5f4..caf1136e7ef9 
100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -44,7 +44,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c index 9fd3b9d1ec2f..25bf6d963066 100644 --- a/drivers/crypto/chelsio/chcr_ipsec.c +++ b/drivers/crypto/chelsio/chcr_ipsec.c @@ -40,7 +40,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index d600c5b3fdd3..063ad5d03f33 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c index 5bc4dcd8fc03..8c4ea5003ef8 100644 --- a/fs/f2fs/hash.c +++ b/fs/f2fs/hash.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include diff --git a/include/net/tcp.h b/include/net/tcp.h index 5fa9eacd965a..5948c8e4c9e1 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -23,7 +23,6 @@ #include #include #include -#include #include #include diff --git a/lib/crypto/chacha.c b/lib/crypto/chacha.c index 65ead6b0c7e0..4ccbec442469 100644 --- a/lib/crypto/chacha.c +++ b/lib/crypto/chacha.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index 7b6b1d2c3d10..b5bc680d4755 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -5,7 +5,6 @@ #include #include -#include #include #include #include diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index ffcfcd2b128f..85dddfe3a2c6 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -34,7 +34,6 @@ #include #include -#include #include #include #include -- cgit v1.2.3-59-g8ed1b From 228c4f265c6eb60eaa4ed0edb3bf7c113173576c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sat, 2 May 2020 11:24:27 -0700 Subject: crypto: lib/sha1 - fold linux/cryptohash.h into crypto/sha.h <linux/cryptohash.h> sounds very generic and important, like it's the header to include if you're doing cryptographic hashing in the kernel. But actually it only includes the library implementation of the SHA-1 compression function (not even the full SHA-1). This should basically never be used anymore; SHA-1 is no longer considered secure, and there are much better ways to do cryptographic hashing in the kernel. Remove this header and fold it into <crypto/sha.h> which already contains constants and functions for SHA-1 (along with SHA-2).
Signed-off-by: Eric Biggers Signed-off-by: Herbert Xu --- crypto/sha1_generic.c | 1 - drivers/char/random.c | 2 +- include/crypto/sha.h | 10 ++++++++++ include/linux/cryptohash.h | 18 ------------------ include/linux/filter.h | 2 +- lib/sha1.c | 2 +- 6 files changed, 13 insertions(+), 22 deletions(-) delete mode 100644 include/linux/cryptohash.h diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index a16d9787dcd2..1d43472fecbd 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/char/random.c b/drivers/char/random.c index a19a8984741b..cae02b2a871c 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -327,7 +327,6 @@ #include #include #include -#include #include #include #include @@ -337,6 +336,7 @@ #include #include #include +#include #include #include diff --git a/include/crypto/sha.h b/include/crypto/sha.h index 67aec7245cb7..10753ff71d46 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -113,6 +113,16 @@ extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data, extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash); +/* + * An implementation of SHA-1's compression function. Don't use in new code! + * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't + * the correct way to hash something with SHA-1 (use crypto_shash instead). + */ +#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4) +#define SHA1_WORKSPACE_WORDS 16 +void sha1_init(__u32 *buf); +void sha1_transform(__u32 *digest, const char *data, __u32 *W); + /* * Stand-alone implementation of the SHA256 algorithm. It is designed to * have as little dependencies as possible so it can be used in the diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h deleted file mode 100644 index c324ffca96e0..000000000000 --- a/include/linux/cryptohash.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __CRYPTOHASH_H -#define __CRYPTOHASH_H - -#include - -/* - * An implementation of SHA-1's compression function. Don't use in new code! - * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't - * the correct way to hash something with SHA-1 (use crypto_shash instead). - */ -#define SHA1_DIGEST_WORDS 5 -#define SHA1_BLOCK_SIZE 64 -#define SHA1_WORKSPACE_WORDS 16 -void sha1_init(__u32 *buf); -void sha1_transform(__u32 *digest, const char *data, __u32 *W); - -#endif diff --git a/include/linux/filter.h b/include/linux/filter.h index f42662adffe4..ec45fd7992c9 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -16,11 +16,11 @@ #include #include #include -#include #include #include #include #include +#include #include diff --git a/lib/sha1.c b/lib/sha1.c index b381e8cd4fe4..49257a915bb6 100644 --- a/lib/sha1.c +++ b/lib/sha1.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include /* -- cgit v1.2.3-59-g8ed1b From 9a611a1dce07138c9cb2ac5d3f118e7de44fa774 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 15 May 2020 14:13:46 +1000 Subject: Revert "ASoC: cros_ec_codec: use crypto_shash_tfm_digest()" This reverts commit 85fc78b80f15d723db3aa8f368b414ee70a1937c as a different fix has already been applied in the sound-asoc tree and this patch is no longer required. 
Reported-by: Stephen Rothwell Reported-by: Mark Brown Signed-off-by: Herbert Xu --- sound/soc/codecs/cros_ec_codec.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/sound/soc/codecs/cros_ec_codec.c b/sound/soc/codecs/cros_ec_codec.c index bfdd852bdc0d..d3dc42aa6825 100644 --- a/sound/soc/codecs/cros_ec_codec.c +++ b/sound/soc/codecs/cros_ec_codec.c @@ -115,7 +115,14 @@ static int calculate_sha256(struct cros_ec_codec_priv *priv, return PTR_ERR(tfm); } - crypto_shash_tfm_digest(tfm, buf, size, digest); + { + SHASH_DESC_ON_STACK(desc, tfm); + + desc->tfm = tfm; + + crypto_shash_digest(desc, buf, size, digest); + shash_desc_zero(desc); + } crypto_free_shash(tfm); -- cgit v1.2.3-59-g8ed1b From 0c0408e86dbe8f44d4b27bf42130e8ac905361d6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 5 May 2020 15:53:45 +0200 Subject: crypto: blake2b - Fix clang optimization for ARMv7-M When building for ARMv7-M, clang-9 or higher tries to unroll some loops, which ends up confusing the register allocator to the point of generating rather bad code and using more than the warning limit for stack frames: warning: stack frame size of 1200 bytes in function 'blake2b_compress' [-Wframe-larger-than=] Forcing it to not unroll the final loop avoids this problem. Fixes: 91d689337fe8 ("crypto: blake2b - add blake2b generic implementation") Signed-off-by: Arnd Bergmann Reviewed-by: Nathan Chancellor Signed-off-by: Herbert Xu --- crypto/blake2b_generic.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crypto/blake2b_generic.c b/crypto/blake2b_generic.c index 1d262374fa4e..0ffd8d92e308 100644 --- a/crypto/blake2b_generic.c +++ b/crypto/blake2b_generic.c @@ -129,7 +129,9 @@ static void blake2b_compress(struct blake2b_state *S, ROUND(9); ROUND(10); ROUND(11); - +#ifdef CONFIG_CC_IS_CLANG +#pragma nounroll /* https://bugs.llvm.org/show_bug.cgi?id=45803 */ +#endif for (i = 0; i < 8; ++i) S->h[i] = S->h[i] ^ v[i] ^ v[i + 8]; } -- cgit v1.2.3-59-g8ed1b From 1036bb50c80561bab85a0dc57e87e6c7645c97b7 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 9 May 2020 00:34:59 +0200 Subject: crypto: ccree - constify struct debugfs_reg32 pid_cid_regs and debug_regs are never changed and can therefore be made const. This allows the compiler to put it in the text section instead of the data section.
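(The text/data shift visible in the size(1) numbers below comes purely from const placement: a writable array lands in .data, while a const one can be placed in .rodata, which size(1) counts as text. Illustrative declarations only; the register name and offset are made up:)

static struct debugfs_reg32 writable_regs[] = {
        { .name = "EXAMPLE_REG", .offset = 0x100 },     /* ends up in .data */
};

static const struct debugfs_reg32 readonly_regs[] = {
        { .name = "EXAMPLE_REG", .offset = 0x100 },     /* may be placed in .rodata */
};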
Before: text data bss dec hex filename 2871 2320 64 5255 1487 drivers/crypto/ccree/cc_debugfs.o After: text data bss dec hex filename 3255 1936 64 5255 1487 drivers/crypto/ccree/cc_debugfs.o Signed-off-by: Rikard Falkeborn Signed-off-by: Herbert Xu --- drivers/crypto/ccree/cc_debugfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c index c454afce7781..7083767602fc 100644 --- a/drivers/crypto/ccree/cc_debugfs.c +++ b/drivers/crypto/ccree/cc_debugfs.c @@ -26,7 +26,7 @@ static struct debugfs_reg32 ver_sig_regs[] = { { .name = "VERSION" }, /* Must be 1st */ }; -static struct debugfs_reg32 pid_cid_regs[] = { +static const struct debugfs_reg32 pid_cid_regs[] = { CC_DEBUG_REG(PERIPHERAL_ID_0), CC_DEBUG_REG(PERIPHERAL_ID_1), CC_DEBUG_REG(PERIPHERAL_ID_2), @@ -38,7 +38,7 @@ static struct debugfs_reg32 pid_cid_regs[] = { CC_DEBUG_REG(COMPONENT_ID_3), }; -static struct debugfs_reg32 debug_regs[] = { +static const struct debugfs_reg32 debug_regs[] = { CC_DEBUG_REG(HOST_IRR), CC_DEBUG_REG(HOST_POWER_DOWN_EN), CC_DEBUG_REG(AXIM_MON_ERR), -- cgit v1.2.3-59-g8ed1b From 2c2207aee52b6c6627f91aa2c7f5316c4087363a Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 9 May 2020 00:35:00 +0200 Subject: crypto: hisilicon/hpre - constify struct debugfs_reg32 hpre_cluster_dfx_regs and hpre_com_dfx_regs are never changed and can therefore be made const. This allows the compiler to put it in the text section instead of the data section. Before: text data bss dec hex filename 16455 6288 480 23223 5ab7 drivers/crypto/hisilicon/hpre/hpre_main.o After: text data bss dec hex filename 16839 5904 480 23223 5ab7 drivers/crypto/hisilicon/hpre/hpre_main.o Signed-off-by: Rikard Falkeborn Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 0d63666ba373..840e16c14570 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -134,7 +134,7 @@ static const u64 hpre_cluster_offsets[] = { HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL, }; -static struct debugfs_reg32 hpre_cluster_dfx_regs[] = { +static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = { {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET}, {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET}, {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET}, @@ -142,7 +142,7 @@ static struct debugfs_reg32 hpre_cluster_dfx_regs[] = { {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET}, }; -static struct debugfs_reg32 hpre_com_dfx_regs[] = { +static const struct debugfs_reg32 hpre_com_dfx_regs[] = { {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE}, {"AXQOS ", HPRE_VFG_AXQOS}, {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG}, -- cgit v1.2.3-59-g8ed1b From 8f68659bac1da933bf5526d4eeec46504d68457b Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 9 May 2020 00:35:01 +0200 Subject: crypto: hisilicon/zip - constify struct debugfs_reg32 hzip_dfx_regs is never changed and can be made const. This allows the compiler to put it in the text section instead of the data section. 
Before: text data bss dec hex filename 15236 6160 480 21876 5574 drivers/crypto/hisilicon/zip/zip_main.o After: text data bss dec hex filename 15620 5776 480 21876 5574 drivers/crypto/hisilicon/zip/zip_main.o Signed-off-by: Rikard Falkeborn Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 37db11f96fab..6934a03d21e1 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -165,7 +165,7 @@ static const u64 core_offsets[] = { [HZIP_DECOMP_CORE5] = 0x309000, }; -static struct debugfs_reg32 hzip_dfx_regs[] = { +static const struct debugfs_reg32 hzip_dfx_regs[] = { {"HZIP_GET_BD_NUM ", 0x00ull}, {"HZIP_GET_RIGHT_BD ", 0x04ull}, {"HZIP_GET_ERROR_BD ", 0x08ull}, -- cgit v1.2.3-59-g8ed1b From c549e8127213bbebba76b88a148875a80561e51d Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 9 May 2020 00:35:02 +0200 Subject: crypto: hisilicon/sec2 - constify sec_dfx_regs sec_dfx_regs is never changed and can therefore be made const. This allows the compiler to put it in the text section instead of the data section. Before: text data bss dec hex filename 17982 7312 480 25774 64ae drivers/crypto/hisilicon/sec2/sec_main.o After: text data bss dec hex filename 18366 6928 480 25774 64ae drivers/crypto/hisilicon/sec2/sec_main.o Signed-off-by: Rikard Falkeborn Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 07a5f4eb96ff..6f577b34098f 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -110,7 +110,7 @@ static const char * const sec_dbg_file_name[] = { [SEC_CLEAR_ENABLE] = "clear_enable", }; -static struct debugfs_reg32 sec_dfx_regs[] = { +static const struct debugfs_reg32 sec_dfx_regs[] = { {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010}, {"SEC_SAA_EN ", 0x301270}, {"SEC_BD_LATENCY_MIN ", 0x301600}, -- cgit v1.2.3-59-g8ed1b From 18614230f430a1108aad5ba2be2dd158b664081b Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Sat, 9 May 2020 17:43:54 +0800 Subject: crypto: hisilicon/sec2 - modify the SEC probe process Adjust the position of SMMU status check and SEC queue initialization in SEC probe Signed-off-by: Longfang Liu Signed-off-by: Zaibo Xu Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/sec2/sec_main.c | 67 ++++++++++++++------------------ 1 file changed, 30 insertions(+), 37 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 6f577b34098f..5853a0695459 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -765,6 +765,21 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->dev_name = sec_name; qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ? QM_HW_PF : QM_HW_VF; + if (qm->fun_type == QM_HW_PF) { + qm->qp_base = SEC_PF_DEF_Q_BASE; + qm->qp_num = pf_q_num; + qm->debug.curr_qm_qp_num = pf_q_num; + qm->qm_list = &sec_devices; + } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { + /* + * have no way to get qm configure in VM in v1 hardware, + * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force + * to trigger only one VF in v1 hardware. 
+ * v2 hardware has no such problem. + */ + qm->qp_base = SEC_PF_DEF_Q_NUM; + qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; + } qm->use_dma_api = true; return hisi_qm_init(qm); @@ -775,8 +790,9 @@ static void sec_qm_uninit(struct hisi_qm *qm) hisi_qm_uninit(qm); } -static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec) +static int sec_probe_init(struct sec_dev *sec) { + struct hisi_qm *qm = &sec->qm; int ret; /* @@ -793,40 +809,18 @@ static int sec_probe_init(struct hisi_qm *qm, struct sec_dev *sec) return -ENOMEM; } - if (qm->fun_type == QM_HW_PF) { - qm->qp_base = SEC_PF_DEF_Q_BASE; - qm->qp_num = pf_q_num; - qm->debug.curr_qm_qp_num = pf_q_num; - qm->qm_list = &sec_devices; - + if (qm->fun_type == QM_HW_PF) ret = sec_pf_probe_init(sec); - if (ret) - goto err_probe_uninit; - } else if (qm->fun_type == QM_HW_VF) { - /* - * have no way to get qm configure in VM in v1 hardware, - * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force - * to trigger only one VF in v1 hardware. - * v2 hardware has no such problem. - */ - if (qm->ver == QM_HW_V1) { - qm->qp_base = SEC_PF_DEF_Q_NUM; - qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; - } else if (qm->ver == QM_HW_V2) { - /* v2 starts to support get vft by mailbox */ - ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); - if (ret) - goto err_probe_uninit; - } - } else { - ret = -ENODEV; - goto err_probe_uninit; + else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) + /* v2 starts to support get vft by mailbox */ + ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + + if (ret) { + destroy_workqueue(qm->wq); + return ret; } return 0; -err_probe_uninit: - destroy_workqueue(qm->wq); - return ret; } static void sec_probe_uninit(struct hisi_qm *qm) @@ -865,18 +859,17 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, sec); - sec->ctx_q_num = ctx_q_num; - sec_iommu_used_check(sec); - qm = &sec->qm; - ret = sec_qm_init(qm, pdev); if (ret) { - pci_err(pdev, "Failed to pre init qm!\n"); + pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret); return ret; } - ret = sec_probe_init(qm, sec); + sec->ctx_q_num = ctx_q_num; + sec_iommu_used_check(sec); + + ret = sec_probe_init(sec); if (ret) { pci_err(pdev, "Failed to probe!\n"); goto err_qm_uninit; -- cgit v1.2.3-59-g8ed1b From 5f3a2a5d37ff9058e3fa253cb8445f7780513635 Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Sat, 9 May 2020 17:43:55 +0800 Subject: crypto: hisilicon/hpre - modify the HPRE probe process Misc fixes on coding style: 1.Merge pre-initialization and initialization of QM 2.Package the initialization of QM's PF and VF into a function Signed-off-by: Longfang Liu Signed-off-by: Zaibo Xu Signed-off-by: Shukun Tan Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 42 ++++++++++++++++++------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 840e16c14570..e7585efa1596 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -666,7 +666,7 @@ static void hpre_debugfs_exit(struct hpre *hpre) debugfs_remove_recursive(qm->debug.debug_root); } -static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev) +static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { enum qm_hw_ver rev_id; @@ -685,13 +685,14 @@ static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->dev_name = hpre_name; qm->fun_type = 
(pdev->device == HPRE_PCI_DEVICE_ID) ? QM_HW_PF : QM_HW_VF; + if (pdev->is_physfn) { qm->qp_base = HPRE_PF_DEF_Q_BASE; qm->qp_num = hpre_pf_q_num; } qm->use_dma_api = true; - return 0; + return hisi_qm_init(qm); } static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) @@ -766,6 +767,20 @@ static int hpre_pf_probe_init(struct hpre *hpre) return 0; } +static int hpre_probe_init(struct hpre *hpre) +{ + struct hisi_qm *qm = &hpre->qm; + int ret = -ENODEV; + + if (qm->fun_type == QM_HW_PF) + ret = hpre_pf_probe_init(hpre); + else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) + /* v2 starts to support get vft by mailbox */ + ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + + return ret; +} + static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_qm *qm; @@ -779,23 +794,16 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, hpre); qm = &hpre->qm; - ret = hpre_qm_pre_init(qm, pdev); - if (ret) - return ret; - - ret = hisi_qm_init(qm); - if (ret) + ret = hpre_qm_init(qm, pdev); + if (ret) { + pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret); return ret; + } - if (pdev->is_physfn) { - ret = hpre_pf_probe_init(hpre); - if (ret) - goto err_with_qm_init; - } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) { - /* v2 starts to support get vft by mailbox */ - ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); - if (ret) - goto err_with_qm_init; + ret = hpre_probe_init(hpre); + if (ret) { + pci_err(pdev, "Failed to probe (%d)!\n", ret); + goto err_with_qm_init; } ret = hisi_qm_start(qm); -- cgit v1.2.3-59-g8ed1b From cfd66a660f73adfd388666f122e998691763aa55 Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Sat, 9 May 2020 17:43:56 +0800 Subject: crypto: hisilicon/zip - modify the ZIP probe process Misc fixes on coding style: 1.Merge QM initialization code into a function 2.Merge QM's PF and VF initialization into a function Signed-off-by: Longfang Liu Signed-off-by: Zaibo Xu Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip_main.c | 60 +++++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 18 deletions(-) diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 6934a03d21e1..3132c4e6a9da 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -701,23 +701,14 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) return 0; } -static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) +static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { - struct hisi_zip *hisi_zip; enum qm_hw_ver rev_id; - struct hisi_qm *qm; - int ret; rev_id = hisi_qm_get_hw_version(pdev); if (rev_id == QM_HW_UNKNOWN) return -EINVAL; - hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL); - if (!hisi_zip) - return -ENOMEM; - pci_set_drvdata(pdev, hisi_zip); - - qm = &hisi_zip->qm; qm->use_dma_api = true; qm->pdev = pdev; qm->ver = rev_id; @@ -725,13 +716,16 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) qm->algs = "zlib\ngzip"; qm->sqe_size = HZIP_SQE_SIZE; qm->dev_name = hisi_zip_name; - qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF : - QM_HW_VF; - ret = hisi_qm_init(qm); - if (ret) { - dev_err(&pdev->dev, "Failed to init qm!\n"); - return ret; - } + qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? 
+ QM_HW_PF : QM_HW_VF; + + return hisi_qm_init(qm); +} + +static int hisi_zip_probe_init(struct hisi_zip *hisi_zip) +{ + struct hisi_qm *qm = &hisi_zip->qm; + int ret; if (qm->fun_type == QM_HW_PF) { ret = hisi_zip_pf_probe_init(hisi_zip); @@ -754,7 +748,36 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; } else if (qm->ver == QM_HW_V2) /* v2 starts to support get vft by mailbox */ - hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + } + + return 0; +} + +static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hisi_zip *hisi_zip; + struct hisi_qm *qm; + int ret; + + hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL); + if (!hisi_zip) + return -ENOMEM; + + pci_set_drvdata(pdev, hisi_zip); + + qm = &hisi_zip->qm; + + ret = hisi_zip_qm_init(qm, pdev); + if (ret) { + pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret); + return ret; + } + + ret = hisi_zip_probe_init(hisi_zip); + if (ret) { + pci_err(pdev, "Failed to probe (%d)!\n", ret); + goto err_qm_uninit; } ret = hisi_qm_start(qm); @@ -787,6 +810,7 @@ err_remove_from_list: hisi_qm_stop(qm); err_qm_uninit: hisi_qm_uninit(qm); + return ret; } -- cgit v1.2.3-59-g8ed1b From 20b291f51802b35d84e20efbf110e8c9a853a22c Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Sat, 9 May 2020 17:43:57 +0800 Subject: crypto: hisilicon - refactor module parameter pf_q_num related code put q_num_set similar code into qm to reduce the redundancy. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 39 ++++++------------------------- drivers/crypto/hisilicon/qm.h | 39 +++++++++++++++++++++++++++++++ drivers/crypto/hisilicon/sec2/sec_main.c | 35 ++------------------------- drivers/crypto/hisilicon/zip/zip_main.c | 33 +------------------------- 4 files changed, 49 insertions(+), 97 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index e7585efa1596..18b3bb1ae950 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -159,44 +159,19 @@ static const struct debugfs_reg32 hpre_com_dfx_regs[] = { {"INT_STATUS ", HPRE_INT_STATUS}, }; -static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp) +static int pf_q_num_set(const char *val, const struct kernel_param *kp) { - struct pci_dev *pdev; - u32 n, q_num; - u8 rev_id; - int ret; - - if (!val) - return -EINVAL; - - pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL); - if (!pdev) { - q_num = HPRE_QUEUE_NUM_V2; - pr_info("No device found currently, suppose queue number is %d\n", - q_num); - } else { - rev_id = pdev->revision; - if (rev_id != QM_HW_V2) - return -EINVAL; - - q_num = HPRE_QUEUE_NUM_V2; - } - - ret = kstrtou32(val, 10, &n); - if (ret != 0 || n == 0 || n > q_num) - return -EINVAL; - - return param_set_int(val, kp); + return q_num_set(val, kp, HPRE_PCI_DEVICE_ID); } static const struct kernel_param_ops hpre_pf_q_num_ops = { - .set = hpre_pf_q_num_set, + .set = pf_q_num_set, .get = param_get_int, }; -static u32 hpre_pf_q_num = HPRE_PF_DEF_Q_NUM; -module_param_cb(hpre_pf_q_num, &hpre_pf_q_num_ops, &hpre_pf_q_num, 0444); -MODULE_PARM_DESC(hpre_pf_q_num, "Number of queues in PF of CS(1-1024)"); +static u32 pf_q_num = HPRE_PF_DEF_Q_NUM; +module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444); 
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)"); static const struct kernel_param_ops vfs_num_ops = { .set = vfs_num_set, @@ -688,7 +663,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) if (pdev->is_physfn) { qm->qp_base = HPRE_PF_DEF_Q_BASE; - qm->qp_num = hpre_pf_q_num; + qm->qp_num = pf_q_num; } qm->use_dma_api = true; diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 9d17167840f8..d1be8cdc99a3 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -8,6 +8,8 @@ #include #include +#define QM_QNUM_V1 4096 +#define QM_QNUM_V2 1024 #define QM_MAX_VFS_NUM_V2 63 /* qm user domain */ @@ -252,6 +254,43 @@ struct hisi_qp { struct uacce_queue *uacce_q; }; +static inline int q_num_set(const char *val, const struct kernel_param *kp, + unsigned int device) +{ + struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, + device, NULL); + u32 n, q_num; + u8 rev_id; + int ret; + + if (!val) + return -EINVAL; + + if (!pdev) { + q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); + pr_info("No device found currently, suppose queue number is %d\n", + q_num); + } else { + rev_id = pdev->revision; + switch (rev_id) { + case QM_HW_V1: + q_num = QM_QNUM_V1; + break; + case QM_HW_V2: + q_num = QM_QNUM_V2; + break; + default: + return -EINVAL; + } + } + + ret = kstrtou32(val, 10, &n); + if (ret || !n || n > q_num) + return -EINVAL; + + return param_set_int(val, kp); +} + static inline int vfs_num_set(const char *val, const struct kernel_param *kp) { u32 n; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 5853a0695459..06f840c397f2 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -136,45 +136,14 @@ static const struct debugfs_reg32 sec_dfx_regs[] = { static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp) { - struct pci_dev *pdev; - u32 n, q_num; - u8 rev_id; - int ret; - - if (!val) - return -EINVAL; - - pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, - SEC_PF_PCI_DEVICE_ID, NULL); - if (!pdev) { - q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2); - pr_info("No device, suppose queue number is %d!\n", q_num); - } else { - rev_id = pdev->revision; - - switch (rev_id) { - case QM_HW_V1: - q_num = SEC_QUEUE_NUM_V1; - break; - case QM_HW_V2: - q_num = SEC_QUEUE_NUM_V2; - break; - default: - return -EINVAL; - } - } - - ret = kstrtou32(val, 10, &n); - if (ret || !n || n > q_num) - return -EINVAL; - - return param_set_int(val, kp); + return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID); } static const struct kernel_param_ops sec_pf_q_num_ops = { .set = sec_pf_q_num_set, .get = param_get_int, }; + static u32 pf_q_num = SEC_PF_DEF_Q_NUM; module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444); MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)"); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 3132c4e6a9da..a8cb699c67b7 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -192,38 +192,7 @@ static const struct debugfs_reg32 hzip_dfx_regs[] = { static int pf_q_num_set(const char *val, const struct kernel_param *kp) { - struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, - PCI_DEVICE_ID_ZIP_PF, NULL); - u32 n, q_num; - u8 rev_id; - int ret; - - if (!val) - return -EINVAL; - - if (!pdev) { - q_num = min_t(u32, HZIP_QUEUE_NUM_V1, HZIP_QUEUE_NUM_V2); - pr_info("No device 
found currently, suppose queue number is %d\n", - q_num); - } else { - rev_id = pdev->revision; - switch (rev_id) { - case QM_HW_V1: - q_num = HZIP_QUEUE_NUM_V1; - break; - case QM_HW_V2: - q_num = HZIP_QUEUE_NUM_V2; - break; - default: - return -EINVAL; - } - } - - ret = kstrtou32(val, 10, &n); - if (ret != 0 || n > q_num || n == 0) - return -EINVAL; - - return param_set_int(val, kp); + return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF); } static const struct kernel_param_ops pf_q_num_ops = { -- cgit v1.2.3-59-g8ed1b From b67202e8ed30bfa07b07a6f8fc762417a9a4e6de Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Sat, 9 May 2020 17:43:58 +0800 Subject: crypto: hisilicon/qm - add state machine for QM Add specific states for qm and qp, every state change under critical region to prevent from race condition. Meanwhile, qp state change will also depend on qm state. Due to the introduction of these states, it is necessary to pay attention to the calls of public logic, such as concurrent scenarios resetting and releasing queue will call hisi_qm_stop, which needs to add additional status to distinguish and process. Signed-off-by: Zhou Wang Signed-off-by: Shukun Tan Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 366 +++++++++++++++++++++++++++++++++--------- drivers/crypto/hisilicon/qm.h | 24 ++- 2 files changed, 307 insertions(+), 83 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 69d02cb40e4b..e42097e1d8a7 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -352,6 +352,93 @@ static const char * const qm_fifo_overflow[] = { "cq", "eq", "aeq", }; +static const char * const qm_s[] = { + "init", "start", "close", "stop", +}; + +static const char * const qp_s[] = { + "none", "init", "start", "stop", "close", +}; + +static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new) +{ + enum qm_state curr = atomic_read(&qm->status.flags); + bool avail = false; + + switch (curr) { + case QM_INIT: + if (new == QM_START || new == QM_CLOSE) + avail = true; + break; + case QM_START: + if (new == QM_STOP) + avail = true; + break; + case QM_STOP: + if (new == QM_CLOSE || new == QM_START) + avail = true; + break; + default: + break; + } + + dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n", + qm_s[curr], qm_s[new]); + + if (!avail) + dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n", + qm_s[curr], qm_s[new]); + + return avail; +} + +static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp, + enum qp_state new) +{ + enum qm_state qm_curr = atomic_read(&qm->status.flags); + enum qp_state qp_curr = 0; + bool avail = false; + + if (qp) + qp_curr = atomic_read(&qp->qp_status.flags); + + switch (new) { + case QP_INIT: + if (qm_curr == QM_START || qm_curr == QM_INIT) + avail = true; + break; + case QP_START: + if ((qm_curr == QM_START && qp_curr == QP_INIT) || + (qm_curr == QM_START && qp_curr == QP_STOP)) + avail = true; + break; + case QP_STOP: + if ((qm_curr == QM_START && qp_curr == QP_START) || + (qp_curr == QP_INIT)) + avail = true; + break; + case QP_CLOSE: + if ((qm_curr == QM_START && qp_curr == QP_INIT) || + (qm_curr == QM_START && qp_curr == QP_STOP) || + (qm_curr == QM_STOP && qp_curr == QP_STOP) || + (qm_curr == QM_STOP && qp_curr == QP_INIT)) + avail = true; + break; + default: + break; + } + + dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n", + qp_s[qp_curr], qp_s[new], qm_s[qm_curr]); + + if (!avail) + dev_warn(&qm->pdev->dev, + "Can not change qp state from %s to 
%s in QM %s\n", + qp_s[qp_curr], qp_s[new], qm_s[qm_curr]); + + return avail; +} + /* return 0 mailbox ready, -ETIMEDOUT hardware timeout */ static int qm_wait_mb_ready(struct hisi_qm *qm) { @@ -699,7 +786,7 @@ static void qm_init_qp_status(struct hisi_qp *qp) qp_status->sq_tail = 0; qp_status->cq_head = 0; qp_status->cqc_phase = true; - qp_status->flags = 0; + atomic_set(&qp_status->flags, 0); } static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, @@ -1155,29 +1242,21 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp) return qp->sqe + sq_tail * qp->qm->sqe_size; } -/** - * hisi_qm_create_qp() - Create a queue pair from qm. - * @qm: The qm we create a qp from. - * @alg_type: Accelerator specific algorithm type in sqc. - * - * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating - * qp memory fails. - */ -struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) +static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) { struct device *dev = &qm->pdev->dev; struct hisi_qp *qp; int qp_id, ret; + if (!qm_qp_avail_state(qm, NULL, QP_INIT)) + return ERR_PTR(-EPERM); + qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); - write_lock(&qm->qps_lock); - qp_id = find_first_zero_bit(qm->qp_bitmap, qm->qp_num); if (qp_id >= qm->qp_num) { - write_unlock(&qm->qps_lock); dev_info(&qm->pdev->dev, "QM all queues are busy!\n"); ret = -EBUSY; goto err_free_qp; @@ -1185,9 +1264,6 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) set_bit(qp_id, qm->qp_bitmap); qm->qp_array[qp_id] = qp; qm->qp_in_used++; - - write_unlock(&qm->qps_lock); - qp->qm = qm; if (qm->use_dma_api) { @@ -1206,18 +1282,36 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) qp->qp_id = qp_id; qp->alg_type = alg_type; + atomic_set(&qp->qp_status.flags, QP_INIT); return qp; err_clear_bit: - write_lock(&qm->qps_lock); qm->qp_array[qp_id] = NULL; clear_bit(qp_id, qm->qp_bitmap); - write_unlock(&qm->qps_lock); err_free_qp: kfree(qp); return ERR_PTR(ret); } + +/** + * hisi_qm_create_qp() - Create a queue pair from qm. + * @qm: The qm we create a qp from. + * @alg_type: Accelerator specific algorithm type in sqc. + * + * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating + * qp memory fails. + */ +struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) +{ + struct hisi_qp *qp; + + down_write(&qm->qps_lock); + qp = qm_create_qp_nolock(qm, alg_type); + up_write(&qm->qps_lock); + + return qp; +} EXPORT_SYMBOL_GPL(hisi_qm_create_qp); /** @@ -1232,16 +1326,23 @@ void hisi_qm_release_qp(struct hisi_qp *qp) struct qm_dma *qdma = &qp->qdma; struct device *dev = &qm->pdev->dev; + down_write(&qm->qps_lock); + + if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) { + up_write(&qm->qps_lock); + return; + } + if (qm->use_dma_api && qdma->va) dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); - write_lock(&qm->qps_lock); qm->qp_array[qp->qp_id] = NULL; clear_bit(qp->qp_id, qm->qp_bitmap); qm->qp_in_used--; - write_unlock(&qm->qps_lock); kfree(qp); + + up_write(&qm->qps_lock); } EXPORT_SYMBOL_GPL(hisi_qm_release_qp); @@ -1312,15 +1413,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) return ret; } -/** - * hisi_qm_start_qp() - Start a qp into running. - * @qp: The qp we want to start to run. - * @arg: Accelerator specific argument. - * - * After this function, qp can receive request from user. Return 0 if - * successful, Return -EBUSY if failed. 
- */ -int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) +static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) { struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; @@ -1330,6 +1423,9 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) size_t off = 0; int ret; + if (!qm_qp_avail_state(qm, qp, QP_START)) + return -EPERM; + #define QP_INIT_BUF(qp, type, size) do { \ (qp)->type = ((qp)->qdma.va + (off)); \ (qp)->type##_dma = (qp)->qdma.dma + (off); \ @@ -1360,10 +1456,31 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) if (ret) return ret; + atomic_set(&qp->qp_status.flags, QP_START); dev_dbg(dev, "queue %d started\n", qp_id); return 0; } + +/** + * hisi_qm_start_qp() - Start a qp into running. + * @qp: The qp we want to start to run. + * @arg: Accelerator specific argument. + * + * After this function, qp can receive request from user. Return 0 if + * successful, Return -EBUSY if failed. + */ +int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) +{ + struct hisi_qm *qm = qp->qm; + int ret; + + down_write(&qm->qps_lock); + ret = qm_start_qp_nolock(qp, arg); + up_write(&qm->qps_lock); + + return ret; +} EXPORT_SYMBOL_GPL(hisi_qm_start_qp); static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, @@ -1467,20 +1584,26 @@ static int qm_drain_qp(struct hisi_qp *qp) return ret; } -/** - * hisi_qm_stop_qp() - Stop a qp in qm. - * @qp: The qp we want to stop. - * - * This function is reverse of hisi_qm_start_qp. Return 0 if successful. - */ -int hisi_qm_stop_qp(struct hisi_qp *qp) +static int qm_stop_qp_nolock(struct hisi_qp *qp) { struct device *dev = &qp->qm->pdev->dev; int ret; - /* it is stopped */ - if (test_bit(QP_STOP, &qp->qp_status.flags)) + /* + * It is allowed to stop and release qp when reset, If the qp is + * stopped when reset but still want to be released then, the + * is_resetting flag should be set negative so that this qp will not + * be restarted after reset. + */ + if (atomic_read(&qp->qp_status.flags) == QP_STOP) { + qp->is_resetting = false; return 0; + } + + if (!qm_qp_avail_state(qp->qm, qp, QP_STOP)) + return -EPERM; + + atomic_set(&qp->qp_status.flags, QP_STOP); ret = qm_drain_qp(qp); if (ret) @@ -1491,12 +1614,27 @@ int hisi_qm_stop_qp(struct hisi_qp *qp) else flush_work(&qp->qm->work); - set_bit(QP_STOP, &qp->qp_status.flags); - dev_dbg(dev, "stop queue %u!", qp->qp_id); return 0; } + +/** + * hisi_qm_stop_qp() - Stop a qp in qm. + * @qp: The qp we want to stop. + * + * This function is reverse of hisi_qm_start_qp. Return 0 if successful. + */ +int hisi_qm_stop_qp(struct hisi_qp *qp) +{ + int ret; + + down_write(&qp->qm->qps_lock); + ret = qm_stop_qp_nolock(qp); + up_write(&qp->qm->qps_lock); + + return ret; +} EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); /** @@ -1506,6 +1644,13 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); * * This function will return -EBUSY if qp is currently full, and -EAGAIN * if qp related qm is resetting. + * + * Note: This function may run with qm_irq_thread and ACC reset at same time. + * It has no race with qm_irq_thread. However, during hisi_qp_send, ACC + * reset may happen, we have no lock here considering performance. This + * causes current qm_db sending fail or can not receive sended sqe. QM + * sync/async receive function should handle the error sqe. ACC reset + * done function should clear used sqe to 0. 
*/ int hisi_qp_send(struct hisi_qp *qp, const void *msg) { @@ -1514,7 +1659,9 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg) u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH; void *sqe = qm_get_avail_sqe(qp); - if (unlikely(test_bit(QP_STOP, &qp->qp_status.flags))) { + if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || + atomic_read(&qp->qm->status.flags) == QM_STOP || + qp->is_resetting)) { dev_info(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); return -EAGAIN; } @@ -1554,11 +1701,11 @@ static int hisi_qm_get_available_instances(struct uacce_device *uacce) int i, ret; struct hisi_qm *qm = uacce->priv; - read_lock(&qm->qps_lock); + down_read(&qm->qps_lock); for (i = 0, ret = 0; i < qm->qp_num; i++) if (!qm->qp_array[i]) ret++; - read_unlock(&qm->qps_lock); + up_read(&qm->qps_lock); return ret; } @@ -1658,9 +1805,9 @@ static int qm_set_sqctype(struct uacce_queue *q, u16 type) struct hisi_qm *qm = q->uacce->priv; struct hisi_qp *qp = q->priv; - write_lock(&qm->qps_lock); + down_write(&qm->qps_lock); qp->alg_type = type; - write_unlock(&qm->qps_lock); + up_write(&qm->qps_lock); return 0; } @@ -1762,9 +1909,9 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm) { int ret; - read_lock(&qm->qps_lock); + down_read(&qm->qps_lock); ret = qm->qp_num - qm->qp_in_used; - read_unlock(&qm->qps_lock); + up_read(&qm->qps_lock); return ret; } @@ -1840,9 +1987,10 @@ int hisi_qm_init(struct hisi_qm *qm) qm->qp_in_used = 0; mutex_init(&qm->mailbox_lock); - rwlock_init(&qm->qps_lock); + init_rwsem(&qm->qps_lock); INIT_WORK(&qm->work, qm_work_process); + atomic_set(&qm->status.flags, QM_INIT); dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf", qm->use_dma_api ? "dma api" : "iommu api"); @@ -1875,6 +2023,13 @@ void hisi_qm_uninit(struct hisi_qm *qm) struct pci_dev *pdev = qm->pdev; struct device *dev = &pdev->dev; + down_write(&qm->qps_lock); + + if (!qm_avail_state(qm, QM_CLOSE)) { + up_write(&qm->qps_lock); + return; + } + uacce_remove(qm->uacce); qm->uacce = NULL; @@ -1890,6 +2045,8 @@ void hisi_qm_uninit(struct hisi_qm *qm) iounmap(qm->io_base); pci_release_mem_regions(pdev); pci_disable_device(pdev); + + up_write(&qm->qps_lock); } EXPORT_SYMBOL_GPL(hisi_qm_uninit); @@ -2072,12 +2229,21 @@ static int __hisi_qm_start(struct hisi_qm *qm) int hisi_qm_start(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; + int ret = 0; + + down_write(&qm->qps_lock); + + if (!qm_avail_state(qm, QM_START)) { + up_write(&qm->qps_lock); + return -EPERM; + } dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num); if (!qm->qp_num) { dev_err(dev, "qp_num should not be 0\n"); - return -EINVAL; + ret = -EINVAL; + goto err_unlock; } if (!qm->qp_bitmap) { @@ -2086,12 +2252,15 @@ int hisi_qm_start(struct hisi_qm *qm) qm->qp_array = devm_kcalloc(dev, qm->qp_num, sizeof(struct hisi_qp *), GFP_KERNEL); - if (!qm->qp_bitmap || !qm->qp_array) - return -ENOMEM; + if (!qm->qp_bitmap || !qm->qp_array) { + ret = -ENOMEM; + goto err_unlock; + } } if (!qm->use_dma_api) { dev_dbg(&qm->pdev->dev, "qm delay start\n"); + up_write(&qm->qps_lock); return 0; } else if (!qm->qdma.va) { qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) + @@ -2102,11 +2271,19 @@ int hisi_qm_start(struct hisi_qm *qm) &qm->qdma.dma, GFP_KERNEL); dev_dbg(dev, "allocate qm dma buf(va=%pK, dma=%pad, size=%zx)\n", qm->qdma.va, &qm->qdma.dma, qm->qdma.size); - if (!qm->qdma.va) - return -ENOMEM; + if (!qm->qdma.va) { + ret = -ENOMEM; + goto err_unlock; + } } - return __hisi_qm_start(qm); + ret = __hisi_qm_start(qm); + if 
(!ret) + atomic_set(&qm->status.flags, QM_START); + +err_unlock: + up_write(&qm->qps_lock); + return ret; } EXPORT_SYMBOL_GPL(hisi_qm_start); @@ -2120,20 +2297,44 @@ static int qm_restart(struct hisi_qm *qm) if (ret < 0) return ret; - write_lock(&qm->qps_lock); + down_write(&qm->qps_lock); for (i = 0; i < qm->qp_num; i++) { qp = qm->qp_array[i]; - if (qp) { - ret = hisi_qm_start_qp(qp, 0); + if (qp && atomic_read(&qp->qp_status.flags) == QP_STOP && + qp->is_resetting == true) { + ret = qm_start_qp_nolock(qp, 0); if (ret < 0) { dev_err(dev, "Failed to start qp%d!\n", i); - write_unlock(&qm->qps_lock); + up_write(&qm->qps_lock); + return ret; + } + qp->is_resetting = false; + } + } + up_write(&qm->qps_lock); + + return 0; +} + +/* Stop started qps in reset flow */ +static int qm_stop_started_qp(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + struct hisi_qp *qp; + int i, ret; + + for (i = 0; i < qm->qp_num; i++) { + qp = qm->qp_array[i]; + if (qp && atomic_read(&qp->qp_status.flags) == QP_START) { + qp->is_resetting = true; + ret = qm_stop_qp_nolock(qp); + if (ret < 0) { + dev_err(dev, "Failed to stop qp%d!\n", i); return ret; } } } - write_unlock(&qm->qps_lock); return 0; } @@ -2149,7 +2350,7 @@ static void qm_clear_queues(struct hisi_qm *qm) for (i = 0; i < qm->qp_num; i++) { qp = qm->qp_array[i]; - if (qp) + if (qp && qp->is_resetting) memset(qp->qdma.va, 0, qp->qdma.size); } @@ -2166,41 +2367,43 @@ static void qm_clear_queues(struct hisi_qm *qm) */ int hisi_qm_stop(struct hisi_qm *qm) { - struct device *dev; - struct hisi_qp *qp; - int ret = 0, i; + struct device *dev = &qm->pdev->dev; + int ret = 0; - if (!qm || !qm->pdev) { - WARN_ON(1); - return -EINVAL; + down_write(&qm->qps_lock); + + if (!qm_avail_state(qm, QM_STOP)) { + ret = -EPERM; + goto err_unlock; } - dev = &qm->pdev->dev; + if (qm->status.stop_reason == QM_SOFT_RESET || + qm->status.stop_reason == QM_FLR) { + ret = qm_stop_started_qp(qm); + if (ret < 0) { + dev_err(dev, "Failed to stop started qp!\n"); + goto err_unlock; + } + } /* Mask eq and aeq irq */ writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); - /* Stop all qps belong to this qm */ - for (i = 0; i < qm->qp_num; i++) { - qp = qm->qp_array[i]; - if (qp) { - ret = hisi_qm_stop_qp(qp); - if (ret < 0) { - dev_err(dev, "Failed to stop qp%d!\n", i); - return -EBUSY; - } - } - } - if (qm->fun_type == QM_HW_PF) { ret = hisi_qm_set_vft(qm, 0, 0, 0); - if (ret < 0) + if (ret < 0) { dev_err(dev, "Failed to set vft!\n"); + ret = -EBUSY; + goto err_unlock; + } } qm_clear_queues(qm); + atomic_set(&qm->status.flags, QM_STOP); +err_unlock: + up_write(&qm->qps_lock); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_stop); @@ -2772,6 +2975,7 @@ static int qm_set_msi(struct hisi_qm *qm, bool set) static int qm_vf_reset_prepare(struct hisi_qm *qm) { struct hisi_qm_list *qm_list = qm->qm_list; + int stop_reason = qm->status.stop_reason; struct pci_dev *pdev = qm->pdev; struct pci_dev *virtfn; struct hisi_qm *vf_qm; @@ -2784,6 +2988,7 @@ static int qm_vf_reset_prepare(struct hisi_qm *qm) continue; if (pci_physfn(virtfn) == pdev) { + vf_qm->status.stop_reason = stop_reason; ret = hisi_qm_stop(vf_qm); if (ret) goto stop_fail; @@ -2830,6 +3035,7 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm) } } + qm->status.stop_reason = QM_SOFT_RESET; ret = hisi_qm_stop(qm); if (ret) { pci_err(pdev, "Fails to stop QM!\n"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index d1be8cdc99a3..eff156a26f48 100644 --- 
a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -84,8 +84,24 @@ /* page number for queue file region */ #define QM_DOORBELL_PAGE_NR 1 +enum qm_stop_reason { + QM_NORMAL, + QM_SOFT_RESET, + QM_FLR, +}; + +enum qm_state { + QM_INIT = 0, + QM_START, + QM_CLOSE, + QM_STOP, +}; + enum qp_state { + QP_INIT = 1, + QP_START, QP_STOP, + QP_CLOSE, }; enum qm_hw_ver { @@ -129,7 +145,8 @@ struct hisi_qm_status { bool eqc_phase; u32 aeq_head; bool aeqc_phase; - unsigned long flags; + atomic_t flags; + int stop_reason; }; struct hisi_qm; @@ -196,7 +213,7 @@ struct hisi_qm { struct hisi_qm_err_status err_status; unsigned long reset_flag; - rwlock_t qps_lock; + struct rw_semaphore qps_lock; unsigned long *qp_bitmap; struct hisi_qp **qp_array; @@ -225,7 +242,7 @@ struct hisi_qp_status { u16 sq_tail; u16 cq_head; bool cqc_phase; - unsigned long flags; + atomic_t flags; }; struct hisi_qp_ops { @@ -250,6 +267,7 @@ struct hisi_qp { void (*event_cb)(struct hisi_qp *qp); struct hisi_qm *qm; + bool is_resetting; u16 pasid; struct uacce_queue *uacce_q; }; -- cgit v1.2.3-59-g8ed1b From 7ce396fa12a96a0e709a7b55cd5ab24161259634 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Sat, 9 May 2020 17:43:59 +0800 Subject: crypto: hisilicon - add FLR support Add callback reset_prepare and reset_done in QM, The callback reset_prepare will uninit device error configuration and stop the QM, the callback reset_done will init the device error configuration and restart the QM. Uninit the error configuration will disable device block master OOO when Multi-bit ECC error occurs to avoid the request of FLR will not return. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 16 ++++ drivers/crypto/hisilicon/qm.c | 133 +++++++++++++++++++++++++++++- drivers/crypto/hisilicon/qm.h | 2 + drivers/crypto/hisilicon/sec2/sec_main.c | 2 + drivers/crypto/hisilicon/zip/zip_main.c | 16 ++++ 5 files changed, 165 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 18b3bb1ae950..3475b1999635 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -310,12 +310,21 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm) static void hpre_hw_error_disable(struct hisi_qm *qm) { + u32 val; + /* disable hpre hw error interrupts */ writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK); + + /* disable HPRE block master OOO when m-bit error occur */ + val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); + val &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE; + writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); } static void hpre_hw_error_enable(struct hisi_qm *qm) { + u32 val; + /* clear HPRE hw error source if having */ writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_HAC_SOURCE_INT); @@ -324,6 +333,11 @@ static void hpre_hw_error_enable(struct hisi_qm *qm) writel(HPRE_HAC_RAS_CE_ENABLE, qm->io_base + HPRE_RAS_CE_ENB); writel(HPRE_HAC_RAS_NFE_ENABLE, qm->io_base + HPRE_RAS_NFE_ENB); writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB); + + /* enable HPRE block master OOO when m-bit error occur */ + val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); + val |= HPRE_AM_OOO_SHUTDOWN_ENABLE; + writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); } static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) @@ -851,6 +865,8 @@ static void hpre_remove(struct pci_dev *pdev) static const struct pci_error_handlers hpre_err_handler 
= { .error_detected = hisi_qm_dev_err_detected, .slot_reset = hisi_qm_dev_slot_reset, + .reset_prepare = hisi_qm_reset_prepare, + .reset_done = hisi_qm_reset_done, }; static struct pci_driver hpre_pci_driver = { diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index e42097e1d8a7..c30df080b9d0 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -175,6 +175,7 @@ #define QMC_ALIGN(sz) ALIGN(sz, 32) #define QM_DBG_TMP_BUF_LEN 22 +#define QM_PCI_COMMAND_INVALID ~0 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ @@ -2874,6 +2875,11 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, } EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); +static int qm_get_hw_error_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); +} + static int qm_check_req_recv(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -3166,9 +3172,7 @@ restart_fail: static int qm_get_dev_err_status(struct hisi_qm *qm) { - - return(qm->err_ini->get_dev_hw_err_status(qm) & - qm->err_ini->err_info.ecc_2bits_mask); + return qm->err_ini->get_dev_hw_err_status(qm); } static int qm_dev_hw_init(struct hisi_qm *qm) @@ -3190,7 +3194,8 @@ static void qm_restart_prepare(struct hisi_qm *qm) qm->io_base + ACC_AM_CFG_PORT_WR_EN); /* clear dev ecc 2bit error source if having */ - value = qm_get_dev_err_status(qm); + value = qm_get_dev_err_status(qm) & + qm->err_ini->err_info.ecc_2bits_mask; if (value && qm->err_ini->clear_dev_hw_err_status) qm->err_ini->clear_dev_hw_err_status(qm, value); @@ -3336,6 +3341,126 @@ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev) } EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset); +/* check the interrupt is ecc-mbit error or not */ +static int qm_check_dev_error(struct hisi_qm *qm) +{ + int ret; + + if (qm->fun_type == QM_HW_VF) + return 0; + + ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT; + if (ret) + return ret; + + return (qm_get_dev_err_status(qm) & + qm->err_ini->err_info.ecc_2bits_mask); +} + +void hisi_qm_reset_prepare(struct pci_dev *pdev) +{ + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); + struct hisi_qm *qm = pci_get_drvdata(pdev); + u32 delay = 0; + int ret; + + hisi_qm_dev_err_uninit(pf_qm); + + /* + * Check whether there is an ECC mbit error, If it occurs, need to + * wait for soft reset to fix it. 
+ */ + while (qm_check_dev_error(pf_qm)) { + msleep(++delay); + if (delay > QM_RESET_WAIT_TIMEOUT) + return; + } + + ret = qm_reset_prepare_ready(qm); + if (ret) { + pci_err(pdev, "FLR not ready!\n"); + return; + } + + if (qm->vfs_num) { + ret = qm_vf_reset_prepare(qm); + if (ret) { + pci_err(pdev, "Failed to prepare reset, ret = %d.\n", + ret); + return; + } + } + + ret = hisi_qm_stop(qm); + if (ret) { + pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); + return; + } + + pci_info(pdev, "FLR resetting...\n"); +} +EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare); + +static bool qm_flr_reset_complete(struct pci_dev *pdev) +{ + struct pci_dev *pf_pdev = pci_physfn(pdev); + struct hisi_qm *qm = pci_get_drvdata(pf_pdev); + u32 id; + + pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); + if (id == QM_PCI_COMMAND_INVALID) { + pci_err(pdev, "Device can not be used!\n"); + return false; + } + + clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag); + + return true; +} + +void hisi_qm_reset_done(struct pci_dev *pdev) +{ + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); + struct hisi_qm *qm = pci_get_drvdata(pdev); + int ret; + + hisi_qm_dev_err_init(pf_qm); + + ret = qm_restart(qm); + if (ret) { + pci_err(pdev, "Failed to start QM, ret = %d.\n", ret); + goto flr_done; + } + + if (qm->fun_type == QM_HW_PF) { + ret = qm_dev_hw_init(qm); + if (ret) { + pci_err(pdev, "Failed to init PF, ret = %d.\n", ret); + goto flr_done; + } + + if (!qm->vfs_num) + goto flr_done; + + ret = qm_vf_q_assign(qm, qm->vfs_num); + if (ret) { + pci_err(pdev, "Failed to assign VFs, ret = %d.\n", ret); + goto flr_done; + } + + ret = qm_vf_reset_done(qm); + if (ret) { + pci_err(pdev, "Failed to start VFs, ret = %d.\n", ret); + goto flr_done; + } + } + +flr_done: + if (qm_flr_reset_complete(pdev)) + pci_info(pdev, "FLR reset complete\n"); +} +EXPORT_SYMBOL_GPL(hisi_qm_reset_done); + MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zhou Wang "); MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index eff156a26f48..25934e3c7acd 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -371,6 +371,8 @@ void hisi_qm_dev_err_uninit(struct hisi_qm *qm); pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, pci_channel_state_t state); pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev); +void hisi_qm_reset_prepare(struct pci_dev *pdev); +void hisi_qm_reset_done(struct pci_dev *pdev); struct hisi_acc_sgl_pool; struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 06f840c397f2..067d1c22fc00 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -914,6 +914,8 @@ static void sec_remove(struct pci_dev *pdev) static const struct pci_error_handlers sec_err_handler = { .error_detected = hisi_qm_dev_err_detected, .slot_reset = hisi_qm_dev_slot_reset, + .reset_prepare = hisi_qm_reset_prepare, + .reset_done = hisi_qm_reset_done, }; static struct pci_driver sec_pci_driver = { diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index a8cb699c67b7..da90218207cb 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -278,6 +278,8 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) static void hisi_zip_hw_error_enable(struct hisi_qm *qm) { + u32 val; + if 
(qm->ver == QM_HW_V1) { writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG); @@ -296,12 +298,24 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm) /* enable ZIP hw error interrupts */ writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG); + + /* enable ZIP block master OOO when m-bit error occur */ + val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); + val = val | HZIP_AXI_SHUTDOWN_ENABLE; + writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); } static void hisi_zip_hw_error_disable(struct hisi_qm *qm) { + u32 val; + /* disable ZIP hw error interrupts */ writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG); + + /* disable ZIP block master OOO when m-bit error occur */ + val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); + val = val & ~HZIP_AXI_SHUTDOWN_ENABLE; + writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); } static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file) @@ -802,6 +816,8 @@ static void hisi_zip_remove(struct pci_dev *pdev) static const struct pci_error_handlers hisi_zip_err_handler = { .error_detected = hisi_qm_dev_err_detected, .slot_reset = hisi_qm_dev_slot_reset, + .reset_prepare = hisi_qm_reset_prepare, + .reset_done = hisi_qm_reset_done, }; static struct pci_driver hisi_zip_pci_driver = { -- cgit v1.2.3-59-g8ed1b From b977e03005127b1cbfef05517fbedaa7c5a177a1 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Sat, 9 May 2020 17:44:00 +0800 Subject: crypto: hisilicon - remove use_dma_api related codes The codes related use_dma_api is useless which should be removed. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 1 - drivers/crypto/hisilicon/qm.c | 34 ++++++++++++------------------- drivers/crypto/hisilicon/qm.h | 1 - drivers/crypto/hisilicon/sec2/sec_main.c | 1 - drivers/crypto/hisilicon/zip/zip_main.c | 1 - 5 files changed, 13 insertions(+), 25 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 3475b1999635..dba7b6012c5d 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -679,7 +679,6 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_base = HPRE_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; } - qm->use_dma_api = true; return hisi_qm_init(qm); } diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index c30df080b9d0..800beef2639f 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1267,20 +1267,18 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) qm->qp_in_used++; qp->qm = qm; - if (qm->use_dma_api) { - qp->qdma.size = qm->sqe_size * QM_Q_DEPTH + - sizeof(struct qm_cqe) * QM_Q_DEPTH; - qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size, - &qp->qdma.dma, GFP_KERNEL); - if (!qp->qdma.va) { - ret = -ENOMEM; - goto err_clear_bit; - } - - dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n", - qp->qdma.va, &qp->qdma.dma, qp->qdma.size); + qp->qdma.size = qm->sqe_size * QM_Q_DEPTH + + sizeof(struct qm_cqe) * QM_Q_DEPTH; + qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size, + &qp->qdma.dma, GFP_KERNEL); + if (!qp->qdma.va) { + ret = -ENOMEM; + goto err_clear_bit; } + dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n", + qp->qdma.va, &qp->qdma.dma, qp->qdma.size); + qp->qp_id = qp_id; qp->alg_type = alg_type; atomic_set(&qp->qp_status.flags, QP_INIT); @@ -1334,7 +1332,7 @@ void 
hisi_qm_release_qp(struct hisi_qp *qp) return; } - if (qm->use_dma_api && qdma->va) + if (qdma->va) dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); qm->qp_array[qp->qp_id] = NULL; @@ -1992,8 +1990,6 @@ int hisi_qm_init(struct hisi_qm *qm) INIT_WORK(&qm->work, qm_work_process); atomic_set(&qm->status.flags, QM_INIT); - dev_dbg(dev, "init qm %s with %s\n", pdev->is_physfn ? "pf" : "vf", - qm->use_dma_api ? "dma api" : "iommu api"); return 0; @@ -2034,7 +2030,7 @@ void hisi_qm_uninit(struct hisi_qm *qm) uacce_remove(qm->uacce); qm->uacce = NULL; - if (qm->use_dma_api && qm->qdma.va) { + if (qm->qdma.va) { hisi_qm_cache_wb(qm); dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); @@ -2259,11 +2255,7 @@ int hisi_qm_start(struct hisi_qm *qm) } } - if (!qm->use_dma_api) { - dev_dbg(&qm->pdev->dev, "qm delay start\n"); - up_write(&qm->qps_lock); - return 0; - } else if (!qm->qdma.va) { + if (!qm->qdma.va) { qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) + QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) + QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 25934e3c7acd..743cb63d2de6 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -230,7 +230,6 @@ struct hisi_qm { struct work_struct work; const char *algs; - bool use_dma_api; bool use_sva; resource_size_t phys_base; resource_size_t phys_size; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 067d1c22fc00..8ff6e52a58d6 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -749,7 +749,6 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_base = SEC_PF_DEF_Q_NUM; qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; } - qm->use_dma_api = true; return hisi_qm_init(qm); } diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index da90218207cb..903dff968c40 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -692,7 +692,6 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) if (rev_id == QM_HW_UNKNOWN) return -EINVAL; - qm->use_dma_api = true; qm->pdev = pdev; qm->ver = rev_id; -- cgit v1.2.3-59-g8ed1b From d9701f8d9b12903bf212f542235659477024a43f Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sat, 9 May 2020 17:44:01 +0800 Subject: crypto: hisilicon - unify initial value assignment into QM Some initial value assignment of struct hisi_qm could put into QM. Signed-off-by: Weili Qian Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 22 +++++++------- drivers/crypto/hisilicon/qm.c | 44 +++++++++++++++++++-------- drivers/crypto/hisilicon/sec2/sec_main.c | 50 +++++++++++++++---------------- drivers/crypto/hisilicon/zip/zip_main.c | 37 ++++++++++------------- 4 files changed, 81 insertions(+), 72 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index dba7b6012c5d..bfae7fb23e4c 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -672,12 +672,13 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->ver = rev_id; qm->sqe_size = HPRE_SQE_SIZE; qm->dev_name = hpre_name; - qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ? 
- QM_HW_PF : QM_HW_VF; - if (pdev->is_physfn) { + qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ? + QM_HW_PF : QM_HW_VF; + if (qm->fun_type == QM_HW_PF) { qm->qp_base = HPRE_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; + qm->qm_list = &hpre_devices; } return hisi_qm_init(qm); @@ -748,7 +749,6 @@ static int hpre_pf_probe_init(struct hpre *hpre) if (ret) return ret; - qm->qm_list = &hpre_devices; qm->err_ini = &hpre_err_ini; hisi_qm_dev_err_init(qm); @@ -758,15 +758,15 @@ static int hpre_pf_probe_init(struct hpre *hpre) static int hpre_probe_init(struct hpre *hpre) { struct hisi_qm *qm = &hpre->qm; - int ret = -ENODEV; + int ret; - if (qm->fun_type == QM_HW_PF) + if (qm->fun_type == QM_HW_PF) { ret = hpre_pf_probe_init(hpre); - else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) - /* v2 starts to support get vft by mailbox */ - ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + if (ret) + return ret; + } - return ret; + return 0; } static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -779,8 +779,6 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!hpre) return -ENOMEM; - pci_set_drvdata(pdev, hpre); - qm = &hpre->qm; ret = hpre_qm_init(qm, pdev); if (ret) { diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 800beef2639f..e40163835346 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -1916,6 +1916,27 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm) } EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num); +static void hisi_qm_pre_init(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + + switch (qm->ver) { + case QM_HW_V1: + qm->ops = &qm_hw_ops_v1; + break; + case QM_HW_V2: + qm->ops = &qm_hw_ops_v2; + break; + default: + return; + } + + pci_set_drvdata(pdev, qm); + mutex_init(&qm->mailbox_lock); + init_rwsem(&qm->qps_lock); + qm->qp_in_used = 0; +} + /** * hisi_qm_init() - Initialize configures about qm. * @qm: The qm needing init. 
@@ -1929,16 +1950,7 @@ int hisi_qm_init(struct hisi_qm *qm) unsigned int num_vec; int ret; - switch (qm->ver) { - case QM_HW_V1: - qm->ops = &qm_hw_ops_v1; - break; - case QM_HW_V2: - qm->ops = &qm_hw_ops_v2; - break; - default: - return -EINVAL; - } + hisi_qm_pre_init(qm); ret = qm_alloc_uacce(qm); if (ret < 0) @@ -1984,15 +1996,21 @@ int hisi_qm_init(struct hisi_qm *qm) if (ret) goto err_free_irq_vectors; - qm->qp_in_used = 0; - mutex_init(&qm->mailbox_lock); - init_rwsem(&qm->qps_lock); + if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) { + /* v2 starts to support get vft by mailbox */ + ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + if (ret) + goto err_irq_unregister; + } + INIT_WORK(&qm->work, qm_work_process); atomic_set(&qm->status.flags, QM_INIT); return 0; +err_irq_unregister: + qm_irq_unregister(qm); err_free_irq_vectors: pci_free_irq_vectors(pdev); err_iounmap: diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 8ff6e52a58d6..74e806fd9ff9 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -722,6 +722,7 @@ static int sec_pf_probe_init(struct sec_dev *sec) static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { enum qm_hw_ver rev_id; + int ret; rev_id = hisi_qm_get_hw_version(pdev); if (rev_id == QM_HW_UNKNOWN) @@ -729,9 +730,9 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->pdev = pdev; qm->ver = rev_id; - qm->sqe_size = SEC_SQE_SIZE; qm->dev_name = sec_name; + qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ? QM_HW_PF : QM_HW_VF; if (qm->fun_type == QM_HW_PF) { @@ -750,7 +751,25 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; } - return hisi_qm_init(qm); + /* + * WQ_HIGHPRI: SEC request must be low delayed, + * so need a high priority workqueue. + * WQ_UNBOUND: SEC task is likely with long + * running CPU intensive workloads. + */ + qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | + WQ_UNBOUND, num_online_cpus(), + pci_name(qm->pdev)); + if (!qm->wq) { + pci_err(qm->pdev, "fail to alloc workqueue\n"); + return -ENOMEM; + } + + ret = hisi_qm_init(qm); + if (ret) + destroy_workqueue(qm->wq); + + return ret; } static void sec_qm_uninit(struct hisi_qm *qm) @@ -763,29 +782,10 @@ static int sec_probe_init(struct sec_dev *sec) struct hisi_qm *qm = &sec->qm; int ret; - /* - * WQ_HIGHPRI: SEC request must be low delayed, - * so need a high priority workqueue. - * WQ_UNBOUND: SEC task is likely with long - * running CPU intensive workloads. 
- */ - qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | - WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(), - pci_name(qm->pdev)); - if (!qm->wq) { - pci_err(qm->pdev, "fail to alloc workqueue\n"); - return -ENOMEM; - } - - if (qm->fun_type == QM_HW_PF) + if (qm->fun_type == QM_HW_PF) { ret = sec_pf_probe_init(sec); - else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) - /* v2 starts to support get vft by mailbox */ - ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); - - if (ret) { - destroy_workqueue(qm->wq); - return ret; + if (ret) + return ret; } return 0; @@ -825,8 +825,6 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!sec) return -ENOMEM; - pci_set_drvdata(pdev, sec); - qm = &sec->qm; ret = sec_qm_init(qm, pdev); if (ret) { diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 903dff968c40..0ddd56a0a075 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -694,12 +694,27 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->pdev = pdev; qm->ver = rev_id; - qm->algs = "zlib\ngzip"; qm->sqe_size = HZIP_SQE_SIZE; qm->dev_name = hisi_zip_name; + qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF : QM_HW_VF; + if (qm->fun_type == QM_HW_PF) { + qm->qp_base = HZIP_PF_DEF_Q_BASE; + qm->qp_num = pf_q_num; + qm->qm_list = &zip_devices; + } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { + /* + * have no way to get qm configure in VM in v1 hardware, + * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force + * to trigger only one VF in v1 hardware. + * + * v2 hardware has no such problem. + */ + qm->qp_base = HZIP_PF_DEF_Q_NUM; + qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; + } return hisi_qm_init(qm); } @@ -713,24 +728,6 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip) ret = hisi_zip_pf_probe_init(hisi_zip); if (ret) return ret; - - qm->qp_base = HZIP_PF_DEF_Q_BASE; - qm->qp_num = pf_q_num; - qm->qm_list = &zip_devices; - } else if (qm->fun_type == QM_HW_VF) { - /* - * have no way to get qm configure in VM in v1 hardware, - * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force - * to trigger only one VF in v1 hardware. - * - * v2 hardware has no such problem. - */ - if (qm->ver == QM_HW_V1) { - qm->qp_base = HZIP_PF_DEF_Q_NUM; - qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; - } else if (qm->ver == QM_HW_V2) - /* v2 starts to support get vft by mailbox */ - return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); } return 0; @@ -746,8 +743,6 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!hisi_zip) return -ENOMEM; - pci_set_drvdata(pdev, hisi_zip); - qm = &hisi_zip->qm; ret = hisi_zip_qm_init(qm, pdev); -- cgit v1.2.3-59-g8ed1b From 5308f6600a393ee848ed9d9f77b167aa6b202e9c Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Sat, 9 May 2020 17:44:02 +0800 Subject: crypto: hisilicon - QM memory management optimization Put all the code for the memory allocation into the QM initialization process. Before, The qp memory was allocated when the qp was created, and released when the qp was released, It is now changed to allocate all the qp memory once. 
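Concretely, the qp descriptors and their DMA buffers now form one pool carved out during QM initialization, and creating a qp only claims a free slot from that pool through an IDR. The following is a condensed sketch of that shape (hypothetical demo_* names and simplified error handling, not the driver's real structures):

#include <linux/idr.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/err.h>

struct demo_qp {
	void *va;
	dma_addr_t dma;
};

struct demo_qm {
	struct device *dev;
	struct idr qp_idr;
	struct demo_qp *qp_array;	/* every qp is allocated up front */
	int qp_num;
};

/* Allocate the per-qp DMA buffers once, at QM init time. */
static int demo_qm_memory_init(struct demo_qm *qm, size_t qp_size)
{
	int i;

	idr_init(&qm->qp_idr);
	qm->qp_array = kcalloc(qm->qp_num, sizeof(*qm->qp_array), GFP_KERNEL);
	if (!qm->qp_array)
		return -ENOMEM;

	for (i = 0; i < qm->qp_num; i++) {
		struct demo_qp *qp = &qm->qp_array[i];

		qp->va = dma_alloc_coherent(qm->dev, qp_size, &qp->dma,
					    GFP_KERNEL);
		if (!qp->va)
			goto err_free;
	}

	return 0;

err_free:
	while (--i >= 0)
		dma_free_coherent(qm->dev, qp_size, qm->qp_array[i].va,
				  qm->qp_array[i].dma);
	kfree(qm->qp_array);
	return -ENOMEM;
}

/* Creating a qp no longer allocates memory: it just claims a free slot. */
static struct demo_qp *demo_create_qp(struct demo_qm *qm)
{
	int id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num,
				  GFP_ATOMIC);

	if (id < 0)
		return ERR_PTR(-EBUSY);

	return &qm->qp_array[id];
}

Releasing a qp then reduces to an idr_remove() of its id; the DMA memory itself is only freed when the whole QM is torn down.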
Signed-off-by: Weili Qian Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 265 ++++++++++++++++++++---------------------- drivers/crypto/hisilicon/qm.h | 4 +- 2 files changed, 128 insertions(+), 141 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index e40163835346..e988124e732a 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -575,7 +576,7 @@ static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe) { u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; - return qm->qp_array[cqn]; + return &qm->qp_array[cqn]; } static void qm_cq_head_update(struct hisi_qp *qp) @@ -625,8 +626,7 @@ static void qm_work_process(struct work_struct *work) while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { eqe_num++; qp = qm_to_hisi_qp(qm, eqe); - if (qp) - qm_poll_qp(qp, qm); + qm_poll_qp(qp, qm); if (qm->status.eq_head == QM_Q_DEPTH - 1) { qm->status.eqc_phase = !qm->status.eqc_phase; @@ -1247,50 +1247,36 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) { struct device *dev = &qm->pdev->dev; struct hisi_qp *qp; - int qp_id, ret; + int qp_id; if (!qm_qp_avail_state(qm, NULL, QP_INIT)) return ERR_PTR(-EPERM); - qp = kzalloc(sizeof(*qp), GFP_KERNEL); - if (!qp) - return ERR_PTR(-ENOMEM); - - qp_id = find_first_zero_bit(qm->qp_bitmap, qm->qp_num); - if (qp_id >= qm->qp_num) { - dev_info(&qm->pdev->dev, "QM all queues are busy!\n"); - ret = -EBUSY; - goto err_free_qp; + if (qm->qp_in_used == qm->qp_num) { + dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", + qm->qp_num); + return ERR_PTR(-EBUSY); } - set_bit(qp_id, qm->qp_bitmap); - qm->qp_array[qp_id] = qp; - qm->qp_in_used++; - qp->qm = qm; - qp->qdma.size = qm->sqe_size * QM_Q_DEPTH + - sizeof(struct qm_cqe) * QM_Q_DEPTH; - qp->qdma.va = dma_alloc_coherent(dev, qp->qdma.size, - &qp->qdma.dma, GFP_KERNEL); - if (!qp->qdma.va) { - ret = -ENOMEM; - goto err_clear_bit; + qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); + if (qp_id < 0) { + dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", + qm->qp_num); + return ERR_PTR(-EBUSY); } - dev_dbg(dev, "allocate qp dma buf(va=%pK, dma=%pad, size=%zx)\n", - qp->qdma.va, &qp->qdma.dma, qp->qdma.size); + qp = &qm->qp_array[qp_id]; + + memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH); + qp->event_cb = NULL; + qp->req_cb = NULL; qp->qp_id = qp_id; qp->alg_type = alg_type; + qm->qp_in_used++; atomic_set(&qp->qp_status.flags, QP_INIT); return qp; - -err_clear_bit: - qm->qp_array[qp_id] = NULL; - clear_bit(qp_id, qm->qp_bitmap); -err_free_qp: - kfree(qp); - return ERR_PTR(ret); } /** @@ -1322,8 +1308,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_create_qp); void hisi_qm_release_qp(struct hisi_qp *qp) { struct hisi_qm *qm = qp->qm; - struct qm_dma *qdma = &qp->qdma; - struct device *dev = &qm->pdev->dev; down_write(&qm->qps_lock); @@ -1332,14 +1316,8 @@ void hisi_qm_release_qp(struct hisi_qp *qp) return; } - if (qdma->va) - dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); - - qm->qp_array[qp->qp_id] = NULL; - clear_bit(qp->qp_id, qm->qp_bitmap); qm->qp_in_used--; - - kfree(qp); + idr_remove(&qm->qp_idr, qp->qp_id); up_write(&qm->qps_lock); } @@ -1416,41 +1394,13 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) { struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; - enum qm_hw_ver ver = qm->ver; 
int qp_id = qp->qp_id; int pasid = arg; - size_t off = 0; int ret; if (!qm_qp_avail_state(qm, qp, QP_START)) return -EPERM; -#define QP_INIT_BUF(qp, type, size) do { \ - (qp)->type = ((qp)->qdma.va + (off)); \ - (qp)->type##_dma = (qp)->qdma.dma + (off); \ - off += (size); \ -} while (0) - - if (!qp->qdma.dma) { - dev_err(dev, "cannot get qm dma buffer\n"); - return -EINVAL; - } - - /* sq need 128 bytes alignment */ - if (qp->qdma.dma & QM_SQE_DATA_ALIGN_MASK) { - dev_err(dev, "qm sq is not aligned to 128 byte\n"); - return -EINVAL; - } - - QP_INIT_BUF(qp, sqe, qm->sqe_size * QM_Q_DEPTH); - QP_INIT_BUF(qp, cqe, sizeof(struct qm_cqe) * QM_Q_DEPTH); - - dev_dbg(dev, "init qp buffer(v%d):\n" - " sqe (%pK, %lx)\n" - " cqe (%pK, %lx)\n", - ver, qp->sqe, (unsigned long)qp->sqe_dma, - qp->cqe, (unsigned long)qp->cqe_dma); - ret = qm_qp_ctx_cfg(qp, qp_id, pasid); if (ret) return ret; @@ -1697,16 +1647,7 @@ static void qm_qp_event_notifier(struct hisi_qp *qp) static int hisi_qm_get_available_instances(struct uacce_device *uacce) { - int i, ret; - struct hisi_qm *qm = uacce->priv; - - down_read(&qm->qps_lock); - for (i = 0, ret = 0; i < qm->qp_num; i++) - if (!qm->qp_array[i]) - ret++; - up_read(&qm->qps_lock); - - return ret; + return hisi_qm_get_free_qp_num(uacce->priv); } static int hisi_qm_uacce_get_queue(struct uacce_device *uacce, @@ -1916,6 +1857,99 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm) } EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num); +static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) +{ + struct device *dev = &qm->pdev->dev; + struct qm_dma *qdma; + int i; + + for (i = num - 1; i >= 0; i--) { + qdma = &qm->qp_array[i].qdma; + dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); + } + + kfree(qm->qp_array); +} + +static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id) +{ + struct device *dev = &qm->pdev->dev; + size_t off = qm->sqe_size * QM_Q_DEPTH; + struct hisi_qp *qp; + + qp = &qm->qp_array[id]; + qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, + GFP_KERNEL); + if (!qp->qdma.va) + return -ENOMEM; + + qp->sqe = qp->qdma.va; + qp->sqe_dma = qp->qdma.dma; + qp->cqe = qp->qdma.va + off; + qp->cqe_dma = qp->qdma.dma + off; + qp->qdma.size = dma_size; + qp->qm = qm; + qp->qp_id = id; + + return 0; +} + +static int hisi_qm_memory_init(struct hisi_qm *qm) +{ + struct device *dev = &qm->pdev->dev; + size_t qp_dma_size, off = 0; + int i, ret = 0; + +#define QM_INIT_BUF(qm, type, num) do { \ + (qm)->type = ((qm)->qdma.va + (off)); \ + (qm)->type##_dma = (qm)->qdma.dma + (off); \ + off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ +} while (0) + + idr_init(&qm->qp_idr); + qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) + + QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) + + QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + + QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); + qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, + GFP_ATOMIC); + dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); + if (!qm->qdma.va) + return -ENOMEM; + + QM_INIT_BUF(qm, eqe, QM_Q_DEPTH); + QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH); + QM_INIT_BUF(qm, sqc, qm->qp_num); + QM_INIT_BUF(qm, cqc, qm->qp_num); + + qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); + if (!qm->qp_array) { + ret = -ENOMEM; + goto err_alloc_qp_array; + } + + /* one more page for device or qp statuses */ + qp_dma_size = qm->sqe_size * QM_Q_DEPTH + + sizeof(struct qm_cqe) * QM_Q_DEPTH; + qp_dma_size = PAGE_ALIGN(qp_dma_size); + for (i = 
0; i < qm->qp_num; i++) { + ret = hisi_qp_memory_init(qm, qp_dma_size, i); + if (ret) + goto err_init_qp_mem; + + dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size); + } + + return ret; + +err_init_qp_mem: + hisi_qp_memory_uninit(qm, i); +err_alloc_qp_array: + dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); + + return ret; +} + static void hisi_qm_pre_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -2003,6 +2037,10 @@ int hisi_qm_init(struct hisi_qm *qm) goto err_irq_unregister; } + ret = hisi_qm_memory_init(qm); + if (ret) + goto err_irq_unregister; + INIT_WORK(&qm->work, qm_work_process); atomic_set(&qm->status.flags, QM_INIT); @@ -2048,6 +2086,9 @@ void hisi_qm_uninit(struct hisi_qm *qm) uacce_remove(qm->uacce); qm->uacce = NULL; + hisi_qp_memory_uninit(qm, qm->qp_num); + idr_destroy(&qm->qp_idr); + if (qm->qdma.va) { hisi_qm_cache_wb(qm); dma_free_coherent(dev, qm->qdma.size, @@ -2176,22 +2217,10 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm) static int __hisi_qm_start(struct hisi_qm *qm) { - struct pci_dev *pdev = qm->pdev; - struct device *dev = &pdev->dev; - size_t off = 0; int ret; -#define QM_INIT_BUF(qm, type, num) do { \ - (qm)->type = ((qm)->qdma.va + (off)); \ - (qm)->type##_dma = (qm)->qdma.dma + (off); \ - off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ -} while (0) - WARN_ON(!qm->qdma.dma); - if (qm->qp_num == 0) - return -EINVAL; - if (qm->fun_type == QM_HW_PF) { ret = qm_dev_mem_reset(qm); if (ret) @@ -2202,21 +2231,6 @@ static int __hisi_qm_start(struct hisi_qm *qm) return ret; } - QM_INIT_BUF(qm, eqe, QM_Q_DEPTH); - QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH); - QM_INIT_BUF(qm, sqc, qm->qp_num); - QM_INIT_BUF(qm, cqc, qm->qp_num); - - dev_dbg(dev, "init qm buffer:\n" - " eqe (%pK, %lx)\n" - " aeqe (%pK, %lx)\n" - " sqc (%pK, %lx)\n" - " cqc (%pK, %lx)\n", - qm->eqe, (unsigned long)qm->eqe_dma, - qm->aeqe, (unsigned long)qm->aeqe_dma, - qm->sqc, (unsigned long)qm->sqc_dma, - qm->cqc, (unsigned long)qm->cqc_dma); - ret = qm_eq_ctx_cfg(qm); if (ret) return ret; @@ -2261,33 +2275,6 @@ int hisi_qm_start(struct hisi_qm *qm) goto err_unlock; } - if (!qm->qp_bitmap) { - qm->qp_bitmap = devm_kcalloc(dev, BITS_TO_LONGS(qm->qp_num), - sizeof(long), GFP_KERNEL); - qm->qp_array = devm_kcalloc(dev, qm->qp_num, - sizeof(struct hisi_qp *), - GFP_KERNEL); - if (!qm->qp_bitmap || !qm->qp_array) { - ret = -ENOMEM; - goto err_unlock; - } - } - - if (!qm->qdma.va) { - qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_Q_DEPTH) + - QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) + - QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + - QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); - qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, - &qm->qdma.dma, GFP_KERNEL); - dev_dbg(dev, "allocate qm dma buf(va=%pK, dma=%pad, size=%zx)\n", - qm->qdma.va, &qm->qdma.dma, qm->qdma.size); - if (!qm->qdma.va) { - ret = -ENOMEM; - goto err_unlock; - } - } - ret = __hisi_qm_start(qm); if (!ret) atomic_set(&qm->status.flags, QM_START); @@ -2310,8 +2297,8 @@ static int qm_restart(struct hisi_qm *qm) down_write(&qm->qps_lock); for (i = 0; i < qm->qp_num; i++) { - qp = qm->qp_array[i]; - if (qp && atomic_read(&qp->qp_status.flags) == QP_STOP && + qp = &qm->qp_array[i]; + if (atomic_read(&qp->qp_status.flags) == QP_STOP && qp->is_resetting == true) { ret = qm_start_qp_nolock(qp, 0); if (ret < 0) { @@ -2336,7 +2323,7 @@ static int qm_stop_started_qp(struct hisi_qm *qm) int i, ret; for (i = 0; i < qm->qp_num; i++) { - qp = qm->qp_array[i]; + qp = &qm->qp_array[i]; if (qp && 
atomic_read(&qp->qp_status.flags) == QP_START) { qp->is_resetting = true; ret = qm_stop_qp_nolock(qp); @@ -2360,8 +2347,8 @@ static void qm_clear_queues(struct hisi_qm *qm) int i; for (i = 0; i < qm->qp_num; i++) { - qp = qm->qp_array[i]; - if (qp && qp->is_resetting) + qp = &qm->qp_array[i]; + if (qp->is_resetting) memset(qp->qdma.va, 0, qp->qdma.size); } diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 743cb63d2de6..80b9746dfe19 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -214,8 +214,8 @@ struct hisi_qm { unsigned long reset_flag; struct rw_semaphore qps_lock; - unsigned long *qp_bitmap; - struct hisi_qp **qp_array; + struct idr qp_idr; + struct hisi_qp *qp_array; struct mutex mailbox_lock; -- cgit v1.2.3-59-g8ed1b From 3176637ac10eddffdc3bd75281fa354a0d5a0c1e Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Sat, 9 May 2020 17:44:03 +0800 Subject: crypto: hisilicon - remove codes of directly report device errors through MSI The hardware device can be configured to report directly through MSI, but this method will not go through RAS, configure all hardware errors that should be processed by driver to NFE. Signed-off-by: Shukun Tan Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 1 - drivers/crypto/hisilicon/qm.c | 54 +++++++------------------------ drivers/crypto/hisilicon/qm.h | 4 +-- drivers/crypto/hisilicon/sec2/sec_main.c | 1 - drivers/crypto/hisilicon/zip/zip_main.c | 1 - 5 files changed, 13 insertions(+), 48 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index bfae7fb23e4c..c5ddd3a8ec85 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -730,7 +730,6 @@ static const struct hisi_qm_err_ini hpre_err_ini = { .ce = QM_BASE_CE, .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, .fe = 0, - .msi = QM_DB_RANDOM_INVALID, .ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR, .msi_wr_port = HPRE_WR_MSI_PORT, diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index e988124e732a..80935d661ed3 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -313,8 +313,7 @@ struct hisi_qm_hw_ops { u8 cmd, u16 index, u8 priority); u32 (*get_irq_num)(struct hisi_qm *qm); int (*debug_init)(struct hisi_qm *qm); - void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, - u32 msi); + void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe); void (*hw_error_uninit)(struct hisi_qm *qm); pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm); }; @@ -707,26 +706,6 @@ static irqreturn_t qm_aeq_irq(int irq, void *data) static irqreturn_t qm_abnormal_irq(int irq, void *data) { - const struct hisi_qm_hw_error *err = qm_hw_error; - struct hisi_qm *qm = data; - struct device *dev = &qm->pdev->dev; - u32 error_status, tmp; - - /* read err sts */ - tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); - error_status = qm->msi_mask & tmp; - - while (err->msg) { - if (err->int_msk & error_status) - dev_err(dev, "%s [error status=0x%x] found\n", - err->msg, err->int_msk); - - err++; - } - - /* clear err sts */ - writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); - return IRQ_HANDLED; } @@ -1116,28 +1095,21 @@ static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index) return 0; } -static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, - u32 msi) +static void qm_hw_error_init_v1(struct hisi_qm 
*qm, u32 ce, u32 nfe, u32 fe) { writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); } -static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, - u32 msi) +static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe) { - u32 irq_enable = ce | nfe | fe | msi; + u32 irq_enable = ce | nfe | fe; u32 irq_unmask = ~irq_enable; - u32 error_status; qm->error_mask = ce | nfe | fe; - qm->msi_mask = msi; /* clear QM hw residual error source */ - error_status = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); - if (error_status) { - error_status &= qm->error_mask; - writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); - } + writel(QM_ABNORMAL_INT_SOURCE_CLR, + qm->io_base + QM_ABNORMAL_INT_SOURCE); /* configure error type */ writel(ce, qm->io_base + QM_RAS_CE_ENABLE); @@ -1145,9 +1117,6 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe, writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE); writel(fe, qm->io_base + QM_RAS_FE_ENABLE); - /* use RAS irq default, so only set QM_RAS_MSI_INT_SEL for MSI */ - writel(msi, qm->io_base + QM_RAS_MSI_INT_SEL); - irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); } @@ -1207,9 +1176,11 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm) qm->err_status.is_qm_ecc_mbit = true; qm_log_hw_error(qm, error_status); - - /* clear err sts */ - writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); + if (error_status == QM_DB_RANDOM_INVALID) { + writel(error_status, qm->io_base + + QM_ABNORMAL_INT_SOURCE); + return PCI_ERS_RESULT_RECOVERED; + } return PCI_ERS_RESULT_NEED_RESET; } @@ -2476,8 +2447,7 @@ static void qm_hw_error_init(struct hisi_qm *qm) return; } - qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, - err_info->fe, err_info->msi); + qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe); } static void qm_hw_error_uninit(struct hisi_qm *qm) diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 80b9746dfe19..fc5e96a02399 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -74,7 +74,7 @@ #define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \ QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \ - QM_OF_FIFO_OF) + QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID) #define QM_BASE_CE QM_ECC_1BIT #define QM_Q_DEPTH 1024 @@ -158,7 +158,6 @@ struct hisi_qm_err_info { u32 ce; u32 nfe; u32 fe; - u32 msi; }; struct hisi_qm_err_status { @@ -224,7 +223,6 @@ struct hisi_qm { struct qm_debug debug; u32 error_mask; - u32 msi_mask; struct workqueue_struct *wq; struct work_struct work; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 74e806fd9ff9..c3381f253d55 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -682,7 +682,6 @@ static const struct hisi_qm_err_ini sec_err_ini = { .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT | QM_ACC_WB_NOT_READY_TIMEOUT, .fe = 0, - .msi = QM_DB_RANDOM_INVALID, .ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC, .msi_wr_port = BIT(0), .acpi_rst = "SRST", diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 0ddd56a0a075..6161b1025b7f 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -643,7 +643,6 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = { .nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, .fe = 0, - .msi = 
QM_DB_RANDOM_INVALID, .ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC, .msi_wr_port = HZIP_WR_PORT, .acpi_rst = "ZRST", -- cgit v1.2.3-59-g8ed1b From dbdc1ec31fc05c118eedb4211f502e6352c915b9 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Sat, 9 May 2020 17:44:04 +0800 Subject: crypto: hisilicon - add device error report through abnormal irq By configuring the device error in firmware to report through abnormal interruption, process all NFE errors in irq handler. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/qm.c | 339 +++++++++++++++++++++++------------------- drivers/crypto/hisilicon/qm.h | 1 + 2 files changed, 187 insertions(+), 153 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 80935d661ed3..6365f931bb73 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -219,6 +219,12 @@ enum vft_type { CQC_VFT, }; +enum acc_err_result { + ACC_ERR_NONE, + ACC_ERR_NEED_RESET, + ACC_ERR_RECOVERED, +}; + struct qm_cqe { __le32 rsvd0; __le16 cmd_id; @@ -315,7 +321,7 @@ struct hisi_qm_hw_ops { int (*debug_init)(struct hisi_qm *qm); void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe); void (*hw_error_uninit)(struct hisi_qm *qm); - pci_ers_result_t (*hw_error_handle)(struct hisi_qm *qm); + enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); }; static const char * const qm_debug_file_name[] = { @@ -704,46 +710,6 @@ static irqreturn_t qm_aeq_irq(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t qm_abnormal_irq(int irq, void *data) -{ - return IRQ_HANDLED; -} - -static int qm_irq_register(struct hisi_qm *qm) -{ - struct pci_dev *pdev = qm->pdev; - int ret; - - ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), - qm_irq, IRQF_SHARED, qm->dev_name, qm); - if (ret) - return ret; - - if (qm->ver == QM_HW_V2) { - ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), - qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm); - if (ret) - goto err_aeq_irq; - - if (qm->fun_type == QM_HW_PF) { - ret = request_irq(pci_irq_vector(pdev, - QM_ABNORMAL_EVENT_IRQ_VECTOR), - qm_abnormal_irq, IRQF_SHARED, - qm->dev_name, qm); - if (ret) - goto err_abonormal_irq; - } - } - - return 0; - -err_abonormal_irq: - free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); -err_aeq_irq: - free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); - return ret; -} - static void qm_irq_unregister(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -1163,7 +1129,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) } } -static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm) +static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) { u32 error_status, tmp; @@ -1179,13 +1145,13 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm) if (error_status == QM_DB_RANDOM_INVALID) { writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); - return PCI_ERS_RESULT_RECOVERED; + return ACC_ERR_RECOVERED; } - return PCI_ERS_RESULT_NEED_RESET; + return ACC_ERR_NEED_RESET; } - return PCI_ERS_RESULT_RECOVERED; + return ACC_ERR_RECOVERED; } static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { @@ -1942,100 +1908,6 @@ static void hisi_qm_pre_init(struct hisi_qm *qm) qm->qp_in_used = 0; } -/** - * hisi_qm_init() - Initialize configures about qm. - * @qm: The qm needing init. - * - * This function init qm, then we can call hisi_qm_start to put qm into work. 
- */ -int hisi_qm_init(struct hisi_qm *qm) -{ - struct pci_dev *pdev = qm->pdev; - struct device *dev = &pdev->dev; - unsigned int num_vec; - int ret; - - hisi_qm_pre_init(qm); - - ret = qm_alloc_uacce(qm); - if (ret < 0) - dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret); - - ret = pci_enable_device_mem(pdev); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to enable device mem!\n"); - goto err_remove_uacce; - } - - ret = pci_request_mem_regions(pdev, qm->dev_name); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to request mem regions!\n"); - goto err_disable_pcidev; - } - - qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); - qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2); - qm->io_base = ioremap(qm->phys_base, qm->phys_size); - if (!qm->io_base) { - ret = -EIO; - goto err_release_mem_regions; - } - - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); - if (ret < 0) - goto err_iounmap; - pci_set_master(pdev); - - if (!qm->ops->get_irq_num) { - ret = -EOPNOTSUPP; - goto err_iounmap; - } - num_vec = qm->ops->get_irq_num(qm); - ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); - if (ret < 0) { - dev_err(dev, "Failed to enable MSI vectors!\n"); - goto err_iounmap; - } - - ret = qm_irq_register(qm); - if (ret) - goto err_free_irq_vectors; - - if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) { - /* v2 starts to support get vft by mailbox */ - ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); - if (ret) - goto err_irq_unregister; - } - - ret = hisi_qm_memory_init(qm); - if (ret) - goto err_irq_unregister; - - INIT_WORK(&qm->work, qm_work_process); - - atomic_set(&qm->status.flags, QM_INIT); - - return 0; - -err_irq_unregister: - qm_irq_unregister(qm); -err_free_irq_vectors: - pci_free_irq_vectors(pdev); -err_iounmap: - iounmap(qm->io_base); -err_release_mem_regions: - pci_release_mem_regions(pdev); -err_disable_pcidev: - pci_disable_device(pdev); -err_remove_uacce: - uacce_remove(qm->uacce); - qm->uacce = NULL; - - return ret; -} -EXPORT_SYMBOL_GPL(hisi_qm_init); - /** * hisi_qm_uninit() - Uninitialize qm. * @qm: The qm needed uninit. 
@@ -2460,11 +2332,11 @@ static void qm_hw_error_uninit(struct hisi_qm *qm) qm->ops->hw_error_uninit(qm); } -static pci_ers_result_t qm_hw_error_handle(struct hisi_qm *qm) +static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) { if (!qm->ops->hw_error_handle) { dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); - return PCI_ERS_RESULT_NONE; + return ACC_ERR_NONE; } return qm->ops->hw_error_handle(qm); @@ -2777,13 +2649,13 @@ int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs) } EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure); -static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm) +static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) { u32 err_sts; if (!qm->err_ini->get_dev_hw_err_status) { dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n"); - return PCI_ERS_RESULT_NONE; + return ACC_ERR_NONE; } /* get device hardware error status */ @@ -2794,20 +2666,19 @@ static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm) if (!qm->err_ini->log_dev_hw_err) { dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n"); - return PCI_ERS_RESULT_NEED_RESET; + return ACC_ERR_NEED_RESET; } qm->err_ini->log_dev_hw_err(qm, err_sts); - return PCI_ERS_RESULT_NEED_RESET; + return ACC_ERR_NEED_RESET; } - return PCI_ERS_RESULT_RECOVERED; + return ACC_ERR_RECOVERED; } -static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev) +static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm) { - struct hisi_qm *qm = pci_get_drvdata(pdev); - pci_ers_result_t qm_ret, dev_ret; + enum acc_err_result qm_ret, dev_ret; /* log qm error */ qm_ret = qm_hw_error_handle(qm); @@ -2815,9 +2686,9 @@ static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev) /* log device error */ dev_ret = qm_dev_err_handle(qm); - return (qm_ret == PCI_ERS_RESULT_NEED_RESET || - dev_ret == PCI_ERS_RESULT_NEED_RESET) ? - PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED; + return (qm_ret == ACC_ERR_NEED_RESET || + dev_ret == ACC_ERR_NEED_RESET) ? 
+ ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED; } /** @@ -2831,6 +2702,9 @@ static pci_ers_result_t qm_process_dev_error(struct pci_dev *pdev) pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { + struct hisi_qm *qm = pci_get_drvdata(pdev); + enum acc_err_result ret; + if (pdev->is_virtfn) return PCI_ERS_RESULT_NONE; @@ -2838,7 +2712,11 @@ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; - return qm_process_dev_error(pdev); + ret = qm_process_dev_error(qm); + if (ret == ACC_ERR_NEED_RESET) + return PCI_ERS_RESULT_NEED_RESET; + + return PCI_ERS_RESULT_RECOVERED; } EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); @@ -3428,6 +3306,161 @@ flr_done: } EXPORT_SYMBOL_GPL(hisi_qm_reset_done); +static irqreturn_t qm_abnormal_irq(int irq, void *data) +{ + struct hisi_qm *qm = data; + enum acc_err_result ret; + + ret = qm_process_dev_error(qm); + if (ret == ACC_ERR_NEED_RESET) + schedule_work(&qm->rst_work); + + return IRQ_HANDLED; +} + +static int qm_irq_register(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + int ret; + + ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), + qm_irq, IRQF_SHARED, qm->dev_name, qm); + if (ret) + return ret; + + if (qm->ver == QM_HW_V2) { + ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), + qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm); + if (ret) + goto err_aeq_irq; + + if (qm->fun_type == QM_HW_PF) { + ret = request_irq(pci_irq_vector(pdev, + QM_ABNORMAL_EVENT_IRQ_VECTOR), + qm_abnormal_irq, IRQF_SHARED, + qm->dev_name, qm); + if (ret) + goto err_abonormal_irq; + } + } + + return 0; + +err_abonormal_irq: + free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); +err_aeq_irq: + free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); + return ret; +} + +static void hisi_qm_controller_reset(struct work_struct *rst_work) +{ + struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work); + int ret; + + /* reset pcie device controller */ + ret = qm_controller_reset(qm); + if (ret) + dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); + +} + +/** + * hisi_qm_init() - Initialize configures about qm. + * @qm: The qm needing init. + * + * This function init qm, then we can call hisi_qm_start to put qm into work. 
+ */ +int hisi_qm_init(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct device *dev = &pdev->dev; + unsigned int num_vec; + int ret; + + hisi_qm_pre_init(qm); + + ret = qm_alloc_uacce(qm); + if (ret < 0) + dev_warn(&pdev->dev, "fail to alloc uacce (%d)\n", ret); + + ret = pci_enable_device_mem(pdev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to enable device mem!\n"); + goto err_remove_uacce; + } + + ret = pci_request_mem_regions(pdev, qm->dev_name); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to request mem regions!\n"); + goto err_disable_pcidev; + } + + qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); + qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2); + qm->io_base = ioremap(qm->phys_base, qm->phys_size); + if (!qm->io_base) { + ret = -EIO; + goto err_release_mem_regions; + } + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (ret < 0) + goto err_iounmap; + pci_set_master(pdev); + + if (!qm->ops->get_irq_num) { + ret = -EOPNOTSUPP; + goto err_iounmap; + } + num_vec = qm->ops->get_irq_num(qm); + ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); + if (ret < 0) { + dev_err(dev, "Failed to enable MSI vectors!\n"); + goto err_iounmap; + } + + ret = qm_irq_register(qm); + if (ret) + goto err_free_irq_vectors; + + if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) { + /* v2 starts to support get vft by mailbox */ + ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); + if (ret) + goto err_irq_unregister; + } + + ret = hisi_qm_memory_init(qm); + if (ret) + goto err_irq_unregister; + + INIT_WORK(&qm->work, qm_work_process); + if (qm->fun_type == QM_HW_PF) + INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); + + atomic_set(&qm->status.flags, QM_INIT); + + return 0; + +err_irq_unregister: + qm_irq_unregister(qm); +err_free_irq_vectors: + pci_free_irq_vectors(pdev); +err_iounmap: + iounmap(qm->io_base); +err_release_mem_regions: + pci_release_mem_regions(pdev); +err_disable_pcidev: + pci_disable_device(pdev); +err_remove_uacce: + uacce_remove(qm->uacce); + qm->uacce = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(hisi_qm_init); + + MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zhou Wang "); MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index fc5e96a02399..a431ff2fac3c 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -226,6 +226,7 @@ struct hisi_qm { struct workqueue_struct *wq; struct work_struct work; + struct work_struct rst_work; const char *algs; bool use_sva; -- cgit v1.2.3-59-g8ed1b From 2c959a33f8630a008c7047e90312ba10ea2c78b7 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Sat, 9 May 2020 17:44:05 +0800 Subject: crypto: hisilicon/zip - Use temporary sqe when doing work Currently zip sqe is stored in hisi_zip_qp_ctx, which will bring corruption with multiple parallel users of the crypto tfm. This patch removes the zip_sqe in hisi_zip_qp_ctx and uses a temporary sqe instead. 
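The race being removed here is easy to see in isolation. A purely illustrative sketch follows (demo_* names are hypothetical, not the zip driver's real symbols): when the descriptor lives in the shared context, a second submitter can refill it before the first submission is consumed by the queue, while a stack-local descriptor is private to each call.

	/* Hypothetical sketch: shared vs. per-call submission descriptor (u32 from <linux/types.h>). */
	struct demo_sqe {
		u32 src;
		u32 dst;
		u32 tag;
	};

	struct demo_ctx {
		struct demo_sqe shared_sqe;	/* racy: every request writes here */
	};

	static int demo_hw_send(const struct demo_sqe *sqe)
	{
		return 0;			/* stand-in for the real queue send; it copies *sqe on submit */
	}

	static int demo_send_shared(struct demo_ctx *c, u32 src, u32 dst, u32 tag)
	{
		c->shared_sqe.src = src;	/* may be overwritten by a parallel request */
		c->shared_sqe.dst = dst;
		c->shared_sqe.tag = tag;
		return demo_hw_send(&c->shared_sqe);
	}

	static int demo_send_private(u32 src, u32 dst, u32 tag)
	{
		struct demo_sqe sqe = { .src = src, .dst = dst, .tag = tag };

		return demo_hw_send(&sqe);	/* no sharing, so no corruption */
	}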
Signed-off-by: Zhou Wang Signed-off-by: Jonathan Cameron Signed-off-by: Shukun Tan Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/zip/zip_crypto.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 369ec3220574..5fb9d4b41bb9 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -64,7 +64,6 @@ struct hisi_zip_req_q { struct hisi_zip_qp_ctx { struct hisi_qp *qp; - struct hisi_zip_sqe zip_sqe; struct hisi_zip_req_q req_q; struct hisi_acc_sgl_pool *sgl_pool; struct hisi_zip *zip_dev; @@ -484,11 +483,11 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, static int hisi_zip_do_work(struct hisi_zip_req *req, struct hisi_zip_qp_ctx *qp_ctx) { - struct hisi_zip_sqe *zip_sqe = &qp_ctx->zip_sqe; struct acomp_req *a_req = req->req; struct hisi_qp *qp = qp_ctx->qp; struct device *dev = &qp->qm->pdev->dev; struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; + struct hisi_zip_sqe zip_sqe; dma_addr_t input; dma_addr_t output; int ret; @@ -511,13 +510,13 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, } req->dma_dst = output; - hisi_zip_fill_sqe(zip_sqe, qp->req_type, input, output, a_req->slen, + hisi_zip_fill_sqe(&zip_sqe, qp->req_type, input, output, a_req->slen, a_req->dlen, req->sskip, req->dskip); - hisi_zip_config_buf_type(zip_sqe, HZIP_SGL); - hisi_zip_config_tag(zip_sqe, req->req_id); + hisi_zip_config_buf_type(&zip_sqe, HZIP_SGL); + hisi_zip_config_tag(&zip_sqe, req->req_id); /* send command to start a task */ - ret = hisi_qp_send(qp, zip_sqe); + ret = hisi_qp_send(qp, &zip_sqe); if (ret < 0) goto err_unmap_output; -- cgit v1.2.3-59-g8ed1b From 49c2c082e00e0bc4f5cbb7c21c7f0f873b35ab09 Mon Sep 17 00:00:00 2001 From: Nicolas Toromanoff Date: Tue, 12 May 2020 16:11:09 +0200 Subject: crypto: stm32/crc32 - fix ext4 chksum BUG_ON() Allow use of crc_update without prior call to crc_init. And change (and fix) driver to use CRC device even on unaligned buffers. 
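The unaligned-buffer handling added below is the usual head/body/tail split: feed bytes until the pointer is 32-bit aligned, stream whole words, then feed the trailing bytes. A condensed, device-independent sketch of the shape the diff gives stm32_crc_update() (demo_* helpers are placeholders for the byte- and word-wide CRC_DR writes; IS_ALIGNED is the standard kernel macro):

	/* Sketch of the head/body/tail split; demo_reg stands in for the CRC_DR register. */
	static u32 demo_reg;

	static void demo_feed_byte(u8 b)  { demo_reg ^= b; }	/* placeholder for writeb_relaxed() */
	static void demo_feed_word(u32 w) { demo_reg ^= w; }	/* placeholder for writel_relaxed() */

	static void demo_feed(const u8 *p, size_t len)
	{
		/* unaligned head: byte mode until p is word aligned */
		while (len && !IS_ALIGNED((unsigned long)p, sizeof(u32))) {
			demo_feed_byte(*p++);
			len--;
		}

		/* aligned body: word mode */
		for (; len >= sizeof(u32); p += sizeof(u32), len -= sizeof(u32))
			demo_feed_word(*(const u32 *)p);

		/* trailing tail: byte mode again */
		while (len--)
			demo_feed_byte(*p++);
	}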
Fixes: b51dbe90912a ("crypto: stm32 - Support for STM32 CRC32 crypto module") Signed-off-by: Nicolas Toromanoff Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-crc32.c | 98 +++++++++++++++++++------------------- 1 file changed, 48 insertions(+), 50 deletions(-) diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 8e92e4ac79f1..c6156bf6c603 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -28,8 +28,10 @@ /* Registers values */ #define CRC_CR_RESET BIT(0) -#define CRC_CR_REVERSE (BIT(7) | BIT(6) | BIT(5)) #define CRC_INIT_DEFAULT 0xFFFFFFFF +#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5)) +#define CRC_CR_REV_IN_BYTE BIT(5) +#define CRC_CR_REV_OUT BIT(7) #define CRC_AUTOSUSPEND_DELAY 50 @@ -38,8 +40,6 @@ struct stm32_crc { struct device *dev; void __iomem *regs; struct clk *clk; - u8 pending_data[sizeof(u32)]; - size_t nb_pending_bytes; }; struct stm32_crc_list { @@ -59,7 +59,6 @@ struct stm32_crc_ctx { struct stm32_crc_desc_ctx { u32 partial; /* crc32c: partial in first 4 bytes of that struct */ - struct stm32_crc *crc; }; static int stm32_crc32_cra_init(struct crypto_tfm *tfm) @@ -99,25 +98,22 @@ static int stm32_crc_init(struct shash_desc *desc) struct stm32_crc *crc; spin_lock_bh(&crc_list.lock); - list_for_each_entry(crc, &crc_list.dev_list, list) { - ctx->crc = crc; - break; - } + crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list); spin_unlock_bh(&crc_list.lock); - pm_runtime_get_sync(ctx->crc->dev); + pm_runtime_get_sync(crc->dev); /* Reset, set key, poly and configure in bit reverse mode */ - writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT); - writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL); - writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR); + writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT); + writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); + writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, + crc->regs + CRC_CR); /* Store partial result */ - ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR); - ctx->crc->nb_pending_bytes = 0; + ctx->partial = readl_relaxed(crc->regs + CRC_DR); - pm_runtime_mark_last_busy(ctx->crc->dev); - pm_runtime_put_autosuspend(ctx->crc->dev); + pm_runtime_mark_last_busy(crc->dev); + pm_runtime_put_autosuspend(crc->dev); return 0; } @@ -126,31 +122,49 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, unsigned int length) { struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); - struct stm32_crc *crc = ctx->crc; - u32 *d32; - unsigned int i; + struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); + struct stm32_crc *crc; + + spin_lock_bh(&crc_list.lock); + crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list); + spin_unlock_bh(&crc_list.lock); pm_runtime_get_sync(crc->dev); - if (unlikely(crc->nb_pending_bytes)) { - while (crc->nb_pending_bytes != sizeof(u32) && length) { - /* Fill in pending data */ - crc->pending_data[crc->nb_pending_bytes++] = *(d8++); + /* + * Restore previously calculated CRC for this context as init value + * Restore polynomial configuration + * Configure in register for word input data, + * Configure out register in reversed bit mode data. 
+ */ + writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT); + writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); + writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, + crc->regs + CRC_CR); + + if (d8 != PTR_ALIGN(d8, sizeof(u32))) { + /* Configure for byte data */ + writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, + crc->regs + CRC_CR); + while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) { + writeb_relaxed(*d8++, crc->regs + CRC_DR); length--; } - - if (crc->nb_pending_bytes == sizeof(u32)) { - /* Process completed pending data */ - writel_relaxed(*(u32 *)crc->pending_data, - crc->regs + CRC_DR); - crc->nb_pending_bytes = 0; - } + /* Configure for word data */ + writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, + crc->regs + CRC_CR); } - d32 = (u32 *)d8; - for (i = 0; i < length >> 2; i++) - /* Process 32 bits data */ - writel_relaxed(*(d32++), crc->regs + CRC_DR); + for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32)) + writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR); + + if (length) { + /* Configure for byte data */ + writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, + crc->regs + CRC_CR); + while (length--) + writeb_relaxed(*d8++, crc->regs + CRC_DR); + } /* Store partial result */ ctx->partial = readl_relaxed(crc->regs + CRC_DR); @@ -158,22 +172,6 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, pm_runtime_mark_last_busy(crc->dev); pm_runtime_put_autosuspend(crc->dev); - /* Check for pending data (non 32 bits) */ - length &= 3; - if (likely(!length)) - return 0; - - if ((crc->nb_pending_bytes + length) >= sizeof(u32)) { - /* Shall not happen */ - dev_err(crc->dev, "Pending data overflow\n"); - return -EINVAL; - } - - d8 = (const u8 *)d32; - for (i = 0; i < length; i++) - /* Store pending data */ - crc->pending_data[crc->nb_pending_bytes++] = *(d8++); - return 0; } -- cgit v1.2.3-59-g8ed1b From a8cc3128bf2c01c4d448fe17149e87132113b445 Mon Sep 17 00:00:00 2001 From: Nicolas Toromanoff Date: Tue, 12 May 2020 16:11:10 +0200 Subject: crypto: stm32/crc32 - fix run-time self test issue. Fix wrong crc32 initialisation value: "alg: shash: stm32_crc32 test failed (wrong result) on test vector 0, cfg="init+update+final aligned buffer" cra_name="crc32c" expects an init value of 0XFFFFFFFF, cra_name="crc32" expects an init value of 0. 
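For context only (not part of the fix): the quoted failure comes from the self-test configuration "init+update+final aligned buffer", i.e. the shash is driven as sketched below with the algorithm's default seed, so a wrong cra_init value already breaks test vector 0. The sketch assumes only the standard crypto_shash API; demo_crc32_once() is a hypothetical helper.

	/* Sketch of the init+update+final sequence the failing self-test config runs. */
	#include <crypto/hash.h>

	static int demo_crc32_once(const u8 *data, unsigned int len, u32 *out)
	{
		struct crypto_shash *tfm = crypto_alloc_shash("crc32", 0, 0);
		int ret;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			ret = crypto_shash_init(desc) ?:	/* seeds with the cra_init default */
			      crypto_shash_update(desc, data, len) ?:
			      crypto_shash_final(desc, (u8 *)out);
		}

		crypto_free_shash(tfm);
		return ret;
	}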
Fixes: b51dbe90912a ("crypto: stm32 - Support for STM32 CRC32 crypto module") Signed-off-by: Nicolas Toromanoff Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-crc32.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index c6156bf6c603..1c3e411b7acb 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -28,10 +28,10 @@ /* Registers values */ #define CRC_CR_RESET BIT(0) -#define CRC_INIT_DEFAULT 0xFFFFFFFF #define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5)) #define CRC_CR_REV_IN_BYTE BIT(5) #define CRC_CR_REV_OUT BIT(7) +#define CRC32C_INIT_DEFAULT 0xFFFFFFFF #define CRC_AUTOSUSPEND_DELAY 50 @@ -65,7 +65,7 @@ static int stm32_crc32_cra_init(struct crypto_tfm *tfm) { struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); - mctx->key = CRC_INIT_DEFAULT; + mctx->key = 0; mctx->poly = CRC32_POLY_LE; return 0; } @@ -74,7 +74,7 @@ static int stm32_crc32c_cra_init(struct crypto_tfm *tfm) { struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); - mctx->key = CRC_INIT_DEFAULT; + mctx->key = CRC32C_INIT_DEFAULT; mctx->poly = CRC32C_POLY_LE; return 0; } -- cgit v1.2.3-59-g8ed1b From 10b89c43a64eb0d236903b79a3bc9d8f6cbfd9c7 Mon Sep 17 00:00:00 2001 From: Nicolas Toromanoff Date: Tue, 12 May 2020 16:11:11 +0200 Subject: crypto: stm32/crc32 - fix multi-instance Ensure CRC algorithm is registered only once in crypto framework when there are several instances of CRC devices. Update the CRC device list management to avoid that only the first CRC instance is used. Fixes: b51dbe90912a ("crypto: stm32 - Support for STM32 CRC32 crypto module") Signed-off-by: Nicolas Toromanoff Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-crc32.c | 48 ++++++++++++++++++++++++++++---------- 1 file changed, 36 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 1c3e411b7acb..10304511f9b4 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -91,16 +91,29 @@ static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key, return 0; } -static int stm32_crc_init(struct shash_desc *desc) +static struct stm32_crc *stm32_crc_get_next_crc(void) { - struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); - struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); struct stm32_crc *crc; spin_lock_bh(&crc_list.lock); crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list); + if (crc) + list_move_tail(&crc->list, &crc_list.dev_list); spin_unlock_bh(&crc_list.lock); + return crc; +} + +static int stm32_crc_init(struct shash_desc *desc) +{ + struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); + struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); + struct stm32_crc *crc; + + crc = stm32_crc_get_next_crc(); + if (!crc) + return -ENODEV; + pm_runtime_get_sync(crc->dev); /* Reset, set key, poly and configure in bit reverse mode */ @@ -125,9 +138,9 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); struct stm32_crc *crc; - spin_lock_bh(&crc_list.lock); - crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list); - spin_unlock_bh(&crc_list.lock); + crc = stm32_crc_get_next_crc(); + if (!crc) + return -ENODEV; pm_runtime_get_sync(crc->dev); @@ -200,6 +213,8 @@ static int stm32_crc_digest(struct shash_desc *desc, const u8 *data, return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out); } 
+static unsigned int refcnt; +static DEFINE_MUTEX(refcnt_lock); static struct shash_alg algs[] = { /* CRC-32 */ { @@ -290,12 +305,18 @@ static int stm32_crc_probe(struct platform_device *pdev) list_add(&crc->list, &crc_list.dev_list); spin_unlock(&crc_list.lock); - ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); - if (ret) { - dev_err(dev, "Failed to register\n"); - clk_disable_unprepare(crc->clk); - return ret; + mutex_lock(&refcnt_lock); + if (!refcnt) { + ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); + if (ret) { + mutex_unlock(&refcnt_lock); + dev_err(dev, "Failed to register\n"); + clk_disable_unprepare(crc->clk); + return ret; + } } + refcnt++; + mutex_unlock(&refcnt_lock); dev_info(dev, "Initialized\n"); @@ -316,7 +337,10 @@ static int stm32_crc_remove(struct platform_device *pdev) list_del(&crc->list); spin_unlock(&crc_list.lock); - crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); + mutex_lock(&refcnt_lock); + if (!--refcnt) + crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); + mutex_unlock(&refcnt_lock); pm_runtime_disable(crc->dev); pm_runtime_put_noidle(crc->dev); -- cgit v1.2.3-59-g8ed1b From 100f84beee4874234d04a1ea642b8c9738d7020d Mon Sep 17 00:00:00 2001 From: Nicolas Toromanoff Date: Tue, 12 May 2020 16:11:12 +0200 Subject: crypto: stm32/crc32 - don't sleep in runtime pm Ensure stm32_crc_update() and stm32_crc_init() can be called in atomic context and can't sleep. Add pm_runtime_irq_safe() to make pm_runtime_get_sync() atomic. Change runtime pm to call clk_enable()/clk_disable() and change system pm to unprepare/prepare the clock and force runtime pm suspend/resume. Signed-off-by: Nicolas Toromanoff Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-crc32.c | 45 ++++++++++++++++++++++++++++++-------- 1 file changed, 36 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 10304511f9b4..413415c216ef 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -297,6 +297,7 @@ static int stm32_crc_probe(struct platform_device *pdev) pm_runtime_get_noresume(dev); pm_runtime_set_active(dev); + pm_runtime_irq_safe(dev); pm_runtime_enable(dev); platform_set_drvdata(pdev, crc); @@ -350,34 +351,60 @@ static int stm32_crc_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int stm32_crc_runtime_suspend(struct device *dev) +static int __maybe_unused stm32_crc_suspend(struct device *dev) { struct stm32_crc *crc = dev_get_drvdata(dev); + int ret; - clk_disable_unprepare(crc->clk); + ret = pm_runtime_force_suspend(dev); + if (ret) + return ret; + + clk_unprepare(crc->clk); return 0; } -static int stm32_crc_runtime_resume(struct device *dev) +static int __maybe_unused stm32_crc_resume(struct device *dev) { struct stm32_crc *crc = dev_get_drvdata(dev); int ret; - ret = clk_prepare_enable(crc->clk); + ret = clk_prepare(crc->clk); if (ret) { - dev_err(crc->dev, "Failed to prepare_enable clock\n"); + dev_err(crc->dev, "Failed to prepare clock\n"); + return ret; + } + + return pm_runtime_force_resume(dev); +} + +static int __maybe_unused stm32_crc_runtime_suspend(struct device *dev) +{ + struct stm32_crc *crc = dev_get_drvdata(dev); + + clk_disable(crc->clk); + + return 0; +} + +static int __maybe_unused stm32_crc_runtime_resume(struct device *dev) +{ + struct stm32_crc *crc = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(crc->clk); + if (ret) { + dev_err(crc->dev, "Failed to enable clock\n"); return ret; } return 0; } -#endif static const 
struct dev_pm_ops stm32_crc_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) + SET_SYSTEM_SLEEP_PM_OPS(stm32_crc_suspend, + stm32_crc_resume) SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend, stm32_crc_runtime_resume, NULL) }; -- cgit v1.2.3-59-g8ed1b From 7795c0baf5ac25e104fec8677ad134066a8fb8d3 Mon Sep 17 00:00:00 2001 From: Nicolas Toromanoff Date: Tue, 12 May 2020 16:11:13 +0200 Subject: crypto: stm32/crc32 - protect from concurrent accesses Protect STM32 CRC device from concurrent accesses. As we create a spinlocked section that increase with buffer size, we provide a module parameter to release the pressure by splitting critical section in chunks. Size of each chunk is defined in burst_size module parameter. By default burst_size=0, i.e. don't split incoming buffer. Signed-off-by: Nicolas Toromanoff Signed-off-by: Herbert Xu --- drivers/crypto/stm32/stm32-crc32.c | 47 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 413415c216ef..3ba41148c2a4 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -35,11 +35,16 @@ #define CRC_AUTOSUSPEND_DELAY 50 +static unsigned int burst_size; +module_param(burst_size, uint, 0644); +MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)"); + struct stm32_crc { struct list_head list; struct device *dev; void __iomem *regs; struct clk *clk; + spinlock_t lock; }; struct stm32_crc_list { @@ -109,6 +114,7 @@ static int stm32_crc_init(struct shash_desc *desc) struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); struct stm32_crc *crc; + unsigned long flags; crc = stm32_crc_get_next_crc(); if (!crc) @@ -116,6 +122,8 @@ static int stm32_crc_init(struct shash_desc *desc) pm_runtime_get_sync(crc->dev); + spin_lock_irqsave(&crc->lock, flags); + /* Reset, set key, poly and configure in bit reverse mode */ writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT); writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); @@ -125,18 +133,21 @@ static int stm32_crc_init(struct shash_desc *desc) /* Store partial result */ ctx->partial = readl_relaxed(crc->regs + CRC_DR); + spin_unlock_irqrestore(&crc->lock, flags); + pm_runtime_mark_last_busy(crc->dev); pm_runtime_put_autosuspend(crc->dev); return 0; } -static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, - unsigned int length) +static int burst_update(struct shash_desc *desc, const u8 *d8, + size_t length) { struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); struct stm32_crc *crc; + unsigned long flags; crc = stm32_crc_get_next_crc(); if (!crc) @@ -144,6 +155,8 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, pm_runtime_get_sync(crc->dev); + spin_lock_irqsave(&crc->lock, flags); + /* * Restore previously calculated CRC for this context as init value * Restore polynomial configuration @@ -182,12 +195,40 @@ static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, /* Store partial result */ ctx->partial = readl_relaxed(crc->regs + CRC_DR); + spin_unlock_irqrestore(&crc->lock, flags); + pm_runtime_mark_last_busy(crc->dev); pm_runtime_put_autosuspend(crc->dev); return 0; } +static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, + unsigned int length) +{ + const unsigned int burst_sz = burst_size; + unsigned int rem_sz; + const u8 
*cur; + size_t size; + int ret; + + if (!burst_sz) + return burst_update(desc, d8, length); + + /* Digest first bytes not 32bit aligned at first pass in the loop */ + size = min(length, + burst_sz + (unsigned int)d8 - ALIGN_DOWN((unsigned int)d8, + sizeof(u32))); + for (rem_sz = length, cur = d8; rem_sz; + rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) { + ret = burst_update(desc, cur, size); + if (ret) + return ret; + } + + return 0; +} + static int stm32_crc_final(struct shash_desc *desc, u8 *out) { struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); @@ -300,6 +341,8 @@ static int stm32_crc_probe(struct platform_device *pdev) pm_runtime_irq_safe(dev); pm_runtime_enable(dev); + spin_lock_init(&crc->lock); + platform_set_drvdata(pdev, crc); spin_lock(&crc_list.lock); -- cgit v1.2.3-59-g8ed1b From 8502652542c6684dd142f74c1bd1772730f653bd Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Fri, 15 May 2020 17:13:54 +0800 Subject: crypto: hisilicon/qm - add debugfs for QM Add DebugFS method to get the information of IRQ/Requests/QP .etc of QM for HPRE/ZIP/SEC drivers. Signed-off-by: Longfang Liu Signed-off-by: Shukun Tan Reviewed-by: Zaibo Xu Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- Documentation/ABI/testing/debugfs-hisi-hpre | 31 ++++++++++++++++++ Documentation/ABI/testing/debugfs-hisi-sec | 31 ++++++++++++++++++ Documentation/ABI/testing/debugfs-hisi-zip | 31 ++++++++++++++++++ drivers/crypto/hisilicon/qm.c | 51 +++++++++++++++++++++++++++++ drivers/crypto/hisilicon/qm.h | 9 +++++ 5 files changed, 153 insertions(+) diff --git a/Documentation/ABI/testing/debugfs-hisi-hpre b/Documentation/ABI/testing/debugfs-hisi-hpre index ec4a79e3a807..1f966654c177 100644 --- a/Documentation/ABI/testing/debugfs-hisi-hpre +++ b/Documentation/ABI/testing/debugfs-hisi-hpre @@ -55,3 +55,34 @@ Description: QM debug registers(qm_regs) read clear control. 1 means enable Writing to this file has no functional effect, only enable or disable counters clear after reading of these registers. Only available for PF. + +What: /sys/kernel/debug/hisi_hpre//qm/err_irq +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of invalid interrupts for + QM task completion. + Available for both PF and VF, and take no other effect on HPRE. + +What: /sys/kernel/debug/hisi_hpre//qm/aeq_irq +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of QM async event queue interrupts. + Available for both PF and VF, and take no other effect on HPRE. + +What: /sys/kernel/debug/hisi_hpre//qm/abnormal_irq +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of interrupts for QM abnormal event. + Available for both PF and VF, and take no other effect on HPRE. + +What: /sys/kernel/debug/hisi_hpre//qm/create_qp_err +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of queue allocation errors. + Available for both PF and VF, and take no other effect on HPRE. + +What: /sys/kernel/debug/hisi_hpre//qm/mb_err +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of failed QM mailbox commands. + Available for both PF and VF, and take no other effect on HPRE. 
diff --git a/Documentation/ABI/testing/debugfs-hisi-sec b/Documentation/ABI/testing/debugfs-hisi-sec index 06adb899495e..c3ba09cf0b3e 100644 --- a/Documentation/ABI/testing/debugfs-hisi-sec +++ b/Documentation/ABI/testing/debugfs-hisi-sec @@ -41,3 +41,34 @@ Description: Enabling/disabling of clear action after reading the SEC's QM debug registers. 0: disable, 1: enable. Only available for PF, and take no other effect on SEC. + +What: /sys/kernel/debug/hisi_sec2//qm/err_irq +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of invalid interrupts for + QM task completion. + Available for both PF and VF, and take no other effect on SEC. + +What: /sys/kernel/debug/hisi_sec2//qm/aeq_irq +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of QM async event queue interrupts. + Available for both PF and VF, and take no other effect on SEC. + +What: /sys/kernel/debug/hisi_sec2//qm/abnormal_irq +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of interrupts for QM abnormal event. + Available for both PF and VF, and take no other effect on SEC. + +What: /sys/kernel/debug/hisi_sec2//qm/create_qp_err +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of queue allocation errors. + Available for both PF and VF, and take no other effect on SEC. + +What: /sys/kernel/debug/hisi_sec2//qm/mb_err +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of failed QM mailbox commands. + Available for both PF and VF, and take no other effect on SEC. diff --git a/Documentation/ABI/testing/debugfs-hisi-zip b/Documentation/ABI/testing/debugfs-hisi-zip index a7c63e6c4bc3..cc568fde8ff7 100644 --- a/Documentation/ABI/testing/debugfs-hisi-zip +++ b/Documentation/ABI/testing/debugfs-hisi-zip @@ -48,3 +48,34 @@ Description: QM debug registers(qm_regs) read clear control. 1 means enable Writing to this file has no functional effect, only enable or disable counters clear after reading of these registers. Only available for PF. + +What: /sys/kernel/debug/hisi_zip//qm/err_irq +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of invalid interrupts for + QM task completion. + Available for both PF and VF, and take no other effect on ZIP. + +What: /sys/kernel/debug/hisi_zip//qm/aeq_irq +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of QM async event queue interrupts. + Available for both PF and VF, and take no other effect on ZIP. + +What: /sys/kernel/debug/hisi_zip//qm/abnormal_irq +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of interrupts for QM abnormal event. + Available for both PF and VF, and take no other effect on ZIP. + +What: /sys/kernel/debug/hisi_zip//qm/create_qp_err +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of queue allocation errors. + Available for both PF and VF, and take no other effect on ZIP. + +What: /sys/kernel/debug/hisi_zip//qm/mb_err +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the number of failed QM mailbox commands. + Available for both PF and VF, and take no other effect on ZIP. 
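The entries documented above are backed by plain atomic64_t counters that the qm.c hunks below expose with DEFINE_DEBUGFS_ATTRIBUTE, clearing on a write of 0. As a minimal standalone sketch of that pattern (demo_* names are illustrative):

	/* Minimal sketch: one atomic64_t counter exposed as a clear-on-write-0 debugfs file. */
	#include <linux/atomic.h>
	#include <linux/debugfs.h>
	#include <linux/errno.h>

	static atomic64_t demo_err_cnt;

	static int demo_cnt_get(void *data, u64 *val)
	{
		*val = atomic64_read((atomic64_t *)data);
		return 0;
	}

	static int demo_cnt_set(void *data, u64 val)
	{
		if (val)
			return -EINVAL;			/* only a write of 0 is accepted */

		atomic64_set((atomic64_t *)data, 0);	/* reset the counter */
		return 0;
	}

	DEFINE_DEBUGFS_ATTRIBUTE(demo_cnt_fops, demo_cnt_get, demo_cnt_set, "%llu\n");

	static void demo_debugfs_init(struct dentry *parent)
	{
		debugfs_create_file("err_irq", 0644, parent, &demo_err_cnt, &demo_cnt_fops);
	}

Reading such a file prints the current count and writing 0 resets it, matching qm_debugfs_atomic64_get()/qm_debugfs_atomic64_set() in the hunks that follow.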
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 6365f931bb73..744d1310f6a9 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -324,6 +324,19 @@ struct hisi_qm_hw_ops { enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); }; +struct qm_dfx_item { + const char *name; + u32 offset; +}; + +static struct qm_dfx_item qm_dfx_files[] = { + {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)}, + {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)}, + {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)}, + {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)}, + {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)}, +}; + static const char * const qm_debug_file_name[] = { [CURRENT_Q] = "current_q", [CLEAR_ENABLE] = "clear_enable", @@ -514,6 +527,8 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, busy_unlock: mutex_unlock(&qm->mailbox_lock); + if (ret) + atomic64_inc(&qm->debug.dfx.mb_err_cnt); return ret; } @@ -671,6 +686,7 @@ static irqreturn_t qm_irq(int irq, void *data) if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) return do_qm_irq(irq, data); + atomic64_inc(&qm->debug.dfx.err_irq_cnt); dev_err(&qm->pdev->dev, "invalid int source\n"); qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); @@ -683,6 +699,7 @@ static irqreturn_t qm_aeq_irq(int irq, void *data) struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; u32 type; + atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE)) return IRQ_NONE; @@ -1192,6 +1209,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) if (qm->qp_in_used == qm->qp_num) { dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", qm->qp_num); + atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); return ERR_PTR(-EBUSY); } @@ -1199,6 +1217,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) if (qp_id < 0) { dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", qm->qp_num); + atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); return ERR_PTR(-EBUSY); } @@ -2249,6 +2268,26 @@ err_unlock: } EXPORT_SYMBOL_GPL(hisi_qm_stop); +static int qm_debugfs_atomic64_set(void *data, u64 val) +{ + if (val) + return -EINVAL; + + atomic64_set((atomic64_t *)data, 0); + + return 0; +} + +static int qm_debugfs_atomic64_get(void *data, u64 *val) +{ + *val = atomic64_read((atomic64_t *)data); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get, + qm_debugfs_atomic64_set, "%llu\n"); + /** * hisi_qm_debug_init() - Initialize qm related debugfs files. * @qm: The qm for which we want to add debugfs files. 
@@ -2257,7 +2296,9 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop); */ int hisi_qm_debug_init(struct hisi_qm *qm) { + struct qm_dfx *dfx = &qm->debug.dfx; struct dentry *qm_d; + void *data; int i, ret; qm_d = debugfs_create_dir("qm", qm->debug.debug_root); @@ -2273,6 +2314,15 @@ int hisi_qm_debug_init(struct hisi_qm *qm) debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); + for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) { + data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset); + debugfs_create_file(qm_dfx_files[i].name, + 0644, + qm_d, + data, + &qm_atomic64_ops); + } + return 0; failed_to_create: @@ -3311,6 +3361,7 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data) struct hisi_qm *qm = data; enum acc_err_result ret; + atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); ret = qm_process_dev_error(qm); if (ret == ACC_ERR_NEED_RESET) schedule_work(&qm->rst_work); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index a431ff2fac3c..e4b46a7b10ef 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -121,6 +121,14 @@ enum qm_debug_file { DEBUG_FILE_NUM, }; +struct qm_dfx { + atomic64_t err_irq_cnt; + atomic64_t aeq_irq_cnt; + atomic64_t abnormal_irq_cnt; + atomic64_t create_qp_err_cnt; + atomic64_t mb_err_cnt; +}; + struct debugfs_file { enum qm_debug_file index; struct mutex lock; @@ -129,6 +137,7 @@ struct debugfs_file { struct qm_debug { u32 curr_qm_qp_num; + struct qm_dfx dfx; struct dentry *debug_root; struct dentry *qm_d; struct debugfs_file files[DEBUG_FILE_NUM]; -- cgit v1.2.3-59-g8ed1b From 0a3a3960210b4bc6cfe5db45b4af714ee4a010e1 Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Fri, 15 May 2020 17:13:55 +0800 Subject: crypto: hisilicon/qm - add debugfs to the QM state machine The QM driver uses debugfs to provides the current state of the QM state machine Signed-off-by: Longfang Liu Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- Documentation/ABI/testing/debugfs-hisi-hpre | 7 ++++++ Documentation/ABI/testing/debugfs-hisi-sec | 7 ++++++ Documentation/ABI/testing/debugfs-hisi-zip | 7 ++++++ drivers/crypto/hisilicon/qm.c | 34 +++++++++++++++++++++++++++++ 4 files changed, 55 insertions(+) diff --git a/Documentation/ABI/testing/debugfs-hisi-hpre b/Documentation/ABI/testing/debugfs-hisi-hpre index 1f966654c177..093cf46a4e29 100644 --- a/Documentation/ABI/testing/debugfs-hisi-hpre +++ b/Documentation/ABI/testing/debugfs-hisi-hpre @@ -86,3 +86,10 @@ Date: Apr 2020 Contact: linux-crypto@vger.kernel.org Description: Dump the number of failed QM mailbox commands. Available for both PF and VF, and take no other effect on HPRE. + +What: /sys/kernel/debug/hisi_hpre//qm/status +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the status of the QM. + Four states: initiated, started, stopped and closed. + Available for both PF and VF, and take no other effect on HPRE. diff --git a/Documentation/ABI/testing/debugfs-hisi-sec b/Documentation/ABI/testing/debugfs-hisi-sec index c3ba09cf0b3e..54699a1ac9b0 100644 --- a/Documentation/ABI/testing/debugfs-hisi-sec +++ b/Documentation/ABI/testing/debugfs-hisi-sec @@ -72,3 +72,10 @@ Date: Apr 2020 Contact: linux-crypto@vger.kernel.org Description: Dump the number of failed QM mailbox commands. Available for both PF and VF, and take no other effect on SEC. + +What: /sys/kernel/debug/hisi_sec2//qm/status +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the status of the QM. 
+ Four states: initiated, started, stopped and closed. + Available for both PF and VF, and take no other effect on SEC. diff --git a/Documentation/ABI/testing/debugfs-hisi-zip b/Documentation/ABI/testing/debugfs-hisi-zip index cc568fde8ff7..1002a01e6342 100644 --- a/Documentation/ABI/testing/debugfs-hisi-zip +++ b/Documentation/ABI/testing/debugfs-hisi-zip @@ -79,3 +79,10 @@ Date: Apr 2020 Contact: linux-crypto@vger.kernel.org Description: Dump the number of failed QM mailbox commands. Available for both PF and VF, and take no other effect on ZIP. + +What: /sys/kernel/debug/hisi_zip//qm/status +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the status of the QM. + Four states: initiated, started, stopped and closed. + Available for both PF and VF, and take no other effect on ZIP. diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 744d1310f6a9..7c1982fd2156 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -175,6 +175,7 @@ #define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0) #define QMC_ALIGN(sz) ALIGN(sz, 32) +#define QM_DBG_READ_LEN 256 #define QM_DBG_TMP_BUF_LEN 22 #define QM_PCI_COMMAND_INVALID ~0 @@ -2268,6 +2269,37 @@ err_unlock: } EXPORT_SYMBOL_GPL(hisi_qm_stop); +static ssize_t qm_status_read(struct file *filp, char __user *buffer, + size_t count, loff_t *pos) +{ + struct hisi_qm *qm = filp->private_data; + char buf[QM_DBG_READ_LEN]; + int val, cp_len, len; + + if (*pos) + return 0; + + if (count < QM_DBG_READ_LEN) + return -ENOSPC; + + val = atomic_read(&qm->status.flags); + len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]); + if (!len) + return -EFAULT; + + cp_len = copy_to_user(buffer, buf, len); + if (cp_len) + return -EFAULT; + + return (*pos = len); +} + +static const struct file_operations qm_status_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = qm_status_read, +}; + static int qm_debugfs_atomic64_set(void *data, u64 val) { if (val) @@ -2314,6 +2346,8 @@ int hisi_qm_debug_init(struct hisi_qm *qm) debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); + debugfs_create_file("status", 0444, qm->debug.qm_d, qm, + &qm_status_fops); for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) { data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset); debugfs_create_file(qm_dfx_files[i].name, -- cgit v1.2.3-59-g8ed1b From 8213a1a60c5329501ff903339b248ceb84278cc3 Mon Sep 17 00:00:00 2001 From: Kai Ye Date: Fri, 15 May 2020 17:13:56 +0800 Subject: crypto: hisilicon/sec2 - add debugfs for Hisilicon SEC Hisilicon SEC engine driver uses debugfs to provides IO operation debug information Signed-off-by: Kai Ye Signed-off-by: Longfang Liu Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- Documentation/ABI/testing/debugfs-hisi-sec | 54 ++++++++++++++++++++++++------ drivers/crypto/hisilicon/sec2/sec.h | 4 +++ drivers/crypto/hisilicon/sec2/sec_crypto.c | 15 +++++++-- drivers/crypto/hisilicon/sec2/sec_main.c | 43 ++++++++++++++++++++---- 4 files changed, 95 insertions(+), 21 deletions(-) diff --git a/Documentation/ABI/testing/debugfs-hisi-sec b/Documentation/ABI/testing/debugfs-hisi-sec index 54699a1ac9b0..725c7808ebbc 100644 --- a/Documentation/ABI/testing/debugfs-hisi-sec +++ b/Documentation/ABI/testing/debugfs-hisi-sec @@ -1,10 +1,4 @@ -What: /sys/kernel/debug/hisi_sec//sec_dfx -Date: Oct 2019 -Contact: linux-crypto@vger.kernel.org -Description: Dump the debug registers of SEC cores. - Only available for PF. 
- -What: /sys/kernel/debug/hisi_sec//clear_enable +What: /sys/kernel/debug/hisi_sec2//clear_enable Date: Oct 2019 Contact: linux-crypto@vger.kernel.org Description: Enabling/disabling of clear action after reading @@ -12,7 +6,7 @@ Description: Enabling/disabling of clear action after reading 0: disable, 1: enable. Only available for PF, and take no other effect on SEC. -What: /sys/kernel/debug/hisi_sec//current_qm +What: /sys/kernel/debug/hisi_sec2//current_qm Date: Oct 2019 Contact: linux-crypto@vger.kernel.org Description: One SEC controller has one PF and multiple VFs, each function @@ -20,21 +14,21 @@ Description: One SEC controller has one PF and multiple VFs, each function qm refers to. Only available for PF. -What: /sys/kernel/debug/hisi_sec//qm/qm_regs +What: /sys/kernel/debug/hisi_sec2//qm/qm_regs Date: Oct 2019 Contact: linux-crypto@vger.kernel.org Description: Dump of QM related debug registers. Available for PF and VF in host. VF in guest currently only has one debug register. -What: /sys/kernel/debug/hisi_sec//qm/current_q +What: /sys/kernel/debug/hisi_sec2//qm/current_q Date: Oct 2019 Contact: linux-crypto@vger.kernel.org Description: One QM of SEC may contain multiple queues. Select specific queue to show its debug registers in above 'qm_regs'. Only available for PF. -What: /sys/kernel/debug/hisi_sec//qm/clear_enable +What: /sys/kernel/debug/hisi_sec2//qm/clear_enable Date: Oct 2019 Contact: linux-crypto@vger.kernel.org Description: Enabling/disabling of clear action after reading @@ -79,3 +73,41 @@ Contact: linux-crypto@vger.kernel.org Description: Dump the status of the QM. Four states: initiated, started, stopped and closed. Available for both PF and VF, and take no other effect on SEC. + +What: /sys/kernel/debug/hisi_sec2//sec_dfx/send_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of sent requests. + Available for both PF and VF, and take no other effect on SEC. + +What: /sys/kernel/debug/hisi_sec2//sec_dfx/recv_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of received requests. + Available for both PF and VF, and take no other effect on SEC. + +What: /sys/kernel/debug/hisi_sec2//sec_dfx/send_busy_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of requests sent with returning busy. + Available for both PF and VF, and take no other effect on SEC. + +What: /sys/kernel/debug/hisi_sec2//sec_dfx/err_bd_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of BD type error requests + to be received. + Available for both PF and VF, and take no other effect on SEC. + +What: /sys/kernel/debug/hisi_sec2//sec_dfx/invalid_req_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of invalid requests being received. + Available for both PF and VF, and take no other effect on SEC. + +What: /sys/kernel/debug/hisi_sec2//sec_dfx/done_flag_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of completed but marked error requests + to be received. + Available for both PF and VF, and take no other effect on SEC. 
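For reference, the sec_dfx entries documented above are plain debugfs files: reading one returns the current value, and because the driver creates them 0644 with a setter that only accepts 0, writing "0" resets the counter. Below is a minimal userspace sketch (not part of the patch) that reads and then clears a counter whose full path is passed on the command line, e.g. the send_cnt path above with the per-device directory name filled in; the path component elided in the ABI entries is left to the caller.

    /*
     * Minimal userspace sketch (not part of the patch): read one sec_dfx
     * counter and then reset it by writing "0".  Pass the full debugfs
     * path; any value other than 0 is rejected by the driver's setter
     * with -EINVAL.
     */
    #include <stdio.h>

    int main(int argc, char **argv)
    {
    	char buf[32];
    	FILE *f;

    	if (argc != 2) {
    		fprintf(stderr, "usage: %s <counter file>\n", argv[0]);
    		return 1;
    	}

    	f = fopen(argv[1], "r");
    	if (!f)
    		return 1;
    	if (fgets(buf, sizeof(buf), f))
    		printf("%s: %s", argv[1], buf);	/* value is printed as "%lld\n" */
    	fclose(f);

    	f = fopen(argv[1], "w");		/* clear: only "0" is accepted */
    	if (!f)
    		return 1;
    	fputs("0\n", f);
    	fclose(f);

    	return 0;
    }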
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 2326634a1d71..7b64aca704d6 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -160,6 +160,10 @@ struct sec_debug_file { struct sec_dfx { atomic64_t send_cnt; atomic64_t recv_cnt; + atomic64_t send_busy_cnt; + atomic64_t err_bd_cnt; + atomic64_t invalid_req_cnt; + atomic64_t done_flag_cnt; }; struct sec_debug { diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 848ab492d26e..64614a9bdf21 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -148,6 +148,7 @@ static int sec_aead_verify(struct sec_req *req) static void sec_req_cb(struct hisi_qp *qp, void *resp) { struct sec_qp_ctx *qp_ctx = qp->qp_ctx; + struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx; struct sec_sqe *bd = resp; struct sec_ctx *ctx; struct sec_req *req; @@ -157,11 +158,16 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) type = bd->type_cipher_auth & SEC_TYPE_MASK; if (unlikely(type != SEC_BD_TYPE2)) { + atomic64_inc(&dfx->err_bd_cnt); pr_err("err bd type [%d]\n", type); return; } req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)]; + if (unlikely(!req)) { + atomic64_inc(&dfx->invalid_req_cnt); + return; + } req->err_type = bd->type2.error_type; ctx = req->ctx; done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; @@ -174,12 +180,13 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) "err_type[%d],done[%d],flag[%d]\n", req->err_type, done, flag); err = -EIO; + atomic64_inc(&dfx->done_flag_cnt); } if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt) err = sec_aead_verify(req); - atomic64_inc(&ctx->sec->debug.dfx.recv_cnt); + atomic64_inc(&dfx->recv_cnt); ctx->req_op->buf_unmap(ctx, req); @@ -200,10 +207,12 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) return -ENOBUFS; if (!ret) { - if (req->fake_busy) + if (req->fake_busy) { + atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); ret = -EBUSY; - else + } else { ret = -EINPROGRESS; + } } return ret; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index c3381f253d55..5ea44ad8d51c 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -88,6 +88,11 @@ struct sec_hw_error { const char *msg; }; +struct sec_dfx_item { + const char *name; + u32 offset; +}; + static const char sec_name[] = "hisi_sec2"; static struct dentry *sec_debugfs_root; static struct hisi_qm_list sec_devices; @@ -110,6 +115,15 @@ static const char * const sec_dbg_file_name[] = { [SEC_CLEAR_ENABLE] = "clear_enable", }; +static struct sec_dfx_item sec_dfx_labels[] = { + {"send_cnt", offsetof(struct sec_dfx, send_cnt)}, + {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)}, + {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)}, + {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)}, + {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)}, + {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)}, +}; + static const struct debugfs_reg32 sec_dfx_regs[] = { {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010}, {"SEC_SAA_EN ", 0x301270}, @@ -543,10 +557,22 @@ static const struct file_operations sec_dbg_fops = { static int sec_debugfs_atomic64_get(void *data, u64 *val) { *val = atomic64_read((atomic64_t *)data); + + return 0; +} + +static int sec_debugfs_atomic64_set(void *data, u64 val) +{ + if (val) + return -EINVAL; + + 
atomic64_set((atomic64_t *)data, 0); + return 0; } + DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get, - NULL, "%lld\n"); + sec_debugfs_atomic64_set, "%lld\n"); static int sec_core_debug_init(struct sec_dev *sec) { @@ -555,6 +581,7 @@ static int sec_core_debug_init(struct sec_dev *sec) struct sec_dfx *dfx = &sec->debug.dfx; struct debugfs_regset32 *regset; struct dentry *tmp_d; + int i; tmp_d = debugfs_create_dir("sec_dfx", sec->qm.debug.debug_root); @@ -566,13 +593,15 @@ static int sec_core_debug_init(struct sec_dev *sec) regset->nregs = ARRAY_SIZE(sec_dfx_regs); regset->base = qm->io_base; - debugfs_create_regset32("regs", 0444, tmp_d, regset); + if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) + debugfs_create_regset32("regs", 0444, tmp_d, regset); - debugfs_create_file("send_cnt", 0444, tmp_d, - &dfx->send_cnt, &sec_atomic64_ops); - - debugfs_create_file("recv_cnt", 0444, tmp_d, - &dfx->recv_cnt, &sec_atomic64_ops); + for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) { + atomic64_t *data = (atomic64_t *)((uintptr_t)dfx + + sec_dfx_labels[i].offset); + debugfs_create_file(sec_dfx_labels[i].name, 0644, + tmp_d, data, &sec_atomic64_ops); + } return 0; } -- cgit v1.2.3-59-g8ed1b From 64a6301ebee769073e84daa14eeee01125aef79d Mon Sep 17 00:00:00 2001 From: Hui Tang Date: Fri, 15 May 2020 17:13:57 +0800 Subject: crypto: hisilicon/hpre - add debugfs for Hisilicon HPRE Add debugfs to provides IO operation debug information and add BD processing timeout count function Signed-off-by: Hui Tang Signed-off-by: Longfang Liu Signed-off-by: Shukun Tan Reviewed-by: Zaibo Xu Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- Documentation/ABI/testing/debugfs-hisi-hpre | 45 +++++++++++++ drivers/crypto/hisilicon/hpre/hpre.h | 17 +++++ drivers/crypto/hisilicon/hpre/hpre_crypto.c | 99 ++++++++++++++++++++++++----- drivers/crypto/hisilicon/hpre/hpre_main.c | 56 ++++++++++++++++ 4 files changed, 202 insertions(+), 15 deletions(-) diff --git a/Documentation/ABI/testing/debugfs-hisi-hpre b/Documentation/ABI/testing/debugfs-hisi-hpre index 093cf46a4e29..1fe3eae8ab21 100644 --- a/Documentation/ABI/testing/debugfs-hisi-hpre +++ b/Documentation/ABI/testing/debugfs-hisi-hpre @@ -93,3 +93,48 @@ Contact: linux-crypto@vger.kernel.org Description: Dump the status of the QM. Four states: initiated, started, stopped and closed. Available for both PF and VF, and take no other effect on HPRE. + +What: /sys/kernel/debug/hisi_hpre//hpre_dfx/send_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of sent requests. + Available for both PF and VF, and take no other effect on HPRE. + +What: /sys/kernel/debug/hisi_hpre//hpre_dfx/recv_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of received requests. + Available for both PF and VF, and take no other effect on HPRE. + +What: /sys/kernel/debug/hisi_hpre//hpre_dfx/send_busy_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of requests sent + with returning busy. + Available for both PF and VF, and take no other effect on HPRE. + +What: /sys/kernel/debug/hisi_hpre//hpre_dfx/send_fail_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of completed but error requests. + Available for both PF and VF, and take no other effect on HPRE. 
+ +What: /sys/kernel/debug/hisi_hpre//hpre_dfx/invalid_req_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of invalid requests being received. + Available for both PF and VF, and take no other effect on HPRE. + +What: /sys/kernel/debug/hisi_hpre//hpre_dfx/overtime_thrhld +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Set the threshold time for counting the request which is + processed longer than the threshold. + 0: disable(default), 1: 1 microsecond. + Available for both PF and VF, and take no other effect on HPRE. + +What: /sys/kernel/debug/hisi_hpre//hpre_dfx/over_thrhld_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of time out requests. + Available for both PF and VF, and take no other effect on HPRE. diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h index 0a8ba468e2be..ed730d173e95 100644 --- a/drivers/crypto/hisilicon/hpre/hpre.h +++ b/drivers/crypto/hisilicon/hpre/hpre.h @@ -25,6 +25,17 @@ enum hpre_ctrl_dbgfs_file { HPRE_DEBUG_FILE_NUM, }; +enum hpre_dfx_dbgfs_file { + HPRE_SEND_CNT, + HPRE_RECV_CNT, + HPRE_SEND_FAIL_CNT, + HPRE_SEND_BUSY_CNT, + HPRE_OVER_THRHLD_CNT, + HPRE_OVERTIME_THRHLD, + HPRE_INVALID_REQ_CNT, + HPRE_DFX_FILE_NUM +}; + #define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1) struct hpre_debugfs_file { @@ -34,6 +45,11 @@ struct hpre_debugfs_file { struct hpre_debug *debug; }; +struct hpre_dfx { + atomic64_t value; + enum hpre_dfx_dbgfs_file type; +}; + /* * One HPRE controller has one PF and multiple VFs, some global configurations * which PF has need this structure. @@ -41,6 +57,7 @@ struct hpre_debugfs_file { */ struct hpre_debug { struct dentry *debug_root; + struct hpre_dfx dfx[HPRE_DFX_FILE_NUM]; struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM]; }; diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 65425250b2e9..7b5cb27d473d 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "hpre.h" struct hpre_ctx; @@ -32,6 +33,9 @@ struct hpre_ctx; #define HPRE_SQE_DONE_SHIFT 30 #define HPRE_DH_MAX_P_SZ 512 +#define HPRE_DFX_SEC_TO_US 1000000 +#define HPRE_DFX_US_TO_NS 1000 + typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); struct hpre_rsa_ctx { @@ -68,6 +72,7 @@ struct hpre_dh_ctx { struct hpre_ctx { struct hisi_qp *qp; struct hpre_asym_request **req_list; + struct hpre *hpre; spinlock_t req_lock; unsigned int key_sz; bool crt_g2_mode; @@ -90,6 +95,7 @@ struct hpre_asym_request { int err; int req_id; hpre_cb cb; + struct timespec64 req_time; }; static DEFINE_MUTEX(hpre_alg_lock); @@ -119,6 +125,7 @@ static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id) static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req) { struct hpre_ctx *ctx; + struct hpre_dfx *dfx; int id; ctx = hpre_req->ctx; @@ -129,6 +136,10 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req) ctx->req_list[id] = hpre_req; hpre_req->req_id = id; + dfx = ctx->hpre->debug.dfx; + if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value)) + ktime_get_ts64(&hpre_req->req_time); + return id; } @@ -309,12 +320,16 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen) { + struct hpre *hpre; + if (!ctx || !qp || qlen < 0) return 
-EINVAL; spin_lock_init(&ctx->req_lock); ctx->qp = qp; + hpre = container_of(ctx->qp->qm, struct hpre, qm); + ctx->hpre = hpre; ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL); if (!ctx->req_list) return -ENOMEM; @@ -337,38 +352,80 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all) ctx->key_sz = 0; } +static bool hpre_is_bd_timeout(struct hpre_asym_request *req, + u64 overtime_thrhld) +{ + struct timespec64 reply_time; + u64 time_use_us; + + ktime_get_ts64(&reply_time); + time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) * + HPRE_DFX_SEC_TO_US + + (reply_time.tv_nsec - req->req_time.tv_nsec) / + HPRE_DFX_US_TO_NS; + + if (time_use_us <= overtime_thrhld) + return false; + + return true; +} + static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp) { + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; struct hpre_asym_request *req; struct kpp_request *areq; + u64 overtime_thrhld; int ret; ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); areq = req->areq.dh; areq->dst_len = ctx->key_sz; + + overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); + if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) + atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); + hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src); kpp_request_complete(areq, ret); + atomic64_inc(&dfx[HPRE_RECV_CNT].value); } static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp) { + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; struct hpre_asym_request *req; struct akcipher_request *areq; + u64 overtime_thrhld; int ret; ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); + + overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); + if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) + atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); + areq = req->areq.rsa; areq->dst_len = ctx->key_sz; hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src); akcipher_request_complete(areq, ret); + atomic64_inc(&dfx[HPRE_RECV_CNT].value); } static void hpre_alg_cb(struct hisi_qp *qp, void *resp) { struct hpre_ctx *ctx = qp->qp_ctx; + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; struct hpre_sqe *sqe = resp; + struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)]; - ctx->req_list[le16_to_cpu(sqe->tag)]->cb(ctx, resp); + + if (unlikely(!req)) { + atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value); + return; + } + + req->cb(ctx, resp); } static int hpre_ctx_init(struct hpre_ctx *ctx) @@ -436,6 +493,29 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa) return 0; } +static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg) +{ + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; + int ctr = 0; + int ret; + + do { + atomic64_inc(&dfx[HPRE_SEND_CNT].value); + ret = hisi_qp_send(ctx->qp, msg); + if (ret != -EBUSY) + break; + atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value); + } while (ctr++ < HPRE_TRY_SEND_TIMES); + + if (likely(!ret)) + return ret; + + if (ret != -EBUSY) + atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value); + + return ret; +} + #ifdef CONFIG_CRYPTO_DH static int hpre_dh_compute_value(struct kpp_request *req) { @@ -444,7 +524,6 @@ static int hpre_dh_compute_value(struct kpp_request *req) void *tmp = kpp_request_ctx(req); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); struct hpre_sqe *msg = &hpre_req->req; - int ctr = 0; int ret; ret = hpre_msg_request_set(ctx, req, false); @@ -465,11 +544,9 @@ static int hpre_dh_compute_value(struct kpp_request *req) msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2); 
else msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH); - do { - ret = hisi_qp_send(ctx->qp, msg); - } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES); /* success */ + ret = hpre_send(ctx, msg); if (likely(!ret)) return -EINPROGRESS; @@ -647,7 +724,6 @@ static int hpre_rsa_enc(struct akcipher_request *req) void *tmp = akcipher_request_ctx(req); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); struct hpre_sqe *msg = &hpre_req->req; - int ctr = 0; int ret; /* For 512 and 1536 bits key size, use soft tfm instead */ @@ -677,11 +753,8 @@ static int hpre_rsa_enc(struct akcipher_request *req) if (unlikely(ret)) goto clear_all; - do { - ret = hisi_qp_send(ctx->qp, msg); - } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES); - /* success */ + ret = hpre_send(ctx, msg); if (likely(!ret)) return -EINPROGRESS; @@ -699,7 +772,6 @@ static int hpre_rsa_dec(struct akcipher_request *req) void *tmp = akcipher_request_ctx(req); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); struct hpre_sqe *msg = &hpre_req->req; - int ctr = 0; int ret; /* For 512 and 1536 bits key size, use soft tfm instead */ @@ -736,11 +808,8 @@ static int hpre_rsa_dec(struct akcipher_request *req) if (unlikely(ret)) goto clear_all; - do { - ret = hisi_qp_send(ctx->qp, msg); - } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES); - /* success */ + ret = hpre_send(ctx, msg); if (likely(!ret)) return -EINPROGRESS; diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index c5ddd3a8ec85..fb3988f347f6 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -159,6 +159,16 @@ static const struct debugfs_reg32 hpre_com_dfx_regs[] = { {"INT_STATUS ", HPRE_INT_STATUS}, }; +static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = { + "send_cnt", + "recv_cnt", + "send_fail_cnt", + "send_busy_cnt", + "over_thrhld_cnt", + "overtime_thrhld", + "invalid_req_cnt" +}; + static int pf_q_num_set(const char *val, const struct kernel_param *kp) { return q_num_set(val, kp, HPRE_PCI_DEVICE_ID); @@ -524,6 +534,33 @@ static const struct file_operations hpre_ctrl_debug_fops = { .write = hpre_ctrl_debug_write, }; +static int hpre_debugfs_atomic64_get(void *data, u64 *val) +{ + struct hpre_dfx *dfx_item = data; + + *val = atomic64_read(&dfx_item->value); + + return 0; +} + +static int hpre_debugfs_atomic64_set(void *data, u64 val) +{ + struct hpre_dfx *dfx_item = data; + struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD; + + if (val) + return -EINVAL; + + if (dfx_item->type == HPRE_OVERTIME_THRHLD) + atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0); + atomic64_set(&dfx_item->value, val); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get, + hpre_debugfs_atomic64_set, "%llu\n"); + static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir, enum hpre_ctrl_dbgfs_file type, int indx) { @@ -621,6 +658,22 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug) return hpre_cluster_debugfs_init(debug); } +static void hpre_dfx_debug_init(struct hpre_debug *debug) +{ + struct hpre *hpre = container_of(debug, struct hpre, debug); + struct hpre_dfx *dfx = hpre->debug.dfx; + struct hisi_qm *qm = &hpre->qm; + struct dentry *parent; + int i; + + parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root); + for (i = 0; i < HPRE_DFX_FILE_NUM; i++) { + dfx[i].type = i; + debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i], + 
&hpre_atomic64_ops); + } +} + static int hpre_debugfs_init(struct hpre *hpre) { struct hisi_qm *qm = &hpre->qm; @@ -641,6 +694,9 @@ static int hpre_debugfs_init(struct hpre *hpre) if (ret) goto failed_to_create; } + + hpre_dfx_debug_init(&hpre->debug); + return 0; failed_to_create: -- cgit v1.2.3-59-g8ed1b From 6621e6492fbdf55d25ea7dd09c8a4cd520c0028d Mon Sep 17 00:00:00 2001 From: Longfang Liu Date: Fri, 15 May 2020 17:13:58 +0800 Subject: crypto: hisilicon/zip - add debugfs for Hisilicon ZIP Hisilicon ZIP engine driver uses debugfs to provides IO operation debug information Signed-off-by: Longfang Liu Signed-off-by: Shukun Tan Reviewed-by: Zaibo Xu Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- Documentation/ABI/testing/debugfs-hisi-zip | 26 ++++++++++++++ drivers/crypto/hisilicon/zip/zip.h | 8 +++++ drivers/crypto/hisilicon/zip/zip_crypto.c | 9 ++++- drivers/crypto/hisilicon/zip/zip_main.c | 54 ++++++++++++++++++++++++++++++ 4 files changed, 96 insertions(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/debugfs-hisi-zip b/Documentation/ABI/testing/debugfs-hisi-zip index 1002a01e6342..4832ba038301 100644 --- a/Documentation/ABI/testing/debugfs-hisi-zip +++ b/Documentation/ABI/testing/debugfs-hisi-zip @@ -86,3 +86,29 @@ Contact: linux-crypto@vger.kernel.org Description: Dump the status of the QM. Four states: initiated, started, stopped and closed. Available for both PF and VF, and take no other effect on ZIP. + +What: /sys/kernel/debug/hisi_zip//zip_dfx/send_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of sent requests. + Available for both PF and VF, and take no other effect on ZIP. + +What: /sys/kernel/debug/hisi_zip//zip_dfx/recv_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of received requests. + Available for both PF and VF, and take no other effect on ZIP. + +What: /sys/kernel/debug/hisi_zip//zip_dfx/send_busy_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of requests received + with returning busy. + Available for both PF and VF, and take no other effect on ZIP. + +What: /sys/kernel/debug/hisi_zip//zip_dfx/err_bd_cnt +Date: Apr 2020 +Contact: linux-crypto@vger.kernel.org +Description: Dump the total number of BD type error requests + to be received. + Available for both PF and VF, and take no other effect on ZIP. 
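The zip_main.c changes below reuse the same counter-export recipe already added to qm.c and sec_main.c in this series: a static table pairing a file name with the offsetof() of an atomic64_t inside the dfx structure, plus a DEFINE_DEBUGFS_ATTRIBUTE get/set pair whose setter rejects anything but 0. A condensed, kernel-style sketch of that shared pattern follows; the identifiers are abbreviated, illustrative names, not the literal driver symbols.

    /* Condensed illustration of the counter-export pattern used by the
     * QM, SEC and ZIP debugfs patches in this series.
     */
    #include <linux/atomic.h>
    #include <linux/debugfs.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    struct dfx_counters {
    	atomic64_t send_cnt;
    	atomic64_t recv_cnt;
    };

    static const struct {
    	const char *name;
    	u32 offset;
    } dfx_files[] = {
    	{ "send_cnt", offsetof(struct dfx_counters, send_cnt) },
    	{ "recv_cnt", offsetof(struct dfx_counters, recv_cnt) },
    };

    static int dfx_get(void *data, u64 *val)
    {
    	*val = atomic64_read((atomic64_t *)data);
    	return 0;
    }

    static int dfx_set(void *data, u64 val)
    {
    	if (val)
    		return -EINVAL;	/* writing anything but 0 is rejected */
    	atomic64_set((atomic64_t *)data, 0);
    	return 0;
    }
    DEFINE_DEBUGFS_ATTRIBUTE(dfx_ops, dfx_get, dfx_set, "%llu\n");

    static void dfx_debugfs_init(struct dfx_counters *dfx, struct dentry *root)
    {
    	int i;

    	for (i = 0; i < ARRAY_SIZE(dfx_files); i++) {
    		void *data = (u8 *)dfx + dfx_files[i].offset;

    		debugfs_create_file(dfx_files[i].name, 0644, root, data,
    				    &dfx_ops);
    	}
    }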
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index 82dc6f867171..f3ed4c0e5493 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -28,12 +28,20 @@ enum hisi_zip_error_type { HZIP_NC_ERR = 0x0d, }; +struct hisi_zip_dfx { + atomic64_t send_cnt; + atomic64_t recv_cnt; + atomic64_t send_busy_cnt; + atomic64_t err_bd_cnt; +}; + struct hisi_zip_ctrl; struct hisi_zip { struct hisi_qm qm; struct list_head list; struct hisi_zip_ctrl *ctrl; + struct hisi_zip_dfx dfx; }; struct hisi_zip_sqe { diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 5fb9d4b41bb9..c73707c2e539 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -332,6 +332,7 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) { struct hisi_zip_sqe *sqe = data; struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx; + struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; struct hisi_zip_req_q *req_q = &qp_ctx->req_q; struct hisi_zip_req *req = req_q->q + sqe->tag; struct acomp_req *acomp_req = req->req; @@ -339,12 +340,14 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) u32 status, dlen, head_size; int err = 0; + atomic64_inc(&dfx->recv_cnt); status = sqe->dw3 & HZIP_BD_STATUS_M; if (status != 0 && status != HZIP_NC_ERR) { dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n", (qp->alg_type == 0) ? "" : "de", qp->qp_id, status, sqe->produced); + atomic64_inc(&dfx->err_bd_cnt); err = -EIO; } dlen = sqe->produced; @@ -487,6 +490,7 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, struct hisi_qp *qp = qp_ctx->qp; struct device *dev = &qp->qm->pdev->dev; struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; + struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; struct hisi_zip_sqe zip_sqe; dma_addr_t input; dma_addr_t output; @@ -516,9 +520,12 @@ static int hisi_zip_do_work(struct hisi_zip_req *req, hisi_zip_config_tag(&zip_sqe, req->req_id); /* send command to start a task */ + atomic64_inc(&dfx->send_cnt); ret = hisi_qp_send(qp, &zip_sqe); - if (ret < 0) + if (ret < 0) { + atomic64_inc(&dfx->send_busy_cnt); goto err_unmap_output; + } return -EINPROGRESS; diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 6161b1025b7f..cb3ed6bd3d86 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -99,6 +99,18 @@ struct hisi_zip_hw_error { const char *msg; }; +struct zip_dfx_item { + const char *name; + u32 offset; +}; + +static struct zip_dfx_item zip_dfx_files[] = { + {"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)}, + {"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)}, + {"send_busy_cnt", offsetof(struct hisi_zip_dfx, send_busy_cnt)}, + {"err_bd_cnt", offsetof(struct hisi_zip_dfx, err_bd_cnt)}, +}; + static const struct hisi_zip_hw_error zip_hw_error[] = { { .int_msk = BIT(0), .msg = "zip_ecc_1bitt_err" }, { .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" }, @@ -469,6 +481,27 @@ static const struct file_operations ctrl_debug_fops = { .write = ctrl_debug_write, }; + +static int zip_debugfs_atomic64_set(void *data, u64 val) +{ + if (val) + return -EINVAL; + + atomic64_set((atomic64_t *)data, 0); + + return 0; +} + +static int zip_debugfs_atomic64_get(void *data, u64 *val) +{ + *val = atomic64_read((atomic64_t *)data); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get, + zip_debugfs_atomic64_set, "%llu\n"); + static 
int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl) { struct hisi_zip *hisi_zip = ctrl->hisi_zip; @@ -500,6 +533,25 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl) return 0; } +static void hisi_zip_dfx_debug_init(struct hisi_qm *qm) +{ + struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm); + struct hisi_zip_dfx *dfx = &zip->dfx; + struct dentry *tmp_dir; + void *data; + int i; + + tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root); + for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) { + data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset); + debugfs_create_file(zip_dfx_files[i].name, + 0644, + tmp_dir, + data, + &zip_atomic64_ops); + } +} + static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl) { int i; @@ -538,6 +590,8 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip) goto failed_to_create; } + hisi_zip_dfx_debug_init(qm); + return 0; failed_to_create: -- cgit v1.2.3-59-g8ed1b From c31dc9fe165d1b53c0494e0260a798d491de7bb4 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Fri, 15 May 2020 17:13:59 +0800 Subject: crypto: hisilicon/qm - add DebugFS for xQC and xQE dump Add dump information of SQC/CQC/EQC/AEQC/SQE/CQE/EQE/AEQE. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 4 + drivers/crypto/hisilicon/qm.c | 511 +++++++++++++++++++++++++++--- drivers/crypto/hisilicon/qm.h | 2 + drivers/crypto/hisilicon/sec2/sec_main.c | 6 + drivers/crypto/hisilicon/zip/zip_main.c | 4 + 5 files changed, 488 insertions(+), 39 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index fb3988f347f6..38405b5cb884 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -84,6 +84,8 @@ #define HPRE_OOO_ECC_2BIT_ERR BIT(5) #define HPRE_VIA_MSI_DSM 1 +#define HPRE_SQE_MASK_OFFSET 8 +#define HPRE_SQE_MASK_LEN 24 static struct hisi_qm_list hpre_devices; static const char hpre_name[] = "hisi_hpre"; @@ -683,6 +685,8 @@ static int hpre_debugfs_init(struct hpre *hpre) dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root); qm->debug.debug_root = dir; + qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; + qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; ret = hisi_qm_debug_init(qm); if (ret) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 7c1982fd2156..57ad13149e05 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -176,9 +176,12 @@ #define QMC_ALIGN(sz) ALIGN(sz, 32) #define QM_DBG_READ_LEN 256 +#define QM_DBG_WRITE_LEN 1024 #define QM_DBG_TMP_BUF_LEN 22 #define QM_PCI_COMMAND_INVALID ~0 +#define QM_SQE_ADDR_MASK GENMASK(7, 0) + #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \ @@ -1064,6 +1067,473 @@ static const struct file_operations qm_regs_fops = { .release = single_release, }; +static ssize_t qm_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *pos) +{ + char buf[QM_DBG_READ_LEN]; + int len; + + if (*pos) + return 0; + + if (count < QM_DBG_READ_LEN) + return -ENOSPC; + + len = snprintf(buf, QM_DBG_READ_LEN, "%s\n", + "Please echo help to cmd to get help information"); + + if (copy_to_user(buffer, buf, len)) + return -EFAULT; + + return (*pos = len); +} + +static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, + dma_addr_t *dma_addr) +{ + struct device *dev = &qm->pdev->dev; + void 
*ctx_addr; + + ctx_addr = kzalloc(ctx_size, GFP_KERNEL); + if (!ctx_addr) + return ERR_PTR(-ENOMEM); + + *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, *dma_addr)) { + dev_err(dev, "DMA mapping error!\n"); + kfree(ctx_addr); + return ERR_PTR(-ENOMEM); + } + + return ctx_addr; +} + +static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, + const void *ctx_addr, dma_addr_t *dma_addr) +{ + struct device *dev = &qm->pdev->dev; + + dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE); + kfree(ctx_addr); +} + +static int dump_show(struct hisi_qm *qm, void *info, + unsigned int info_size, char *info_name) +{ + struct device *dev = &qm->pdev->dev; + u8 *info_buf, *info_curr = info; + u32 i; +#define BYTE_PER_DW 4 + + info_buf = kzalloc(info_size, GFP_KERNEL); + if (!info_buf) + return -ENOMEM; + + for (i = 0; i < info_size; i++, info_curr++) { + if (i % BYTE_PER_DW == 0) + info_buf[i + 3UL] = *info_curr; + else if (i % BYTE_PER_DW == 1) + info_buf[i + 1UL] = *info_curr; + else if (i % BYTE_PER_DW == 2) + info_buf[i - 1] = *info_curr; + else if (i % BYTE_PER_DW == 3) + info_buf[i - 3] = *info_curr; + } + + dev_info(dev, "%s DUMP\n", info_name); + for (i = 0; i < info_size; i += BYTE_PER_DW) { + pr_info("DW%d: %02X%02X %02X%02X\n", i / BYTE_PER_DW, + info_buf[i], info_buf[i + 1UL], + info_buf[i + 2UL], info_buf[i + 3UL]); + } + + kfree(info_buf); + + return 0; +} + +static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) +{ + return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); +} + +static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) +{ + return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); +} + +static int qm_sqc_dump(struct hisi_qm *qm, const char *s) +{ + struct device *dev = &qm->pdev->dev; + struct qm_sqc *sqc, *sqc_curr; + dma_addr_t sqc_dma; + u32 qp_id; + int ret; + + if (!s) + return -EINVAL; + + ret = kstrtou32(s, 0, &qp_id); + if (ret || qp_id >= qm->qp_num) { + dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1); + return -EINVAL; + } + + sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma); + if (IS_ERR(sqc)) + return PTR_ERR(sqc); + + ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id); + if (ret) { + down_read(&qm->qps_lock); + if (qm->sqc) { + sqc_curr = qm->sqc + qp_id; + + ret = dump_show(qm, sqc_curr, sizeof(*sqc), + "SOFT SQC"); + if (ret) + dev_info(dev, "Show soft sqc failed!\n"); + } + up_read(&qm->qps_lock); + + goto err_free_ctx; + } + + ret = dump_show(qm, sqc, sizeof(*sqc), "SQC"); + if (ret) + dev_info(dev, "Show hw sqc failed!\n"); + +err_free_ctx: + qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma); + return ret; +} + +static int qm_cqc_dump(struct hisi_qm *qm, const char *s) +{ + struct device *dev = &qm->pdev->dev; + struct qm_cqc *cqc, *cqc_curr; + dma_addr_t cqc_dma; + u32 qp_id; + int ret; + + if (!s) + return -EINVAL; + + ret = kstrtou32(s, 0, &qp_id); + if (ret || qp_id >= qm->qp_num) { + dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1); + return -EINVAL; + } + + cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma); + if (IS_ERR(cqc)) + return PTR_ERR(cqc); + + ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id); + if (ret) { + down_read(&qm->qps_lock); + if (qm->cqc) { + cqc_curr = qm->cqc + qp_id; + + ret = dump_show(qm, cqc_curr, sizeof(*cqc), + "SOFT CQC"); + if (ret) + dev_info(dev, "Show soft cqc failed!\n"); + } + up_read(&qm->qps_lock); + + goto err_free_ctx; + } + + ret = dump_show(qm, cqc, sizeof(*cqc), "CQC"); + if (ret) + dev_info(dev, "Show hw cqc 
failed!\n"); + +err_free_ctx: + qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma); + return ret; +} + +static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size, + int cmd, char *name) +{ + struct device *dev = &qm->pdev->dev; + dma_addr_t xeqc_dma; + void *xeqc; + int ret; + + if (strsep(&s, " ")) { + dev_err(dev, "Please do not input extra characters!\n"); + return -EINVAL; + } + + xeqc = qm_ctx_alloc(qm, size, &xeqc_dma); + if (IS_ERR(xeqc)) + return PTR_ERR(xeqc); + + ret = qm_mb(qm, cmd, xeqc_dma, 0, 1); + if (ret) + goto err_free_ctx; + + ret = dump_show(qm, xeqc, size, name); + if (ret) + dev_info(dev, "Show hw %s failed!\n", name); + +err_free_ctx: + qm_ctx_free(qm, size, xeqc, &xeqc_dma); + return ret; +} + +static int q_dump_param_parse(struct hisi_qm *qm, char *s, + u32 *e_id, u32 *q_id) +{ + struct device *dev = &qm->pdev->dev; + unsigned int qp_num = qm->qp_num; + char *presult; + int ret; + + presult = strsep(&s, " "); + if (!presult) { + dev_err(dev, "Please input qp number!\n"); + return -EINVAL; + } + + ret = kstrtou32(presult, 0, q_id); + if (ret || *q_id >= qp_num) { + dev_err(dev, "Please input qp num (0-%d)", qp_num - 1); + return -EINVAL; + } + + presult = strsep(&s, " "); + if (!presult) { + dev_err(dev, "Please input sqe number!\n"); + return -EINVAL; + } + + ret = kstrtou32(presult, 0, e_id); + if (ret || *e_id >= QM_Q_DEPTH) { + dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1); + return -EINVAL; + } + + if (strsep(&s, " ")) { + dev_err(dev, "Please do not input extra characters!\n"); + return -EINVAL; + } + + return 0; +} + +static int qm_sq_dump(struct hisi_qm *qm, char *s) +{ + struct device *dev = &qm->pdev->dev; + void *sqe, *sqe_curr; + struct hisi_qp *qp; + u32 qp_id, sqe_id; + int ret; + + ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id); + if (ret) + return ret; + + sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL); + if (!sqe) + return -ENOMEM; + + qp = &qm->qp_array[qp_id]; + memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH); + sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size); + memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, + qm->debug.sqe_mask_len); + + ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); + if (ret) + dev_info(dev, "Show sqe failed!\n"); + + kfree(sqe); + + return ret; +} + +static int qm_cq_dump(struct hisi_qm *qm, char *s) +{ + struct device *dev = &qm->pdev->dev; + struct qm_cqe *cqe_curr; + struct hisi_qp *qp; + u32 qp_id, cqe_id; + int ret; + + ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id); + if (ret) + return ret; + + qp = &qm->qp_array[qp_id]; + cqe_curr = qp->cqe + cqe_id; + ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE"); + if (ret) + dev_info(dev, "Show cqe failed!\n"); + + return ret; +} + +static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s, + size_t size, char *name) +{ + struct device *dev = &qm->pdev->dev; + void *xeqe; + u32 xeqe_id; + int ret; + + if (!s) + return -EINVAL; + + ret = kstrtou32(s, 0, &xeqe_id); + if (ret || xeqe_id >= QM_Q_DEPTH) { + dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1); + return -EINVAL; + } + + down_read(&qm->qps_lock); + + if (qm->eqe && !strcmp(name, "EQE")) { + xeqe = qm->eqe + xeqe_id; + } else if (qm->aeqe && !strcmp(name, "AEQE")) { + xeqe = qm->aeqe + xeqe_id; + } else { + ret = -EINVAL; + goto err_unlock; + } + + ret = dump_show(qm, xeqe, size, name); + if (ret) + dev_info(dev, "Show %s failed!\n", name); + +err_unlock: + up_read(&qm->qps_lock); + return ret; +} + +static int qm_dbg_help(struct hisi_qm *qm, char *s) +{ 
+ struct device *dev = &qm->pdev->dev; + + if (strsep(&s, " ")) { + dev_err(dev, "Please do not input extra characters!\n"); + return -EINVAL; + } + + dev_info(dev, "available commands:\n"); + dev_info(dev, "sqc \n"); + dev_info(dev, "cqc \n"); + dev_info(dev, "eqc\n"); + dev_info(dev, "aeqc\n"); + dev_info(dev, "sq \n"); + dev_info(dev, "cq \n"); + dev_info(dev, "eq \n"); + dev_info(dev, "aeq \n"); + + return 0; +} + +static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf) +{ + struct device *dev = &qm->pdev->dev; + char *presult, *s; + int ret; + + s = kstrdup(cmd_buf, GFP_KERNEL); + if (!s) + return -ENOMEM; + + presult = strsep(&s, " "); + if (!presult) { + kfree(s); + return -EINVAL; + } + + if (!strcmp(presult, "sqc")) + ret = qm_sqc_dump(qm, s); + else if (!strcmp(presult, "cqc")) + ret = qm_cqc_dump(qm, s); + else if (!strcmp(presult, "eqc")) + ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc), + QM_MB_CMD_EQC, "EQC"); + else if (!strcmp(presult, "aeqc")) + ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc), + QM_MB_CMD_AEQC, "AEQC"); + else if (!strcmp(presult, "sq")) + ret = qm_sq_dump(qm, s); + else if (!strcmp(presult, "cq")) + ret = qm_cq_dump(qm, s); + else if (!strcmp(presult, "eq")) + ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE"); + else if (!strcmp(presult, "aeq")) + ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE"); + else if (!strcmp(presult, "help")) + ret = qm_dbg_help(qm, s); + else + ret = -EINVAL; + + if (ret) + dev_info(dev, "Please echo help\n"); + + kfree(s); + + return ret; +} + +static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *pos) +{ + struct hisi_qm *qm = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + int ret; + + if (*pos) + return 0; + + /* Judge if the instance is being reset. 
*/ + if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) + return 0; + + if (count > QM_DBG_WRITE_LEN) + return -ENOSPC; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return -ENOMEM; + + if (copy_from_user(cmd_buf, buffer, count)) { + kfree(cmd_buf); + return -EFAULT; + } + + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + ret = qm_cmd_write_dump(qm, cmd_buf); + if (ret) { + kfree(cmd_buf); + return ret; + } + + kfree(cmd_buf); + + return count; +} + +static const struct file_operations qm_cmd_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = qm_cmd_read, + .write = qm_cmd_write, +}; + static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index) { struct dentry *qm_d = qm->debug.qm_d; @@ -1389,45 +1859,6 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) } EXPORT_SYMBOL_GPL(hisi_qm_start_qp); -static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, - dma_addr_t *dma_addr) -{ - struct device *dev = &qm->pdev->dev; - void *ctx_addr; - - ctx_addr = kzalloc(ctx_size, GFP_KERNEL); - if (!ctx_addr) - return ERR_PTR(-ENOMEM); - - *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, *dma_addr)) { - dev_err(dev, "DMA mapping error!\n"); - kfree(ctx_addr); - return ERR_PTR(-ENOMEM); - } - - return ctx_addr; -} - -static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, - const void *ctx_addr, dma_addr_t *dma_addr) -{ - struct device *dev = &qm->pdev->dev; - - dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE); - kfree(ctx_addr); -} - -static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) -{ - return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); -} - -static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) -{ - return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); -} - /** * Determine whether the queue is cleared by judging the tail pointers of * sq and cq. 
@@ -2346,6 +2777,8 @@ int hisi_qm_debug_init(struct hisi_qm *qm) debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); + debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops); + debugfs_create_file("status", 0444, qm->debug.qm_d, qm, &qm_status_fops); for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) { diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index e4b46a7b10ef..632674423c04 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -137,6 +137,8 @@ struct debugfs_file { struct qm_debug { u32 curr_qm_qp_num; + u32 sqe_mask_offset; + u32 sqe_mask_len; struct qm_dfx dfx; struct dentry *debug_root; struct dentry *qm_d; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 5ea44ad8d51c..829959bef8fb 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -80,6 +80,9 @@ #define SEC_VF_CNT_MASK 0xffffffc0 #define SEC_DBGFS_VAL_MAX_LEN 20 +#define SEC_SQE_MASK_OFFSET 64 +#define SEC_SQE_MASK_LEN 48 + #define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \ SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF) @@ -632,6 +635,9 @@ static int sec_debugfs_init(struct sec_dev *sec) qm->debug.debug_root = debugfs_create_dir(dev_name(dev), sec_debugfs_root); + + qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; + qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; ret = hisi_qm_debug_init(qm); if (ret) goto failed_to_create; diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index cb3ed6bd3d86..87db2e1f5d37 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -89,6 +89,8 @@ #define HZIP_WR_PORT BIT(11) #define HZIP_BUF_SIZE 22 +#define HZIP_SQE_MASK_OFFSET 64 +#define HZIP_SQE_MASK_LEN 48 static const char hisi_zip_name[] = "hisi_zip"; static struct dentry *hzip_debugfs_root; @@ -578,6 +580,8 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip) dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root); + qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET; + qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN; qm->debug.debug_root = dev_d; ret = hisi_qm_debug_init(qm); if (ret) -- cgit v1.2.3-59-g8ed1b From 988453fb2f18be3f3915220e4c6f18018186aa89 Mon Sep 17 00:00:00 2001 From: Shukun Tan Date: Fri, 15 May 2020 17:14:00 +0800 Subject: crypto: hisilicon/qm - change debugfs file name from qm_regs to regs The debugfs qm_regs file is already in the qm directory, so no qm_ prefix is required. Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- Documentation/ABI/testing/debugfs-hisi-hpre | 6 +++--- Documentation/ABI/testing/debugfs-hisi-sec | 2 +- Documentation/ABI/testing/debugfs-hisi-zip | 6 +++--- drivers/crypto/hisilicon/qm.c | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Documentation/ABI/testing/debugfs-hisi-hpre b/Documentation/ABI/testing/debugfs-hisi-hpre index 1fe3eae8ab21..b4be5f1db4b7 100644 --- a/Documentation/ABI/testing/debugfs-hisi-hpre +++ b/Documentation/ABI/testing/debugfs-hisi-hpre @@ -33,7 +33,7 @@ Contact: linux-crypto@vger.kernel.org Description: Dump debug registers from the HPRE. Only available for PF. -What: /sys/kernel/debug/hisi_hpre//qm/qm_regs +What: /sys/kernel/debug/hisi_hpre//qm/regs Date: Sep 2019 Contact: linux-crypto@vger.kernel.org Description: Dump debug registers from the QM. 
@@ -44,13 +44,13 @@ What: /sys/kernel/debug/hisi_hpre//qm/current_q Date: Sep 2019 Contact: linux-crypto@vger.kernel.org Description: One QM may contain multiple queues. Select specific queue to - show its debug registers in above qm_regs. + show its debug registers in above regs. Only available for PF. What: /sys/kernel/debug/hisi_hpre//qm/clear_enable Date: Sep 2019 Contact: linux-crypto@vger.kernel.org -Description: QM debug registers(qm_regs) read clear control. 1 means enable +Description: QM debug registers(regs) read clear control. 1 means enable register read clear, otherwise 0. Writing to this file has no functional effect, only enable or disable counters clear after reading of these registers. diff --git a/Documentation/ABI/testing/debugfs-hisi-sec b/Documentation/ABI/testing/debugfs-hisi-sec index 725c7808ebbc..85feb4408e0f 100644 --- a/Documentation/ABI/testing/debugfs-hisi-sec +++ b/Documentation/ABI/testing/debugfs-hisi-sec @@ -25,7 +25,7 @@ What: /sys/kernel/debug/hisi_sec2//qm/current_q Date: Oct 2019 Contact: linux-crypto@vger.kernel.org Description: One QM of SEC may contain multiple queues. Select specific - queue to show its debug registers in above 'qm_regs'. + queue to show its debug registers in above 'regs'. Only available for PF. What: /sys/kernel/debug/hisi_sec2//qm/clear_enable diff --git a/Documentation/ABI/testing/debugfs-hisi-zip b/Documentation/ABI/testing/debugfs-hisi-zip index 4832ba038301..3034a2bf99ca 100644 --- a/Documentation/ABI/testing/debugfs-hisi-zip +++ b/Documentation/ABI/testing/debugfs-hisi-zip @@ -26,7 +26,7 @@ Description: One ZIP controller has one PF and multiple VFs, each function has a QM. Select the QM which below qm refers to. Only available for PF. -What: /sys/kernel/debug/hisi_zip//qm/qm_regs +What: /sys/kernel/debug/hisi_zip//qm/regs Date: Nov 2018 Contact: linux-crypto@vger.kernel.org Description: Dump of QM related debug registers. @@ -37,13 +37,13 @@ What: /sys/kernel/debug/hisi_zip//qm/current_q Date: Nov 2018 Contact: linux-crypto@vger.kernel.org Description: One QM may contain multiple queues. Select specific queue to - show its debug registers in above qm_regs. + show its debug registers in above regs. Only available for PF. What: /sys/kernel/debug/hisi_zip//qm/clear_enable Date: Nov 2018 Contact: linux-crypto@vger.kernel.org -Description: QM debug registers(qm_regs) read clear control. 1 means enable +Description: QM debug registers(regs) read clear control. 1 means enable register read clear, otherwise 0. Writing to this file has no functional effect, only enable or disable counters clear after reading of these registers. 
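One detail worth noting about the xQC/xQE dump support added earlier in this series: dump_show() reverses the bytes inside every 32-bit word before formatting, so each little-endian context word is printed most-significant byte first as "DWn: hhhh hhhh" in the kernel log. A standalone sketch of that reordering, illustrative only and not driver code:

    /* Standalone illustration of the per-doubleword byte reversal done by
     * dump_show(): a little-endian 32-bit word prints MSB first.
     */
    #include <stdint.h>
    #include <stdio.h>

    static void dump_words(const uint8_t *info, unsigned int size)
    {
    	unsigned int i;

    	for (i = 0; i + 3 < size; i += 4)
    		printf("DW%u: %02X%02X %02X%02X\n", i / 4,
    		       info[i + 3], info[i + 2], info[i + 1], info[i]);
    }

    int main(void)
    {
    	/* a little-endian word holding the value 0x11223344 */
    	uint8_t word[4] = { 0x44, 0x33, 0x22, 0x11 };

    	dump_words(word, sizeof(word));	/* prints "DW0: 1122 3344" */
    	return 0;
    }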
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 57ad13149e05..a781c0225198 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -2775,7 +2775,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm) goto failed_to_create; } - debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); + debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops); -- cgit v1.2.3-59-g8ed1b From ae4052c59c2d12ee68b3e48eeef1d5ef202b1a0a Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 19 May 2020 22:36:54 +0200 Subject: crypto: cavium/nitrox - Fix a typo in a comment s/NITORX/NITROX/ Signed-off-by: Christophe JAILLET Signed-off-by: Herbert Xu --- drivers/crypto/cavium/nitrox/nitrox_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index e91be9b8b083..788c6607078b 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -346,7 +346,7 @@ static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev) } /** - * nitrox_bist_check - Check NITORX BIST registers status + * nitrox_bist_check - Check NITROX BIST registers status * @ndev: NITROX device */ static int nitrox_bist_check(struct nitrox_device *ndev) -- cgit v1.2.3-59-g8ed1b From d1c72f6e4c051620f44b170dff991240117dd911 Mon Sep 17 00:00:00 2001 From: Iuliana Prodan Date: Wed, 20 May 2020 01:17:25 +0300 Subject: crypto: engine - do not requeue in case of fatal error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now, in crypto-engine, if hardware queue is full (-ENOSPC), requeue request regardless of MAY_BACKLOG flag. If hardware throws any other error code (like -EIO, -EINVAL, -ENOMEM, etc.) only MAY_BACKLOG requests are enqueued back into crypto-engine's queue, since the others can be dropped. The latter case can be fatal error, so those cannot be recovered from. For example, in CAAM driver, -EIO is returned in case the job descriptor is broken, so there is no possibility to fix the job descriptor. Therefore, these errors might be fatal error, so we shouldn’t requeue the request. This will just be pass back and forth between crypto-engine and hardware. Fixes: 6a89f492f8e5 ("crypto: engine - support for parallel requests based on retry mechanism") Signed-off-by: Iuliana Prodan Reported-by: Horia Geantă Reviewed-by: Horia Geantă Signed-off-by: Herbert Xu --- crypto/crypto_engine.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c index 412149e8e3dc..3655d9d3f5df 100644 --- a/crypto/crypto_engine.c +++ b/crypto/crypto_engine.c @@ -169,13 +169,10 @@ start_request: /* * If hardware queue is full (-ENOSPC), requeue request * regardless of backlog flag. - * If hardware throws any other error code, - * requeue only backlog requests. * Otherwise, unprepare and complete the request. 
*/ if (!engine->retry_support || - ((ret != -ENOSPC) && - !(async_req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) { + (ret != -ENOSPC)) { dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret); -- cgit v1.2.3-59-g8ed1b From 58ca0060ec4e51208d2eee12198fc55fd9e4feb3 Mon Sep 17 00:00:00 2001 From: Weili Qian Date: Wed, 20 May 2020 17:19:50 +0800 Subject: crypto: hisilicon - fix driver compatibility issue with different versions of devices In order to be compatible with devices of different versions, V1 in the accelerator driver is now isolated, and other versions are the previous V2 processing flow. Signed-off-by: Weili Qian Signed-off-by: Shukun Tan Reviewed-by: Zhou Wang Signed-off-by: Herbert Xu --- drivers/crypto/hisilicon/hpre/hpre_main.c | 10 +--- drivers/crypto/hisilicon/qm.c | 89 ++++++++++--------------------- drivers/crypto/hisilicon/qm.h | 13 ++--- drivers/crypto/hisilicon/sec2/sec_main.c | 19 ++----- drivers/crypto/hisilicon/zip/zip_main.c | 20 ++----- 5 files changed, 39 insertions(+), 112 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 38405b5cb884..a3ee127a70e3 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -717,19 +717,13 @@ static void hpre_debugfs_exit(struct hpre *hpre) static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { - enum qm_hw_ver rev_id; - - rev_id = hisi_qm_get_hw_version(pdev); - if (rev_id < 0) - return -ENODEV; - - if (rev_id == QM_HW_V1) { + if (pdev->revision == QM_HW_V1) { pci_warn(pdev, "HPRE version 1 is not supported!\n"); return -EINVAL; } qm->pdev = pdev; - qm->ver = rev_id; + qm->ver = pdev->revision; qm->sqe_size = HPRE_SQE_SIZE; qm->dev_name = hpre_name; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index a781c0225198..9bb263cec6c3 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -737,13 +737,14 @@ static void qm_irq_unregister(struct hisi_qm *qm) free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm); - if (qm->ver == QM_HW_V2) { - free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); + if (qm->ver == QM_HW_V1) + return; - if (qm->fun_type == QM_HW_PF) - free_irq(pci_irq_vector(pdev, - QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); - } + free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm); + + if (qm->fun_type == QM_HW_PF) + free_irq(pci_irq_vector(pdev, + QM_ABNORMAL_EVENT_IRQ_VECTOR), qm); } static void qm_init_qp_status(struct hisi_qp *qp) @@ -764,36 +765,26 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, if (number > 0) { switch (type) { case SQC_VFT: - switch (qm->ver) { - case QM_HW_V1: + if (qm->ver == QM_HW_V1) { tmp = QM_SQC_VFT_BUF_SIZE | QM_SQC_VFT_SQC_SIZE | QM_SQC_VFT_INDEX_NUMBER | QM_SQC_VFT_VALID | (u64)base << QM_SQC_VFT_START_SQN_SHIFT; - break; - case QM_HW_V2: + } else { tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT | QM_SQC_VFT_VALID | (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; - break; - case QM_HW_UNKNOWN: - break; } break; case CQC_VFT: - switch (qm->ver) { - case QM_HW_V1: + if (qm->ver == QM_HW_V1) { tmp = QM_CQC_VFT_BUF_SIZE | QM_CQC_VFT_SQC_SIZE | QM_CQC_VFT_INDEX_NUMBER | QM_CQC_VFT_VALID; - break; - case QM_HW_V2: + } else { tmp = QM_CQC_VFT_VALID; - break; - case QM_HW_UNKNOWN: - break; } break; } @@ -1777,7 +1768,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid) if (ver == QM_HW_V1) { sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, 
 		sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
-	} else if (ver == QM_HW_V2) {
+	} else {
 		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
 		sqc->w8 = 0; /* rand_qc */
 	}
@@ -1804,7 +1795,7 @@ static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
 	if (ver == QM_HW_V1) {
 		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4));
 		cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
-	} else if (ver == QM_HW_V2) {
+	} else {
 		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4));
 		cqc->w8 = 0;
 	}
@@ -2020,12 +2011,13 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm)
 {
 	unsigned int val;
 
-	if (qm->ver == QM_HW_V2) {
-		writel(0x1, qm->io_base + QM_CACHE_WB_START);
-		if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
-					       val, val & BIT(0), 10, 1000))
-			dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
-	}
+	if (qm->ver == QM_HW_V1)
+		return;
+
+	writel(0x1, qm->io_base + QM_CACHE_WB_START);
+	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
+				       val, val & BIT(0), 10, 1000))
+		dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
 }
 
 static void qm_qp_event_notifier(struct hisi_qp *qp)
@@ -2082,12 +2074,12 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
 
 	switch (qfr->type) {
 	case UACCE_QFRT_MMIO:
-		if (qm->ver == QM_HW_V2) {
-			if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
-			    QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
+		if (qm->ver == QM_HW_V1) {
+			if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
 				return -EINVAL;
 		} else {
-			if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
+			if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
+			    QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
 				return -EINVAL;
 		}
 
@@ -2342,16 +2334,10 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
 {
 	struct pci_dev *pdev = qm->pdev;
 
-	switch (qm->ver) {
-	case QM_HW_V1:
+	if (qm->ver == QM_HW_V1)
 		qm->ops = &qm_hw_ops_v1;
-		break;
-	case QM_HW_V2:
+	else
 		qm->ops = &qm_hw_ops_v2;
-		break;
-	default:
-		return;
-	}
 
 	pci_set_drvdata(pdev, qm);
 	mutex_init(&qm->mailbox_lock);
@@ -2859,25 +2845,6 @@ static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
 	return qm->ops->hw_error_handle(qm);
 }
 
-/**
- * hisi_qm_get_hw_version() - Get hardware version of a qm.
- * @pdev: The device which hardware version we want to get.
- *
- * This function gets the hardware version of a qm. Return QM_HW_UNKNOWN
- * if the hardware version is not supported.
- */
-enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev)
-{
-	switch (pdev->revision) {
-	case QM_HW_V1:
-	case QM_HW_V2:
-		return pdev->revision;
-	default:
-		return QM_HW_UNKNOWN;
-	}
-}
-EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version);
-
 /**
  * hisi_qm_dev_err_init() - Initialize device error configuration.
  * @qm: The qm for which we want to do error initialization.
@@ -3846,7 +3813,7 @@ static int qm_irq_register(struct hisi_qm *qm)
 	if (ret)
 		return ret;
 
-	if (qm->ver == QM_HW_V2) {
+	if (qm->ver != QM_HW_V1) {
 		ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
 				  qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
 		if (ret)
@@ -3942,7 +3909,7 @@ int hisi_qm_init(struct hisi_qm *qm)
 	if (ret)
 		goto err_free_irq_vectors;
 
-	if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V2) {
+	if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
 		/* v2 starts to support get vft by mailbox */
 		ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
 		if (ret)
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 632674423c04..0a351de8d838 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -108,6 +108,7 @@ enum qm_hw_ver {
 	QM_HW_UNKNOWN = -1,
 	QM_HW_V1 = 0x20,
 	QM_HW_V2 = 0x21,
+	QM_HW_V3 = 0x30,
 };
 
 enum qm_fun_type {
@@ -287,7 +288,6 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp,
 	struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
 					      device, NULL);
 	u32 n, q_num;
-	u8 rev_id;
 	int ret;
 
 	if (!val)
@@ -298,17 +298,10 @@ static inline int q_num_set(const char *val, const struct kernel_param *kp,
 		pr_info("No device found currently, suppose queue number is %d\n",
 			q_num);
 	} else {
-		rev_id = pdev->revision;
-		switch (rev_id) {
-		case QM_HW_V1:
+		if (pdev->revision == QM_HW_V1)
 			q_num = QM_QNUM_V1;
-			break;
-		case QM_HW_V2:
+		else
 			q_num = QM_QNUM_V2;
-			break;
-		default:
-			return -EINVAL;
-		}
 	}
 
 	ret = kstrtou32(val, 10, &n);
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 829959bef8fb..a4cb58b54b25 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -728,18 +728,10 @@ static int sec_pf_probe_init(struct sec_dev *sec)
 	struct hisi_qm *qm = &sec->qm;
 	int ret;
 
-	switch (qm->ver) {
-	case QM_HW_V1:
+	if (qm->ver == QM_HW_V1)
 		qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
-		break;
-
-	case QM_HW_V2:
+	else
 		qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
-		break;
-
-	default:
-		return -EINVAL;
-	}
 
 	qm->err_ini = &sec_err_ini;
 
@@ -755,15 +747,10 @@ static int sec_pf_probe_init(struct sec_dev *sec)
 
 static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 {
-	enum qm_hw_ver rev_id;
 	int ret;
 
-	rev_id = hisi_qm_get_hw_version(pdev);
-	if (rev_id == QM_HW_UNKNOWN)
-		return -ENODEV;
-
 	qm->pdev = pdev;
-	qm->ver = rev_id;
+	qm->ver = pdev->revision;
 	qm->sqe_size = SEC_SQE_SIZE;
 	qm->dev_name = sec_name;
 
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 87db2e1f5d37..2229a21ae7c8 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -719,18 +719,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
 	hisi_zip->ctrl = ctrl;
 	ctrl->hisi_zip = hisi_zip;
 
-	switch (qm->ver) {
-	case QM_HW_V1:
+	if (qm->ver == QM_HW_V1)
 		qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1;
-		break;
-
-	case QM_HW_V2:
+	else
 		qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2;
-		break;
-
-	default:
-		return -EINVAL;
-	}
 
 	qm->err_ini = &hisi_zip_err_ini;
 
@@ -743,14 +735,8 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
 
 static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 {
-	enum qm_hw_ver rev_id;
-
-	rev_id = hisi_qm_get_hw_version(pdev);
-	if (rev_id == QM_HW_UNKNOWN)
-		return -EINVAL;
-
 	qm->pdev = pdev;
-	qm->ver = rev_id;
+	qm->ver = pdev->revision;
 	qm->algs = "zlib\ngzip";
 	qm->sqe_size = HZIP_SQE_SIZE;
 	qm->dev_name = hisi_zip_name;
-- 
cgit v1.2.3-59-g8ed1b
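
To make the policy introduced by the crypto-engine patch above easier to follow, here is a
minimal userspace sketch of the resulting decision. It is illustrative only and not taken from
the kernel sources: should_requeue() is a hypothetical helper, not a kernel API. A request goes
back onto the engine's queue only when the hardware reports a full queue (-ENOSPC); any other
error is treated as potentially fatal and the request is completed with that error instead of
bouncing between the engine and the hardware.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative only: mirrors the decision taken in crypto_engine.c after
 * the "do not requeue in case of fatal error" patch. should_requeue() is
 * a hypothetical helper, not a kernel function.
 */
static bool should_requeue(bool retry_support, int hw_ret)
{
	/* Only a full hardware queue is worth retrying. */
	return retry_support && hw_ret == -ENOSPC;
}

int main(void)
{
	const int errs[] = { -ENOSPC, -EIO, -EINVAL, -ENOMEM };
	unsigned int i;

	for (i = 0; i < sizeof(errs) / sizeof(errs[0]); i++)
		printf("hw error %d -> %s\n", errs[i],
		       should_requeue(true, errs[i]) ?
		       "requeue request" : "complete with error");
	return 0;
}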
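
The hisilicon patch above repeatedly replaces per-version switch statements with a single
"is this V1?" test, so that newly added revisions such as QM_HW_V3 automatically follow the V2
flow. The standalone sketch below shows that pattern under stated assumptions: the enum values
are copied from the qm.h hunk in the diff, while pick_queue_num() and the queue counts are
hypothetical stand-ins for the driver's helpers and constants.

#include <stdio.h>

/* Hardware revision IDs as listed in the qm.h hunk of the patch. */
enum qm_hw_ver {
	QM_HW_UNKNOWN = -1,
	QM_HW_V1 = 0x20,
	QM_HW_V2 = 0x21,
	QM_HW_V3 = 0x30,
};

/* Placeholder queue counts, not the real driver values. */
#define QM_QNUM_V1 1024u
#define QM_QNUM_V2 4096u

/* Hypothetical helper: V1 is the odd one out, anything newer shares the V2 path. */
static unsigned int pick_queue_num(enum qm_hw_ver ver)
{
	return (ver == QM_HW_V1) ? QM_QNUM_V1 : QM_QNUM_V2;
}

int main(void)
{
	printf("V1 -> %u queues\n", pick_queue_num(QM_HW_V1));
	printf("V2 -> %u queues\n", pick_queue_num(QM_HW_V2));
	printf("V3 -> %u queues\n", pick_queue_num(QM_HW_V3)); /* falls into the V2 flow */
	return 0;
}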