author     | 2025-05-27 15:53:55 -0700
committer  | 2025-05-27 15:53:55 -0700
commit     | 5722a6cecfff3e381b96bbbd7e9b3911731e80d9 (patch)
tree       | b6d9cf6eff9a826c6aaf74a3436738fb61da71f1
parent     | Merge tag 'regulator-v6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator (diff)
parent     | spi: spi-qpic-snand: extend bitmasks usage (diff)
Merge tag 'spi-v6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi
Pull spi updates from Mark Brown:
"The bulk of the changes in this release are driver work, as well as
new device support we have some important work on performance over
several drivers, and big overhauls for maintainability on a couple
too. Highlights include:
- Big cleanups of the sh-msiof driver from Geert Uytterhoeven, and of
the NXP FSPI driver from Haibo Chen
- Performance improvements for the AXI SPI engine
- Support for writes to memory mapped flashes on Renesas devices
- Integrated DMA support for Tegra210 QSPI, used by the Tegra234
- DMA support for Amlogic SPI controllers
- Support for AMD HID2, Qualcomm IPQ5018, Renesas RZ/G3E, Rockchip
RK3528 and Samsung Exynos Autov920
An update to fix some issues with the Atmel QSPI driver runtime PM
pulled in a new API from the PM core, and the Renesas memory mapped
write changes pull in some code that's shared in drivers/memory"
* tag 'spi-v6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (90 commits)
spi: spi-qpic-snand: return early on error from qcom_spi_io_op()
spi: loopback-test: fix up const pointer issue in rx_ranges_cmp()
spi: gpio: fix const issue in spi_to_spi_gpio()
spi: spi-qpic-snand: remove superfluous parameters of qcom_spi_check_error()
dt-bindings: spi: samsung: add exynosautov920-spi compatible
spi: spi-qpic-snand: reuse qcom_spi_check_raw_flash_errors()
spi: dt-bindings: Add rk3528-spi compatible
spi: spi_amd: Update Kconfig dependencies
spi: spi_amd: Add HIDDMA basic write support
spi: spi_amd: Remove read{q,b} usage on DMA buffer
spi: sh-msiof: Move register definitions to <linux/spi/sh_msiof.h>
spi: sh-msiof: Document frame start sync pulse mode
spi: sh-msiof: Double maximum DMA transfer size using two groups
spi: sh-msiof: Simplify BRG's Division Ratio
spi: sh-msiof: Increase TX FIFO size for R-Car V4H/V4M
spi: sh-msiof: Correct RX FIFO size for R-Car Gen3
spi: sh-msiof: Correct RX FIFO size for R-Car Gen2
spi: sh-msiof: Add core support for dual-group transfers
spi: sh-msiof: Correct SIMDR2_GRPMASK
spi: sh-msiof: SIFCTR bitfield conversion
...
47 files changed, 2368 insertions, 950 deletions
diff --git a/Documentation/devicetree/bindings/memory-controllers/renesas,rzg3e-xspi.yaml b/Documentation/devicetree/bindings/memory-controllers/renesas,rzg3e-xspi.yaml new file mode 100644 index 000000000000..2bfe63ec62dc --- /dev/null +++ b/Documentation/devicetree/bindings/memory-controllers/renesas,rzg3e-xspi.yaml @@ -0,0 +1,135 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/memory-controllers/renesas,rzg3e-xspi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Renesas Expanded Serial Peripheral Interface (xSPI) + +maintainers: + - Biju Das <biju.das.jz@bp.renesas.com> + +description: | + Renesas xSPI allows a SPI flash connected to the SoC to be accessed via + the memory-mapping or the manual command mode. + + The flash chip itself should be represented by a subnode of the XSPI node. + The flash interface is selected based on the "compatible" property of this + subnode: + - "jedec,spi-nor"; + +allOf: + - $ref: /schemas/spi/spi-controller.yaml# + +properties: + compatible: + const: renesas,r9a09g047-xspi # RZ/G3E + + reg: + items: + - description: xSPI registers + - description: direct mapping area + + reg-names: + items: + - const: regs + - const: dirmap + + interrupts: + items: + - description: Interrupt pulse signal by factors excluding errors + - description: Interrupt pulse signal by error factors + + interrupt-names: + items: + - const: pulse + - const: err_pulse + + clocks: + items: + - description: AHB clock + - description: AXI clock + - description: SPI clock + - description: Double speed SPI clock + + clock-names: + items: + - const: ahb + - const: axi + - const: spi + - const: spix2 + + power-domains: + maxItems: 1 + + resets: + items: + - description: Hardware reset + - description: AXI reset + + reset-names: + items: + - const: hresetn + - const: aresetn + + renesas,xspi-cs-addr-sys: + $ref: /schemas/types.yaml#/definitions/phandle + description: | + Phandle to the system controller (sys) that allows to configure + xSPI CS0 and CS1 addresses. 
+ +patternProperties: + "flash@[0-9a-f]+$": + type: object + additionalProperties: true + + properties: + compatible: + contains: + const: jedec,spi-nor + +required: + - compatible + - reg + - reg-names + - interrupts + - interrupt-names + - clocks + - clock-names + - power-domains + - resets + - reset-names + - '#address-cells' + - '#size-cells' + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/clock/renesas,r9a09g047-cpg.h> + + spi@11030000 { + compatible = "renesas,r9a09g047-xspi"; + reg = <0x11030000 0x10000>, <0x20000000 0x10000000>; + reg-names = "regs", "dirmap"; + interrupts = <GIC_SPI 228 IRQ_TYPE_EDGE_RISING>, + <GIC_SPI 229 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "pulse", "err_pulse"; + clocks = <&cpg CPG_MOD 0x9f>, <&cpg CPG_MOD 0xa0>, + <&cpg CPG_CORE 9>, <&cpg CPG_MOD 0xa1>; + clock-names = "ahb", "axi", "spi", "spix2"; + power-domains = <&cpg>; + resets = <&cpg 0xa3>, <&cpg 0xa4>; + reset-names = "hresetn", "aresetn"; + #address-cells = <1>; + #size-cells = <0>; + + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <40000000>; + spi-tx-bus-width = <1>; + spi-rx-bus-width = <1>; + }; + }; diff --git a/Documentation/devicetree/bindings/spi/fsl,dspi.yaml b/Documentation/devicetree/bindings/spi/fsl,dspi.yaml index 7ca8fceda717..bf9cce53c48d 100644 --- a/Documentation/devicetree/bindings/spi/fsl,dspi.yaml +++ b/Documentation/devicetree/bindings/spi/fsl,dspi.yaml @@ -105,12 +105,12 @@ examples: big-endian; flash@0 { - compatible = "jedec,spi-nor"; - reg = <0>; - spi-max-frequency = <16000000>; - spi-cpol; - spi-cpha; - spi-cs-setup-delay-ns = <100>; - spi-cs-hold-delay-ns = <50>; + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <16000000>; + spi-cpol; + spi-cpha; + spi-cs-setup-delay-ns = <100>; + spi-cs-hold-delay-ns = <50>; }; }; diff --git a/Documentation/devicetree/bindings/spi/nuvoton,wpcm450-fiu.yaml b/Documentation/devicetree/bindings/spi/nuvoton,wpcm450-fiu.yaml index 4e0d391e1d69..c97bf48b56b4 100644 --- a/Documentation/devicetree/bindings/spi/nuvoton,wpcm450-fiu.yaml +++ b/Documentation/devicetree/bindings/spi/nuvoton,wpcm450-fiu.yaml @@ -59,8 +59,3 @@ examples: reg = <0>; }; }; - - shm: syscon@c8001000 { - compatible = "nuvoton,wpcm450-shm", "syscon"; - reg = <0xc8001000 0x1000>; - }; diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml b/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml index 48e97e240265..8b3640280559 100644 --- a/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml +++ b/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml @@ -10,9 +10,6 @@ maintainers: - Thierry Reding <thierry.reding@gmail.com> - Jonathan Hunter <jonathanh@nvidia.com> -allOf: - - $ref: spi-controller.yaml# - properties: compatible: enum: @@ -47,6 +44,9 @@ properties: - const: rx - const: tx + iommus: + maxItems: 1 + patternProperties: "@[0-9a-f]+$": type: object @@ -69,6 +69,18 @@ required: unevaluatedProperties: false +allOf: + - $ref: spi-controller.yaml# + - if: + properties: + compatible: + not: + contains: + const: nvidia,tegra234-qspi + then: + properties: + iommus: false + examples: - | #include <dt-bindings/clock/tegra210-car.h> diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml b/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml index aa3f93319203..cb1f15224b45 100644 --- a/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml +++ 
b/Documentation/devicetree/bindings/spi/qcom,spi-qpic-snand.yaml @@ -21,8 +21,12 @@ allOf: properties: compatible: - enum: - - qcom,ipq9574-snand + oneOf: + - items: + - enum: + - qcom,ipq5018-snand + - const: qcom,ipq9574-snand + - const: qcom,ipq9574-snand reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/spi/samsung,spi.yaml b/Documentation/devicetree/bindings/spi/samsung,spi.yaml index 3c206a64d60a..fe298d47b1a9 100644 --- a/Documentation/devicetree/bindings/spi/samsung,spi.yaml +++ b/Documentation/devicetree/bindings/spi/samsung,spi.yaml @@ -29,6 +29,7 @@ properties: - items: - enum: - samsung,exynos8895-spi + - samsung,exynosautov920-spi - const: samsung,exynos850-spi - const: samsung,exynos7-spi deprecated: true diff --git a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml index 0bb443b8decd..8fc17e16efb2 100644 --- a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml +++ b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml @@ -8,12 +8,13 @@ title: Peripheral-specific properties for a SPI bus. description: Many SPI controllers need to add properties to peripheral devices. They could - be common properties like spi-max-frequency, spi-cpha, etc. or they could be - controller specific like delay in clock or data lines, etc. These properties - need to be defined in the peripheral node because they are per-peripheral and - there can be multiple peripherals attached to a controller. All those - properties are listed here. The controller specific properties should go in - their own separate schema that should be referenced from here. + be common properties like spi-max-frequency, spi-cs-high, etc. or they could + be controller specific like delay in clock or data lines, etc. These + properties need to be defined in the peripheral node because they are + per-peripheral and there can be multiple peripherals attached to a + controller. All those properties are listed here. The controller specific + properties should go in their own separate schema that should be referenced + from here. 
maintainers: - Mark Brown <broonie@kernel.org> diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml index 104f5ffdd04e..748faf7f7081 100644 --- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml +++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml @@ -34,6 +34,7 @@ properties: - rockchip,rk3328-spi - rockchip,rk3368-spi - rockchip,rk3399-spi + - rockchip,rk3528-spi - rockchip,rk3562-spi - rockchip,rk3568-spi - rockchip,rk3576-spi diff --git a/Documentation/devicetree/bindings/spi/st,stm32mp25-ospi.yaml b/Documentation/devicetree/bindings/spi/st,stm32mp25-ospi.yaml index 5f276f27dc4c..272bc308726b 100644 --- a/Documentation/devicetree/bindings/spi/st,stm32mp25-ospi.yaml +++ b/Documentation/devicetree/bindings/spi/st,stm32mp25-ospi.yaml @@ -68,6 +68,7 @@ required: - compatible - reg - clocks + - resets - interrupts - st,syscfg-dlyb diff --git a/MAINTAINERS b/MAINTAINERS index 2f3297b1011a..96a5be48686c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1216,7 +1216,9 @@ AMD SPI DRIVER M: Raju Rangoju <Raju.Rangoju@amd.com> L: linux-spi@vger.kernel.org S: Supported +F: drivers/spi/spi-amd-pci.c F: drivers/spi/spi-amd.c +F: drivers/spi/spi-amd.h AMD XDNA DRIVER M: Min Ma <min.ma@amd.com> diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 0e127b0329c0..205a4f8828b0 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1568,6 +1568,32 @@ out: } EXPORT_SYMBOL_GPL(pm_runtime_enable); +static void pm_runtime_set_suspended_action(void *data) +{ + pm_runtime_set_suspended(data); +} + +/** + * devm_pm_runtime_set_active_enabled - set_active version of devm_pm_runtime_enable. + * + * @dev: Device to handle. + */ +int devm_pm_runtime_set_active_enabled(struct device *dev) +{ + int err; + + err = pm_runtime_set_active(dev); + if (err) + return err; + + err = devm_add_action_or_reset(dev, pm_runtime_set_suspended_action, dev); + if (err) + return err; + + return devm_pm_runtime_enable(dev); +} +EXPORT_SYMBOL_GPL(devm_pm_runtime_set_active_enabled); + static void pm_runtime_disable_action(void *data) { pm_runtime_dont_use_autosuspend(data); @@ -1590,6 +1616,24 @@ int devm_pm_runtime_enable(struct device *dev) } EXPORT_SYMBOL_GPL(devm_pm_runtime_enable); +static void pm_runtime_put_noidle_action(void *data) +{ + pm_runtime_put_noidle(data); +} + +/** + * devm_pm_runtime_get_noresume - devres-enabled version of pm_runtime_get_noresume. + * + * @dev: Device to handle. + */ +int devm_pm_runtime_get_noresume(struct device *dev) +{ + pm_runtime_get_noresume(dev); + + return devm_add_action_or_reset(dev, pm_runtime_put_noidle_action, dev); +} +EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume); + /** * pm_runtime_forbid - Block runtime PM of a device. * @dev: Device to handle. 
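
The devres-managed helpers added above, devm_pm_runtime_set_active_enabled() and devm_pm_runtime_get_noresume(), register cleanup actions so a driver no longer has to undo its runtime-PM state by hand in the error and remove paths. Below is a minimal sketch of how a probe routine might use them; the foo_probe() function and its platform device are hypothetical, and the pattern shown simply keeps the device runtime-active for the lifetime of the driver binding.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	/*
	 * Mark the device active and enable runtime PM; the inverse steps
	 * (pm_runtime_set_suspended() and pm_runtime_disable()) are queued
	 * as devres actions and run automatically on unbind.
	 */
	ret = devm_pm_runtime_set_active_enabled(dev);
	if (ret)
		return ret;

	/*
	 * Take a usage reference so the device cannot runtime-suspend; the
	 * matching pm_runtime_put_noidle() is also registered via devres.
	 */
	ret = devm_pm_runtime_get_noresume(dev);
	if (ret)
		return ret;

	/* ... hardware initialisation that relies on the device being powered ... */

	return 0;
}
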
diff --git a/drivers/memory/renesas-rpc-if-regs.h b/drivers/memory/renesas-rpc-if-regs.h new file mode 100644 index 000000000000..e6b33f7c40a8 --- /dev/null +++ b/drivers/memory/renesas-rpc-if-regs.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * R-Car RPC Interface Registers Definitions + * + * Copyright (C) 2025 Renesas Electronics Corporation + */ + +#ifndef __RENESAS_RPC_IF_REGS_H__ +#define __RENESAS_RPC_IF_REGS_H__ + +#include <linux/bits.h> + +#define RPCIF_CMNCR 0x0000 /* R/W */ +#define RPCIF_CMNCR_MD BIT(31) +#define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22) +#define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20) +#define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18) +#define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16) +#define RPCIF_CMNCR_MOIIO(val) (RPCIF_CMNCR_MOIIO0(val) | RPCIF_CMNCR_MOIIO1(val) | \ + RPCIF_CMNCR_MOIIO2(val) | RPCIF_CMNCR_MOIIO3(val)) +#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* documented for RZ/G2L */ +#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* documented for RZ/G2L */ +#define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8) +#define RPCIF_CMNCR_IOFV(val) (RPCIF_CMNCR_IO0FV(val) | RPCIF_CMNCR_IO2FV(val) | \ + RPCIF_CMNCR_IO3FV(val)) +#define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0) + +#define RPCIF_SSLDR 0x0004 /* R/W */ +#define RPCIF_SSLDR_SPNDL(d) (((d) & 0x7) << 16) +#define RPCIF_SSLDR_SLNDL(d) (((d) & 0x7) << 8) +#define RPCIF_SSLDR_SCKDL(d) (((d) & 0x7) << 0) + +#define RPCIF_DRCR 0x000C /* R/W */ +#define RPCIF_DRCR_SSLN BIT(24) +#define RPCIF_DRCR_RBURST(v) ((((v) - 1) & 0x1F) << 16) +#define RPCIF_DRCR_RCF BIT(9) +#define RPCIF_DRCR_RBE BIT(8) +#define RPCIF_DRCR_SSLE BIT(0) + +#define RPCIF_DRCMR 0x0010 /* R/W */ +#define RPCIF_DRCMR_CMD(c) (((c) & 0xFF) << 16) +#define RPCIF_DRCMR_OCMD(c) (((c) & 0xFF) << 0) + +#define RPCIF_DREAR 0x0014 /* R/W */ +#define RPCIF_DREAR_EAV(c) (((c) & 0xF) << 16) +#define RPCIF_DREAR_EAC(c) (((c) & 0x7) << 0) + +#define RPCIF_DROPR 0x0018 /* R/W */ + +#define RPCIF_DRENR 0x001C /* R/W */ +#define RPCIF_DRENR_CDB(o) (((u32)((o) & 0x3)) << 30) +#define RPCIF_DRENR_OCDB(o) (((o) & 0x3) << 28) +#define RPCIF_DRENR_ADB(o) (((o) & 0x3) << 24) +#define RPCIF_DRENR_OPDB(o) (((o) & 0x3) << 20) +#define RPCIF_DRENR_DRDB(o) (((o) & 0x3) << 16) +#define RPCIF_DRENR_DME BIT(15) +#define RPCIF_DRENR_CDE BIT(14) +#define RPCIF_DRENR_OCDE BIT(12) +#define RPCIF_DRENR_ADE(v) (((v) & 0xF) << 8) +#define RPCIF_DRENR_OPDE(v) (((v) & 0xF) << 4) + +#define RPCIF_SMCR 0x0020 /* R/W */ +#define RPCIF_SMCR_SSLKP BIT(8) +#define RPCIF_SMCR_SPIRE BIT(2) +#define RPCIF_SMCR_SPIWE BIT(1) +#define RPCIF_SMCR_SPIE BIT(0) + +#define RPCIF_SMCMR 0x0024 /* R/W */ +#define RPCIF_SMCMR_CMD(c) (((c) & 0xFF) << 16) +#define RPCIF_SMCMR_OCMD(c) (((c) & 0xFF) << 0) + +#define RPCIF_SMADR 0x0028 /* R/W */ + +#define RPCIF_SMOPR 0x002C /* R/W */ +#define RPCIF_SMOPR_OPD3(o) (((o) & 0xFF) << 24) +#define RPCIF_SMOPR_OPD2(o) (((o) & 0xFF) << 16) +#define RPCIF_SMOPR_OPD1(o) (((o) & 0xFF) << 8) +#define RPCIF_SMOPR_OPD0(o) (((o) & 0xFF) << 0) + +#define RPCIF_SMENR 0x0030 /* R/W */ +#define RPCIF_SMENR_CDB(o) (((o) & 0x3) << 30) +#define RPCIF_SMENR_OCDB(o) (((o) & 0x3) << 28) +#define RPCIF_SMENR_ADB(o) (((o) & 0x3) << 24) +#define RPCIF_SMENR_OPDB(o) (((o) & 0x3) << 20) +#define RPCIF_SMENR_SPIDB(o) (((o) & 0x3) << 16) +#define RPCIF_SMENR_DME BIT(15) +#define RPCIF_SMENR_CDE BIT(14) +#define RPCIF_SMENR_OCDE BIT(12) +#define RPCIF_SMENR_ADE(v) (((v) & 0xF) << 8) +#define RPCIF_SMENR_OPDE(v) (((v) & 0xF) << 4) 
+#define RPCIF_SMENR_SPIDE(v) (((v) & 0xF) << 0) + +#define RPCIF_SMRDR0 0x0038 /* R */ +#define RPCIF_SMRDR1 0x003C /* R */ +#define RPCIF_SMWDR0 0x0040 /* W */ +#define RPCIF_SMWDR1 0x0044 /* W */ + +#define RPCIF_CMNSR 0x0048 /* R */ +#define RPCIF_CMNSR_SSLF BIT(1) +#define RPCIF_CMNSR_TEND BIT(0) + +#define RPCIF_DRDMCR 0x0058 /* R/W */ +#define RPCIF_DMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0) + +#define RPCIF_DRDRENR 0x005C /* R/W */ +#define RPCIF_DRDRENR_HYPE(v) (((v) & 0x7) << 12) +#define RPCIF_DRDRENR_ADDRE BIT(8) +#define RPCIF_DRDRENR_OPDRE BIT(4) +#define RPCIF_DRDRENR_DRDRE BIT(0) + +#define RPCIF_SMDMCR 0x0060 /* R/W */ +#define RPCIF_SMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0) + +#define RPCIF_SMDRENR 0x0064 /* R/W */ +#define RPCIF_SMDRENR_HYPE(v) (((v) & 0x7) << 12) +#define RPCIF_SMDRENR_ADDRE BIT(8) +#define RPCIF_SMDRENR_OPDRE BIT(4) +#define RPCIF_SMDRENR_SPIDRE BIT(0) + +#define RPCIF_PHYADD 0x0070 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */ +#define RPCIF_PHYWR 0x0074 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */ + +#define RPCIF_PHYCNT 0x007C /* R/W */ +#define RPCIF_PHYCNT_CAL BIT(31) +#define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22) +#define RPCIF_PHYCNT_EXDS BIT(21) +#define RPCIF_PHYCNT_OCT BIT(20) +#define RPCIF_PHYCNT_DDRCAL BIT(19) +#define RPCIF_PHYCNT_HS BIT(18) +#define RPCIF_PHYCNT_CKSEL(v) (((v) & 0x3) << 16) /* valid only for RZ/G2L */ +#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15 | ((v) & 0x8) << 24) /* valid for R-Car and RZ/G2{E,H,M,N} */ + +#define RPCIF_PHYCNT_WBUF2 BIT(4) +#define RPCIF_PHYCNT_WBUF BIT(2) +#define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0) +#define RPCIF_PHYCNT_PHYMEM_MASK GENMASK(1, 0) + +#define RPCIF_PHYOFFSET1 0x0080 /* R/W */ +#define RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28) + +#define RPCIF_PHYOFFSET2 0x0084 /* R/W */ +#define RPCIF_PHYOFFSET2_OCTTMG(v) (((v) & 0x7) << 8) + +#define RPCIF_PHYINT 0x0088 /* R/W */ +#define RPCIF_PHYINT_WPVAL BIT(1) + +#endif /* __RENESAS_RPC_IF_REGS_H__ */ diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c index 15b4706aafee..4a417b693080 100644 --- a/drivers/memory/renesas-rpc-if.c +++ b/drivers/memory/renesas-rpc-if.c @@ -18,139 +18,8 @@ #include <memory/renesas-rpc-if.h> -#define RPCIF_CMNCR 0x0000 /* R/W */ -#define RPCIF_CMNCR_MD BIT(31) -#define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22) -#define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20) -#define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18) -#define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16) -#define RPCIF_CMNCR_MOIIO(val) (RPCIF_CMNCR_MOIIO0(val) | RPCIF_CMNCR_MOIIO1(val) | \ - RPCIF_CMNCR_MOIIO2(val) | RPCIF_CMNCR_MOIIO3(val)) -#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* documented for RZ/G2L */ -#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* documented for RZ/G2L */ -#define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8) -#define RPCIF_CMNCR_IOFV(val) (RPCIF_CMNCR_IO0FV(val) | RPCIF_CMNCR_IO2FV(val) | \ - RPCIF_CMNCR_IO3FV(val)) -#define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0) - -#define RPCIF_SSLDR 0x0004 /* R/W */ -#define RPCIF_SSLDR_SPNDL(d) (((d) & 0x7) << 16) -#define RPCIF_SSLDR_SLNDL(d) (((d) & 0x7) << 8) -#define RPCIF_SSLDR_SCKDL(d) (((d) & 0x7) << 0) - -#define RPCIF_DRCR 0x000C /* R/W */ -#define RPCIF_DRCR_SSLN BIT(24) -#define RPCIF_DRCR_RBURST(v) ((((v) - 1) & 0x1F) << 16) -#define RPCIF_DRCR_RCF BIT(9) -#define RPCIF_DRCR_RBE BIT(8) -#define RPCIF_DRCR_SSLE BIT(0) - -#define RPCIF_DRCMR 0x0010 /* R/W */ -#define RPCIF_DRCMR_CMD(c) 
(((c) & 0xFF) << 16) -#define RPCIF_DRCMR_OCMD(c) (((c) & 0xFF) << 0) - -#define RPCIF_DREAR 0x0014 /* R/W */ -#define RPCIF_DREAR_EAV(c) (((c) & 0xF) << 16) -#define RPCIF_DREAR_EAC(c) (((c) & 0x7) << 0) - -#define RPCIF_DROPR 0x0018 /* R/W */ - -#define RPCIF_DRENR 0x001C /* R/W */ -#define RPCIF_DRENR_CDB(o) (u32)((((o) & 0x3) << 30)) -#define RPCIF_DRENR_OCDB(o) (((o) & 0x3) << 28) -#define RPCIF_DRENR_ADB(o) (((o) & 0x3) << 24) -#define RPCIF_DRENR_OPDB(o) (((o) & 0x3) << 20) -#define RPCIF_DRENR_DRDB(o) (((o) & 0x3) << 16) -#define RPCIF_DRENR_DME BIT(15) -#define RPCIF_DRENR_CDE BIT(14) -#define RPCIF_DRENR_OCDE BIT(12) -#define RPCIF_DRENR_ADE(v) (((v) & 0xF) << 8) -#define RPCIF_DRENR_OPDE(v) (((v) & 0xF) << 4) - -#define RPCIF_SMCR 0x0020 /* R/W */ -#define RPCIF_SMCR_SSLKP BIT(8) -#define RPCIF_SMCR_SPIRE BIT(2) -#define RPCIF_SMCR_SPIWE BIT(1) -#define RPCIF_SMCR_SPIE BIT(0) - -#define RPCIF_SMCMR 0x0024 /* R/W */ -#define RPCIF_SMCMR_CMD(c) (((c) & 0xFF) << 16) -#define RPCIF_SMCMR_OCMD(c) (((c) & 0xFF) << 0) - -#define RPCIF_SMADR 0x0028 /* R/W */ - -#define RPCIF_SMOPR 0x002C /* R/W */ -#define RPCIF_SMOPR_OPD3(o) (((o) & 0xFF) << 24) -#define RPCIF_SMOPR_OPD2(o) (((o) & 0xFF) << 16) -#define RPCIF_SMOPR_OPD1(o) (((o) & 0xFF) << 8) -#define RPCIF_SMOPR_OPD0(o) (((o) & 0xFF) << 0) - -#define RPCIF_SMENR 0x0030 /* R/W */ -#define RPCIF_SMENR_CDB(o) (((o) & 0x3) << 30) -#define RPCIF_SMENR_OCDB(o) (((o) & 0x3) << 28) -#define RPCIF_SMENR_ADB(o) (((o) & 0x3) << 24) -#define RPCIF_SMENR_OPDB(o) (((o) & 0x3) << 20) -#define RPCIF_SMENR_SPIDB(o) (((o) & 0x3) << 16) -#define RPCIF_SMENR_DME BIT(15) -#define RPCIF_SMENR_CDE BIT(14) -#define RPCIF_SMENR_OCDE BIT(12) -#define RPCIF_SMENR_ADE(v) (((v) & 0xF) << 8) -#define RPCIF_SMENR_OPDE(v) (((v) & 0xF) << 4) -#define RPCIF_SMENR_SPIDE(v) (((v) & 0xF) << 0) - -#define RPCIF_SMRDR0 0x0038 /* R */ -#define RPCIF_SMRDR1 0x003C /* R */ -#define RPCIF_SMWDR0 0x0040 /* W */ -#define RPCIF_SMWDR1 0x0044 /* W */ - -#define RPCIF_CMNSR 0x0048 /* R */ -#define RPCIF_CMNSR_SSLF BIT(1) -#define RPCIF_CMNSR_TEND BIT(0) - -#define RPCIF_DRDMCR 0x0058 /* R/W */ -#define RPCIF_DMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0) - -#define RPCIF_DRDRENR 0x005C /* R/W */ -#define RPCIF_DRDRENR_HYPE(v) (((v) & 0x7) << 12) -#define RPCIF_DRDRENR_ADDRE BIT(8) -#define RPCIF_DRDRENR_OPDRE BIT(4) -#define RPCIF_DRDRENR_DRDRE BIT(0) - -#define RPCIF_SMDMCR 0x0060 /* R/W */ -#define RPCIF_SMDMCR_DMCYC(v) ((((v) - 1) & 0x1F) << 0) - -#define RPCIF_SMDRENR 0x0064 /* R/W */ -#define RPCIF_SMDRENR_HYPE(v) (((v) & 0x7) << 12) -#define RPCIF_SMDRENR_ADDRE BIT(8) -#define RPCIF_SMDRENR_OPDRE BIT(4) -#define RPCIF_SMDRENR_SPIDRE BIT(0) - -#define RPCIF_PHYADD 0x0070 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */ -#define RPCIF_PHYWR 0x0074 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */ - -#define RPCIF_PHYCNT 0x007C /* R/W */ -#define RPCIF_PHYCNT_CAL BIT(31) -#define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22) -#define RPCIF_PHYCNT_EXDS BIT(21) -#define RPCIF_PHYCNT_OCT BIT(20) -#define RPCIF_PHYCNT_DDRCAL BIT(19) -#define RPCIF_PHYCNT_HS BIT(18) -#define RPCIF_PHYCNT_CKSEL(v) (((v) & 0x3) << 16) /* valid only for RZ/G2L */ -#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15 | ((v) & 0x8) << 24) /* valid for R-Car and RZ/G2{E,H,M,N} */ - -#define RPCIF_PHYCNT_WBUF2 BIT(4) -#define RPCIF_PHYCNT_WBUF BIT(2) -#define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0) -#define RPCIF_PHYCNT_PHYMEM_MASK GENMASK(1, 0) - -#define RPCIF_PHYOFFSET1 0x0080 /* R/W */ -#define 
RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28) - -#define RPCIF_PHYOFFSET2 0x0084 /* R/W */ -#define RPCIF_PHYOFFSET2_OCTTMG(v) (((v) & 0x7) << 8) - -#define RPCIF_PHYINT 0x0088 /* R/W */ -#define RPCIF_PHYINT_WPVAL BIT(1) +#include "renesas-rpc-if-regs.h" +#include "renesas-xspi-if-regs.h" static const struct regmap_range rpcif_volatile_ranges[] = { regmap_reg_range(RPCIF_SMRDR0, RPCIF_SMRDR1), @@ -163,7 +32,31 @@ static const struct regmap_access_table rpcif_volatile_table = { .n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges), }; +static const struct regmap_range xspi_volatile_ranges[] = { + regmap_reg_range(XSPI_CDD0BUF0, XSPI_CDD0BUF0), +}; + +static const struct regmap_access_table xspi_volatile_table = { + .yes_ranges = xspi_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(xspi_volatile_ranges), +}; + +struct rpcif_priv; + +struct rpcif_impl { + int (*hw_init)(struct rpcif_priv *rpc, bool hyperflash); + void (*prepare)(struct rpcif_priv *rpc, const struct rpcif_op *op, + u64 *offs, size_t *len); + int (*manual_xfer)(struct rpcif_priv *rpc); + size_t (*dirmap_read)(struct rpcif_priv *rpc, u64 offs, size_t len, + void *buf); + u32 status_reg; + u32 status_mask; +}; + struct rpcif_info { + const struct regmap_config *regmap_config; + const struct rpcif_impl *impl; enum rpcif_type type; u8 strtim; }; @@ -180,6 +73,8 @@ struct rpcif_priv { enum rpcif_data_dir dir; u8 bus_size; u8 xfer_size; + u8 addr_nbytes; /* Specified for xSPI */ + u32 proto; /* Specified for xSPI */ void *buffer; u32 xferlen; u32 smcr; @@ -191,26 +86,6 @@ struct rpcif_priv { u32 ddr; /* DRDRENR or SMDRENR */ }; -static const struct rpcif_info rpcif_info_r8a7796 = { - .type = RPCIF_RCAR_GEN3, - .strtim = 6, -}; - -static const struct rpcif_info rpcif_info_gen3 = { - .type = RPCIF_RCAR_GEN3, - .strtim = 7, -}; - -static const struct rpcif_info rpcif_info_rz_g2l = { - .type = RPCIF_RZ_G2L, - .strtim = 7, -}; - -static const struct rpcif_info rpcif_info_gen4 = { - .type = RPCIF_RCAR_GEN4, - .strtim = 15, -}; - /* * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with * proper width. Requires rpcif_priv.xfer_size to be correctly set before! 
@@ -300,6 +175,33 @@ static const struct regmap_config rpcif_regmap_config = { .volatile_table = &rpcif_volatile_table, }; +static int xspi_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + struct rpcif_priv *xspi = context; + + *val = readl(xspi->base + reg); + return 0; +} + +static int xspi_reg_write(void *context, unsigned int reg, unsigned int val) +{ + struct rpcif_priv *xspi = context; + + writel(val, xspi->base + reg); + return 0; +} + +static const struct regmap_config xspi_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .reg_read = xspi_reg_read, + .reg_write = xspi_reg_write, + .fast_io = true, + .max_register = XSPI_INTE, + .volatile_table = &xspi_volatile_table, +}; + int rpcif_sw_init(struct rpcif *rpcif, struct device *dev) { struct rpcif_priv *rpc = dev_get_drvdata(dev); @@ -307,6 +209,7 @@ int rpcif_sw_init(struct rpcif *rpcif, struct device *dev) rpcif->dev = dev; rpcif->dirmap = rpc->dirmap; rpcif->size = rpc->size; + rpcif->xspi = rpc->info->type == XSPI_RZ_G3E; return 0; } EXPORT_SYMBOL(rpcif_sw_init); @@ -325,16 +228,11 @@ static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif_priv *rpc) regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032); } -int rpcif_hw_init(struct device *dev, bool hyperflash) +static int rpcif_hw_init_impl(struct rpcif_priv *rpc, bool hyperflash) { - struct rpcif_priv *rpc = dev_get_drvdata(dev); u32 dummy; int ret; - ret = pm_runtime_resume_and_get(dev); - if (ret) - return ret; - if (rpc->info->type == RPCIF_RZ_G2L) { ret = reset_control_reset(rpc->rstc); if (ret) @@ -382,21 +280,62 @@ int rpcif_hw_init(struct device *dev, bool hyperflash) regmap_write(rpc->regmap, RPCIF_SSLDR, RPCIF_SSLDR_SPNDL(7) | RPCIF_SSLDR_SLNDL(7) | RPCIF_SSLDR_SCKDL(7)); - pm_runtime_put(dev); - rpc->bus_size = hyperflash ? 2 : 1; return 0; } + +static int xspi_hw_init_impl(struct rpcif_priv *xspi, bool hyperflash) +{ + int ret; + + ret = reset_control_reset(xspi->rstc); + if (ret) + return ret; + + regmap_write(xspi->regmap, XSPI_WRAPCFG, 0x0); + + regmap_update_bits(xspi->regmap, XSPI_LIOCFGCS0, + XSPI_LIOCFG_PRTMD(0x3ff) | XSPI_LIOCFG_CSMIN(0xf) | + XSPI_LIOCFG_CSASTEX | XSPI_LIOCFG_CSNEGEX, + XSPI_LIOCFG_PRTMD(0) | XSPI_LIOCFG_CSMIN(0) | + XSPI_LIOCFG_CSASTEX | XSPI_LIOCFG_CSNEGEX); + + regmap_update_bits(xspi->regmap, XSPI_CCCTL0CS0, XSPI_CCCTL0_CAEN, 0); + + regmap_update_bits(xspi->regmap, XSPI_CDCTL0, + XSPI_CDCTL0_TRREQ | XSPI_CDCTL0_CSSEL, 0); + + regmap_update_bits(xspi->regmap, XSPI_INTE, XSPI_INTE_CMDCMPE, + XSPI_INTE_CMDCMPE); + + return 0; +} + +int rpcif_hw_init(struct device *dev, bool hyperflash) +{ + struct rpcif_priv *rpc = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret) + return ret; + + ret = rpc->info->impl->hw_init(rpc, hyperflash); + + pm_runtime_put(dev); + + return ret; +} EXPORT_SYMBOL(rpcif_hw_init); static int wait_msg_xfer_end(struct rpcif_priv *rpc) { u32 sts; - return regmap_read_poll_timeout(rpc->regmap, RPCIF_CMNSR, sts, - sts & RPCIF_CMNSR_TEND, 0, - USEC_PER_SEC); + return regmap_read_poll_timeout(rpc->regmap, rpc->info->impl->status_reg, + sts, sts & rpc->info->impl->status_mask, + 0, USEC_PER_SEC); } static u8 rpcif_bits_set(struct rpcif_priv *rpc, u32 nbytes) @@ -412,11 +351,9 @@ static u8 rpcif_bit_size(u8 buswidth) return buswidth > 4 ? 
2 : ilog2(buswidth); } -void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs, - size_t *len) +static void rpcif_prepare_impl(struct rpcif_priv *rpc, const struct rpcif_op *op, + u64 *offs, size_t *len) { - struct rpcif_priv *rpc = dev_get_drvdata(dev); - rpc->smcr = 0; rpc->smadr = 0; rpc->enable = 0; @@ -497,18 +434,76 @@ void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs, rpc->enable |= RPCIF_SMENR_SPIDB(rpcif_bit_size(op->data.buswidth)); } } -EXPORT_SYMBOL(rpcif_prepare); -int rpcif_manual_xfer(struct device *dev) +static void xspi_prepare_impl(struct rpcif_priv *xspi, const struct rpcif_op *op, + u64 *offs, size_t *len) +{ + xspi->smadr = 0; + xspi->addr_nbytes = 0; + xspi->command = 0; + xspi->option = 0; + xspi->dummy = 0; + xspi->xferlen = 0; + xspi->proto = 0; + + if (op->cmd.buswidth) + xspi->command = op->cmd.opcode; + + if (op->ocmd.buswidth) + xspi->command = (xspi->command << 8) | op->ocmd.opcode; + + if (op->addr.buswidth) { + xspi->addr_nbytes = op->addr.nbytes; + if (offs && len) + xspi->smadr = *offs; + else + xspi->smadr = op->addr.val; + } + + if (op->dummy.buswidth) + xspi->dummy = op->dummy.ncycles; + + xspi->dir = op->data.dir; + if (op->data.buswidth) { + u32 nbytes; + + xspi->buffer = op->data.buf.in; + + if (offs && len) + nbytes = *len; + else + nbytes = op->data.nbytes; + xspi->xferlen = nbytes; + } + + if (op->cmd.buswidth == 1) { + if (op->addr.buswidth == 2 || op->data.buswidth == 2) + xspi->proto = PROTO_1S_2S_2S; + else if (op->addr.buswidth == 4 || op->data.buswidth == 4) + xspi->proto = PROTO_1S_4S_4S; + } else if (op->cmd.buswidth == 2 && + (op->addr.buswidth == 2 || op->data.buswidth == 2)) { + xspi->proto = PROTO_2S_2S_2S; + } else if (op->cmd.buswidth == 4 && + (op->addr.buswidth == 4 || op->data.buswidth == 4)) { + xspi->proto = PROTO_4S_4S_4S; + } +} + +void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs, + size_t *len) { struct rpcif_priv *rpc = dev_get_drvdata(dev); + + rpc->info->impl->prepare(rpc, op, offs, len); +} +EXPORT_SYMBOL(rpcif_prepare); + +static int rpcif_manual_xfer_impl(struct rpcif_priv *rpc) +{ u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 
8 : 4; int ret = 0; - ret = pm_runtime_resume_and_get(dev); - if (ret < 0) - return ret; - regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_CAL, RPCIF_PHYCNT_CAL); regmap_update_bits(rpc->regmap, RPCIF_CMNCR, @@ -616,15 +611,169 @@ int rpcif_manual_xfer(struct device *dev) goto err_out; } -exit: - pm_runtime_put(dev); return ret; err_out: if (reset_control_reset(rpc->rstc)) - dev_err(dev, "Failed to reset HW\n"); - rpcif_hw_init(dev, rpc->bus_size == 2); - goto exit; + dev_err(rpc->dev, "Failed to reset HW\n"); + rpcif_hw_init_impl(rpc, rpc->bus_size == 2); + return ret; +} + +static int xspi_manual_xfer_impl(struct rpcif_priv *xspi) +{ + u32 pos = 0, max = 8; + int ret = 0; + + regmap_update_bits(xspi->regmap, XSPI_CDCTL0, XSPI_CDCTL0_TRNUM(0x3), + XSPI_CDCTL0_TRNUM(0)); + + regmap_update_bits(xspi->regmap, XSPI_CDCTL0, XSPI_CDCTL0_TRREQ, 0); + + regmap_write(xspi->regmap, XSPI_CDTBUF0, + XSPI_CDTBUF_CMDSIZE(0x1) | XSPI_CDTBUF_CMD_FIELD(xspi->command)); + + regmap_write(xspi->regmap, XSPI_CDABUF0, 0); + + regmap_update_bits(xspi->regmap, XSPI_CDTBUF0, XSPI_CDTBUF_ADDSIZE(0x7), + XSPI_CDTBUF_ADDSIZE(xspi->addr_nbytes)); + + regmap_write(xspi->regmap, XSPI_CDABUF0, xspi->smadr); + + regmap_update_bits(xspi->regmap, XSPI_LIOCFGCS0, XSPI_LIOCFG_PRTMD(0x3ff), + XSPI_LIOCFG_PRTMD(xspi->proto)); + + switch (xspi->dir) { + case RPCIF_DATA_OUT: + while (pos < xspi->xferlen) { + u32 bytes_left = xspi->xferlen - pos; + u32 nbytes, data[2], *p = data; + + regmap_update_bits(xspi->regmap, XSPI_CDTBUF0, + XSPI_CDTBUF_TRTYPE, XSPI_CDTBUF_TRTYPE); + + nbytes = bytes_left >= max ? max : bytes_left; + + regmap_update_bits(xspi->regmap, XSPI_CDTBUF0, + XSPI_CDTBUF_DATASIZE(0xf), + XSPI_CDTBUF_DATASIZE(nbytes)); + + regmap_update_bits(xspi->regmap, XSPI_CDTBUF0, + XSPI_CDTBUF_ADDSIZE(0x7), + XSPI_CDTBUF_ADDSIZE(xspi->addr_nbytes)); + + memcpy(data, xspi->buffer + pos, nbytes); + + if (nbytes > 4) { + regmap_write(xspi->regmap, XSPI_CDD0BUF0, *p++); + regmap_write(xspi->regmap, XSPI_CDD1BUF0, *p); + } else { + regmap_write(xspi->regmap, XSPI_CDD0BUF0, *p); + } + + regmap_write(xspi->regmap, XSPI_CDABUF0, xspi->smadr + pos); + + regmap_update_bits(xspi->regmap, XSPI_CDCTL0, + XSPI_CDCTL0_TRREQ, XSPI_CDCTL0_TRREQ); + + ret = wait_msg_xfer_end(xspi); + if (ret) + goto err_out; + + regmap_update_bits(xspi->regmap, XSPI_INTC, + XSPI_INTC_CMDCMPC, XSPI_INTC_CMDCMPC); + + pos += nbytes; + } + regmap_update_bits(xspi->regmap, XSPI_CDCTL0, XSPI_CDCTL0_TRREQ, 0); + break; + case RPCIF_DATA_IN: + while (pos < xspi->xferlen) { + u32 bytes_left = xspi->xferlen - pos; + u32 nbytes, data[2], *p = data; + + regmap_update_bits(xspi->regmap, XSPI_CDTBUF0, + XSPI_CDTBUF_TRTYPE, + ~(u32)XSPI_CDTBUF_TRTYPE); + + /* nbytes can be up to 8 bytes */ + nbytes = bytes_left >= max ? 
max : bytes_left; + + regmap_update_bits(xspi->regmap, XSPI_CDTBUF0, + XSPI_CDTBUF_DATASIZE(0xf), + XSPI_CDTBUF_DATASIZE(nbytes)); + + regmap_update_bits(xspi->regmap, XSPI_CDTBUF0, + XSPI_CDTBUF_ADDSIZE(0x7), + XSPI_CDTBUF_ADDSIZE(xspi->addr_nbytes)); + + if (xspi->addr_nbytes) + regmap_write(xspi->regmap, XSPI_CDABUF0, + xspi->smadr + pos); + + regmap_update_bits(xspi->regmap, XSPI_CDTBUF0, + XSPI_CDTBUF_LATE(0x1f), + XSPI_CDTBUF_LATE(xspi->dummy)); + + regmap_update_bits(xspi->regmap, XSPI_CDCTL0, + XSPI_CDCTL0_TRREQ, XSPI_CDCTL0_TRREQ); + + ret = wait_msg_xfer_end(xspi); + if (ret) + goto err_out; + + if (nbytes > 4) { + regmap_read(xspi->regmap, XSPI_CDD0BUF0, p++); + regmap_read(xspi->regmap, XSPI_CDD1BUF0, p); + } else { + regmap_read(xspi->regmap, XSPI_CDD0BUF0, p); + } + + memcpy(xspi->buffer + pos, data, nbytes); + + regmap_update_bits(xspi->regmap, XSPI_INTC, + XSPI_INTC_CMDCMPC, XSPI_INTC_CMDCMPC); + + pos += nbytes; + } + regmap_update_bits(xspi->regmap, XSPI_CDCTL0, + XSPI_CDCTL0_TRREQ, 0); + break; + default: + regmap_update_bits(xspi->regmap, XSPI_CDTBUF0, + XSPI_CDTBUF_TRTYPE, XSPI_CDTBUF_TRTYPE); + regmap_update_bits(xspi->regmap, XSPI_CDCTL0, + XSPI_CDCTL0_TRREQ, XSPI_CDCTL0_TRREQ); + + ret = wait_msg_xfer_end(xspi); + if (ret) + goto err_out; + + regmap_update_bits(xspi->regmap, XSPI_INTC, + XSPI_INTC_CMDCMPC, XSPI_INTC_CMDCMPC); + } + + return ret; + +err_out: + xspi_hw_init_impl(xspi, false); + return ret; +} + +int rpcif_manual_xfer(struct device *dev) +{ + struct rpcif_priv *rpc = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret) + return ret; + + ret = rpc->info->impl->manual_xfer(rpc); + + pm_runtime_put(dev); + + return ret; } EXPORT_SYMBOL(rpcif_manual_xfer); @@ -670,20 +819,15 @@ static void memcpy_fromio_readw(void *to, } } -ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf) +static size_t rpcif_dirmap_read_impl(struct rpcif_priv *rpc, u64 offs, + size_t len, void *buf) { - struct rpcif_priv *rpc = dev_get_drvdata(dev); loff_t from = offs & (rpc->size - 1); size_t size = rpc->size - from; - int ret; if (len > size) len = size; - ret = pm_runtime_resume_and_get(dev); - if (ret < 0) - return ret; - regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MD, 0); regmap_write(rpc->regmap, RPCIF_DRCR, 0); regmap_write(rpc->regmap, RPCIF_DRCMR, rpc->command); @@ -700,12 +844,129 @@ ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf) else memcpy_fromio(buf, rpc->dirmap + from, len); - pm_runtime_put(dev); + return len; +} + +static size_t xspi_dirmap_read_impl(struct rpcif_priv *xspi, u64 offs, + size_t len, void *buf) +{ + loff_t from = offs & (xspi->size - 1); + size_t size = xspi->size - from; + u8 addsize = xspi->addr_nbytes - 1; + + if (len > size) + len = size; + + regmap_update_bits(xspi->regmap, XSPI_CMCFG0CS0, + XSPI_CMCFG0_FFMT(0x3) | XSPI_CMCFG0_ADDSIZE(0x3), + XSPI_CMCFG0_FFMT(0) | XSPI_CMCFG0_ADDSIZE(addsize)); + + regmap_update_bits(xspi->regmap, XSPI_CMCFG1CS0, + XSPI_CMCFG1_RDCMD(0xffff) | XSPI_CMCFG1_RDLATE(0x1f), + XSPI_CMCFG1_RDCMD_UPPER_BYTE(xspi->command) | + XSPI_CMCFG1_RDLATE(xspi->dummy)); + + regmap_update_bits(xspi->regmap, XSPI_BMCTL0, XSPI_BMCTL0_CS0ACC(0xff), + XSPI_BMCTL0_CS0ACC(0x01)); + + regmap_update_bits(xspi->regmap, XSPI_BMCFG, + XSPI_BMCFG_WRMD | XSPI_BMCFG_MWRCOMB | + XSPI_BMCFG_MWRSIZE(0xff) | XSPI_BMCFG_PREEN, + 0 | XSPI_BMCFG_MWRCOMB | XSPI_BMCFG_MWRSIZE(0x0f) | + XSPI_BMCFG_PREEN); + + regmap_update_bits(xspi->regmap, XSPI_LIOCFGCS0, 
XSPI_LIOCFG_PRTMD(0x3ff), + XSPI_LIOCFG_PRTMD(xspi->proto)); + + memcpy_fromio(buf, xspi->dirmap + from, len); return len; } + +ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf) +{ + struct rpcif_priv *rpc = dev_get_drvdata(dev); + size_t read; + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret) + return ret; + + read = rpc->info->impl->dirmap_read(rpc, offs, len, buf); + + pm_runtime_put(dev); + + return read; +} EXPORT_SYMBOL(rpcif_dirmap_read); +/** + * xspi_dirmap_write - Write data to xspi memory. + * @dev: xspi device + * @offs: offset + * @len: Number of bytes to be written. + * @buf: Buffer holding write data. + * + * This function writes data into xspi memory. + * + * Returns number of bytes written on success, else negative errno. + */ +ssize_t xspi_dirmap_write(struct device *dev, u64 offs, size_t len, const void *buf) +{ + struct rpcif_priv *xspi = dev_get_drvdata(dev); + loff_t from = offs & (xspi->size - 1); + u8 addsize = xspi->addr_nbytes - 1; + size_t size = xspi->size - from; + ssize_t writebytes; + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret) + return ret; + + if (len > size) + len = size; + + if (len > MWRSIZE_MAX) + writebytes = MWRSIZE_MAX; + else + writebytes = len; + + regmap_update_bits(xspi->regmap, XSPI_CMCFG0CS0, + XSPI_CMCFG0_FFMT(0x3) | XSPI_CMCFG0_ADDSIZE(0x3), + XSPI_CMCFG0_FFMT(0) | XSPI_CMCFG0_ADDSIZE(addsize)); + + regmap_update_bits(xspi->regmap, XSPI_CMCFG2CS0, + XSPI_CMCFG2_WRCMD_UPPER(0xff) | XSPI_CMCFG2_WRLATE(0x1f), + XSPI_CMCFG2_WRCMD_UPPER(xspi->command) | + XSPI_CMCFG2_WRLATE(xspi->dummy)); + + regmap_update_bits(xspi->regmap, XSPI_BMCTL0, + XSPI_BMCTL0_CS0ACC(0xff), XSPI_BMCTL0_CS0ACC(0x03)); + + regmap_update_bits(xspi->regmap, XSPI_BMCFG, + XSPI_BMCFG_WRMD | XSPI_BMCFG_MWRCOMB | + XSPI_BMCFG_MWRSIZE(0xff) | XSPI_BMCFG_PREEN, + 0 | XSPI_BMCFG_MWRCOMB | XSPI_BMCFG_MWRSIZE(0x0f) | + XSPI_BMCFG_PREEN); + + regmap_update_bits(xspi->regmap, XSPI_LIOCFGCS0, XSPI_LIOCFG_PRTMD(0x3ff), + XSPI_LIOCFG_PRTMD(xspi->proto)); + + memcpy_toio(xspi->dirmap + from, buf, writebytes); + + /* Request to push the pending data */ + if (writebytes < MWRSIZE_MAX) + regmap_update_bits(xspi->regmap, XSPI_BMCTL1, + XSPI_BMCTL1_MWRPUSH, XSPI_BMCTL1_MWRPUSH); + + pm_runtime_put(dev); + + return writebytes; +} +EXPORT_SYMBOL_GPL(xspi_dirmap_write); + static int rpcif_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -740,8 +1001,8 @@ static int rpcif_probe(struct platform_device *pdev) rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs"); if (IS_ERR(rpc->base)) return PTR_ERR(rpc->base); - - rpc->regmap = devm_regmap_init(dev, NULL, rpc, &rpcif_regmap_config); + rpc->info = of_device_get_match_data(dev); + rpc->regmap = devm_regmap_init(dev, NULL, rpc, rpc->info->regmap_config); if (IS_ERR(rpc->regmap)) { dev_err(dev, "failed to init regmap for rpcif, error %ld\n", PTR_ERR(rpc->regmap)); @@ -754,11 +1015,29 @@ static int rpcif_probe(struct platform_device *pdev) return PTR_ERR(rpc->dirmap); rpc->size = resource_size(res); - rpc->info = of_device_get_match_data(dev); - rpc->rstc = devm_reset_control_get_exclusive(dev, NULL); + rpc->rstc = devm_reset_control_array_get_exclusive(dev); if (IS_ERR(rpc->rstc)) return PTR_ERR(rpc->rstc); + /* + * The enabling/disabling of spi/spix2 clocks at runtime leading to + * flash write failure. So, enable these clocks during probe() and + * disable it in remove(). 
+ */ + if (rpc->info->type == XSPI_RZ_G3E) { + struct clk *spi_clk; + + spi_clk = devm_clk_get_enabled(dev, "spix2"); + if (IS_ERR(spi_clk)) + return dev_err_probe(dev, PTR_ERR(spi_clk), + "cannot get enabled spix2 clk\n"); + + spi_clk = devm_clk_get_enabled(dev, "spi"); + if (IS_ERR(spi_clk)) + return dev_err_probe(dev, PTR_ERR(spi_clk), + "cannot get enabled spi clk\n"); + } + vdev = platform_device_alloc(name, pdev->id); if (!vdev) return -ENOMEM; @@ -784,8 +1063,61 @@ static void rpcif_remove(struct platform_device *pdev) platform_device_unregister(rpc->vdev); } +static const struct rpcif_impl rpcif_impl = { + .hw_init = rpcif_hw_init_impl, + .prepare = rpcif_prepare_impl, + .manual_xfer = rpcif_manual_xfer_impl, + .dirmap_read = rpcif_dirmap_read_impl, + .status_reg = RPCIF_CMNSR, + .status_mask = RPCIF_CMNSR_TEND, +}; + +static const struct rpcif_impl xspi_impl = { + .hw_init = xspi_hw_init_impl, + .prepare = xspi_prepare_impl, + .manual_xfer = xspi_manual_xfer_impl, + .dirmap_read = xspi_dirmap_read_impl, + .status_reg = XSPI_INTS, + .status_mask = XSPI_INTS_CMDCMP, +}; + +static const struct rpcif_info rpcif_info_r8a7796 = { + .regmap_config = &rpcif_regmap_config, + .impl = &rpcif_impl, + .type = RPCIF_RCAR_GEN3, + .strtim = 6, +}; + +static const struct rpcif_info rpcif_info_gen3 = { + .regmap_config = &rpcif_regmap_config, + .impl = &rpcif_impl, + .type = RPCIF_RCAR_GEN3, + .strtim = 7, +}; + +static const struct rpcif_info rpcif_info_rz_g2l = { + .regmap_config = &rpcif_regmap_config, + .impl = &rpcif_impl, + .type = RPCIF_RZ_G2L, + .strtim = 7, +}; + +static const struct rpcif_info rpcif_info_gen4 = { + .regmap_config = &rpcif_regmap_config, + .impl = &rpcif_impl, + .type = RPCIF_RCAR_GEN4, + .strtim = 15, +}; + +static const struct rpcif_info xspi_info_r9a09g047 = { + .regmap_config = &xspi_regmap_config, + .impl = &xspi_impl, + .type = XSPI_RZ_G3E, +}; + static const struct of_device_id rpcif_of_match[] = { { .compatible = "renesas,r8a7796-rpc-if", .data = &rpcif_info_r8a7796 }, + { .compatible = "renesas,r9a09g047-xspi", .data = &xspi_info_r9a09g047 }, { .compatible = "renesas,rcar-gen3-rpc-if", .data = &rpcif_info_gen3 }, { .compatible = "renesas,rcar-gen4-rpc-if", .data = &rpcif_info_gen4 }, { .compatible = "renesas,rzg2l-rpc-if", .data = &rpcif_info_rz_g2l }, diff --git a/drivers/memory/renesas-xspi-if-regs.h b/drivers/memory/renesas-xspi-if-regs.h new file mode 100644 index 000000000000..53f801d591f2 --- /dev/null +++ b/drivers/memory/renesas-xspi-if-regs.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * RZ xSPI Interface Registers Definitions + * + * Copyright (C) 2025 Renesas Electronics Corporation + */ + +#ifndef __RENESAS_XSPI_IF_REGS_H__ +#define __RENESAS_XSPI_IF_REGS_H__ + +#include <linux/bits.h> + +/* xSPI Wrapper Configuration Register */ +#define XSPI_WRAPCFG 0x0000 + +/* xSPI Bridge Configuration Register */ +#define XSPI_BMCFG 0x0008 +#define XSPI_BMCFG_WRMD BIT(0) +#define XSPI_BMCFG_MWRCOMB BIT(7) +#define XSPI_BMCFG_MWRSIZE(val) (((val) & 0xff) << 8) +#define XSPI_BMCFG_PREEN BIT(16) + +/* xSPI Command Map Configuration Register 0 CS0 */ +#define XSPI_CMCFG0CS0 0x0010 +#define XSPI_CMCFG0_FFMT(val) (((val) & 0x03) << 0) +#define XSPI_CMCFG0_ADDSIZE(val) (((val) & 0x03) << 2) + +/* xSPI Command Map Configuration Register 1 CS0 */ +#define XSPI_CMCFG1CS0 0x0014 +#define XSPI_CMCFG1_RDCMD(val) (((val) & 0xffff) << 0) +#define XSPI_CMCFG1_RDCMD_UPPER_BYTE(val) (((val) & 0xff) << 8) +#define XSPI_CMCFG1_RDLATE(val) (((val) & 0x1f) << 16) + 
+/* xSPI Command Map Configuration Register 2 CS0 */ +#define XSPI_CMCFG2CS0 0x0018 +#define XSPI_CMCFG2_WRCMD(val) (((val) & 0xffff) << 0) +#define XSPI_CMCFG2_WRCMD_UPPER(val) (((val) & 0xff) << 8) +#define XSPI_CMCFG2_WRLATE(val) (((val) & 0x1f) << 16) + +/* xSPI Link I/O Configuration Register CS0 */ +#define XSPI_LIOCFGCS0 0x0050 +#define XSPI_LIOCFG_PRTMD(val) (((val) & 0x3ff) << 0) +#define XSPI_LIOCFG_CSMIN(val) (((val) & 0x0f) << 16) +#define XSPI_LIOCFG_CSASTEX BIT(20) +#define XSPI_LIOCFG_CSNEGEX BIT(21) + +/* xSPI Bridge Map Control Register 0 */ +#define XSPI_BMCTL0 0x0060 +#define XSPI_BMCTL0_CS0ACC(val) (((val) & 0x03) << 0) + +/* xSPI Bridge Map Control Register 1 */ +#define XSPI_BMCTL1 0x0064 +#define XSPI_BMCTL1_MWRPUSH BIT(8) + +/* xSPI Command Manual Control Register 0 */ +#define XSPI_CDCTL0 0x0070 +#define XSPI_CDCTL0_TRREQ BIT(0) +#define XSPI_CDCTL0_CSSEL BIT(3) +#define XSPI_CDCTL0_TRNUM(val) (((val) & 0x03) << 4) + +/* xSPI Command Manual Type Buf */ +#define XSPI_CDTBUF0 0x0080 +#define XSPI_CDTBUF_CMDSIZE(val) (((val) & 0x03) << 0) +#define XSPI_CDTBUF_ADDSIZE(val) (((val) & 0x07) << 2) +#define XSPI_CDTBUF_DATASIZE(val) (((val) & 0x0f) << 5) +#define XSPI_CDTBUF_LATE(val) (((val) & 0x1f) << 9) +#define XSPI_CDTBUF_TRTYPE BIT(15) +#define XSPI_CDTBUF_CMD(val) (((val) & 0xffff) << 16) +#define XSPI_CDTBUF_CMD_FIELD(val) (((val) & 0xff) << 24) + +/* xSPI Command Manual Address Buff */ +#define XSPI_CDABUF0 0x0084 + +/* xSPI Command Manual Data 0 Buf */ +#define XSPI_CDD0BUF0 0x0088 + +/* xSPI Command Manual Data 1 Buf */ +#define XSPI_CDD1BUF0 0x008c + +/* xSPI Command Calibration Control Register 0 CS0 */ +#define XSPI_CCCTL0CS0 0x0130 +#define XSPI_CCCTL0_CAEN BIT(0) + +/* xSPI Interrupt Status Register */ +#define XSPI_INTS 0x0190 +#define XSPI_INTS_CMDCMP BIT(0) + +/* xSPI Interrupt Clear Register */ +#define XSPI_INTC 0x0194 +#define XSPI_INTC_CMDCMPC BIT(0) + +/* xSPI Interrupt Enable Register */ +#define XSPI_INTE 0x0198 +#define XSPI_INTE_CMDCMPE BIT(0) + +/* Maximum data size of MWRSIZE*/ +#define MWRSIZE_MAX 64 + +/* xSPI Protocol mode */ +#define PROTO_1S_2S_2S 0x48 +#define PROTO_2S_2S_2S 0x49 +#define PROTO_1S_4S_4S 0x090 +#define PROTO_4S_4S_4S 0x092 + +#endif /* __RENESAS_XSPI_IF_REGS_H__ */ diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index ed38f6d41f47..c51da3fc3604 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -1266,7 +1266,9 @@ config SPI_ZYNQMP_GQSPI config SPI_AMD tristate "AMD SPI controller" - depends on SPI_MASTER || COMPILE_TEST + depends on PCI + depends on SPI_MASTER || X86 || COMPILE_TEST + depends on SPI_MEM help Enables SPI controller driver for AMD SoC. 
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index c3a1a47b3bf4..4ea89f6fc531 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -162,7 +162,7 @@ obj-$(CONFIG_SPI_XLP) += spi-xlp.o obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o obj-$(CONFIG_SPI_ZYNQ_QSPI) += spi-zynq-qspi.o obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o -obj-$(CONFIG_SPI_AMD) += spi-amd.o +obj-$(CONFIG_SPI_AMD) += spi-amd.o spi-amd-pci.o # SPI slave protocol handlers obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 244ac0106862..2f6797324227 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -1287,9 +1287,9 @@ static int atmel_qspi_dma_init(struct spi_controller *ctrl) aq->rx_chan = dma_request_chan(&aq->pdev->dev, "rx"); if (IS_ERR(aq->rx_chan)) { - aq->rx_chan = NULL; - return dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan), - "RX DMA channel is not available\n"); + ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan), + "RX DMA channel is not available\n"); + goto null_rx_chan; } aq->tx_chan = dma_request_chan(&aq->pdev->dev, "tx"); @@ -1310,8 +1310,9 @@ static int atmel_qspi_dma_init(struct spi_controller *ctrl) release_rx_chan: dma_release_channel(aq->rx_chan); - aq->rx_chan = NULL; aq->tx_chan = NULL; +null_rx_chan: + aq->rx_chan = NULL; return ret; } @@ -1436,22 +1437,17 @@ static int atmel_qspi_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(&pdev->dev, 500); pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - pm_runtime_get_noresume(&pdev->dev); + devm_pm_runtime_set_active_enabled(&pdev->dev); + devm_pm_runtime_get_noresume(&pdev->dev); err = atmel_qspi_init(aq); if (err) goto dma_release; err = spi_register_controller(ctrl); - if (err) { - pm_runtime_put_noidle(&pdev->dev); - pm_runtime_disable(&pdev->dev); - pm_runtime_set_suspended(&pdev->dev); - pm_runtime_dont_use_autosuspend(&pdev->dev); + if (err) goto dma_release; - } + pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(&pdev->dev); @@ -1530,10 +1526,6 @@ static void atmel_qspi_remove(struct platform_device *pdev) */ dev_warn(&pdev->dev, "Failed to resume device on remove\n"); } - - pm_runtime_disable(&pdev->dev); - pm_runtime_dont_use_autosuspend(&pdev->dev); - pm_runtime_put_noidle(&pdev->dev); } static int __maybe_unused atmel_qspi_suspend(struct device *dev) diff --git a/drivers/spi/spi-amd-pci.c b/drivers/spi/spi-amd-pci.c new file mode 100644 index 000000000000..e5faab414c17 --- /dev/null +++ b/drivers/spi/spi-amd-pci.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD SPI controller driver + * + * Copyright (c) 2025, Advanced Micro Devices, Inc. + * All Rights Reserved. 
+ * + * Authors: Krishnamoorthi M <krishnamoorthi.m@amd.com> + * Akshata MukundShetty <akshata.mukundshetty@amd.com> + */ + +#include <linux/init.h> +#include <linux/spi/spi.h> +#include <linux/pci.h> + +#include "spi-amd.h" + +#define AMD_PCI_DEVICE_ID_LPC_BRIDGE 0x1682 +#define AMD_PCI_LPC_SPI_BASE_ADDR_REG 0xA0 +#define AMD_SPI_BASE_ADDR_MASK ~0xFF +#define AMD_HID2_PCI_BAR_OFFSET 0x00002000 +#define AMD_HID2_MEM_SIZE 0x200 + +static struct pci_device_id pci_spi_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_PCI_DEVICE_ID_LPC_BRIDGE) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, pci_spi_ids); + +static int amd_spi_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct spi_controller *host; + struct amd_spi *amd_spi; + u32 io_base_addr; + + /* Allocate storage for host and driver private data */ + host = devm_spi_alloc_host(dev, sizeof(struct amd_spi)); + if (!host) + return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n"); + + amd_spi = spi_controller_get_devdata(host); + + pci_read_config_dword(pdev, AMD_PCI_LPC_SPI_BASE_ADDR_REG, &io_base_addr); + io_base_addr = (io_base_addr & AMD_SPI_BASE_ADDR_MASK) + AMD_HID2_PCI_BAR_OFFSET; + amd_spi->io_remap_addr = devm_ioremap(dev, io_base_addr, AMD_HID2_MEM_SIZE); + + if (!amd_spi->io_remap_addr) + return dev_err_probe(dev, -ENOMEM, + "ioremap of SPI registers failed\n"); + + dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr); + + amd_spi->version = AMD_HID2_SPI; + host->bus_num = 2; + + return amd_spi_probe_common(dev, host); +} + +static struct pci_driver amd_spi_pci_driver = { + .name = "amd_spi_pci", + .id_table = pci_spi_ids, + .probe = amd_spi_pci_probe, +}; + +module_pci_driver(amd_spi_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("AMD HID2 SPI Controller Driver"); diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c index 17fc0b17e756..02e7fe095a0b 100644 --- a/drivers/spi/spi-amd.c +++ b/drivers/spi/spi-amd.c @@ -17,6 +17,8 @@ #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h> +#include "spi-amd.h" + #define AMD_SPI_CTRL0_REG 0x00 #define AMD_SPI_EXEC_CMD BIT(16) #define AMD_SPI_FIFO_CLEAR BIT(20) @@ -52,10 +54,13 @@ #define AMD_SPI_SPD7_MASK GENMASK(13, AMD_SPI_SPD7_SHIFT) #define AMD_SPI_HID2_INPUT_RING_BUF0 0X100 +#define AMD_SPI_HID2_OUTPUT_BUF0 0x140 #define AMD_SPI_HID2_CNTRL 0x150 #define AMD_SPI_HID2_INT_STATUS 0x154 #define AMD_SPI_HID2_CMD_START 0x156 #define AMD_SPI_HID2_INT_MASK 0x158 +#define AMD_SPI_HID2_WRITE_CNTRL0 0x160 +#define AMD_SPI_HID2_WRITE_CNTRL1 0x164 #define AMD_SPI_HID2_READ_CNTRL0 0x170 #define AMD_SPI_HID2_READ_CNTRL1 0x174 #define AMD_SPI_HID2_READ_CNTRL2 0x180 @@ -81,17 +86,9 @@ #define AMD_SPI_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */ #define AMD_SPI_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */ -/** - * enum amd_spi_versions - SPI controller versions - * @AMD_SPI_V1: AMDI0061 hardware version - * @AMD_SPI_V2: AMDI0062 hardware version - * @AMD_HID2_SPI: AMDI0063 hardware version - */ -enum amd_spi_versions { - AMD_SPI_V1 = 1, - AMD_SPI_V2, - AMD_HID2_SPI, -}; +/* SPINAND write command opcodes */ +#define AMD_SPI_OP_PP 0x02 /* Page program */ +#define AMD_SPI_OP_PP_RANDOM 0x84 /* Page program */ enum amd_spi_speed { F_66_66MHz, @@ -118,22 +115,6 @@ struct amd_spi_freq { u32 spd7_val; }; -/** - * struct amd_spi - SPI driver instance - * @io_remap_addr: Start address of the SPI controller registers - * @phy_dma_buf: Physical address of DMA buffer - * @dma_virt_addr: 
Virtual address of DMA buffer - * @version: SPI controller hardware version - * @speed_hz: Device frequency - */ -struct amd_spi { - void __iomem *io_remap_addr; - dma_addr_t phy_dma_buf; - void *dma_virt_addr; - enum amd_spi_versions version; - unsigned int speed_hz; -}; - static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx) { return readb((u8 __iomem *)amd_spi->io_remap_addr + idx); @@ -445,6 +426,17 @@ static inline bool amd_is_spi_read_cmd(const u16 op) } } +static inline bool amd_is_spi_write_cmd(const u16 op) +{ + switch (op) { + case AMD_SPI_OP_PP: + case AMD_SPI_OP_PP_RANDOM: + return true; + default: + return false; + } +} + static bool amd_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { @@ -455,7 +447,7 @@ static bool amd_spi_supports_op(struct spi_mem *mem, return false; /* AMD SPI controllers support quad mode only for read operations */ - if (amd_is_spi_read_cmd(op->cmd.opcode)) { + if (amd_is_spi_read_cmd(op->cmd.opcode) || amd_is_spi_write_cmd(op->cmd.opcode)) { if (op->data.buswidth > 4) return false; @@ -464,7 +456,8 @@ static bool amd_spi_supports_op(struct spi_mem *mem, * doesn't support 4-byte address commands. */ if (amd_spi->version == AMD_HID2_SPI) { - if (amd_is_spi_read_cmd_4b(op->cmd.opcode) || + if ((amd_is_spi_read_cmd_4b(op->cmd.opcode) || + amd_is_spi_write_cmd(op->cmd.opcode)) && op->data.nbytes > AMD_SPI_HID2_DMA_SIZE) return false; } else if (op->data.nbytes > AMD_SPI_MAX_DATA) { @@ -490,7 +483,8 @@ static int amd_spi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) * controller index mode supports maximum of 64 bytes in a single * transaction. */ - if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_read_cmd(op->cmd.opcode)) + if (amd_spi->version == AMD_HID2_SPI && (amd_is_spi_read_cmd(op->cmd.opcode) || + amd_is_spi_write_cmd(op->cmd.opcode))) op->data.nbytes = clamp_val(op->data.nbytes, 0, AMD_SPI_HID2_DMA_SIZE); else op->data.nbytes = clamp_val(op->data.nbytes, 0, AMD_SPI_MAX_DATA); @@ -514,32 +508,96 @@ static void amd_spi_set_addr(struct amd_spi *amd_spi, } } +static void amd_spi_hiddma_write(struct amd_spi *amd_spi, const struct spi_mem_op *op) +{ + u16 hid_cmd_start, val; + u32 hid_regval; + + /* + * Program the HID2 output Buffer0. 4k aligned buf_memory_addr[31:12], + * buf_size[2:0]. 
+ */ + hid_regval = amd_spi->phy_dma_buf | BIT(0); + amd_spi_writereg32(amd_spi, AMD_SPI_HID2_OUTPUT_BUF0, hid_regval); + + /* Program max write length in hid2_write_control1 register */ + hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_WRITE_CNTRL1); + hid_regval = (hid_regval & ~GENMASK(15, 0)) | ((op->data.nbytes) + 3); + amd_spi_writereg32(amd_spi, AMD_SPI_HID2_WRITE_CNTRL1, hid_regval); + + /* Set cmd start bit in hid2_cmd_start register to trigger HID basic write operation */ + hid_cmd_start = amd_spi_readreg16(amd_spi, AMD_SPI_HID2_CMD_START); + amd_spi_writereg16(amd_spi, AMD_SPI_HID2_CMD_START, (hid_cmd_start | BIT(2))); + + /* Check interrupt status of HIDDMA basic write operation in hid2_int_status register */ + readw_poll_timeout(amd_spi->io_remap_addr + AMD_SPI_HID2_INT_STATUS, val, + (val & BIT(2)), AMD_SPI_IO_SLEEP_US, AMD_SPI_IO_TIMEOUT_US); + + /* Clear the interrupts by writing to hid2_int_status register */ + val = amd_spi_readreg16(amd_spi, AMD_SPI_HID2_INT_STATUS); + amd_spi_writereg16(amd_spi, AMD_SPI_HID2_INT_STATUS, val); +} + static void amd_spi_mem_data_out(struct amd_spi *amd_spi, const struct spi_mem_op *op) { int base_addr = AMD_SPI_FIFO_BASE + op->addr.nbytes; u64 *buf_64 = (u64 *)op->data.buf.out; + u64 addr_val = op->addr.val; u32 nbytes = op->data.nbytes; u32 left_data = nbytes; u8 *buf; int i; - amd_spi_set_opcode(amd_spi, op->cmd.opcode); - amd_spi_set_addr(amd_spi, op); + /* + * Condition for using HID write mode. Only for writing complete page data, use HID write. + * Use index mode otherwise. + */ + if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_write_cmd(op->cmd.opcode)) { + u64 *dma_buf64 = (u64 *)(amd_spi->dma_virt_addr + op->addr.nbytes + op->cmd.nbytes); + u8 *dma_buf = (u8 *)amd_spi->dma_virt_addr; - for (i = 0; left_data >= 8; i++, left_data -= 8) - amd_spi_writereg64(amd_spi, base_addr + op->dummy.nbytes + (i * 8), *buf_64++); + /* Copy opcode and address to DMA buffer */ + *dma_buf = op->cmd.opcode; - buf = (u8 *)buf_64; - for (i = 0; i < left_data; i++) { - amd_spi_writereg8(amd_spi, base_addr + op->dummy.nbytes + nbytes + i - left_data, - buf[i]); - } + dma_buf = (u8 *)dma_buf64; + for (i = 0; i < op->addr.nbytes; i++) { + *--dma_buf = addr_val & GENMASK(7, 0); + addr_val >>= 8; + } - amd_spi_set_tx_count(amd_spi, op->addr.nbytes + op->data.nbytes); - amd_spi_set_rx_count(amd_spi, 0); - amd_spi_clear_fifo_ptr(amd_spi); - amd_spi_execute_opcode(amd_spi); + /* Copy data to DMA buffer */ + while (left_data >= 8) { + *dma_buf64++ = *buf_64++; + left_data -= 8; + } + + buf = (u8 *)buf_64; + dma_buf = (u8 *)dma_buf64; + while (left_data--) + *dma_buf++ = *buf++; + + amd_spi_hiddma_write(amd_spi, op); + } else { + amd_spi_set_opcode(amd_spi, op->cmd.opcode); + amd_spi_set_addr(amd_spi, op); + + for (i = 0; left_data >= 8; i++, left_data -= 8) + amd_spi_writereg64(amd_spi, base_addr + op->dummy.nbytes + (i * 8), + *buf_64++); + + buf = (u8 *)buf_64; + for (i = 0; i < left_data; i++) { + amd_spi_writereg8(amd_spi, + base_addr + op->dummy.nbytes + nbytes + i - left_data, + buf[i]); + } + + amd_spi_set_tx_count(amd_spi, op->addr.nbytes + op->data.nbytes); + amd_spi_set_rx_count(amd_spi, 0); + amd_spi_clear_fifo_ptr(amd_spi); + amd_spi_execute_opcode(amd_spi); + } } static void amd_spi_hiddma_read(struct amd_spi *amd_spi, const struct spi_mem_op *op) @@ -616,15 +674,21 @@ static void amd_spi_mem_data_in(struct amd_spi *amd_spi, * Use index mode otherwise. 
*/ if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_read_cmd(op->cmd.opcode)) { + u64 *dma_buf64 = (u64 *)amd_spi->dma_virt_addr; + u8 *dma_buf; + amd_spi_hiddma_read(amd_spi, op); - for (i = 0; left_data >= 8; i++, left_data -= 8) - *buf_64++ = readq((u8 __iomem *)amd_spi->dma_virt_addr + (i * 8)); + /* Copy data from DMA buffer */ + while (left_data >= 8) { + *buf_64++ = *dma_buf64++; + left_data -= 8; + } buf = (u8 *)buf_64; - for (i = 0; i < left_data; i++) - buf[i] = readb((u8 __iomem *)amd_spi->dma_virt_addr + - (nbytes - left_data + i)); + dma_buf = (u8 *)dma_buf64; + while (left_data--) + *buf++ = *dma_buf++; /* Reset HID RX memory logic */ data = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_CNTRL); @@ -728,51 +792,38 @@ static int amd_spi_setup_hiddma(struct amd_spi *amd_spi, struct device *dev) { u32 hid_regval; - /* Allocate DMA buffer to use for HID basic read operation */ - amd_spi->dma_virt_addr = dma_alloc_coherent(dev, AMD_SPI_HID2_DMA_SIZE, - &amd_spi->phy_dma_buf, GFP_KERNEL); + /* Allocate DMA buffer to use for HID basic read and write operations. For write + * operations, the DMA buffer should include the opcode, address bytes and dummy + * bytes(if any) in addition to the data bytes. Additionally, the hardware requires + * that the buffer address be 4K aligned. So, allocate DMA buffer of size + * 2 * AMD_SPI_HID2_DMA_SIZE. + */ + amd_spi->dma_virt_addr = dmam_alloc_coherent(dev, AMD_SPI_HID2_DMA_SIZE * 2, + &amd_spi->phy_dma_buf, GFP_KERNEL); if (!amd_spi->dma_virt_addr) return -ENOMEM; /* * Enable interrupts and set mask bits in hid2_int_mask register to generate interrupt - * properly for HIDDMA basic read operations. + * properly for HIDDMA basic read and write operations. */ hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_INT_MASK); - hid_regval = (hid_regval & GENMASK(31, 8)) | BIT(19); + hid_regval = (hid_regval & GENMASK(31, 8)) | BIT(18) | BIT(19); amd_spi_writereg32(amd_spi, AMD_SPI_HID2_INT_MASK, hid_regval); - /* Configure buffer unit(4k) in hid2_control register */ + /* Configure buffer unit(4k) and write threshold in hid2_control register */ hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_CNTRL); - amd_spi_writereg32(amd_spi, AMD_SPI_HID2_CNTRL, hid_regval & ~BIT(3)); + amd_spi_writereg32(amd_spi, AMD_SPI_HID2_CNTRL, (hid_regval | GENMASK(13, 12)) & ~BIT(3)); return 0; } -static int amd_spi_probe(struct platform_device *pdev) +int amd_spi_probe_common(struct device *dev, struct spi_controller *host) { - struct device *dev = &pdev->dev; - struct spi_controller *host; - struct amd_spi *amd_spi; + struct amd_spi *amd_spi = spi_controller_get_devdata(host); int err; - /* Allocate storage for host and driver private data */ - host = devm_spi_alloc_host(dev, sizeof(struct amd_spi)); - if (!host) - return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n"); - - amd_spi = spi_controller_get_devdata(host); - amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(amd_spi->io_remap_addr)) - return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr), - "ioremap of SPI registers failed\n"); - - dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr); - - amd_spi->version = (uintptr_t) device_get_match_data(dev); - /* Initialize the spi_controller fields */ - host->bus_num = (amd_spi->version == AMD_HID2_SPI) ? 
2 : 0; host->num_chipselect = 4; host->mode_bits = SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD; host->flags = SPI_CONTROLLER_HALF_DUPLEX; @@ -795,6 +846,32 @@ static int amd_spi_probe(struct platform_device *pdev) return err; } +EXPORT_SYMBOL_GPL(amd_spi_probe_common); + +static int amd_spi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spi_controller *host; + struct amd_spi *amd_spi; + + /* Allocate storage for host and driver private data */ + host = devm_spi_alloc_host(dev, sizeof(struct amd_spi)); + if (!host) + return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n"); + + amd_spi = spi_controller_get_devdata(host); + amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(amd_spi->io_remap_addr)) + return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr), + "ioremap of SPI registers failed\n"); + + dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr); + + amd_spi->version = (uintptr_t)device_get_match_data(dev); + host->bus_num = 0; + + return amd_spi_probe_common(dev, host); +} #ifdef CONFIG_ACPI static const struct acpi_device_id spi_acpi_match[] = { diff --git a/drivers/spi/spi-amd.h b/drivers/spi/spi-amd.h new file mode 100644 index 000000000000..5f39ce7b5587 --- /dev/null +++ b/drivers/spi/spi-amd.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * AMD SPI controller driver common stuff + * + * Copyright (c) 2025, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Krishnamoorthi M <krishnamoorthi.m@amd.com> + */ + +#ifndef SPI_AMD_H +#define SPI_AMD_H + +/** + * enum amd_spi_versions - SPI controller versions + * @AMD_SPI_V1: AMDI0061 hardware version + * @AMD_SPI_V2: AMDI0062 hardware version + * @AMD_HID2_SPI: AMDI0063 hardware version + */ +enum amd_spi_versions { + AMD_SPI_V1 = 1, + AMD_SPI_V2, + AMD_HID2_SPI, +}; + +/** + * struct amd_spi - SPI driver instance + * @io_remap_addr: Start address of the SPI controller registers + * @phy_dma_buf: Physical address of DMA buffer + * @dma_virt_addr: Virtual address of DMA buffer + * @version: SPI controller hardware version + * @speed_hz: Device frequency + */ +struct amd_spi { + void __iomem *io_remap_addr; + dma_addr_t phy_dma_buf; + void *dma_virt_addr; + enum amd_spi_versions version; + unsigned int speed_hz; +}; + +int amd_spi_probe_common(struct device *dev, struct spi_controller *host); + +#endif /* SPI_AMD_H */ diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index da9840957778..8cc19934b48b 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -14,6 +14,7 @@ #include <linux/fpga/adi-axi-common.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/of.h> #include <linux/module.h> #include <linux/overflow.h> @@ -140,6 +141,8 @@ struct spi_engine_offload { struct spi_engine *spi_engine; unsigned long flags; unsigned int offload_num; + unsigned int spi_mode_config; + u8 bits_per_word; }; struct spi_engine { @@ -159,6 +162,7 @@ struct spi_engine { unsigned int offload_sdo_mem_size; struct spi_offload *offload; u32 offload_caps; + bool offload_requires_sync; }; static void spi_engine_program_add_cmd(struct spi_engine_program *p, @@ -265,6 +269,8 @@ static int spi_engine_precompile_message(struct spi_message *msg) { unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz; struct spi_transfer *xfer; + u8 min_bits_per_word = U8_MAX; + u8 max_bits_per_word = 0; list_for_each_entry(xfer, 
&msg->transfers, transfer_list) { /* If we have an offload transfer, we can't rx to buffer */ @@ -273,6 +279,24 @@ static int spi_engine_precompile_message(struct spi_message *msg) clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz); xfer->effective_speed_hz = max_hz / min(clk_div, 256U); + + if (xfer->len) { + min_bits_per_word = min(min_bits_per_word, xfer->bits_per_word); + max_bits_per_word = max(max_bits_per_word, xfer->bits_per_word); + } + } + + /* + * If all xfers in the message use the same bits_per_word, we can + * provide some optimization when using SPI offload. + */ + if (msg->offload) { + struct spi_engine_offload *priv = msg->offload->priv; + + if (min_bits_per_word == max_bits_per_word) + priv->bits_per_word = min_bits_per_word; + else + priv->bits_per_word = 0; } return 0; @@ -283,6 +307,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, { struct spi_device *spi = msg->spi; struct spi_controller *host = spi->controller; + struct spi_engine_offload *priv; struct spi_transfer *xfer; int clk_div, new_clk_div, inst_ns; bool keep_cs = false; @@ -296,9 +321,24 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, clk_div = 1; - spi_engine_program_add_cmd(p, dry, - SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG, - spi_engine_get_config(spi))); + /* + * As an optimization, SPI offload sets once this when the offload is + * enabled instead of repeating the instruction in each message. + */ + if (msg->offload) { + priv = msg->offload->priv; + priv->spi_mode_config = spi_engine_get_config(spi); + + /* + * If all xfers use the same bits_per_word, it can be optimized + * in the same way. + */ + bits_per_word = priv->bits_per_word; + } else { + spi_engine_program_add_cmd(p, dry, + SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG, + spi_engine_get_config(spi))); + } xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list); spi_engine_gen_cs(p, dry, spi, !xfer->cs_off); @@ -663,6 +703,8 @@ static void spi_engine_offload_unprepare(struct spi_offload *offload) static int spi_engine_optimize_message(struct spi_message *msg) { + struct spi_controller *host = msg->spi->controller; + struct spi_engine *spi_engine = spi_controller_get_devdata(host); struct spi_engine_program p_dry, *p; int ret; @@ -679,8 +721,13 @@ static int spi_engine_optimize_message(struct spi_message *msg) spi_engine_compile_message(msg, false, p); - spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC( - msg->offload ? 0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID)); + /* + * Non-offload needs SYNC for completion interrupt. Older versions of + * the IP core also need SYNC for offload to work properly. + */ + if (!msg->offload || spi_engine->offload_requires_sync) + spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC( + msg->offload ? 
0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID)); msg->opt_state = p; @@ -739,12 +786,16 @@ static int spi_engine_setup(struct spi_device *device) { struct spi_controller *host = device->controller; struct spi_engine *spi_engine = spi_controller_get_devdata(host); + unsigned int reg; if (device->mode & SPI_CS_HIGH) spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0)); else spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0)); + writel_relaxed(SPI_ENGINE_CMD_SYNC(0), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv), spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); @@ -755,7 +806,11 @@ static int spi_engine_setup(struct spi_device *device) writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff), spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); - return 0; + writel_relaxed(SPI_ENGINE_CMD_SYNC(1), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + + return readl_relaxed_poll_timeout(spi_engine->base + SPI_ENGINE_REG_SYNC_ID, + reg, reg == 1, 1, 1000); } static int spi_engine_transfer_one_message(struct spi_controller *host, @@ -833,6 +888,27 @@ static int spi_engine_trigger_enable(struct spi_offload *offload) struct spi_engine_offload *priv = offload->priv; struct spi_engine *spi_engine = priv->spi_engine; unsigned int reg; + int ret; + + writel_relaxed(SPI_ENGINE_CMD_SYNC(0), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG, + priv->spi_mode_config), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + + if (priv->bits_per_word) + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS, + priv->bits_per_word), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + + writel_relaxed(SPI_ENGINE_CMD_SYNC(1), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + + ret = readl_relaxed_poll_timeout(spi_engine->base + SPI_ENGINE_REG_SYNC_ID, + reg, reg == 1, 1, 1000); + if (ret) + return ret; reg = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num)); @@ -987,6 +1063,9 @@ static int spi_engine_probe(struct platform_device *pdev) spi_engine->offload_sdo_mem_size = SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE; } + /* IP v1.5 dropped the requirement for SYNC in offload messages. 
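As an aside, not part of the patch itself: both the setup and trigger-enable hunks above now bracket the commands they push through the command FIFO with SYNC(0)/SYNC(1) and then poll SYNC_ID until the closing sync retires, which is what guarantees the CS-inversion and mode-configuration writes have taken effect before the function returns. A minimal sketch of that handshake; spi_engine_write_cmd_sync is a hypothetical helper built from the register names used above:

#include <linux/iopoll.h>

static int spi_engine_write_cmd_sync(struct spi_engine *spi_engine, unsigned int cmd)
{
	unsigned int reg;

	writel_relaxed(SPI_ENGINE_CMD_SYNC(0),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
	writel_relaxed(cmd, spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
	writel_relaxed(SPI_ENGINE_CMD_SYNC(1),
		       spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);

	/* SYNC_ID reads back 1 once everything queued before it has executed */
	return readl_relaxed_poll_timeout(spi_engine->base + SPI_ENGINE_REG_SYNC_ID,
					  reg, reg == 1, 1, 1000);
}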
*/ + spi_engine->offload_requires_sync = ADI_AXI_PCORE_VER_MINOR(version) < 5; + writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET); writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING); writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE); diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index c90462783b3f..fe0f122f07b0 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1949,7 +1949,7 @@ static int cqspi_probe(struct platform_device *pdev) host->num_chipselect = cqspi->num_chipselect; - if (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET) + if (ddata && (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET)) cqspi_device_reset(cqspi); if (cqspi->use_direct_mode) { diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c index 337aef12abcc..367ae7120bb3 100644 --- a/drivers/spi/spi-cavium-thunderx.c +++ b/drivers/spi/spi-cavium-thunderx.c @@ -34,7 +34,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev, if (ret) goto error; - ret = pci_request_regions(pdev, DRV_NAME); + ret = pcim_request_all_regions(pdev, DRV_NAME); if (ret) goto error; @@ -78,7 +78,6 @@ static int thunderx_spi_probe(struct pci_dev *pdev, return 0; error: - pci_release_regions(pdev); spi_controller_put(host); return ret; } @@ -92,7 +91,6 @@ static void thunderx_spi_remove(struct pci_dev *pdev) if (!p) return; - pci_release_regions(pdev); /* Put everything in a known state. */ writeq(0, p->register_base + OCTEON_SPI_CFG(p)); } diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c index ceefc253c549..b28a840b3b04 100644 --- a/drivers/spi/spi-cs42l43.c +++ b/drivers/spi/spi-cs42l43.c @@ -237,7 +237,9 @@ static int cs42l43_get_speaker_id_gpios(struct cs42l43_spi *priv, int *result) int i, ret; descs = gpiod_get_array_optional(priv->dev, "spk-id", GPIOD_IN); - if (IS_ERR_OR_NULL(descs)) + if (!descs) + return 0; + else if (IS_ERR(descs)) return PTR_ERR(descs); spkid = 0; diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c index 941ecc6f59f8..b3b883cb9541 100644 --- a/drivers/spi/spi-dw-core.c +++ b/drivers/spi/spi-dw-core.c @@ -423,7 +423,7 @@ static int dw_spi_transfer_one(struct spi_controller *host, int ret; dws->dma_mapped = 0; - dws->n_bytes = roundup_pow_of_two(BITS_TO_BYTES(transfer->bits_per_word)); + dws->n_bytes = spi_bpw_to_bytes(transfer->bits_per_word); dws->tx = (void *)transfer->tx_buf; dws->tx_len = transfer->len / dws->n_bytes; dws->rx = transfer->rx_buf; diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index b5ecffcaf795..c887abb028d7 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -264,14 +264,14 @@ static const struct fsl_qspi_devtype_data ls2080a_data = { struct fsl_qspi { void __iomem *iobase; void __iomem *ahb_addr; - u32 memmap_phy; - struct clk *clk, *clk_en; - struct device *dev; - struct completion c; const struct fsl_qspi_devtype_data *devtype_data; struct mutex lock; + struct completion c; + struct clk *clk, *clk_en; struct pm_qos_request pm_qos_req; + struct device *dev; int selected; + u32 memmap_phy; }; static inline int needs_swap_endian(struct fsl_qspi *q) @@ -844,13 +844,18 @@ static const struct spi_controller_mem_caps fsl_qspi_mem_caps = { .per_op_freq = true, }; -static void fsl_qspi_cleanup(void *data) +static void fsl_qspi_disable(void *data) { struct fsl_qspi *q = data; /* disable the hardware */ qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); qspi_writel(q, 0x0, 
q->iobase + QUADSPI_RSER); +} + +static void fsl_qspi_cleanup(void *data) +{ + struct fsl_qspi *q = data; fsl_qspi_clk_disable_unprep(q); @@ -866,7 +871,7 @@ static int fsl_qspi_probe(struct platform_device *pdev) struct fsl_qspi *q; int ret; - ctlr = spi_alloc_host(&pdev->dev, sizeof(*q)); + ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*q)); if (!ctlr) return -ENOMEM; @@ -876,68 +881,60 @@ static int fsl_qspi_probe(struct platform_device *pdev) q = spi_controller_get_devdata(ctlr); q->dev = dev; q->devtype_data = of_device_get_match_data(dev); - if (!q->devtype_data) { - ret = -ENODEV; - goto err_put_ctrl; - } + if (!q->devtype_data) + return -ENODEV; platform_set_drvdata(pdev, q); /* find the resources */ q->iobase = devm_platform_ioremap_resource_byname(pdev, "QuadSPI"); - if (IS_ERR(q->iobase)) { - ret = PTR_ERR(q->iobase); - goto err_put_ctrl; - } + if (IS_ERR(q->iobase)) + return PTR_ERR(q->iobase); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI-memory"); - if (!res) { - ret = -EINVAL; - goto err_put_ctrl; - } + if (!res) + return -EINVAL; q->memmap_phy = res->start; /* Since there are 4 cs, map size required is 4 times ahb_buf_size */ q->ahb_addr = devm_ioremap(dev, q->memmap_phy, (q->devtype_data->ahb_buf_size * 4)); - if (!q->ahb_addr) { - ret = -ENOMEM; - goto err_put_ctrl; - } + if (!q->ahb_addr) + return -ENOMEM; /* find the clocks */ q->clk_en = devm_clk_get(dev, "qspi_en"); - if (IS_ERR(q->clk_en)) { - ret = PTR_ERR(q->clk_en); - goto err_put_ctrl; - } + if (IS_ERR(q->clk_en)) + return PTR_ERR(q->clk_en); q->clk = devm_clk_get(dev, "qspi"); - if (IS_ERR(q->clk)) { - ret = PTR_ERR(q->clk); - goto err_put_ctrl; - } + if (IS_ERR(q->clk)) + return PTR_ERR(q->clk); + + mutex_init(&q->lock); ret = fsl_qspi_clk_prep_enable(q); if (ret) { dev_err(dev, "can not enable the clock\n"); - goto err_put_ctrl; + return ret; } + ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q); + if (ret) + return ret; + /* find the irq */ ret = platform_get_irq(pdev, 0); if (ret < 0) - goto err_disable_clk; + return ret; ret = devm_request_irq(dev, ret, fsl_qspi_irq_handler, 0, pdev->name, q); if (ret) { dev_err(dev, "failed to request irq: %d\n", ret); - goto err_disable_clk; + return ret; } - mutex_init(&q->lock); - ctlr->bus_num = -1; ctlr->num_chipselect = 4; ctlr->mem_ops = &fsl_qspi_mem_ops; @@ -947,23 +944,15 @@ static int fsl_qspi_probe(struct platform_device *pdev) ctlr->dev.of_node = np; - ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q); + ret = devm_add_action_or_reset(dev, fsl_qspi_disable, q); if (ret) - goto err_put_ctrl; + return ret; ret = devm_spi_register_controller(dev, ctlr); if (ret) - goto err_put_ctrl; + return ret; return 0; - -err_disable_clk: - fsl_qspi_clk_disable_unprep(q); - -err_put_ctrl: - spi_controller_put(ctlr); - - return ret; } static int fsl_qspi_suspend(struct device *dev) diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index 405deb6677c1..ea5f1b10b79e 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -46,7 +46,7 @@ struct spi_gpio { static inline struct spi_gpio *__pure spi_to_spi_gpio(const struct spi_device *spi) { - const struct spi_bitbang *bang; + struct spi_bitbang *bang; struct spi_gpio *spi_gpio; bang = spi_controller_get_devdata(spi->controller); diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c index 4d9ffec900bb..4b63cb98df9c 100644 --- a/drivers/spi/spi-intel-pci.c +++ b/drivers/spi/spi-intel-pci.c @@ -44,6 +44,7 @@ static int intel_spi_pci_probe(struct pci_dev *pdev, 
const struct pci_device_id *id) { struct intel_spi_boardinfo *info; + void __iomem *base; int ret; ret = pcim_enable_device(pdev); @@ -56,7 +57,12 @@ static int intel_spi_pci_probe(struct pci_dev *pdev, return -ENOMEM; info->data = pdev; - return intel_spi_probe(&pdev->dev, &pdev->resource[0], info); + + base = pcim_iomap_region(pdev, 0, KBUILD_MODNAME); + if (IS_ERR(base)) + return PTR_ERR(base); + + return intel_spi_probe(&pdev->dev, base, info); } static const struct pci_device_id intel_spi_pci_ids[] = { diff --git a/drivers/spi/spi-intel-platform.c b/drivers/spi/spi-intel-platform.c index 0974cca83a5d..6cbed0b2e063 100644 --- a/drivers/spi/spi-intel-platform.c +++ b/drivers/spi/spi-intel-platform.c @@ -14,14 +14,17 @@ static int intel_spi_platform_probe(struct platform_device *pdev) { struct intel_spi_boardinfo *info; - struct resource *mem; + void __iomem *base; info = dev_get_platdata(&pdev->dev); if (!info) return -EINVAL; - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - return intel_spi_probe(&pdev->dev, mem, info); + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + return intel_spi_probe(&pdev->dev, base, info); } static struct platform_driver intel_spi_platform_driver = { diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c index b0dcdb6fb8fa..5d5a546c62ea 100644 --- a/drivers/spi/spi-intel.c +++ b/drivers/spi/spi-intel.c @@ -1467,13 +1467,13 @@ EXPORT_SYMBOL_GPL(intel_spi_groups); /** * intel_spi_probe() - Probe the Intel SPI flash controller * @dev: Pointer to the parent device - * @mem: MMIO resource + * @base: iomapped MMIO resource * @info: Platform specific information * * Probes Intel SPI flash controller and creates the flash chip device. * Returns %0 on success and negative errno in case of failure. 
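As an aside, not part of the patch itself: after this change the glue drivers own the MMIO mapping and the core only consumes a void __iomem * cookie, so any front-end that can produce a mapping can reuse intel_spi_probe() without touching struct resource. A minimal sketch of the new calling convention for a platform-style caller; example_intel_spi_attach is a hypothetical name:

static int example_intel_spi_attach(struct platform_device *pdev,
				    const struct intel_spi_boardinfo *info)
{
	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(base))
		return PTR_ERR(base);

	/* the core no longer maps anything itself */
	return intel_spi_probe(&pdev->dev, base, info);
}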
*/ -int intel_spi_probe(struct device *dev, struct resource *mem, +int intel_spi_probe(struct device *dev, void __iomem *base, const struct intel_spi_boardinfo *info) { struct spi_controller *host; @@ -1488,10 +1488,7 @@ int intel_spi_probe(struct device *dev, struct resource *mem, ispi = spi_controller_get_devdata(host); - ispi->base = devm_ioremap_resource(dev, mem); - if (IS_ERR(ispi->base)) - return PTR_ERR(ispi->base); - + ispi->base = base; ispi->dev = dev; ispi->host = host; ispi->info = info; diff --git a/drivers/spi/spi-intel.h b/drivers/spi/spi-intel.h index c5f35060dd63..53b292c6ea0c 100644 --- a/drivers/spi/spi-intel.h +++ b/drivers/spi/spi-intel.h @@ -11,11 +11,9 @@ #include <linux/platform_data/x86/spi-intel.h> -struct resource; - extern const struct attribute_group *intel_spi_groups[]; -int intel_spi_probe(struct device *dev, struct resource *mem, +int intel_spi_probe(struct device *dev, void __iomem *base, const struct intel_spi_boardinfo *info); #endif /* SPI_INTEL_H */ diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c index 7740f94847a8..7dd92deffe3f 100644 --- a/drivers/spi/spi-loopback-test.c +++ b/drivers/spi/spi-loopback-test.c @@ -494,8 +494,8 @@ struct rx_ranges { static int rx_ranges_cmp(void *priv, const struct list_head *a, const struct list_head *b) { - struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list); - struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list); + const struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list); + const struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list); if (rx_a->start > rx_b->start) return 1; @@ -635,8 +635,8 @@ static int spi_test_check_loopback_result(struct spi_device *spi, } else { /* first byte received */ txb = ((u8 *)xfer->rx_buf)[0]; - /* first byte may be 0 or xff */ - if (!((txb == 0) || (txb == 0xff))) { + /* first byte may be 0 or 0xff */ + if (txb != 0 && txb != 0xff) { dev_err(&spi->dev, "loopback strangeness - we expect 0x00 or 0xff, but not 0x%02x\n", txb); diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index df74ad5060f8..6b9137307533 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -21,18 +21,26 @@ #include <linux/interrupt.h> #include <linux/reset.h> #include <linux/pinctrl/consumer.h> +#include <linux/dma-mapping.h> /* - * The Meson SPICC controller could support DMA based transfers, but is not - * implemented by the vendor code, and while having the registers documentation - * it has never worked on the GXL Hardware. - * The PIO mode is the only mode implemented, and due to badly designed HW : - * - all transfers are cutted in 16 words burst because the FIFO hangs on - * TX underflow, and there is no TX "Half-Empty" interrupt, so we go by - * FIFO max size chunk only - * - CS management is dumb, and goes UP between every burst, so is really a - * "Data Valid" signal than a Chip Select, GPIO link should be used instead - * to have a CS go down over the full transfer + * There are two modes for data transmission: PIO and DMA. + * When bits_per_word is 8, 16, 24, or 32, data is transferred using PIO mode. + * When bits_per_word is 64, DMA mode is used by default. + * + * DMA achieves a transfer with one or more SPI bursts, each SPI burst is made + * up of one or more DMA bursts. 
The DMA burst implementation mechanism is, + * For TX, when the number of words in TXFIFO is less than the preset + * reading threshold, SPICC starts a reading DMA burst, which reads the preset + * number of words from TX buffer, then writes them into TXFIFO. + * For RX, when the number of words in RXFIFO is greater than the preset + * writing threshold, SPICC starts a writing request burst, which reads the + * preset number of words from RXFIFO, then write them into RX buffer. + * DMA works if the transfer meets the following conditions, + * - 64 bits per word + * - The transfer length in word must be multiples of the dma_burst_len, and + * the dma_burst_len should be one of 8,7...2, otherwise, it will be split + * into several SPI bursts by this driver */ #define SPICC_MAX_BURST 128 @@ -128,6 +136,23 @@ #define SPICC_DWADDR 0x24 /* Write Address of DMA */ +#define SPICC_LD_CNTL0 0x28 +#define VSYNC_IRQ_SRC_SELECT BIT(0) +#define DMA_EN_SET_BY_VSYNC BIT(2) +#define XCH_EN_SET_BY_VSYNC BIT(3) +#define DMA_READ_COUNTER_EN BIT(4) +#define DMA_WRITE_COUNTER_EN BIT(5) +#define DMA_RADDR_LOAD_BY_VSYNC BIT(6) +#define DMA_WADDR_LOAD_BY_VSYNC BIT(7) +#define DMA_ADDR_LOAD_FROM_LD_ADDR BIT(8) + +#define SPICC_LD_CNTL1 0x2c +#define DMA_READ_COUNTER GENMASK(15, 0) +#define DMA_WRITE_COUNTER GENMASK(31, 16) +#define DMA_BURST_LEN_DEFAULT 8 +#define DMA_BURST_COUNT_MAX 0xffff +#define SPI_BURST_LEN_MAX (DMA_BURST_LEN_DEFAULT * DMA_BURST_COUNT_MAX) + #define SPICC_ENH_CTL0 0x38 /* Enhanced Feature */ #define SPICC_ENH_CLK_CS_DELAY_MASK GENMASK(15, 0) #define SPICC_ENH_DATARATE_MASK GENMASK(23, 16) @@ -171,6 +196,9 @@ struct meson_spicc_device { struct pinctrl *pinctrl; struct pinctrl_state *pins_idle_high; struct pinctrl_state *pins_idle_low; + dma_addr_t tx_dma; + dma_addr_t rx_dma; + bool using_dma; }; #define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div) @@ -202,6 +230,148 @@ static void meson_spicc_oen_enable(struct meson_spicc_device *spicc) writel_relaxed(conf, spicc->base + SPICC_ENH_CTL0); } +static int meson_spicc_dma_map(struct meson_spicc_device *spicc, + struct spi_transfer *t) +{ + struct device *dev = spicc->host->dev.parent; + + if (!(t->tx_buf && t->rx_buf)) + return -EINVAL; + + t->tx_dma = dma_map_single(dev, (void *)t->tx_buf, t->len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, t->tx_dma)) + return -ENOMEM; + + t->rx_dma = dma_map_single(dev, t->rx_buf, t->len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, t->rx_dma)) + return -ENOMEM; + + spicc->tx_dma = t->tx_dma; + spicc->rx_dma = t->rx_dma; + + return 0; +} + +static void meson_spicc_dma_unmap(struct meson_spicc_device *spicc, + struct spi_transfer *t) +{ + struct device *dev = spicc->host->dev.parent; + + if (t->tx_dma) + dma_unmap_single(dev, t->tx_dma, t->len, DMA_TO_DEVICE); + if (t->rx_dma) + dma_unmap_single(dev, t->rx_dma, t->len, DMA_FROM_DEVICE); +} + +/* + * According to the remain words length, calculate a suitable spi burst length + * and a dma burst length for current spi burst + */ +static u32 meson_spicc_calc_dma_len(struct meson_spicc_device *spicc, + u32 len, u32 *dma_burst_len) +{ + u32 i; + + if (len <= spicc->data->fifo_size) { + *dma_burst_len = len; + return len; + } + + *dma_burst_len = DMA_BURST_LEN_DEFAULT; + + if (len == (SPI_BURST_LEN_MAX + 1)) + return SPI_BURST_LEN_MAX - DMA_BURST_LEN_DEFAULT; + + if (len >= SPI_BURST_LEN_MAX) + return SPI_BURST_LEN_MAX; + + for (i = DMA_BURST_LEN_DEFAULT; i > 1; i--) + if ((len % i) == 0) { + *dma_burst_len = i; + return len; + } 
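	/*
	 * Worked example, not part of the patch itself: a remaining length of
	 * 101 words (larger than the FIFO) has no divisor in 8..2, so the
	 * fallback below strips the remainder 101 % 8 = 5, sends 96 words in
	 * this SPI burst as 12 DMA bursts of 8, and leaves the last 5 words
	 * for the next call. The extra "i == 1" adjustment exists because a
	 * 1-word leftover could not trigger a DMA burst on its own, so 8 more
	 * words are pushed into the remainder to avoid ever producing it.
	 */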
+ + i = len % DMA_BURST_LEN_DEFAULT; + len -= i; + + if (i == 1) + len -= DMA_BURST_LEN_DEFAULT; + + return len; +} + +static void meson_spicc_setup_dma(struct meson_spicc_device *spicc) +{ + unsigned int len; + unsigned int dma_burst_len, dma_burst_count; + unsigned int count_en = 0; + unsigned int txfifo_thres = 0; + unsigned int read_req = 0; + unsigned int rxfifo_thres = 31; + unsigned int write_req = 0; + unsigned int ld_ctr1 = 0; + + writel_relaxed(spicc->tx_dma, spicc->base + SPICC_DRADDR); + writel_relaxed(spicc->rx_dma, spicc->base + SPICC_DWADDR); + + /* Set the max burst length to support a transmission with length of + * no more than 1024 bytes(128 words), which must use the CS management + * because of some strict timing requirements + */ + writel_bits_relaxed(SPICC_BURSTLENGTH_MASK, SPICC_BURSTLENGTH_MASK, + spicc->base + SPICC_CONREG); + + len = meson_spicc_calc_dma_len(spicc, spicc->xfer_remain, + &dma_burst_len); + spicc->xfer_remain -= len; + dma_burst_count = DIV_ROUND_UP(len, dma_burst_len); + dma_burst_len--; + + if (spicc->tx_dma) { + spicc->tx_dma += len; + count_en |= DMA_READ_COUNTER_EN; + txfifo_thres = spicc->data->fifo_size - dma_burst_len; + read_req = dma_burst_len; + ld_ctr1 |= FIELD_PREP(DMA_READ_COUNTER, dma_burst_count); + } + + if (spicc->rx_dma) { + spicc->rx_dma += len; + count_en |= DMA_WRITE_COUNTER_EN; + rxfifo_thres = dma_burst_len; + write_req = dma_burst_len; + ld_ctr1 |= FIELD_PREP(DMA_WRITE_COUNTER, dma_burst_count); + } + + writel_relaxed(count_en, spicc->base + SPICC_LD_CNTL0); + writel_relaxed(ld_ctr1, spicc->base + SPICC_LD_CNTL1); + writel_relaxed(SPICC_DMA_ENABLE + | SPICC_DMA_URGENT + | FIELD_PREP(SPICC_TXFIFO_THRESHOLD_MASK, txfifo_thres) + | FIELD_PREP(SPICC_READ_BURST_MASK, read_req) + | FIELD_PREP(SPICC_RXFIFO_THRESHOLD_MASK, rxfifo_thres) + | FIELD_PREP(SPICC_WRITE_BURST_MASK, write_req), + spicc->base + SPICC_DMAREG); +} + +static irqreturn_t meson_spicc_dma_irq(struct meson_spicc_device *spicc) +{ + if (readl_relaxed(spicc->base + SPICC_DMAREG) & SPICC_DMA_ENABLE) + return IRQ_HANDLED; + + if (spicc->xfer_remain) { + meson_spicc_setup_dma(spicc); + } else { + writel_bits_relaxed(SPICC_SMC, 0, spicc->base + SPICC_CONREG); + writel_relaxed(0, spicc->base + SPICC_INTREG); + writel_relaxed(0, spicc->base + SPICC_DMAREG); + meson_spicc_dma_unmap(spicc, spicc->xfer); + complete(&spicc->done); + } + + return IRQ_HANDLED; +} + static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc) { return !!FIELD_GET(SPICC_TF, @@ -293,6 +463,9 @@ static irqreturn_t meson_spicc_irq(int irq, void *data) writel_bits_relaxed(SPICC_TC, SPICC_TC, spicc->base + SPICC_STATREG); + if (spicc->using_dma) + return meson_spicc_dma_irq(spicc); + /* Empty RX FIFO */ meson_spicc_rx(spicc); @@ -426,9 +599,6 @@ static int meson_spicc_transfer_one(struct spi_controller *host, meson_spicc_reset_fifo(spicc); - /* Setup burst */ - meson_spicc_setup_burst(spicc); - /* Setup wait for completion */ reinit_completion(&spicc->done); @@ -442,11 +612,36 @@ static int meson_spicc_transfer_one(struct spi_controller *host, /* Increase it twice and add 200 ms tolerance */ timeout += timeout + 200; - /* Start burst */ - writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG); + if (xfer->bits_per_word == 64) { + int ret; + + /* dma_burst_len 1 can't trigger a dma burst */ + if (xfer->len < 16) + return -EINVAL; + + ret = meson_spicc_dma_map(spicc, xfer); + if (ret) { + meson_spicc_dma_unmap(spicc, xfer); + dev_err(host->dev.parent, "dma map failed\n"); + 
return ret; + } + + spicc->using_dma = true; + spicc->xfer_remain = DIV_ROUND_UP(xfer->len, spicc->bytes_per_word); + meson_spicc_setup_dma(spicc); + writel_relaxed(SPICC_TE_EN, spicc->base + SPICC_INTREG); + writel_bits_relaxed(SPICC_SMC, SPICC_SMC, spicc->base + SPICC_CONREG); + } else { + spicc->using_dma = false; + /* Setup burst */ + meson_spicc_setup_burst(spicc); - /* Enable interrupts */ - writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG); + /* Start burst */ + writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG); + + /* Enable interrupts */ + writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG); + } if (!wait_for_completion_timeout(&spicc->done, msecs_to_jiffies(timeout))) return -ETIMEDOUT; @@ -545,6 +740,14 @@ static int meson_spicc_setup(struct spi_device *spi) if (!spi->controller_state) spi->controller_state = spi_controller_get_devdata(spi->controller); + /* DMA works at 64 bits, the rest works on PIO */ + if (spi->bits_per_word != 8 && + spi->bits_per_word != 16 && + spi->bits_per_word != 24 && + spi->bits_per_word != 32 && + spi->bits_per_word != 64) + return -EINVAL; + return 0; } @@ -853,10 +1056,6 @@ static int meson_spicc_probe(struct platform_device *pdev) host->num_chipselect = 4; host->dev.of_node = pdev->dev.of_node; host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LOOP; - host->bits_per_word_mask = SPI_BPW_MASK(32) | - SPI_BPW_MASK(24) | - SPI_BPW_MASK(16) | - SPI_BPW_MASK(8); host->flags = (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX); host->min_speed_hz = spicc->data->min_speed_hz; host->max_speed_hz = spicc->data->max_speed_hz; diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index bad6b30bab0e..e63c77e41823 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -48,6 +48,8 @@ #include <linux/mutex.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/pinctrl/consumer.h> +#include <linux/pm_runtime.h> #include <linux/pm_qos.h> #include <linux/regmap.h> #include <linux/sizes.h> @@ -57,6 +59,9 @@ #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h> +/* runtime pm timeout */ +#define FSPI_RPM_TIMEOUT 50 /* 50ms */ + /* Registers used by the driver */ #define FSPI_MCR0 0x00 #define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24) @@ -394,6 +399,8 @@ struct nxp_fspi { struct mutex lock; struct pm_qos_request pm_qos_req; int selected; +#define FSPI_NEED_INIT (1 << 0) + int flags; }; static inline int needs_ip_only(struct nxp_fspi *f) @@ -627,15 +634,15 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f) return 0; } -static int nxp_fspi_clk_disable_unprep(struct nxp_fspi *f) +static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f) { if (is_acpi_node(dev_fwnode(f->dev))) - return 0; + return; clk_disable_unprepare(f->clk); clk_disable_unprepare(f->clk_en); - return 0; + return; } static void nxp_fspi_dll_calibration(struct nxp_fspi *f) @@ -925,7 +932,13 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->controller); int err = 0; - mutex_lock(&f->lock); + guard(mutex)(&f->lock); + + err = pm_runtime_get_sync(f->dev); + if (err < 0) { + dev_err(f->dev, "Failed to enable clock %d\n", __LINE__); + return err; + } /* Wait for controller being ready. */ err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0, @@ -955,7 +968,8 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) /* Invalidate the data in the AHB buffer. 
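As an aside, not part of the patch itself: with the clocks now under runtime PM, every nxp_fspi path that touches registers takes a PM reference for the duration of the operation and drops it through the autosuspend helpers, while guard(mutex) releases the lock on every return path. A minimal sketch of that shape; fspi_do_op_sketch is a hypothetical helper, and its error leg is slightly more defensive than the hunk above since pm_runtime_get_sync() bumps the usage count even when it fails:

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>

static int fspi_do_op_sketch(struct nxp_fspi *f)
{
	int err;

	guard(mutex)(&f->lock);		/* dropped automatically on any return */

	err = pm_runtime_get_sync(f->dev);
	if (err < 0) {
		pm_runtime_put_noidle(f->dev);	/* get_sync counts even on failure */
		return err;
	}

	/* ... program the controller and run the transfer ... */

	pm_runtime_mark_last_busy(f->dev);
	pm_runtime_put_autosuspend(f->dev);
	return 0;
}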
*/ nxp_fspi_invalid(f); - mutex_unlock(&f->lock); + pm_runtime_mark_last_busy(f->dev); + pm_runtime_put_autosuspend(f->dev); return err; } @@ -1154,6 +1168,24 @@ static const struct spi_controller_mem_caps nxp_fspi_mem_caps = { .per_op_freq = true, }; +static void nxp_fspi_cleanup(void *data) +{ + struct nxp_fspi *f = data; + + /* enable clock first since there is register access */ + pm_runtime_get_sync(f->dev); + + /* disable the hardware */ + fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0); + + pm_runtime_disable(f->dev); + pm_runtime_put_noidle(f->dev); + nxp_fspi_clk_disable_unprep(f); + + if (f->ahb_addr) + iounmap(f->ahb_addr); +} + static int nxp_fspi_probe(struct platform_device *pdev) { struct spi_controller *ctlr; @@ -1161,10 +1193,10 @@ static int nxp_fspi_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; struct resource *res; struct nxp_fspi *f; - int ret; + int ret, irq; u32 reg; - ctlr = spi_alloc_host(&pdev->dev, sizeof(*f)); + ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*f)); if (!ctlr) return -ENOMEM; @@ -1174,10 +1206,8 @@ static int nxp_fspi_probe(struct platform_device *pdev) f = spi_controller_get_devdata(ctlr); f->dev = dev; f->devtype_data = (struct nxp_fspi_devtype_data *)device_get_match_data(dev); - if (!f->devtype_data) { - ret = -ENODEV; - goto err_put_ctrl; - } + if (!f->devtype_data) + return -ENODEV; platform_set_drvdata(pdev, f); @@ -1186,11 +1216,8 @@ static int nxp_fspi_probe(struct platform_device *pdev) f->iobase = devm_platform_ioremap_resource(pdev, 0); else f->iobase = devm_platform_ioremap_resource_byname(pdev, "fspi_base"); - - if (IS_ERR(f->iobase)) { - ret = PTR_ERR(f->iobase); - goto err_put_ctrl; - } + if (IS_ERR(f->iobase)) + return PTR_ERR(f->iobase); /* find the resources - controller memory mapped space */ if (is_acpi_node(dev_fwnode(f->dev))) @@ -1198,11 +1225,8 @@ static int nxp_fspi_probe(struct platform_device *pdev) else res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap"); - - if (!res) { - ret = -ENODEV; - goto err_put_ctrl; - } + if (!res) + return -ENODEV; /* assign memory mapped starting address and mapped size. 
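As an aside, not part of the patch itself: devres actions run in reverse order of registration, so registering nxp_fspi_cleanup() before devm_spi_register_controller() in the probe rework that follows means the controller is unregistered first and only then is the hardware quiesced; the cleanup also has to take a runtime PM reference before writing MCR0 because the clocks may already be gated. The ordering, annotated as a sketch:

	/* teardown runs bottom-up: unregister first, then quiesce and unclock */
	ret = devm_add_action_or_reset(dev, nxp_fspi_cleanup, f);	/* runs second */
	if (ret)
		return ret;

	return devm_spi_register_controller(dev, ctlr);			/* undone first */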
*/ f->memmap_phy = res->start; @@ -1211,100 +1235,109 @@ static int nxp_fspi_probe(struct platform_device *pdev) /* find the clocks */ if (dev_of_node(&pdev->dev)) { f->clk_en = devm_clk_get(dev, "fspi_en"); - if (IS_ERR(f->clk_en)) { - ret = PTR_ERR(f->clk_en); - goto err_put_ctrl; - } + if (IS_ERR(f->clk_en)) + return PTR_ERR(f->clk_en); f->clk = devm_clk_get(dev, "fspi"); - if (IS_ERR(f->clk)) { - ret = PTR_ERR(f->clk); - goto err_put_ctrl; - } - - ret = nxp_fspi_clk_prep_enable(f); - if (ret) { - dev_err(dev, "can not enable the clock\n"); - goto err_put_ctrl; - } + if (IS_ERR(f->clk)) + return PTR_ERR(f->clk); } + /* find the irq */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return dev_err_probe(dev, irq, "Failed to get irq source"); + + pm_runtime_enable(dev); + pm_runtime_set_autosuspend_delay(dev, FSPI_RPM_TIMEOUT); + pm_runtime_use_autosuspend(dev); + + /* enable clock */ + ret = pm_runtime_get_sync(f->dev); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to enable clock"); + /* Clear potential interrupts */ reg = fspi_readl(f, f->iobase + FSPI_INTR); if (reg) fspi_writel(f, reg, f->iobase + FSPI_INTR); - /* find the irq */ - ret = platform_get_irq(pdev, 0); + nxp_fspi_default_setup(f); + + ret = pm_runtime_put_sync(dev); if (ret < 0) - goto err_disable_clk; + return dev_err_probe(dev, ret, "Failed to disable clock"); - ret = devm_request_irq(dev, ret, + ret = devm_request_irq(dev, irq, nxp_fspi_irq_handler, 0, pdev->name, f); - if (ret) { - dev_err(dev, "failed to request irq: %d\n", ret); - goto err_disable_clk; - } + if (ret) + return dev_err_probe(dev, ret, "Failed to request irq\n"); - mutex_init(&f->lock); + devm_mutex_init(dev, &f->lock); ctlr->bus_num = -1; ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT; ctlr->mem_ops = &nxp_fspi_mem_ops; ctlr->mem_caps = &nxp_fspi_mem_caps; - - nxp_fspi_default_setup(f); - ctlr->dev.of_node = np; - ret = devm_spi_register_controller(&pdev->dev, ctlr); + ret = devm_add_action_or_reset(dev, nxp_fspi_cleanup, f); if (ret) - goto err_destroy_mutex; + return dev_err_probe(dev, ret, "Failed to register nxp_fspi_cleanup\n"); - return 0; + return devm_spi_register_controller(&pdev->dev, ctlr); +} -err_destroy_mutex: - mutex_destroy(&f->lock); +static int nxp_fspi_runtime_suspend(struct device *dev) +{ + struct nxp_fspi *f = dev_get_drvdata(dev); -err_disable_clk: nxp_fspi_clk_disable_unprep(f); -err_put_ctrl: - spi_controller_put(ctlr); - - dev_err(dev, "NXP FSPI probe failed\n"); - return ret; + return 0; } -static void nxp_fspi_remove(struct platform_device *pdev) +static int nxp_fspi_runtime_resume(struct device *dev) { - struct nxp_fspi *f = platform_get_drvdata(pdev); - - /* disable the hardware */ - fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0); + struct nxp_fspi *f = dev_get_drvdata(dev); + int ret; - nxp_fspi_clk_disable_unprep(f); + ret = nxp_fspi_clk_prep_enable(f); + if (ret) + return ret; - mutex_destroy(&f->lock); + if (f->flags & FSPI_NEED_INIT) { + nxp_fspi_default_setup(f); + ret = pinctrl_pm_select_default_state(dev); + if (ret) + dev_err(dev, "select flexspi default pinctrl failed!\n"); + f->flags &= ~FSPI_NEED_INIT; + } - if (f->ahb_addr) - iounmap(f->ahb_addr); + return ret; } static int nxp_fspi_suspend(struct device *dev) { - return 0; -} - -static int nxp_fspi_resume(struct device *dev) -{ struct nxp_fspi *f = dev_get_drvdata(dev); + int ret; - nxp_fspi_default_setup(f); + ret = pinctrl_pm_select_sleep_state(dev); + if (ret) { + dev_err(dev, "select flexspi sleep pinctrl failed!\n"); + return ret; + 
} - return 0; + f->flags |= FSPI_NEED_INIT; + + return pm_runtime_force_suspend(dev); } +static const struct dev_pm_ops nxp_fspi_pm_ops = { + RUNTIME_PM_OPS(nxp_fspi_runtime_suspend, nxp_fspi_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(nxp_fspi_suspend, pm_runtime_force_resume) +}; + static const struct of_device_id nxp_fspi_dt_ids[] = { { .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, }, { .compatible = "nxp,imx8mm-fspi", .data = (void *)&imx8mm_data, }, @@ -1324,20 +1357,14 @@ static const struct acpi_device_id nxp_fspi_acpi_ids[] = { MODULE_DEVICE_TABLE(acpi, nxp_fspi_acpi_ids); #endif -static const struct dev_pm_ops nxp_fspi_pm_ops = { - .suspend = nxp_fspi_suspend, - .resume = nxp_fspi_resume, -}; - static struct platform_driver nxp_fspi_driver = { .driver = { .name = "nxp-fspi", .of_match_table = nxp_fspi_dt_ids, .acpi_match_table = ACPI_PTR(nxp_fspi_acpi_ids), - .pm = &nxp_fspi_pm_ops, + .pm = pm_ptr(&nxp_fspi_pm_ops), }, .probe = nxp_fspi_probe, - .remove = nxp_fspi_remove, }; module_platform_driver(nxp_fspi_driver); diff --git a/drivers/spi/spi-offload.c b/drivers/spi/spi-offload.c index 6bad042fe437..e674097bf3be 100644 --- a/drivers/spi/spi-offload.c +++ b/drivers/spi/spi-offload.c @@ -183,9 +183,6 @@ static struct spi_offload_trigger guard(mutex)(&trigger->lock); - if (!trigger->ops) - return ERR_PTR(-ENODEV); - if (trigger->ops->request) { ret = trigger->ops->request(trigger, type, args->args, args->nargs); if (ret) @@ -434,7 +431,7 @@ int devm_spi_offload_trigger_register(struct device *dev, { struct spi_offload_trigger *trigger; - if (!info->fwnode || !info->ops) + if (!info->fwnode || !info->ops || !info->ops->match) return -EINVAL; trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c index fc98979eba48..330078b1d50f 100644 --- a/drivers/spi/spi-pci1xxxx.c +++ b/drivers/spi/spi-pci1xxxx.c @@ -741,21 +741,19 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id * if (ret) return -ENOMEM; - ret = pci_request_regions(pdev, DRV_NAME); + ret = pcim_request_all_regions(pdev, DRV_NAME); if (ret) return -ENOMEM; spi_bus->reg_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); - if (!spi_bus->reg_base) { - ret = -EINVAL; - goto error; - } + if (!spi_bus->reg_base) + return -EINVAL; ret = pci_alloc_irq_vectors(pdev, hw_inst_cnt, hw_inst_cnt, PCI_IRQ_ALL_TYPES); if (ret < 0) { dev_err(&pdev->dev, "Error allocating MSI vectors\n"); - goto error; + return ret; } init_completion(&spi_sub_ptr->spi_xfer_done); @@ -773,13 +771,12 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id * if (ret < 0) { dev_err(&pdev->dev, "Unable to request irq : %d", spi_sub_ptr->irq); - ret = -ENODEV; - goto error; + return -ENODEV; } ret = pci1xxxx_spi_dma_init(spi_bus, spi_sub_ptr->irq); if (ret && ret != -EOPNOTSUPP) - goto error; + return ret; /* This register is only applicable for 1st instance */ regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0)); @@ -808,8 +805,7 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id * if (ret < 0) { dev_err(&pdev->dev, "Unable to request irq : %d", spi_sub_ptr->irq); - ret = -ENODEV; - goto error; + return -ENODEV; } } @@ -828,15 +824,11 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id * spi_controller_set_devdata(spi_host, spi_sub_ptr); ret = devm_spi_register_controller(dev, spi_host); if (ret) - goto error; + return ret; } pci_set_drvdata(pdev, spi_bus); 
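As an aside, not part of the patch itself: moving the pci1xxxx (and earlier cavium-thunderx) probes to the managed pcim_* helpers is what lets every error leg collapse into a plain return, because the regions and mappings are released by devres when probe fails or the device is unbound. A minimal sketch of the resulting shape; example_pci_probe and the "example-spi" region name are hypothetical:

static int example_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *base;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_request_all_regions(pdev, "example-spi");
	if (ret)
		return ret;

	base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!base)
		return -ENOMEM;

	/* no error labels, no pci_release_regions() on any exit path */
	return 0;
}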
return 0; - -error: - pci_release_regions(pdev); - return ret; } static void store_restore_config(struct pci1xxxx_spi *spi_ptr, diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c index 94948c8781e8..fd129650434f 100644 --- a/drivers/spi/spi-qpic-snand.c +++ b/drivers/spi/spi-qpic-snand.c @@ -116,7 +116,6 @@ struct qpic_spi_nand { struct nand_ecc_engine ecc_eng; u8 *data_buf; u8 *oob_buf; - u32 wlen; __le32 addr1; __le32 addr2; __le32 cmd; @@ -131,9 +130,9 @@ static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc, int is_last_read_loc) { __le32 locreg_val; - u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | - ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc) - << READ_LOCATION_LAST)); + u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) | + FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) | + FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc); locreg_val = cpu_to_le32(val); @@ -152,9 +151,9 @@ static void qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc, int is_last_read_loc) { __le32 locreg_val; - u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | - ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc) - << READ_LOCATION_LAST)); + u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) | + FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) | + FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc); locreg_val = cpu_to_le32(val); @@ -250,9 +249,11 @@ static const struct mtd_ooblayout_ops qcom_spi_ooblayout = { static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand) { struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); + struct nand_ecc_props *reqs = &nand->ecc.requirements; + struct nand_ecc_props *user = &nand->ecc.user_conf; struct nand_ecc_props *conf = &nand->ecc.ctx.conf; struct mtd_info *mtd = nanddev_to_mtd(nand); - int cwperpage, bad_block_byte; + int cwperpage, bad_block_byte, ret; struct qpic_ecc *ecc_cfg; cwperpage = mtd->writesize / NANDC_STEP_SIZE; @@ -261,11 +262,39 @@ static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand) ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL); if (!ecc_cfg) return -ENOMEM; - snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize, + + if (user->step_size && user->strength) { + ecc_cfg->step_size = user->step_size; + ecc_cfg->strength = user->strength; + } else if (reqs->step_size && reqs->strength) { + ecc_cfg->step_size = reqs->step_size; + ecc_cfg->strength = reqs->strength; + } else { + /* use defaults */ + ecc_cfg->step_size = NANDC_STEP_SIZE; + ecc_cfg->strength = 4; + } + + if (ecc_cfg->step_size != NANDC_STEP_SIZE) { + dev_err(snandc->dev, + "only %u bytes ECC step size is supported\n", + NANDC_STEP_SIZE); + ret = -EOPNOTSUPP; + goto err_free_ecc_cfg; + } + + if (ecc_cfg->strength != 4) { + dev_err(snandc->dev, + "only 4 bits ECC strength is supported\n"); + ret = -EOPNOTSUPP; + goto err_free_ecc_cfg; + } + + snandc->qspi->oob_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); if (!snandc->qspi->oob_buf) { - kfree(ecc_cfg); - return -ENOMEM; + ret = -ENOMEM; + goto err_free_ecc_cfg; } memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize); @@ -280,8 +309,6 @@ static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand) ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size; ecc_cfg->steps = 4; - ecc_cfg->strength = 4; - ecc_cfg->step_size = 512; ecc_cfg->cw_data = 516; ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes; bad_block_byte = mtd->writesize - 
ecc_cfg->cw_size * (cwperpage - 1) + 1; @@ -325,7 +352,7 @@ static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand) FIELD_PREP(ECC_MODE_MASK, 0) | FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw); - ecc_cfg->ecc_buf_cfg = 0x203 << NUM_STEPS; + ecc_cfg->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203); ecc_cfg->clrflashstatus = FS_READY_BSY_N; ecc_cfg->clrreadstatus = 0xc0; @@ -339,6 +366,10 @@ static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand) ecc_cfg->strength, ecc_cfg->step_size); return 0; + +err_free_ecc_cfg: + kfree(ecc_cfg); + return ret; } static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand) @@ -452,7 +483,8 @@ static int qcom_spi_block_erase(struct qcom_nand_controller *snandc) snandc->regs->cmd = snandc->qspi->cmd; snandc->regs->addr0 = snandc->qspi->addr1; snandc->regs->addr1 = snandc->qspi->addr2; - snandc->regs->cfg0 = cpu_to_le32(ecc_cfg->cfg0_raw & ~(7 << CW_PER_PAGE)); + snandc->regs->cfg0 = cpu_to_le32((ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, 0)); snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw); snandc->regs->exec = cpu_to_le32(1); @@ -493,6 +525,22 @@ static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *sna qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0); } +static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt) +{ + int i; + + qcom_nandc_dev_to_mem(snandc, true); + + for (i = 0; i < cw_cnt; i++) { + u32 flash = le32_to_cpu(snandc->reg_read_buf[i]); + + if (flash & (FS_OP_ERR | FS_MPU_ERR)) + return -EIO; + } + + return 0; +} + static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc, const struct spi_mem_op *op) { @@ -513,8 +561,8 @@ static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc, snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col)); snandc->regs->addr1 = snandc->qspi->addr2; - cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) | - 0 << CW_PER_PAGE; + cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, 0); cfg1 = ecc_cfg->cfg1_raw; ecc_bch_cfg = ECC_CFG_ECC_DISABLE; @@ -538,11 +586,9 @@ static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc, return ret; } - qcom_nandc_dev_to_mem(snandc, true); - u32 flash = le32_to_cpu(snandc->reg_read_buf[0]); - - if (flash & (FS_OP_ERR | FS_MPU_ERR)) - return -EIO; + ret = qcom_spi_check_raw_flash_errors(snandc, 1); + if (ret) + return ret; bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1); @@ -556,7 +602,7 @@ static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc, return ret; } -static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_buf, u8 *oob_buf) +static int qcom_spi_check_error(struct qcom_nand_controller *snandc) { struct snandc_read_status *buf; struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; @@ -573,15 +619,6 @@ static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_bu for (i = 0; i < num_cw; i++, buf++) { u32 flash, buffer, erased_cw; - int data_len, oob_len; - - if (i == (num_cw - 1)) { - data_len = NANDC_STEP_SIZE - ((num_cw - 1) << 2); - oob_len = num_cw << 2; - } else { - data_len = ecc_cfg->cw_data; - oob_len = 0; - } flash = le32_to_cpu(buf->snandc_flash); buffer = le32_to_cpu(buf->snandc_buffer); @@ -605,11 +642,6 @@ static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_bu snandc->qspi->ecc_stats.corrected += stat; max_bitflips = max(max_bitflips, stat); } - - if (data_buf) - data_buf 
+= data_len; - if (oob_buf) - oob_buf += oob_len + ecc_cfg->bytes; } if (flash_op_err) @@ -623,22 +655,6 @@ static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_bu return 0; } -static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt) -{ - int i; - - qcom_nandc_dev_to_mem(snandc, true); - - for (i = 0; i < cw_cnt; i++) { - u32 flash = le32_to_cpu(snandc->reg_read_buf[i]); - - if (flash & (FS_OP_ERR | FS_MPU_ERR)) - return -EIO; - } - - return 0; -} - static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf, u8 *oob_buf, int cw) { @@ -656,8 +672,8 @@ static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_bu qcom_clear_bam_transaction(snandc); raw_cw = num_cw - 1; - cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) | - 0 << CW_PER_PAGE; + cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, 0); cfg1 = ecc_cfg->cfg1_raw; ecc_bch_cfg = ECC_CFG_ECC_DISABLE; @@ -763,22 +779,19 @@ static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc, const struct spi_mem_op *op) { struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; - u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start; + u8 *data_buf = NULL, *oob_buf = NULL; int ret, i; u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw; data_buf = op->data.buf.in; - data_buf_start = data_buf; - oob_buf = snandc->qspi->oob_buf; - oob_buf_start = oob_buf; snandc->buf_count = 0; snandc->buf_start = 0; qcom_clear_read_regs(snandc); - cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | - (num_cw - 1) << CW_PER_PAGE; + cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1); cfg1 = ecc_cfg->cfg1; ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; @@ -852,29 +865,26 @@ static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc, return ret; } - return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start); + return qcom_spi_check_error(snandc); } static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc, const struct spi_mem_op *op) { struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; - u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start; + u8 *oob_buf = NULL; int ret, i; u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw; oob_buf = op->data.buf.in; - oob_buf_start = oob_buf; - - data_buf_start = data_buf; snandc->buf_count = 0; snandc->buf_start = 0; qcom_clear_read_regs(snandc); qcom_clear_bam_transaction(snandc); - cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | - (num_cw - 1) << CW_PER_PAGE; + cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1); cfg1 = ecc_cfg->cfg1; ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; @@ -934,7 +944,7 @@ static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc, return ret; } - return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start); + return qcom_spi_check_error(snandc); } static int qcom_spi_read_page(struct qcom_nand_controller *snandc, @@ -984,8 +994,8 @@ static int qcom_spi_program_raw(struct qcom_nand_controller *snandc, int num_cw = snandc->qspi->num_cw; u32 cfg0, cfg1, ecc_bch_cfg; - cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) | - (num_cw - 1) << CW_PER_PAGE; + cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1); cfg1 = ecc_cfg->cfg1_raw; ecc_bch_cfg = ECC_CFG_ECC_DISABLE; @@ -1067,8 +1077,8 @@ static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc, int num_cw = snandc->qspi->num_cw; u32 cfg0, 
cfg1, ecc_bch_cfg, ecc_buf_cfg; - cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | - (num_cw - 1) << CW_PER_PAGE; + cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1); cfg1 = ecc_cfg->cfg1; ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; ecc_buf_cfg = ecc_cfg->ecc_buf_cfg; @@ -1144,8 +1154,8 @@ static int qcom_spi_program_oob(struct qcom_nand_controller *snandc, int num_cw = snandc->qspi->num_cw; u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg; - cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | - (num_cw - 1) << CW_PER_PAGE; + cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1); cfg1 = ecc_cfg->cfg1; ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; ecc_buf_cfg = ecc_cfg->ecc_buf_cfg; @@ -1374,8 +1384,10 @@ static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_ } ret = qcom_submit_descs(snandc); - if (ret) + if (ret) { dev_err(snandc->dev, "failure in submitting descriptor for:%d\n", opcode); + return ret; + } if (copy) { qcom_nandc_dev_to_mem(snandc, true); @@ -1389,7 +1401,7 @@ static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_ memcpy(op->data.buf.in, &val, snandc->buf_count); } - return ret; + return 0; } static bool qcom_spi_is_page_op(const struct spi_mem_op *op) diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c index e0c66a24a3cb..627cffea5d5c 100644 --- a/drivers/spi/spi-rpc-if.c +++ b/drivers/spi/spi-rpc-if.c @@ -75,6 +75,19 @@ static bool rpcif_spi_mem_supports_op(struct spi_mem *mem, return true; } +static ssize_t xspi_spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, + u64 offs, size_t len, const void *buf) +{ + struct rpcif *rpc = spi_controller_get_devdata(desc->mem->spi->controller); + + if (offs + desc->info.offset + len > U32_MAX) + return -EINVAL; + + rpcif_spi_mem_prepare(desc->mem->spi, &desc->info.op_tmpl, &offs, &len); + + return xspi_dirmap_write(rpc->dev, offs, len, buf); +} + static ssize_t rpcif_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, void *buf) { @@ -103,7 +116,7 @@ static int rpcif_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc) if (!rpc->dirmap) return -EOPNOTSUPP; - if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN) + if (!rpc->xspi && desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN) return -EOPNOTSUPP; return 0; @@ -125,6 +138,7 @@ static const struct spi_controller_mem_ops rpcif_spi_mem_ops = { .exec_op = rpcif_spi_mem_exec_op, .dirmap_create = rpcif_spi_mem_dirmap_create, .dirmap_read = rpcif_spi_mem_dirmap_read, + .dirmap_write = xspi_spi_mem_dirmap_write, }; static int rpcif_spi_probe(struct platform_device *pdev) diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index d9e3d83fc7e8..94a867967e02 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -63,135 +63,6 @@ struct sh_msiof_spi_priv { #define MAX_SS 3 /* Maximum number of native chip selects */ -#define SITMDR1 0x00 /* Transmit Mode Register 1 */ -#define SITMDR2 0x04 /* Transmit Mode Register 2 */ -#define SITMDR3 0x08 /* Transmit Mode Register 3 */ -#define SIRMDR1 0x10 /* Receive Mode Register 1 */ -#define SIRMDR2 0x14 /* Receive Mode Register 2 */ -#define SIRMDR3 0x18 /* Receive Mode Register 3 */ -#define SITSCR 0x20 /* Transmit Clock Select Register */ -#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */ -#define SICTR 0x28 /* Control Register */ -#define SIFCTR 0x30 /* FIFO Control Register */ -#define SISTR 0x40 /* Status Register */ -#define SIIER 0x44 /* Interrupt Enable 
Register */ -#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */ -#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */ -#define SITFDR 0x50 /* Transmit FIFO Data Register */ -#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */ -#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */ -#define SIRFDR 0x60 /* Receive FIFO Data Register */ - -/* SITMDR1 and SIRMDR1 */ -#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */ -#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */ -#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */ -#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */ -#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ -#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ -#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */ -#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */ -#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */ -#define SIMDR1_FLD_SHIFT 2 -#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */ -/* SITMDR1 */ -#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */ -#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */ -#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */ - -/* SITMDR2 and SIRMDR2 */ -#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */ -#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */ -#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */ - -/* SITSCR and SIRSCR */ -#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */ -#define SISCR_BRPS(i) (((i) - 1) << 8) -#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */ -#define SISCR_BRDV_DIV_2 0 -#define SISCR_BRDV_DIV_4 1 -#define SISCR_BRDV_DIV_8 2 -#define SISCR_BRDV_DIV_16 3 -#define SISCR_BRDV_DIV_32 4 -#define SISCR_BRDV_DIV_1 7 - -/* SICTR */ -#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */ -#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */ -#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */ -#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */ -#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */ -#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */ -#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */ -#define SICTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */ -#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */ -#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */ -#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */ -#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */ -#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */ -#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */ -#define SICTR_TXE BIT(9) /* Transmit Enable */ -#define SICTR_RXE BIT(8) /* Receive Enable */ -#define SICTR_TXRST BIT(1) /* Transmit Reset */ -#define SICTR_RXRST BIT(0) /* Receive Reset */ - -/* SIFCTR */ -#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */ -#define SIFCTR_TFWM_64 (0UL << 29) /* Transfer Request when 64 empty stages */ -#define SIFCTR_TFWM_32 (1UL << 29) /* Transfer Request when 32 empty stages */ -#define SIFCTR_TFWM_24 (2UL << 29) /* Transfer Request when 24 empty stages */ -#define SIFCTR_TFWM_16 (3UL << 29) /* Transfer Request when 16 empty stages */ -#define 
SIFCTR_TFWM_12 (4UL << 29) /* Transfer Request when 12 empty stages */ -#define SIFCTR_TFWM_8 (5UL << 29) /* Transfer Request when 8 empty stages */ -#define SIFCTR_TFWM_4 (6UL << 29) /* Transfer Request when 4 empty stages */ -#define SIFCTR_TFWM_1 (7UL << 29) /* Transfer Request when 1 empty stage */ -#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */ -#define SIFCTR_TFUA_SHIFT 20 -#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT) -#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */ -#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */ -#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */ -#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */ -#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */ -#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */ -#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */ -#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */ -#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */ -#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */ -#define SIFCTR_RFUA_SHIFT 4 -#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT) - -/* SISTR */ -#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */ -#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */ -#define SISTR_TEOF BIT(23) /* Frame Transmission End */ -#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */ -#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */ -#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */ -#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */ -#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */ -#define SISTR_REOF BIT(7) /* Frame Reception End */ -#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */ -#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */ -#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */ - -/* SIIER */ -#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */ -#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */ -#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */ -#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */ -#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */ -#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */ -#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */ -#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. 
Enable */ -#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */ -#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */ -#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */ -#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */ -#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */ -#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */ - - static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs) { switch (reg_offs) { @@ -256,11 +127,6 @@ static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p) 100); } -static const u32 sh_msiof_spi_div_array[] = { - SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4, - SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32, -}; - static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, struct spi_transfer *t) { @@ -299,7 +165,9 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, t->effective_speed_hz = parent_rate / (brps << div_pow); - scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps); + /* div_pow == 0 maps to SISCR_BRDV_DIV_1 == all ones */ + scr = FIELD_PREP(SISCR_BRDV, div_pow - 1) | + FIELD_PREP(SISCR_BRPS, brps - 1); sh_msiof_write(p, SITSCR, scr); if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) sh_msiof_write(p, SIRSCR, scr); @@ -341,18 +209,19 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p) return 0; } - val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT; - val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT; + val = FIELD_PREP(SIMDR1_DTDL, sh_msiof_get_delay_bit(p->info->dtdl)) | + FIELD_PREP(SIMDR1_SYNCDL, + sh_msiof_get_delay_bit(p->info->syncdl)); return val; } static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss, - u32 cpol, u32 cpha, - u32 tx_hi_z, u32 lsb_first, u32 cs_high) + bool cpol, bool cpha, bool tx_hi_z, + bool lsb_first, bool cs_high) { + bool edge; u32 tmp; - int edge; /* * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG @@ -361,16 +230,18 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss, * 1 0 11 11 0 0 * 1 1 11 11 1 1 */ - tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP; - tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT; - tmp |= lsb_first << SIMDR1_BITLSB_SHIFT; + tmp = FIELD_PREP(SIMDR1_SYNCMD, SIMDR1_SYNCMD_SPI) | + FIELD_PREP(SIMDR1_FLD, 1) | SIMDR1_XXSTP | + FIELD_PREP(SIMDR1_SYNCAC, !cs_high) | + FIELD_PREP(SIMDR1_BITLSB, lsb_first); tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p); if (spi_controller_is_target(p->ctlr)) { sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON); } else { sh_msiof_write(p, SITMDR1, tmp | SIMDR1_TRMD | SITMDR1_PCON | - (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT); + FIELD_PREP(SITMDR1_SYNCCH, + ss < MAX_SS ? ss : 0)); } if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) { /* These bits are reserved if RX needs TX */ @@ -379,30 +250,42 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss, sh_msiof_write(p, SIRMDR1, tmp); tmp = 0; - tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT; - tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT; + tmp |= SICTR_TSCKIZ_SCK | FIELD_PREP(SICTR_TSCKIZ_POL, cpol); + tmp |= SICTR_RSCKIZ_SCK | FIELD_PREP(SICTR_RSCKIZ_POL, cpol); edge = cpol ^ !cpha; - tmp |= edge << SICTR_TEDG_SHIFT; - tmp |= edge << SICTR_REDG_SHIFT; - tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW; + tmp |= FIELD_PREP(SICTR_TEDG, edge); + tmp |= FIELD_PREP(SICTR_REDG, edge); + tmp |= FIELD_PREP(SICTR_TXDIZ, + tx_hi_z ? 
SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW); sh_msiof_write(p, SICTR, tmp); } static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, const void *tx_buf, void *rx_buf, - u32 bits, u32 words) + u32 bits, u32 words1, u32 words2) { - u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words); + u32 dr2 = FIELD_PREP(SIMDR2_GRP, words2 ? 1 : 0) | + FIELD_PREP(SIMDR2_BITLEN1, bits - 1) | + FIELD_PREP(SIMDR2_WDLEN1, words1 - 1); if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) sh_msiof_write(p, SITMDR2, dr2); else - sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1); + sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK); if (rx_buf) sh_msiof_write(p, SIRMDR2, dr2); + + if (words2) { + u32 dr3 = FIELD_PREP(SIMDR3_BITLEN2, bits - 1) | + FIELD_PREP(SIMDR3_WDLEN2, words2 - 1); + + sh_msiof_write(p, SITMDR3, dr3); + if (rx_buf) + sh_msiof_write(p, SIRMDR3, dr3); + } } static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) @@ -412,140 +295,154 @@ static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) } static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, unsigned int words, + unsigned int fs) { const u8 *buf_8 = tx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) sh_msiof_write(p, SITFDR, buf_8[k] << fs); } static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, unsigned int words, + unsigned int fs) { const u16 *buf_16 = tx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) sh_msiof_write(p, SITFDR, buf_16[k] << fs); } static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, unsigned int words, + unsigned int fs) { const u16 *buf_16 = tx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs); } static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, unsigned int words, + unsigned int fs) { const u32 *buf_32 = tx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) sh_msiof_write(p, SITFDR, buf_32[k] << fs); } static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, unsigned int words, + unsigned int fs) { const u32 *buf_32 = tx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs); } static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, unsigned int words, + unsigned int fs) { const u32 *buf_32 = tx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs)); } static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, + unsigned int words, unsigned int fs) { const u32 *buf_32 = tx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs)); } static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u8 *buf_8 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, - void *rx_buf, int 
words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u16 *buf_16 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u16 *buf_16 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]); } static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u32 *buf_32 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u32 *buf_32 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]); } static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u32 *buf_32 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs); } static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u32 *buf_32 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]); @@ -565,12 +462,12 @@ static int sh_msiof_spi_setup(struct spi_device *spi) return 0; /* Configure native chip select mode/polarity early */ - clr = SIMDR1_SYNCMD_MASK; - set = SIMDR1_SYNCMD_SPI; + clr = SIMDR1_SYNCMD; + set = FIELD_PREP(SIMDR1_SYNCMD, SIMDR1_SYNCMD_SPI); if (spi->mode & SPI_CS_HIGH) - clr |= BIT(SIMDR1_SYNCAC_SHIFT); + clr |= SIMDR1_SYNCAC; else - set |= BIT(SIMDR1_SYNCAC_SHIFT); + set |= SIMDR1_SYNCAC; pm_runtime_get_sync(&p->pdev->dev); tmp = sh_msiof_read(p, SITMDR1) & ~clr; sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON); @@ -587,7 +484,8 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr, { struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); const struct spi_device *spi = msg->spi; - u32 ss, cs_high; + bool cs_high; + u32 ss; /* Configure pins before asserting CS */ if (spi_get_csgpiod(spi, 0)) { @@ -595,12 +493,11 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr, cs_high = p->native_cs_high; } else { ss = spi_get_chipselect(spi, 0); - cs_high = !!(spi->mode & SPI_CS_HIGH); + cs_high = spi->mode & SPI_CS_HIGH; } - sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL), - !!(spi->mode & SPI_CPHA), - !!(spi->mode & SPI_3WIRE), - !!(spi->mode & SPI_LSB_FIRST), cs_high); + sh_msiof_spi_set_pin_regs(p, ss, spi->mode & SPI_CPOL, + spi->mode & SPI_CPHA, spi->mode & SPI_3WIRE, + spi->mode & SPI_LSB_FIRST, cs_high); return 0; } @@ -673,20 +570,22 @@ static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p, static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, void (*tx_fifo)(struct sh_msiof_spi_priv *, - const void *, int, int), + const void *, unsigned int, + unsigned int), void (*rx_fifo)(struct sh_msiof_spi_priv *, - void *, int, int), + void *, unsigned int, + unsigned int), const void *tx_buf, void *rx_buf, - int words, int bits) + unsigned int 
words, unsigned int bits) { - int fifo_shift; + unsigned int fifo_shift; int ret; /* limit maximum word transfer to rx/tx fifo size */ if (tx_buf) - words = min_t(int, words, p->tx_fifo_size); + words = min(words, p->tx_fifo_size); if (rx_buf) - words = min_t(int, words, p->rx_fifo_size); + words = min(words, p->rx_fifo_size); /* the fifo contents need shifting */ fifo_shift = 32 - bits; @@ -695,7 +594,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, sh_msiof_write(p, SIFCTR, 0); /* setup msiof transfer mode registers */ - sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words); + sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words, 0); sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE); /* write tx fifo */ @@ -745,10 +644,12 @@ static void sh_msiof_dma_complete(void *arg) } static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, - void *rx, unsigned int len) + void *rx, unsigned int len, + unsigned int max_wdlen) { u32 ier_bits = 0; struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL; + unsigned int words1, words2; dma_cookie_t cookie; int ret; @@ -790,10 +691,14 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } /* 1 stage FIFO watermarks for DMA */ - sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1); + sh_msiof_write(p, SIFCTR, + FIELD_PREP(SIFCTR_TFWM, SIFCTR_TFWM_1) | + FIELD_PREP(SIFCTR_RFWM, SIFCTR_RFWM_1)); /* setup msiof transfer mode registers (32-bit words) */ - sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4); + words1 = min(len / 4, max_wdlen); + words2 = len / 4 - words1; + sh_msiof_spi_set_mode_regs(p, tx, rx, 32, words1, words2); sh_msiof_write(p, SIIER, ier_bits); @@ -912,9 +817,12 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr, struct spi_transfer *t) { struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); + unsigned int max_wdlen = FIELD_MAX(SIMDR2_WDLEN1) + 1; void (*copy32)(u32 *, const u32 *, unsigned int); - void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int); - void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int); + void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, unsigned int, + unsigned int); + void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, unsigned int, + unsigned int); const void *tx_buf = t->tx_buf; void *rx_buf = t->rx_buf; unsigned int len = t->len; @@ -932,17 +840,17 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr, if (!spi_controller_is_target(p->ctlr)) sh_msiof_spi_set_clk_regs(p, t); + if (tx_buf) + max_wdlen = min(max_wdlen, p->tx_fifo_size); + if (rx_buf) + max_wdlen = min(max_wdlen, p->rx_fifo_size); + while (ctlr->dma_tx && len > 15) { /* * DMA supports 32-bit words only, hence pack 8-bit and 16-bit * words, with byte resp. word swapping. 
*/ - unsigned int l = 0; - - if (tx_buf) - l = min(round_down(len, 4), p->tx_fifo_size * 4); - if (rx_buf) - l = min(round_down(len, 4), p->rx_fifo_size * 4); + unsigned int l = min(round_down(len, 4), 2 * max_wdlen * 4); if (bits <= 8) { copy32 = copy_bswap32; @@ -955,7 +863,7 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr, if (tx_buf) copy32(p->tx_dma_page, tx_buf, l / 4); - ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l); + ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l, max_wdlen); if (ret == -EAGAIN) { dev_warn_once(&p->pdev->dev, "DMA not available, falling back to PIO\n"); @@ -1062,7 +970,7 @@ static const struct sh_msiof_chipdata rcar_gen2_data = { .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(24) | SPI_BPW_MASK(32), .tx_fifo_size = 64, - .rx_fifo_size = 64, + .rx_fifo_size = 128, .ctlr_flags = SPI_CONTROLLER_MUST_TX, .min_div_pow = 0, }; @@ -1071,7 +979,16 @@ static const struct sh_msiof_chipdata rcar_gen3_data = { .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(24) | SPI_BPW_MASK(32), .tx_fifo_size = 64, - .rx_fifo_size = 64, + .rx_fifo_size = 256, + .ctlr_flags = SPI_CONTROLLER_MUST_TX, + .min_div_pow = 1, +}; + +static const struct sh_msiof_chipdata rcar_gen4_data = { + .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | + SPI_BPW_MASK(24) | SPI_BPW_MASK(32), + .tx_fifo_size = 256, + .rx_fifo_size = 256, .ctlr_flags = SPI_CONTROLLER_MUST_TX, .min_div_pow = 1, }; @@ -1080,7 +997,7 @@ static const struct sh_msiof_chipdata rcar_r8a7795_data = { .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(24) | SPI_BPW_MASK(32), .tx_fifo_size = 64, - .rx_fifo_size = 64, + .rx_fifo_size = 256, .ctlr_flags = SPI_CONTROLLER_MUST_TX, .min_div_pow = 1, .flags = SH_MSIOF_FLAG_FIXED_DTDL_200, @@ -1088,20 +1005,14 @@ static const struct sh_msiof_chipdata rcar_r8a7795_data = { static const struct of_device_id sh_msiof_match[] __maybe_unused = { { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data }, - { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data }, - { .compatible = "renesas,msiof-r8a7745", .data = &rcar_gen2_data }, - { .compatible = "renesas,msiof-r8a7790", .data = &rcar_gen2_data }, - { .compatible = "renesas,msiof-r8a7791", .data = &rcar_gen2_data }, - { .compatible = "renesas,msiof-r8a7792", .data = &rcar_gen2_data }, - { .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data }, - { .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data }, { .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data }, { .compatible = "renesas,msiof-r8a7795", .data = &rcar_r8a7795_data }, - { .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data }, { .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data }, - { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data }, + { .compatible = "renesas,msiof-r8a779a0", .data = &rcar_gen3_data }, + { .compatible = "renesas,msiof-r8a779f0", .data = &rcar_gen3_data }, + { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen4_data }, { .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */ - {}, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sh_msiof_match); @@ -1339,7 +1250,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) goto err1; } - ret = devm_request_irq(dev, i, sh_msiof_spi_irq, 0, dev_name(&pdev->dev), p); + ret = devm_request_irq(dev, i, sh_msiof_spi_irq, 0, dev_name(dev), p); if (ret) { dev_err(dev, "unable to request irq\n"); goto err1; diff --git 
a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c index 9ec9823409cc..7c1fa55fbc47 100644 --- a/drivers/spi/spi-stm32-ospi.c +++ b/drivers/spi/spi-stm32-ospi.c @@ -804,7 +804,7 @@ static int stm32_ospi_get_resources(struct platform_device *pdev) return ret; } - ospi->rstc = devm_reset_control_array_get_optional_exclusive(dev); + ospi->rstc = devm_reset_control_array_get_exclusive(dev); if (IS_ERR(ospi->rstc)) return dev_err_probe(dev, PTR_ERR(ospi->rstc), "Can't get reset\n"); diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index 64e1b2f8a000..3581757a269b 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -22,6 +22,7 @@ #include <linux/spi/spi.h> #include <linux/acpi.h> #include <linux/property.h> +#include <linux/sizes.h> #define QSPI_COMMAND1 0x000 #define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0) @@ -110,6 +111,9 @@ #define QSPI_DMA_BLK 0x024 #define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0) +#define QSPI_DMA_MEM_ADDRESS 0x028 +#define QSPI_DMA_HI_ADDRESS 0x02c + #define QSPI_TX_FIFO 0x108 #define QSPI_RX_FIFO 0x188 @@ -134,7 +138,7 @@ #define QSPI_COMMAND_VALUE_SET(X) (((x) & 0xFF) << 0) #define QSPI_CMB_SEQ_CMD_CFG 0x1a0 -#define QSPI_COMMAND_X1_X2_X4(x) (((x) & 0x3) << 13) +#define QSPI_COMMAND_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13) #define QSPI_COMMAND_X1_X2_X4_MASK (0x03 << 13) #define QSPI_COMMAND_SDR_DDR BIT(12) #define QSPI_COMMAND_SIZE_SET(x) (((x) & 0xFF) << 0) @@ -147,7 +151,7 @@ #define QSPI_ADDRESS_VALUE_SET(X) (((x) & 0xFFFF) << 0) #define QSPI_CMB_SEQ_ADDR_CFG 0x1ac -#define QSPI_ADDRESS_X1_X2_X4(x) (((x) & 0x3) << 13) +#define QSPI_ADDRESS_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13) #define QSPI_ADDRESS_X1_X2_X4_MASK (0x03 << 13) #define QSPI_ADDRESS_SDR_DDR BIT(12) #define QSPI_ADDRESS_SIZE_SET(x) (((x) & 0xFF) << 0) @@ -156,15 +160,19 @@ #define DATA_DIR_RX BIT(1) #define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000)) -#define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024) -#define CMD_TRANSFER 0 -#define ADDR_TRANSFER 1 -#define DATA_TRANSFER 2 +#define DEFAULT_QSPI_DMA_BUF_LEN SZ_64K + +enum tegra_qspi_transfer_type { + CMD_TRANSFER = 0, + ADDR_TRANSFER = 1, + DUMMY_TRANSFER = 2, + DATA_TRANSFER = 3 +}; struct tegra_qspi_soc_data { - bool has_dma; bool cmb_xfer_capable; bool supports_tpm; + bool has_ext_dma; unsigned int cs_count; }; @@ -600,13 +608,16 @@ static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_trans len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4; - dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE); - dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE); + if (t->tx_buf) + dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE); + if (t->rx_buf) + dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE); } static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t) { struct dma_slave_config dma_sconfig = { 0 }; + dma_addr_t rx_dma_phys, tx_dma_phys; unsigned int len; u8 dma_burst; int ret = 0; @@ -629,60 +640,86 @@ static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct len = tqspi->curr_dma_words * 4; /* set attention level based on length of transfer */ - val = 0; - if (len & 0xf) { - val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1; - dma_burst = 1; - } else if (((len) >> 4) & 0x1) { - val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4; - dma_burst = 4; - } else { - val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8; - dma_burst = 8; + if (tqspi->soc_data->has_ext_dma) { + val = 0; + if (len & 
0xf) { + val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1; + dma_burst = 1; + } else if (((len) >> 4) & 0x1) { + val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4; + dma_burst = 4; + } else { + val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8; + dma_burst = 8; + } + + tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL); } - tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL); tqspi->dma_control_reg = val; dma_sconfig.device_fc = true; + if (tqspi->cur_direction & DATA_DIR_TX) { - dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO; - dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - dma_sconfig.dst_maxburst = dma_burst; - ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig); - if (ret < 0) { - dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); - return ret; - } + if (tqspi->tx_dma_chan) { + dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO; + dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma_sconfig.dst_maxburst = dma_burst; + ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig); + if (ret < 0) { + dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); + return ret; + } - tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t); - ret = tegra_qspi_start_tx_dma(tqspi, t, len); - if (ret < 0) { - dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret); - return ret; + tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t); + ret = tegra_qspi_start_tx_dma(tqspi, t, len); + if (ret < 0) { + dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret); + return ret; + } + } else { + if (tqspi->is_packed) + tx_dma_phys = t->tx_dma; + else + tx_dma_phys = tqspi->tx_dma_phys; + tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t); + tegra_qspi_writel(tqspi, lower_32_bits(tx_dma_phys), + QSPI_DMA_MEM_ADDRESS); + tegra_qspi_writel(tqspi, (upper_32_bits(tx_dma_phys) & 0xff), + QSPI_DMA_HI_ADDRESS); } } if (tqspi->cur_direction & DATA_DIR_RX) { - dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO; - dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - dma_sconfig.src_maxburst = dma_burst; - ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig); - if (ret < 0) { - dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); - return ret; - } - - dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys, - tqspi->dma_buf_size, - DMA_FROM_DEVICE); + if (tqspi->rx_dma_chan) { + dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO; + dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma_sconfig.src_maxburst = dma_burst; + ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig); + if (ret < 0) { + dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); + return ret; + } - ret = tegra_qspi_start_rx_dma(tqspi, t, len); - if (ret < 0) { - dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret); - if (tqspi->cur_direction & DATA_DIR_TX) - dmaengine_terminate_all(tqspi->tx_dma_chan); - return ret; + dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys, + tqspi->dma_buf_size, DMA_FROM_DEVICE); + ret = tegra_qspi_start_rx_dma(tqspi, t, len); + if (ret < 0) { + dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret); + if (tqspi->cur_direction & DATA_DIR_TX) + dmaengine_terminate_all(tqspi->tx_dma_chan); + return ret; + } + } else { + if (tqspi->is_packed) + rx_dma_phys = t->rx_dma; + else + rx_dma_phys = tqspi->rx_dma_phys; + + tegra_qspi_writel(tqspi, lower_32_bits(rx_dma_phys), + QSPI_DMA_MEM_ADDRESS); + tegra_qspi_writel(tqspi, (upper_32_bits(rx_dma_phys) & 0xff), + QSPI_DMA_HI_ADDRESS); } } @@ -721,9 +758,6 @@ static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi 
*qspi, struct s static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi) { - if (!tqspi->soc_data->has_dma) - return; - if (tqspi->tx_dma_buf) { dma_free_coherent(tqspi->dev, tqspi->dma_buf_size, tqspi->tx_dma_buf, tqspi->tx_dma_phys); @@ -754,16 +788,29 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi) u32 *dma_buf; int err; - if (!tqspi->soc_data->has_dma) - return 0; + if (tqspi->soc_data->has_ext_dma) { + dma_chan = dma_request_chan(tqspi->dev, "rx"); + if (IS_ERR(dma_chan)) { + err = PTR_ERR(dma_chan); + goto err_out; + } - dma_chan = dma_request_chan(tqspi->dev, "rx"); - if (IS_ERR(dma_chan)) { - err = PTR_ERR(dma_chan); - goto err_out; - } + tqspi->rx_dma_chan = dma_chan; - tqspi->rx_dma_chan = dma_chan; + dma_chan = dma_request_chan(tqspi->dev, "tx"); + if (IS_ERR(dma_chan)) { + err = PTR_ERR(dma_chan); + goto err_out; + } + + tqspi->tx_dma_chan = dma_chan; + } else { + if (!device_iommu_mapped(tqspi->dev)) { + dev_warn(tqspi->dev, + "IOMMU not enabled in device-tree, falling back to PIO mode\n"); + return 0; + } + } dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL); if (!dma_buf) { @@ -774,14 +821,6 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi) tqspi->rx_dma_buf = dma_buf; tqspi->rx_dma_phys = dma_phys; - dma_chan = dma_request_chan(tqspi->dev, "tx"); - if (IS_ERR(dma_chan)) { - err = PTR_ERR(dma_chan); - goto err_out; - } - - tqspi->tx_dma_chan = dma_chan; - dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL); if (!dma_buf) { err = -ENOMEM; @@ -1036,10 +1075,6 @@ static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len) { u32 addr_config = 0; - /* Extract Address configuration and value */ - is_ddr = 0; //Only SDR mode supported - bus_width = 0; //X1 mode - if (is_ddr) addr_config |= QSPI_ADDRESS_SDR_DDR; else @@ -1079,16 +1114,23 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, switch (transfer_phase) { case CMD_TRANSFER: /* X1 SDR mode */ - cmd_config = tegra_qspi_cmd_config(false, 0, + cmd_config = tegra_qspi_cmd_config(false, xfer->tx_nbits, xfer->len); cmd_value = *((const u8 *)(xfer->tx_buf)); break; case ADDR_TRANSFER: /* X1 SDR mode */ - addr_config = tegra_qspi_addr_config(false, 0, + addr_config = tegra_qspi_addr_config(false, xfer->tx_nbits, xfer->len); address_value = *((const u32 *)(xfer->tx_buf)); break; + case DUMMY_TRANSFER: + if (xfer->dummy_data) { + tqspi->dummy_cycles = xfer->len * 8 / xfer->tx_nbits; + break; + } + transfer_phase++; + fallthrough; case DATA_TRANSFER: /* Program Command, Address value in register */ tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD); @@ -1120,15 +1162,14 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, if (WARN_ON_ONCE(ret == 0)) { dev_err_ratelimited(tqspi->dev, "QSPI Transfer failed with timeout\n"); - if (tqspi->is_curr_dma_xfer && - (tqspi->cur_direction & DATA_DIR_TX)) - dmaengine_terminate_all - (tqspi->tx_dma_chan); - - if (tqspi->is_curr_dma_xfer && - (tqspi->cur_direction & DATA_DIR_RX)) - dmaengine_terminate_all - (tqspi->rx_dma_chan); + if (tqspi->is_curr_dma_xfer) { + if ((tqspi->cur_direction & DATA_DIR_TX) && + tqspi->tx_dma_chan) + dmaengine_terminate_all(tqspi->tx_dma_chan); + if ((tqspi->cur_direction & DATA_DIR_RX) && + tqspi->rx_dma_chan) + dmaengine_terminate_all(tqspi->rx_dma_chan); + } /* Abort transfer by resetting pio/dma bit */ if (!tqspi->is_curr_dma_xfer) { @@ -1163,26 +1204,22 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, ret = 
-EIO; goto exit; } - if (!xfer->cs_change) { - tegra_qspi_transfer_end(spi); - spi_transfer_delay_exec(xfer); - } break; default: ret = -EINVAL; goto exit; } msg->actual_length += xfer->len; + if (!xfer->cs_change && transfer_phase == DATA_TRANSFER) { + tegra_qspi_transfer_end(spi); + spi_transfer_delay_exec(xfer); + } transfer_phase++; } ret = 0; exit: msg->status = ret; - if (ret < 0) { - tegra_qspi_transfer_end(spi); - spi_transfer_delay_exec(xfer); - } return ret; } @@ -1247,10 +1284,12 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi, QSPI_DMA_TIMEOUT); if (WARN_ON(ret == 0)) { dev_err(tqspi->dev, "transfer timeout\n"); - if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX)) - dmaengine_terminate_all(tqspi->tx_dma_chan); - if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX)) - dmaengine_terminate_all(tqspi->rx_dma_chan); + if (tqspi->is_curr_dma_xfer) { + if ((tqspi->cur_direction & DATA_DIR_TX) && tqspi->tx_dma_chan) + dmaengine_terminate_all(tqspi->tx_dma_chan); + if ((tqspi->cur_direction & DATA_DIR_RX) && tqspi->rx_dma_chan) + dmaengine_terminate_all(tqspi->rx_dma_chan); + } tegra_qspi_handle_error(tqspi); ret = -EIO; goto complete_xfer; @@ -1300,7 +1339,9 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi, list_for_each_entry(xfer, &msg->transfers, transfer_list) { transfer_count++; } - if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3) + if (!tqspi->soc_data->cmb_xfer_capable) + return false; + if (transfer_count > 4 || transfer_count < 3) return false; xfer = list_first_entry(&msg->transfers, typeof(*xfer), transfer_list); @@ -1310,7 +1351,14 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi, if (xfer->len > 4 || xfer->len < 3) return false; xfer = list_next_entry(xfer, transfer_list); - if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2)) + if (transfer_count == 4) { + if (xfer->dummy_data != 1) + return false; + if ((xfer->len * 8 / xfer->tx_nbits) > QSPI_DUMMY_CYCLES_MAX) + return false; + xfer = list_next_entry(xfer, transfer_list); + } + if (!tqspi->soc_data->has_ext_dma && xfer->len > (QSPI_FIFO_DEPTH << 2)) return false; return true; @@ -1371,41 +1419,43 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi) unsigned int total_fifo_words; unsigned long flags; long wait_status; - int err = 0; + int num_errors = 0; if (tqspi->cur_direction & DATA_DIR_TX) { if (tqspi->tx_status) { - dmaengine_terminate_all(tqspi->tx_dma_chan); - err += 1; - } else { + if (tqspi->tx_dma_chan) + dmaengine_terminate_all(tqspi->tx_dma_chan); + num_errors++; + } else if (tqspi->tx_dma_chan) { wait_status = wait_for_completion_interruptible_timeout( &tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT); if (wait_status <= 0) { dmaengine_terminate_all(tqspi->tx_dma_chan); dev_err(tqspi->dev, "failed TX DMA transfer\n"); - err += 1; + num_errors++; } } } if (tqspi->cur_direction & DATA_DIR_RX) { if (tqspi->rx_status) { - dmaengine_terminate_all(tqspi->rx_dma_chan); - err += 2; - } else { + if (tqspi->rx_dma_chan) + dmaengine_terminate_all(tqspi->rx_dma_chan); + num_errors++; + } else if (tqspi->rx_dma_chan) { wait_status = wait_for_completion_interruptible_timeout( &tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT); if (wait_status <= 0) { dmaengine_terminate_all(tqspi->rx_dma_chan); dev_err(tqspi->dev, "failed RX DMA transfer\n"); - err += 2; + num_errors++; } } } spin_lock_irqsave(&tqspi->lock, flags); - if (err) { + if (num_errors) { tegra_qspi_dma_unmap_xfer(tqspi, t); 
tegra_qspi_handle_error(tqspi); complete(&tqspi->xfer_completion); @@ -1431,9 +1481,9 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi) /* continue transfer in current message */ total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t); if (total_fifo_words > QSPI_FIFO_DEPTH) - err = tegra_qspi_start_dma_based_transfer(tqspi, t); + num_errors = tegra_qspi_start_dma_based_transfer(tqspi, t); else - err = tegra_qspi_start_cpu_based_transfer(tqspi, t); + num_errors = tegra_qspi_start_cpu_based_transfer(tqspi, t); exit: spin_unlock_irqrestore(&tqspi->lock, flags); @@ -1461,28 +1511,28 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data) } static struct tegra_qspi_soc_data tegra210_qspi_soc_data = { - .has_dma = true, + .has_ext_dma = true, .cmb_xfer_capable = false, .supports_tpm = false, .cs_count = 1, }; static struct tegra_qspi_soc_data tegra186_qspi_soc_data = { - .has_dma = true, + .has_ext_dma = true, .cmb_xfer_capable = true, .supports_tpm = false, .cs_count = 1, }; static struct tegra_qspi_soc_data tegra234_qspi_soc_data = { - .has_dma = false, + .has_ext_dma = false, .cmb_xfer_capable = true, .supports_tpm = true, .cs_count = 1, }; static struct tegra_qspi_soc_data tegra241_qspi_soc_data = { - .has_dma = false, + .has_ext_dma = true, .cmb_xfer_capable = true, .supports_tpm = true, .cs_count = 4, diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c index 3bd0149d8f4e..1a40c4866ce1 100644 --- a/drivers/spi/spi-xcomm.c +++ b/drivers/spi/spi-xcomm.c @@ -44,8 +44,8 @@ struct spi_xcomm { u8 buf[63]; }; -static void spi_xcomm_gpio_set_value(struct gpio_chip *chip, - unsigned int offset, int val) +static int spi_xcomm_gpio_set_value(struct gpio_chip *chip, + unsigned int offset, int val) { struct spi_xcomm *spi_xcomm = gpiochip_get_data(chip); unsigned char buf[2]; @@ -53,7 +53,7 @@ static void spi_xcomm_gpio_set_value(struct gpio_chip *chip, buf[0] = SPI_XCOMM_CMD_GPIO_SET; buf[1] = !!val; - i2c_master_send(spi_xcomm->i2c, buf, 2); + return i2c_master_send(spi_xcomm->i2c, buf, 2); } static int spi_xcomm_gpio_get_direction(struct gpio_chip *chip, @@ -70,7 +70,7 @@ static int spi_xcomm_gpio_add(struct spi_xcomm *spi_xcomm) return 0; spi_xcomm->gc.get_direction = spi_xcomm_gpio_get_direction; - spi_xcomm->gc.set = spi_xcomm_gpio_set_value; + spi_xcomm->gc.set_rv = spi_xcomm_gpio_set_value; spi_xcomm->gc.can_sleep = 1; spi_xcomm->gc.base = -1; spi_xcomm->gc.ngpio = 1; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 90e27729ef6b..1bc0fdbb1bd7 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1076,10 +1076,8 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) * Avoid calling into the driver (or doing delays) if the chip select * isn't actually changing from the last time this was called. */ - if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask && - spi_is_last_cs(spi)) || - (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask && - !spi_is_last_cs(spi))) && + if (!force && (enable == spi_is_last_cs(spi)) && + (spi->controller->last_cs_index_mask == spi->cs_index_mask) && (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH))) return; @@ -1088,9 +1086,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) spi->controller->last_cs_index_mask = spi->cs_index_mask; for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) spi->controller->last_cs[idx] = enable ? 
spi_get_chipselect(spi, 0) : SPI_INVALID_CS; - spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH; - if (spi->mode & SPI_CS_HIGH) + spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH; + if (spi->controller->last_cs_mode_high) enable = !enable; /* @@ -3802,7 +3800,7 @@ int spi_split_transfers_maxwords(struct spi_controller *ctlr, size_t maxsize; int ret; - maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word)); + maxsize = maxwords * spi_bpw_to_bytes(xfer->bits_per_word); if (xfer->len > maxsize) { ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, maxsize); @@ -4096,6 +4094,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) return -EINVAL; + /* DDR mode is supported only if controller has dtr_caps=true. + * default considered as SDR mode for SPI and QSPI controller. + * Note: This is applicable only to QSPI controller. + */ + if (xfer->dtr_mode && !ctlr->dtr_caps) + return -EINVAL; + /* * SPI transfer length should be multiple of SPI word size * where SPI word size should be power-of-two multiple. diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 7fb5a459847e..756b842dcd30 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -96,7 +96,9 @@ extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device_link *link); extern void pm_runtime_release_supplier(struct device_link *link); +int devm_pm_runtime_set_active_enabled(struct device *dev); extern int devm_pm_runtime_enable(struct device *dev); +int devm_pm_runtime_get_noresume(struct device *dev); /** * pm_suspend_ignore_children - Set runtime PM behavior regarding children. @@ -294,7 +296,9 @@ static inline bool pm_runtime_blocked(struct device *dev) { return true; } static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} +static inline int devm_pm_runtime_set_active_enabled(struct device *dev) { return 0; } static inline int devm_pm_runtime_enable(struct device *dev) { return 0; } +static inline int devm_pm_runtime_get_noresume(struct device *dev) { return 0; } static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} static inline void pm_runtime_get_noresume(struct device *dev) {} diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h index f950d280461b..9fbef3fd4056 100644 --- a/include/linux/spi/sh_msiof.h +++ b/include/linux/spi/sh_msiof.h @@ -2,6 +2,131 @@ #ifndef __SPI_SH_MSIOF_H__ #define __SPI_SH_MSIOF_H__ +#include <linux/bitfield.h> +#include <linux/bits.h> + +#define SITMDR1 0x00 /* Transmit Mode Register 1 */ +#define SITMDR2 0x04 /* Transmit Mode Register 2 */ +#define SITMDR3 0x08 /* Transmit Mode Register 3 */ +#define SIRMDR1 0x10 /* Receive Mode Register 1 */ +#define SIRMDR2 0x14 /* Receive Mode Register 2 */ +#define SIRMDR3 0x18 /* Receive Mode Register 3 */ +#define SITSCR 0x20 /* Transmit Clock Select Register */ +#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */ +#define SICTR 0x28 /* Control Register */ +#define SIFCTR 0x30 /* FIFO Control Register */ +#define SISTR 0x40 /* Status Register */ +#define SIIER 0x44 /* Interrupt Enable Register */ +#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */ +#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */ +#define SITFDR 0x50 /* Transmit FIFO Data Register */ +#define SIRDR1 0x58 /* Receive 
Control Data Register 1 (SH, A1) */ +#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */ +#define SIRFDR 0x60 /* Receive FIFO Data Register */ + +/* SITMDR1 and SIRMDR1 */ +#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */ +#define SIMDR1_SYNCMD GENMASK(29, 28) /* SYNC Mode */ +#define SIMDR1_SYNCMD_PULSE 0U /* Frame start sync pulse */ +#define SIMDR1_SYNCMD_SPI 2U /* Level mode/SPI */ +#define SIMDR1_SYNCMD_LR 3U /* L/R mode */ +#define SIMDR1_SYNCAC BIT(25) /* Sync Polarity (1 = Active-low) */ +#define SIMDR1_BITLSB BIT(24) /* MSB/LSB First (1 = LSB first) */ +#define SIMDR1_DTDL GENMASK(22, 20) /* Data Pin Bit Delay for MSIOF_SYNC */ +#define SIMDR1_SYNCDL GENMASK(18, 16) /* Frame Sync Signal Timing Delay */ +#define SIMDR1_FLD GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */ +#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */ +/* SITMDR1 */ +#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */ +#define SITMDR1_SYNCCH GENMASK(27, 26) /* Sync Signal Channel Select */ + /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */ + +/* SITMDR2 and SIRMDR2 */ +#define SIMDR2_GRP GENMASK(31, 30) /* Group Count */ +#define SIMDR2_BITLEN1 GENMASK(28, 24) /* Data Size (8-32 bits) */ +#define SIMDR2_WDLEN1 GENMASK(23, 16) /* Word Count (1-64/256 (SH, A1))) */ +#define SIMDR2_GRPMASK GENMASK(3, 0) /* Group Output Mask 1-4 (SH, A1) */ + +/* SITMDR3 and SIRMDR3 */ +#define SIMDR3_BITLEN2 GENMASK(28, 24) /* Data Size (8-32 bits) */ +#define SIMDR3_WDLEN2 GENMASK(23, 16) /* Word Count (1-64/256 (SH, A1))) */ + +/* SITSCR and SIRSCR */ +#define SISCR_BRPS GENMASK(12, 8) /* Prescaler Setting (1-32) */ +#define SISCR_BRDV GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */ + +/* SICTR */ +#define SICTR_TSCKIZ GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */ +#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */ +#define SICTR_TSCKIZ_POL BIT(30) /* Transmit Clock Polarity */ +#define SICTR_RSCKIZ GENMASK(29, 28) /* Receive Clock Polarity Select */ +#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */ +#define SICTR_RSCKIZ_POL BIT(28) /* Receive Clock Polarity */ +#define SICTR_TEDG BIT(27) /* Transmit Timing (1 = falling edge) */ +#define SICTR_REDG BIT(26) /* Receive Timing (1 = falling edge) */ +#define SICTR_TXDIZ GENMASK(23, 22) /* Pin Output When TX is Disabled */ +#define SICTR_TXDIZ_LOW 0U /* 0 */ +#define SICTR_TXDIZ_HIGH 1U /* 1 */ +#define SICTR_TXDIZ_HIZ 2U /* High-impedance */ +#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */ +#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */ +#define SICTR_TXE BIT(9) /* Transmit Enable */ +#define SICTR_RXE BIT(8) /* Receive Enable */ +#define SICTR_TXRST BIT(1) /* Transmit Reset */ +#define SICTR_RXRST BIT(0) /* Receive Reset */ + +/* SIFCTR */ +#define SIFCTR_TFWM GENMASK(31, 29) /* Transmit FIFO Watermark */ +#define SIFCTR_TFWM_64 0U /* Transfer Request when 64 empty stages */ +#define SIFCTR_TFWM_32 1U /* Transfer Request when 32 empty stages */ +#define SIFCTR_TFWM_24 2U /* Transfer Request when 24 empty stages */ +#define SIFCTR_TFWM_16 3U /* Transfer Request when 16 empty stages */ +#define SIFCTR_TFWM_12 4U /* Transfer Request when 12 empty stages */ +#define SIFCTR_TFWM_8 5U /* Transfer Request when 8 empty stages */ +#define SIFCTR_TFWM_4 6U /* Transfer Request when 4 empty stages */ +#define SIFCTR_TFWM_1 7U /* Transfer Request when 1 empty stage */ +#define SIFCTR_TFUA GENMASK(28, 20) /* Transmit FIFO Usable Area 
*/ +#define SIFCTR_RFWM GENMASK(15, 13) /* Receive FIFO Watermark */ +#define SIFCTR_RFWM_1 0U /* Transfer Request when 1 valid stages */ +#define SIFCTR_RFWM_4 1U /* Transfer Request when 4 valid stages */ +#define SIFCTR_RFWM_8 2U /* Transfer Request when 8 valid stages */ +#define SIFCTR_RFWM_16 3U /* Transfer Request when 16 valid stages */ +#define SIFCTR_RFWM_32 4U /* Transfer Request when 32 valid stages */ +#define SIFCTR_RFWM_64 5U /* Transfer Request when 64 valid stages */ +#define SIFCTR_RFWM_128 6U /* Transfer Request when 128 valid stages */ +#define SIFCTR_RFWM_256 7U /* Transfer Request when 256 valid stages */ +#define SIFCTR_RFUA GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */ + +/* SISTR */ +#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */ +#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */ +#define SISTR_TEOF BIT(23) /* Frame Transmission End */ +#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */ +#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */ +#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */ +#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */ +#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */ +#define SISTR_REOF BIT(7) /* Frame Reception End */ +#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */ +#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */ +#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */ + +/* SIIER */ +#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */ +#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */ +#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */ +#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */ +#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */ +#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */ +#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */ +#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */ +#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */ +#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */ +#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */ +#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */ +#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */ +#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */ + enum { MSIOF_SPI_HOST, MSIOF_SPI_TARGET, diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 6e64f0193777..4789f91dae94 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -136,13 +136,6 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg, * @max_speed_hz: Maximum clock rate to be used with this chip * (on this board); may be changed by the device's driver. * The spi_transfer.speed_hz can override this for each transfer. - * @chip_select: Array of physical chipselect, spi->chipselect[i] gives - * the corresponding physical CS for logical CS i. - * @mode: The spi mode defines how data is clocked out and in. - * This may be changed by the device's driver. - * The "active low" default for chipselect mode can be overridden - * (by specifying SPI_CS_HIGH) as can the "MSB first" default for - * each word in a transfer (by specifying SPI_LSB_FIRST). * @bits_per_word: Data transfers involve one or more words; word sizes * like eight or 12 bits are common. In-memory wordsizes are * powers of two bytes (e.g. 20 bit samples use 32 bits). 
@@ -150,6 +143,11 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg, * default (0) indicating protocol words are eight bit bytes. * The spi_transfer.bits_per_word can override this for each transfer. * @rt: Make the pump thread real time priority. + * @mode: The spi mode defines how data is clocked out and in. + * This may be changed by the device's driver. + * The "active low" default for chipselect mode can be overridden + * (by specifying SPI_CS_HIGH) as can the "MSB first" default for + * each word in a transfer (by specifying SPI_LSB_FIRST). * @irq: Negative, or the number passed to request_irq() to receive * interrupts from this device. * @controller_state: Controller's runtime state @@ -162,8 +160,7 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg, * the device will bind to the named driver and only the named driver. * Do not set directly, because core frees it; use driver_set_override() to * set or clear it. - * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines - * (optional, NULL when not using a GPIO line) + * @pcpu_statistics: statistics for the spi_device * @word_delay: delay to be inserted between consecutive * words of a transfer * @cs_setup: delay to be introduced by the controller after CS is asserted @@ -171,8 +168,11 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg, * @cs_inactive: delay to be introduced by the controller after CS is * deasserted. If @cs_change_delay is used from @spi_transfer, then the * two delays will be added up. - * @pcpu_statistics: statistics for the spi_device + * @chip_select: Array of physical chipselect, spi->chipselect[i] gives + * the corresponding physical CS for logical CS i. * @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array + * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines + * (optional, NULL when not using a GPIO line) * * A @spi_device is used to interchange data between an SPI target device * (usually a discrete chip) and CPU memory. @@ -187,7 +187,6 @@ struct spi_device { struct device dev; struct spi_controller *controller; u32 max_speed_hz; - u8 chip_select[SPI_CS_CNT_MAX]; u8 bits_per_word; bool rt; #define SPI_NO_TX BIT(31) /* No transmit wire */ @@ -218,23 +217,29 @@ struct spi_device { void *controller_data; char modalias[SPI_NAME_SIZE]; const char *driver_override; - struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */ + + /* The statistics */ + struct spi_statistics __percpu *pcpu_statistics; + struct spi_delay word_delay; /* Inter-word delay */ + /* CS delays */ struct spi_delay cs_setup; struct spi_delay cs_hold; struct spi_delay cs_inactive; - /* The statistics */ - struct spi_statistics __percpu *pcpu_statistics; + u8 chip_select[SPI_CS_CNT_MAX]; - /* Bit mask of the chipselect(s) that the driver need to use from - * the chipselect array.When the controller is capable to handle + /* + * Bit mask of the chipselect(s) that the driver need to use from + * the chipselect array. When the controller is capable to handle * multiple chip selects & memories are connected in parallel * then more than one bit need to be set in cs_index_mask. 
*/ u32 cs_index_mask : SPI_CS_CNT_MAX; + struct gpio_desc *cs_gpiod[SPI_CS_CNT_MAX]; /* Chip select gpio desc */ + /* * Likely need more hooks for more protocol options affecting how * the controller talks to each chip, like: @@ -500,6 +505,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * found. * @put_offload: release the offload instance acquired by @get_offload. * @mem_caps: controller capabilities for the handling of memory operations. + * @dtr_caps: true if controller has dtr(single/dual transfer rate) capability. + * QSPI based controller should fill this based on controller's capability. * @unprepare_message: undo any work done by prepare_message(). * @target_abort: abort the ongoing transfer request on an SPI target controller * @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS @@ -743,6 +750,9 @@ struct spi_controller { const struct spi_controller_mem_ops *mem_ops; const struct spi_controller_mem_caps *mem_caps; + /* SPI or QSPI controller can set to true if supports SDR/DDR transfer rate */ + bool dtr_caps; + struct spi_offload *(*get_offload)(struct spi_device *spi, const struct spi_offload_config *config); void (*put_offload)(struct spi_offload *offload); @@ -995,6 +1005,7 @@ struct spi_res { * processed the word, i.e. the "pre" timestamp should be taken before * transmitting the "pre" word, and the "post" timestamp after receiving * transmit confirmation from the controller for the "post" word. + * @dtr_mode: true if supports double transfer rate. * @timestamped: true if the transfer has been timestamped * @error: Error status logged by SPI controller driver. * @@ -1046,6 +1057,9 @@ struct spi_res { * two should both be set. User can set transfer mode with SPI_NBITS_SINGLE(1x) * SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to support these three transfer. * + * User may also set dtr_mode to true to use dual transfer mode if desired. if + * not, default considered as single transfer mode. + * * The code that submits an spi_message (and its spi_transfers) * to the lower layers is responsible for managing its memory. * Zero-initialize every field you don't set up explicitly, to @@ -1080,6 +1094,7 @@ struct spi_transfer { unsigned tx_nbits:4; unsigned rx_nbits:4; unsigned timestamped:1; + bool dtr_mode; #define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */ #define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */ #define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */ @@ -1323,6 +1338,32 @@ static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw) } /** + * spi_bpw_to_bytes - Covert bits per word to bytes + * @bpw: Bits per word + * + * This function converts the given @bpw to bytes. The result is always + * power-of-two, e.g., + * + * =============== ================= + * Input (in bits) Output (in bytes) + * =============== ================= + * 5 1 + * 9 2 + * 21 4 + * 37 8 + * =============== ================= + * + * It will return 0 for the 0 input. + * + * Returns: + * Bytes for the given @bpw. 
+ */ +static inline u32 spi_bpw_to_bytes(u32 bpw) +{ + return roundup_pow_of_two(BITS_TO_BYTES(bpw)); +} + +/** * spi_controller_xfer_timeout - Compute a suitable timeout value * @ctlr: SPI device * @xfer: Transfer descriptor diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h index b8fa30fd6b50..53663c4e5ae3 100644 --- a/include/memory/renesas-rpc-if.h +++ b/include/memory/renesas-rpc-if.h @@ -61,12 +61,14 @@ enum rpcif_type { RPCIF_RCAR_GEN3, RPCIF_RCAR_GEN4, RPCIF_RZ_G2L, + XSPI_RZ_G3E, }; struct rpcif { struct device *dev; void __iomem *dirmap; size_t size; + bool xspi; }; int rpcif_sw_init(struct rpcif *rpc, struct device *dev); @@ -75,5 +77,7 @@ void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs, size_t *len); int rpcif_manual_xfer(struct device *dev); ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf); +ssize_t xspi_dirmap_write(struct device *dev, u64 offs, size_t len, + const void *buf); #endif // __RENESAS_RPC_IF_H |